[Rivet-svn] r2205 - trunk/bin

blackhole at projects.hepforge.org blackhole at projects.hepforge.org
Thu Jan 7 22:52:35 GMT 2010


Author: buckley
Date: Thu Jan  7 22:52:34 2010
New Revision: 2205

Log:
Renaming rivet-specific scripts to use the rivet- prefix and two-part hyphenated form

Added:
   trunk/bin/rivet-chopbins
      - copied unchanged from r2204, trunk/bin/rivet-chop-bins
   trunk/bin/rivet-mergeruns
      - copied unchanged from r2204, trunk/bin/uemerge
   trunk/bin/rivet-mkhtml
      - copied, changed from r2204, trunk/bin/make-html
   trunk/bin/rivet-rescale
      - copied unchanged from r2204, trunk/bin/rivet-normalise-histos
   trunk/bin/rivet-rmgaps
      - copied unchanged from r2204, trunk/bin/gap_removal
Deleted:
   trunk/bin/aidamerge
   trunk/bin/chisquared
   trunk/bin/gap_removal
   trunk/bin/make-html
   trunk/bin/meta2yaml
   trunk/bin/rivet-chop-bins
   trunk/bin/rivet-normalise-histos
   trunk/bin/uemerge
Modified:
   trunk/bin/Makefile.am

Modified: trunk/bin/Makefile.am
==============================================================================
--- trunk/bin/Makefile.am	Sun Jan  3 19:31:50 2010	(r2204)
+++ trunk/bin/Makefile.am	Thu Jan  7 22:52:34 2010	(r2205)
@@ -1,11 +1,9 @@
-bin_SCRIPTS = rivet-config rivet-buildplugin
-dist_bin_SCRIPTS = aida2flat aida2root compare-histos make-html make-plots rivet-mkanalysis rivet-chop-bins rivet-normalise-histos
+bin_SCRIPTS = rivet-config 
+dist_bin_SCRIPTS = \
+	aida2flat aida2root \
+	compare-histos make-plots \
+    rivet \
+    rivet-mkanalysis rivet-buildplugin \
+    rivet-chopbins rivet-rmgaps rivet-rescale \
+    rivet-mergeruns rivet-mkhtml
 EXTRA_DIST = flat2aida
-
-if ENABLE_PYEXT
-dist_bin_SCRIPTS += rivet
-#TESTS_ENVIRONMENT = RIVET_ANALYSIS_PATH=$(top_srcdir)/plugindemo
-#TESTS = "rivet --list-analyses"
-else
-EXTRA_DIST += rivet
-endif

Copied: trunk/bin/rivet-chopbins (from r2204, trunk/bin/rivet-chop-bins)
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ trunk/bin/rivet-chopbins	Thu Jan  7 22:52:34 2010	(r2205, copy of r2204, trunk/bin/rivet-chop-bins)
@@ -0,0 +1,144 @@
+#!/usr/bin/env python
+"""%prog -b <HISTO/PATH:min:max> [ -b ... ] <AIDAFILE> [...]
+
+Strip specified bins from data sets. Histograms not specified will be passed
+through without any chopping. Bins to be kept can be specified on command
+line via `-b' options. The format is
+    -b AIDAPATH:start:stop
+where start and stop are x values contained in the first and last bins,
+respectively, that should be kept. They need not be the bin-center but
+must only lie somewhere in the bin's x-range.
+
+Chopping bins from different observables can be achieved by using the `-b'
+option multiple times.
+
+Example:
+    %prog -b /ALEPH_1996_S3486095/d03-x01-y01:0.095:0.27 out.aida
+This will give you all the bins of the ALEPH 1-T distribution that are
+between the bins that contain the x-values 0.095 and 0.27.
+
+TODO:
+    * what if the same observable is mentioned multiple times?
+"""
+
+import os
+import sys
+import logging
+
+import lighthisto
+## Make "sorted" a builtin function on Python < 2.4
+if 'sorted' not in dir(__builtins__):
+    def sorted(iterable, cmp=None, key=None, reverse=None):
+        rtn = iterable
+        rtn.sort(cmp)
+        return rtn
+
+## Add logging.log if needed
+if 'log' not in dir(logging):
+    def _logit(level, msg):
+        l = logging.getLogger()
+        l.log(level, msg)
+    logging.log = _logit
+
+
+## Try to load faster but non-standard cElementTree module
+try:
+    import xml.etree.cElementTree as ET
+except ImportError:
+    try:
+        import cElementTree as ET
+    except ImportError:
+        try:
+            import xml.etree.ElementTree as ET
+        except:
+            sys.stderr.write("Can't load the ElementTree XML parser: please install it!\n")
+            sys.exit(1)
+
+
+if __name__ == "__main__":
+    from optparse import OptionParser, OptionGroup
+    parser = OptionParser(usage=__doc__)
+
+    parser.add_option("-b", "--bins",
+                      action="append",
+                      help="Specify a histogram and bin range that is to be"
+                           " kept. The format is `AIDAPATH:start:stop'.")
+    parser.add_option("-o", "--out",
+                      dest="outdir",
+                      help="output directory (default: %default)")
+
+    verbgroup = OptionGroup(parser, "Verbosity control")
+    verbgroup.add_option("-V", "--verbose", action="store_const",
+                         const=logging.DEBUG, dest="LOGLEVEL",
+                         help="print debug (very verbose) messages")
+    verbgroup.add_option("-Q", "--quiet", action="store_const",
+                         const=logging.WARNING, dest="LOGLEVEL",
+                         help="be very quiet")
+    parser.set_defaults(bins=[],
+            outdir=".",
+            LOGLEVEL=logging.INFO)
+    opts, args = parser.parse_args()
+
+
+    ## Configure logging
+    try:
+        logging.basicConfig(level=opts.LOGLEVEL, format="%(message)s")
+    except:
+        pass
+    h = logging.StreamHandler()
+    h.setFormatter(logging.Formatter("%(message)s"))
+    logging.getLogger().setLevel(opts.LOGLEVEL)
+    if logging.getLogger().handlers:
+        logging.getLogger().handlers[0] = h
+    else:
+        logging.getLogger().addHandler(h)
+
+
+    if len(args) == 0:
+        sys.stderr.write("Must specify at least one AIDA histogram file!\n")
+        sys.exit(1)
+    if len(opts.bins) == 0:
+        sys.stderr.write("No bins specified, so I'm doing nothing!\n")
+        sys.exit(1)
+
+    bindefs = {}
+    for bd in opts.bins:
+        try:
+            path, low, high = bd.split(":")
+        except:
+            sys.stderr.write("Problem parsing bin definition `%s'" % (bd))
+            sys.exit(1)
+        if low == "":
+            low = None
+        else:
+            low = float(low)
+        if high == "":
+            high = None
+        else:
+            high = float(high)
+        bindefs[path] = (low, high)
+
+    for aidafile in args:
+        if not os.access(aidafile, os.R_OK):
+            logging.error("%s can not be read" % aidafile)
+            break
+
+        base, ext = os.path.splitext(os.path.basename(aidafile))
+        chopfile = os.path.join(opts.outdir, base + "-chop" + ext)
+        outhistos = []
+
+        tree = ET.parse(aidafile)
+        for dps in tree.findall("dataPointSet"):
+            thishist = lighthisto.Histo.fromDPS(dps)
+            if thishist.histopath in bindefs.keys():
+                outhistos.append(thishist.chop(bindefs[thishist.histopath]))
+            else:
+                outhistos.append(thishist)
+        out = open(chopfile, "w")
+        out.write('<?xml version="1.0" encoding="ISO-8859-1" ?>\n')
+        out.write('<!DOCTYPE aida SYSTEM "http://aida.freehep.org/schemas/3.3/aida.dtd">\n')
+        out.write('<aida version="3.3">\n')
+        out.write('  <implementation version="1.1" package="FreeHEP"/>\n')
+        out.write("\n\n".join([h.asAIDA() for h in sorted(outhistos)]) + "\n")
+        out.write("</aida>\n")
+        out.close()

Copied: trunk/bin/rivet-mergeruns (from r2204, trunk/bin/uemerge)
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ trunk/bin/rivet-mergeruns	Thu Jan  7 22:52:34 2010	(r2205, copy of r2204, trunk/bin/uemerge)
@@ -0,0 +1,537 @@
+#! /usr/bin/env python
+
+## Script for merging parts of multiple histo files made with
+## different run params (kinematic pT cuts and energies) into one
+## histo file for plotting or further analysis.
+##
+## TODO:
+##  * take merge specs from a conf file instead of hard-coding
+##  * generalise to more generic merge ranges (i.e. not just sqrts & pT)
+##  * improve cmd line interface
+##  * rationalise all histogramming formats... remove AIDA!
+##  * use external histo classes (YODA), since I've now lost track
+##    of which changes have had to be made to which copies of Histo, Bin etc.!
+##
+## Usage example:
+##  $ uemerge hwpp/hpp-1800-{030.aida:1800:30,090.aida:1800:90} > hpp-hists.dat
+##  $ flat2aida hpp-hists.dat 
+##  $ mkdir plots && cd plots
+##  $ compare-histos ../ref04.aida ../hpp-hists.aida 
+##  $ make-plots --pdf *.dat
+
+
+import sys, os, copy, re
+from math import sqrt
+
+
+try:
+    sorted([])
+except:
+    def sorted(coll):
+        coll.sort()
+        return coll
+
+
+def mean(*args):
+    total, num = 0, 0
+    for a in args:
+        if a is not None:
+            total += a
+            num += 1
+    return total / float(num)
+
+
+class Histo:
+    def __init__(self):
+        self.clear()
+
+    def clear(self):
+        self._bins = []
+        self.path = None
+        self.name = None
+        self.title = None
+        self.xtitle = None
+        self.ytitle = None
+
+    def __cmp__(self, other):
+        """Sort by $path/$name string"""
+        return self.fullPath() > other.fullPath()
+
+    def __str__(self):
+        out = "Histogram '%s' with %d bins\n" % (self.fullPath(), self.numBins())
+        if self.title:
+            out += "Title: %s\n" % self.title
+        if self.xtitle:
+            out += "XLabel: %s\n" % self.xtitle
+        if self.ytitle:
+            out += "YLabel: %s\n" % self.ytitle
+        out += "\n".join([str(b) for b in self.getBins()])
+        return out
+
+    def fullPath(self):
+        return os.path.join(self.path, self.name)
+
+    def asFlat(self):
+        #global headerprefix
+        headerprefix = ""
+        global opts
+        out = "# BEGIN HISTOGRAM %s\n" % self.fullPath()
+        out += headerprefix + "AidaPath=%s\n" % self.fullPath()
+        if self.title:
+            out += headerprefix + "Title=%s\n" % self.title
+        if self.xtitle:
+            out += headerprefix + "XLabel=%s\n" % self.xtitle
+        if self.ytitle:
+            out += headerprefix + "YLabel=%s\n" % self.ytitle
+        try:
+            out += "## Area: %s\n" % self.area()
+        except:
+            out += "## Area: UNKNOWN (invalid bin details)"
+        out += "## Num bins: %d\n" % self.numBins()
+#         if opts.GNUPLOT:
+#             out += "## xval  \tyval    \txlow    \txhigh    \tylow     \tyhigh\n"
+#         else:
+        #out += "## xlow  \txhigh   \tyval    \tyerrminus\tyerrplus\n"
+        out += "\n".join([b.asFlat() for b in self.getBins()])
+        out += "\n# END HISTOGRAM"
+        return out
+
+    def numBins(self):
+        return len(self._bins)
+
+    def getBins(self):
+        return sorted(self._bins)
+
+    def setBins(self, bins):
+        self._bins = bins
+        return self
+
+    def setBin(self, index, bin):
+        self._bins[index] = bin
+        return self
+
+    def addBin(self, bin):
+        self._bins.append(bin)
+        return self
+
+    def getBin(self, index):
+        self._bins.sort()
+        return self.getBins()[index]
+
+    bins = property(getBins, setBins)
+
+    def area(self):
+        return sum([bin.area() for bin in self.bins])
+
+    def __iter__(self):
+        return iter(self.getBins())
+
+    def __len__(self):
+        return len(self._bins)
+
+    def __getitem__(self, index):
+        return self.getBin(index)
+
+
+class Bin:
+    """A simple container for a binned value with an error."""
+    def __init__(self, xlow=None, xhigh=None, yval=0, yerrplus=0, yerrminus=0, focus=None):
+        self.xlow = xlow
+        self.xhigh= xhigh
+        self.yval = yval
+        self.yerrplus = yerrplus
+        self.yerrminus = yerrminus
+        self.focus= focus
+
+    def clear(self):
+        #self.xlow = None
+        #self.xhigh= None
+        self.yval = 0
+        self.yerrplus = 0
+        self.yerrminus = 0
+        self.focus= None
+
+    def __str__(self):
+        meanyerr = None
+        try:
+            meanyerr = mean(self.yerrplus, self.yerrminus)
+        except:
+            pass
+        out = "%s to %s: %s +- %s" % (str(self.xlow), str(self.xhigh), str(self.yval), str(meanyerr))
+        return out
+
+    def asFlat(self):
+        global opts
+#         if opts.GNUPLOT:
+#             out = "%e\t%e\t%e\t%e\t%e\t%e" % (self.getBinCenter(), self.yval,
+#                                               self.xlow, self.xhigh, 
+#                                               self.yval-self.yerrminus, self.yval+self.yerrplus)
+#         else:
+        out = "%e\t%e\t%e\t%e" % (self.xlow, self.xhigh, self.yval, 0.5*(self.yerrminus+self.yerrplus))
+        return out
+
+    def __cmp__(self, other):
+        """Sort by mean x value (yeah, I know...)"""
+        rtn = True
+        lhnone = (self.xlow is None or self.xhigh is None)
+        rhnone = (other.xlow is None or other.xhigh is None)
+        somenones = lhnone or rhnone
+        if somenones:
+            if lhnone == rhnone:
+                return 0
+            elif lhnone:
+                return -1
+            else:
+                return 1;
+        else:
+            return cmp(self.xlow + self.xhigh, other.xlow + other.xhigh)
+
+    def getXRange(self):
+        return (self.xlow, self.xhigh)
+
+    def setXRange(self, xlow, xhigh):
+        self.xlow = xlow
+        self.xhigh = xhigh
+        return self
+
+    def getBinCenter(self):
+        """Geometric middle of the bin range."""
+        return self.xlow + .5*(self.xhigh - self.xlow)
+
+    def getFocus(self):
+        """Mean x-value of the bin."""
+        if self.focus is not None:
+            return (self.xlow + self.xhigh)/2.0
+        else:
+            return self.focus
+
+    def area(self):
+        return self.yval * (self.xhigh - self.xlow)
+
+    def getYErr(self):
+        """Get mean of +ve and -ve y-errors."""
+        return (self.yerrplus + self.yerrminus)/2.0
+
+    def setYErr(self, yerr):
+        """Set both +ve and -ve y-errors simultaneously."""
+        self.yerrplus = yerr
+        self.yerrminus = yerr
+        return self
+
+
+
+## Try to load faster but non-standard cElementTree module
+try:
+    import xml.etree.cElementTree as ET
+except ImportError:
+    try:
+        import cElementTree as ET
+    except ImportError:
+        try:
+            import xml.etree.ElementTree as ET
+        except:
+            sys.stderr.write("Can't load the ElementTree XML parser: please install it!\n")
+            sys.exit(1)
+
+
+def mkHistoFromDPS(dps):
+    """Make a mini histo representation from an AIDA dataPointSet tag."""
+    myhist = Histo()
+    myhist.name = dps.get("name")
+    myhist.title = dps.get("title")
+    myhist.path = dps.get("path")
+    dims = dps.findall("dimension")
+    for d in dims:
+        if d.get("dim") == "0":
+            myhist.xtitle = d.get("title")
+        elif d.get("dim") == "1":
+            myhist.ytitle = d.get("title")
+    points = dps.findall("dataPoint")
+    numbins = len(points)
+    for binnum, point in enumerate(points):
+        bin = Bin()
+        for d, m in enumerate(point.findall("measurement")):
+            val  = float(m.get("value"))
+            down = float(m.get("errorMinus"))
+            up = float(m.get("errorPlus"))
+            if d == 0:
+                low  = val - down
+                high = val + up
+                bin.setXRange(low, high)
+            elif d == 1:
+                bin.yval = val
+                bin.yerrplus = up
+                bin.yerrminus = down
+        myhist.addBin(bin)
+    return myhist
+
+
+
+#############################################
+
+
+
+def fillAbove(desthisto, sourcehistosbyptmin):
+    for i, b in enumerate(desthisto.getBins()):
+        ## Fill bins with pT-ordered histos (so that 'highest always wins')
+        for ptmin, h in sorted(sourcehistosbyptmin.iteritems()):
+            newb = h.getBin(i)
+            if newb.xlow >= float(ptmin):
+                desthisto.setBin(i, newb)
+                ##### This logging line breaks the output!!!!!!!
+                ####logging.debug("Copied: %s / %s" % (str(b), str(histosS[hpath][sqrts].getBin(i))) )
+    
+def mergeByPt(hpath, sqrts):
+    global inhistos
+    global outhistos
+    try:
+        fillAbove(outhistos[hpath], inhistos[hpath][sqrts])
+    except:
+        pass
+
+def useOnePt(hpath, sqrts, ptmin):
+    global inhistos
+    global outhistos
+    try:
+        ## Find best pT_min match
+        ptmins = inhistos[hpath][sqrts].keys()
+        closest_ptmin = None
+        for ptm in ptmins:
+            if closest_ptmin is None or \
+                    abs(float(ptm)-float(ptmin)) < abs(closest_ptmin-float(ptmin)):
+                closest_ptmin = float(ptm)
+        if closest_ptmin != float(ptmin):
+            logging.warning("Inexact match for requested pTmin=%s: " % ptmin + \
+                                "using pTmin=%f instead" % closest_ptmin)
+        outhistos[hpath] =  inhistos[hpath][sqrts][closest_ptmin]
+    except:
+        pass
+
+
+
+#######################################
+
+
+
+
+if __name__ == "__main__":
+    import logging
+    from optparse import OptionParser, OptionGroup
+    parser = OptionParser(usage="%prog aidafile:sqrts:minpt aidafile2:sqrts:minpt [...]")
+    parser.add_option("-o", "--out", dest="OUTFILE", default="-")
+    parser.add_option("--append", dest="APPEND_OUTPUT", action="store_true", default=False)
+    verbgroup = OptionGroup(parser, "Verbosity control")
+    verbgroup.add_option("-V", "--verbose", action="store_const", const=logging.DEBUG, dest="LOGLEVEL",
+                         default=logging.INFO, help="print debug (very verbose) messages")
+    verbgroup.add_option("-Q", "--quiet", action="store_const", const=logging.WARNING, dest="LOGLEVEL",
+                         default=logging.INFO, help="be very quiet")
+    parser.add_option_group(verbgroup)
+    (opts, args) = parser.parse_args()
+    logging.basicConfig(level=opts.LOGLEVEL, format="%(message)s")
+
+
+    ## Prefix used in dat file headers
+    headerprefix = "# "
+
+
+    ## Check args
+    if len(args) < 1:
+        logging.error("Must specify at least one AIDA histogram file")
+        sys.exit(1)
+
+
+    ## Get histos
+    inhistos = {}
+    weights = {}
+    try:
+        for aidafile_ptmin in args:
+            aidafile, sqrts, ptmin = None, None, None
+            try:
+                aidafile, sqrts, ptmin = aidafile_ptmin.rsplit(":", 2)
+            except ValueError, v:
+                raise Exception("Did you supply the file arguments in the 'name:sqrts:ptmin' format?")
+
+
+            ## Parse this AIDA file as XML
+            if not os.access(aidafile, os.R_OK):
+                logging.error("%s can not be read" % aidafile)
+                break
+            try:
+                tree = ET.parse(aidafile)
+            except:
+                logging.error("%s can not be parsed as XML" % aidafile)
+                break
+            tree = ET.parse(aidafile)
+
+
+            ## Get histos from this AIDA file
+            for dps in tree.findall("dataPointSet"):
+                h = mkHistoFromDPS(dps)
+                if not inhistos.has_key(h.fullPath()):
+                    inhistos[h.fullPath()] = {}
+                tmpE = inhistos[h.fullPath()]
+                if not tmpE.has_key(sqrts):
+                    tmpE[sqrts] = {}
+                tmpP = tmpE[sqrts]
+                if not tmpP.has_key(float(ptmin)):
+                    tmpP[float(ptmin)] = h
+                else:
+                    raise Exception("A set with sqrt(s) = %s, and ptmin = %s already exists" % (sqrts, ptmin))
+    except Exception, e:
+        logging.error("Danger, Will Robinson!")
+        logging.error(str(e))
+        sys.exit(1)
+
+
+    ## Make empty output histos
+    outhistos = {}
+    for hpath, hsets in sorted(inhistos.iteritems()):
+        logging.debug(hpath + " " + str(dict([(sqrts, hsets[sqrts].keys()) for sqrts in hsets.keys()])))
+        workhisto = copy.deepcopy(hsets.values()[0].values()[0])
+        logging.debug(workhisto)
+        outhistos[hpath] = workhisto
+        ## There's no reason to merge reference histos
+        if re.match(r'^/REF.*', hpath):
+            continue
+        ## Empty the bin set for histos which we're going to merge
+        for b in outhistos[hpath]:
+            b.clear()
+        logging.debug(outhistos[hpath])
+
+
+    ######
+
+
+    ## Field analysis
+    logging.info("Processing CDF_2001_S4751469")
+    ## Angular distributions in different pT bins
+    useOnePt("/CDF_2001_S4751469/d01-x01-y01", "1800", "0")
+    useOnePt("/CDF_2001_S4751469/d01-x01-y02", "1800", "0")
+    useOnePt("/CDF_2001_S4751469/d01-x01-y03", "1800", "30")
+    useOnePt("/CDF_2001_S4751469/d02-x01-y01", "1800", "0")
+    useOnePt("/CDF_2001_S4751469/d02-x01-y02", "1800", "0")
+    useOnePt("/CDF_2001_S4751469/d02-x01-y03", "1800", "30")
+    ## Number, profile in pT_lead (True?)
+    useOnePt("/CDF_2001_S4751469/d03-x01-y01", "1800", "0")
+    useOnePt("/CDF_2001_S4751469/d03-x01-y02", "1800", "0")
+    useOnePt("/CDF_2001_S4751469/d03-x01-y03", "1800", "0")
+    mergeByPt("/CDF_2001_S4751469/d04-x01-y01", "1800")
+    mergeByPt("/CDF_2001_S4751469/d04-x01-y02", "1800")
+    mergeByPt("/CDF_2001_S4751469/d04-x01-y03", "1800")
+    ## pT sums, profile in pT_lead (True?)
+    useOnePt("/CDF_2001_S4751469/d05-x01-y01", "1800", "0")
+    useOnePt("/CDF_2001_S4751469/d05-x01-y02", "1800", "0")
+    useOnePt("/CDF_2001_S4751469/d05-x01-y03", "1800", "0")
+    mergeByPt("/CDF_2001_S4751469/d06-x01-y01", "1800")
+    mergeByPt("/CDF_2001_S4751469/d06-x01-y02", "1800")
+    mergeByPt("/CDF_2001_S4751469/d06-x01-y03", "1800")
+    ## pT distributions (use a specific pT cut run?)
+    useOnePt("/CDF_2001_S4751469/d07-x01-y01", "1800", "0")
+    useOnePt("/CDF_2001_S4751469/d07-x01-y02", "1800", "0")
+    useOnePt("/CDF_2001_S4751469/d07-x01-y03", "1800", "30")
+
+    ## Acosta analysis
+    logging.info("Processing CDF_2004_S5839831")
+    ## Mean pT, profile in ET_lead
+    mergeByPt("/CDF_2004_S5839831/d01-x01-y01", "1800")
+    mergeByPt("/CDF_2004_S5839831/d01-x01-y02", "1800")
+    ## pT_max,min, profiles in ET_lead
+    mergeByPt("/CDF_2004_S5839831/d02-x01-y01", "1800")
+    mergeByPt("/CDF_2004_S5839831/d02-x01-y02", "1800")
+    mergeByPt("/CDF_2004_S5839831/d02-x01-y03", "1800")
+    ## pT distributions (want to use a specific pT cut run)
+    useOnePt("/CDF_2004_S5839831/d03-x01-y01", "1800", "40")
+    useOnePt("/CDF_2004_S5839831/d03-x01-y02", "1800", "80")
+    useOnePt("/CDF_2004_S5839831/d03-x01-y03", "1800", "120")
+    useOnePt("/CDF_2004_S5839831/d03-x01-y04", "1800", "160")
+    useOnePt("/CDF_2004_S5839831/d03-x01-y05", "1800", "200")
+    ## N_max,min, profiles in ET_lead
+    mergeByPt("/CDF_2004_S5839831/d04-x01-y01", "1800")
+    mergeByPt("/CDF_2004_S5839831/d04-x01-y02", "1800")
+    ## Min bias dbs (want to use min bias pT cut)
+    useOnePt("/CDF_2004_S5839831/d05-x01-y01", "1800", "0")
+    useOnePt("/CDF_2004_S5839831/d06-x01-y01", "1800", "0")
+    ## Swiss Cheese, profile in ET_lead
+    mergeByPt("/CDF_2004_S5839831/d07-x01-y01", "1800")
+    mergeByPt("/CDF_2004_S5839831/d07-x01-y02", "1800")
+    ## pT_max,min, profiles in ET_lead
+    mergeByPt("/CDF_2004_S5839831/d08-x01-y01", "630")
+    mergeByPt("/CDF_2004_S5839831/d08-x01-y02", "630")
+    mergeByPt("/CDF_2004_S5839831/d08-x01-y03", "630")
+    ## Swiss Cheese, profile in ET_lead
+    mergeByPt("/CDF_2004_S5839831/d09-x01-y01", "630")
+    mergeByPt("/CDF_2004_S5839831/d09-x01-y02", "630")
+    ## Min bias dbs (want to use min bias pT cut)
+    useOnePt("/CDF_2004_S5839831/d10-x01-y01", "630", "0")
+    useOnePt("/CDF_2004_S5839831/d11-x01-y01", "630", "0")
+
+    ## Rick Field Run-II Leading Jets analysis
+    logging.info("Processing CDF_2008_LEADINGJETS")
+    ## charged particle density
+    mergeByPt("/CDF_2008_LEADINGJETS/d01-x01-y01", "1960")
+    mergeByPt("/CDF_2008_LEADINGJETS/d02-x01-y01", "1960")
+    mergeByPt("/CDF_2008_LEADINGJETS/d03-x01-y01", "1960")
+    mergeByPt("/CDF_2008_LEADINGJETS/d04-x01-y01", "1960")
+    ## pT sum density
+    mergeByPt("/CDF_2008_LEADINGJETS/d05-x01-y01", "1960")
+    mergeByPt("/CDF_2008_LEADINGJETS/d06-x01-y01", "1960")
+    mergeByPt("/CDF_2008_LEADINGJETS/d07-x01-y01", "1960")
+    mergeByPt("/CDF_2008_LEADINGJETS/d08-x01-y01", "1960")
+    ## mean pT
+    mergeByPt("/CDF_2008_LEADINGJETS/d09-x01-y01", "1960")
+
+    ## D0 dijet correlation analysis
+    logging.info("Processing D0_2004_S5992206")
+    useOnePt("/D0_2004_S5992206/d01-x02-y01", "1960", "50")
+    useOnePt("/D0_2004_S5992206/d02-x02-y01", "1960", "75")
+    useOnePt("/D0_2004_S5992206/d03-x02-y01", "1960", "100")
+    useOnePt("/D0_2004_S5992206/d04-x02-y01", "1960", "150")
+
+    ## D0 incl jet cross-section analysis
+    logging.info("Processing D0_2008_S7662670")
+    mergeByPt("/D0_2008_S7662670/d01-x01-y01", "1960")
+    mergeByPt("/D0_2008_S7662670/d02-x01-y01", "1960")
+    mergeByPt("/D0_2008_S7662670/d03-x01-y01", "1960")
+    mergeByPt("/D0_2008_S7662670/d04-x01-y01", "1960")
+    mergeByPt("/D0_2008_S7662670/d05-x01-y01", "1960")
+    mergeByPt("/D0_2008_S7662670/d06-x01-y01", "1960")
+
+    ## STAR inclusive jet cross-section
+    logging.info("Processing STAR_2006_S6870392")
+    useOnePt("/STAR_2006_S6870392/d01-x01-y01", "200", "0")
+    useOnePt("/STAR_2006_S6870392/d02-x01-y01", "200", "3")
+
+    ## STAR underlying event (Helen Caines)
+    logging.info("Processing STAR_2009_UE_HELEN")
+    mergeByPt("/STAR_2009_UE_HELEN/d01-x01-y01", "200")
+    mergeByPt("/STAR_2009_UE_HELEN/d02-x01-y01", "200")
+    mergeByPt("/STAR_2009_UE_HELEN/d03-x01-y01", "200")
+
+    ## Choose output file
+    out = None
+    if opts.OUTFILE == "-":
+        out = sys.stdout
+    else:
+        if opts.APPEND_OUTPUT:
+            out = open(opts.OUTFILE, "a")
+        else:
+            out = open(opts.OUTFILE, "w")
+
+
+    ## Write out merged histos
+    for hpath, h in sorted(outhistos.iteritems()):
+        logging.debug("hpath = %s" % hpath)
+        out.write(h.asFlat() + "\n\n")
+
+
+    sys.exit(0)
+    ## Write to multiple auto-named dat files
+    for hpath, h in sorted(outhistos.iteritems()):
+        logging.debug("hpath = %s" % hpath)
+        safename = hpath.replace("/", "_") + ".dat"
+        if safename[0] == "_":
+            safename = safename[1:]
+        logging.info("Writing histo to %s" % safename)
+        f = open(safename, "w")
+        f.write(h.asFlat() + "\n")
+        f.close()

Copied and modified: trunk/bin/rivet-mkhtml (from r2204, trunk/bin/make-html)
==============================================================================
--- trunk/bin/make-html	Sun Jan  3 19:31:50 2010	(r2204, copy source)
+++ trunk/bin/rivet-mkhtml	Thu Jan  7 22:52:34 2010	(r2205)
@@ -19,7 +19,7 @@
 
 
 usage = """Webpages from histogram files written out by Rivet.
-You can specify multiple Monte-Carlo aida files to be compared. Reference data
+You can specify multiple Monte Carlo AIDA files to be compared. Reference data
 should be found automatically.
 
 Examples:
@@ -71,21 +71,25 @@
 ch_cmd.append("--mc-errs")
 ch_cmd.append("--hier-out")
 ch_cmd.append("--plot-info-dir=../")
-if len(aidafiles)+len(reffiles)<2:
+if len(aidafiles)+len(reffiles) < 2:
     ch_cmd.append("--show-ref-only")
     ch_cmd.append("--no-ratio")
 for file in aidafiles+reffiles:
     ch_cmd.append("%s" % os.path.abspath(file))
 Popen(ch_cmd, cwd=opts.OUTPUTDIR).wait()
 
+style = """<style>
+  html { font-family: sans-serif; }
+  img { border: 0; }
+</style>"""
 
 index = open(os.path.join(opts.OUTPUTDIR, "index.html"), "w")
-index.write('<html><head><title>%s</title></head><body>' % opts.OUTPUTDIR)
+index.write('<html>\n<head>\n<title>%s</title>\n%s</head>\n<body>' % (opts.OUTPUTDIR, style))
 for analysis in sorted(analyses):
     anapath = os.path.join(opts.OUTPUTDIR, analysis)
     anaindex = open(os.path.join(anapath, "index.html"), 'w')
-    anaindex.write("<html><head><title>%s - %s</title></head><body>\n" % (opts.OUTPUTDIR, analysis))
-    
+    anaindex.write('<html>\n<head>\n<title>%s - %s</title>\n%s</head>\n<body>' % (opts.OUTPUTDIR, analysis, style))
+
     datfiles = glob.glob("%s/*.dat" % anapath)
     for fulldatfile in sorted(datfiles):
         datfile = os.path.basename(fulldatfile)
@@ -102,15 +106,15 @@
 
         psfile = datfile.replace(".dat", ".ps")
         if os.access(os.path.join(anapath, psfile), os.R_OK):
-            # convert to png and add to webpage
+            # Convert to png and add to web page
             pngfile = datfile.replace(".dat", ".png")
             Popen(["convert", "-density", "100", psfile, pngfile], cwd=anapath)
-            anaindex.write('<div style="float:left;">')
-            anaindex.write('<a href="%s" style="font-size:small;text-decoration:none;font-weight:bold;">' % psfile)
+            anaindex.write('  <div style="float:left;">')
+            anaindex.write('<a href="%s" style="font-size:smaller; text-decoration:none; font-weight:bold;">' % psfile)
             anaindex.write('%s:<br><img border="0" src="%s"></a></div>\n' % (psfile, pngfile))
 
-    anaindex.write("</body></html>\n")
+    anaindex.write("</body>\n</html>\n")
     index.write('<h1><a href="%s/index.html" style="text-decoration:none;">%s</a></h1>\n' %(analysis, analysis))
     description = Popen(["rivet", "--show-analysis", analysis], stdout=PIPE).communicate()[0]
-    index.write('<pre>%s</pre>\n' % description)
-index.write('</body></html>')
+    index.write('<p style="white-space: pre;">%s</p>\n' % description)
+index.write('</body>\n</html>')

Copied: trunk/bin/rivet-rescale (from r2204, trunk/bin/rivet-normalise-histos)
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ trunk/bin/rivet-rescale	Thu Jan  7 22:52:34 2010	(r2205, copy of r2204, trunk/bin/rivet-normalise-histos)
@@ -0,0 +1,275 @@
+#!/usr/bin/env python
+
+"""%prog -r <REFDATAPATH> -O <observable-file> <AIDAFILE>
+
+Normalise histos in observable-file of AIDAFILE to the area of the
+corresponding histos in REFAIDAPATH. REFAIDAPATH can either be
+a single AIDA-file or a directory containing AIDA-files.
+
+Or, if no REFAIDAPATH is given, normalise to new area given in
+observable-file "observables_and_areas", e.g.:
+CDF_2000_S4155203_d01-x01-y01  1.0
+
+You can also define bins to chop out in the observable-file in the
+same way as in chop_bins:
+CDF_2000_S4155203_d01-x01-y01:0:35  1.0
+This will chop the bins with Z-pT > 35 GeV and afterwards perform
+the renormalisation.
+
+
+Example:
+    %prog -r /usr/share/Rivet/ -O observables out.aida
+This will return the Z-boson pT-distribution, normalised to the histo-area
+measured by CDF.
+    %prog -O observables_and_areas out.aida
+This will return the Z-boson pT-distribution, normalised to 1.0
+
+"""
+
import os, re, sys, logging
# lighthisto is a Rivet-local helper module providing the Histo class used
# to parse and serialise AIDA dataPointSets.
from lighthisto import Histo
## Make "sorted" a builtin function on Python < 2.4
# NOTE(review): this fallback sorts in place and ignores the key/reverse
# arguments -- adequate only for the plain sorted() calls in this script.
if not 'sorted' in dir(__builtins__):
    def sorted(iterable, cmp=None, key=None, reverse=None):
        rtn = iterable
        rtn.sort(cmp)
        return rtn

## Add logging.log if needed
# Compatibility shim for very old logging modules that lack the
# module-level log() function.
if not 'log' in dir(logging):
    def _logit(level, msg):
        l = logging.getLogger()
        l.log(level, msg)
    logging.log = _logit

# Optional embedded IPython shell for interactive debugging; silently
# degrades to a log message when IPython is not installed.
try:
    from IPython.Shell import IPShellEmbed
    ipshell = IPShellEmbed([])
except:
    logging.info("Ipython shell not available.")


## Try to load faster but non-standard cElementTree module
# Fall back through the historical ElementTree import locations, bailing
# out only if no XML parser is available at all.
try:
    import xml.etree.cElementTree as ET
except ImportError:
    try:
        import cElementTree as ET
    except ImportError:
        try:
            import xml.etree.ElementTree as ET
        except:
            sys.stderr.write("Can't load the ElementTree XML parser: please install it!\n")
            sys.exit(1)
+
+
def getHistosFromAIDA(aidafile):
    """Parse *aidafile* and return a dict of Histo objects keyed by path.

    Exits with status 2 if the file name does not end in .aida or the
    file is unreadable.  Paths beginning with /REF are reference data:
    the marker is stripped from the key and the histo's isdata flag set.
    """
    ## Refuse anything that does not look like an AIDA file.
    if not re.match(r'.*\.aida$', aidafile):
        logging.error("Error: input file '%s' is not an AIDA file" % aidafile)
        sys.exit(2)
    fullpath = os.path.abspath(aidafile)
    if not os.access(fullpath, os.R_OK):
        logging.error("Error: cannot read from %s" % aidafile)
        sys.exit(2)

    ## Wrap every dataPointSet element in a Histo, keyed by its path name.
    histodict = {}
    for dps in ET.parse(fullpath).findall("dataPointSet"):
        hpath = os.path.join(dps.get("path"), dps.get("name"))
        histo = Histo.fromDPS(dps)
        histo.isdata = hpath.upper().startswith("/REF")
        if histo.isdata:
            hpath = hpath.replace("/REF", "")
        histodict[hpath] = histo
    return histodict
+
def getRefHistos(refpath):
    """Return a dict {name: histo} of reference histograms.

    *refpath* may be a single AIDA file or a directory containing AIDA
    files; None yields an empty dict.  When scanning a directory the
    first file defining a given histogram name wins.
    """
    refhistos = {}
    if refpath is None:
        return refhistos
    # BUGFIX: the original distinguished file vs. directory by calling
    # getHistosFromAIDA and catching *any* exception with a bare except --
    # including the SystemExit that getHistosFromAIDA raises on error.
    # Branch explicitly on the path type instead.
    if os.path.isfile(refpath):
        refhistos = getHistosFromAIDA(refpath)
    else:
        for aida in os.listdir(refpath):
            if aida.endswith(".aida"):
                temp = getHistosFromAIDA(os.path.join(refpath, aida))
                for k, v in temp.iteritems():
                    # Keep the first definition of each name.
                    if k not in refhistos:
                        refhistos[k] = v
    return refhistos
+
def readObservableFile(obsfile):
    """Parse the observable file *obsfile*.

    Each non-comment line has the form
        NAME[:LOW:HIGH]  [NEWAREA]
    Returns a tuple (obslist, obsnorms, bindefs): the list of histogram
    names, a dict name -> new area (where given), and a dict
    name -> (low, high) chop range with None meaning unbounded.
    Exits with status 2 if the file cannot be opened.
    """
    obslist = []
    obsnorms = {}
    bindefs = {}

    if obsfile is None:
        return obslist, obsnorms, bindefs

    try:
        f = open(obsfile, 'r')
    except IOError:
        # BUGFIX: this message used the global opts.obsfile; report the
        # argument this function was actually given.
        logging.error("Cannot open histo list file %s" % obsfile)
        sys.exit(2)
    for line in f:
        stripped = line.strip()
        # Skip empty or commented lines
        if len(stripped) == 0 or stripped.startswith("#"):
            continue

        # First token is NAME or NAME:LOW:HIGH; an optional second token
        # is the new area to normalise to.
        splitline = stripped.split()

        try:
            path, low, high = splitline[0].split(":")
        except ValueError:
            # No (or malformed) bin spec: keep the name, chop nothing.
            path = splitline[0].split(":")[0]
            low  = ""
            high = ""
            logging.warning("No bin definition given for %s" % (line))
        if low == "":
            low = None
        else:
            low = float(low)
        if high == "":
            high = None
        else:
            high = float(high)
        bindefs[path] = (low, high)

        if len(splitline) == 2:
            obsnorms[path] = float(splitline[1])
        obslist.append(path)
    f.close()
    return obslist, obsnorms, bindefs
+
+
if __name__ == "__main__":
    from optparse import OptionParser, OptionGroup
    parser = OptionParser(usage=__doc__)

    parser.add_option("-O", "--obsfile", default=None,
                      help="Specify a file with histograms (and bin ranges) " +
                      " that are to be normalised.")
    parser.add_option("-r", "--refdir", default=None,
                      help="File or folder with reference histos")
    parser.add_option("-o", "--outdir",
                      dest="outdir", default="renormalised",
                      help="output directory (default: %default)")
    parser.add_option("-a", dest="AIDA", default=True, action="store_true",
                      help="Produce AIDA output rather than FLAT")
    parser.add_option("-f", dest="AIDA", action="store_false",
                      help="Produce FLAT output rather than AIDA")
    verbgroup = OptionGroup(parser, "Verbosity control")
    verbgroup.add_option("-V", "--verbose", action="store_const",
                         const=logging.DEBUG, dest="LOGLEVEL",
                         help="print debug (very verbose) messages")
    verbgroup.add_option("-Q", "--quiet", action="store_const",
                         const=logging.WARNING, dest="LOGLEVEL",
                         help="be very quiet")
    # BUGFIX: the verbosity group was created but never attached to the
    # parser, so -V/-Q were rejected on the command line.
    parser.add_option_group(verbgroup)
    # NOTE(review): this overrides the "renormalised" default declared for
    # -o/--outdir above, so the "%default" shown in --help is misleading.
    # Kept unchanged to preserve behaviour; confirm which default is meant.
    parser.set_defaults(bins=[],
            outdir=".",
            LOGLEVEL=logging.INFO)
    opts, args = parser.parse_args()

    # Fail with a usage message instead of an IndexError below.
    if not args:
        parser.error("Must specify an AIDA file to rescale")

    # Configure logging
    try:
        logging.basicConfig(level=opts.LOGLEVEL, format="%(message)s")
    except:
        pass
    h = logging.StreamHandler()
    h.setFormatter(logging.Formatter("%(message)s"))
    logging.getLogger().setLevel(opts.LOGLEVEL)
    if logging.getLogger().handlers:
        logging.getLogger().handlers[0] = h
    else:
        logging.getLogger().addHandler(h)


    # Read in the histogram file to normalise
    histos = getHistosFromAIDA(args[0])

    # Read in reference histos to get reference areas to normalise to
    refhistos = getRefHistos(opts.refdir)

    # Read in observables; if no observable file is given (or it is empty)
    # all observables will be renormalised if possible
    obslist, obsnorms, bindefs = readObservableFile(opts.obsfile)
    if len(obslist) == 0:
        # BUGFIX: the message was passed as two positional arguments to
        # logging.warning, where the second was treated as a %-format
        # argument rather than appended; pass one concatenated string.
        logging.warning("No observable file given or empty, will try to "
                        "normalise all histos.")
        obslist = histos.keys()


    # Create output filename from the input basename
    base = args[0].split("/")[-1].split(".aida")[0]
    if opts.AIDA:
        outfile = os.path.join(opts.outdir, base + "-renormed.aida")
    else:
        outfile = os.path.join(opts.outdir, base + "-renormed.dat")
    if not os.path.exists(opts.outdir):
        logging.info("Creating outdir %s"%opts.outdir)
        os.mkdir(opts.outdir)

    aidaheader = """<?xml version="1.0" encoding="ISO-8859-1" ?>
<!DOCTYPE aida SYSTEM "http://aida.freehep.org/schemas/3.3/aida.dtd">
<aida version="3.3">
  <implementation version="1.1" package="FreeHEP"/>
    """
    # Open file for output
    f = open(outfile, "w")
    if opts.AIDA:
        f.write(aidaheader)


    # Iterate over all histos
    for name, histo in histos.iteritems():
        # Use the reference histo as the area source if we have one,
        # otherwise normalise the histo to its own (possibly chopped) area
        if name in refhistos.keys():
            tempref = refhistos[name]
        else:
            tempref = histos[name]

        # Chop bins first if a bin range was given for this observable
        if name in bindefs.keys():
            tempref = tempref.chop(bindefs[name])
            tempold = histos[name].chop(bindefs[name])
        else:
            tempold = histos[name]

        # Get old and new histogram areas; an explicit area from the
        # observable file wins over the reference histo's area
        oldarea = tempold.getArea()
        if name in obsnorms.keys():
            newarea = obsnorms[name]
        else:
            newarea = tempref.getArea()

        # Renormalise histo
        logging.info("Renormalising %s from %f to %f"%(name, oldarea,
            newarea))
        renormed = tempold.renormalise(newarea)

        # Write to file
        if opts.AIDA:
            f.write(renormed.asAIDA())
        else:
            f.write(renormed.asFlat())


    if opts.AIDA:
        f.write("</aida>")
    # BUGFIX: the output file was never closed; close it explicitly so the
    # data is flushed even when this module is imported/embedded.
    f.close()

    logging.info("Done. Written output to %s."%outfile)

Copied: trunk/bin/rivet-rmgaps (from r2204, trunk/bin/gap_removal)
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ trunk/bin/rivet-rmgaps	Thu Jan  7 22:52:34 2010	(r2205, copy of r2204, trunk/bin/gap_removal)
@@ -0,0 +1,154 @@
+#! /usr/bin/env python
+
+import os, sys, tempfile
+
class Inputdata:
    """All histograms found in <filename>.dat, keyed by their title line."""

    def __init__(self, filename):
        self.histos = {}
        self.description = {'DrawOnly': []}
        datfile = open(filename + '.dat', 'r')
        for rawline in datfile:
            # Histogram sections are announced by a commented BEGIN marker;
            # the Histogram constructor then consumes lines up to the
            # matching END marker from the same file iterator.
            if not rawline.startswith('#'):
                continue
            if 'BEGIN HISTOGRAM' in rawline:
                name = rawline.split('BEGIN HISTOGRAM', 1)[1].strip()
                self.description['DrawOnly'].append(name)
                self.histos[name] = Histogram(datfile)
        datfile.close()
+
+
class Histogram:
    """One histogram parsed from a flat-format (.dat) stream, with writers
    that re-emit it as an AIDA <dataPointSet> element."""

    def __init__(self, f):
        """Read one histogram from the open flat-format stream *f*."""
        self.read_input(f)

    def read_input(self, f):
        """Consume lines of *f* up to the 'END HISTOGRAM' marker.

        'KEY=VALUE' lines go into self.description; tab-separated numeric
        lines (4 or 5 columns) become bin dicts in self.data with keys
        LowEdge, UpEdge, Content and Error ([minus, plus]).
        """
        self.description = {}
        self.data = []
        for line in f:
            if line.startswith('#'):
                if 'END HISTOGRAM' in line:
                    break
            else:
                line = line.rstrip()
                if '=' in line:
                    key, value = line.split('=', 1)
                    self.description[key] = value
                else:
                    cols = line.split('\t')
                    if len(cols) == 4:
                        # Symmetric error
                        self.data.append({'LowEdge': float(cols[0]),
                                          'UpEdge':  float(cols[1]),
                                          'Content': float(cols[2]),
                                          'Error':   [float(cols[3]), float(cols[3])]})
                    elif len(cols) == 5:
                        # Asymmetric errors: [minus, plus]
                        self.data.append({'LowEdge': float(cols[0]),
                                          'UpEdge':  float(cols[1]),
                                          'Content': float(cols[2]),
                                          'Error':   [float(cols[3]), float(cols[4])]})

    def _xmlescape(self, s):
        """Minimal XML attribute escaping used throughout the AIDA output.

        NOTE(review): '&' is deliberately not escaped (as in the original
        conversion) -- confirm upstream strings never contain raw ampersands.
        """
        return s.replace('>', '&gt;').replace('<', '&lt;').replace('"', '&quot;')

    def write_datapoint(self, f, xval, xerr, yval, yerr):
        """Write one 2D <dataPoint>; yerr is [minus, plus]."""
        f.write('    <dataPoint>\n')
        f.write('      <measurement errorPlus="%e" value="%e" errorMinus="%e"/>\n' %(xerr, xval, xerr))
        f.write('      <measurement errorPlus="%e" value="%e" errorMinus="%e"/>\n' %(yerr[1], yval, yerr[0]))
        f.write('    </dataPoint>\n')

    def write_datapointset_header(self, f):
        """Write the <dataPointSet> opening tag and annotation block.

        Relies on the module-level global 'filename' for the FullPath item.
        NOTE(review): assumes every histogram carries an AidaPath entry;
        if it is missing, path is None and the split() below raises.
        """
        title = self.description.setdefault('Title', None)
        xlabel = self.description.setdefault('XLabel', None)
        ylabel = self.description.setdefault('YLabel', None)
        path = self.description.setdefault('AidaPath', None)
        if path is not None:
            path = self._xmlescape(path)
        f.write('  <dataPointSet name="%s" dimension="2"\n' % path.split('/')[-1])
        f.write('    path="%s" title="%s">\n' % (os.path.abspath(path.replace(path.split('/')[-1], '')),
                                                 self._xmlescape(title)))
        f.write('    <annotation>\n')
        if title is not None:
            f.write('      <item key="Title" value="%s" sticky="true"/>\n' % self._xmlescape(title))
        if xlabel is not None:
            f.write('      <item key="XLabel" value="%s" sticky="true"/>\n' % self._xmlescape(xlabel))
        if ylabel is not None:
            f.write('      <item key="YLabel" value="%s" sticky="true"/>\n' % self._xmlescape(ylabel))
        # BUGFIX: these two items were previously nested under the YLabel
        # check (an indentation slip); they describe the histogram path,
        # not the axis label, so write them unconditionally.
        f.write('      <item key="AidaPath" value="%s" sticky="true"/>\n' % path)
        f.write('      <item key="FullPath" value="/%s.aida%s" sticky="true"/>\n' % (filename.split('/')[-1], path))
        f.write('    </annotation>\n')
        f.write('    <dimension dim="0" title="%s" />\n' % xlabel)
        f.write('    <dimension dim="1" title="%s" />\n' % ylabel)

    def write_datapointset_footer(self, f):
        f.write('  </dataPointSet>\n')

    def write_datapointset(self, f):
        """Write the complete <dataPointSet> for this histogram to *f*."""
        self.write_datapointset_header(f)
        for bin, bindata in enumerate(self.data):
            xval = 0.5*(bindata['UpEdge'] + bindata['LowEdge'])
            # Zero-width bins get an arbitrary +-0.5 x-error
            if bindata['UpEdge'] == bindata['LowEdge']:
                xerr = 0.5
            else:
                xerr = 0.5*(bindata['UpEdge'] - bindata['LowEdge'])
            yval = bindata['Content']
            yerr = bindata['Error']
            self.write_datapoint(f, xval, xerr, yval, yerr)
        self.write_datapointset_footer(f)

    def remove_gaps(self):
        """Drop MC bins that have no counterpart in the reference binning.

        Uses the module-level global 'refdata'; histograms without a /REF
        counterpart are left untouched.
        """
        # BUGFIX: was a bare except, which also hid genuine errors such as
        # a missing 'refdata' global; only a missing key means "no ref".
        try:
            refhist = refdata.histos['/REF%s' % self.description['AidaPath']]
        except KeyError:
            return

        # Check for differences in the binning and remove superfluous MC
        # bins; matching is positional, so insert the dropped bin into the
        # reference to keep subsequent indices aligned.
        if len(refhist.data) != len(self.data):
            print(self.description['AidaPath'])
            newdata = []
            for i in range(len(self.data)):
                if self.data[i]['LowEdge'] == refhist.data[i]['LowEdge'] and \
                   self.data[i]['UpEdge'] == refhist.data[i]['UpEdge']:
                    newdata.append(self.data[i])
                else:
                    print('Deleted bin %d' % i)
                    refhist.data.insert(i, self.data[i])
            self.data = newdata
+
+
from optparse import OptionParser
parser = OptionParser(usage="%prog datafile MCfile outputfile")
opts, args = parser.parse_args()

if len(args) != 3:
    # BUGFIX: typo "specity" -> "specify" in the error message.
    sys.stderr.write("Must specify a reference, a MC, and an output file\n")
    sys.exit(1)

# Convert the aida input files to flat files we can parse:
tempdir = tempfile.mkdtemp('.gap_removal')

# NOTE(review): the file names are interpolated into a shell command, so
# paths containing shell metacharacters will break (or worse).  Kept as-is
# to preserve behaviour; consider subprocess with an argument list.
bindir = os.path.dirname(os.path.realpath(sys.argv[0]))

filename = args[0].replace(".aida", "")
os.system("%s/aida2flat %s.aida > %s/%s.dat" %(bindir, filename, tempdir, os.path.basename(filename)))
refdata = Inputdata("%s/%s" %(tempdir, os.path.basename(filename)))

# NOTE: 'filename' is a module-level global also read by
# Histogram.write_datapointset_header for the FullPath annotation.
filename = args[1].replace(".aida", "")
os.system("%s/aida2flat %s.aida > %s/%s.dat" %(bindir, filename, tempdir, os.path.basename(filename)))
mcdata = Inputdata("%s/%s" %(tempdir, os.path.basename(filename)))

# Cleanup of the temporary flat files:
for tmpname in os.listdir(tempdir):
    os.unlink('%s/%s' %(tempdir, tmpname))
os.rmdir(tempdir)

# Remove gap bins:
for histname in mcdata.description['DrawOnly']:
    mcdata.histos[histname].remove_gaps()

# Write the new aida file with removed gap bins:
f = open(args[2], 'w')
f.write('<?xml version="1.0" encoding="ISO-8859-1" ?>\n')
f.write('<!DOCTYPE aida SYSTEM "http://aida.freehep.org/schemas/3.3/aida.dtd">\n')
f.write('<aida version="3.3">\n')
f.write('  <implementation version="1.1" package="FreeHEP"/>\n')

for histname in mcdata.description['DrawOnly']:
    mcdata.histos[histname].write_datapointset(f)

f.write('</aida>\n')
# BUGFIX: was "f.close" without parentheses -- a no-op attribute access
# that never closed (or reliably flushed) the output file.
f.close()


More information about the Rivet-svn mailing list