diff --git a/HighwayNetwork.py b/HighwayNetwork.py new file mode 100644 index 0000000..4b6f63d --- /dev/null +++ b/HighwayNetwork.py @@ -0,0 +1,186 @@ +import os, re, shutil, subprocess +from socket import gethostname + +from .HwySpecsRTP import HwySpecsRTP +from .Logger import WranglerLogger +from .Network import Network +from .NetworkException import NetworkException + +__all__ = ['HighwayNetwork'] + +class HighwayNetwork(Network): + """ + Representation of a roadway network. + """ + + def __init__(self, champVersion, basenetworkpath, isTiered=False, tag=None, + hwyspecsdir=None, hwyspecs=None, tempdir=None, networkName=None): + """ + *basenetworkpath* should be a starting point for this network, and include a ``FREEFLOW.net``, + as well as ``turns[am,pm,op].pen`` files. + Also a shapefile export: FREEFLOW.[dbf,prj,shp] and FREEFLOW_nodes.[dbf,prj,shp] + + *isTiered*: when False, checks out the *basenetworkpath* from Y:\networks. When True, + expects the basenetwork path to be a fullpath and uses that. + + *tag*: when not *isTiered*, a tag can optionally be used for cloning the base network + + *hwyspecs*, if passed in, should be an instance of :py:class:`HwySpecsRTP`. It + is only used for logging. + """ + Network.__init__(self, champVersion, networkName) + + if isTiered: + (head,tail) = os.path.split(basenetworkpath) + self.applyBasenetwork(head,tail,None) + else: + self.applyingBasenetwork = True + self.cloneAndApplyProject(networkdir=basenetworkpath, tempdir=tempdir, tag=tag) + + # keep a reference of the hwyspecsrtp for logging + self.hwyspecsdir = hwyspecsdir + self.hwyspecs = hwyspecs + + def getProjectVersion(self, parentdir, networkdir, gitdir, projectsubdir=None): + """ + Returns champVersion for this project + + See :py:meth:`Wrangler.Network.applyProject` for argument details. 
+ """ + if projectsubdir: + champversionFilename = os.path.join(parentdir, networkdir, projectsubdir,"champVersion.txt") + else: + champversionFilename = os.path.join(parentdir, networkdir,"champVersion.txt") + + try: + WranglerLogger.debug("Reading %s" % champversionFilename) + champVersion = open(champversionFilename,'r').read() + champVersion = champVersion.strip() + except: + champVersion = Network.CHAMP_VERSION_DEFAULT + return champVersion + + def applyBasenetwork(self, parentdir, networkdir, gitdir): + + # copy the base network file to my workspace + shutil.copyfile(os.path.join(parentdir,networkdir,"FREEFLOW.net"), "FREEFLOW.BLD") + for filename in ["turnsam.pen", "turnspm.pen", "turnsop.pen"]: + shutil.copyfile(os.path.join(parentdir,networkdir,filename), filename) + + # done + self.applyingBasenetwork = False + + def applyProject(self, parentdir, networkdir, gitdir, projectsubdir=None, **kwargs): + """ + Applies a roadway project by calling ``runtpp`` on the ``apply.s`` script. + By convention, the input to ``apply.s`` is ``FREEFLOW.BLD`` and the output is + ``FREEFLOW.BLDOUT`` which is copied to ``FREEFLOW.BLD`` at the end of ``apply.s`` + + See :py:meth:`Wrangler.Network.applyProject` for argument details. 
+ """ + # special case: base network + if self.applyingBasenetwork: + self.applyBasenetwork(parentdir, networkdir, gitdir) + self.logProject(gitdir=gitdir, + projectname=(networkdir + "\\" + projectsubdir if projectsubdir else networkdir), + projectdesc="Base network") + return + + if projectsubdir: + applyDir = os.path.join(parentdir, networkdir, projectsubdir) + applyScript = "apply.s" + descfilename = os.path.join(parentdir, networkdir, projectsubdir,"desc.txt") + turnsfilename = os.path.join(parentdir, networkdir, projectsubdir, "turns.pen") + else: + applyDir = os.path.join(parentdir, networkdir) + applyScript = "apply.s" + descfilename = os.path.join(parentdir, networkdir,'desc.txt') + turnsfilename = os.path.join(parentdir, networkdir, "turns.pen") + + # read the description + desc = None + try: + desc = open(descfilename,'r').read() + except: + pass + + # move the FREEFLOW.BLD into place + shutil.move("FREEFLOW.BLD", os.path.join(applyDir,"FREEFLOW.BLD")) + + # dispatch it, cube license + hostname = gethostname().lower() + if hostname not in ['berry','eureka','taraval','townsend','dolores','stockton','db0v07k1']: + f = open('runtpp_dispatch.tmp', 'w') + f.write("runtpp " + applyScript + "\n") + f.close() + (cuberet, cubeStdout, cubeStderr) = self._runAndLog("Y:/champ/util/bin/dispatch.bat runtpp_dispatch.tmp taraval", run_dir=applyDir) + else: + (cuberet, cubeStdout, cubeStderr) = self._runAndLog(cmd="runtpp "+applyScript, run_dir=applyDir) + + + nodemerge = re.compile("NODEMERGE: \d+") + linkmerge = re.compile("LINKMERGE: \d+-\d+") + for line in cubeStdout: + line = line.rstrip() + if re.match(nodemerge,line): continue + if re.match(linkmerge,line): continue + WranglerLogger.debug(line) + + if cuberet != 0 and cuberet != 1: + WranglerLogger.fatal("FAIL! 
Project: "+applyScript) + raise NetworkException("HighwayNetwork applyProject failed; see log file") + + # move it back + shutil.move(os.path.join(applyDir,"FREEFLOW.BLD"), "FREEFLOW.BLD") + + # append new turn penalty file to mine + if os.path.exists(turnsfilename): + for filename in ["turnsam.pen", "turnspm.pen", "turnsop.pen"]: + newturnpens = open(turnsfilename, 'r').read() + turnfile = open(filename, 'a') + turnfile.write(newturnpens) + turnfile.close() + WranglerLogger.debug("Appending turn penalties from "+turnsfilename) + + WranglerLogger.debug("") + WranglerLogger.debug("") + + year = None + county = None + if (networkdir==self.hwyspecsdir and + self.hwyspecs and + projectsubdir in self.hwyspecs.projectdict): + year = self.hwyspecs.projectdict[projectsubdir]["MOD YEAR"] + county = self.hwyspecs.projectdict[projectsubdir]["County"] + desc = (self.hwyspecs.projectdict[projectsubdir]["Facility"] + ", " + + self.hwyspecs.projectdict[projectsubdir]["Action"] + ", " + + self.hwyspecs.projectdict[projectsubdir]["Span"]) + + self.logProject(gitdir=gitdir, + projectname=(networkdir + "\\" + projectsubdir if projectsubdir else networkdir), + year=year, projectdesc=desc, county=county) + + def write(self, path='.', name='FREEFLOW.NET', writeEmptyFiles=True, suppressQuery=False, suppressValidation=False): + if not os.path.exists(path): + WranglerLogger.debug("\nPath [%s] doesn't exist; creating." % path) + os.mkdir(path) + + else: + netfile = os.path.join(path,"FREEFLOW.net") + if os.path.exists(netfile) and not suppressQuery: + print "File [%s] exists already. Overwrite contents? 
(y/n/s) " % netfile + response = raw_input("") + WranglerLogger.debug("response = [%s]" % response) + if response == "s" or response == "S": + WranglerLogger.debug("Skipping!") + return + + if response != "Y" and response != "y": + exit(0) + + shutil.copyfile("FREEFLOW.BLD",os.path.join(path,name)) + WranglerLogger.info("Writing into %s\\%s" % (path, name)) + WranglerLogger.info("") + + for filename in ["turnsam.pen", "turnspm.pen", "turnsop.pen"]: + shutil.copyfile(filename, os.path.join(path, filename)) diff --git a/HwySpecsRTP.py b/HwySpecsRTP.py new file mode 100644 index 0000000..a614fdf --- /dev/null +++ b/HwySpecsRTP.py @@ -0,0 +1,58 @@ +import logging + +class HwySpecsRTP: + """ Simple class to read in the RTP specifications from a CSV file. + """ + + def __init__(self,specsFile): + """ + Read and cache specifications. Will apply in order read in. + """ + self.projects = [] # list of RTP reference numbers + self.projectdict = {} # RTP reference number => dictionary of attributes + + specs = open(specsFile,'r') + i=0 + for line in specs: + i+=1 + if i==1: + head=line.strip().split(',') + else: + l = line.strip().split(',') + #print l + RTPref = l[head.index("RTP Ref#")] + self.projectdict[RTPref] = {} + self.projects.append(RTPref) + + self.projectdict[RTPref]["Facility"]=l[head.index("Corridor")] + self.projectdict[RTPref]["Action"]=l[head.index("Action")] + self.projectdict[RTPref]["Span"]=l[head.index("Span")] + self.projectdict[RTPref]["County"]=l[head.index("County")] + self.projectdict[RTPref]["MOD YEAR"]=int(l[head.index("MOD YEAR")]) + self.projectdict[RTPref]["RTP FUNDING"]=l[head.index("RTP FUNDING")] + + + def listOfProjects(self,maxYear=2035,baseYear=2000): + """ + Returns the project RTP Reference numbers that qualify (after *baseYear*, before and including *maxYear*) + """ + projectList = [] + for pref in self.projects: + if self.projectdict[pref]["MOD YEAR"]<=maxYear and self.projectdict[pref]["MOD YEAR"]>baseYear: + projectList.append(pref) + 
return projectList + + def printProjects(self,fileObj): + fileObj.write("YEAR RTP FACILITY COUNTY ACTION \n") + fileObj.write("----------------------------------------------------\n") + for p in self.projects: + fileObj.write( str(p["MOD YEAR"])+" "+p["RTP REF"]+" "+p["Facility"]+" "+p["Action"]+" "+p["County"]+"\n") + + def logProjects(self, logger): + logger.info("YEAR RTP FACILITY COUNTY ACTION ") + logger.info("----------------------------------------------------") + for p in self.projects: + logger.info( str(p["MOD YEAR"])+" "+p["RTP REF"]+" "+p["Facility"]+" "+p["Action"]+" "+p["County"]) + + + diff --git a/Linki.py b/Linki.py new file mode 100644 index 0000000..4f28f00 --- /dev/null +++ b/Linki.py @@ -0,0 +1,28 @@ +class Linki(dict): + """ Linki Link. Has A-node, B-node, possibly a comment and a distance. + """ + def __init__(self): + dict.__init__(self) + self.A='' + self.B='' + self.comment='' + self.distance='' + self.xferTime='' + self.accessType='' + + def __repr__(self): + s = "%8s %8s" % (self.A, self.B) + + # access links have a type and a transfer time + if self.accessType != '': + s += " %s" % self.accessType + + if self.xferTime != '': + s += " %3s" % self.xferTime + elif self.distance != '': + s += " %8s" % self.distance + + if self.comment != '': + s += " %s" % (self.comment) + return s + \ No newline at end of file diff --git a/Logger.py b/Logger.py new file mode 100644 index 0000000..5269b2d --- /dev/null +++ b/Logger.py @@ -0,0 +1,39 @@ +import logging + +__all__ = ['WranglerLogger', 'setupLogging'] + + +# for all the Wrangler logging needs! +WranglerLogger = logging.getLogger("WranglerLogger") + + +def setupLogging(infoLogFilename, debugLogFilename, logToConsole=True): + """ Sets up the logger. The infoLog is terse, just gives the bare minimum of details + so the network composition will be clear later. + The debuglog is very noisy, for debugging. + + Pass none to either. + Spews it all out to console too, if logToConsole is true. 
+ """ + # create a logger + WranglerLogger.setLevel(logging.DEBUG) + + if infoLogFilename: + infologhandler = logging.StreamHandler(open(infoLogFilename, 'w')) + infologhandler.setLevel(logging.INFO) + infologhandler.setFormatter(logging.Formatter('%(message)s')) + WranglerLogger.addHandler(infologhandler) + + if debugLogFilename: + debugloghandler = logging.StreamHandler(open(debugLogFilename,'w')) + debugloghandler.setLevel(logging.DEBUG) + debugloghandler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s', '%Y-%m-%d %H:%M')) + WranglerLogger.addHandler(debugloghandler) + + if logToConsole: + consolehandler = logging.StreamHandler() + consolehandler.setLevel(logging.DEBUG) + consolehandler.setFormatter(logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')) + WranglerLogger.addHandler(consolehandler) + + diff --git a/Network.py b/Network.py new file mode 100644 index 0000000..24976ad --- /dev/null +++ b/Network.py @@ -0,0 +1,253 @@ +import os, re, string, subprocess, sys, tempfile +from .Logger import WranglerLogger +from .NetworkException import NetworkException +from .Regexes import git_commit_pattern, tag_num_pattern + +__all__ = ['Network'] + +class Network(object): + + CHAMP_VERSION_DEFAULT = "pre4.3" + # static variable + allNetworks = {} + + def __init__(self, champVersion, networkName = None): + """ + *champVersion* argument is for compatibility check. + Currently this should be one of *pre4.3* and *4.3* + Pass *networkName* to be added to the Networks dictionary + """ + if champVersion not in ["4.3", "pre4.3"]: + raise NetworkException("Do not understand champVersion %s") + + self.champVersion = champVersion + self.appliedProjects = {} + if networkName: Network.allNetworks[networkName] = self + + def _runAndLog(self, cmd, run_dir=".", logStdoutAndStderr=False): + """ + Runs the given command in the given *run_dir*. Returns a triple: + (return code, stdout, stderr) + where stdout and stderr are lists of strings. 
+ """ + proc = subprocess.Popen( cmd, cwd = run_dir, stdout=subprocess.PIPE, stderr=subprocess.PIPE ) + retStdout = [] + for line in proc.stdout: + line = line.strip('\r\n') + if logStdoutAndStderr: WranglerLogger.debug("stdout: " + line) + retStdout.append(line) + + retStderr = [] + for line in proc.stderr: + line = line.strip('\r\n') + if logStdoutAndStderr: WranglerLogger.debug("stderr: " + line) + retStderr.append(line) + retcode = proc.wait() + WranglerLogger.debug("Received %d from [%s]" % (retcode, cmd)) + return (retcode, retStdout, retStderr) + + + def getProjectVersion(self, parentdir, networkdir, gitdir, projectsubdir=None): + """ + Implemented by subclasses. Returns champVersion for this project. + + See :py:meth:`Wrangler.Network.applyProject` for argument details. + """ + pass + + def checkProjectVersion(self, parentdir, networkdir, gitdir, projectsubdir=None): + """ + Verifies that this project is compatible with the champVersion, raises an exception + if not + + See :py:meth:`Wrangler.Network.applyProject` for argument details. 
+ """ + # the subclass figures out what champVersion this project is + projChampVersion = self.getProjectVersion(parentdir=parentdir, networkdir=networkdir, + gitdir=gitdir, projectsubdir=projectsubdir) + WranglerLogger.debug("Checking champ version compatibility of project (%s) with requirement (%s)" % + (projChampVersion, self.champVersion)) + + if self.champVersion == "pre4.3": + if projChampVersion == "pre4.3": + return + if projChampVersion == "4.3": + # See if there's a pre4.3 branch we can checkout + cmd = r"git checkout preCHAMP4.3" + (retcode, retstdout, retstderr) = self._runAndLog(cmd, gitdir) + if retcode != 0: + raise NetworkException("Git checkout failed; see log file") + + if self.champVersion == "4.3": + if projChampVersion == "pre4.3": + raise NetworkException("Trying to use a pre4.3 network project (%s) to build a Champ4.3 network" % + (networkdir if not projectsubdir else os.path.join(networkdir,projectsubdir))) + + def applyProject(self, parentdir, networkdir, gitdir, projectsubdir=None, **kwargs): + """ + Implemented by subclasses. Args are as follows: + + * *parentdir* is the directory we're checking stuff out into (e.g. 
          a temp dir)
        * *networkdir* is the name of the dir within ``Y:\networks``
        * *gitdir* is the git repo; either the same as *networkdir* if the git repo is at
          that level (the typical case), or it's *networkdir\projectsubdir*
        * *projectsubdir* is an optional subdir of *networkdir*; If the ``apply.s`` or ``__init__.py``
          is in a subdir, this is how it's specified
        * *kwargs* are additional keyword args to pass into the apply()
        """
        pass

    def cloneAndApplyProject(self, networkdir, projectsubdir=None, tag=None, tempdir=None, **kwargs):
        """
        Clones the project from ``Y:\networks`` (or reuses an existing checkout),
        optionally checks out *tag*, verifies champ-version compatibility, and applies it.

        * *networkdir* corresponds to the dir relative to ``Y:\networks``
        * *projectsubdir* is a subdir within that, or None if there's no subdir
        * *tag* is "1.0" or "1-latest", or None for just the latest version
        * *tempdir* is the parent dir to put the git clone dir; pass None for python to just choose
        * *kwargs* are additional args for the apply
        """
        if tempdir:
            gitdir = os.path.join(tempdir, networkdir)

            if not os.path.exists(tempdir):
                os.makedirs(tempdir)

            # if the tempdir exists and it's already here and the projectsubdir is present,
            # then we already checked it out
            elif projectsubdir and os.path.exists(os.path.join(tempdir,networkdir,projectsubdir)):
                WranglerLogger.debug("Skipping checkout of %s, %s already exists" %
                                     (networkdir, os.path.join(tempdir,networkdir,projectsubdir)))

                # verify we didn't require conflicting tags
                try:
                    commitstr = self.getCommit(gitdir)
                except:
                    # the subdir itself may be the git repo
                    gitdir = os.path.join(gitdir, projectsubdir)
                    commitstr = self.getCommit(gitdir)

                tags = self.getTags(gitdir, commitstr)
                if tag and (not tags or tag not in tags):
                    # TODO: just checkout to the new tag
                    raise NetworkException("Conflicting tag requirements - FIXME!")

                self.checkProjectVersion(parentdir=tempdir, networkdir=networkdir,
                                         gitdir=gitdir, projectsubdir=projectsubdir)

                self.applyProject(parentdir=tempdir, networkdir=networkdir,
                                  gitdir=gitdir, projectsubdir=projectsubdir, **kwargs)
                return

            elif not projectsubdir and os.path.exists(os.path.join(tempdir,networkdir)):
                WranglerLogger.debug("Skipping checkout of %s, %s already exists" %
                                     (networkdir, os.path.join(tempdir,networkdir)))

                self.checkProjectVersion(parentdir=tempdir, networkdir=networkdir,
                                         gitdir=gitdir, projectsubdir=projectsubdir)

                # TODO: we should verify we didn't require conflicting tags?
                self.applyProject(parentdir=tempdir, networkdir=networkdir,
                                  gitdir=gitdir, projectsubdir=projectsubdir, **kwargs)
                return
        else:
            # no tempdir given: make a fresh one under the current directory
            tempdir = tempfile.mkdtemp(prefix="Wrangler_tmp_", dir=".")
            WranglerLogger.debug("Using tempdir %s" % tempdir)
            gitdir = os.path.join(tempdir, networkdir)

        WranglerLogger.debug("Checking out networkdir %s into tempdir %s %s" %
                             (networkdir,tempdir,"for "+projectsubdir if projectsubdir else ""))
        cmd = r"git clone -b master --quiet Y:\networks\%s" % networkdir
        (retcode, retstdout, retstderr) = self._runAndLog(cmd, tempdir)

        if retcode != 0:
            if not projectsubdir:
                raise NetworkException("Git clone failed; see log file")

            # if there was a subdir involved, try checking if the subdir is the git dir
            gitdir = os.path.join(tempdir, networkdir, projectsubdir)
            newtempdir = os.path.join(tempdir,networkdir)
            if not os.path.exists(newtempdir):
                os.makedirs(newtempdir)

            cmd = r"git clone -b master --quiet Y:\networks\%s\%s" % (networkdir, projectsubdir)
            (retcode, retstdout, retstderr) = self._runAndLog(cmd, newtempdir)

        if tag != None:
            cmd = r"git checkout %s" % tag
            (retcode, retstdout, retstderr) = self._runAndLog(cmd, gitdir)
            if retcode != 0:
                raise NetworkException("Git checkout failed; see log file")

        self.checkProjectVersion(parentdir=tempdir, networkdir=networkdir,
                                 gitdir=gitdir, projectsubdir=projectsubdir)

        self.applyProject(parentdir=tempdir, networkdir=networkdir,
                          gitdir=gitdir, projectsubdir=projectsubdir, **kwargs)

    def getCommit(self, gitdir):
        """
        Figures out the commit string for the
given gitdir (so gitdir is a git dir). + (e.g. a 40-character hex string) + """ + cmd = r"git log -1" + (retcode, retstdout, retstderr) = self._runAndLog(cmd, gitdir) + if len(retstdout)<3: + raise NetworkException("Git log failed; see log file") + + m = re.match(git_commit_pattern, retstdout[0]) + if not m: + raise NetworkException("Didn't understand git log output: [" + retstdout[0] + "]") + + return m.group(1) + + def getTags(self, gitdir, commitstr): + """ + Returns a list of all tags for this commit + """ + cmd = r"git tag --contains " + commitstr + (retcode, retstdout, retstderr) = self._runAndLog(cmd, gitdir) + if len(retstdout)==0: + return None + return retstdout + + def getNumericTag(self, gitdir, commitstr): + """ + Figures out the numeric tag, returns it. + """ + tags = self.getTags(gitdir, commitstr) + if not tags: return None + + for tag in tags: + # require them to be 1.x(.x) + if re.match(tag_num_pattern, tag): + return tag + print None + + def logProject(self, gitdir, projectname, year=None, projectdesc=None, county=None): + """ + Figures out the commit string and the tag. Subclass should figure out the rest. 
+ """ + commitstr = self.getCommit(gitdir) + tag = self.getNumericTag(gitdir, commitstr) + + if year: + yearstr = "%4d" % year + else: + yearstr = " " + + WranglerLogger.info("%-4s | %-5s | %-40s | %-40s | %-10s | %s" % + (yearstr, + tag if tag else "notag", + commitstr if commitstr else "", + string.lstrip(projectname) if projectname else "", + string.lstrip(county) if county else "", + string.lstrip(projectdesc) if projectdesc else "" + ) + ) + self.appliedProjects[projectname] = tag if tag else commitstr + + def write(self, path='.', name='network', writeEmptyFiles=True, suppressQuery=False, suppressValidation=False): + """ + Implemented by subclass + """ + pass \ No newline at end of file diff --git a/NetworkException.py b/NetworkException.py new file mode 100644 index 0000000..a03d22b --- /dev/null +++ b/NetworkException.py @@ -0,0 +1,7 @@ +__all__ = ['NetworkException'] + +class NetworkException(Exception): + """ + This class is used to communicate Wrangler errors. + """ + pass diff --git a/Node.py b/Node.py new file mode 100644 index 0000000..d6f1e97 --- /dev/null +++ b/Node.py @@ -0,0 +1,125 @@ +import sys + +__all__ = ['Node'] + +class Node(object): + """ + Transit node. This can only exist as part of a transit line. + + * *num* is the string representation of the node number with stop-status (e.g. '-24322') + * *stop* is True or False + + All other attributes stored as a dictionary. 
e.g:: + + thisnode["DELAY"]="0.5" + + """ + + # static variables for nodes.xls + descriptions = {} + descriptions_read = False + + def __init__(self, n): + self.attr = {} + if isinstance(n,int): + self.num = str(n) + else: + self.num = n + self.stop=(self.num.find('-')<0 and True or False) + self.comment = None + + def setStop(self, isStop=True): + """ + Changes to stop-status of this node to *isStop* + """ + n = abs(int(self.num)) + self.stop = isStop + + if not self.stop: + n = -n + + self.num = str(n) + + def isStop(self): + """ + Returns True if this node is a stop, False if not. + """ + if int(self.num)>0: return True + return False + + def boardsDisallowed(self): + """ + Returns True if this node is a stop and boardings are disallowed (ACCESS=2) + """ + if not self.isStop(): return False + + if "ACCESS" not in self.attr: return False + + if int(self.attr["ACCESS"]) == 2: return True + + return False + + def lineFileRepr(self, prependNEquals=False, lastNode=False): + """ + String representation for line file + """ + + if prependNEquals: s=" N=" + else: s=" " + + # node number + if self.stop: s+= " " + s += self.num + # attributes + for k,v in sorted(self.attr.items()): + if k=="DELAY" and float(v)==0: continue # NOP + s +=", %s=%s" % (k,v) + # comma + if not lastNode: s+= "," + # comment + if self.comment: s+=' %s' % (self.comment,) + # eol + s += "\n" + return s + + # Dictionary methods + def __getitem__(self,key): return self.attr[key] + def __setitem__(self,key,value): self.attr[key]=value + def __cmp__(self,other): return cmp(int(self.num),other) + + def description(self): + """ + Returns the description of this node (a string), or None if unknown. 
+ """ + Node.getDescriptions() + + if abs(int(self.num)) in Node.descriptions: + return Node.descriptions[abs(int(self.num))] + + return None + + @staticmethod + def getDescriptions(): + # if we've already done this, do nothing + if Node.descriptions_read: return + + try: + import xlrd + workbook = xlrd.open_workbook(filename=r"Y:\champ\util\nodes.xls", + encoding_override='ascii') + sheet = workbook.sheet_by_name("equiv") + row = 0 + while (row < sheet.nrows): + Node.descriptions[int(sheet.cell_value(row,0))] = \ + sheet.cell_value(row,1).encode('utf-8') + row+=1 + + # print "Read descriptions: " + str(Node.descriptions) + except ImportError: + print "Could not import xlrd module, Node descriptions unknown" + except: + print "Unexpected error reading Nodes.xls:", sys.exc_info()[0] + print sys.exc_info() + + Node.descriptions_read = True + \ No newline at end of file diff --git a/PNRLink.py b/PNRLink.py new file mode 100644 index 0000000..77844cc --- /dev/null +++ b/PNRLink.py @@ -0,0 +1,56 @@ +import re +from .Regexes import nodepair_pattern + +__all__ = ['PNRLink'] + +class PNRLink(dict): + """ PNR Support Link. + 'node' property is the node-pair for this link (e.g. 24133-34133) + 'comment' is any end-of-line comment for this link including the leading semicolon + All other attributes are stored in a dictionary (e.g. thislink['MODE']='1,2') + """ + UNNUMBERED = "unnumbered" + + def __init__(self): + dict.__init__(self) + self.id='' + self.comment='' + + self.pnr='' + self.station='' + + def __repr__(self): + s = "PNR NODE=%s " % (self.id,) + + # Deal w/all link attributes + fields = ['%s=%s' % (k,v) for k,v in self.items()] + + s += " ".join(fields) + s += self.comment + + return s + + def parseID(self): + """ + From CUBE documentation: + Normally, NODE is a transit stop node. However, at some locations, the NODE + might not be a stop node, but merely a drop-off node, or the entrance to a + parking lot associated with a transit-stop node. 
try:
    from .NetworkException import NetworkException
except (ImportError, ValueError):
    # fallback so this module can also be imported standalone (e.g. in tests)
    class NetworkException(Exception): pass

__all__ = ['Supplink']

class Supplink(dict):
    """ PNR Support Link.

    * *id* is the node-pair string for this link (e.g. "24133-34133")
    * *comment* is any end-of-line comment for this link including the leading semicolon

    All other attributes are stored in the dict itself (e.g. thislink['MODE']='1,2').
    """
    MODES = {1:"WALK_ACCESS",
             2:"WALK_EGRESS",
             3:"DRIVE_ACCESS",
             4:"DRIVE_EGRESS",
             5:"TRANSIT_TRANSFER",
             6:"DRIVE_FUNNEL",
             7:"WALK_FUNNEL"}
    # PORTABILITY FIX: dict.iteritems() is Python-2 only; items() behaves
    # identically here and also works under Python 3
    MODES_INV = dict((v,k) for k,v in MODES.items())

    def __init__(self):
        dict.__init__(self)
        self.id=''           # string, e.g. "1-7719"
        self.comment=None

        # components of ID, ints
        self.Anode = None
        self.Bnode = None
        self.mode = None

    def __repr__(self):
        s = "SUPPLINK N=%5d-%5d " % (self.Anode,self.Bnode)

        # Deal w/all link attributes
        fields = ['%s=%s' % (k,v) for k,v in self.items()]

        s += " ".join(fields)
        if self.comment:
            s = "%-80s %s" % (s, self.comment)

        return s

    def setId(self, id):
        """
        Sets self.id to the "A-B" string *id* and parses out the integer
        Anode/Bnode components.
        """
        self.id = id

        nodeList=self.id.split('-')
        self.Anode = int(nodeList[0])
        self.Bnode = int(nodeList[1])

    def setMode(self, newmode=None):
        """
        If *newmode* (an int key of MODES) is passed, then uses that.
        Otherwise, figures out the mode from the mode attribute in the dictionary.
        Raises NetworkException if no mode can be determined.
        """
        if newmode==None and self.mode: return

        # find it in my dictionary
        for k,v in self.items():
            if k.lower() == "mode":
                if newmode:
                    self.mode = newmode
                    self[k] = str(self.mode)
                else:
                    self.mode = int(v)

        # it wasn't in the dictionary
        if newmode and not self.mode:
            self.mode = newmode
            self["MODE"] = str(self.mode)

        if not self.mode:
            raise NetworkException("Supplink mode not set: " + str(self))

    def isWalkAccess(self):
        self.setMode()
        return (Supplink.MODES[self.mode]=="WALK_ACCESS")

    def isWalkEgress(self):
        self.setMode()
        return (Supplink.MODES[self.mode]=="WALK_EGRESS")

    def isDriveAccess(self):
        self.setMode()
        return (Supplink.MODES[self.mode]=="DRIVE_ACCESS")

    def isDriveEgress(self):
        self.setMode()
        return (Supplink.MODES[self.mode]=="DRIVE_EGRESS")

    def isTransitTransfer(self):
        self.setMode()
        return (Supplink.MODES[self.mode]=="TRANSIT_TRANSFER")

    def isWalkFunnel(self):
        self.setMode()
        return (Supplink.MODES[self.mode]=="WALK_FUNNEL")

    def isDriveFunnel(self):
        self.setMode()
        return (Supplink.MODES[self.mode]=="DRIVE_FUNNEL")

    def isOneWay(self):
        for k,v in self.items():
            if k.upper() == "ONEWAY": return v.upper() in ["Y", "YES", "1", "T", "TRUE"]
        # Cube says default is False
        return False

    def reverse(self):
        """
        Reverses a one-way supplink in place: swaps Anode/Bnode and flips
        access<->egress modes.  Two-way links are left untouched.
        """
        # not one-way; nothing to do
        if not self.isOneWay(): return

        self.Anode, self.Bnode = self.Bnode, self.Anode

        self.id = "%d-%d" % (self.Anode, self.Bnode)
        if   self.isWalkAccess():  self.setMode(Supplink.MODES_INV["WALK_EGRESS"])
        elif self.isWalkEgress():  self.setMode(Supplink.MODES_INV["WALK_ACCESS"])
        elif self.isDriveAccess(): self.setMode(Supplink.MODES_INV["DRIVE_EGRESS"])
        elif self.isDriveEgress(): self.setMode(Supplink.MODES_INV["DRIVE_ACCESS"])
+ """ + self.vehicleTypeToCapacity = {} + self.vehicleTypeToDelays = {} + self.linenameToAttributes = {} + self.linenameToSimple = {} + self.prefixToVehicleType = {} + + self.readTransitLineToVehicle(directory, filename=transitLineToVehicle) + self.readTransitVehicleToCapacity(directory, filename=transitVehicleToCapacity) + self.readTransitPrefixToVehicle(directory, filename=transitPrefixToVehicle) + + def readTransitVehicleToCapacity(self, directory=".", filename="transitVehicleToCapacity.csv"): + """ + Populate a self.vehicleTypeToCapacity from *filename*: + vehicletype -> 100% capacity (a float) + e.g. "LRV2" -> 238.0 + + Also populate a self.vehicleTypeToDelays: + vehicletype -> [ simple delay, + complex delay const, + complex delay per board, + complex delay per alight ] + """ + f = open(os.path.join(directory,filename), 'r') + lines = f.readlines() + f.close() + + for line in lines: + tokens = line.split(",") + if tokens[0]=="VehicleType": continue # header + vtype = tokens[0] + self.vehicleTypeToCapacity[vtype] = float(tokens[1]) + + if len(tokens) > 4: + self.vehicleTypeToDelays[vtype] = [0, 0, 0, 0] + self.vehicleTypeToDelays[vtype][TransitCapacity.DELAY_SIMPLE ] = float(tokens[4]) + self.vehicleTypeToDelays[vtype][TransitCapacity.DELAY_CONST ] = float(tokens[5]) + self.vehicleTypeToDelays[vtype][TransitCapacity.DELAY_PERBOARD ] = float(tokens[6]) + self.vehicleTypeToDelays[vtype][TransitCapacity.DELAY_PERALIGHT] = float(tokens[7]) + + # print "vehicleTypeToCapacity = " + str(self.vehicleTypeToCapacity) + # print "vehicleTypeToDelays = " + str(self.vehicleTypeToDelays) + + def writeTransitVehicleToCapacity(self, directory=".", filename="transitVehicleToCapacity.csv"): + """ + Writes it out in the same format + """ + f = open(os.path.join(directory,filename), 'w') + f.write("VehicleType,100%Capacity,85%Capacity,VehicleCategory,SimpleDelayPerStop,ConstDelayPerStop,DelayPerBoard,DelayPerAlight\n") + for vehicleType in 
sorted(self.vehicleTypeToCapacity.keys()): + f.write(vehicleType+",") + f.write("%d,%d" % (self.vehicleTypeToCapacity[vehicleType], + 0.85*self.vehicleTypeToCapacity[vehicleType])) + if vehicleType in self.vehicleTypeToDelays: + f.write(",%s" % vehicleType) # this is supposed to be vehicle category but we didn't keep that around since we don't use it + f.write(",%.3f,%.3f,%.3f,%.3f" % (self.vehicleTypeToDelays[vehicleType][TransitCapacity.DELAY_SIMPLE ], + self.vehicleTypeToDelays[vehicleType][TransitCapacity.DELAY_CONST ], + self.vehicleTypeToDelays[vehicleType][TransitCapacity.DELAY_PERBOARD ], + self.vehicleTypeToDelays[vehicleType][TransitCapacity.DELAY_PERALIGHT])) + f.write("\n") + f.close() + + def readTransitLineToVehicle(self, directory=".", filename="transitLineToVehicle.csv"): + """ + Populate self.linenameToAttributes from *filename*: + linename -> [ system, full name, AM vehicletype, PM vehiceltype, OP vehicle type ] + e.g. "MUNTI" -> [ "SF MUNI", "T - THIRD STREET", "LRV2", "LRV2", "LRV2" ] + Also self.linenameToSimple, but it's currently unused... + linename -> [ stripped, simplename ] + e.g. 
"MUN91I" -> [ "91I", "91" ] + """ + l2vReader = csv.reader(open(os.path.join(directory,filename))) + for name,system,stripped,simplename,fullLineName,vehicleTypeAM,vehicleTypePM,vehicleTypeOP in l2vReader: + self.linenameToAttributes[name] = [system, fullLineName, vehicleTypeAM,vehicleTypePM,vehicleTypeOP] + self.linenameToSimple[name] = [stripped, simplename] + # print "linenameToAttributes = " + str(self.linenameToAttributes) + + def writeTransitLineToVehicle(self, directory=".", filename="transitLineToVehicle.csv"): + """ + Writes it out in the same format + """ + f = open(os.path.join(directory,filename), 'w') + f.write("Name,System,Stripped,Line,FullLineName,AM VehicleType,PM VehicleType,OP Vehicle Type\n") + for linename in sorted(self.linenameToAttributes.keys()): + f.write(linename + ",") + f.write(self.linenameToAttributes[linename][TransitCapacity.ATTR_SYSTEM] + ",") + f.write(self.linenameToSimple[linename][0] + ",") # stripped + f.write(self.linenameToSimple[linename][1] + ",") # simplename + f.write(self.linenameToAttributes[linename][TransitCapacity.ATTR_FULLNAME]+",") + f.write(self.linenameToAttributes[linename][TransitCapacity.ATTR_AMVEHTYPE]+",") + f.write(self.linenameToAttributes[linename][TransitCapacity.ATTR_PMVEHTYPE]+",") + f.write(self.linenameToAttributes[linename][TransitCapacity.ATTR_OPVEHTYPE]+"\n") + f.close() + + def readTransitPrefixToVehicle(self, directory=".", filename="transitPrefixToVehicle.csv"): + """ + Populate self.prefixToVehicleType from *filename*: + prefix -> [ system, vehicletype ] + """ + p2vReader = csv.reader(open(os.path.join(directory,filename))) + for prefix, system, vehicleType in p2vReader: + self.prefixToVehicleType[prefix] = [system, vehicleType] + + def writeTransitPrefixToVehicle(self, directory=".", filename="transitPrefixToVehicle.csv"): + """ + Writes it out in the same format + """ + f = open(os.path.join(directory,filename), 'w') + f.write("Prefix,System,VehicleType\n") + for prefix in 
sorted(self.prefixToVehicleType.keys()): + f.write(prefix + ",") + f.write(self.prefixToVehicleType[prefix][0] + ",") # system + f.write(self.prefixToVehicleType[prefix][1] + "\n") # vehicleType + f.close() + + def getSystemAndVehicleType(self, linename, timeperiod): + """ + Convenience function. Returns tuple: best guess of (system, vehicletype) + """ + linenameU = linename.upper() + if self.linenameToAttributes.has_key(linenameU): + return (self.linenameToAttributes[linenameU][TransitCapacity.ATTR_SYSTEM], + self.linenameToAttributes[linenameU][TransitCapacity.TIMEPERIOD_TO_VEHTYPIDX[timeperiod]]) + + if linename[:4] in self.prefixToVehicleType: + return ( self.prefixToVehicleType[linenameU[:4]][0], self.prefixToVehicleType[linenameU[:4]][1]) + + if linename[:3] in self.prefixToVehicleType: + return ( self.prefixToVehicleType[linenameU[:3]][0], self.prefixToVehicleType[linenameU[:3]][1]) + + return ("", "") + + + def getVehicleTypeAndCapacity(self, linename, timeperiod): + """ returns (vehicletype, vehiclecapacity) + """ + (system, vehicleType) = self.getSystemAndVehicleType(linename, timeperiod) + + if vehicleType not in self.vehicleTypeToCapacity: + raise NetworkException("Vehicle type [%s] of system [%s] characteristics unknown; line name = [%s]" % (vehicleType, system, linename.upper())) + + capacity = self.vehicleTypeToCapacity[vehicleType] + return (vehicleType, capacity) + + def getFullname(self, linename, timeperiod): + """ + Returns best guess of fullname, or empty string if unknown + """ + linenameU = linename.upper() + if self.linenameToAttributes.has_key(linenameU): + return self.linenameToAttributes[linenameU][TransitCapacity.ATTR_FULLNAME] + else: + return "" + + def getSimpleDwell(self, linename, timeperiod): + """ + Returns a number + """ + (system, vehicleType) = self.getSystemAndVehicleType(linename, timeperiod) + if vehicleType not in self.vehicleTypeToDelays: + raise NetworkException("Vehicle type [%s] of system [%s] simple dwell unknown; line 
name = [%s]" % (vehicleType, system, linename.upper())) + + return self.vehicleTypeToDelays[vehicleType][TransitCapacity.DELAY_SIMPLE] + + def getComplexDwells(self, linename, timeperiod): + """ + Returns (constant, perboard, peralight), all three are numbers + """ + (system, vehicleType) = self.getSystemAndVehicleType(linename, timeperiod) + if vehicleType not in self.vehicleTypeToDelays: + raise NetworkException("Vehicle type [%s] of system [%s] complex dwell unknown; line name = [%s]" % (vehicleType, system, linename.upper())) + + return (self.vehicleTypeToDelays[vehicleType][TransitCapacity.DELAY_CONST], + self.vehicleTypeToDelays[vehicleType][TransitCapacity.DELAY_PERBOARD], + self.vehicleTypeToDelays[vehicleType][TransitCapacity.DELAY_PERALIGHT]) + + def addVehicleType(self, newVehicleType, newVehicleCapacity): + """ + Self explanatory + """ + self.vehicleTypeToCapacity[newVehicleType] = newVehicleCapacity + + def addLinenameFromTemplate(self, newLine, templateLine): + """ + Dupe the entry in self.linenameToAttributes for template into newline + """ + if templateLine not in self.linenameToAttributes: + raise NetworkException("addLinename with unknown templateLine %s for %s" % (templateLine, newLine)) + + self.linenameToAttributes[newLine] = copy.deepcopy(self.linenameToAttributes[templateLine]) + self.linenameToSimple[newLine] = copy.deepcopy(self.linenameToSimple[templateLine]) + + def addLineName(self, newLine, system, fullname, vehicletype_AM, vehicletype_PM, vehicletype_OP): + """ + Adds a new line with the given vehicle type information + """ + self.linenameToAttributes[newLine] = [system, fullname, vehicletype_AM, vehicletype_PM, vehicletype_OP] + self.linenameToSimple[newLine] = [fullname, fullname] + + def setAllVehicleTypes(self, linename, vehicleType, lineNameIsRegex = False): + """ + Simple method to set the vehicle types for this line name. 
+ *linename* is a string; pass *lineNameIsRegex* to interpret it as a regex + """ + self.setVehicleTypes(linename, + vehicleType_AM=vehicleType, + vehicleType_PM=vehicleType, + vehicleType_OP=vehicleType, + lineNameIsRegex=lineNameIsRegex) + + def setVehicleTypes(self, linename, vehicleType_AM, vehicleType_PM, vehicleType_OP, lineNameIsRegex = False): + """ + Sets the vehicle types for this line name. + *linename* is a string; pass *lineNameIsRegex* to interpret it as a regex + """ + if vehicleType_AM not in self.vehicleTypeToCapacity: + WranglerLogger.warn("Setting vehicle type for line %s but vehicleType %s unknown" % (linename, vehicleType_AM)) + if vehicleType_PM not in self.vehicleTypeToCapacity: + WranglerLogger.warn("Setting vehicle type for line %s but vehicleType %s unknown" % (linename, vehicleType_PM)) + if vehicleType_OP not in self.vehicleTypeToCapacity: + WranglerLogger.warn("Setting vehicle type for line %s but vehicleType %s unknown" % (linename, vehicleType_OP)) + + + if lineNameIsRegex: + linename_re = re.compile(linename, flags=re.IGNORECASE) + for name in self.linenameToAttributes.keys(): + if re.search(linename_re, name): + self.linenameToAttributes[name][TransitCapacity.ATTR_AMVEHTYPE] = vehicleType_AM + self.linenameToAttributes[name][TransitCapacity.ATTR_PMVEHTYPE] = vehicleType_PM + self.linenameToAttributes[name][TransitCapacity.ATTR_OPVEHTYPE] = vehicleType_OP + else: + if linename.upper() not in self.linenameToAttributes: + raise NetworkException("TransitCapacity: setAllVehicleTypes for unknown linename %s" % linename) + + self.linenameToAttributes[linename.upper()][TransitCapacity.ATTR_AMVEHTYPE] = vehicleType_AM + self.linenameToAttributes[linename.upper()][TransitCapacity.ATTR_PMVEHTYPE] = vehicleType_PM + self.linenameToAttributes[linename.upper()][TransitCapacity.ATTR_OPVEHTYPE] = vehicleType_OP + \ No newline at end of file diff --git a/TransitLine.py b/TransitLine.py new file mode 100644 index 0000000..e9b4abf --- /dev/null +++ 
# --------------------------------------------------------------------
# TransitLine.py
# --------------------------------------------------------------------
import copy
from .NetworkException import NetworkException
from .Node import Node
from .Logger import WranglerLogger

__all__ = ['TransitLine']

class TransitLine(object):
    """
    Transit route.  Behaves like a dictionary of attributes.
    *n* is a list of Node objects (see :py:class:`Node`).
    All other attributes are stored as a dictionary, e.g.::

        thisroute['MODE']='5'

    """

    HOURS_PER_TIMEPERIOD = {"AM":3.0, #what about 4-6a?
                            "MD":6.5,
                            "PM":3.0,
                            "EV":8.5,
                            "EA":3.0
                            }
    MODETYPE_TO_MODES = {"Local":[11,12,16,17,18,19],
                         "BRT":[13,20],
                         "LRT":[14,15,21],
                         "Premium":[22,23,24,25,26,27,28,29,30],
                         "Ferry":[31],
                         "BART":[32]
                         }

    # Do these modes have offstreet stops?
    MODENUM_TO_OFFSTREET = {11:False, # muni bus
                            12:False, # muni Express bus
                            13:False, # mun BRT
                            14:False, # cable car -- These are special because they don't have explicity WNR nodes
                            15:False, # LRT       -- and are just implemented by reading the muni.xfer line as muni.access
                            16:False, # Shuttles
                            17:False, # SamTrans bus
                            18:False, # AC bus
                            19:False, # other local bus
                            20:False, # Regional BRT
                            21:True,  # Santa Clara LRT
                            22:False, # AC premium bus
                            23:False, # GG premium bus
                            24:False, # SamTrans premium bus
                            25:False, # Other premium bus
                            26:True,  # Caltrain
                            27:True,  # SMART
                            28:True,  # eBART
                            29:True,  # Regional Rail/ACE/Amtrak
                            30:True,  # HSR
                            31:True,  # Ferry
                            32:True   # BART
                            }

    def __init__(self, name=None, template=None):
        self.attr = {}      # line attributes, e.g. attr['MODE']
        self.n = []         # list of Node objects
        self.comment = None

        self.name = name
        if name and name.find('"')==0:
            self.name = name[1:-1]  # strip leading/trailing dbl-quotes

        if template:
            self._applyTemplate(template)

    def __iter__(self):
        """
        Iterator for looping through stops.
        """
        self.currentStopIdx = 0
        return self

    def next(self):
        """
        Method for iterator.  Iterator usage::

            line = transitnet.line("MUN14LI")
            for stop in line:
                print stop # stop is an int
        """
        if self.currentStopIdx >= len(self.n):
            raise StopIteration

        self.currentStopIdx += 1
        return int(self.n[self.currentStopIdx-1].num)

    __next__ = next  # Python 3 iterator protocol

    def setFreqs(self, freqs):
        """
        Set all five headways (AM,MD,PM,EV,EA). *freqs* must be a list of five strings.
        """
        if len(freqs) != 5: raise NetworkException('Must specify all 5 frequencies')
        for tpnum in range(5):
            self.attr['FREQ[%d]' % (tpnum+1)] = freqs[tpnum]

    def getFreqs(self):
        """
        Return the frequencies for this line as a list of 5 strings representing AM,MD,PM,EV,EA.
        """
        return [self.attr['FREQ[%d]' % (tpnum+1)] for tpnum in range(5)]

    def getFreq(self, timeperiod):
        """
        Returns a float version of the frequency for the given *timeperiod*, which should be one
        of ``AM``, ``MD``, ``PM``, ``EV`` or ``EA``.
        """
        freqAttr = {"AM":"FREQ[1]", "MD":"FREQ[2]", "PM":"FREQ[3]",
                    "EV":"FREQ[4]", "EA":"FREQ[5]"}.get(timeperiod)
        if freqAttr is None:
            raise NetworkException("getFreq() received invalid timeperiod "+str(timeperiod))
        return float(self.attr[freqAttr])

    def getModeType(self):
        """
        Returns one of the keys in MODETYPE_TO_MODES
        (e.g. one of "Local", "BRT", "LRT", "Premium", "Ferry" or "BART"),
        or None if the mode number isn't classified.
        """
        modenum = int(self.attr['MODE'])
        for modetype, modelist in TransitLine.MODETYPE_TO_MODES.items():
            if modenum in modelist:
                return modetype
        return None

    def isOneWay(self):
        """
        Returns a bool indicating if the line is oneway.
        """
        oneway = self.attr["ONEWAY"]
        if oneway.upper() in ["N", "F"]:
            return False
        # default is true
        return True

    def setOneWay(self):
        """
        Turns on the oneway flag.
        """
        self.attr["ONEWAY"] = "T"

    def hasOffstreetNodes(self):
        """
        Returns True if the line has offstreet nodes.
        """
        modenum = int(self.attr['MODE'])
        return TransitLine.MODENUM_TO_OFFSTREET[modenum]

    def vehiclesPerPeriod(self, timeperiod):
        """
        Returns the number of vehicles (as a float) that will run for the given time period.
        E.g. for 10 minute frequencies in the AM, 3*6 = 18.
        """
        freq = self.getFreq(timeperiod)
        if freq < 0.01:
            return 0.0

        # minutes per time period divided by frequency
        return 60.0 * self.HOURS_PER_TIMEPERIOD[timeperiod] / freq

    def hasNode(self, nodeNumber):
        """
        Returns True if the given *nodeNumber* is a node in this line (stop or no).
        *nodeNumber* should be an integer.
        """
        for node in self.n:
            if abs(int(node.num)) == abs(nodeNumber):
                return True
        return False

    def hasLink(self, nodeA, nodeB):
        """
        Returns True iff *(nodeA,nodeB)* is a link in this line.
        *nodeA* and *nodeB* should be integers and this method is stop-insensitive.
        However, it does not check for *(nodeB,nodeA)* even when the line is two-way.
        """
        prevNum = -1
        for node in self.n:
            curNum = abs(int(node.num))
            if curNum == abs(nodeB) and prevNum == abs(nodeA):
                return True
            prevNum = curNum
        return False

    def hasSegment(self, nodeA, nodeB):
        """
        Returns True iff *nodeA* and *nodeB* appear in this line, and *nodeA* appears
        before *nodeB*.  This method is stop-insensitive.  Also it does not do any
        special checking for two-way lines.
        """
        hasA = False
        for node in self.n:
            curNum = abs(int(node.num))
            if curNum == abs(nodeA):
                hasA = True
            elif curNum == abs(nodeB):
                return hasA
        return False

    def numStops(self):
        """
        Counts and returns the number of stops in the line.
        """
        return sum(1 for node in self.n if node.isStop())

    def setNodes(self, newnodelist):
        """
        Given a list of ints representing node numbers, converts these to Node types
        and uses this new list, throwing away the previous node list.
        (Note: converts *newnodelist* in place, so the caller's list is modified.)
        """
        for i in range(len(newnodelist)):
            if isinstance(newnodelist[i], int): newnodelist[i] = Node(newnodelist[i])
        self.n = newnodelist

    def insertNode(self, refNodeNum, newNodeNum, stop=False, after=True):
        """
        Inserts the given *newNodeNum* into this line, as a stop if *stop* is True.
        The new node is inserted after *refNodeNum* if *after* is True, otherwise it
        is inserted before *refNodeNum*.  A node is inserted at every occurrence of
        *refNodeNum*.

        *refNodeNum* and *newNodeNum* are ints.
        """
        # collect the match positions first: inserting while scanning shifts the
        # tail of the list, which made the original loop skip the last node
        matchIdxs = [idx for idx in range(len(self.n))
                     if abs(int(self.n[idx].num)) == abs(refNodeNum)]

        # insert back-to-front so earlier indices stay valid; each insertion
        # gets its own Node object (the original aliased a single one)
        for idx in reversed(matchIdxs):
            newNode = Node(newNodeNum)
            newNode.setStop(stop)
            if after:
                self.n.insert(idx+1, newNode)
                WranglerLogger.debug("In line %s: inserted node %s after node %s" % (self.name,newNode.num,str(refNodeNum)))
            else:
                self.n.insert(idx, newNode)
                WranglerLogger.debug("In line %s: inserted node %s before node %s" % (self.name,newNode.num,str(refNodeNum)))

    def splitLink(self, nodeA, nodeB, newNodeNum, stop=False):
        """
        Checks to see if the link exists in the line (throws an exception if not)
        and then inserts the *newNodeNum* in between *nodeA* and *nodeB* (as a stop,
        if *stop* is True).

        *nodeA*, *nodeB* and *newNodeNum* are all ints.

        This is stop-insensitive to *nodeA* and *nodeB*.
        """
        if not self.hasLink(nodeA, nodeB):
            raise NetworkException("Line %s Doesn't have that link - so can't split it" % (self.name))

        # find the B-positions whose predecessor is A, then insert in reverse
        # so the collected indices stay valid (see insertNode)
        positions = []
        prevNum = -1
        for idx in range(len(self.n)):
            curNum = abs(int(self.n[idx].num))
            if curNum == abs(nodeB) and prevNum == abs(nodeA):
                positions.append(idx)
            prevNum = curNum

        for idx in reversed(positions):
            newNode = Node(newNodeNum)
            if stop: newNode.setStop(True)
            self.n.insert(idx, newNode)
            WranglerLogger.debug("In line %s: inserted node %s between node %s and node %s" % (self.name,newNode.num,str(nodeA),str(nodeB)))

    def extendLine(self, oldnode, newsection, beginning=True):
        """
        Replace nodes up through **and including** *oldnode* with *newsection*.
        *newsection* can be a list of numbers; they will be converted to Nodes.

        **This is stop-sensitive!**  If *oldnode* has the wrong sign, it will throw
        an exception.

        If *beginning*, does this at the beginning; otherwise at the end.
        """
        try:
            ind = self.n.index(oldnode)
        except ValueError:
            ind = self.n.index(-oldnode)

        # make the new nodes
        for i in range(len(newsection)):
            if isinstance(newsection[i], int): newsection[i] = Node(newsection[i])

        if beginning:
            self.n[:ind+1] = newsection
        else:
            self.n[ind:] = newsection

    def replaceSegment(self, node1, node2, newsection):
        """
        Replaces the section from *node1* to *node2* with *newsection*.
        *newsection* can be an array of numbers; this will make nodes.
        """
        WranglerLogger.debug("replacing segment " + str(node1) + " "+str(node2)+" with "+str(newsection)+" for "+self.name)
        try:
            ind1 = self.n.index(node1)
        except ValueError:
            ind1 = self.n.index(-node1)

        try:
            ind2 = self.n.index(node2)
        except ValueError:
            ind2 = self.n.index(-node2)

        attr1 = self.n[ind1].attr
        attr2 = self.n[ind2].attr

        # make the new nodes
        for i in range(len(newsection)):
            if isinstance(newsection[i], int): newsection[i] = Node(newsection[i])
        # xfer the attributes
        newsection[0].attr = attr1
        newsection[-1].attr = attr2

        self.n[ind1:ind2+1] = newsection

    def setStop(self, nodenum, isStop=True):
        """
        Sets the stop flag on every occurrence of *nodenum*.
        Throws an exception if the nodenum isn't found.
        """
        found = False
        for node in self.n:
            if abs(int(node.num)) == abs(nodenum):
                node.setStop(isStop)
                found = True
        if not found:
            raise NetworkException("TransitLine %s setStop called but stop %d not found" % (self.name, nodenum))

    def addStopsToSet(self, set):
        # NOTE: the parameter shadows the builtin "set"; kept for interface
        # compatibility with existing callers
        for nodeIdx in range(len(self.n)):
            if self.n[nodeIdx].isStop():
                set.add(int(self.n[nodeIdx].num))

    def reverse(self):
        """
        Reverses the current line -- adds a "-" suffix ("R") to the name, and
        reverses the node order.
        """
        # if name is 12 chars, have to drop one -- cube has a MAX of 12
        if len(self.name) >= 11: self.name = self.name[:11]
        self.name = self.name + "R"
        self.n.reverse()

    def _applyTemplate(self, template):
        '''Copy all attributes (including nodes) from an existing transit line to this line'''
        self.attr = copy.deepcopy(template.attr)
        self.n = copy.deepcopy(template.n)
        self.comment = template.comment

    # Dictionary methods
    def __getitem__(self, key): return self.attr[key.upper()]
    def __setitem__(self, key, value): self.attr[key.upper()] = value
    def __cmp__(self, other): return cmp(self.name, other)

    # String representation: for outputting to line-file
    def __repr__(self):
        # NOTE(review): indentation inside these literals was reconstructed from a
        # whitespace-collapsed source -- confirm against an original line file
        s = '\nLINE NAME=\"%s\",\n    ' % (self.name,)
        if self.comment: s += self.comment

        # Line attributes
        s += ",\n    ".join(["%s=%s" % (k,v) for k,v in sorted(self.attr.items())])

        # Node list
        s += ",\n"
        prevAttr = True
        for nodeIdx in range(len(self.n)):
            s += self.n[nodeIdx].lineFileRepr(prependNEquals=prevAttr, lastNode=(nodeIdx==len(self.n)-1))
            prevAttr = len(self.n[nodeIdx].attr) > 0

        return s

    def __str__(self):
        return 'Line name \"%s\" freqs=%s' % (self.name, str(self.getFreqs()))

# --------------------------------------------------------------------
# TransitLink.py
# --------------------------------------------------------------------
import re
from .Regexes import nodepair_pattern

__all__ = ['TransitLink']

class TransitLink(dict):
    """ Transit support Link.
        'nodes'/'id' property is the node-pair for this link (e.g. 24133,34133)
        'comment' is any end-of-line comment for this link
        (must include the leading semicolon)
        All other attributes are stored in a dictionary (e.g. thislink['MODE']='1,2')
    """
    def __init__(self):
        dict.__init__(self)
        self.id = ''       # node-pair string, parsed by setId()
        self.comment = ''

        self.Anode = None  # set by setId()
        self.Bnode = None

    def __repr__(self):
        s = "LINK nodes=%s, " % (self.id,)

        # deal w/all link attributes
        s += ", ".join(["%s=%s" % (k, self[k]) for k in sorted(self.keys())])
        s += self.comment

        return s

    def addNodesToSet(self, set):
        """ Add integer versions of the nodes in this link to the given set.
        """
        m = re.match(nodepair_pattern, self.id)
        set.add(int(m.group(1)))
        set.add(int(m.group(2)))

    def setId(self, id):
        """ Set the node-pair id and parse Anode/Bnode out of it. """
        self.id = id

        m = re.match(nodepair_pattern, self.id)
        self.Anode = int(m.group(1))
        self.Bnode = int(m.group(2))

    def isOneway(self):
        """ True unless an ONEWAY attribute (any case) holds a false-y token. """
        for key in self.keys():
            if key.upper()=="ONEWAY":
                if self[key].upper() in ["NO", "N", "0", "F", "FALSE"]: return False
                return True
        # key not found - what's the default?
+ return True + + def setOneway(self, oneway_str): + for key in self.keys(): + if key.upper()=="ONEWAY": + self[key] = oneway_str + return + # key not found + self["ONEWAY"] = oneway_str \ No newline at end of file diff --git a/TransitNetwork.py b/TransitNetwork.py new file mode 100644 index 0000000..0471442 --- /dev/null +++ b/TransitNetwork.py @@ -0,0 +1,985 @@ +import copy, glob, inspect, math, os, re, sys, xlrd +from collections import defaultdict +from .Linki import Linki +from .Logger import WranglerLogger +from .Network import Network +from .NetworkException import NetworkException +from .PNRLink import PNRLink +from .Regexes import nodepair_pattern +from .TransitAssignmentData import TransitAssignmentData, TransitAssignmentDataException +from .TransitCapacity import TransitCapacity +from .TransitLine import TransitLine +from .TransitLink import TransitLink +from .TransitParser import TransitParser, transit_file_def +from .ZACLink import ZACLink + +__all__ = ['TransitNetwork'] + +class TransitNetwork(Network): + """ + Full Cube representation of a transit network (all components) + """ + FARE_FILES = ["caltrain.fare", + "smart.fare", + "ebart.fare", + "amtrak.fare", + "hsr.fare", + "ferry.fare", + "bart.fare", + "xfer.fare", + "farelinks.fare"] + + # Static reference to a TransitCapacity instance + capacity = None + + def __init__(self, champVersion, basenetworkpath=None, isTiered=False, networkName=None): + """ + If *basenetworkpath* is passed and *isTiered* is True, then start by reading the files + named *networkName*.* in the *basenetworkpath* + """ + Network.__init__(self, champVersion, networkName) + self.lines = [] + self.links = [] + self.pnrs = [] + self.zacs = [] + self.accessli = [] + self.xferli = [] + self.farefiles = {} # farefile name -> [ lines in farefile ] + for farefile in TransitNetwork.FARE_FILES: + self.farefiles[farefile] = [] + + self.DELAY_VALUES = None + self.currentLineIdx = 0 + + if basenetworkpath and isTiered: + if not 
networkName: + raise NetworkException("Cannot initialize tiered TransitNetwork with basenetworkpath %s: no networkName specified" % basenetworkpath) + + for filename in glob.glob(os.path.join(basenetworkpath, networkName + ".*")): + suffix = filename.rsplit(".")[-1].lower() + if suffix in ["lin","link","pnr","zac","access","xfer"]: + self.parseFile(filename) + + # fares + for farefile in TransitNetwork.FARE_FILES: + fullfarefile = os.path.join(basenetworkpath, farefile) + linecount = 0 + # WranglerLogger.debug("cwd=%s farefile %s exists? %d" % (os.getcwd(), fullfarefile, os.path.exists(fullfarefile))) + + if os.path.exists(fullfarefile): + infile = open(fullfarefile, 'r') + lines = infile.readlines() + self.farefiles[farefile].extend(lines) + linecount = len(lines) + infile.close() + WranglerLogger.debug("Read %5d lines from fare file %s" % (linecount, fullfarefile)) + + + def __iter__(self): + """ + Iterator for looping through lines. + """ + self.currentLineIdx = 0 + return self + + def next(self): + """ + Method for iterator. Iterator usage:: + + net = TransitNetwork() + net.mergeDir("X:\some\dir\with_transit\lines") + for line in net: + print line + + """ + + if self.currentLineIdx >= len(self.lines): # are we out of lines? + raise StopIteration + + while not isinstance(self.lines[self.currentLineIdx],TransitLine): + self.currentLineIdx += 1 + + if self.currentLineIdx >= len(self.lines): + raise StopIteration + + self.currentLineIdx += 1 + return self.lines[self.currentLineIdx-1] + + def __repr__(self): + return "TransitNetwork: %s lines, %s links, %s PNRs, %s ZACs" % (len(self.lines),len(self.links),len(self.pnrs),len(self.zacs)) + + def isEmpty(self): + """ + TODO: could be smarter here and check that there are no non-comments since those + don't really count. 
+ """ + if (len(self.lines) == 0 and + len(self.links) == 0 and + len(self.pnrs) == 0 and + len(self.zacs) == 0 and + len(self.accessli) == 0 and + len(self.xferli) == 0): + return True + + return False + + def clear(self, projectstr): + """ + Clears out all network data to prep for a project apply, e.g. the MuniTEP project is a complete + Muni network so clearing the existing contents beforehand makes sense. + If it's already clear then this is a no-op but otherwise + the user will be prompted (with the project string) so that the log will be clear. + """ + if self.isEmpty(): + # nothing to do! + return + + query = "Clearing network for %s:\n" % projectstr + query += " %d lines, %d links, %d pnrs, %d zacs, %d accessli, %d xferli\n" % (len(self.lines), + len(self.links), len(self.pnrs), len(self.zacs), len(self.accessli), len(self.xferli)) + query += "Is this ok? (y/n) " + WranglerLogger.debug(query) + response = raw_input("") + + WranglerLogger.debug("response=[%s]" % response) + if response != "Y" and response != "y": + exit(0) + + del self.lines[:] + del self.links[:] + del self.pnrs[:] + del self.zacs[:] + del self.accessli[:] + del self.xferli[:] + + def clearLines(self): + """ + Clears out all network **line** data to prep for a project apply, e.g. the MuniTEP project is a complete + Muni network so clearing the existing contents beforehand makes sense. + """ + del self.lines[:] + + + def validateWnrsAndPnrs(self): + """ + Goes through the transit lines in this network and for those that are offstreet (e.g. + modes 4 or 9), this method will validate that the xfer/pnr/wnr relationships look ship-shape. + Pretty verbose in the debug log. 
+ """ + WranglerLogger.debug("Validating Off Street Transit Node Connections") + + nodeInfo = {} # lineset => { station node => { xfer node => [ walk node, pnr node ] }} + setToModeType = {} # lineset => list of ModeTypes ("Local", etc) + setToOffstreet = {} # lineset => True if has offstreet nodes + doneNodes = set() + + # For each line + for line in self: + if not isinstance(line,TransitLine): continue + # print "validating", line + + lineset = line.name[0:3] + if lineset not in nodeInfo: + nodeInfo[lineset] = {} + setToModeType[lineset] = [] + setToOffstreet[lineset] = False + if line.getModeType() not in setToModeType[lineset]: + setToModeType[lineset].append(line.getModeType()) + setToOffstreet[lineset] = (setToOffstreet[lineset] or line.hasOffstreetNodes()) + + # for each stop + for stopIdx in range(len(line.n)): + if not line.n[stopIdx].isStop(): continue + + stopNodeStr = line.n[stopIdx].num + + wnrNodes = set() + pnrNodes = set() + + if stopNodeStr in nodeInfo[lineset]: continue + nodeInfo[lineset][stopNodeStr] = {} + + #print " check if we have access to an on-street node" + for link in self.xferli: + if not isinstance(link,Linki): continue + # This xfer links the node to the on-street network + if link.A == stopNodeStr: + nodeInfo[lineset][stopNodeStr][link.B] = ["-","-"] + elif link.B == stopNodeStr: + nodeInfo[lineset][stopNodeStr][link.A] = ["-","-"] + + #print " Check for WNR" + for zac in self.zacs: + if not isinstance(zac,ZACLink): continue + + m = re.match(nodepair_pattern, zac.id) + if m.group(1)==stopNodeStr: wnrNodes.add(int(m.group(2))) + if m.group(2)==stopNodeStr: wnrNodes.add(int(m.group(1))) + + #print "Check for PNR" + for pnr in self.pnrs: + if not isinstance(pnr, PNRLink): continue + pnr.parseID() + if pnr.station==stopNodeStr and pnr.pnr!=PNRLink.UNNUMBERED: + pnrNodes.add(int(pnr.pnr)) + + #print "Check that our access links go from an onstreet xfer to a pnr or to a wnr" + for link in self.accessli: + if not isinstance(link,Linki): 
continue + try: + if int(link.A) in wnrNodes: + nodeInfo[lineset][stopNodeStr][link.B][0] = link.A + elif int(link.B) in wnrNodes: + nodeInfo[lineset][stopNodeStr][link.A][0] = link.B + elif int(link.A) in pnrNodes: + nodeInfo[lineset][stopNodeStr][link.B][1] = link.A + elif int(link.B) in pnrNodes: + nodeInfo[lineset][stopNodeStr][link.A][1] = link.B + except KeyError: + # if it's not offstreet then that's ok + if not setToOffstreet[lineset]: continue + + errorstr = "Invalid access link found in %s lineset %s (incl offstreet) stopNode %s -- Missing xfer? A=%s B=%s, xfernodes=%s wnrNodes=%s pnrNodes=%s" % \ + (line.getModeType(), lineset, stopNodeStr, link.A, link.B, str(nodeInfo[lineset][stopNodeStr].keys()), str(wnrNodes), str(pnrNodes)) + WranglerLogger.warning(errorstr) + # raise NetworkException(errorstr) + + book = xlrd.open_workbook(r"Y:\CHAMP\util\nodes.xls") + sh = book.sheet_by_index(0) + nodeNames = {} + for rx in range(0,sh.nrows): # skip header + therow = sh.row(rx) + nodeNames[int(therow[0].value)] = therow[1].value + # WranglerLogger.info(str(nodeNames)) + + # print it all out + for lineset in nodeInfo.keys(): + + stops = nodeInfo[lineset].keys() + stops.sort() + + WranglerLogger.debug("--------------- Line set %s %s -- hasOffstreet? 
%s------------------" % + (lineset, str(setToModeType[lineset]), str(setToOffstreet[lineset]))) + WranglerLogger.debug("%-30s %10s %10s %10s %10s" % ("stopname", "stop", "xfer", "wnr", "pnr")) + for stopNodeStr in stops: + numWnrs = 0 + stopname = "Unknown stop name" + if int(stopNodeStr) in nodeNames: stopname = nodeNames[int(stopNodeStr)] + for xfernode in nodeInfo[lineset][stopNodeStr].keys(): + WranglerLogger.debug("%-30s %10s %10s %10s %10s" % + (stopname, stopNodeStr, xfernode, + nodeInfo[lineset][stopNodeStr][xfernode][0], + nodeInfo[lineset][stopNodeStr][xfernode][1])) + if nodeInfo[lineset][stopNodeStr][xfernode][0] != "-": numWnrs += 1 + + if numWnrs == 0 and setToOffstreet[lineset]: + errorstr = "Zero wnrNodes or onstreetxfers for stop %s!" % stopNodeStr + WranglerLogger.critical(errorstr) + # raise NetworkException(errorstr) + + def line(self, name): + """ + If a string is passed in, return the line for that name exactly (a :py:class:`TransitLine` object). + If a regex, return all relevant lines (a list of TransitLine objects). + If 'all', return all lines (a list of TransitLine objects). + """ + if isinstance(name,str): + if name in self.lines: + return self.lines[self.lines.index(name)] + + if str(type(name))=="": + toret = [] + for i in range(len(self.lines)): + if isinstance(self.lines[i],str): continue + if name.match(self.lines[i].name): toret.append(self.lines[i]) + return toret + if name=='all': + allLines = [] + for i in range(len(self.lines)): + allLines.append(self.lines[i]) + return allLines + raise NetworkException('Line name not found: %s' % (name,)) + + def splitLinkInTransitLines(self,nodeA,nodeB,newNode,stop=False): + """ + Goes through each line and for any with links going from *nodeA* to *nodeB*, inserts + the *newNode* in between them (as a stop if *stop* is True). 
+ """ + totReplacements = 0 + for line in self: + if line.hasLink(nodeA,nodeB): + line.splitLink(nodeA,nodeB,newNode,stop=stop) + totReplacements+=1 + WranglerLogger.debug("Total Lines with Link %s-%s split:%d" % (str(nodeA),str(nodeB),totReplacements)) + + def replaceSegmentInTransitLines(self,nodeA,nodeB,newNodes): + """ + *newNodes* should include nodeA and nodeB if they are not going away + """ + totReplacements = 0 + allExp=re.compile(".") + newSection=newNodes # [nodeA]+newNodes+[nodeB] + for line in self.line(allExp): + if line.hasSegment(nodeA,nodeB): + WranglerLogger.debug(line.name) + line.replaceSegment(nodeA,nodeB,newSection) + totReplacements+=1 + WranglerLogger.debug("Total Lines with Segment %s-%s replaced:%d" % (str(nodeA),str(nodeB),totReplacements)) + + def setCombiFreqsForShortLine(self, shortLine, longLine, combFreqs): + """ + Set all five headways for a short line to equal a combined + headway including long line. i.e. set 1-California Short frequencies + by inputing the combined frequencies of both lines. + + .. note:: Make sure *longLine* frequencies are set first! + """ + try: + longLineInst=self.line(longLine) + except: + raise NetworkException('Unknown Route! %s' % (longLine)) + try: + shortLineInst=self.line(shortLine) + except: + raise NetworkException('Unknown Route! 
%s' % (shortLine)) + + [amLong,mdLong,pmLong,evLong,eaLong] = longLineInst.getFreqs() + [amComb,mdComb,pmComb,evComb,eaComb] = combFreqs + [amShort,mdShort,pmShort,evShort,eaShort] = [0,0,0,0,0] + if (amLong-amComb)>0: amShort=amComb*amLong/(amLong-amComb) + if (mdLong-mdComb)>0: mdShort=mdComb*mdLong/(mdLong-mdComb) + if (pmLong-pmComb)>0: pmShort=pmComb*pmLong/(pmLong-pmComb) + if (evLong-evComb)>0: evShort=evComb*evLong/(evLong-evComb) + if (eaLong-eaComb)>0: eaShort=eaComb*eaLong/(eaLong-eaComb) + shortLineInst.setFreqs([amShort,mdShort,pmShort,evShort,eaShort]) + + + def getCombinedFreq(self, names, coverage_set=False): + """ + Pass a regex pattern, we'll show the combined frequency. This + doesn't change anything, it's just a useful tool. + """ + lines = self.line(names) + denom = [0,0,0,0,0] + for l in lines: + if coverage_set: coverage_set.discard(l.name) + freqs = l.getFreqs() + for t in range(5): + if float(freqs[t])>0.0: + denom[t] += 1/float(freqs[t]) + + combined = [0,0,0,0,0] + for t in range(5): + if denom[t] > 0: combined[t] = round(1/denom[t],2) + return combined + + def verifyTransitLineFrequencies(self, frequencies, coverage=None): + """ + Utility function to verify the frequencies are as expected. + + * *frequencies* is a dictionary of ``label => [ regex1, regex2, [freqlist] ]`` + * *coverage* is a regex string (not compiled) that says we want to know if we verified the + frequencies of all of these lines. e.g. 
``MUNI*`` + + """ + covset = set([]) + if coverage: + covpattern = re.compile(coverage) + for i in range(len(self.lines)): + if isinstance(self.lines[i],str): continue + if covpattern.match(self.lines[i].name): covset.add(self.lines[i].name) + # print covset + + labels = frequencies.keys(); labels.sort() + for label in labels: + logstr = "Verifying %-40s: " % label + + for regexnum in [0,1]: + frequencies[label][regexnum]=frequencies[label][regexnum].strip() + if frequencies[label][regexnum]=="": continue + pattern = re.compile(frequencies[label][regexnum]) + freqs = self.getCombinedFreq(pattern, coverage_set=covset) + if freqs[0]+freqs[1]+freqs[2]+freqs[3]+freqs[4]==0: + logstr += "-- Found no matching lines for pattern [%s]" % (frequencies[label][regexnum]) + for timeperiod in range(5): + if abs(freqs[timeperiod]-frequencies[label][2][timeperiod])>0.2: + logstr += "-- Mismatch. Desired %s" % str(frequencies[label][2]) + logstr += "but got ",str(freqs) + lines = self.line(pattern) + WranglerLogger.error(logstr) + WranglerLogger.error("Problem lines:") + for line in lines: WranglerLogger.error(str(line)) + raise NetworkException("Mismatching frequency") + logstr += "-- Match%d!" % (regexnum+1) + WranglerLogger.debug(logstr) + + if coverage: + WranglerLogger.debug("Found %d uncovered lines" % len(covset)) + for linename in covset: + WranglerLogger.debug(self.line(linename)) + + + def write(self, path='.', name='transit', writeEmptyFiles=True, suppressQuery=False, suppressValidation=False): + """ + Write out this full transit network to disk in path specified. + """ + if not suppressValidation: + self.validateWnrsAndPnrs() + + if not os.path.exists(path): + WranglerLogger.debug("\nPath [%s] doesn't exist; creating." % path) + os.mkdir(path) + + else: + trnfile = os.path.join(path,name+".lin") + if os.path.exists(trnfile) and not suppressQuery: + print "File [%s] exists already. Overwrite contents? 
(y/n/s) " % trnfile + response = raw_input("") + WranglerLogger.debug("response = [%s]" % response) + if response == "s" or response == "S": + WranglerLogger.debug("Skipping!") + return + + if response != "Y" and response != "y": + exit(0) + + WranglerLogger.info("Writing into %s\\%s" % (path, name)) + logstr = "" + if len(self.lines)>0 or writeEmptyFiles: + logstr += " lines" + f = open(os.path.join(path,name+".lin"), 'w'); + f.write(";;<>;;\n") + for line in self.lines: + if isinstance(line,str): f.write(line) + else: f.write(repr(line)+"\n") + f.close() + + if len(self.links)>0 or writeEmptyFiles: + logstr += " links" + f = open(os.path.join(path,name+".link"), 'w'); + for link in self.links: + f.write(str(link)+"\n") + f.close() + + if len(self.pnrs)>0 or writeEmptyFiles: + logstr += " pnr" + f = open(os.path.join(path,name+".pnr"), 'w'); + for pnr in self.pnrs: + f.write(str(pnr)+"\n") + f.close() + + if len(self.zacs)>0 or writeEmptyFiles: + logstr += " zac" + f = open(os.path.join(path,name+".zac"), 'w'); + for zac in self.zacs: + f.write(str(zac)+"\n") + f.close() + + if len(self.accessli)>0 or writeEmptyFiles: + logstr += " access" + f = open(os.path.join(path,name+".access"), 'w'); + for accessli in self.accessli: + f.write(str(accessli)+"\n") + f.close() + + if len(self.xferli)>0 or writeEmptyFiles: + logstr += " xfer" + f = open(os.path.join(path,name+".xfer"), 'w'); + for xferli in self.xferli: + f.write(str(xferli)+"\n") + f.close() + + # fares + for farefile in TransitNetwork.FARE_FILES: + # don't write an empty one unless there isn't anything there + if len(self.farefiles[farefile]) == 0: + if writeEmptyFiles and not os.path.exists(os.path.join(path,farefile)): + logstr += " " + farefile + f = open(os.path.join(path,farefile), 'w') + f.write("; no fares known\n") + f.close() + + else: + logstr += " " + farefile + f = open(os.path.join(path,farefile), 'w') + for line in self.farefiles[farefile]: + f.write(line) + f.close() + + logstr += "... done." 
        WranglerLogger.debug(logstr)
        WranglerLogger.info("")

    def parseAndPrintTransitFile(self, trntxt, verbosity=1):
        """
        Parse the given transit-file text and return the converted in-memory objects.

        Verbosity=1: 1 line per line summary
        Verbosity=2: 1 line per node

        Returns a 6-tuple: (lines, links, PNRs, ZACs, access linki, xfer linki).
        Raises NetworkException if the parser cannot consume the entire buffer.
        """
        # parse the whole buffer as one "transit_file" production; nextcharacter
        # reports how far the parser got, so anything short of len(trntxt) means
        # a syntax problem at that offset
        success, children, nextcharacter = self.parser.parse(trntxt, production="transit_file")
        if not nextcharacter==len(trntxt):
            errorstr = "\n Did not successfully read the whole file; got to nextcharacter=%d out of %d total" % (nextcharacter, len(trntxt))
            errorstr += "\n Did read %d lines, next unread text = [%s]" % (len(children), trntxt[nextcharacter:nextcharacter+50])
            raise NetworkException(errorstr)

        # Convert from parser-tree format to in-memory transit data structures:
        convertedLines = self.parser.convertLineData()
        convertedLinks = self.parser.convertLinkData()
        convertedPNR = self.parser.convertPNRData()
        convertedZAC = self.parser.convertZACData()
        convertedAccessLinki = self.parser.convertLinkiData("access")
        convertedXferLinki = self.parser.convertLinkiData("xfer")

        return convertedLines, convertedLinks, convertedPNR, convertedZAC, \
            convertedAccessLinki, convertedXferLinki

    def parseFile(self, fullfile, insert_replace=True):
        """
        fullfile is the filename,
        insert_replace=True if you want to replace the data in place rather than appending
        """
        # the file extension decides which grammar branch (lin/link/pnr/...) to use
        suffix = fullfile.rsplit(".")[-1].lower()
        self.parseFileAsSuffix(fullfile,suffix,insert_replace)

    def parseFileAsSuffix(self,fullfile,suffix,insert_replace):
        """
        This is a little bit of a hack, but it's meant to allow us to do something
        like read an xfer file as an access file...
+ """ + self.parser = TransitParser(transit_file_def, verbosity=0) + self.parser.tfp.liType = suffix + logstr = " Reading %s as %s" % (fullfile, suffix) + f = open(fullfile, 'r'); + lines,links,pnr,zac,accessli,xferli = self.parseAndPrintTransitFile(f.read(), verbosity=0) + f.close() + logstr += self.doMerge(fullfile,lines,links,pnr,zac,accessli,xferli,insert_replace) + WranglerLogger.debug(logstr) + + def doMerge(self,path,lines,links,pnrs,zacs,accessli,xferli,insert_replace=False): + """ + Merge a set of transit lines & support links with this network's transit representation. + """ + + logstr = " -- Merging" + + if len(lines)>0: + logstr += " %s lines" % len(lines) + + extendlines = copy.deepcopy(lines) + for line in lines: + if isinstance(line,TransitLine) and (line in self.lines): + # logstr += " *%s" % (line.name) + if insert_replace: + self.lines[self.lines.index(line)]=line + extendlines.remove(line) + else: + self.lines.remove(line) + + if len(extendlines)>0: + # for line in extendlines: print line + self.lines.extend(["\n;######################### From: "+path+"\n"]) + self.lines.extend(extendlines) + + if len(links)>0: + logstr += " %d links" % len(links) + self.links.extend(["\n;######################### From: "+path+"\n"]) + self.links.extend(links) #TODO: Need to replace existing links + + if len(pnrs)>0: + logstr += " %d PNRs" % len(pnrs) + self.pnrs.extend( ["\n;######################### From: "+path+"\n"]) + self.pnrs.extend(pnrs) #TODO: Need to replace existing PNRs + + if len(zacs)>0: + logstr += " %d ZACs" % len(zacs) + self.zacs.extend( ["\n;######################### From: "+path+"\n"]) + self.zacs.extend(zacs) #TODO: Need to replace existing PNRs + + if len(accessli)>0: + logstr += " %d accesslinks" % len(accessli) + self.accessli.extend( ["\n;######################### From: "+path+"\n"]) + self.accessli.extend(accessli) + + if len(xferli)>0: + logstr += " %d xferlinks" % len(xferli) + self.xferli.extend( ["\n;######################### From: 
"+path+"\n"]) + self.xferli.extend(xferli) + + + logstr += "...done." + return logstr + + def mergeDir(self,path,insert_replace=False): + """ + Append all the transit-related files in the given directory. + Does NOT apply __init__.py modifications from that directory. + """ + dirlist = os.listdir(path) + dirlist.sort() + WranglerLogger.debug("Path: %s" % path) + + for filename in dirlist: + suffix = filename.rsplit(".")[-1].lower() + if suffix in ["lin","link","pnr","zac","access","xfer"]: + self.parser = TransitParser(transit_file_def, verbosity=0) + self.parser.tfp.liType = suffix + fullfile = os.path.join(path,filename) + logstr = " Reading %s" % filename + f = open(fullfile, 'r'); + lines,links,pnr,zac,accessli,xferli = self.parseAndPrintTransitFile(f.read(), verbosity=0) + f.close() + logstr += self.doMerge(fullfile,lines,links,pnr,zac,accessli,xferli,insert_replace) + WranglerLogger.debug(logstr) + + @staticmethod + def initializeTransitCapacity(directory="."): + TransitNetwork.capacity = TransitCapacity(directory=directory) + + def findSimpleDwellDelay(self, line): + """ + Returns the simple mode/owner-based dwell delay for the given *line*. This could + be a method in :py:class:`TransitLine` but I think it's more logical to be + :py:class:`TransitNetwork` specific... 
+ """ + # use AM to lookup the vehicle + simpleDwell = TransitNetwork.capacity.getSimpleDwell(line.name, "AM") + + owner = None + if 'OWNER' in line.attr: + owner = line.attr['OWNER'].strip(r'"\'') + + if owner and owner.upper() == 'TPS': + simpleDwell -= 0.1 + + if owner and owner.upper() == 'BRT': + # (20% Savings Low Floor)*(20% Savings POP) + simpleDwell = simpleDwell*0.8*0.8 + # but lets not go below 0.3 + if simpleDwell < 0.3: + simpleDwell = 0.3 + + return simpleDwell + + def addDelay(self, timeperiod="Simple", additionalLinkFile=None, + complexDelayModes=[], complexAccessModes=[], + transitAssignmentData=None, + MSAweight=1.0, previousNet=None, logPrefix="", stripTimeFacRunTimeAttrs=True): + """ + Replaces the old ``addDelay.awk`` script. + + The simple version simply looks up a delay for all stops based on the + transit line's OWNER and MODE. (Owners ``TPS`` and ``BRT`` get shorter delays.) + It will also dupe any two-way lines that are one of the complexAccessModes because those + access mode shutoffs only make sense if the lines are one-way. + + Exempts nodes that are in the network's TransitLinks and in the optional + *additionalLinkFile*, from dwell delay; the idea being that these are LRT or fixed + guideway links and the link time includes a dwell delay. + + If *transitAssignmentData* is passed in, however, then the boards, alights and vehicle + type from that data are used to calculate delay for the given *complexDelayModes*. + + When *MSAweight* < 1.0, then the delay is modified + to be a linear combination of (prev delay x (1.0-*MSAweight*)) + (new delay x *MSAweight*)) + + *logPrefix* is a string used for logging: this method appends to the following files: + + * ``lineStats[timeperiod].csv`` contains *logPrefix*, line name, total Dwell for the line, + number of closed nodes for the line + * ``dwellbucket[timeperiod].csv`` contails distribution information for the dwells. + It includes *logPrefix*, dwell bucket number, and dwell bucket count. 
+ Currently dwell buckets are 0.1 minutes + + When *stripTimeFacRunTimeAttrs* is passed as TRUE, TIMEFAC and RUNTIME is stripped for ALL + modes. Otherwise it's ignored. + """ + + # Use own links and, if passed, additionaLinkFile to form linSet, which is the set of + # nodes in the links + linkSet = set() + for link in self.links: + if isinstance(link,TransitLink): + link.addNodesToSet(linkSet) + # print linkSet + logstr = "addDelay: Size of linkset = %d" % (len(linkSet)) + + if additionalLinkFile: + linknet = TransitNetwork(self.champVersion) + linknet.parser = TransitParser(transit_file_def, verbosity=0) + f = open(additionalLinkFile, 'r'); + junk,additionallinks,junk,junk,junk,junk = \ + linknet.parseAndPrintTransitFile(f.read(), verbosity=0) + f.close() + for link in additionallinks: + if isinstance(link,TransitLink): + link.addNodesToSet(linkSet) + # print linkSet + logstr += " => %d with %s\n" % (len(linkSet), additionalLinkFile) + WranglerLogger.info(logstr) + + # record keeping for logging + statsfile = open("lineStats"+timeperiod+".csv", "a") + dwellbucketfile = open("dwellbucket"+timeperiod+".csv", "a") + totalLineDwell = {} # linename => total dwell + totalClosedNodes = {} # linename => closed nodes + DWELL_BUCKET_SIZE = 0.1 # minutes + dwellBuckets = defaultdict(int) # initialize to index => bucket + + # Dupe the one-way lines for complexAccessModes + if timeperiod=="Simple" and len(complexAccessModes)>0: + line_idx = 0 + while True: + # out of lines, done! + if line_idx >= len(self.lines): break + + # skip non-TransitLines + if not isinstance(self.lines[line_idx],TransitLine): + line_idx += 1 + continue + + # skip non-ComplexAccessMode lines + if int(self.lines[line_idx].attr['MODE']) not in complexAccessModes: + line_idx += 1 + continue + + # this is a relevant line -- is it oneway? 
then we're ok + if self.lines[line_idx].isOneWay(): + line_idx += 1 + continue + + # make it one way and add a reverse copy + self.lines[line_idx].setOneWay() + reverse_line = copy.deepcopy(self.lines[line_idx]) + reverse_line.reverse() + + WranglerLogger.debug("Reversed line %s to line %s" % (str(self.lines[line_idx]), str(reverse_line))) + self.lines.insert(line_idx+1,reverse_line) + line_idx += 2 + + + # iterate through my lines + for line in self: + + totalLineDwell[line.name] = 0.0 + totalClosedNodes[line.name] = 0 + + # strip the TIMEFAC and the RUNTIME, if desired + if stripTimeFacRunTimeAttrs: + if "RUNTIME" in line.attr: + WranglerLogger.debug("Stripping RUNTIME from %s" % line.name) + del line.attr["RUNTIME"] + if "TIMEFAC" in line.attr: + WranglerLogger.debug("Stripping TIMEFAC from %s" % line.name) + del line.attr["TIMEFAC"] + + # Passing on all the lines that do not have service during the specific time of day + if timeperiod in TransitLine.HOURS_PER_TIMEPERIOD and line.getFreq(timeperiod) == 0.0: continue + + + simpleDwellDelay = self.findSimpleDwellDelay(line) + + for nodeIdx in range(len(line.n)): + + # linkSet nodes exempt - don't add delay 'cos that's inherent to the link + if int(line.n[nodeIdx].num) in linkSet: continue + # last stop - no delay, end of the line + if nodeIdx == len(line.n)-1: continue + # dwell delay for stop nodes only + if not line.n[nodeIdx].isStop(): continue + + # ======================================================================================= + # turn off access? 
+ if (transitAssignmentData and + (nodeIdx>0) and + (int(line.attr["MODE"]) in complexAccessModes)): + try: + loadFactor = transitAssignmentData.loadFactor(line.name, + abs(int(line.n[nodeIdx-1].num)), + abs(int(line.n[nodeIdx].num)), + nodeIdx) + except: + WranglerLogger.warning("Failed to get loadfactor for (%s, A=%d B=%d SEQ=%d); assuming 0" % + (line.name, abs(int(line.n[nodeIdx-1].num)), abs(int(line.n[nodeIdx].num)),nodeIdx)) + loadFactor = 0.0 + + # disallow boardings (ACCESS=2) (for all nodes except first stop) + # if the previous link has load factor greater than 1.0 + if loadFactor > 1.0: + line.n[nodeIdx].attr["ACCESS"] = 2 + totalClosedNodes[line.name] += 1 + + # ======================================================================================= + # Simple delay if + # - we do not have boards/alighting data, + # - or if we're not configured to do a complex delay operation + if not transitAssignmentData or (int(line.attr["MODE"]) not in complexDelayModes): + if simpleDwellDelay > 0: + line.n[nodeIdx].attr["DELAY"] = str(simpleDwellDelay) + totalLineDwell[line.name] += simpleDwellDelay + dwellBuckets[int(math.floor(simpleDwellDelay/DWELL_BUCKET_SIZE))] += 1 + continue + + # Complex Delay + # ======================================================================================= + vehiclesPerPeriod = line.vehiclesPerPeriod(timeperiod) + try: + boards = transitAssignmentData.numBoards(line.name, + abs(int(line.n[nodeIdx].num)), + abs(int(line.n[nodeIdx+1].num)), + nodeIdx+1) + except: + WranglerLogger.warning("Failed to get boards for (%s, A=%d B=%d SEQ=%d); assuming 0" % + (line.name, abs(int(line.n[nodeIdx].num)), abs(int(line.n[nodeIdx+1].num)),nodeIdx+1)) + boards = 0 + + # At the first stop, vehicle has no exits and load factor + if nodeIdx == 0: + exits = 0 + else: + try: + exits = transitAssignmentData.numExits(line.name, + abs(int(line.n[nodeIdx-1].num)), + abs(int(line.n[nodeIdx].num)), + nodeIdx) + except: + WranglerLogger.warning("Failed to 
get exits for (%s, A=%d B=%d SEQ=%d); assuming 0" % + (line.name, abs(int(line.n[nodeIdx-1].num)), abs(int(line.n[nodeIdx].num)),nodeIdx)) + exits = 0 + + + + if MSAweight < 1.0: + try: + existingDelay = float(previousNet.line(line.name).n[nodeIdx].attr["DELAY"]) + except: + WranglerLogger.debug("No delay found for line %s node %s -- using 0" % + (line.name, previousNet.line(line.name).n[nodeIdx].num)) + existingDelay = 0.0 # this can happen if no boards/alights and const=0 + else: + MSAdelay = -99999999 + existingDelay = 0.0 + + (delay_const,delay_per_board,delay_per_alight) = transitAssignmentData.capacity.getComplexDwells(line.name, timeperiod) + + WranglerLogger.debug("line name=%s, timeperiod=%s, delay_const,perboard,peralight=%.3f, %.3f, %.3f" % + (line.name, timeperiod, delay_const, delay_per_board, delay_per_alight)) + + dwellDelay = (1.0-MSAweight)*existingDelay + \ + MSAweight*((delay_per_board*float(boards)/vehiclesPerPeriod) + + (delay_per_alight*float(exits)/vehiclesPerPeriod) + + delay_const) + line.n[nodeIdx].attr["DELAY"] ="%.3f" % dwellDelay + totalLineDwell[line.name] += dwellDelay + dwellBuckets[int(math.floor(dwellDelay/DWELL_BUCKET_SIZE))] += 1 + # end for each node loop + + statsfile.write("%s,%s,%f,%d\n" % (logPrefix, line.name, + totalLineDwell[line.name], totalClosedNodes[line.name])) + # end for each line loop + + for bucketnum, count in dwellBuckets.iteritems(): + dwellbucketfile.write("%s,%d,%d\n" % (logPrefix, bucketnum, count)) + statsfile.close() + dwellbucketfile.close() + + def checkCapacityConfiguration(self, complexDelayModes, complexAccessModes): + """ + Verify that we have the capacity configuration for all lines in the complex modes. + To save heart-ache later. 
+ return Success + """ + if not TransitNetwork.capacity: + TransitNetwork.capacity = TransitCapacity() + + failures = 0 + for line in self: + linename = line.name.upper() + mode = int(line.attr["MODE"]) + if mode in complexDelayModes or mode in complexAccessModes: + + for timeperiod in ["AM", "MD", "PM", "EV", "EA"]: + if line.getFreq(timeperiod) == 0: continue + + try: + (vehicletype, cap) = TransitNetwork.capacity.getVehicleTypeAndCapacity(linename, timeperiod) + if mode in complexDelayModes: + (delc,delpb,delpa) = TransitNetwork.capacity.getComplexDwells(linename, timeperiod) + + except NetworkException as e: + print e + failures += 1 + return (failures == 0) + + def getProjectVersion(self, parentdir, networkdir, gitdir, projectsubdir=None): + """ + Returns champVersion for this project + + See :py:meth:`Wrangler.Network.applyProject` for argument details. + """ + if projectsubdir: + projectname = projectsubdir + sys.path.append(os.path.join(os.getcwd(), parentdir, networkdir)) + + else: + projectname = networkdir + sys.path.append(os.path.join(os.getcwd(), parentdir)) + + evalstr = "import %s" % projectname + exec(evalstr) + evalstr = "dir(%s)" % projectname + projectdir = eval(evalstr) + + # WranglerLogger.debug("projectdir = " + str(projectdir)) + pchampVersion = (eval("%s.champVersion()" % projectname) if 'champVersion' in projectdir else Network.CHAMP_VERSION_DEFAULT) + return pchampVersion + + def applyProject(self, parentdir, networkdir, gitdir, projectsubdir=None, **kwargs): + """ + Apply the given project by calling import and apply. Currently only supports + one level of subdir (so projectsubdir can be one level, no more). + e.g. parentdir=``tmp_blah``, networkdir=``Muni_GearyBRT``, projectsubdir=``center_center`` + + See :py:meth:`Wrangler.Network.applyProject` for argument details. 
+ """ + # paths are already taken care of in checkProjectVersion + if projectsubdir: + projectname = projectsubdir + else: + projectname = networkdir + + evalstr = "import %s; %s.apply(self" % (projectname, projectname) + for key,val in kwargs.iteritems(): + evalstr += ", %s=%s" % (key, str(val)) + evalstr += ")" + try: + exec(evalstr) + except: + print "Failed to exec [%s]" % evalstr + raise + + evalstr = "dir(%s)" % projectname + projectdir = eval(evalstr) + # WranglerLogger.debug("projectdir = " + str(projectdir)) + pyear = (eval("%s.year()" % projectname) if 'year' in projectdir else None) + pdesc = (eval("%s.desc()" % projectname) if 'desc' in projectdir else None) + + # print "projectname=" + str(projectname) + # print "pyear=" + str(pyear) + # print "pdesc=" + str(pdesc) + + # fares + for farefile in TransitNetwork.FARE_FILES: + fullfarefile = os.path.join(gitdir, farefile) + linecount = 0 + # WranglerLogger.debug("cwd=%s farefile %s exists? %d" % (os.getcwd(), fullfarefile, os.path.exists(fullfarefile))) + + if os.path.exists(fullfarefile): + infile = open(fullfarefile, 'r') + lines = infile.readlines() + self.farefiles[farefile].extend(lines) + linecount = len(lines) + infile.close() + WranglerLogger.debug("Read %5d lines from fare file %s" % (linecount, fullfarefile)) + + self.logProject(gitdir=gitdir, + projectname=(networkdir + "\\" + projectsubdir if projectsubdir else networkdir), + year=pyear, projectdesc=pdesc) diff --git a/TransitParser.py b/TransitParser.py new file mode 100644 index 0000000..5feff06 --- /dev/null +++ b/TransitParser.py @@ -0,0 +1,532 @@ +from simpleparse.common import numbers, strings, comments +from simpleparse import generator +from simpleparse.parser import Parser +from simpleparse.dispatchprocessor import * +import re + +from .Linki import Linki +from .Logger import WranglerLogger +from .Node import Node +from .PNRLink import PNRLink +from .Supplink import Supplink +from .TransitLine import TransitLine +from .TransitLink 
import TransitLink +from .ZACLink import ZACLink + +__all__ = [ 'TransitParser' ] + +WRANGLER_FILE_SUFFICES = [ "lin", "link", "pnr", "zac", "access", "xfer" ] + +# PARSER DEFINITION ------------------------------------------------------------------------------ +# NOTE: even though XYSPEED and TIMEFAC are node attributes here, I'm not sure that's really ok -- +# Cube documentation implies TF and XYSPD are node attributes... +transit_file_def=r''' +transit_file := ( accessli / line / link / pnr / zac / supplink )+, smcw*, whitespace* + +line := whitespace?, smcw?, c"LINE", whitespace, lin_attr*, lin_node*, whitespace? +lin_attr := ( lin_attr_name, whitespace?, "=", whitespace?, attr_value, whitespace?, + comma, whitespace?, semicolon_comment* ) +lin_nodeattr := ( lin_nodeattr_name, whitespace?, "=", whitespace?, attr_value, whitespace?, comma?, whitespace?, semicolon_comment* ) +lin_attr_name := c"allstops" / c"color" / (c"freq",'[',[1-5],']') / c"mode" / c"name" / c"oneway" / c"owner" / c"runtime" / c"timefac" / c"xyspeed" / c"longname" +lin_nodeattr_name := c"access_c" / c"access" / c"delay" / c"xyspeed" / c"timefac" +lin_node := lin_nodestart?, whitespace?, nodenum, spaces*, comma?, spaces*, semicolon_comment?, whitespace?, lin_nodeattr* +lin_nodestart := (whitespace?, "N", whitespace?, "=") + +link := whitespace?, smcw?, c"LINK", whitespace, link_attr*, whitespace?, semicolon_comment* +link_attr := (( (link_attr_name, whitespace?, "=", whitespace?, attr_value) / + (word_nodes, whitespace?, "=", whitespace?, nodepair) / + (word_modes, whitespace?, "=", whitespace?, numseq) ), + whitespace?, comma?, whitespace?) +link_attr_name := c"dist" / c"speed" / c"time" / c"oneway" + +pnr := whitespace?, smcw?, c"PNR", whitespace, pnr_attr*, whitespace? 
+pnr_attr := (( (pnr_attr_name, whitespace?, "=", whitespace?, attr_value) / + (word_node, whitespace?, "=", whitespace?, ( nodepair / nodenum )) / + (word_zones, whitespace?, "=", whitespace?, numseq )), + whitespace?, comma?, whitespace?, semicolon_comment*) +pnr_attr_name := c"time" / c"maxtime" / c"distfac" / c"cost" + +zac := whitespace?, smcw?, c"ZONEACCESS", whitespace, zac_attr*, whitespace?, semicolon_comment* +zac_attr := (( (c"link", whitespace?, "=", whitespace?, nodepair) / + (zac_attr_name, whitespace?, "=", whitespace?, attr_value) ), + whitespace?, comma?, whitespace?) +zac_attr_name := c"mode" + +supplink := whitespace?, smcw?, c"SUPPLINK", whitespace, supplink_attr*, whitespace?, semicolon_comment* +supplink_attr := (( (supplink_attr_name, whitespace?, "=", whitespace?, attr_value) / + (c"n", whitespace?, "=", whitespace?, nodepair )), + whitespace?, comma?, whitespace?) +supplink_attr_name:= c"mode" / c"dist" / c"speed" / c"oneway" / c"time" + +accessli := whitespace?, smcw?, nodenumA, spaces?, nodenumB, spaces?, accesstag?, spaces?, (float/int)?, spaces?, semicolon_comment? 
+accesstag := c"wnr" / c"pnr" + +word_nodes := c"nodes" +word_node := c"node" +word_modes := c"modes" +word_zones := c"zones" +numseq := int, (spaces?, ("-" / ","), spaces?, int)* +nodepair := nodenum, spaces?, ("-" / ","), spaces?, nodenum +nodenumA := nodenum +nodenumB := nodenum +nodenum := int +attr_value := alphanums / string_single_quote / string_double_quote +alphanums := [a-zA-Z0-9\.]+ + := [,] + := [ \t\r\n]+ + := [ \t]+ +smcw := whitespace?, (semicolon_comment / c_comment, whitespace?)+ +''' + +class TransitFileProcessor(DispatchProcessor): + """ Class to process transit files + """ + def __init__(self, verbosity=1): + self.verbosity=verbosity + self.lines = [] + self.links = [] + self.pnrs = [] + self.zacs = [] + self.accesslis = [] + self.xferlis = [] + self.liType = '' + self.supplinks = [] + + self.endcomments = [] + + def crackTags(self, leaf, buffer): + tag = leaf[0] + text = buffer[leaf[1]:leaf[2]] + subtags = leaf[3] + + b = [] + + if subtags: + for leaf in subtags: + b.append(self.crackTags(leaf, buffer)) + + return (tag,text,b) + + def line(self, (tag,start,stop,subtags), buffer): + # this is the whole line + if self.verbosity>=1: + print tag, start, stop + + # Append list items for this line + for leaf in subtags: + xxx = self.crackTags(leaf,buffer) + self.lines.append(xxx) + + if self.verbosity==2: + # lines are composed of smcw (semicolon-comment / whitespace), line_attr and lin_node + for linepart in subtags: + print " ",linepart[0], " -> [ ", + for partpart in linepart[3]: + print partpart[0], "(", buffer[partpart[1]:partpart[2]],")", + print " ]" + + def link(self, (tag,start,stop,subtags), buffer): + # this is the whole link + if self.verbosity>=1: + print tag, start, stop + + # Append list items for this link + for leaf in subtags: + xxx = self.crackTags(leaf,buffer) + self.links.append(xxx) + + if self.verbosity==2: + # links are composed of smcw and link_attr + for linkpart in subtags: + print " ",linkpart[0], " -> [ ", + for partpart 
in linkpart[3]: + print partpart[0], "(", buffer[partpart[1]:partpart[2]], ")", + print " ]" + + def pnr(self, (tag,start,stop,subtags), buffer): + if self.verbosity>=1: + print tag, start, stop + + # Append list items for this link + for leaf in subtags: + xxx = self.crackTags(leaf,buffer) + self.pnrs.append(xxx) + + if self.verbosity==2: + # pnrs are composed of smcw and pnr_attr + for pnrpart in subtags: + print " ",pnrpart[0], " -> [ ", + for partpart in pnrpart[3]: + print partpart[0], "(", buffer[partpart[1]:partpart[2]], ")", + print " ]" + + def zac(self, (tag,start,stop,subtags), buffer): + if self.verbosity>=1: + print tag, start, stop + + if self.verbosity==2: + # zacs are composed of smcw and zac_attr + for zacpart in subtags: + print " ",zacpart[0], " -> [ ", + for partpart in zacpart[3]: + print partpart[0], "(", buffer[partpart[1]:partpart[2]], ")", + print " ]" + + # Append list items for this link + for leaf in subtags: + xxx = self.crackTags(leaf,buffer) + self.zacs.append(xxx) + + def supplink(self, (tag,start,stop,subtags), buffer): + if self.verbosity>=1: + print tag, start, stop + + if self.verbosity==2: + # supplinks are composed of smcw and zac_attr + for supplinkpart in subtags: + print " ",supplinkpart[0], " -> [ ", + for partpart in supplinkpart[3]: + print partpart[0], "(", buffer[partpart[1]:partpart[2]], ")", + print " ]" + + # Append list items for this link + # TODO: make the others more like this -- let the list separate the parse structures! 
+ supplink = [] + for leaf in subtags: + xxx = self.crackTags(leaf,buffer) + supplink.append(xxx) + self.supplinks.append(supplink) + + def smcw(self, (tag,start,stop,subtags), buffer): + """ Semicolon comment whitespace + """ + if self.verbosity>=1: + print tag, start, stop + + for leaf in subtags: + xxx = self.crackTags(leaf,buffer) + self.endcomments.append(xxx) + + def accessli(self, (tag,start,stop,subtags), buffer): + if self.verbosity>=1: + print tag, start, stop + + for leaf in subtags: + xxx = self.crackTags(leaf,buffer) + if self.liType=="access": + self.accesslis.append(xxx) + elif self.liType=="xfer": + self.xferlis.append(xxx) + else: + raise NetworkException("Found access or xfer link without classification") + +class TransitParser(Parser): + + def __init__(self, filedef=transit_file_def, verbosity=1): + Parser.__init__(self, filedef) + self.verbosity=verbosity + self.tfp = TransitFileProcessor(self.verbosity) + + def buildProcessor(self): + return self.tfp + + def convertLineData(self): + """ Convert the parsed tree of data into a usable python list of transit lines + returns list of comments and transit line objects + """ + rows = [] + currentRoute = None + + for line in self.tfp.lines: + # Each line is a 3-tuple: key, value, list-of-children. + + # Add comments as simple strings + if line[0] == 'smcw': + cmt = line[1].strip() + if not cmt==';;<>;;': + rows.append(cmt) + continue + + # Handle Line attributes + if line[0] == 'lin_attr': + key = None + value = None + comment = None + # Pay attention only to the children of lin_attr elements + kids = line[2] + for child in kids: + if child[0]=='lin_attr_name': key=child[1] + if child[0]=='attr_value': value=child[1] + if child[0]=='semicolon_comment': comment=child[1].strip() + + # If this is a NAME attribute, we need to start a new TransitLine! 
+ if key=='NAME': + if currentRoute: + rows.append(currentRoute) + currentRoute = TransitLine(name=value) + else: + currentRoute[key] = value # Just store all other attributes + + # And save line comment if there is one + if comment: currentRoute.comment = comment + continue + + # Handle Node list + if line[0] == "lin_node": + # Pay attention only to the children of lin_attr elements + kids = line[2] + node = None + for child in kids: + if child[0]=='nodenum': + node = Node(child[1]) + if child[0]=='lin_nodeattr': + key = None + value = None + for nodechild in child[2]: + if nodechild[0]=='lin_nodeattr_name': key = nodechild[1] + if nodechild[0]=='attr_value': value = nodechild[1] + if nodechild[0]=='semicolon_comment': comment=nodechild[1].strip() + node[key] = value + if comment: node.comment = comment + currentRoute.n.append(node) + continue + + # Got something other than lin_node, lin_attr, or smcw: + WranglerLogger.critical("** SHOULD NOT BE HERE: %s (%s)" % (line[0], line[1])) + + # End of tree; store final route and return + if currentRoute: rows.append(currentRoute) + return rows + + def convertLinkData(self): + """ Convert the parsed tree of data into a usable python list of transit lines + returns list of comments and transit line objects + """ + rows = [] + currentLink = None + key = None + value = None + comment = None + + for link in self.tfp.links: + # Each link is a 3-tuple: key, value, list-of-children. 
+ + # Add comments as simple strings: + if link[0] in ('smcw','semicolon_comment'): + if currentLink: + currentLink.comment = " "+link[1].strip() # Link comment + rows.append(currentLink) + currentLink = None + else: + rows.append(link[1].strip()) # Line comment + continue + + # Link records + if link[0] == 'link_attr': + # Pay attention only to the children of lin_attr elements + kids = link[2] + for child in kids: + if child[0] in ('link_attr_name','word_nodes','word_modes'): + key = child[1] + # If this is a NAME attribute, we need to start a new TransitLink. + if key in ('nodes','NODES'): + if currentLink: rows.append(currentLink) + currentLink = TransitLink() # Create new dictionary for this transit support link + + if child[0]=='nodepair': + currentLink.setId(child[1]) + + if child[0] in ('attr_value','numseq'): + currentLink[key] = child[1] + continue + + # Got something unexpected: + WranglerLogger.critical("** SHOULD NOT BE HERE: %s (%s)" % (link[0], link[1])) + + # Save last link too + if currentLink: rows.append(currentLink) + return rows + + def convertPNRData(self): + """ Convert the parsed tree of data into a usable python list of PNR objects + returns list of strings and PNR objects + """ + rows = [] + currentPNR = None + key = None + value = None + + for pnr in self.tfp.pnrs: + # Each pnr is a 3-tuple: key, value, list-of-children. + # Add comments as simple strings + + # Textline Comments + if pnr[0] =='smcw': + # Line comment; thus existing PNR must be finished. + if currentPNR: + rows.append(currentPNR) + currentPNR = None + + rows.append(pnr[1].strip()) # Append line-comment + continue + + # PNR records + if pnr[0] == 'pnr_attr': + # Pay attention only to the children of attr elements + kids = pnr[2] + for child in kids: + if child[0] in ('pnr_attr_name','word_node','word_zones'): + key = child[1] + # If this is a NAME attribute, we need to start a new PNR. 
+ if key in ('node','NODE'): + if currentPNR: + rows.append(currentPNR) + currentPNR = PNRLink() # Create new dictionary for this PNR + + if child[0]=='nodepair' or child[0]=='nodenum': + #print "child[0]/[1]",child[0],child[1] + currentPNR.id = child[1] + currentPNR.parseID() + + if child[0] in ('attr_value','numseq'): + currentPNR[key.upper()] = child[1] + + if child[0]=='semicolon_comment': + currentPNR.comment = ' '+child[1].strip() + + continue + + # Got something unexpected: + WranglerLogger.critical("** SHOULD NOT BE HERE: %s (%s)" % (pnr[0], pnr[1])) + + # Save last link too + if currentPNR: rows.append(currentPNR) + return rows + + def convertZACData(self): + """ Convert the parsed tree of data into a usable python list of ZAC objects + returns list of strings and ZAC objects + """ + rows = [] + currentZAC = None + key = None + value = None + + for zac in self.tfp.zacs: + # Each zac is a 3-tuple: key, value, list-of-children. + # Add comments as simple strings + + # Textline Comments + if zac[0] in ('smcw','semicolon_comment'): + if currentZAC: + currentZAC.comment = ' '+zac[1].strip() + rows.append(currentZAC) + currentZAC = None + else: + rows.append(zac[1].strip()) # Append value + + continue + + # Link records + if zac[0] == 'zac_attr': + # Pay attention only to the children of lin_attr elements + kids = zac[2] + for child in kids: + if child[0]=='nodepair': + # Save old ZAC + if currentZAC: rows.append(currentZAC) + # Start new ZAC + currentZAC = ZACLink() # Create new dictionary for this ZAC. 
+ currentZAC.id=child[1] + + if child[0] =='zac_attr_name': + key = child[1] + + if child[0]=='attr_value': + currentZAC[key] = child[1] + + continue + + # Got something unexpected: + WranglerLogger.critical("** SHOULD NOT BE HERE: %s (%s)" % (zac[0], zac[1])) + + # Save last link too + if currentZAC: rows.append(currentZAC) + return rows + + def convertLinkiData(self, linktype): + """ Convert the parsed tree of data into a usable python list of ZAC objects + returns list of strings and ZAC objects + """ + rows = [] + currentLinki = None + key = None + value = None + + linkis = [] + if linktype=="access": + linkis=self.tfp.accesslis + elif linktype=="xfer": + linkis=self.tfp.xferlis + else: + raise NetworkException("ConvertLinkiData with invalid linktype") + + for accessli in linkis: + # whitespace?, smcw?, nodenumA, spaces?, nodenumB, spaces?, (float/int)?, spaces?, semicolon_comment? + if accessli[0]=='smcw': + rows.append(accessli[1].strip()) + elif accessli[0]=='nodenumA': + currentLinki = Linki() + rows.append(currentLinki) + currentLinki.A = accessli[1].strip() + elif accessli[0]=='nodenumB': + currentLinki.B = accessli[1].strip() + elif accessli[0]=='float': + currentLinki.distance = accessli[1].strip() + elif accessli[0]=='int': + currentLinki.xferTime = accessli[1].strip() + elif accessli[0]=='semicolon_comment': + currentLinki.comment = accessli[1].strip() + elif accessli[0]=='accesstag': + currentLinki.accessType = accessli[1].strip() + else: + # Got something unexpected: + WranglerLogger.critical("** SHOULD NOT BE HERE: %s (%s)" % (accessli[0], accessli[1])) + + return rows + + def convertSupplinksData(self): + """ Convert the parsed tree of data into a usable python list of Supplink objects + returns list of strings and Supplink objects + """ + rows = [] + currentSupplink = None + key = None + value = None + + for supplink in self.tfp.supplinks: + + # Supplink records are lists + if currentSupplink: rows.append(currentSupplink) + currentSupplink = 
Supplink() # Create new dictionary for this PNR
+
+        for supplink_attr in supplink:
+            if supplink_attr[0] == 'supplink_attr':
+                if supplink_attr[2][0][0]=='supplink_attr_name':
+                    currentSupplink[supplink_attr[2][0][1]] = supplink_attr[2][1][1]
+                elif supplink_attr[2][0][0]=='nodepair':
+                    currentSupplink.setId(supplink_attr[2][0][1])
+                else:
+                    WranglerLogger.critical("** SHOULD NOT BE HERE: %s (%s)" % (supplink_attr[0], supplink_attr[1]))
+                    raise NetworkException("Unexpected supplink attribute: %s" % str(supplink_attr[0]))
+            elif supplink_attr[0] == "semicolon_comment":
+                currentSupplink.comment = supplink_attr[1].strip()
+            elif supplink_attr[0] == 'smcw':
+                currentSupplink.comment = supplink_attr[1].strip()
+            else:
+                WranglerLogger.critical("** SHOULD NOT BE HERE: %s (%s)" % (supplink_attr[0], supplink_attr[1]))
+                raise NetworkException("Unexpected supplink element: %s" % str(supplink_attr[0]))
+
+        # Save last link too
+        if currentSupplink: rows.append(currentSupplink)
+        return rows
diff --git a/Wrangler.py b/Wrangler.py
new file mode 100644
index 0000000..9598eb2
--- /dev/null
+++ b/Wrangler.py
@@ -0,0 +1,1444 @@
+from simpleparse.common import numbers, strings, comments
+from simpleparse import generator
+from simpleparse.parser import Parser
+from simpleparse.dispatchprocessor import *
+from types import *
+
+from mx.TextTools import TextTools
+import copy, inspect, logging, os, re, pdb, subprocess, sys, tempfile, time, xlrd
+
+class NetworkException(Exception): pass
+
+# for all the logging in this file
+WranglerLogger = logging.getLogger("WranglerLogger")
+
+WRANGLER_FILE_SUFFICES = [ "lin", "link", "pnr", "zac", "access", "xfer" ]
+
+# PARSER DEFINITION ------------------------------------------------------------------------------
+# NOTE: even though XYSPEED and TIMEFAC are node attributes here, I'm not sure that's really ok --
+# Cube documentation implies TF and XYSPD are node attributes...
+transit_file_def=r'''
+transit_file      := ( accessli / line / link / pnr / zac )+, smcw*, whitespace*
+
+line              := whitespace?, smcw?, c"LINE", whitespace, lin_attr*, lin_node*, whitespace?
+lin_attr := ( lin_attr_name, whitespace?, "=", whitespace?, attr_value, whitespace?, + comma, whitespace?, semicolon_comment* ) +lin_nodeattr := ( lin_nodeattr_name, whitespace?, "=", whitespace?, attr_value, whitespace?, comma?, whitespace?, semicolon_comment* ) +lin_attr_name := c"allstops" / c"color" / (c"freq",'[',[1-5],']') / c"mode" / c"name" / c"oneway" / c"owner" / c"runtime" / c"timefac" / c"xyspeed" +lin_nodeattr_name := c"access_c" / c"access" / c"delay" / c"xyspeed" / c"timefac" +lin_node := lin_nodestart?, whitespace?, nodenum, spaces*, comma?, spaces*, semicolon_comment?, whitespace?, lin_nodeattr* +lin_nodestart := (whitespace?, "N", whitespace?, "=") + +link := whitespace?, smcw?, c"LINK", whitespace, link_attr*, whitespace?, semicolon_comment* +link_attr := (( (link_attr_name, whitespace?, "=", whitespace?, attr_value) / + (word_nodes, whitespace?, "=", whitespace?, nodepair) / + (word_modes, whitespace?, "=", whitespace?, numseq) ), + whitespace?, comma?, whitespace?) +link_attr_name := c"dist" / c"speed" / c"time" / c"oneway" + +pnr := whitespace?, smcw?, c"PNR", whitespace, pnr_attr*, whitespace? +pnr_attr := (( (pnr_attr_name, whitespace?, "=", whitespace?, attr_value) / + (word_node, whitespace?, "=", whitespace?, ( nodepair / nodenum )) / + (word_zones, whitespace?, "=", whitespace?, numseq )), + whitespace?, comma?, whitespace?, semicolon_comment*) +pnr_attr_name := c"time" / c"maxtime" / c"distfac" / c"cost" + +zac := whitespace?, smcw?, c"ZONEACCESS", whitespace, zac_attr*, whitespace?, semicolon_comment* +zac_attr := (( (c"link", whitespace?, "=", whitespace?, nodepair) / + (zac_attr_name, whitespace?, "=", whitespace?, attr_value) ), + whitespace?, comma?, whitespace?) +zac_attr_name := c"mode" + +accessli := whitespace?, smcw?, nodenumA, spaces?, nodenumB, spaces?, (float/int)?, spaces?, semicolon_comment? 
+ +word_nodes := c"nodes" +word_node := c"node" +word_modes := c"modes" +word_zones := c"zones" +numseq := int, (spaces?, ("-" / ","), spaces?, int)* +nodepair := nodenum, spaces?, ("-" / ","), spaces?, nodenum +nodenumA := nodenum +nodenumB := nodenum +nodenum := int +attr_value := alphanums / string_single_quote / string_double_quote +alphanums := [a-zA-Z0-9\.]+ + := [,] + := [ \t\r\n]+ + := [ \t]+ +smcw := whitespace?, (semicolon_comment / c_comment, whitespace?)+ +''' +nodepair_pattern = re.compile('(\d+)[-,\s]*(\d+)') + +def setupLogging(infoLogFilename, debugLogFilename, logToConsole=True): + """ Sets up the logger. The infoLog is terse, just gives the bare minimum of details + so the network composition will be clear later. + The debuglog is very noisy, for debugging. + Spews it all out to console too, if logToConsole is true. + """ + # create a logger + WranglerLogger.setLevel(logging.DEBUG) + + infologhandler = logging.StreamHandler(open(infoLogFilename, 'w')) + infologhandler.setLevel(logging.INFO) + infologhandler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s','%Y-%m-%d %H:%M')) + WranglerLogger.addHandler(infologhandler) + + debugloghandler = logging.StreamHandler(open(debugLogFilename,'w')) + debugloghandler.setLevel(logging.DEBUG) + debugloghandler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s', '%Y-%m-%d %H:%M')) + WranglerLogger.addHandler(debugloghandler) + + if logToConsole: + consolehandler = logging.StreamHandler() + consolehandler.setLevel(logging.DEBUG) + consolehandler.setFormatter(logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')) + WranglerLogger.addHandler(consolehandler) + + +# Data structures ----------------------------------------------------------- + +class TransitLine(object): + """Transit route. Behaves like a dictionary of attributes. + 'n' is list of Node objects (see Wrangler.Node) + All other attributes are stored as a dictionary (e.g. 
thisroute['MODE']='5') + """ + def __init__(self, name=None, template=None): + self.attr = {} + self.n = [] + self.comment = None + + self.name = name + if name and name.find('"')==0: + self.name = name[1:-1] # Strip leading/trailing dbl-quotes + + if template: + self._applyTemplate(template) + + def setFreqs(self, freqs): + '''Set all five headways (AM,MD,PM,EV,EA)''' + if not len(freqs)==5: raise NetworkException('Must specify all 5 frequencies') + self.attr['FREQ[1]'] = freqs[0] + self.attr['FREQ[2]'] = freqs[1] + self.attr['FREQ[3]'] = freqs[2] + self.attr['FREQ[4]'] = freqs[3] + self.attr['FREQ[5]'] = freqs[4] + + def getFreqs(self): + return [self.attr['FREQ[1]'], + self.attr['FREQ[2]'], + self.attr['FREQ[3]'], + self.attr['FREQ[4]'], + self.attr['FREQ[5]']] + + + def hasNode(self,nodeNumber): + for node in self.n: + if abs(int(node.num)) == abs(nodeNumber): + return True + return False + + def hasLink(self,nodeA,nodeB): + nodeNumPrev = -1 + for node in self.n: + nodeNum = abs(int(node.num)) + if nodeNum == abs(nodeB) and nodeNumPrev == abs(nodeA): + return True + nodeNumPrev = nodeNum + return False + + def hasSegment(self,nodeA,nodeB): + hasA=False + for node in self.n: + nodeNum = abs(int(node.num)) + if nodeNum == abs(nodeA): + hasA=True + elif nodeNum == abs(nodeB): + if hasA: return True + else: return False + return False + + def numStops(self): + numStops = 0 + for node in self.n: + if node.isStop(): numStops += 1 + return numStops + + def setNodes(self, newnodelist): + for i in range(len(newnodelist)): + if isinstance(newnodelist[i],int): newnodelist[i] = Node(newnodelist[i]) + self.n = newnodelist + + def insertNode(self,refNodeNum,newNodeNum,stop=False,after=True): + newNode = Node(newNodeNum) + if stop==True: newNode.setStop(True) + for nodeIdx in range(len(self.n)): + currentNodeNum = abs(int(self.n[nodeIdx].num)) + if currentNodeNum == abs(refNodeNum): + if after==True: + self.n.insert(nodeIdx+1,newNode) + WranglerLogger.DEBUG("In line %s: 
inserted node %s after node %s" % (self.name,newNode.num,str(refNodeNum)))
+                else:
+                    self.n.insert(nodeIdx,newNode)
+                    WranglerLogger.debug("In line %s: inserted node %s before node %s" % (self.name,newNode.num,str(refNodeNum)))
+
+    def splitLink(self,nodeA,nodeB,newNodeNum,stop=False):
+        """checks to see if the link exists in the line and then inserts the
+        new node in between node A and nodeB
+        """
+        if not self.hasLink(nodeA,nodeB):
+            raise NetworkException( "Line %s Doesn't have that link - so can't split it" % (self.name))
+        newNode = Node(newNodeNum)
+        if stop==True: newNode.setStop(True)
+
+        nodeNumPrev = -1
+        for nodeIdx in range(len(self.n)):
+            currentNodeNum = abs(int(self.n[nodeIdx].num))
+            if currentNodeNum == abs(nodeB) and nodeNumPrev == abs(nodeA):
+                self.n.insert(nodeIdx,newNode)
+                WranglerLogger.debug("In line %s: inserted node %s between node %s and node %s" % (self.name,newNode.num,str(nodeA),str(nodeB)))
+            nodeNumPrev = currentNodeNum
+
+    def extendLine(self, oldnode, newsection, beginning=True):
+        """ Replace nodes up through oldnode with newsection.
+        Newsection can be an array of numbers; this will make nodes.
+        If beginning, does this at the beginning; otherwise at the end.
+        """
+        ind = self.n.index(oldnode)
+        # make the new nodes
+        for i in range(len(newsection)):
+            if isinstance(newsection[i],int): newsection[i] = Node(newsection[i])
+
+        if beginning:
+            # print self.n[:ind+1]
+            self.n[:ind+1] = newsection
+        else:
+            self.n[ind:] = newsection
+
+    def replaceSegment(self, node1, node2, newsection):
+        """ Replaces the section from node1 to node2 with the newsection
+        Newsection can be an array of numbers; this will make nodes.
+        """
+        WranglerLogger.debug("replacing segment %s %s" % (node1,node2))
+        try:
+            ind1 = self.n.index(node1)
+        except:
+            ind1 = self.n.index(-node1)
+
+        try:
+            ind2 = self.n.index(node2)
+        except:
+            ind2 = self.n.index(-node2)
+
+        attr1 = self.n[ind1].attr
+        attr2 = self.n[ind2].attr
+
+        # make the new nodes
+        for i in range(len(newsection)):
+            if isinstance(newsection[i],int): newsection[i] = Node(newsection[i])
+        # xfer the attributes
+        newsection[0].attr=attr1
+        newsection[-1].attr=attr2
+
+        self.n[ind1:ind2+1] = newsection
+
+    def setStop(self, nodenum, isStop=True):
+        i = self.n.index(nodenum)
+        self.n[i].setStop(isStop)
+
+    def addStopsToSet(self, set):
+        for nodeIdx in range(len(self.n)):
+            if self.n[nodeIdx].isStop():
+                set.add(int(self.n[nodeIdx].num))
+
+    def _applyTemplate(self, template):
+        '''Copy all attributes (including nodes) from an existing transit line to this line'''
+        self.attr = copy.deepcopy(template.attr)
+        self.n = copy.deepcopy(template.n)
+        self.comment = template.comment
+
+    # Dictionary methods
+    def __getitem__(self,key): return self.attr[key]
+    def __setitem__(self,key,value): self.attr[key]=value
+    def __cmp__(self,other): return cmp(self.name,other)
+
+    # String representation: for outputting to line-file
+    def __repr__(self):
+        s = '\nLINE NAME=\"%s\",\n  ' % (self.name,)
+        if self.comment: s+= self.comment
+
+        # Line attributes
+        s += ",\n  ".join(["%s=%s" % (k,v) for k,v in sorted(self.attr.items())])
+
+        # Node list
+        s += ",\n"
+        prevAttr = True
+        for nodeIdx in range(len(self.n)):
+            s += self.n[nodeIdx].lineFileRepr(prependNEquals=prevAttr, lastNode=(nodeIdx==len(self.n)-1))
+            prevAttr = len(self.n[nodeIdx].attr)>0
+
+        return s
+
+    def __str__(self):
+        s = 'Line name \"%s\" freqs=%s' % (self.name, str(self.getFreqs()))
+        return s
+
+class Node(object):
+    """Transit node.  This can only exist as part of a transit line that it belongs to.
+    'num' is the string representation of the node number with stop-status (e.g.
'-24322') + 'stop' is True or False + All other attributes stored as a dictionary (e.g. thisnode["DELAY"]="0.5") + """ + + def __init__(self, n): + self.attr = {} + if isinstance(n,int): + self.num = str(n) + else: + self.num = n + self.stop=(self.num.find('-')<0 and True or False) + self.comment = None + + def setStop(self, isStop=True): + n = abs(int(self.num)) + self.stop = isStop + + if not self.stop: + n = -n + + self.num = str(n) + + def isStop(self): + if int(self.num)>0: return True + return False + + # String representation for line file + def lineFileRepr(self, prependNEquals=False, lastNode=False): + if prependNEquals: s=" N=" + else: s=" " + + # node number + if self.stop: s+= " " + s += self.num + # attributes + for k,v in sorted(self.attr.items()): + s +=", %s=%s" % (k,v) + # comma + if not lastNode: s+= "," + # comment + if self.comment: s+=' %s' % (self.comment,) + # eol + s += "\n" + return s + + # Dictionary methods + def __getitem__(self,key): return self.attr[key] + def __setitem__(self,key,value): self.attr[key]=value + def __cmp__(self,other): return cmp(int(self.num),other) + +class TransitLink(dict): + """ Transit support Link. + 'nodes' property is the node-pair for this link (e.g. 24133,34133) + 'comment' is any end-of-line comment for this link + (must include the leading semicolon) + All other attributes are stored in a dictionary (e.g. thislink['MODE']='1,2') + """ + def __init__(self): + dict.__init__(self) + self.id='' + self.comment='' + + def __repr__(self): + s = "LINK nodes=%s, " % (self.id,) + + # Deal w/all link attributes + fields = ['%s=%s' % (k,v) for k,v in self.items()] + s += ", ".join(fields) + s += self.comment + + return s + + def addNodesToSet(self, set): + """ Add integer versions of the nodes in this like to the given set + """ + m = re.match(nodepair_pattern, self.id) + set.add(int(m.group(1))) + set.add(int(m.group(2))) + +class PNRLink(dict): + """ PNR Support Link. 
+ 'node' property is the node-pair for this link (e.g. 24133-34133) + 'comment' is any end-of-line comment for this link including the leading semicolon + All other attributes are stored in a dictionary (e.g. thislink['MODE']='1,2') + """ + def __init__(self): + dict.__init__(self) + self.id='' + self.comment='' + + def __repr__(self): + s = "PNR NODE=%s " % (self.id,) + + # Deal w/all link attributes + fields = ['%s=%s' % (k,v) for k,v in self.items()] + + s += " ".join(fields) + s += self.comment + + return s + +class ZACLink(dict): + """ ZAC support Link. + 'link' property is the node-pair for this link (e.g. 24133-34133) + 'comment' is any end-of-line comment for this link + (must include the leading semicolon) + All other attributes are stored in a dictionary (e.g. thislink['MODE']='17') + """ + def __init__(self): + dict.__init__(self) + self.id='' + self.comment='' + + def __repr__(self): + s = "ZONEACCESS link=%s " % (self.id,) + + # Deal w/all link attributes + fields = ['%s=%s' % (k,v) for k,v in self.items()] + + s += " ".join(fields) + s += self.comment + + return s + +class Linki(dict): + """ Linki Link. Has A-node, B-node, possibly a comment and a distance. 
+ """ + def __init__(self): + dict.__init__(self) + self.A='' + self.B='' + self.comment='' + self.distance='' + + def __repr__(self): + s = "%8s %8s" % (self.A, self.B) + if self.distance != '': + s += " %8s" % self.distance + if self.comment != '': + s += " %s" % (self.comment) + return s + +# ------------------------------------------------------------------------------ +# End of Data Structures +# ------------------------------------------------------------------------------ + +class TransitFileProcessor(DispatchProcessor): + """ Class to process transit files + """ + def __init__(self, verbosity=1): + self.verbosity=verbosity + self.lines = [] + self.links = [] + self.pnrs = [] + self.zacs = [] + self.accesslis = [] + self.xferlis = [] + self.liType = '' + + def crackTags(self, leaf, buffer): + tag = leaf[0] + text = buffer[leaf[1]:leaf[2]] + subtags = leaf[3] + + b = [] + + if subtags: + for leaf in subtags: + b.append(self.crackTags(leaf, buffer)) + + return (tag,text,b) + + def line(self, (tag,start,stop,subtags), buffer): + # this is the whole line + if self.verbosity>=1: + print tag, start, stop + + # Append list items for this line + for leaf in subtags: + xxx = self.crackTags(leaf,buffer) + self.lines.append(xxx) + + if self.verbosity==2: + # lines are composed of smcw (semicolon-comment / whitespace), line_attr and lin_node + for linepart in subtags: + print " ",linepart[0], " -> [ ", + for partpart in linepart[3]: + print partpart[0], "(", buffer[partpart[1]:partpart[2]],")", + print " ]" + + def link(self, (tag,start,stop,subtags), buffer): + # this is the whole link + if self.verbosity>=1: + print tag, start, stop + + # Append list items for this link + for leaf in subtags: + xxx = self.crackTags(leaf,buffer) + self.links.append(xxx) + + if self.verbosity==2: + # links are composed of smcw and link_attr + for linkpart in subtags: + print " ",linkpart[0], " -> [ ", + for partpart in linkpart[3]: + print partpart[0], "(", 
buffer[partpart[1]:partpart[2]], ")", + print " ]" + + def pnr(self, (tag,start,stop,subtags), buffer): + if self.verbosity>=1: + print tag, start, stop + + # Append list items for this link + for leaf in subtags: + xxx = self.crackTags(leaf,buffer) + self.pnrs.append(xxx) + + if self.verbosity==2: + # pnrs are composed of smcw and pnr_attr + for pnrpart in subtags: + print " ",pnrpart[0], " -> [ ", + for partpart in pnrpart[3]: + print partpart[0], "(", buffer[partpart[1]:partpart[2]], ")", + print " ]" + + def zac(self, (tag,start,stop,subtags), buffer): + if self.verbosity>=1: + print tag, start, stop + + if self.verbosity==2: + # zacs are composed of smcw and zac_attr + for zacpart in subtags: + print " ",zacpart[0], " -> [ ", + for partpart in zacpart[3]: + print partpart[0], "(", buffer[partpart[1]:partpart[2]], ")", + print " ]" + + # Append list items for this link + for leaf in subtags: + xxx = self.crackTags(leaf,buffer) + self.zacs.append(xxx) + + def smcw(self, (tag,start,stop,subtags), buffer): + """ Semicolon comment whitespace + """ + if self.verbosity>=1: + print tag, start, stop + + def accessli(self, (tag,start,stop,subtags), buffer): + if self.verbosity>=1: + print tag, start, stop + + for leaf in subtags: + xxx = self.crackTags(leaf,buffer) + if self.liType=="access": + self.accesslis.append(xxx) + elif self.liType=="xfer": + self.xferlis.append(xxx) + else: + raise NetworkException("Found access or xfer link without classification") + +class TransitParser(Parser): + + def __init__(self, filedef, verbosity=1): + Parser.__init__(self, filedef) + self.verbosity=verbosity + self.tfp = TransitFileProcessor(self.verbosity) + + def buildProcessor(self): + return self.tfp + + def convertLineData(self): + """ Convert the parsed tree of data into a usable python list of transit lines + returns list of comments and transit line objects + """ + rows = [] + currentRoute = None + + for line in self.tfp.lines: + # Each line is a 3-tuple: key, value, 
list-of-children. + + # Add comments as simple strings + if line[0] == 'smcw': + cmt = line[1].strip() + if not cmt==';;<>;;': + rows.append(cmt) + continue + + # Handle Line attributes + if line[0] == 'lin_attr': + key = None + value = None + comment = None + # Pay attention only to the children of lin_attr elements + kids = line[2] + for child in kids: + if child[0]=='lin_attr_name': key=child[1] + if child[0]=='attr_value': value=child[1] + if child[0]=='semicolon_comment': comment=child[1].strip() + + # If this is a NAME attribute, we need to start a new TransitLine! + if key=='NAME': + if currentRoute: + rows.append(currentRoute) + currentRoute = TransitLine(name=value) + else: + currentRoute[key] = value # Just store all other attributes + + # And save line comment if there is one + if comment: currentRoute.comment = comment + continue + + # Handle Node list + if line[0] == "lin_node": + # Pay attention only to the children of lin_attr elements + kids = line[2] + node = None + for child in kids: + if child[0]=='nodenum': + node = Node(child[1]) + if child[0]=='lin_nodeattr': + key = None + value = None + for nodechild in child[2]: + if nodechild[0]=='lin_nodeattr_name': key = nodechild[1] + if nodechild[0]=='attr_value': value = nodechild[1] + if nodechild[0]=='semicolon_comment': comment=nodechild[1].strip() + node[key] = value + if comment: node.comment = comment + currentRoute.n.append(node) + continue + + # Got something other than lin_node, lin_attr, or smcw: + WranglerLogger.critical("** SHOULD NOT BE HERE: %s" % line[0]) + + # End of tree; store final route and return + if currentRoute: rows.append(currentRoute) + return rows + + def convertLinkData(self): + """ Convert the parsed tree of data into a usable python list of transit lines + returns list of comments and transit line objects + """ + rows = [] + currentLink = None + key = None + value = None + comment = None + + for link in self.tfp.links: + # Each link is a 3-tuple: key, value, 
list-of-children. + + # Add comments as simple strings: + if link[0] in ('smcw','semicolon_comment'): + if currentLink: + currentLink.comment = " "+link[1].strip() # Link comment + rows.append(currentLink) + currentLink = None + else: + rows.append(link[1].strip()) # Line comment + continue + + # Link records + if link[0] == 'link_attr': + # Pay attention only to the children of lin_attr elements + kids = link[2] + for child in kids: + if child[0] in ('link_attr_name','word_nodes','word_modes'): + key = child[1] + # If this is a NAME attribute, we need to start a new TransitLink. + if key in ('nodes','NODES'): + if currentLink: rows.append(currentLink) + currentLink = TransitLink() # Create new dictionary for this transit support link + + if child[0]=='nodepair': + currentLink.id = child[1] + + if child[0] in ('attr_value','numseq'): + currentLink[key] = child[1] + continue + + # Got something unexpected: + WranglerLogger.critical("** SHOULD NOT BE HERE: %s" % link[0]) + + # Save last link too + if currentLink: rows.append(currentLink) + return rows + + def convertPNRData(self): + """ Convert the parsed tree of data into a usable python list of PNR objects + returns list of strings and PNR objects + """ + rows = [] + currentPNR = None + key = None + value = None + + for pnr in self.tfp.pnrs: + # Each pnr is a 3-tuple: key, value, list-of-children. + # Add comments as simple strings + + # Textline Comments + if pnr[0] =='smcw': + # Line comment; thus existing PNR must be finished. + if currentPNR: + rows.append(currentPNR) + currentPNR = None + + rows.append(pnr[1].strip()) # Append line-comment + continue + + # PNR records + if pnr[0] == 'pnr_attr': + # Pay attention only to the children of attr elements + kids = pnr[2] + for child in kids: + if child[0] in ('pnr_attr_name','word_node','word_zones'): + key = child[1] + # If this is a NAME attribute, we need to start a new PNR. 
+ if key in ('node','NODE'): + if currentPNR: + rows.append(currentPNR) + currentPNR = PNRLink() # Create new dictionary for this PNR + + if child[0]=='nodepair' or child[0]=='nodenum': + currentPNR.id = child[1] + + if child[0] in ('attr_value','numseq'): + currentPNR[key] = child[1] + + if child[0]=='semicolon_comment': + currentPNR.comment = ' '+child[1].strip() + + continue + + # Got something unexpected: + WranglerLogger.critical("** SHOULD NOT BE HERE: %s" % pnr[0]) + + # Save last link too + if currentPNR: rows.append(currentPNR) + return rows + + def convertZACData(self): + """ Convert the parsed tree of data into a usable python list of ZAC objects + returns list of strings and ZAC objects + """ + rows = [] + currentZAC = None + key = None + value = None + + for zac in self.tfp.zacs: + # Each zac is a 3-tuple: key, value, list-of-children. + # Add comments as simple strings + + # Textline Comments + if zac[0] in ('smcw','semicolon_comment'): + if currentZAC: + currentZAC.comment = ' '+zac[1].strip() + rows.append(currentZAC) + currentZAC = None + else: + rows.append(zac[1].strip()) # Append value + + continue + + # Link records + if zac[0] == 'zac_attr': + # Pay attention only to the children of lin_attr elements + kids = zac[2] + for child in kids: + if child[0]=='nodepair': + # Save old ZAC + if currentZAC: rows.append(currentZAC) + # Start new ZAC + currentZAC = ZACLink() # Create new dictionary for this ZAC. 
+ currentZAC.id=child[1] + + if child[0] =='zac_attr_name': + key = child[1] + + if child[0]=='attr_value': + currentZAC[key] = child[1] + + continue + + # Got something unexpected: + WranglerLogger.critical("** SHOULD NOT BE HERE: %s" % zac[0]) + + # Save last link too + if currentZAC: rows.append(currentZAC) + return rows + + def convertLinkiData(self, linktype): + """ Convert the parsed tree of data into a usable python list of ZAC objects + returns list of strings and ZAC objects + """ + rows = [] + currentLinki = None + key = None + value = None + + linkis = [] + if linktype=="access": + linkis=self.tfp.accesslis + elif linktype=="xfer": + linkis=self.tfp.xferlis + else: + raise NetworkException("ConvertLinkiData with invalid linktype") + + for accessli in linkis: + # whitespace?, smcw?, nodenumA, spaces?, nodenumB, spaces?, (float/int)?, spaces?, semicolon_comment? + if accessli[0]=='smcw': + rows.append(accessli[1].strip()) + elif accessli[0]=='nodenumA': + currentLinki = Linki() + rows.append(currentLinki) + currentLinki.A = accessli[1].strip() + elif accessli[0]=='nodenumB': + currentLinki.B = accessli[1].strip() + elif accessli[0]=='float' or accessli[0]=='int': + currentLinki.distance = accessli[1].strip() + elif accessli[0]=='semicolon_comment': + currentLinki.comment = accessli[1].strip() + else: + # Got something unexpected: + WranglerLogger.critical("** SHOULD NOT BE HERE: %s" % accessli[0]) + + return rows + +class Network(object): + """Full Cube network representation (all components)""" + + def __init__(self): + self.lines = [] + self.links = [] + self.pnrs = [] + self.zacs = [] + self.accessli = [] + self.xferli = [] + + def __repr__(self): + return "Network: %s lines, %s links, %s PNRs, %s ZACs" % (len(self.lines),len(self.links),len(self.pnrs),len(self.zacs)) + + def isEmpty(self): + """ TODO: could be smarter here and check that there are no non-comments since those + don't really count + """ + if (len(self.lines) == 0 and + len(self.links) == 
0 and + len(self.pnrs) == 0 and + len(self.zacs) == 0 and + len(self.accessli) == 0 and + len(self.xferli) == 0): + return True + + return False + + def clear(self, projectstr): + """ Clears out all network data to prep for a project apply. + If it's already clear then this is a no-op but otherwise + the user will be prompted (with the project string) + """ + if self.isEmpty(): + # nothing to do! + return + + query = "Clearing network for %s:\n" % projectstr + query += " %d lines, %d links, %d pnrs, %d zacs, %d accessli, %d xferli\n" % (len(self.lines), + len(self.links), len(self.pnrs), len(self.zacs), len(self.accessli), len(self.xferli)) + query += "Is this ok? (y/n) " + WranglerLogger.debug(query) + response = raw_input("") + + WranglerLogger.debug("response=[%s]" % response) + if response != "Y" and response != "y": + exit(0) + + del self.lines[:] + del self.links[:] + del self.pnrs[:] + del self.zacs[:] + del self.accessli[:] + del self.xferli[:] + + def validateOffstreet(self): + print "validating off street" + WranglerLogger.debug("Validating Off Street Transit Node Connections") + + nodeInfo = {} # lineset => { station node => { xfer node => [ walk node, pnr node ] }} + doneNodes = set() + + # For each line + for line in self.lines: + if not isinstance(line,TransitLine): continue + print "validating", line + # The only off-road modes are BART, caltrain/ferry/rail, or LRT + if line.attr["MODE"] != "4" and line.attr["MODE"] != "9": # and line.attr["MODE"] != "3": + # WranglerLogger.info("-- Not mode 4 or 9, skipping check!") + continue + + lineset = line.name[0:3] + if lineset not in nodeInfo: + nodeInfo[lineset] = {} + + # for each stop + for stopIdx in range(len(line.n)): + if not line.n[stopIdx].isStop(): continue + + stopNodeStr = line.n[stopIdx].num + + wnrNodes = set() + pnrNodes = set() + + if stopNodeStr in nodeInfo[lineset]: continue + nodeInfo[lineset][stopNodeStr] = {} + + #print " check if we have access to an on-street node" + for link in 
self.xferli: + if not isinstance(link,Linki): continue + # This xfer links the node to the on-street network + if link.A == stopNodeStr: + nodeInfo[lineset][stopNodeStr][link.B] = ["-","-"] + elif link.B == stopNodeStr: + nodeInfo[lineset][stopNodeStr][link.A] = ["-","-"] + + #print " Check for WNR" + for zac in self.zacs: + if not isinstance(zac,ZACLink): continue + + m = re.match(nodepair_pattern, zac.id) + if m.group(1)==stopNodeStr: wnrNodes.add(int(m.group(2))) + if m.group(2)==stopNodeStr: wnrNodes.add(int(m.group(1))) + + #print "Check for PNR" + for pnr in self.pnrs: + if not isinstance(pnr, PNRLink): continue + m = re.match(nodepair_pattern, pnr.id) + if m == None and pnr.id==stopNodeStr: # it's a nodenum + pnrNodes.add("unnumbered") + elif m.group(2)==stopNodeStr: pnrNodes.add(int(m.group(1))) + # The second node should be the stop! + + #print "Check that our access links go from an onstreet xfer to a pnr or to a wnr" + for link in self.accessli: + if not isinstance(link,Linki): continue + try: + if int(link.A) in wnrNodes: + nodeInfo[lineset][stopNodeStr][link.B][0] = link.A + elif int(link.B) in wnrNodes: + nodeInfo[lineset][stopNodeStr][link.A][0] = link.B + elif int(link.A) in pnrNodes: + nodeInfo[lineset][stopNodeStr][link.B][1] = link.A + elif int(link.B) in pnrNodes: + nodeInfo[lineset][stopNodeStr][link.A][1] = link.B + except KeyError: + errorstr = "Invalid access link found in lineset %s stopNode %s -- Missing xfer? 
A=%s B=%s, xfernodes=%s wnrNodes=%s pnrNodes=%s" % \ + (lineset, stopNodeStr, link.A, link.B, str(nodeInfo[lineset][stopNodeStr].keys()), str(wnrNodes), str(pnrNodes)) + WranglerLogger.warning(errorstr) + # raise NetworkException(errorstr) + + book = xlrd.open_workbook(r"Y:\CHAMP\util\nodes.xls") + sh = book.sheet_by_index(0) + nodeNames = {} + for rx in range(0,sh.nrows): # skip header + therow = sh.row(rx) + nodeNames[int(therow[0].value)] = therow[1].value + # WranglerLogger.info(str(nodeNames)) + + # print it all out + for lineset in nodeInfo.keys(): + + stops = nodeInfo[lineset].keys() + stops.sort() + + WranglerLogger.debug("--------------- Line set %s -------------------------------" % lineset) + WranglerLogger.debug("%-30s %10s %10s %10s %10s" % ("stopname", "stop", "xfer", "wnr", "pnr")) + for stopNodeStr in stops: + numWnrs = 0 + stopname = "Unknown stop name" + if int(stopNodeStr) in nodeNames: stopname = nodeNames[int(stopNodeStr)] + for xfernode in nodeInfo[lineset][stopNodeStr].keys(): + WranglerLogger.debug("%-30s %10s %10s %10s %10s" % + (stopname, stopNodeStr, xfernode, + nodeInfo[lineset][stopNodeStr][xfernode][0], + nodeInfo[lineset][stopNodeStr][xfernode][1])) + if nodeInfo[lineset][stopNodeStr][xfernode][0] != "-": numWnrs += 1 + + if numWnrs == 0: + errorstr = "Zero wnrNodes or onstreetxfers for stop %s!" % stopNodeStr + WranglerLogger.critical(errorstr) + # raise NetworkException(errorstr) + + def line(self, name): + """ If a string is passed in, return the line for that name exactly. + If a regex, return all relevant lines in a list. + If 'all', returnall lines. 
+ """ + if isinstance(name,str): + if name in self.lines: + return self.lines[self.lines.index(name)] + + if str(type(name))=="": + toret = [] + for i in range(len(self.lines)): + if isinstance(self.lines[i],str): continue + if name.match(self.lines[i].name): toret.append(self.lines[i]) + return toret + if name=='all': + allLines = [] + for i in range(len(self.lines)): + allLines.append(self.lines[i]) + return allLines + raise NetworkException('Line name not found: %s' % (name,)) + + def splitLinkInTransitLines(self,nodeA,nodeB,newNode,stop=False): + totReplacements = 0 + allExp=re.compile(".") + for line in self.line(allExp): + if line.hasLink(nodeA,nodeB): + line.splitLink(nodeA,nodeB,newNode,stop=stop) + totReplacements+=1 + WranglerLogger.debug("Total Lines with Link %s-%s split:%d" % (nodeA,nodeB,totReplacements)) + + def replaceSegmentInTransitLines(self,nodeA,nodeB,newNodes): + totReplacements = 0 + allExp=re.compile(".") + newSection=[nodeA]+newNodes+[nodeB] + for line in self.line(allExp): + if line.hasSegment(nodeA,nodeB): + WranglerLogger.debug(line.name) + line.replaceSegment(nodeA,nodeB,newSection) + totReplacements+=1 + WranglerLogger.debug("Total Lines with Segment %s-%s replaced:%d" % (nodeA,nodeB,totReplacements)) + + def setCombiFreqsForShortLine(self, shortLine, longLine, combFreqs): + '''set all five headways for a short line to equal a combined + headway including long line. i.e. set 1-California Short frequencies + by inputing the combined frequencies of both lines. + + NOTE: make sure longLine Frequencies are set first!''' + try: + longLineInst=self.line(longLine) + except: + raise NetworkException('Unknown Route! %s' % (longLine)) + try: + shortLineInst=self.line(shortLine) + except: + raise NetworkException('Unknown Route! 
%s' % (shortLine)) + + [amLong,mdLong,pmLong,evLong,eaLong] = longLineInst.getFreqs() + [amComb,mdComb,pmComb,evComb,eaComb] = combFreqs + [amShort,mdShort,pmShort,evShort,eaShort] = [0,0,0,0,0] + if (amLong-amComb)>0: amShort=amComb*amLong/(amLong-amComb) + if (mdLong-mdComb)>0: mdShort=mdComb*mdLong/(mdLong-mdComb) + if (pmLong-pmComb)>0: pmShort=pmComb*pmLong/(pmLong-pmComb) + if (evLong-evComb)>0: evShort=evComb*evLong/(evLong-evComb) + if (eaLong-eaComb)>0: eaShort=eaComb*eaLong/(eaLong-eaComb) + shortLineInst.setFreqs([amShort,mdShort,pmShort,evShort,eaShort]) + + + def getCombinedFreq(self, names, coverage_set=False): + """ pass a regex pattern, we'll show the combined frequency. This + doesn't change anything, it's just a useful tool. + """ + lines = self.line(names) + denom = [0,0,0,0,0] + for l in lines: + if coverage_set: coverage_set.discard(l.name) + freqs = l.getFreqs() + for t in range(5): + if float(freqs[t])>0.0: + denom[t] += 1/float(freqs[t]) + + combined = [0,0,0,0,0] + for t in range(5): + if denom[t] > 0: combined[t] = round(1/denom[t],2) + return combined + + def verifyTransitLineFrequencies(self, frequencies, coverage=""): + """ Utility function to verify the frequencies are as expected. + frequencies is a dictionary of label => [ regex1, regex2, [freqlist] ] + coverage is a regex that says we want to know if we verified the + frequencies of all of these lines. e.g. 
MUNI* + """ + covset = set([]) + if coverage != "": + covpattern = re.compile(coverage) + for i in range(len(self.lines)): + if isinstance(self.lines[i],str): continue + if covpattern.match(self.lines[i].name): covset.add(self.lines[i].name) + # print covset + + labels = frequencies.keys(); labels.sort() + for label in labels: + logstr = "Verifying %-40s: " % label + + for regexnum in [0,1]: + frequencies[label][regexnum]=frequencies[label][regexnum].strip() + if frequencies[label][regexnum]=="": continue + pattern = re.compile(frequencies[label][regexnum]) + freqs = self.getCombinedFreq(pattern, coverage_set=covset) + if freqs[0]+freqs[1]+freqs[2]+freqs[3]+freqs[4]==0: + logstr += "-- Found no matching lines for pattern [%s]" % (frequencies[label][regexnum]) + for timeperiod in range(5): + if abs(freqs[timeperiod]-frequencies[label][2][timeperiod])>0.2: + logstr += "-- Mismatch. Desired %s" % str(frequencies[label][2]) + logstr += "but got ",str(freqs) + lines = self.line(pattern) + WranglerLogger.error(logstr) + WranglerLogger.error("Problem lines:") + for line in lines: WranglerLogger.error(str(line)) + raise NetworkException("Mismatching frequency") + logstr += "-- Match%d!" % (regexnum+1) + WranglerLogger.debug(logstr) + + if coverage != "": + WranglerLogger.debug("Found %d uncovered lines" % len(covset)) + for linename in covset: + WranglerLogger.debug(self.line(linename)) + + + def write(self, path='.', name='transit', writeEmptyFiles=True, suppressQuery=False): + self.validateOffstreet() + + """Write out this full transit network to disk in path specified. + """ + if os.path.exists(path): + if not suppressQuery: + print "Path [%s] exists already. Overwrite contents? (y/n) " % path + response = raw_input("") + WranglerLogger.debug("response = [%s]" % response) + if response != "Y" and response != "y": + exit(0) + else: + WranglerLogger.debug("\nPath [%s] doesn't exist; creating." 
% path) + os.mkdir(path) + + logstr = "Writing into %s\\%s: " % (path, name) + WranglerLogger.info(logstr) + print "Writing into %s\\%s: " % (path, name) + logstr = "" + if len(self.lines)>0 or writeEmptyFiles: + logstr += " lines" + f = open(os.path.join(path,name+".lin"), 'w'); + f.write(";;<>;;\n") + for line in self.lines: + if isinstance(line,str): f.write(line) + else: f.write(repr(line)+"\n") + f.close() + + if len(self.links)>0 or writeEmptyFiles: + logstr += " links" + f = open(os.path.join(path,name+".link"), 'w'); + for link in self.links: + f.write(str(link)+"\n") + f.close() + + if len(self.pnrs)>0 or writeEmptyFiles: + logstr += " pnr" + f = open(os.path.join(path,name+".pnr"), 'w'); + for pnr in self.pnrs: + f.write(str(pnr)+"\n") + f.close() + + if len(self.zacs)>0 or writeEmptyFiles: + logstr += " zac" + f = open(os.path.join(path,name+".zac"), 'w'); + for zac in self.zacs: + f.write(str(zac)+"\n") + f.close() + + if len(self.accessli)>0 or writeEmptyFiles: + logstr += " access" + f = open(os.path.join(path,name+".access"), 'w'); + for accessli in self.accessli: + f.write(str(accessli)+"\n") + f.close() + + if len(self.xferli)>0 or writeEmptyFiles: + logstr += " xfer" + f = open(os.path.join(path,name+".xfer"), 'w'); + for xferli in self.xferli: + f.write(str(xferli)+"\n") + f.close() + + logstr += "... 
done.\n" + WranglerLogger.info(logstr) + + def parseAndPrintTransitFile(self, trntxt, verbosity=1): + """ Verbosity=1: 1 line per line summary + Verbosity=2: 1 line per node + """ + success, children, nextcharacter = self.parser.parse(trntxt, production="transit_file") + if not nextcharacter==len(trntxt): + errorstr = "\n Did not successfully read the whole file; got to nextcharacter=%d out of %d total" % (nextcharacter, len(trntxt)) + errorstr += "\n Did read %d lines, next unread text = [%s]" % (len(children), trntxt[nextcharacter:nextcharacter+50]) + raise NetworkException(errorstr) + + # Convert from parser-tree format to in-memory transit data structures: + convertedLines = self.parser.convertLineData() + convertedLinks = self.parser.convertLinkData() + convertedPNR = self.parser.convertPNRData() + convertedZAC = self.parser.convertZACData() + convertedAccessLinki = self.parser.convertLinkiData("access") + convertedXferLinki = self.parser.convertLinkiData("xfer") + + return convertedLines, convertedLinks, convertedPNR, convertedZAC, \ + convertedAccessLinki, convertedXferLinki + + def parseFile(self, fullfile, insert_replace): + """ fullfile is the filename, + insert_replace=True if you want to replace the data in place rather than appending + """ + suffix = fullfile.rsplit(".")[-1].lower() + self.parseFileAsSuffix(fullfile,suffix,insert_replace) + + def parseFileAsSuffix(self,fullfile,suffix,insert_replace): + """ This is a little bit of a hack, but it's meant to allow us to do something + like read an xfer file as an access file... 
+ """ + self.parser = TransitParser(transit_file_def, verbosity=0) + self.parser.tfp.liType = suffix + logstr = " Reading %s as %s" % (fullfile, suffix) + f = open(fullfile, 'r'); + lines,links,pnr,zac,accessli,xferli = self.parseAndPrintTransitFile(f.read(), verbosity=0) + f.close() + logstr += self.doMerge(fullfile,lines,links,pnr,zac,accessli,xferli,insert_replace) + WranglerLogger.debug(logstr) + + def doMerge(self,path,lines,links,pnrs,zacs,accessli,xferli,insert_replace=False): + """Merge a set of transit lines & support links with this network's transit representation. + """ + + logstr = " -- Merging" + + if len(lines)>0: + logstr += " %s lines" % len(lines) + + extendlines = copy.deepcopy(lines) + for line in lines: + if isinstance(line,TransitLine) and (line in self.lines): + logstr += " *%s" % (line.name) + if insert_replace: + self.lines[self.lines.index(line)]=line + extendlines.remove(line) + else: + self.lines.remove(line) + + if len(extendlines)>0: + # for line in extendlines: print line + self.lines.extend(["\n;######################### From: "+path+"\n"]) + self.lines.extend(extendlines) + + if len(links)>0: + logstr += " %d links" % len(links) + self.links.extend(["\n;######################### From: "+path+"\n"]) + self.links.extend(links) #TODO: Need to replace existing links + + if len(pnrs)>0: + logstr += " %d PNRs" % len(pnrs) + self.pnrs.extend( ["\n;######################### From: "+path+"\n"]) + self.pnrs.extend(pnrs) #TODO: Need to replace existing PNRs + + if len(zacs)>0: + logstr += " %d ZACs" % len(zacs) + self.zacs.extend( ["\n;######################### From: "+path+"\n"]) + self.zacs.extend(zacs) #TODO: Need to replace existing PNRs + + if len(accessli)>0: + logstr += " %d accesslinks" % len(accessli) + self.accessli.extend( ["\n;######################### From: "+path+"\n"]) + self.accessli.extend(accessli) + + if len(xferli)>0: + logstr += " %d xferlinks" % len(xferli) + self.xferli.extend( ["\n;######################### From: 
"+path+"\n"]) + self.xferli.extend(xferli) + + + logstr += "...done." + return logstr + + def mergeDir(self,path,insert_replace=False): + """ Append all the transit-related files in the given directory. + Does NOT apply __init__.py modifications from that directory. + """ + dirlist = os.listdir(path) + dirlist.sort() + WranglerLogger.debug("Path: %s" % path) + + for filename in dirlist: + suffix = filename.rsplit(".")[-1].lower() + if suffix in ["lin","link","pnr","zac","access","xfer"]: + self.parser = TransitParser(transit_file_def, verbosity=0) + self.parser.tfp.liType = suffix + fullfile = os.path.join(path,filename) + logstr = " Reading %s" % filename + f = open(fullfile, 'r'); + lines,links,pnr,zac,accessli,xferli = self.parseAndPrintTransitFile(f.read(), verbosity=0) + f.close() + logstr += self.doMerge(fullfile,lines,links,pnr,zac,accessli,xferli,insert_replace) + WranglerLogger.debug(logstr) + + def _runAndLog(self, cmd, run_dir): + proc = subprocess.Popen( cmd, cwd = run_dir, stdout=subprocess.PIPE, stderr=subprocess.PIPE ) + for line in proc.stdout: + WranglerLogger.debug("stdout: " + line.strip('\r\n')) + for line in proc.stderr: + WranglerLogger.debug("stderr: " + line.strip('\r\n')) + ret = proc.wait() + WranglerLogger.debug("Received %d from [%s]" % (ret, cmd)) + return ret + + def cloneAndApplyProject(self, projectname, tag=None, tempdir=None, **kwargs): + """ Project name corresponds to projects in Y:\networks + tag is "1.0" or "1-latest", or None for just the latest version + tempdir is the parent dir to put the dir; pass None for python to just choose + kwargs are additional args for the apply + """ + if tempdir==None: + tempdir = tempfile.mkdtemp(prefix="Wrangler_tmp_", dir=".") + WranglerLogger.debug("Using tempdir %s" % tempdir) + elif not os.path.exists(tempdir): + os.makedirs(tempdir) + + cmd = r"git clone Y:\networks\%s" % projectname + ret = self._runAndLog(cmd, tempdir) + + if ret != 0: + raise NetworkException("Git clone failed; see log 
file") + + if tag != None: + newdir = os.path.join(tempdir, projectname) + cmd = r"git checkout %s" % tag + ret = self._runAndLog(cmd, newdir) + if ret != 0: + raise NetworkException("Git checkout failed; see log file") + + # apply it + sys.path.append(tempdir) + evalstr = "import %s; %s.apply(self" % (projectname, projectname) + for key,val in kwargs.iteritems(): + evalstr += ", %s=%s" % (key, str(val)) + evalstr += ")" + exec(evalstr) + + WranglerLogger.info("Applied %s" % projectname) + + # boo this doesn't work! + descstr = "for name,data in inspect.getmembers(%s):\n" % (projectname) + descstr += " if name==\"desc\" and inspect.ismethod(name):\n" + descstr += " WranglerLogger.info(\" - Description:\" + %s.desc())\n" % (projectname) + descstr += " elif name==\"year\" and inspect.ismethod(name):\n" + descstr += " WranglerLogger.info(\" - Year:\" + str(%s.year()))\n" % (projectname) + descstr += " else: WranglerLogger.debug(\"%s %s\" % (name, str(data)))\n" + exec(descstr) + # exec("members = inspect.getmembers(%s)" % projectname) + # WranglerLogger.debug(members) +# WranglerLogger.debug(inspect.getmembers() + + def addDelay(self, additionalLinkFile=""): + """ Replaces the addDelay.awk script which is mostly parsing + If additionalLinkFile is passed in, also uses that link file to supress dwell delay. 
+ """ + DELAY_VALUES = {} + DELAY_VALUES['Std'] = {1:0.5, 2:0.5, 3:0.4, # Muni Express, Local, Metro + 4:0.0, # BART + 5:0.5, # Non-SF Regional + 6:0.2, # SamTrans Express + 7:0.2, # Golden Gate Express + 8:0.2, # AC Transit Express + 9:0} # Ferries, caltrain + DELAY_VALUES['TPS'] = copy.deepcopy(DELAY_VALUES['Std']) + for i in [1,2,3,5]: # Muni modes plus non-sf regional are a bit faster + DELAY_VALUES['TPS'][i] -= 0.1 + + DELAY_VALUES['BRT'] = copy.deepcopy(DELAY_VALUES['Std']) + DELAY_VALUES['BRT'][1]=0.32 # (20% Savings Low Floor)*(20% Savings POP)*Dwell=.8*.8*.5=.32 + DELAY_VALUES['BRT'][2]=0.32 # (20% Savings Low Floor)*(20% Savings POP)*Dwell=.8*.8*.5=.32 + DELAY_VALUES['BRT'][3]=0.3 # lmz changed to 0.3 from 0.1 + DELAY_VALUES['BRT'][5]=0.32 + DEFAULT_DELAY_VALUE=0.5 + + linkSet = set() + for link in self.links: + if isinstance(link,TransitLink): + link.addNodesToSet(linkSet) + logstr = "addDelay: Size of linkset = %d" % (len(linkSet)) + + if additionalLinkFile!="": + linknet = Network() + linknet.parser = TransitParser(transit_file_def, verbosity=0) + f = open(additionalLinkFile, 'r'); + junk,additionallinks,junk,junk,junk,junk = \ + linknet.parseAndPrintTransitFile(f.read(), verbosity=0) + f.close() + for link in additionallinks: + if isinstance(link,TransitLink): + link.addNodesToSet(linkSet) + # print linkSet + logstr += " => %d with %s\n" % (len(linkSet), additionalLinkFile) + WranglerLogger.debug(logstr) + + + for line in self.lines: + "addin for line:",line + if not isinstance(line,TransitLine): continue + # replace RUNTIME with TIMEFAC=1.0 + if "RUNTIME" in line.attr: + del line.attr["RUNTIME"] + # line.attr["TIMEFAC"] = 1.0 + + # figure out what the dwell delay is + dwellDelay = 0 + if 'MODE' not in line.attr: + WranglerLogger.warning("Mode unknown for line %s" % (line.name)) + dwellDelay = DEFAULT_DELAY_VALUE + else: + mode = int(line.attr['MODE'].strip(r'"\'')) + if 'OWNER' in line.attr: owner = line.attr['OWNER'].strip(r'"\'') + else: owner = 
'Std' + + if owner not in DELAY_VALUES: + WranglerLogger.warning("addDelay: Didn't understand owner [%s] in line [%s] Using owner=[Std]" % (owner, line.name)) + owner = 'Std' + + dwellDelay = DELAY_VALUES[owner][mode] + + # print "line name=%s mode=%d owner=%s dwellDelay=%f" % (line.name, mode, owner, dwellDelay) + + + # add it in + for nodeIdx in range(len(line.n)): + # linkSet nodes - don't add delay 'cos that's inherent to the link + if int(line.n[nodeIdx].num) in linkSet: continue + + # last stop - no delay, end of the line + if nodeIdx == len(line.n)-1: continue + + # dwell delay for stop nodes only, first is ok if nonstop + if nodeIdx>0 and not line.n[nodeIdx].isStop(): continue + + line.n[nodeIdx].attr["DELAY"]=str(dwellDelay) + + +if __name__ == '__main__': + + LOG_FILENAME = "Wrangler_main_%s.info.LOG" % time.strftime("%Y%b%d.%H%M%S") + setupLogging(LOG_FILENAME, LOG_FILENAME.replace("info", "debug")) + + net = Network() + net.cloneAndApplyProject(projectname="Muni_TEP") + net.cloneAndApplyProject(projectname="Muni_CentralSubway", tag="1-latest", modelyear=2030) + net.cloneAndApplyProject(projectname="BART_eBART") + + net.write(name="muni", writeEmptyFiles=False) diff --git a/ZACLink.py b/ZACLink.py new file mode 100644 index 0000000..b58f9f6 --- /dev/null +++ b/ZACLink.py @@ -0,0 +1,24 @@ +__all__ = ['ZACLink'] + +class ZACLink(dict): + """ ZAC support Link. + 'link' property is the node-pair for this link (e.g. 24133-34133) + 'comment' is any end-of-line comment for this link + (must include the leading semicolon) + All other attributes are stored in a dictionary (e.g. 
thislink['MODE']='17') + """ + def __init__(self): + dict.__init__(self) + self.id='' + self.comment='' + + def __repr__(self): + s = "ZONEACCESS link=%s " % (self.id,) + + # Deal w/all link attributes + fields = ['%s=%s' % (k,v) for k,v in self.items()] + + s += " ".join(fields) + s += self.comment + + return s diff --git a/__init__.py b/__init__.py new file mode 100644 index 0000000..0ae2e6a --- /dev/null +++ b/__init__.py @@ -0,0 +1,38 @@ +from .Linki import Linki +from .Network import Network +from .NetworkException import NetworkException +from .PNRLink import PNRLink +from .Supplink import Supplink +try: + from .TransitAssignmentData import TransitAssignmentData +except: + from .transitAssignmentData import TransitAssignmentData +from .TransitCapacity import TransitCapacity +from .TransitLine import TransitLine +from .TransitLink import TransitLink +from .TransitNetwork import TransitNetwork +from .TransitParser import TransitParser +from .HighwayNetwork import HighwayNetwork +from .Logger import setupLogging, WranglerLogger +from .Node import Node +from .HwySpecsRTP import HwySpecsRTP + + +__all__ = ['NetworkException', 'setupLogging', 'WranglerLogger', + 'Network', 'TransitAssignmentData', 'TransitNetwork', 'TransitLine', 'TransitParser', + 'Node', 'TransitLink', 'Linki', 'PNRLink', 'Supplink', 'HighwayNetwork', 'HwySpecsRTP', + 'TransitCapacity', +] + + +if __name__ == '__main__': + + LOG_FILENAME = "Wrangler_main_%s.info.LOG" % time.strftime("%Y%b%d.%H%M%S") + setupLogging(LOG_FILENAME, LOG_FILENAME.replace("info", "debug")) + + net = Network() + net.cloneAndApplyProject(projectname="Muni_TEP") + net.cloneAndApplyProject(projectname="Muni_CentralSubway", tag="1-latest", modelyear=2030) + net.cloneAndApplyProject(projectname="BART_eBART") + + net.write(name="muni", writeEmptyFiles=False) diff --git a/transitAssignmentData.py b/transitAssignmentData.py new file mode 100644 index 0000000..74922b9 --- /dev/null +++ b/transitAssignmentData.py @@ -0,0 +1,798 @@ 
+# +# Original revision: Lisa Zorn 2010-8-5 +# based on old "combineTransitDBFs.py" +# +import csv,os,logging,string,sys,xlrd +from dataTable import DataTable, dbfTableReader, FieldType +from .TransitCapacity import TransitCapacity +from .TransitLine import TransitLine +from .Logger import WranglerLogger +from .NetworkException import NetworkException +from collections import defaultdict + +print "Importing ", __file__ + +__all__ = ['TransitAssignmentData', 'TransitAssignmentDataException'] + +class TransitAssignmentDataException(Exception): pass + +class TransitAssignmentData: + + TIMEPERIOD_TO_VEHTYPIDX = { "AM":2, "MD": 4, "PM":3, "EV":4, "EA":4 } + + + def __init__(self, directory=".", timeperiod="AM", champtype="champ4", muniTEP=True, ignoreModes=[], + system=[], profileNode=False,tpfactor="quickboards",grouping=None, + transitCapacity=None, + lineLevelAggregateFilename=None, linkLevelAggregateFilename=None): + """ + + * *directory* is the location of the transit assignment files + * *timeperiod* is a string in ["AM", "MD", "PM", "EV", "EA" ] + * *champtype* is a string in ["champ4", "champ3", "champ3-sfonly"] + * *muniTEP* is only important for Muni files, but it matters because vehicle type is different + * pass *ignoreModes* to ignore some, such as [11,12,13,14,15,16,17] to ignore access/egress/xfer + * pass *system* to restrict looking only at given systems, e.g. ["SF MUNI", "BART" ] + * pass *profileNode* to only look at links to or from that node + * *tpfactor* determines the time period peak hour factor. Must be one of ```quickboards``` + or ```constant``` or ```constant_with_peaked_muni```. + * Uses *transitLineToVehicle* and *transitVehicleToCapacity* to map transit lines to vehicle types, + and vehicle types to capacities. + * If *lineLevelAggregateFilename* or *linkLevelAggregateFilename* are passed in, then + it is assumed that the many transit assignment dbfs have already been aggregated (likely + by this very class!) 
and we should just read those instead of doing the work again. + """ + + + # from quickboards + if tpfactor=="quickboards": + self.TIMEPERIOD_FACTOR = { "AM":0.44, "MD":0.18, "PM":0.37, "EV":0.22, "EA":0.58 } + elif tpfactor=="constant": + self.TIMEPERIOD_FACTOR ={} + for tp in ["AM", "MD", "PM", "EV", "EA"]: + self.TIMEPERIOD_FACTOR[tp] = 1.0/TransitLine.HOURS_PER_TIMEPERIOD[tp] + elif tpfactor=="constant_with_peaked_muni": + # defaults + self.TIMEPERIOD_FACTOR ={} + for tp in ["AM", "MD", "PM", "EV", "EA"]: + self.TIMEPERIOD_FACTOR[tp] = 1.0/TransitLine.HOURS_PER_TIMEPERIOD[tp] + # muni peaking + self.TIMEPERIOD_FACTOR[11] = {"AM":0.45, # 0.39 / 0.85 (Muni peaking factor from 2010 APC / Muni's capacity ratio) + "MD":1/TransitLine.HOURS_PER_TIMEPERIOD["MD"], + "PM":0.45, + "EV":0.2, + "EA":1/TransitLine.HOURS_PER_TIMEPERIOD["EA"]} + self.TIMEPERIOD_FACTOR[12] = self.TIMEPERIOD_FACTOR[11] + self.TIMEPERIOD_FACTOR[13] = self.TIMEPERIOD_FACTOR[11] + self.TIMEPERIOD_FACTOR[14] = self.TIMEPERIOD_FACTOR[11] + self.TIMEPERIOD_FACTOR[15] = self.TIMEPERIOD_FACTOR[11] + else: + raise TransitAssignmentDataException("Invalid time period factor "+str(tpfactor)) + + self.assigndir = directory + self.timeperiod = timeperiod + self.champtype = champtype + self.ignoreModes= ignoreModes + self.system = system + self.profileNode= profileNode + self.aggregateAll = True # aggregate for A,B? 
+ if transitCapacity: + self.capacity = transitCapacity + else: + self.capacity = TransitCapacity() + self.csvColnames= None # uninitialized + + if self.timeperiod not in ["AM", "MD", "PM", "EV", "EA"]: + raise TransitAssignmentDataException("Invalid timeperiod "+str(timeperiod)) + if self.champtype not in ["champ3","champ4","champ3-sfonly"]: + raise TransitAssignmentDataException("Invalid champtypte "+str(champtype)) + + # supplementary workbooks + if grouping and (grouping.upper() == "RAPID"): + self.lineToGroup = self.assignMuniRapid() + else: + self.lineToGroup = self.readTransitLineGrouping(mapfile=grouping) + + + # Already aggregated up? + if lineLevelAggregateFilename: + self.readAggregateDbfs(asgnFileName=lineLevelAggregateFilename, + aggregateFileName=linkLevelAggregateFilename) + return + + # To determine what files we'll open + if not 'ALLTRIPMODES' in os.environ: + raise NetworkException("No ALLTRIPMODES in environment for TransitAssignmentData to decide on input files") + self.MODES = os.environ["ALLTRIPMODES"].split(" ") + WranglerLogger.debug("TransitAssignmentData MODES = " + str(self.MODES)) + + self.readTransitAssignmentCsvs() + + def readTransitLineGrouping(self, mapfile=None): + """ + Read the transit line groupings file which assigns a grouping to lines + """ + if not mapfile: return {} + + lineToGroup = {} + try: + workbook = xlrd.open_workbook(filename=mapfile,encoding_override='ascii') + except: + print "couldn't find that workbook %s, yo! No Groupings used!" 
% (mapfile) + return lineToGroup + sheet = workbook.sheet_by_name("Lookup") + row = 1 + while (row < sheet.nrows): + group = sheet.cellvalue(row,1).encode('utf-8') + self.lineToGroup[lookupsheet.cell_value(row,0).encode('utf-8')]=lookupsheet.cell_value(row,1).encode('utf-8') + row+=1 + return lineToGroup + + def assignMuniRapid(self): + lineToGroup = {} + RapidList=["E","J","K","L","M","N", + "1","1DRM","1PRS","1STN","1CRN","1SHT", + "5","5SHT","5EV","5L", + "9","9SHT","9L","9EVE","9X", + "14L","14X", + "22", + "28L", + "30","30SHT","30WSQ","30X", + "38L", + "47", + "49","49L", + "71","71L"] + for genericLine in RapidList: + for dir in ["I","O"]: + lineToGroup["MUN"+genericLine+dir]="RAPID" + return lineToGroup + + + def initializeFields(self, headerRow=None): + """ + Initializes the *trnAsgnFields*, *trnAsgnCopyFields*, *trnAsgnAdditiveFields*, + and *aggregateFields* + """ + if headerRow: + self.csvColnames = headerRow + else: + self.csvColnames = ["A","B","TIME","MODE", #"FREQ", + "PLOT", #"COLOR", + "STOP_A","STOP_B","DIST", + "NAME", #"SEQ", + "OWNER", + "AB_VOL","AB_BRDA","AB_XITA","AB_BRDB","AB_XITB", + "BA_VOL","BA_BRDA","BA_XITA","BA_BRDB","BA_XITB"] + print "csvColnames = %s" % str(self.csvColnames) + + self.colnameToCsvIndex = dict((self.csvColnames[idx],idx) for idx in range(len(self.csvColnames))) + + # copy these directly + self.trnAsgnFields = {"A": 'u4', + "B": 'u4', + "TIME": 'u4', + "MODE": 'u1', + "PLOT": 'u1', + "STOP_A": 'b', + "STOP_B": 'b', + "DIST": 'u4', + "NAME": 'a13', + "OWNER": 'a10', + } + self.trnAsgnCopyFields = self.trnAsgnFields.keys() + # these are in the dbf not the csv (grrrr) + self.trnAsgnFields["FREQ"] = 'f4' + self.trnAsgnFields["SEQ"] = 'u1' + self.trnAsgnFields["COLOR"] = 'u1' + + # Lets also ad these for easy joining + self.trnAsgnFields["AB"] ='a15' + self.trnAsgnFields["ABNAMESEQ"] ='a30' + # Straight lookup based on the line name + self.trnAsgnFields["GROUP"] ='a20' + self.trnAsgnFields["FULLNAME"] ='a40' + 
self.trnAsgnFields["SYSTEM"] ='a25' + self.trnAsgnFields["VEHTYPE"] ='a40' + self.trnAsgnFields["VEHCAP"] ='u2' + + # Calculated in the first pass + self.trnAsgnFields["PERIODCAP"] ='f4' + + # Additive fields are all U4 + #Here, I need to read in if volume is float, if is, flag if it is, write accordingly + self.trnAsgnAdditiveFields = ["AB_VOL","AB_BRDA","AB_XITA","AB_BRDB","AB_XITB", + "BA_VOL","BA_BRDA","BA_XITA","BA_BRDB","BA_XITB"] + for field in self.trnAsgnAdditiveFields: + self.trnAsgnFields[field]='f4' + + + # Calculated at the end + self.trnAsgnFields["LOAD"] ='f4' + + # aggregate fields + self.aggregateFields = {"A": 'u4', + "B": 'u4', + "AB": 'a15', + "FREQ": 'f4', # combined freq + "DIST": 'u4', # should be the same so first + "VEHCAP":'u2', # sum + "PERIODCAP":'f4', # sum + "LOAD": 'f4', # combined + "MAXLOAD":'f4', # max load of any line on the link + } + for field in self.trnAsgnAdditiveFields: + self.aggregateFields[field]='f4' + + + def readTransitAssignmentCsvs(self): + """ + Read the transit assignment dbfs, the direct output of Cube's transit assignment. + """ + self.trnAsgnTable = False + self.aggregateTable = False + warnline = {} + ABNameSeq_List = [] # (A,B,NAME,SEQ) from the dbf/csvs + + # open the input assignment files + for mode in self.MODES: + if mode == "WMWVIS": + filename = os.path.join(self.assigndir, "VISWMW" + self.timeperiod + ".csv") + elif mode[1]=="T": + filename = os.path.join(self.assigndir, "NS" + mode + self.timeperiod + ".csv") + else: + filename = os.path.join(self.assigndir, "SF" + mode + self.timeperiod + ".csv") + + # Read the DBF file into datatable + WranglerLogger.info("Reading "+filename) + + # Create our table data structure once + if mode == self.MODES[0]: + # figure out how many records + numrecs = 0 + totalrows = 0 + + filereader = csv.reader(open(filename, 'rb'), delimiter=',', quoting=csv.QUOTE_NONE) + for row in filereader: + + # header row? 
def readTransitAssignmentCsvs(self):
    """
    Read the transit assignment csvs (plus one dbf), the direct output of Cube's
    transit assignment.

    Two passes over the first mode's file: pass 1 counts kept records and sizes
    the DataTable; pass 2 fills it.  Subsequent modes' files are added into the
    existing rows (additive fields only), matched by the "A B NAME [SEQ]" key.
    Finally computes LOAD per row and optionally builds the aggregate table.
    """
    self.trnAsgnTable   = False
    self.aggregateTable = False
    warnline = {}
    ABNameSeq_List = []   # (A,B,NAME,SEQ) from the dbf/csvs

    # open the input assignment files
    for mode in self.MODES:
        if mode == "WMWVIS":
            filename = os.path.join(self.assigndir, "VISWMW" + self.timeperiod + ".csv")
        elif mode[1]=="T":
            filename = os.path.join(self.assigndir, "NS" + mode + self.timeperiod + ".csv")
        else:
            filename = os.path.join(self.assigndir, "SF" + mode + self.timeperiod + ".csv")

        # Read the DBF file into datatable
        WranglerLogger.info("Reading "+filename)

        # Create our table data structure once
        if mode == self.MODES[0]:
            # figure out how many records (pass 1: count only)
            numrecs   = 0
            totalrows = 0

            filereader = csv.reader(open(filename, 'rb'), delimiter=',', quoting=csv.QUOTE_NONE)
            for row in filereader:
                # header row?
                if row[0]=="A":
                    self.initializeFields(row)
                    continue
                elif totalrows==0 and not self.csvColnames:
                    # no header present -- fall back to the default column layout
                    self.initializeFields()

                totalrows += 1
                # profiling a single node? keep only links touching it
                if self.profileNode and \
                   (int(row[self.colnameToCsvIndex["A"]]) != self.profileNode and
                    int(row[self.colnameToCsvIndex["B"]]) != self.profileNode): continue

                if int(row[self.colnameToCsvIndex["MODE"]]) in self.ignoreModes: continue

                linename = row[self.colnameToCsvIndex["NAME"]].strip()

                # exclude this system?
                (system, vehicletype) = self.capacity.getSystemAndVehicleType(linename, self.timeperiod)
                if len(self.system)>0 and system not in self.system: continue

                numrecs += 1
            WranglerLogger.info("Keeping %d records out of %d" % (numrecs, totalrows))

            self.trnAsgnTable = DataTable(numRecords=numrecs,
                                          fieldNames=self.trnAsgnFields.keys(),
                                          numpyFieldTypes=self.trnAsgnFields.values())
            ABNameSeqSet = set()

        # Go through the records (pass 2: fill / accumulate)
        newrownum = 0   # row number in the trnAsgnTable,ABNameSeq_List -- rows we're keeping
        oldrownum = 0   # row number in the csv,dbf -- all input rows

        filereader = csv.reader(open(filename, 'rb'), delimiter=',', quoting=csv.QUOTE_NONE)

        # for the first csv only, also read the dbf for the freq and seq fields
        if mode == self.MODES[0]:
            indbf = dbfTableReader(os.path.join(self.assigndir, "SFWBW" + self.timeperiod + ".dbf"))
        else:
            indbf = None

        for row in filereader:
            # header row?
            if row[0]=="A": continue

            if self.profileNode:
                if (int(row[self.colnameToCsvIndex["A"]]) != self.profileNode and
                    int(row[self.colnameToCsvIndex["B"]]) != self.profileNode): continue
                elif int(row[self.colnameToCsvIndex["AB_VOL"]]) > 0:
                    WranglerLogger.info("Link %s %s for mode %s has AB_VOL %s" %
                                        (row[self.colnameToCsvIndex["A"]],
                                         row[self.colnameToCsvIndex["B"]], mode,
                                         row[self.colnameToCsvIndex["AB_VOL"]]))

            if int(row[self.colnameToCsvIndex["MODE"]]) in self.ignoreModes:
                oldrownum += 1
                continue

            linename = row[self.colnameToCsvIndex["NAME"]].strip()

            # exclude this system?
            # NOTE(review): unlike the ignoreModes skip just above, these continues
            # do NOT advance oldrownum, yet oldrownum is used to index into indbf
            # below -- verify the skip paths keep csv and dbf rows aligned.
            (system, vehicletype) = self.capacity.getSystemAndVehicleType(linename, self.timeperiod)
            if len(self.system)>0 and system not in self.system: continue

            # Initial table fill: Special stuff for the first time through
            if mode == self.MODES[0]:

                # ------------ these fields just get used directly
                for field in self.trnAsgnCopyFields:

                    try:
                        # integer fields
                        if self.trnAsgnFields[field][0] in ['u','b']:
                            if row[self.colnameToCsvIndex[field]]=="":
                                self.trnAsgnTable[newrownum][field] = 0
                            elif field in ['TIME','DIST']:
                                # backwards compatibility - dbfs were 100ths of a mile/min
                                self.trnAsgnTable[newrownum][field] = float(row[self.colnameToCsvIndex[field]])*100.0
                            else:
                                self.trnAsgnTable[newrownum][field] = int(row[self.colnameToCsvIndex[field]])
                        # float fields
                        elif self.trnAsgnFields[field][0] == 'f':
                            if row[self.colnameToCsvIndex[field]]=="":
                                self.trnAsgnTable[newrownum][field] = 0.0
                            else:
                                self.trnAsgnTable[newrownum][field] = float(row[self.colnameToCsvIndex[field]])
                        # text fields
                        else:
                            self.trnAsgnTable[newrownum][field] = row[self.colnameToCsvIndex[field]]

                    except:
                        WranglerLogger.fatal("Error intepreting field %s: [%s]" % (field, str(self.colnameToCsvIndex[field])))
                        WranglerLogger.fatal("row=%s" % str(row))
                        WranglerLogger.fatal(sys.exc_info()[0])
                        WranglerLogger.fatal(sys.exc_info()[1])
                        sys.exit(2)

                # ------------ these fields come from the dbf because they're missing in the csv (sigh)
                dbfRow = indbf.__getitem__(oldrownum)
                # sanity-check csv/dbf alignment on node numbers (only for "real" nodes < 100000)
                if int(row[self.colnameToCsvIndex["A"]])<100000:
                    if dbfRow["A"]!=int(row[self.colnameToCsvIndex["A"]]):
                        raise NetworkException("Assertion error for A on row %d: %s != %s" % (oldrownum, str(dbfRow["A"]), str(row[self.colnameToCsvIndex["A"]])))
                if int(row[self.colnameToCsvIndex["B"]])<100000:
                    if dbfRow["B"]!=int(row[self.colnameToCsvIndex["B"]]):
                        raise NetworkException("Assertion error for B on row %d: %s != %s" % (oldrownum, str(dbfRow["B"]), str(row[self.colnameToCsvIndex["B"]])))
                self.trnAsgnTable[newrownum]["FREQ"] = dbfRow["FREQ"]
                self.trnAsgnTable[newrownum]["SEQ"]  = dbfRow["SEQ"]

                trySeq = dbfRow["SEQ"]
                # ------------ special one-time computed fields

                # ABNameSeq is more complicated because we want it to be unique
                AB = row[self.colnameToCsvIndex["A"]] + " " + row[self.colnameToCsvIndex["B"]]
                self.trnAsgnTable[newrownum]["AB"] = AB

                ABNameSeq = AB + " " + linename
                if trySeq>0:
                    tryABNameSeq = ABNameSeq + " " + str(trySeq)

                    # This line seems to be a problem... A/B/NAME/SEQ are not unique
                    if tryABNameSeq in ABNameSeqSet:
                        WranglerLogger.warn("Non-Unique A/B/Name/Seq: " + tryABNameSeq + "; faking SEQ!")
                        # Find one that works
                        while tryABNameSeq in ABNameSeqSet:
                            trySeq       += 1
                            tryABNameSeq  = ABNameSeq + " " + str(trySeq)
                    ABNameSeq = tryABNameSeq
                self.trnAsgnTable[newrownum]["ABNAMESEQ"] = ABNameSeq
                ABNameSeqSet.add(ABNameSeq)

                ABNameSeq_List.append((int(row[self.colnameToCsvIndex["A"]]),
                                       int(row[self.colnameToCsvIndex["B"]]),
                                       row[self.colnameToCsvIndex["NAME"]],
                                       trySeq))

                # ------------ straight lookup FULLNAME, VEHTYPE, VEHCAP; easy calc for PERIODCAP
                self.trnAsgnTable[newrownum]["SYSTEM"]  = system
                self.trnAsgnTable[newrownum]["VEHTYPE"] = vehicletype

                self.trnAsgnTable[newrownum]["FULLNAME"] = self.capacity.getFullname(linename, self.timeperiod)

                try:
                    (vtype, vehcap) = self.capacity.getVehicleTypeAndCapacity(linename, self.timeperiod)

                    self.trnAsgnTable[newrownum]["VEHCAP"]    = vehcap
                    self.trnAsgnTable[newrownum]["PERIODCAP"] = TransitLine.HOURS_PER_TIMEPERIOD[self.timeperiod] * 60.0 * vehcap/self.trnAsgnTable[newrownum]["FREQ"]
                except:
                    # capacity lookup failed -- leave zero so LOAD is skipped later
                    self.trnAsgnTable[newrownum]["VEHCAP"]    = 0
                    self.trnAsgnTable[newrownum]["PERIODCAP"] = 0

                # if we still don't have a system, warn (once per line name)
                if self.trnAsgnTable[newrownum]["SYSTEM"] == "" and not warnline.has_key(linename):
                    WranglerLogger.warning("No default system: " + linename)
                    warnline[linename] =1

                #---------add in any grouping that may want to use
                if self.lineToGroup.has_key(linename):
                    self.trnAsgnTable[newrownum]["GROUP"] = self.lineToGroup[linename]
                else:
                    self.trnAsgnTable[newrownum]["GROUP"] = ""

                # initialize additive fields
                for field in self.trnAsgnAdditiveFields:
                    if row[self.colnameToCsvIndex[field]]=="":
                        self.trnAsgnTable[newrownum][field] = 0.0
                    else:
                        self.trnAsgnTable[newrownum][field] = float(row[self.colnameToCsvIndex[field]])

            # end initial table fill

            # Add in the subsequent assignment files
            else:

                # print oldrownum, newrownum, ABNameSeq_List[newrownum]
                # print row[self.colnameToCsvIndex["NAME"]], ABNameSeq_List[oldrownum][2]

                assert(int(row[self.colnameToCsvIndex["A"]]) == ABNameSeq_List[newrownum][0])
                assert(int(row[self.colnameToCsvIndex["B"]]) == ABNameSeq_List[newrownum][1])
                # these don't nec match, can be *32 in ferry skim rather than the bart vehicle name, for example
                # assert( row[self.colnameToCsvIndex["NAME"]] == ABNameSeq_List[newrownum][2])

                # rebuild the key recorded in pass one, then accumulate volumes
                ABNameSeq = row[self.colnameToCsvIndex["A"]] + " " + \
                            row[self.colnameToCsvIndex["B"]] + " " + \
                            row[self.colnameToCsvIndex["NAME"]].rstrip()
                if ABNameSeq_List[newrownum][3]>0:
                    ABNameSeq += " " + str(ABNameSeq_List[newrownum][3])
                for field in self.trnAsgnAdditiveFields:
                    if row[self.colnameToCsvIndex[field]] !="":
                        self.trnAsgnTable[ABNameSeq][field] += float(row[self.colnameToCsvIndex[field]])

            newrownum += 1
            oldrownum += 1

        # we're done with this; free it up
        del filereader
        if indbf:
            del indbf

        # Table is created and filled -- set the index
        if mode == self.MODES[0]:
            try:
                self.trnAsgnTable.setIndex(fieldName="ABNAMESEQ")
            except:
                # failure - try to figure out why: report the duplicate keys
                ABNameSeqList = []
                for row in self.trnAsgnTable:
                    ABNameSeqList.append(row["ABNAMESEQ"])
                ABNameSeqList.sort()
                for idx in range(len(ABNameSeqList)-1):
                    if ABNameSeqList[idx]==ABNameSeqList[idx+1]:
                        WranglerLogger.warn("Duplicate ABNAMESEQ at idx %d : [%s]" % (idx,ABNameSeqList[idx]))
                exit(1)

    # ok the table is all filled in -- fill in the LOAD
    for row in self.trnAsgnTable:
        if row["VEHCAP"] == 0: continue
        tpfactor = self.TIMEPERIOD_FACTOR[self.timeperiod]

        # mode-specific peaking factor will over-ride
        if row["MODE"] in self.TIMEPERIOD_FACTOR:
            tpfactor = self.TIMEPERIOD_FACTOR[row["MODE"]][self.timeperiod]

        row["LOAD"] = row["AB_VOL"] * tpfactor * row["FREQ"] / (60.0 * row["VEHCAP"])

    # build the aggregate table for key="A B"
    if self.aggregateAll:
        self.buildAggregateTable()
def buildAggregateTable(self):
    """
    Aggregate the line-level table into a link-level table keyed by "A B":
    additive fields are summed, PERIODCAP summed, MAXLOAD is the max line
    LOAD on the link, and FREQ is combined via summed reciprocals.
    """
    # first find how big it is
    ABSet = set()
    for row in self.trnAsgnTable:
        ABSet.add(row["AB"])

    self.aggregateTable = DataTable(numRecords=len(ABSet),
                                    fieldNames=self.aggregateFields.keys(),
                                    numpyFieldTypes=self.aggregateFields.values())
    ABtoRowIndex = {}
    rowsUsed = 0
    for row in self.trnAsgnTable:
        if row["AB"] not in ABtoRowIndex:
            # first line seen on this link -- initialize its aggregate row
            rowIndex = rowsUsed
            self.aggregateTable[rowIndex]["AB"]        = row["AB"]
            self.aggregateTable[rowIndex]["A"]         = row["A"]
            self.aggregateTable[rowIndex]["B"]         = row["B"]
            self.aggregateTable[rowIndex]["DIST"]      = row["DIST"]
            self.aggregateTable[rowIndex]["FREQ"]      = 0.0
            self.aggregateTable[rowIndex]["PERIODCAP"] = 0.0
            self.aggregateTable[rowIndex]["LOAD"]      = 0.0
            self.aggregateTable[rowIndex]["MAXLOAD"]   = 0.0
            for field in self.trnAsgnAdditiveFields: # sum
                self.aggregateTable[rowIndex][field] = 0.0
            ABtoRowIndex[row["AB"]] = rowsUsed
            rowsUsed += 1
        else:
            rowIndex = ABtoRowIndex[row["AB"]]

        for field in self.trnAsgnAdditiveFields: # sum
            self.aggregateTable[rowIndex][field] += row[field]
        # NOTE(review): AB_VOL and BA_VOL are also members of
        # trnAsgnAdditiveFields, so the two lines below appear to add those
        # volumes a second time -- confirm against the original indentation.
        self.aggregateTable[rowIndex]["AB_VOL"]    += row["AB_VOL"]
        self.aggregateTable[rowIndex]["BA_VOL"]    += row["BA_VOL"]
        self.aggregateTable[rowIndex]["PERIODCAP"] += row["PERIODCAP"]
        self.aggregateTable[rowIndex]["MAXLOAD"]    = max(row["LOAD"], self.aggregateTable[rowIndex]["MAXLOAD"])
        self.aggregateTable[rowIndex]["FREQ"]      += 1/row["FREQ"] # combining -- will take reciprocal later

    self.aggregateTable.setIndex(fieldName="AB")

    count=0
    for row in self.aggregateTable:
        count += 1
        if row["FREQ"]>0:
            # take the reciprocal of the summed reciprocals = combined headway
            row["FREQ"] = 1/row["FREQ"]
        if row["PERIODCAP"]>0:
            row["LOAD"] = float(row["AB_VOL"]) / row["PERIODCAP"]
        # print row["LOAD"]
    WranglerLogger.debug("count "+str(count)+" lines in aggregate table")
def calculateFleetCharacteristics(self):
    """ Calculates the fleet characteristics - vehicle hours and vehicle miles -
    keyed by "SYSTEM,VEHTYPE" in self.vehicleHours / self.vehicleMiles.
    Access/egress/transfer links (MODE > 9) are skipped.
    """
    self.vehicleHours = defaultdict(float)
    self.vehicleMiles = defaultdict(float)
    for key in self.trnAsgnTable._index.keys():
        record = self.trnAsgnTable[key]
        # don't process access, egress and transfer links
        if record["MODE"]>9: continue

        # index by system, then by vehicle type
        indexstr = record["SYSTEM"] + "," + record["VEHTYPE"]

        # number of vehicles = duration * 60 min/hour / freq
        numveh = TransitLine.HOURS_PER_TIMEPERIOD[self.timeperiod] * 60.0 / record["FREQ"]
        # vehicle hours = (# of vehicles) x time per link, or TIME * 1 hour/6000 hundredths of min
        self.vehicleHours[indexstr] += numveh*(record["TIME"]/6000.0)
        # vehicle miles = (# of vehicles) x dist per link, or DIST * 1 mile/100 hundredths of mile
        self.vehicleMiles[indexstr] += numveh*(record["DIST"]/100.0)

def readAggregateDbfs(self, asgnFileName, aggregateFileName=None):
    """
    This is essentially the reverse of writeDbfs() below: reloads the
    line-level table from *asgnFileName* and (optionally) the link-level
    aggregate table from *aggregateFileName*, rstripping text fields and
    rebuilding the indices.
    """
    self.initializeFields() # this may be unnecessary

    self.trnAsgnTable = dbfTableReader(asgnFileName)

    # a little bit of cleanup
    headerTuples = []
    for headerFieldType in self.trnAsgnTable.header:
        headerTuples.append(headerFieldType.toTuple())

    # rstrip spaces off the end of the text fields ('C' = character columns)
    for row in self.trnAsgnTable:
        for headerTuple in headerTuples:
            if headerTuple[1] == 'C': row[headerTuple[0]] = string.rstrip(row[headerTuple[0]])

    # this is the index!  Built AFTER the rstrip cleanup so the keys match the
    # stripped values.  (A pre-cleanup setIndex call was removed as redundant.)
    self.trnAsgnTable.setIndex(fieldName="ABNAMESEQ")

    # the link-level aggregate table
    if not aggregateFileName:
        self.aggregateAll   = False
        self.aggregateTable = False
        return

    self.aggregateAll   = True
    self.aggregateTable = dbfTableReader(aggregateFileName)

    # cleanup again
    headerTuples = []
    for headerFieldType in self.aggregateTable.header:
        headerTuples.append(headerFieldType.toTuple())

    # rstrip spaces off the end of the text fields
    for row in self.aggregateTable:
        for headerTuple in headerTuples:
            if headerTuple[1] == 'C': row[headerTuple[0]] = string.rstrip(row[headerTuple[0]])

def writePnrDrivers(self, pnrFileName):
    """
    Writes PNR Auto Trips to DBF with fields:
     ZONE
     PNR
     TO-DEMAND
     FR-DEMAND

    TODO: this method currently only allocates the table; nothing is written
    to *pnrFileName* yet.
    """
    self.pnrFields = {"ZONE": 'u4',
                      "PNR" : 'u4',
                      "TO"  : 'f4',
                      "FROM": 'f4'
                      }
    # BUGFIX: the types must come from pnrFields (not trnAsgnFields), so the
    # field names and numpy types pair up correctly.
    self.pnrTable = DataTable(fieldNames=self.pnrFields.keys(),
                              numpyFieldTypes=self.pnrFields.values())
+ """ + addtype = "F" + addlen = 9 + addnumdec = 2 + + # line-level + self.trnAsgnTable.header = \ + (FieldType("A", "N", 7, 0), + FieldType("B", "N", 7, 0), + FieldType("TIME", "N", 5, 0), + FieldType("MODE", "N", 3, 0), + FieldType("FREQ", "F", 6, 2), + FieldType("PLOT", "N", 1, 0), + FieldType("COLOR", "N", 2, 0), + FieldType("STOP_A", "N", 1, 0), + FieldType("STOP_B", "N", 1, 0), + FieldType("DIST", "N", 4, 0), + FieldType("NAME", "C", 13,0), + FieldType("SEQ", "N", 3, 0), + FieldType("OWNER", "C", 10,0), + FieldType("AB", "C", 15,0), + FieldType("ABNAMESEQ", "C", 30,0), + FieldType("FULLNAME", "C", 40,0), + FieldType("SYSTEM", "C", 25,0), + FieldType("GROUP", "C", 20,0), + FieldType("VEHTYPE", "C", 40,0), + FieldType("VEHCAP", "F", 8, 2), + FieldType("PERIODCAP", "F", 15, 2), + FieldType("LOAD", "F", 7, 3), + FieldType("AB_VOL", addtype, addlen, addnumdec), + FieldType("AB_BRDA", addtype, addlen, addnumdec), + FieldType("AB_XITA", addtype, addlen, addnumdec), + FieldType("AB_BRDB", addtype, addlen, addnumdec), + FieldType("AB_XITB", addtype, addlen, addnumdec), + FieldType("BA_VOL", addtype, addlen, addnumdec), + FieldType("BA_BRDA", addtype, addlen, addnumdec), + FieldType("BA_XITA", addtype, addlen, addnumdec), + FieldType("BA_BRDB", addtype, addlen, addnumdec), + FieldType("BA_XITB", addtype, addlen, addnumdec) + ) + self.trnAsgnTable.writeAsDbf(asgnFileName) + + if aggregateFileName==None: return + + if not self.aggregateTable: + self.buildAggregateTable() + + self.aggregateTable.header = \ + (FieldType("A", "N", 7, 0), + FieldType("B", "N", 7, 0), + FieldType("AB", "C", 15,0), + FieldType("FREQ", "F", 6, 2), + FieldType("DIST", "N", 4, 0), + FieldType("VEHCAP", "F", 8, 2), + FieldType("PERIODCAP", "F", 15, 2), + FieldType("LOAD", "F", 7, 3), + FieldType("MAXLOAD", "F", 7, 3), + FieldType("AB_VOL", addtype, addlen, addnumdec), + FieldType("AB_BRDA", addtype, addlen, addnumdec), + FieldType("AB_XITA", addtype, addlen, addnumdec), + FieldType("AB_BRDB", 
addtype, addlen, addnumdec), + FieldType("AB_XITB", addtype, addlen, addnumdec), + FieldType("BA_VOL", addtype, addlen, addnumdec), + FieldType("BA_BRDA", addtype, addlen, addnumdec), + FieldType("BA_XITA", addtype, addlen, addnumdec), + FieldType("BA_BRDB", addtype, addlen, addnumdec), + FieldType("BA_XITB", addtype, addlen, addnumdec) + ) + self.aggregateTable.writeAsDbf(aggregateFileName) + + def numBoards(self, linename, nodenum, nodenum_next, seq): + """ linename is something like MUN30I; it includes the direction. + nodenum is the node in question, nodenum_next is the next node in the line file + seq is for the sequence of (nodenum,nodenum_next). e.g. seq starts at 1 for the first link and increments + TODO: what if the line is two-way? + Returns an int representing number of boards in the whole time period. + Throws an exception if linename isnt recognized or if nodenum is not part of the line. + """ + key = "%d %d %s %d" % (nodenum, nodenum_next, linename.upper(), seq) + if key in self.trnAsgnTable: + return self.trnAsgnTable[key]["AB_BRDA"] + raise TransitAssignmentDataException("key [%s] not found in transit assignment data" % key) + + def numExits(self, linename, nodenum_prev, nodenum, seq): + """ See numBoards + """ + key = "%d %d %s %d" % (nodenum_prev, nodenum, linename.upper(), seq) + if key in self.trnAsgnTable: + return self.trnAsgnTable[key]["AB_XITB"] + raise TransitAssignmentDataException("key [%s] not found in transit assignment data" % key) + + def loadFactor(self, linename, a,b, seq): + """ Returns a fraction: peak hour pax per vehicle / vehicle capacity + e.g. 1.0 is a packed vehicle + + NOTE this assumes a distribution pax over the time period. For now, we'll use + the simple peak hour factors that quickboards uses but this could be refined + in the future. 
+ """ + key = "%d %d %s %d" % (a, b, linename.upper(), seq) + if key not in self.trnAsgnTable: + raise TransitAssignmentDataException("Key [%s] not found in transit assignment data" % key) + return self.trnAsgnTable[key]["LOAD"] + + def linkVolume(self,linename,a,b,seq): + """Return number of people on a given link a b""" + key = "%d %d %s %d" % (a, b, linename.upper(), seq) + if key not in self.trnAsgnTable: + raise TransitAssignmentDataException("Key [%s] not found in transit assignment data" % key) + return self.trnAsgnTable[key]["AB_VOL"] + + def linkTime(self,linename,a,b,seq): + """Return time in minutes on a given link a b""" + key = "%d %d %s %d" % (a, b, linename.upper(), seq) + if key not in self.trnAsgnTable: + raise TransitAssignmentDataException("Key [%s] not found in transit assignment data" % key) + return self.trnAsgnTable[key]["TIME"] + + + def linkDistance(self,linename,a,b,seq): + """Return distance in miles on a given link a b""" + key = "%d %d %s %d" % (a, b, linename.upper(), seq) + if key not in self.trnAsgnTable: + raise TransitAssignmentDataException("Key [%s] not found in transit assignment data" % key) + return self.trnAsgnTable[key]["DIST"] + + +# Not complete.... TODO if it makes sense.... +class DailyTransitAssignmentData: + + def __init__(self, tadAM, tadMD, tadPM, tadEV, tadEA): + """ + For aggregating into a single version! 
+ """ + keys = set(tadAM.trnAsgnTable._index.keys()) + keys = keys.union(tadMD.trnAsgnTable._index.keys()) + keys = keys.union(tadPM.trnAsgnTable._index.keys()) + keys = keys.union(tadEV.trnAsgnTable._index.keys()) + keys = keys.union(tadEA.trnAsgnTable._index.keys()) + + # self.trnAsgnTable = DataTable(numRecords=numrecs, + # fieldNames=self.trnAsgnFields.keys(), + # numpyFieldTypes=self.trnAsgnFields.values()) + + +if __name__ == '__main__': + logging.basicConfig(level=logging.DEBUG, + format="%(asctime)s - %(levelname)s - %(message)s", + datefmt='%Y-%b-%d %H:%M:%S',) + + if False: + tad1 = TransitAssignmentData(directory=r"X:\Projects\GHGReductionCE\2035", + timeperiod="AM", + tpfactor="constant") + print "Test1: vol for MUNKO, 13401-13402, 1) is " , tad1.linkVolume("MUNKO", 13401, 13402, 1) + tad1.writeDbfs(asgnFileName=r"X:\lmz\AM_asgn.dbf", aggregateFileName=r"X:\lmz\AM_agg.dbf") + tad1 = False + + if False: + tad2 = TransitAssignmentData(directory=r"X:\Projects\GHGReductionCE\2035", + timeperiod="AM", + tpfactor="constant") + print "Test2: vol for MUNKO, 13401-13402, 1) is " , tad2.linkVolume("MUNKO", 13401, 13402, 1) + tad2.writeDbfs(asgnFileName=r"X:\lmz\AM_asgnF.dbf", aggregateFileName=r"X:\lmz\AM_aggF.dbf") + tad2 = False + + if True: + tad3 = TransitAssignmentData(timeperiod="AM", + tpfactor="constant", + lineLevelAggregateFilename=r"X:\lmz\AM_asgn.dbf", + linkLevelAggregateFilename=r"X:\lmz\AM_agg.dbf") + print "Test3: vol for MUNKO, 13401-13402, 1) is " , tad3.linkVolume("MUNKO", 13401, 13402, 1) + tad3 = False + + if True: + tad4 = TransitAssignmentData(timeperiod="AM", + tpfactor="constant", + lineLevelAggregateFilename=r"X:\lmz\AM_asgnF.dbf", + linkLevelAggregateFilename=r"X:\lmz\AM_aggF.dbf") + print "Test4: vol for MUNKO, 13401-13402, 1) is " , tad4.linkVolume("MUNKO", 13401, 13402, 1) + tad4 = False \ No newline at end of file diff --git a/unittests/TransitNetworkTest.py b/unittests/TransitNetworkTest.py new file mode 100644 index 
import os, sys, unittest

# test this version of Wrangler: put the repo root (two levels up) near the
# front of sys.path so this checkout's Wrangler package wins
curdir = os.path.dirname(__file__)
sys.path.insert(1, os.path.normpath(os.path.join(curdir, "..", "..")))

import Wrangler

class TestTransitNetwork(unittest.TestCase):
    """Exercises TransitNetwork / TransitLine iteration, link/segment queries
    and line extension, using the TEST_A / TEST_B lines from the fixture
    line file in this directory."""

    def setUp(self):
        """ Initialize the TransitNetwork and read in the unittests dir
        """
        self.tn = Wrangler.TransitNetwork()
        thisdir = os.path.dirname(os.path.realpath(__file__))

        self.tn.mergeDir(thisdir)

    def test_transit_network_iterator(self):
        # iterate twice to make sure the iterator resets between passes
        count = 0
        for line in self.tn:
            count += 1
            self.assertTrue(isinstance(line,Wrangler.TransitLine))
        self.assertEqual(count,2)

        count = 0
        for line in self.tn:
            count += 1
            self.assertTrue(isinstance(line,Wrangler.TransitLine))
        self.assertEqual(count,2)

    def test_transit_line_iterator(self):
        count = 0
        for stop in self.tn.line("TEST_A"):
            count += 1
            self.assertTrue(isinstance(stop,int))
        self.assertEqual(count,10)

    def test_transit_line_hasLink(self):
        # negative node numbers are non-stop nodes in the .lin format
        self.assertTrue(self.tn.line("TEST_A").hasLink(2,3))
        self.assertTrue(self.tn.line("TEST_A").hasLink(-2,-3))
        self.assertFalse(self.tn.line("TEST_A").hasLink(3,2))
        self.assertFalse(self.tn.line("TEST_A").hasLink(2,4))

    def test_transit_line_hasSegment(self):
        # unlike hasLink, a segment may span intermediate nodes (2,4)
        self.assertTrue(self.tn.line("TEST_A").hasSegment(2,3))
        self.assertTrue(self.tn.line("TEST_A").hasSegment(-2,-3))
        self.assertFalse(self.tn.line("TEST_A").hasSegment(3,2))
        self.assertTrue(self.tn.line("TEST_A").hasSegment(2,4))

    def test_transit_line_extendLine(self):
        # extending from a stop node (14) should fail; the line ends at -14
        self.assertRaises(ValueError,
                          self.tn.line("TEST_B").extendLine,
                          14, [24,25,26,-27,28], False)

        self.tn.line("TEST_B").extendLine(-14,[24,25,26,-27,28], beginning=False)
        self.assertEqual(len(self.tn.line("TEST_B").n),8)
        # for stop in self.tn.line("TEST_B"): print stop

        # test doing an extend at the beginning

    def test_transit_line_index(self):
        self.assertEqual(self.tn.line("TEST_A").n.index(4), 3)

if __name__ == '__main__':
    unittest.main()
import os, sys, unittest

# Make this checkout's Wrangler package importable: insert the repo root
# (two directories up from this test file) near the front of sys.path.
curdir = os.path.dirname(__file__)
sys.path.insert(1, os.path.normpath(os.path.join(curdir, "..", "..")))

import Wrangler

class TestTransitNode(unittest.TestCase):
    """Checks Wrangler.Node's description() and boardsDisallowed() against one
    known node (Embarcadero BART, 16511) and one unknown node (1)."""

    def setUp(self):
        """ Build one recognized node and one unrecognized node.
        """
        self.unknown = Wrangler.Node(1)
        self.embarcadero = Wrangler.Node(16511)
        # ACCESS=2 marks the node as exit-only (no boardings)
        self.embarcadero.attr["ACCESS"] = 2

    def test_transit_node_description(self):
        self.assertEqual(self.embarcadero.description(), "Embarcadero BART")
        # an unrecognized node has no description
        self.assertEqual(self.unknown.description(), None)

    def test_transit_node_boards_disallowed(self):
        self.assertEqual(self.embarcadero.boardsDisallowed(), True)
        self.assertEqual(self.unknown.boardsDisallowed(), False)


if __name__ == '__main__':
    unittest.main()