--- /dev/null
+from os import path
+import json
+import shutil
+import defaults
+import realtime
+
+
+def load(filename, typ=list):
+    """ Load a JSON cache file and return its content.
+
+    filename -- path of the cache file to read
+    typ -- expected top-level type of the decoded JSON (default: list)
+
+    Returns the decoded object when the file exists and holds JSON of
+    the expected type, otherwise None.  A corrupt or unexpectedly-typed
+    file is copied to <filename>.bak so it can be inspected later.
+    """
+    # BUG FIX: the parameter used to be named 'path', shadowing the
+    # os.path module and breaking path.exists(); 'typ' was also used
+    # but missing from the signature entirely.
+    if path.exists(filename):
+        try:
+            with open(filename, 'r') as f:
+                j = json.load(f)
+            if isinstance(j, typ):
+                return j
+            print 'Unexpected content in cache file'
+            print 'rebuilding cache'
+            shutil.copy(filename, filename + '.bak')
+        except ValueError:
+            # json.load raises ValueError on malformed JSON (Python 2)
+            print 'Corrupt cache file'
+            print 'rebuilding cache'
+            shutil.copy(filename, filename + '.bak')
+
+    return None
+
+class Lines(list):
+    """ List-like wrapper around the cached line names.
+
+    On construction the cached line list (defaults.cache_line) takes
+    precedence over any explicitly supplied list.
+    """
+
+    def __init__(self, lines=None):
+        # BUG FIX: avoid a mutable default argument (lines=[]).
+        cached = load(defaults.cache_line)
+        if cached and type(cached) == list:
+            lines = cached
+        self.lines = lines if lines is not None else []
+
+    def __iter__(self):
+        # Simply return; an explicit 'raise StopIteration()' inside a
+        # generator is redundant and becomes a RuntimeError under
+        # PEP 479 semantics.
+        for line in self.lines:
+            yield line
+
+    def __iadd__(self, y):
+        # BUG FIX: __iadd__ must return self, otherwise 'obj += y'
+        # rebinds obj to None.
+        self.lines += y
+        return self
+
+    def __add__(self, y):
+        return self.lines + y
+
+    def __getitem__(self, y):
+        return self.lines[y]
+
+    def __len__(self):
+        return len(self.lines)
+
+    def __str__(self):
+        return str(self.lines)
+
+    def __setitem__(self, i, y):
+        self.lines[i] = y
+
+class Stations(dict):
+    # class-level cache shared by every Stations instance
+    stations = {}
+
+    def __init__(self, line=False):
+        """ loads cache files
+        if line=False behaves as dict of all lines/stations
+        if line behaves as dict of directions/stations of line
+        """
+        if not Stations.stations:
+            # BUG FIX: read the *stations* cache (was cache_line, the
+            # line-list cache) and assign the value actually loaded
+            # (was the undefined name 'st').
+            s = load(defaults.cache_stations, dict)
+            if s:
+                Stations.stations = s
+
+        self.current_line = line
+        if line is False:
+            self.dict = Stations.stations
+        elif line in Stations.stations:
+            self.dict = Stations.stations[line]
+        else:
+            Stations.stations[line] = {}
+            self.dict = Stations.stations[line]
+
+    def __getitem__(self, *args, **kwargs):
+        # BUG FIX: delegate straight to the wrapped dict; forwarding
+        # 'self' as the first argument made every lookup a TypeError.
+        return self.dict.__getitem__(*args, **kwargs)
-from os import path
+from os import path, mkdir
# route search
-folder = path.dirname(__file__)
action = 'http://efa.vor.at/wvb/XSLT_TRIP_REQUEST2'
-hist_file = path.join(folder, '.wl_history')
+hist_file = path.expanduser('~/.gotovienna_history')
+sys_cache = path.expanduser('~/.cache')
+cache_folder = path.join(sys_cache, 'gotovienna')
+
+# Create the cache directory tree.  EAFP: attempting mkdir and
+# tolerating OSError avoids the race between an exists() check and the
+# actual mkdir() when two instances start at once (the old FIXME).
+for _d in (sys_cache, cache_folder):
+    if not path.exists(_d):
+        try:
+            mkdir(_d)
+        except OSError:
+            # Already created concurrently, or genuinely not creatable;
+            # a real failure will surface when the cache is written.
+            pass
+
+# JSON cache files consumed by the cache module (line list / stations)
+cache_line = path.join(cache_folder, 'lines.json')
+cache_stations = path.join(cache_folder, 'stations.json')
# iTip
'itdDateDayMonthYear': None, # DD.MM.YYYY
'itdTime': None, # HH:MM
'submitbutton': 'SUCHEN'
- }
\ No newline at end of file
+ }
--- /dev/null
+from realtime import ITipParser
+
+class Line:
+    """ A single transport line known to the realtime (iTip) interface. """
+
+    def __init__(self, name):
+        """ name -- line name; must appear in ITipParser.lines()
+
+        Raises LineNotFoundError if the (stripped) name is unknown.
+        """
+        self._stations = None
+        self.parser = ITipParser()
+        if name.strip() in self.parser.lines():
+            self.name = name.strip()
+        else:
+            raise LineNotFoundError('There is no line "%s"' % name.strip())
+
+    @property
+    def stations(self):
+        # Lazily fetched and memoized.  BUG FIX: 'parser' was an
+        # unqualified name (NameError); it lives on self.
+        if not self._stations:
+            self._stations = self.parser.get_stations(self.name)
+        return self._stations
+
+    def get_departures(self, stationname):
+        """ Filter this line's stations by case-insensitive prefix match
+        against stationname, per direction.
+
+        Raises StationNotFoundError when no station matches; returning
+        actual departures is still a TODO.
+        """
+        stationname = stationname.strip().lower()
+        stations = self.stations
+
+        # BUG FIXES: 'false' -> False; str.startswith (there is no
+        # starts_with method); filter the direction's own station list,
+        # not the dict of directions.
+        found = False
+
+        for direction in stations.keys():
+            # keep only stations whose name starts with stationname
+            stations[direction] = filter(
+                lambda station: station[0].lower().startswith(stationname),
+                stations[direction])
+            found = found or bool(stations[direction])
+
+        if found:
+            # TODO return departures
+            raise NotImplementedError()
+        else:
+            raise StationNotFoundError('There is no stationname called "%s" at route of line "%s"' % (stationname, self.name))
+
+class Station:
+    # Placeholder: station handling is not implemented yet -- TODO.
+    def __init__(self):
+        pass
return [(LINE_TYPE_NAMES[key], categorized_lines[key])
for key in sorted(categorized_lines)]
-
-
-class Line:
- def __init__(self, name):
- self._stations = None
- self.parser = ITipParser()
- if name.strip() in self.parser.lines():
- self.name = name.strip()
- else:
- raise LineNotFoundError('There is no line "%s"' % name.strip())
-
- @property
- def stations(self):
- if not self._stations:
- self._stations = parser.get_stations(self.name)
- return self._stations
-
- def get_departures(self, stationname):
- stationname = stationname.strip().lower()
- stations = self.stations
-
- found = false
-
- for direction in stations.keys():
- # filter stations starting with stationname
- stations[direction] = filter(lambda station: station[0].lower().starts_with(stationname), stations)
- found = found or bool(stations[direction])
-
- if found:
- # TODO return departures
- raise NotImplementedError()
- else:
- raise StationNotFoundError('There is no stationname called "%s" at route of line "%s"' % (stationname, self.name))
return station.split(',')[-1].strip()
else:
return 'Wien'
-
+
def extract_station(station):
""" Remove city from string
return station[:station.rindex(',')].strip()
else:
return station
-
+
def split_station(station):
""" >>> split_station('Karlsplatz, Wien')
('Karlsplatz', 'Wien')
origin, origin_type = origin_tuple
origin, origin_city = split_station(origin)
-
+
destination, destination_type = destination_tuple
destination, destination_city = split_station(destination)
post['place_destination'] = destination_city
params = urlencode(post)
url = '%s?%s' % (defaults.action, params)
-
- try:
- f = open(DEBUGLOG, 'a')
- f.write(url + '\n')
- f.close()
- except:
- print 'Unable to write to DEBUGLOG: %s' % DEBUGLOG
+ #print url
return urlopen(url)
names_destination = self.soup.find('select', {'id': 'nameList_destination'})
places_origin = self.soup.find('select', {'id': 'placeList_origin'})
places_destination = self.soup.find('select', {'id': 'placeList_destination'})
-
+
if any([names_origin, names_destination, places_origin, places_destination]):
dict = {}
-
+
if names_origin:
- dict['origin'] = map(lambda x: x.text,
+ dict['origin'] = map(lambda x: x.text,
names_origin.findAll('option'))
if names_destination:
- dict['destination'] = map(lambda x: x.text,
+ dict['destination'] = map(lambda x: x.text,
names_destination.findAll('option'))
-
+
if places_origin:
- dict['place_origin'] = map(lambda x: x.text,
+ dict['place_origin'] = map(lambda x: x.text,
names_origin.findAll('option'))
if names_destination:
- dict['place_destination'] = map(lambda x: x.text,
+ dict['place_destination'] = map(lambda x: x.text,
names_destination.findAll('option'))
-
+
return dict
-
+
else:
raise ParserError('Unable to parse html')
to_dtime = datetime.combine(d + timedelta(1), times[1])
else:
to_dtime = datetime.combine(d, times[1])
-
+
return [from_dtime, to_dtime]
-
+
else:
dtregex = {'date' : '\d\d\.\d\d',
'time': '\d\d:\d\d'}
-
+
regex = "\s*(?P<date1>{date})?\s*(?P<time1>{time})\s*(?P<date2>{date})?\s*(?P<time2>{time})\s*".format(**dtregex)
ma = re.match(regex, y)
-
+
if not ma:
return []
-
+
gr = ma.groupdict()
-
+
def extract_datetime(gr, n):
if 'date%d' % n in gr and gr['date%d' % n]:
from_dtime = datetime.strptime(str(datetime.today().year) + gr['date%d' % n] + gr['time%d' % n], '%Y%d.%m.%H:%M')
t = datetime.strptime(gr['time%d' % n], '%H:%M').time()
d = datetime.today().date()
return datetime.combine(d, t)
-
+
# detail mode
from_dtime = extract_datetime(gr, 1)
to_dtime = extract_datetime(gr, 2)
-
+
return [from_dtime, to_dtime]
-
+
else:
return []