2 # -*- coding: UTF-8 -*-
4 from gotovienna.BeautifulSoup import BeautifulSoup, NavigableString
5 from urllib2 import urlopen
6 from urllib import urlencode
7 from datetime import datetime, time, timedelta
8 from textwrap import wrap
13 from gotovienna import defaults
# Valid values for the origin/destination position type used by search().
POSITION_TYPES = ('stop', 'address', 'poi')

# Debug dump target for pages that could not be parsed.
# NOTE(review): relies on the 'os' module being imported at the top of
# the file -- confirm the import exists.
DEBUGLOG = os.path.expanduser('~/gotoVienna.debug')
class ParserError(Exception):
    """Raised when an expected structure is missing from the scraped html."""

    # msg: human-readable description of what failed to parse.
    def __init__(self, msg='Parser error'):
    # Page classification constants returned by sParser.check_page.
    UNKNOWN, CORRECTION, RESULT = range(3)
def extract_city(station):
    """ Extract city from string if present,
    else return default city

    >>> extract_city('Karlsplatz, Wien')
    'Wien'
    >>> extract_city('Karlsplatz')
    'Wien'
    """
    if len(station.split(',')) > 1:
        return station.split(',')[-1].strip()
    # No ", city" suffix present -- fall back to the default city.
    return 'Wien'
def extract_station(station):
    """ Remove city from string

    >>> extract_station('Karlsplatz, Wien')
    'Karlsplatz'
    >>> extract_station('Karlsplatz')
    'Karlsplatz'
    """
    if len(station.split(',')) > 1:
        # Cut at the last comma so station names containing commas survive.
        return station[:station.rindex(',')].strip()
    # No city suffix -- return the string unchanged.
    return station
def split_station(station):
    """Split a "station, city" string into a (station, city) tuple.

    Falls back to the default city when no city part is present.

    >>> split_station('Karlsplatz, Wien')
    ('Karlsplatz', 'Wien')
    >>> split_station('Karlsplatz')
    ('Karlsplatz', 'Wien')
    """
    pieces = station.rsplit(',', 1)
    if len(pieces) == 2:
        return (pieces[0].strip(), pieces[1].strip())
    return (station, 'Wien')
def guess_location_type(location):
    """Guess type (stop, address, poi) of a location

    >>> guess_location_type('pilgramgasse')
    'stop'
    >>> guess_location_type('karlsplatz 14')
    'address'
    >>> guess_location_type('reumannplatz 12/34')
    'address'
    """
    parts = location.split()

    # Robustness: an empty string has no parts to inspect.
    if not parts:
        return 'stop'

    last_part = parts[-1]

    # Assume all single-word locations are stops
    if len(parts) == 1:
        return 'stop'

    # If the last part is numeric, assume address
    if last_part.isdigit() and len(parts) > 1:
        return 'address'

    # Addresses with door number (e.g. "12/34")
    if all(x.isdigit() or x == '/' for x in last_part):
        return 'address'

    # Sane default - assume it's a stop/station name
    # NOTE(review): 'poi' is listed in POSITION_TYPES but never guessed
    # here -- callers must request it explicitly.
    return 'stop'
93 def search(origin_tuple, destination_tuple, dtime=None):
94 """ build route request
95 returns html result (as urllib response)
98 dtime = datetime.now()
100 origin, origin_type = origin_tuple
101 origin, origin_city = split_station(origin)
103 destination, destination_type = destination_tuple
104 destination, destination_city = split_station(destination)
107 if origin_type is None:
108 origin_type = guess_location_type(origin)
109 print 'Guessed origin type:', origin_type
111 if destination_type is None:
112 destination_type = guess_location_type(destination)
113 print 'Guessed destination type:', destination_type
115 if (origin_type not in POSITION_TYPES or
116 destination_type not in POSITION_TYPES):
117 raise ParserError('Invalid position type')
119 post = defaults.search_post
120 post['name_origin'] = origin
121 post['type_origin'] = origin_type
122 post['name_destination'] = destination
123 post['type_destination'] = destination_type
124 post['itdDateDayMonthYear'] = dtime.strftime('%d.%m.%Y')
125 post['itdTime'] = dtime.strftime('%H:%M')
126 post['place_origin'] = origin_city
127 post['place_destination'] = destination_city
128 params = urlencode(post)
129 url = '%s?%s' % (defaults.action, params)
136 """ Parser for search response
    def __init__(self, html):
        """Parse the raw html response into a BeautifulSoup tree."""
        self.soup = BeautifulSoup(html)
142 def check_page(self):
143 if self.soup.find('form', {'id': 'form_efaresults'}):
144 return PageType.RESULT
146 if self.soup.find('div', {'class':'form_error'}):
147 return PageType.CORRECTION
149 return PageType.UNKNOWN
151 state = property(check_page)
153 def get_correction(self):
154 names_origin = self.soup.find('select', {'id': 'nameList_origin'})
155 names_destination = self.soup.find('select', {'id': 'nameList_destination'})
156 places_origin = self.soup.find('select', {'id': 'placeList_origin'})
157 places_destination = self.soup.find('select', {'id': 'placeList_destination'})
160 if any([names_origin, names_destination, places_origin, places_destination]):
164 dict['origin'] = map(lambda x: x.text,
165 names_origin.findAll('option'))
166 if names_destination:
167 dict['destination'] = map(lambda x: x.text,
168 names_destination.findAll('option'))
171 dict['place_origin'] = map(lambda x: x.text,
172 names_origin.findAll('option'))
173 if names_destination:
174 dict['place_destination'] = map(lambda x: x.text,
175 names_destination.findAll('option'))
180 raise ParserError('Unable to parse html')
    def get_result(self):
        """Wrap the current page in an rParser for extracting routes."""
        return rParser(str(self.soup))
188 """ Parser for routing results
191 def __init__(self, html):
192 self.soup = BeautifulSoup(html)
193 self._overview = None
197 def get_tdtext(cls, x, cl):
198 return x.find('td', {'class': cl}).text
    def get_change(cls, x):
        """Read the row's 'col_change' cell (number of line changes)."""
        y = rParser.get_tdtext(x, 'col_change')
        # NOTE(review): the conversion of y (presumably to int, with a
        # fallback for empty text) is not shown here -- confirm.
    def get_price(cls, x):
        """Parse the fare from the row's 'col_price' cell as a float."""
        y = rParser.get_tdtext(x, 'col_price')
        # The site formats prices with a decimal comma (e.g. '1,80');
        # convert it to a decimal point before float().
        return float(y.replace(',', '.'))
    def get_date(cls, x):
        """Parse the row's 'col_date' cell ('dd.mm.yyyy') into a date."""
        y = rParser.get_tdtext(x, 'col_date')
        return datetime.strptime(y, '%d.%m.%Y').date()
    def get_datetime(cls, x):
        """Parse the row's 'col_time' cell into [departure, arrival] datetimes.

        Two formats are handled: 'HH:MM-HH:MM' (combined with the row's
        separate date column) and a free-form variant with optional
        inline 'dd.mm' dates around each time.
        """
        y = rParser.get_tdtext(x, 'col_time')
        if (y.find("-") > 0):
            # 'HH:MM-HH:MM': split and build time objects for both ends.
            times = map(lambda z: time(*map(int, z.split(':'))), y.split('-'))
            d = rParser.get_date(x)
            from_dtime = datetime.combine(d, times[0])
            # An arrival earlier than the departure means the trip
            # crosses midnight -- push the arrival to the next day.
            if times[0] > times[1]:
                to_dtime = datetime.combine(d + timedelta(1), times[1])
                to_dtime = datetime.combine(d, times[1])
            return [from_dtime, to_dtime]
        # Fallback: optional 'dd.mm' dates around each 'HH:MM' time.
        dtregex = {'date' : '\d\d\.\d\d',
        regex = "\s*(?P<date1>{date})?\s*(?P<time1>{time})\s*(?P<date2>{date})?\s*(?P<time2>{time})\s*".format(**dtregex)
        ma = re.match(regex, y)
        def extract_datetime(gr, n):
            # With an explicit 'dd.mm' date, attach the current year.
            # NOTE(review): this branch's result is never returned --
            # presumably dead code or an elided return; confirm.
            if 'date%d' % n in gr and gr['date%d' % n]:
                from_dtime = datetime.strptime(str(datetime.today().year) + gr['date%d' % n] + gr['time%d' % n], '%Y%d.%m.%H:%M')
            t = datetime.strptime(gr['time%d' % n], '%H:%M').time()
            d = datetime.today().date()
            return datetime.combine(d, t)
        from_dtime = extract_datetime(gr, 1)
        to_dtime = extract_datetime(gr, 2)
        return [from_dtime, to_dtime]
273 for detail in self.details():
    def _parse_details(self):
        """Extract per-trip step details from the 'tourdetail' tables."""
        tours = self.soup.findAll('div', {'class': 'data_table tourdetail'})
        # One dict per <tr>: the step's timespan, the start/end station
        # names and the free-text info column.
        # NOTE(review): the inner lambdas reuse the names x/z and shadow
        # the outer map variables -- behavior is preserved but this is
        # easy to misread; consider renaming.
        trips = map(lambda x: map(lambda y: {
                    'timespan': rParser.get_datetime(y),
                    'station': map(lambda z: z[2:].strip(),
                                   filter(lambda x: type(x) == NavigableString, y.find('td', {'class': 'col_station'}).contents)), # filter non NaviStrings
                    'info': map(lambda x: x.strip(),
                                filter(lambda z: type(z) == NavigableString, y.find('td', {'class': 'col_info'}).contents)),
                }, x.find('tbody').findAll('tr')),
        """returns list of trip details
        [ [ { 'time': [datetime.time, datetime.time] if time else [],
              'station': [u'start', u'end'] if station else [],
              'info': [u'start station' if station else u'details for walking', u'end station' if station else u'walking duration']
            }, ... # next trip step
          ], ... # next trip possibility
        """
        # Parse lazily on first access and cache for subsequent calls.
        if not self._details:
            self._details = self._parse_details()
    def _parse_overview(self):
        """Build the list of trip-overview dicts from the result table."""
        table = self.soup.find('table', {'id': 'tbl_fahrten'})

        # check if there is an overview table
        if table and table.findAll('tr'):
            rows = table.findAll('tr')[1:] # cut off headline
            # One dict per trip row: departure/arrival span, number of
            # line changes and the fare.
            overview = map(lambda x: {
                'timespan': rParser.get_datetime(x),
                'change': rParser.get_change(x),
                'price': rParser.get_price(x),
        raise ParserError('Unable to parse overview')
        # Parse lazily; on failure dump the page for offline debugging.
        if not self._overview:
            self._overview = self._parse_overview()
        except AttributeError:
            f = open(DEBUGLOG, 'w')
            f.write(str(self.soup))
            # NOTE(review): no f.close() visible here -- confirm one
            # follows; a 'with' block would be safer.
        return self._overview