[pywienerlinien] / gotovienna / routing.py
#!/usr/bin/env python
# -*- coding: UTF-8 -*-

from gotovienna.BeautifulSoup import BeautifulSoup, NavigableString
from urllib2 import urlopen
from urllib import urlencode
from datetime import datetime, time, timedelta
from textwrap import wrap
import sys
import os.path
import re

from gotovienna import defaults

POSITION_TYPES = ('stop', 'address', 'poi')
TIMEFORMAT = '%H:%M'
DEBUGLOG = os.path.expanduser('~/gotoVienna.debug')

class ParserError(Exception):

    def __init__(self, msg='Parser error'):
        Exception.__init__(self, msg)
        self.message = msg

class PageType:
    UNKNOWN, CORRECTION, RESULT = range(3)


def extract_city(station):
    """ Extract city from string if present,
    else return default city

    >>> extract_city('Karlsplatz, Wien')
    'Wien'
    """
    if len(station.split(',')) > 1:
        return station.split(',')[-1].strip()
    else:
        return 'Wien'

def extract_station(station):
    """ Remove city from string

    >>> extract_station('Karlsplatz, Wien')
    'Karlsplatz'
    """
    if len(station.split(',')) > 1:
        return station[:station.rindex(',')].strip()
    else:
        return station

def split_station(station):
    """ >>> split_station('Karlsplatz, Wien')
    ('Karlsplatz', 'Wien')
    >>> split_station('Karlsplatz')
    ('Karlsplatz', 'Wien')
    """
    if len(station.split(',')) > 1:
        return (station[:station.rindex(',')].strip(), station.split(',')[-1].strip())
    else:
        return (station, 'Wien')

def guess_location_type(location):
    """Guess type (stop, address, poi) of a location

    >>> guess_location_type('pilgramgasse')
    'stop'

    >>> guess_location_type('karlsplatz 14')
    'address'

    >>> guess_location_type('reumannplatz 12/34')
    'address'
    """
    parts = location.split()
    first_part = parts[0]
    last_part = parts[-1]

    # Assume all single-word locations are stops
    if len(parts) == 1:
        return 'stop'

    # If the last part is numeric, assume address
    if last_part.isdigit() and len(parts) > 1:
        return 'address'

    # Addresses with door number (e.g. "12/34")
    if all(x.isdigit() or x == '/' for x in last_part):
        return 'address'

    # Sane default - assume it's a stop/station name
    return 'stop'

def search(origin_tuple, destination_tuple, dtime=None):
    """ build route request
    returns html result (as urllib response)
    """
    if not dtime:
        dtime = datetime.now()

    origin, origin_type = origin_tuple
    origin, origin_city = split_station(origin)

    destination, destination_type = destination_tuple
    destination, destination_city = split_station(destination)

    if origin_type is None:
        origin_type = guess_location_type(origin)
        print 'Guessed origin type:', origin_type

    if destination_type is None:
        destination_type = guess_location_type(destination)
        print 'Guessed destination type:', destination_type

    if (origin_type not in POSITION_TYPES or
            destination_type not in POSITION_TYPES):
        raise ParserError('Invalid position type')

    post = defaults.search_post
    post['name_origin'] = origin
    post['type_origin'] = origin_type
    post['name_destination'] = destination
    post['type_destination'] = destination_type
    post['itdDateDayMonthYear'] = dtime.strftime('%d.%m.%Y')
    post['itdTime'] = dtime.strftime('%H:%M')
    post['place_origin'] = origin_city
    post['place_destination'] = destination_city
    params = urlencode(post)
    url = '%s?%s' % (defaults.action, params)

    try:
        # log the request URL for debugging
        f = open(DEBUGLOG, 'a')
        f.write(url + '\n')
        f.close()
    except IOError:
        print 'Unable to write to DEBUGLOG: %s' % DEBUGLOG

    return urlopen(url)
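
# Example request (sketch, not part of the original module; assumes
# gotovienna.defaults provides search_post and action as referenced above):
#
#   response = search(('Karlsplatz, Wien', 'stop'), ('Handelskai', None))
#   html = response.read()  # feed this into sParser below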


class sParser:
    """ Parser for search response
    """

    def __init__(self, html):
        self.soup = BeautifulSoup(html)

    def check_page(self):
        if self.soup.find('form', {'id': 'form_efaresults'}):
            return PageType.RESULT

        if self.soup.find('div', {'class': 'form_error'}):
            return PageType.CORRECTION

        return PageType.UNKNOWN

    state = property(check_page)

    def get_correction(self):
        names_origin = self.soup.find('select', {'id': 'nameList_origin'})
        names_destination = self.soup.find('select', {'id': 'nameList_destination'})
        places_origin = self.soup.find('select', {'id': 'placeList_origin'})
        places_destination = self.soup.find('select', {'id': 'placeList_destination'})

        if names_origin or names_destination or places_origin or places_destination:
            corrections = {}

            if names_origin:
                corrections['origin'] = map(lambda x: x.text, names_origin.findAll('option'))
            if names_destination:
                corrections['destination'] = map(lambda x: x.text, names_destination.findAll('option'))

            if places_origin:
                corrections['place_origin'] = map(lambda x: x.text, places_origin.findAll('option'))
            if places_destination:
                corrections['place_destination'] = map(lambda x: x.text, places_destination.findAll('option'))

            return corrections

        else:
            raise ParserError('Unable to parse html')

    def get_result(self):
        return rParser(str(self.soup))


class rParser:
    """ Parser for routing results
    """

    def __init__(self, html):
        self.soup = BeautifulSoup(html)
        self._overview = None
        self._details = None

    @classmethod
    def get_tdtext(cls, x, cl):
        return x.find('td', {'class': cl}).text

    @classmethod
    def get_change(cls, x):
        y = rParser.get_tdtext(x, 'col_change')
        if y:
            return int(y)
        else:
            return 0

    @classmethod
    def get_price(cls, x):
        y = rParser.get_tdtext(x, 'col_price')
        if y == '*':
            return 0.0
        if ',' in y:
            return float(y.replace(',', '.'))
        else:
            return 0.0

    @classmethod
    def get_date(cls, x):
        y = rParser.get_tdtext(x, 'col_date')
        if y:
            return datetime.strptime(y, '%d.%m.%Y').date()
        else:
            return None

    @classmethod
    def get_datetime(cls, x):
        y = rParser.get_tdtext(x, 'col_time')
        if y:
            if y.find('-') > 0:
                # overview mode: "HH:MM - HH:MM" combined with the row's date
                times = map(lambda z: time(*map(int, z.split(':'))), y.split('-'))
                d = rParser.get_date(x)
                from_dtime = datetime.combine(d, times[0])
                if times[0] > times[1]:
                    # arrival is past midnight, so it falls on the next day
                    to_dtime = datetime.combine(d + timedelta(1), times[1])
                else:
                    to_dtime = datetime.combine(d, times[1])

                return [from_dtime, to_dtime]

            else:
                dtregex = {'date': r'\d\d\.\d\d',
                           'time': r'\d\d:\d\d'}

                regex = r'\s*(?P<date1>{date})?\s*(?P<time1>{time})\s*(?P<date2>{date})?\s*(?P<time2>{time})\s*'.format(**dtregex)
                ma = re.match(regex, y)

                if not ma:
                    return []

                gr = ma.groupdict()

                def extract_datetime(gr, n):
                    if 'date%d' % n in gr and gr['date%d' % n]:
                        # explicit date given, assume the current year
                        return datetime.strptime(str(datetime.today().year) + gr['date%d' % n] + gr['time%d' % n], '%Y%d.%m.%H:%M')
                    else:
                        # no date given, assume today
                        t = datetime.strptime(gr['time%d' % n], '%H:%M').time()
                        d = datetime.today().date()
                        return datetime.combine(d, t)

                # detail mode
                from_dtime = extract_datetime(gr, 1)
                to_dtime = extract_datetime(gr, 2)

                return [from_dtime, to_dtime]

        else:
            return []

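    # Illustration of the two input shapes handled above (an assumption about
    # the site's markup, inferred from the parsing code rather than captured
    # pages):
    #   overview rows:  u'08:05 - 08:32'   -> combined with the col_date cell
    #   detail rows:    u'18:05 18:07' or u'24.12 18:05 24.12 18:07'
    #                                      -> missing dates default to today
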
    def __iter__(self):
        for detail in self.details:
            yield detail

    def _parse_details(self):
        tours = self.soup.findAll('div', {'class': 'data_table tourdetail'})

        # one list per route, each containing a dict per step (table row)
        trips = map(lambda tour: map(lambda row: {
                        'timespan': rParser.get_datetime(row),
                        # keep only NavigableString children of the cells and
                        # strip the two-character prefix from station names
                        'station': map(lambda s: s[2:].strip(),
                                       filter(lambda c: type(c) == NavigableString,
                                              row.find('td', {'class': 'col_station'}).contents)),
                        'info': map(lambda s: s.strip(),
                                    filter(lambda c: type(c) == NavigableString,
                                           row.find('td', {'class': 'col_info'}).contents)),
                    }, tour.find('tbody').findAll('tr')),
                    tours)
        return trips

    @property
    def details(self):
        """returns list of trip details
        [ [ { 'timespan': [datetime.datetime, datetime.datetime] if time else [],
              'station': [u'start', u'end'] if station else [],
              'info': [u'start station' if station else u'details for walking', u'end station' if station else u'walking duration']
            }, ... # next trip step
          ], ... # next trip possibility
        ]
        """
        if not self._details:
            self._details = self._parse_details()

        return self._details

    def _parse_overview(self):

        # get overview table
        table = self.soup.find('table', {'id': 'tbl_fahrten'})

        # check if there is an overview table
        if table and table.findAll('tr'):
            # get rows
            rows = table.findAll('tr')[1:]  # cut off headline

            overview = map(lambda x: {
                               'timespan': rParser.get_datetime(x),
                               'change': rParser.get_change(x),
                               'price': rParser.get_price(x),
                           },
                           rows)
        else:
            raise ParserError('Unable to parse overview')

        return overview

    @property
    def overview(self):
        """list of dicts, one per route, each containing
        timespan: [datetime.datetime, datetime.datetime]
        change: int
        price: float
        """
        if not self._overview:
            try:
                self._overview = self._parse_overview()
            except AttributeError:
                # dump the unparsable page for debugging
                f = open(DEBUGLOG, 'w')
                f.write(str(self.soup))
                f.close()

        return self._overview
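
# End-to-end usage sketch (illustrative only, not part of the original module):
#
#   response = search(('Karlsplatz', 'stop'), ('Handelskai', None))
#   page = sParser(response.read())
#   if page.state == PageType.RESULT:
#       result = page.get_result()
#       for route in result.overview:
#           print route['timespan'], route['change'], route['price']
#       for trip in result.details:
#           for step in trip:
#               print step['timespan'], step['station'], step['info']
#   elif page.state == PageType.CORRECTION:
#       print page.get_correction()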