--- /dev/null
+# -*- coding: utf-8 -*-
+
+from BeautifulSoup import BeautifulSoup
+from urllib2 import urlopen
+import settings
+from datetime import time
+import argparse
+import re
+
+class ITipParser:
+ def __init__(self):
+ self._stations = {}
+ self._lines = {}
+
+ def get_stations(self, name):
+ """ Get station by direction
+ {'Directionname': [('Station name', 'url')]}
+ """
+ if not self._stations.has_key(name):
+ st = {}
+
+ if not self.lines.has_key(name):
+ return None
+
+ bs = BeautifulSoup(urlopen(self.lines[name]))
+ tables = bs.findAll('table', {'class': 'text_10pix'})
+ for i in range(2):
+ dir = tables[i].div.contents[-1].strip(' ')
+
+ sta = []
+ for tr in tables[i].findAll('tr', {'onmouseout': 'obj_unhighlight(this);'}):
+ if tr.a:
+ sta.append((tr.a.text, settings.line_overview + tr.a['href']))
+ else:
+ sta.append((tr.text.strip(' '), None))
+
+ st[dir] = sta
+ self._stations[name] = st
+
+ return self._stations[name]
+
+ @property
+ def lines(self):
+ """ Dictionary of Line names with url as value
+ """
+ if not self._lines:
+ bs = BeautifulSoup(urlopen(settings.line_overview))
+ # get tables
+ lines = bs.findAll('td', {'class': 'linie'})
+
+ for line in lines:
+ if line.a:
+ href = settings.line_overview + line.a['href']
+ if line.text:
+ self._lines[line.text] = href
+ elif line.img:
+ self._lines[line.img['alt']] = href
+
+ return self._lines
+
+ def get_departures(self, url):
+ """ Get list of next departures
+ integer if time until next departure
+ time if time of next departure
+ """
+
+ #TODO parse line name and direction for station site parsing
+
+ if not url:
+ # FIXME prevent from calling this method with None
+ return []
+
+ bs = BeautifulSoup(urlopen(url))
+ result_lines = bs.findAll('table')[-1].findAll('tr')
+
+ dep = []
+ for tr in result_lines[1:]:
+ th = tr.findAll('th')
+ if len(th) < 2:
+ #TODO replace with logger
+ print "[DEBUG] Unable to find th in:\n%s" % str(tr)
+ continue
+
+ # parse time
+ time = th[-2].text.split(' ')
+ if len(time) < 2:
+ print 'Invalid time: %s' % time
+ continue
+
+ time = time[1]
+
+ if time.find('rze...') >= 0:
+ dep.append(0)
+ elif time.isdigit():
+ # if time to next departure in cell convert to int
+ dep.append(int(time))
+ else:
+ # check if time of next departue in cell
+ t = time.strip(' ').split(':')
+ if len(t) == 2 and all(map(lambda x: x.isdigit(), t)):
+ t = map(int, t)
+ dep.append(time(*t))
+ else:
+ # Unexpected content
+ #TODO replace with logger
+ print "[DEBUG] Invalid data:\n%s" % time
+
+ return dep
+
--- /dev/null
+#!/usr/bin/env python
+# -*- coding: UTF-8 -*-
+
+from BeautifulSoup import BeautifulSoup, NavigableString
+from urllib2 import urlopen
+from urllib import urlencode
+import settings
+from datetime import datetime, time
+from textwrap import wrap
+import argparse
+import sys
+import os.path
+
# position qualifiers accepted by the routing form (see search())
POSITION_TYPES = ('stop', 'address', 'poi')
# strftime pattern used when displaying departure/arrival times
TIMEFORMAT = '%H:%M'
# request URLs and unparsable pages are appended here for debugging
DEBUGLOG = os.path.expanduser('~/gotoVienna.debug')
+
class ParserError(Exception):
    """ Raised when an expected structure cannot be found in a page. """

    def __init__(self, msg='Parser error'):
        # BUGFIX: forward to Exception so e.args and str(e) are populated;
        # the original skipped this, leaving str(e) == ''.
        Exception.__init__(self, msg)
        self.message = msg
+
class PageType:
    """ Enumeration of the page kinds a search can return. """
    UNKNOWN = 0
    CORRECTION = 1
    RESULT = 2
+
+
+def search(origin_tuple, destination_tuple, dtime=None):
+ """ build route request
+ returns html result (as urllib response)
+ """
+ if not dtime:
+ dtime = datetime.now()
+
+ origin, origin_type = origin_tuple
+ destination, destination_type = destination_tuple
+ if not origin_type in POSITION_TYPES or\
+ not destination_type in POSITION_TYPES:
+ raise ParserError('Invalid position type')
+
+ post = settings.search_post
+ post['name_origin'] = origin
+ post['type_origin'] = origin_type
+ post['name_destination'] = destination
+ post['type_destination'] = destination_type
+ post['itdDateDayMonthYear'] = dtime.strftime('%d.%m.%Y')
+ post['itdTime'] = dtime.strftime('%H:%M')
+ params = urlencode(post)
+ url = '%s?%s' % (settings.action, params)
+
+ try:
+ f = open(DEBUGLOG, 'a')
+ f.write(url + '\n')
+ f.close()
+ except:
+ print 'Unable to write to DEBUGLOG: %s' % DEBUGLOG
+
+ return urlopen(url)
+
+
class sParser:
    """ Parser for the search response page. """

    def __init__(self, html):
        self.soup = BeautifulSoup(html)

    def check_page(self):
        """ Classify the page as RESULT, CORRECTION or UNKNOWN. """
        if self.soup.find('form', {'id': 'form_efaresults'}):
            return PageType.RESULT
        if self.soup.find('div', {'class': 'form_error'}):
            return PageType.CORRECTION
        return PageType.UNKNOWN

    def get_correction(self):
        """ Return ([origin suggestions], [destination suggestions]).

        Raises ParserError when neither suggestion list is present.
        """
        nlo = self.soup.find('select', {'id': 'nameList_origin'})
        nld = self.soup.find('select', {'id': 'nameList_destination'})

        if not nlo and not nld:
            raise ParserError('Unable to parse html')

        origin = [opt.text for opt in nlo.findAll('option')] if nlo else []
        destination = [opt.text for opt in nld.findAll('option')] if nld else []

        return (origin, destination)

    def get_result(self):
        """ Wrap the current page in an rParser. """
        return rParser(str(self.soup))
+
+
+
class rParser:
    """ Parser for routing results """

    def __init__(self, html):
        self.soup = BeautifulSoup(html)
        self._overview = None
        self._details = None

    @classmethod
    def get_tdtext(cls, x, cl):
        """Text of the first <td class=cl> inside x."""
        return x.find('td', {'class': cl}).text

    @classmethod
    def get_change(cls, x):
        """Number of changes; 0 when the cell is empty."""
        y = rParser.get_tdtext(x, 'col_change')
        if y:
            return int(y)
        else:
            return 0

    @classmethod
    def get_price(cls, x):
        """Price as float (EUR); 0.0 for '*' or cells without a price.

        BUGFIX: the original tested `if y.find(','):` -- str.find returns
        -1 (truthy) when the comma is absent, so comma-less cells fell
        into float() and could raise ValueError.
        """
        y = rParser.get_tdtext(x, 'col_price')
        if y == '*':
            return 0.0
        if ',' in y:
            return float(y.replace(',', '.'))
        else:
            return 0.0

    @classmethod
    def get_date(cls, x):
        """Date of the trip row, or None when the cell is empty."""
        y = rParser.get_tdtext(x, 'col_date')
        if y:
            return datetime.strptime(y, '%d.%m.%Y').date()
        else:
            return None

    @classmethod
    def get_time(cls, x):
        """[departure, arrival] times; [] when the cell is empty.

        The cell either contains 'HH:MM-HH:MM' or two concatenated
        'HH:MM' values (split via wrap at width 5).
        """
        y = rParser.get_tdtext(x, 'col_time')
        if y:
            if (y.find("-") > 0):
                return map(lambda z: time(*map(int, z.split(':'))), y.split('-'))
            else:
                return map(lambda z: time(*map(int, z.split(':'))), wrap(y, 5))
        else:
            return []

    @classmethod
    def get_duration(cls, x):
        """Trip duration as datetime.time, or None when empty."""
        y = rParser.get_tdtext(x, 'col_duration')
        if y:
            return time(*map(int, y.split(":")))
        else:
            return None

    def __iter__(self):
        # BUGFIX: `details` is a property; the original called
        # self.details(), raising TypeError ('list' object is not callable).
        for detail in self.details:
            yield detail

    def _parse_details(self):
        tours = self.soup.findAll('div', {'class': 'data_table tourdetail'})

        trips = map(lambda x: map(lambda y: {
                        'time': rParser.get_time(y),
                        'station': map(lambda z: z[2:].strip(),
                                       filter(lambda x: type(x) == NavigableString, y.find('td', {'class': 'col_station'}).contents)), # filter non NaviStrings
                        'info': map(lambda x: x.strip(),
                                    filter(lambda z: type(z) == NavigableString, y.find('td', {'class': 'col_info'}).contents)),
                    }, x.find('tbody').findAll('tr')),
                    tours) # all routes
        return trips

    @property
    def details(self):
        """returns list of trip details
        [ [ { 'time': [datetime.time, datetime.time] if time else [],
              'station': [u'start', u'end'] if station else [],
              'info': [u'start station' if station else u'details for walking', u'end station' if station else u'walking duration']
            }, ... # next trip step
          ], ... # next trip possibility
        ]
        """
        if not self._details:
            self._details = self._parse_details()

        return self._details

    def _parse_overview(self):

        # get overview table
        table = self.soup.find('table', {'id': 'tbl_fahrten'})

        # check if there is an overview table
        if table and table.findAll('tr'):
            # get rows
            rows = table.findAll('tr')[1:] # cut off headline

            overview = map(lambda x: {
                               'date': rParser.get_date(x),
                               'time': rParser.get_time(x),
                               'duration': rParser.get_duration(x), # grab duration
                               'change': rParser.get_change(x),
                               'price': rParser.get_price(x),
                           },
                           rows)
        else:
            raise ParserError('Unable to parse overview')

        return overview

    @property
    def overview(self):
        """dict containing
        date: datetime
        time: [time, time]
        duration: time
        change: int
        price: float

        NOTE: returns None when parsing failed with AttributeError -- the
        page is then dumped to DEBUGLOG for inspection.
        """
        if not self._overview:
            try:
                self._overview = self._parse_overview()
            except AttributeError:
                f = open(DEBUGLOG, 'w')
                f.write(str(self.soup))
                f.close()

        return self._overview
+
+++ /dev/null
-#!/usr/bin/env python
-# -*- coding: UTF-8 -*-
-
-from BeautifulSoup import BeautifulSoup
-from urllib2 import urlopen
-import settings
-from datetime import time
-import argparse
-import re
-
-class iParser:
-
- def __init__(self):
- self._stations = {}
- self._lines = {}
-
- def get_stations(self, name):
- """ Get station by direction
- {'Directionname': [('Station name', 'url')]}
- """
- if not self._stations.has_key(name):
- st = {}
-
- if not self.lines.has_key(name):
- return None
-
- bs = BeautifulSoup(urlopen(self.lines[name]))
- tables = bs.findAll('table', {'class': 'text_10pix'})
- for i in range(2):
- dir = tables[i].div.contents[-1].strip(' ')
-
- sta = []
- for tr in tables[i].findAll('tr', {'onmouseout': 'obj_unhighlight(this);'}):
- if tr.a:
- sta.append((tr.a.text, settings.line_overview + tr.a['href']))
- else:
- sta.append((tr.text.strip(' '), None))
-
- st[dir] = sta
- self._stations[name] = st
-
- return self._stations[name]
-
- @property
- def lines(self):
- """ Dictionary of Line names with url as value
- """
- if not self._lines:
- bs = BeautifulSoup(urlopen(settings.line_overview))
- # get tables
- lines = bs.findAll('td', {'class': 'linie'})
-
- for line in lines:
- if line.a:
- href = settings.line_overview + line.a['href']
- if line.text:
- self._lines[line.text] = href
- elif line.img:
- self._lines[line.img['alt']] = href
-
- return self._lines
-
- def get_departures(self, url):
- """ Get list of next departures
- integer if time until next departure
- time if time of next departure
- """
-
- #TODO parse line name and direction for station site parsing
-
- if not url:
- # FIXME prevent from calling this method with None
- return []
-
- bs = BeautifulSoup(urlopen(url))
- result_lines = bs.findAll('table')[-1].findAll('tr')
-
- dep = []
- for tr in result_lines[1:]:
- th = tr.findAll('th')
- if len(th) < 2:
- #TODO replace with logger
- print "[DEBUG] Unable to find th in:\n%s" % str(tr)
- continue
-
- # parse time
- time = th[-2].text.split(' ')
- if len(time) < 2:
- print 'Invalid time: %s' % time
- continue
-
- time = time[1]
-
- if time.find('rze...') >= 0:
- dep.append(0)
- elif time.isdigit():
- # if time to next departure in cell convert to int
- dep.append(int(time))
- else:
- # check if time of next departue in cell
- t = time.strip(' ').split(':')
- if len(t) == 2 and all(map(lambda x: x.isdigit(), t)):
- t = map(int, t)
- dep.append(time(*t))
- else:
- # Unexpected content
- #TODO replace with logger
- print "[DEBUG] Invalid data:\n%s" % time
-
- return dep
-
-if __name__ == '__main__':
- parser = argparse.ArgumentParser(description='Get realtime public transport information for Vienna')
- parser.add_argument('-l', metavar='name', type=str, help='line name')
- parser.add_argument('-s', metavar='name', type=str, help='station name')
-
- args = parser.parse_args()
-
- itip = iParser()
- lines = itip.lines
- if args.l:
- l = args.l.upper()
- else:
- l = None
- if args.s:
- s = args.s.decode('UTF-8')
- else:
- s = ''
-
- if l and l in lines:
- stations = itip.get_stations(l)
- for key in stations.keys():
- if not s:
- print '* %s:' % key
- for station in stations[key]:
- if s:
- if s.startswith(station[0]) or station[0].startswith(s):
- # FIXME
- print '* %s\n %s .....' % (key, station[0]), itip.get_departures(station[1])
- else:
- print ' %s' % station[0]
-
- elif not l:
- line = {'U-Bahn': '|', 'Strassenbahn': '|', 'Bus': '|', 'Andere': '|', 'Nightline': '|'}
- lines_sorted = lines.keys()
- lines_sorted.sort()
- for li in lines_sorted:
- if li.isdigit():
- type = 'Strassenbahn'
- elif li.endswith('A') or li.endswith('B') and li[1].isdigit():
- type = 'Bus'
- elif li.startswith('U'):
- type = 'U-Bahn'
- elif li.startswith('N'):
- type = 'Nightline'
- else:
- type = 'Andere'
-
- line[type] += ' %s |' % li
- for kv in line.items():
- print "%s:\n%s" % kv
--- /dev/null
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from BeautifulSoup import BeautifulSoup
+from urllib2 import urlopen
+import settings
+from datetime import time
+import argparse
+import re
+
+from gotovienna.realtime import ITipParser
+
+
+parser = argparse.ArgumentParser(description='Get realtime public transport information for Vienna')
+parser.add_argument('-l', metavar='name', type=str, help='line name')
+parser.add_argument('-s', metavar='name', type=str, help='station name')
+
+args = parser.parse_args()
+
+itip = ITipParser()
+lines = itip.lines
+if args.l:
+ l = args.l.upper()
+else:
+ l = None
+if args.s:
+ s = args.s.decode('UTF-8')
+else:
+ s = ''
+
+if l and l in lines:
+ stations = itip.get_stations(l)
+ for key in stations.keys():
+ if not s:
+ print '* %s:' % key
+ for station in stations[key]:
+ if s:
+ if s.startswith(station[0]) or station[0].startswith(s):
+ # FIXME
+ print '* %s\n %s .....' % (key, station[0]), itip.get_departures(station[1])
+ else:
+ print ' %s' % station[0]
+
+elif not l:
+ line = {'U-Bahn': '|', 'Strassenbahn': '|', 'Bus': '|', 'Andere': '|', 'Nightline': '|'}
+ lines_sorted = lines.keys()
+ lines_sorted.sort()
+ for li in lines_sorted:
+ if li.isdigit():
+ type = 'Strassenbahn'
+ elif li.endswith('A') or li.endswith('B') and li[1].isdigit():
+ type = 'Bus'
+ elif li.startswith('U'):
+ type = 'U-Bahn'
+ elif li.startswith('N'):
+ type = 'Nightline'
+ else:
+ type = 'Andere'
+
+ line[type] += ' %s |' % li
+ for kv in line.items():
+ print "%s:\n%s" % kv
--- /dev/null
+#!/usr/bin/env python
+# -*- coding: UTF-8 -*-
+
+from BeautifulSoup import BeautifulSoup, NavigableString
+from urllib2 import urlopen
+from urllib import urlencode
+import settings
+from datetime import datetime, time
+from textwrap import wrap
+import argparse
+import sys
+import os.path
+
+from gotovienna.routing import *
+
# Command-line interface: query a route, interactively resolve ambiguous
# stop names, then let the user browse the found connections.
parser = argparse.ArgumentParser(description='Get public transport route for Vienna')
parser.add_argument('-ot', metavar='type', type=str, help='origin type: %s' % ' | '.join(POSITION_TYPES), default='stop', choices=POSITION_TYPES)
parser.add_argument('-dt', metavar='type', type=str, help='destination type: %s' % ' | '.join(POSITION_TYPES), default='stop', choices=POSITION_TYPES)
parser.add_argument('origin')
parser.add_argument('destination')

args = parser.parse_args()
html = search((args.origin, args.ot), (args.destination, args.dt)).read()

# NOTE(review): `parser` is rebound from the argparse parser to the page
# parser here -- confusing but harmless, argparse is no longer needed.
parser = sParser(html)
state = parser.check_page()

if state == PageType.CORRECTION:
    # the site did not recognise one of the names; offer its suggestions
    try:
        cor = parser.get_correction()
        if cor[0]:
            print
            print '* Origin ambiguous:'
            lo = None
            # re-print the menu until a valid selection is entered
            # NOTE(review): '0' passes this check and selects cor[0][-1]
            # via negative indexing -- should probably be rejected
            while not lo or not lo.isdigit() or int(lo) > len(cor[0]):
                i = 1
                for c in cor[0]:
                    print '%d. %s' % (i, c)
                    i += 1
                lo = sys.stdin.readline().strip()

            args.origin = cor[0][int(lo) - 1]

        if cor[1]:
            print
            print '* Destination ambiguous:'
            ld = None
            # same menu loop (and same '0' caveat) for the destination
            while not ld or not ld.isdigit() or int(ld) > len(cor[1]):
                j = 1
                for c in cor[1]:
                    print '%d. %s' % (j, c)
                    j += 1
                ld = sys.stdin.readline().strip()

            args.destination = cor[1][int(ld) - 1]

        # re-run the search with the corrected (unicode) names
        html = search((args.origin.encode('UTF-8'), args.ot), (args.destination.encode('UTF-8'), args.dt)).read()

        parser = sParser(html)
        state = parser.check_page()

    except ParserError:
        print 'PANIC at correction page'

if state == PageType.RESULT:
    parser = rParser(html)
    try:
        overviews = parser.overview
        details = parser.details
        l = ''
        # show the connection overview until the user quits with 'q'
        while not l == 'q':
            for idx, overview in enumerate(overviews):
                if not overview['date'] or not overview['time']:
                    # XXX: Bogus data for e.g. Pilgramgasse->Karlsplatz?!
                    continue

                print '%d. [%s] %s-%s (%s)' % (idx + 1,
                                               overview['date'],
                                               overview['time'][0],
                                               overview['time'][1],
                                               overview['duration'])
            print 'q. Quit'
            l = sys.stdin.readline().strip()
            print
            print '~' * 100

            # NOTE(review): '0' passes this check too and shows the last
            # connection via details[-1]
            if l.isdigit() and int(l) <= len(details):
                for detail in details[int(l) - 1]:
                    if detail['time'] and detail['station']:
                        # NOTE(review): this local `time` shadows
                        # datetime.time; harmless here since the class is
                        # not used again in this scope
                        time = '%s - %s' % (detail['time'][0].strftime(TIMEFORMAT), detail['time'][1].strftime(TIMEFORMAT))
                        print '[%s] %s\n%s' % (time, ' -> '.join(detail['station']), '\n'.join(detail['info']))
                    else:
                        # walking step: no station pair, only info lines
                        print '\n'.join(detail['info'])
                print '-' * 100
            print

    except ParserError:
        print 'parsererror'

elif state == PageType.UNKNOWN:
    print 'PANIC unknown result'
+++ /dev/null
-#!/usr/bin/env python
-# -*- coding: UTF-8 -*-
-
-from BeautifulSoup import BeautifulSoup, NavigableString
-from urllib2 import urlopen
-from urllib import urlencode
-import settings
-from datetime import datetime, time
-from textwrap import wrap
-import argparse
-import sys
-import os.path
-
-POSITION_TYPES = ('stop', 'address', 'poi')
-TIMEFORMAT = '%H:%M'
-DEBUGLOG = os.path.expanduser('~/gotoVienna.debug')
-
-class ParserError(Exception):
-
- def __init__(self, msg='Parser error'):
- self.message = msg
-
-class PageType:
- UNKNOWN, CORRECTION, RESULT = range(3)
-
-
-def search(origin_tuple, destination_tuple, dtime=None):
- """ build route request
- returns html result (as urllib response)
- """
- if not dtime:
- dtime = datetime.now()
-
- origin, origin_type = origin_tuple
- destination, destination_type = destination_tuple
- if not origin_type in POSITION_TYPES or\
- not destination_type in POSITION_TYPES:
- raise ParserError('Invalid position type')
-
- post = settings.search_post
- post['name_origin'] = origin
- post['type_origin'] = origin_type
- post['name_destination'] = destination
- post['type_destination'] = destination_type
- post['itdDateDayMonthYear'] = dtime.strftime('%d.%m.%Y')
- post['itdTime'] = dtime.strftime('%H:%M')
- params = urlencode(post)
- url = '%s?%s' % (settings.action, params)
-
- try:
- f = open(DEBUGLOG, 'a')
- f.write(url + '\n')
- f.close()
- except:
- print 'Unable to write to DEBUGLOG: %s' % DEBUGLOG
-
- return urlopen(url)
-
-
-class sParser:
- """ Parser for search response
- """
-
- def __init__(self, html):
- self.soup = BeautifulSoup(html)
-
- def check_page(self):
- if self.soup.find('form', {'id': 'form_efaresults'}):
- return PageType.RESULT
-
- if self.soup.find('div', {'class':'form_error'}):
- return PageType.CORRECTION
-
- return PageType.UNKNOWN
-
- def get_correction(self):
- nlo = self.soup.find('select', {'id': 'nameList_origin'})
- nld = self.soup.find('select', {'id': 'nameList_destination'})
-
- if not nlo and not nld:
- raise ParserError('Unable to parse html')
-
- if nlo:
- origin = map(lambda x: x.text, nlo.findAll('option'))
- else:
- origin = []
- if nld:
- destination = map(lambda x: x.text, nld.findAll('option'))
- else:
- destination = []
-
- return (origin, destination)
-
- def get_result(self):
- return rParser(str(self.soup))
-
-
-
-class rParser:
- """ Parser for routing results
- """
-
- def __init__(self, html):
- self.soup = BeautifulSoup(html)
- self._overview = None
- self._details = None
-
- @classmethod
- def get_tdtext(cls, x, cl):
- return x.find('td', {'class': cl}).text
-
- @classmethod
- def get_change(cls, x):
- y = rParser.get_tdtext(x, 'col_change')
- if y:
- return int(y)
- else:
- return 0
-
- @classmethod
- def get_price(cls, x):
- y = rParser.get_tdtext(x, 'col_price')
- if y == '*':
- return 0.0
- if y.find(','):
- return float(y.replace(',', '.'))
- else:
- return 0.0
-
- @classmethod
- def get_date(cls, x):
- y = rParser.get_tdtext(x, 'col_date')
- if y:
- return datetime.strptime(y, '%d.%m.%Y').date()
- else:
- return None
-
- @classmethod
- def get_time(cls, x):
- y = rParser.get_tdtext(x, 'col_time')
- if y:
- if (y.find("-") > 0):
- return map(lambda z: time(*map(int, z.split(':'))), y.split('-'))
- else:
- return map(lambda z: time(*map(int, z.split(':'))), wrap(y, 5))
- else:
- return []
-
- @classmethod
- def get_duration(cls, x):
- y = rParser.get_tdtext(x, 'col_duration')
- if y:
- return time(*map(int, y.split(":")))
- else:
- return None
-
- def __iter__(self):
- for detail in self.details():
- yield detail
-
- def _parse_details(self):
- tours = self.soup.findAll('div', {'class': 'data_table tourdetail'})
-
- trips = map(lambda x: map(lambda y: {
- 'time': rParser.get_time(y),
- 'station': map(lambda z: z[2:].strip(),
- filter(lambda x: type(x) == NavigableString, y.find('td', {'class': 'col_station'}).contents)), # filter non NaviStrings
- 'info': map(lambda x: x.strip(),
- filter(lambda z: type(z) == NavigableString, y.find('td', {'class': 'col_info'}).contents)),
- }, x.find('tbody').findAll('tr')),
- tours) # all routes
- return trips
-
- @property
- def details(self):
- """returns list of trip details
- [ [ { 'time': [datetime.time, datetime.time] if time else [],
- 'station': [u'start', u'end'] if station else [],
- 'info': [u'start station' if station else u'details for walking', u'end station' if station else u'walking duration']
- }, ... # next trip step
- ], ... # next trip possibility
- ]
- """
- if not self._details:
- self._details = self._parse_details()
-
- return self._details
-
- def _parse_overview(self):
-
- # get overview table
- table = self.soup.find('table', {'id': 'tbl_fahrten'})
-
- # check if there is an overview table
- if table and table.findAll('tr'):
- # get rows
- rows = table.findAll('tr')[1:] # cut off headline
-
- overview = map(lambda x: {
- 'date': rParser.get_date(x),
- 'time': rParser.get_time(x),
- 'duration': rParser.get_duration(x), # grab duration
- 'change': rParser.get_change(x),
- 'price': rParser.get_price(x),
- },
- rows)
- else:
- raise ParserError('Unable to parse overview')
-
- return overview
-
- @property
- def overview(self):
- """dict containing
- date: datetime
- time: [time, time]
- duration: time
- change: int
- price: float
- """
- if not self._overview:
- try:
- self._overview = self._parse_overview()
- except AttributeError:
- f = open(DEBUGLOG, 'w')
- f.write(str(self.soup))
- f.close()
-
- return self._overview
-
-if __name__ == '__main__':
- parser = argparse.ArgumentParser(description='Get public transport route for Vienna')
- parser.add_argument('-ot', metavar='type', type=str, help='origin type: %s' % ' | '.join(POSITION_TYPES), default='stop', choices=POSITION_TYPES)
- parser.add_argument('-dt', metavar='type', type=str, help='destination type: %s' % ' | '.join(POSITION_TYPES), default='stop', choices=POSITION_TYPES)
- parser.add_argument('origin')
- parser.add_argument('destination')
-
- args = parser.parse_args()
- html = search((args.origin, args.ot), (args.destination, args.dt)).read()
-
- parser = sParser(html)
- state = parser.check_page()
-
- if state == PageType.CORRECTION:
- try:
- cor = parser.get_correction()
- if cor[0]:
- print
- print '* Origin ambiguous:'
- lo = None
- while not lo or not lo.isdigit() or int(lo) > len(cor[0]):
- i = 1
- for c in cor[0]:
- print '%d. %s' % (i, c)
- i += 1
- lo = sys.stdin.readline().strip()
-
- args.origin = cor[0][int(lo) - 1]
-
- if cor[1]:
- print
- print '* Destination ambiguous:'
- ld = None
- while not ld or not ld.isdigit() or int(ld) > len(cor[1]):
- j = 1
- for c in cor[1]:
- print '%d. %s' % (j, c)
- j += 1
- ld = sys.stdin.readline().strip()
-
- args.destination = cor[1][int(ld) - 1]
-
- html = search((args.origin.encode('UTF-8'), args.ot), (args.destination.encode('UTF-8'), args.dt)).read()
-
- parser = sParser(html)
- state = parser.check_page()
-
- except ParserError:
- print 'PANIC at correction page'
-
- if state == PageType.RESULT:
- parser = rParser(html)
- try:
- overviews = parser.overview
- details = parser.details
- l = ''
- while not l == 'q':
- for idx, overview in enumerate(overviews):
- if not overview['date'] or not overview['time']:
- # XXX: Bogus data for e.g. Pilgramgasse->Karlsplatz?!
- continue
-
- print '%d. [%s] %s-%s (%s)' % (idx + 1,
- overview['date'],
- overview['time'][0],
- overview['time'][1],
- overview['duration'])
- print 'q. Quit'
- l = sys.stdin.readline().strip()
- print
- print '~' * 100
-
- if l.isdigit() and int(l) <= len(details):
- for detail in details[int(l) - 1]:
- if detail['time'] and detail['station']:
- time = '%s - %s' % (detail['time'][0].strftime(TIMEFORMAT), detail['time'][1].strftime(TIMEFORMAT))
- print '[%s] %s\n%s' % (time, ' -> '.join(detail['station']), '\n'.join(detail['info']))
- else:
- print '\n'.join(detail['info'])
- print '-' * 100
- print
-
- except ParserError:
- print 'parsererror'
-
- elif state == PageType.UNKNOWN:
- print 'PANIC unknown result'