Merge branch 'patch5884'
[feedingit] / src / rss.py
#!/usr/bin/env python2.5


# Copyright (c) 2007-2008 INdT.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
#  This program is distributed in the hope that it will be useful,
#  but WITHOUT ANY WARRANTY; without even the implied warranty of
#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#  GNU Lesser General Public License for more details.
#
#  You should have received a copy of the GNU Lesser General Public License
#  along with this program.  If not, see <http://www.gnu.org/licenses/>.
#

# ============================================================================
# Name        : FeedingIt.py
# Author      : Yves Marcoz
# Version     : 0.5.4
# Description : Simple RSS Reader
# ============================================================================

from os.path import isfile, isdir
from shutil import rmtree
from os import mkdir, remove, utime
import pickle
import md5
import feedparser
import time
import urllib2
from BeautifulSoup import BeautifulSoup
from urlparse import urljoin

#CONFIGDIR="/home/user/.feedingit/"

def getId(string):
    # md5 of the string, used as a stable key/filename throughout
    return md5.new(string).hexdigest()

#def getProxy():
#    import gconf
#    if gconf.client_get_default().get_bool('/system/http_proxy/use_http_proxy'):
#        port = gconf.client_get_default().get_int('/system/http_proxy/port')
#        http = gconf.client_get_default().get_string('/system/http_proxy/host')
#        proxy = urllib2.ProxyHandler( {"http":"http://%s:%s/"% (http,port)} )
#        return (True, proxy)
#    return (False, None)

# Enable proxy support for images and ArchivedArticles
#(proxy_support, proxy) = getProxy()
#if proxy_support:
#    opener = urllib2.build_opener(proxy)
#    urllib2.install_opener(opener)

# Entry = {"title":XXX, "content":XXX, "date":XXX, "dateTuple":XXX, "link":XXX, "images":[] }
# ("contentLink" is added once the rendered article has been written to disk)
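
# On-disk layout under configdir, as used by the code below:
#   feeds.pickle              - pickled dict of feed metadata, keyed by getId(title)
#   <feedId>.d/feed           - pickled Feed object
#   <feedId>.d/unread         - pickled {entryId: readFlag} dict
#   <feedId>.d/<entryId>.html - rendered article content
#   <feedId>.d/favicon.ico    - site favicon, fetched on update
#   <feedId>.d/<md5 of url>   - cached images, when imageCache is enabled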

class ImageHandler:
    # Empty placeholder; image downloads are handled by Feed.addImage below
    def __init__(self, configdir):
        pass

class Feed:
    # A single feed: entry dicts keyed by id, plus read/unread state; the
    # whole object is pickled to <configdir><uniqueId>.d/feed by saveFeed
    def __init__(self, uniqueId, name, url):
        self.titles = []
        self.entries = {}
        self.ids = []
        self.readItems = {}
        self.name = name
        self.url = url
        self.countUnread = 0
        self.updateTime = "Never"
        self.updateStamp = 0
        self.uniqueId = uniqueId
        self.etag = None
        self.modified = None

    def addImage(self, configdir, key, baseurl, url):
        # Cache an image under <configdir><key>.d/, named by the md5 of its url
        filename = configdir+key+".d/"+getId(url)
        if not isfile(filename):
            try:
                #if url.startswith("http"):
                #    f = urllib2.urlopen(url)
                #else:
                f = urllib2.urlopen(urljoin(baseurl, url))
                outf = open(filename, "wb")
                outf.write(f.read())
                f.close()
                outf.close()
            except:
                print "Could not download " + url
        else:
            # "Touch" the file so the expiry sweep in updateFeed keeps it
            utime(filename, None)
        return filename
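
    # For example (hypothetical values): addImage("/home/user/.feedingit/",
    # "abc123", "http://example.com/post", "img/logo.png") downloads
    # http://example.com/img/logo.png into
    # /home/user/.feedingit/abc123.d/<md5 of "img/logo.png"> and returns that path.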

    def editFeed(self, url):
        self.url = url

    def saveFeed(self, configdir):
        if not isdir(configdir+self.uniqueId+".d"):
            mkdir(configdir+self.uniqueId+".d")
        f = open(configdir+self.uniqueId+".d/feed", "w")
        pickle.dump(self, f)
        f.close()
        self.saveUnread(configdir)

    def saveUnread(self, configdir):
        if not isdir(configdir+self.uniqueId+".d"):
            mkdir(configdir+self.uniqueId+".d")
        f = open(configdir+self.uniqueId+".d/unread", "w")
        pickle.dump(self.readItems, f)
        f.close()

    def reloadUnread(self, configdir):
        try:
            f = open(configdir+self.uniqueId+".d/unread", "r")
            self.readItems = pickle.load(f)
            f.close()
            self.countUnread = 0
            for id in self.getIds():
                if self.readItems[id]==False:
                    self.countUnread = self.countUnread + 1
        except:
            # Missing or corrupt unread file; keep the current count
            pass
        return self.countUnread

    def updateFeed(self, configdir, expiryTime=24, proxy=None, imageCache=False):
        # Expiry time is in hours
        if proxy == None:
            tmp = feedparser.parse(self.url, etag=self.etag, modified=self.modified)
        else:
            tmp = feedparser.parse(self.url, etag=self.etag, modified=self.modified, handlers=[proxy])
        expiry = float(expiryTime) * 3600.

        # Check if the parse was successful (number of entries > 0, else do nothing)
        if len(tmp["entries"])>0:
            # The etag and modified value should only be updated if the content was not null
            try:
                self.etag = tmp["etag"]
            except KeyError:
                self.etag = None
            try:
                self.modified = tmp["modified"]
            except KeyError:
                self.modified = None
            if not isdir(configdir+self.uniqueId+".d"):
                mkdir(configdir+self.uniqueId+".d")
            try:
                f = urllib2.urlopen(urljoin(tmp["feed"]["link"], "/favicon.ico"))
                data = f.read()
                f.close()
                outf = open(configdir+self.uniqueId+".d/favicon.ico", "wb")
                outf.write(data)
                outf.close()
                del data
            except:
                #import traceback
                #traceback.print_exc()
                pass

            currentTime = time.time()
            tmpEntries = {}
            tmpIds = []
            for entry in tmp["entries"]:
                (dateTuple, date) = self.extractDate(entry)
                try:
                    entry["title"]
                except:
                    entry["title"] = "No Title"
                try:
                    entry["link"]
                except:
                    entry["link"] = ""
                tmpEntry = {"title":entry["title"], "content":self.extractContent(entry),
                            "date":date, "dateTuple":dateTuple, "link":entry["link"], "images":[] }
                id = self.generateUniqueId(tmpEntry)

                if id not in self.ids:
                    # New entry: render it to html, caching images if requested
                    soup = BeautifulSoup(self.getArticle(tmpEntry))
                    images = soup('img')
                    baseurl = tmpEntry["link"]
                    if imageCache:
                        for img in images:
                            try:
                                filename = self.addImage(configdir, self.uniqueId, baseurl, img['src'])
                                img['src'] = filename
                                tmpEntry["images"].append(filename)
                            except:
                                print "Error downloading image %s" % img
                    tmpEntry["contentLink"] = configdir+self.uniqueId+".d/"+id+".html"
                    f = open(tmpEntry["contentLink"], "w")
                    f.write(soup.prettify())
                    f.close()
                    tmpEntries[id] = tmpEntry
                    tmpIds.append(id)
                    if id not in self.readItems:
                        self.readItems[id] = False
                else:
                    # Known entry: touch its html and images so the file-expiry
                    # sweep below does not delete them
                    try:
                        filename = configdir+self.uniqueId+".d/"+id+".html"
                        utime(filename, None)
                        for image in self.entries[id]["images"]:
                            utime(image, None)
                    except:
                        pass
                    tmpEntries[id] = self.entries[id]
                    tmpIds.append(id)

            # Carry over old entries that have not yet expired
            oldIds = self.ids[:]
            for entryId in oldIds:
                if not entryId in tmpIds:
                    try:
                        articleTime = time.mktime(self.entries[entryId]["dateTuple"])
                        if (currentTime - articleTime > 2*expiry):
                            self.removeEntry(entryId)
                            continue
                        if (currentTime - articleTime > expiry) and (self.isEntryRead(entryId)):
                            # Entry is past the expiry time, and already read
                            self.removeEntry(entryId)
                            continue
                        tmpEntries[entryId] = self.entries[entryId]
                        tmpIds.append(entryId)
                    except:
                        print "Error purging old articles %s" % entryId
                        self.removeEntry(entryId)

            self.entries = tmpEntries
            self.ids = tmpIds
            tmpUnread = 0

            # Recount unread items and drop read-state for vanished entries
            ids = self.ids[:]
            for id in ids:
                if not self.readItems.has_key(id):
                    self.readItems[id] = False
                if self.readItems[id]==False:
                    tmpUnread = tmpUnread + 1
            keys = self.readItems.keys()
            for id in keys:
                if not id in self.ids:
                    del self.readItems[id]
            del tmp
            self.countUnread = tmpUnread
            self.updateTime = time.asctime()
            self.updateStamp = currentTime
            self.saveFeed(configdir)

            # Sweep the feed directory: remove any cached file whose
            # last-modified date is older than three expiry periods
            from glob import glob
            from os import stat
            for cachedFile in glob(configdir+self.uniqueId+".d/*"):
                stats = stat(cachedFile)
                lastmodDate = stats[8]    # st_mtime
                expDate = time.time()-expiry*3
                if expDate > lastmodDate:
                    try:
                        remove(cachedFile)
                    except OSError:
                        print 'Could not remove', cachedFile

    def extractContent(self, entry):
        content = ""
        if entry.has_key('summary'):
            content = entry.get('summary', '')
        if entry.has_key('content'):
            if len(entry.content[0].value) > len(content):
                content = entry.content[0].value
        if content == "":
            content = entry.get('description', '')
        return content

    def extractDate(self, entry):
        if entry.has_key("updated_parsed"):
            date1 = entry["updated_parsed"]
            date = time.strftime("%a, %d %b %Y %H:%M:%S", entry["updated_parsed"])
        elif entry.has_key("published_parsed"):
            date1 = entry["published_parsed"]
            date = time.strftime("%a, %d %b %Y %H:%M:%S", entry["published_parsed"])
        else:
            date1 = ""
            date = ""
        #print date1, date
        return (date1, date)
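
    # For example, an entry carrying updated_parsed yields something like
    # (time.struct_time(...), "Mon, 04 Jan 2010 15:30:00"); an entry with no
    # parseable date yields ("", "").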

    def setEntryRead(self, id):
        if self.readItems[id]==False:
            self.countUnread = self.countUnread - 1
            self.readItems[id] = True

    def setEntryUnread(self, id):
        if self.readItems[id]==True:
            self.countUnread = self.countUnread + 1
            self.readItems[id] = False

    def isEntryRead(self, id):
        # Check if an entry is read; return False if the read
        # status of an entry is unknown (id not in readItems)
        return self.readItems.get(id, False)

    def getTitle(self, id):
        return self.entries[id]["title"]

    def getContentLink(self, id):
        if self.entries[id].has_key("contentLink"):
            return self.entries[id]["contentLink"]
        return self.entries[id]["link"]

    def getExternalLink(self, id):
        return self.entries[id]["link"]

    def getDate(self, id):
        return self.entries[id]["date"]

    def getDateTuple(self, id):
        return self.entries[id]["dateTuple"]

    def getUniqueId(self, index):
        return self.ids[index]

    def generateUniqueId(self, entry):
        # Entry ids are the md5 of the formatted date plus the title, so the
        # same article hashes to the same id across updates
        return getId(entry["date"] + entry["title"])

    def getUpdateTime(self):
        return self.updateTime

    def getUpdateStamp(self):
        try:
            return self.updateStamp
        except AttributeError:
            # Feeds pickled by older versions have no updateStamp
            self.updateStamp = 0
            return self.updateStamp

    def getEntries(self):
        return self.entries

    def getIds(self):
        return self.ids

    def getNextId(self, id):
        return self.ids[(self.ids.index(id)+1) % self.getNumberOfEntries()]

    def getPreviousId(self, id):
        return self.ids[(self.ids.index(id)-1) % self.getNumberOfEntries()]

    def getNumberOfUnreadItems(self):
        return self.countUnread

    def getNumberOfEntries(self):
        return len(self.ids)

    def getItem(self, id):
        try:
            return self.entries[id]
        except KeyError:
            return []

    def getContent(self, id):
        if self.entries[id].has_key("contentLink"):
            f = open(self.entries[id]["contentLink"])
            content = f.read()
            f.close()
            return content
        return self.entries[id]["content"]

    def removeEntry(self, id):
        if self.entries.has_key(id):
            entry = self.entries[id]
            if entry.has_key("contentLink"):
                try:
                    remove(entry["contentLink"])
                except:
                    print "File not found for deletion: %s" % entry["contentLink"]
            del self.entries[id]
        else:
            print "Entries has no %s key" % id
        if id in self.ids:
            self.ids.remove(id)
        else:
            print "Ids has no %s key" % id
        if self.readItems.has_key(id):
            if self.readItems[id]==False:
                self.countUnread = self.countUnread - 1
            del self.readItems[id]
        else:
            print "ReadItems has no %s key" % id

    def getArticle(self, entry):
        # Render an entry dict as a standalone XHTML page
        title = entry['title']
        content = entry["content"]
        link = entry['link']
        date = entry["date"]

        text = '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">'
        text += "<html><head><title>" + title + "</title>"
        text += '<meta http-equiv="Content-Type" content="text/html; charset=UTF-8"/>\n'
        #text += '<style> body {-webkit-user-select: none;} </style>'
        text += '</head><body><div><a href="' + link + '">' + title + "</a>"
        text += "<br /><small><i>Date: " + date + "</i></small></div>"
        text += "<br /><br />"
        text += content
        text += "</body></html>"
        return text
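
# A minimal standalone sketch of the Feed life-cycle (the paths here are
# assumptions for illustration, not shipped defaults):
#
#     feed = Feed(getId("Example"), "Example", "http://example.com/rss.xml")
#     feed.updateFeed("/home/user/.feedingit/")   # configdir must exist
#     for id in feed.getIds():
#         print feed.getTitle(id), feed.getDate(id)
#         feed.setEntryRead(id)
#     feed.saveUnread("/home/user/.feedingit/")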

class ArchivedArticles(Feed):
    # A pseudo-feed holding articles the user chose to archive; entries are
    # added locally instead of being parsed from a remote feed
    def addArchivedArticle(self, title, link, updated_parsed, configdir):
        entry = {}
        entry["title"] = title
        entry["link"] = link
        entry["summary"] = '<a href="' + link + '">' + title + "</a>"
        entry["updated_parsed"] = updated_parsed
        entry["time"] = time.time()
        (dateTuple, date) = self.extractDate(entry)
        tmpEntry = {"title":entry["title"], "content":self.extractContent(entry),
                    "date":date, "dateTuple":dateTuple, "link":entry["link"], "images":[],
                    "downloaded":False, "time":entry["time"] }
        id = self.generateUniqueId(tmpEntry)
        self.entries[id] = tmpEntry
        self.ids.append(id)
        self.readItems[id] = False
        self.countUnread = self.countUnread + 1
        self.saveFeed(configdir)
        self.saveUnread(configdir)

    def updateFeed(self, configdir, expiryTime=24, proxy=None, imageCache=False):
        # Instead of fetching a feed, download the full page of each
        # not-yet-downloaded archived article and cache its images
        for id in self.getIds():
            entry = self.entries[id]
            if not entry["downloaded"]:
                #try:
                    f = urllib2.urlopen(entry["link"])
                    html = f.read()
                    f.close()
                    soup = BeautifulSoup(html)
                    images = soup('img')
                    baseurl = entry["link"]
                    for img in images:
                        filename = self.addImage(configdir, self.uniqueId, baseurl, img['src'])
                        img['src'] = filename
                    entry["contentLink"] = configdir+self.uniqueId+".d/"+id+".html"
                    outf = open(entry["contentLink"], "w")
                    outf.write(soup.prettify())
                    outf.close()
                    if len(entry["content"]) > 0:
                        entry["downloaded"] = True
                        entry["time"] = time.time()
                        self.setEntryUnread(id)
                #except:
                #    pass
            #currentTime = time.time()
            #expiry = float(expiryTime) * 3600
            #if currentTime - entry["time"] > expiry:
            #    if self.isEntryRead(id):
            #        self.removeEntry(id)
            #    else:
            #        if currentTime - entry["time"] > 2*expiry:
            #            self.removeEntry(id)
        self.updateTime = time.asctime()
        self.updateStamp = time.time()
        self.saveFeed(configdir)

    def purgeReadArticles(self):
        ids = self.getIds()[:]   # copy: removeEntry mutates self.ids
        for id in ids:
            if self.isEntryRead(id):
                self.removeEntry(id)

    def removeArticle(self, id):
        self.removeEntry(id)

    def getArticle(self, id):
        # Overrides Feed.getArticle with different semantics: the argument is
        # an entry id, and the entry is marked as read
        self.setEntryRead(id)
        content = self.getContent(id)
        return content


class Listing:
    # Lists all the feeds in a dictionary, and exposes the data
    def __init__(self, configdir):
        self.configdir = configdir
        if isfile(self.configdir+"feeds.pickle"):
            f = open(self.configdir+"feeds.pickle")
            self.listOfFeeds = pickle.load(f)
            f.close()
        else:
            self.listOfFeeds = {getId("Maemo News"):{"title":"Maemo News", "url":"http://maemo.org/news/items.xml", "unread":0, "updateTime":"Never"}, }
        # Drop the legacy "font" entry if present; it is not a real feed
        if self.listOfFeeds.has_key("font"):
            del self.listOfFeeds["font"]
        if self.listOfFeeds.has_key("feedingit-order"):
            self.sortedKeys = self.listOfFeeds["feedingit-order"]
        else:
            self.sortedKeys = self.listOfFeeds.keys()
            if "font" in self.sortedKeys:
                self.sortedKeys.remove("font")
            self.sortedKeys.sort(key=lambda obj: self.getFeedTitle(obj))
        #self.closeCurrentlyDisplayedFeed()

    def addArchivedArticle(self, key, index):
        feed = self.getFeed(key)
        title = feed.getTitle(index)
        link = feed.getExternalLink(index)
        date = feed.getDateTuple(index)
        if not self.listOfFeeds.has_key("ArchivedArticles"):
            self.listOfFeeds["ArchivedArticles"] = {"title":"Archived Articles", "url":"", "unread":0, "updateTime":"Never"}
            self.sortedKeys.append("ArchivedArticles")
            #self.feeds["Archived Articles"] = ArchivedArticles("Archived Articles", "")
            self.saveConfig()
        archFeed = self.getFeed("ArchivedArticles")
        archFeed.addArchivedArticle(title, link, date, self.configdir)
        # Record the unread count on the archive's own entry
        self.listOfFeeds["ArchivedArticles"]["unread"] = archFeed.getNumberOfUnreadItems()

    def loadFeed(self, key):
        if isfile(self.configdir+key+".d/feed"):
            f = open(self.configdir+key+".d/feed")
            feed = pickle.load(f)
            f.close()
            # Backfill attributes missing from feeds pickled by older versions
            try:
                feed.uniqueId
            except AttributeError:
                feed.uniqueId = getId(feed.name)
            try:
                del feed.imageHandler
            except:
                pass
            try:
                feed.etag
            except AttributeError:
                feed.etag = None
            try:
                feed.modified
            except AttributeError:
                feed.modified = None
        else:
            title = self.listOfFeeds[key]["title"]
            url = self.listOfFeeds[key]["url"]
            if key == "ArchivedArticles":
                feed = ArchivedArticles("ArchivedArticles", title, url)
            else:
                feed = Feed(getId(title), title, url)
        return feed

    def updateFeeds(self, expiryTime=24, proxy=None, imageCache=False):
        for key in self.getListOfFeeds():
            feed = self.loadFeed(key)
            feed.updateFeed(self.configdir, expiryTime, proxy, imageCache)
            self.listOfFeeds[key]["unread"] = feed.getNumberOfUnreadItems()
            self.listOfFeeds[key]["updateTime"] = feed.getUpdateTime()
            self.listOfFeeds[key]["updateStamp"] = feed.getUpdateStamp()

    def updateFeed(self, key, expiryTime=24, proxy=None, imageCache=False):
        feed = self.getFeed(key)
        feed.updateFeed(self.configdir, expiryTime, proxy, imageCache)
        self.listOfFeeds[key]["unread"] = feed.getNumberOfUnreadItems()
        self.listOfFeeds[key]["updateTime"] = feed.getUpdateTime()
        self.listOfFeeds[key]["updateStamp"] = feed.getUpdateStamp()

    def editFeed(self, key, title, url):
        self.listOfFeeds[key]["title"] = title
        self.listOfFeeds[key]["url"] = url
        feed = self.loadFeed(key)
        feed.editFeed(url)
        # Persist the new url; loadFeed returns a fresh object, so the change
        # would otherwise be discarded
        feed.saveFeed(self.configdir)

    def getFeed(self, key):
        try:
            feed = self.loadFeed(key)
            feed.reloadUnread(self.configdir)
        except:
            # If the feed file gets corrupted, we need to reset the feed.
            import traceback
            traceback.print_exc()
            import dbus
            bus = dbus.SessionBus()
            remote_object = bus.get_object("org.freedesktop.Notifications", # Connection name
                                           "/org/freedesktop/Notifications" # Object's path
                                          )
            iface = dbus.Interface(remote_object, 'org.freedesktop.Notifications')
            iface.SystemNoteInfoprint("Error opening feed %s, it has been reset." % self.getFeedTitle(key))
            if isdir(self.configdir+key+".d/"):
                rmtree(self.configdir+key+".d/")
            feed = self.loadFeed(key)
        return feed

    def getFeedUpdateTime(self, key):
        if not self.listOfFeeds[key].has_key("updateTime"):
            self.listOfFeeds[key]["updateTime"] = "Never"
        return self.listOfFeeds[key]["updateTime"]

    def getFeedUpdateStamp(self, key):
        if not self.listOfFeeds[key].has_key("updateStamp"):
            self.listOfFeeds[key]["updateStamp"] = 0
        return self.listOfFeeds[key]["updateStamp"]

    def getFeedNumberOfUnreadItems(self, key):
        if not self.listOfFeeds[key].has_key("unread"):
            self.listOfFeeds[key]["unread"] = 0
        return self.listOfFeeds[key]["unread"]

    def updateUnread(self, key, unreadItems):
        self.listOfFeeds[key]["unread"] = unreadItems

    def getFeedTitle(self, key):
        return self.listOfFeeds[key]["title"]

    def getFeedUrl(self, key):
        return self.listOfFeeds[key]["url"]

    def getListOfFeeds(self):
        return self.sortedKeys

    def getFavicon(self, key):
        filename = self.configdir+key+".d/favicon.ico"
        if isfile(filename):
            return filename
        else:
            return False

    def addFeed(self, title, url):
        if not self.listOfFeeds.has_key(getId(title)):
            self.listOfFeeds[getId(title)] = {"title":title, "url":url, "unread":0, "updateTime":"Never"}
            self.sortedKeys.append(getId(title))
            self.saveConfig()
            return True
        else:
            return False

    def removeFeed(self, key):
        del self.listOfFeeds[key]
        self.sortedKeys.remove(key)
        if isdir(self.configdir+key+".d/"):
            rmtree(self.configdir+key+".d/")
        self.saveConfig()

    def saveConfig(self):
        # The sort order is stored inside the same pickle, under a reserved key
        self.listOfFeeds["feedingit-order"] = self.sortedKeys
        f = open(self.configdir+"feeds.pickle", "w")
        pickle.dump(self.listOfFeeds, f)
        f.close()

    def moveUp(self, key):
        # Swap with the previous key; index -1 wraps around to the end,
        # mirroring the modulo wrap in moveDown
        index = self.sortedKeys.index(key)
        self.sortedKeys[index] = self.sortedKeys[index-1]
        self.sortedKeys[index-1] = key

    def moveDown(self, key):
        index = self.sortedKeys.index(key)
        index2 = (index+1)%len(self.sortedKeys)
        self.sortedKeys[index] = self.sortedKeys[index2]
        self.sortedKeys[index2] = key

if __name__ == "__main__":
    listing = Listing('/home/user/.feedingit/')
    keys = listing.getListOfFeeds()[:]
    #keys.reverse()
    for key in keys:
        # Ad hoc debug filter: only feeds whose id happens to start with 'd8'
        if key.startswith('d8'):
            print listing.getFeedUpdateTime(key)
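
# A fuller smoke test might update every feed as well (network access and an
# existing config directory are assumed):
#
#     listing = Listing('/home/user/.feedingit/')
#     listing.updateFeeds(expiryTime=24)
#     for key in listing.getListOfFeeds():
#         print listing.getFeedTitle(key), listing.getFeedNumberOfUnreadItems(key)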