Fixed etag value of None if a feed did not contain updates, plus parsing for gzipped...
[feedingit] / src / rss.py
index 5083ce7..a105f3d 100644 (file)
 # ============================================================================
 # Name        : FeedingIt.py
 # Author      : Yves Marcoz
-# Version     : 0.4.3
+# Version     : 0.5.4
 # Description : Simple RSS Reader
 # ============================================================================
 
-from os.path import isfile
-from os.path import isdir
-from os import remove
+from os.path import isfile, isdir
+from shutil import rmtree
+from os import mkdir, remove, utime
 import pickle
 import md5
 import feedparser
 import time
 import urllib2
+from BeautifulSoup import BeautifulSoup
+from urlparse import urljoin
 
 #CONFIGDIR="/home/user/.feedingit/"
 
 def getId(string):
     return md5.new(string).hexdigest()
 
+#def getProxy():
+#    import gconf
+#    if gconf.client_get_default().get_bool('/system/http_proxy/use_http_proxy'):
+#        port = gconf.client_get_default().get_int('/system/http_proxy/port')
+#        http = gconf.client_get_default().get_string('/system/http_proxy/host')
+#        proxy = urllib2.ProxyHandler( {"http":"http://%s:%s/"% (http,port)} )
+#        return (True, proxy)
+#    return (False, None)
+
+# Enable proxy support for images and ArchivedArticles
+#(proxy_support, proxy) = getProxy()
+#if proxy_support:
+#    opener = urllib2.build_opener(proxy)
+#    urllib2.install_opener(opener)
+
+# Entry = {"title":XXX, "content":XXX, "date":XXX, "link":XXX, images = [] }
+
+class ImageHandler:
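+    # Empty placeholder; loadFeed() deletes any imageHandler attribute
+    # left over in feeds pickled by older versions.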
+    def __init__(self, configdir):
+        pass
+
 class Feed:
-    # Contains all the info about a single feed (articles, ...), and expose the data
-    def __init__(self, name, url):
-        self.entries = []
+    def __init__(self, uniqueId, name, url):
+        self.titles = []
+        self.entries = {}
+        self.ids = []
         self.readItems = {}
-        self.countUnread = 0
         self.name = name
         self.url = url
+        self.countUnread = 0
         self.updateTime = "Never"
+        self.uniqueId = uniqueId
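+        # etag/modified are passed back to the server on update so an
+        # unchanged feed can answer 304 Not Modified with no entries.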
+        self.etag = None
+        self.modified = None
+
+    def addImage(self, configdir, key, baseurl, url):
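+        # Cache the image as <configdir><key>.d/<md5(url)>; if it is already
+        # cached, just bump its mtime so the expiry sweep keeps it.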
+        filename = configdir+key+".d/"+getId(url)
+        if not isfile(filename):
+            try:
+                #if url.startswith("http"):
+                #    f = urllib2.urlopen(url)
+                #else:
+                f = urllib2.urlopen(urljoin(baseurl,url))
+                outf = open(filename, "w")
+                outf.write(f.read())
+                f.close()
+                outf.close()
+            except:
+                print "Could not download " + url
+        else:
+            #open(filename,"a").close()  # "Touch" the file
+            file = open(filename,"a")
+            utime(filename, None)
+            file.close()
+        return filename
 
     def editFeed(self, url):
         self.url = url
 
     def saveFeed(self, configdir):
-        file = open(configdir+getId(self.name), "w")
+        if not isdir(configdir+self.uniqueId+".d"):
+            mkdir(configdir+self.uniqueId+".d")
+        file = open(configdir+self.uniqueId+".d/feed", "w")
         pickle.dump(self, file )
         file.close()
+        self.saveUnread(configdir)
+        
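+    # The read/unread map is pickled separately so reloadUnread() can
+    # refresh unread counts without unpickling the whole feed.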
+    def saveUnread(self, configdir):
+        if not isdir(configdir+self.uniqueId+".d"):
+            mkdir(configdir+self.uniqueId+".d")
+        file = open(configdir+self.uniqueId+".d/unread", "w")
+        pickle.dump(self.readItems, file )
+        file.close()
 
-    def updateFeed(self, configdir, expiryTime=24):
+    def reloadUnread(self, configdir):
+        try:
+            file = open(configdir+self.uniqueId+".d/unread", "r")
+            self.readItems = pickle.load( file )
+            file.close()
+            self.countUnread = 0
+            for id in self.getIds():
+               if self.readItems[id]==False:
+                  self.countUnread = self.countUnread + 1
+        except:
+            pass
+        return self.countUnread
+
+    def updateFeed(self, configdir, expiryTime=24, proxy=None, imageCache=False):
         # Expiry time is in hours
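+        # Hand feedparser the saved etag/modified so unchanged feeds return
+        # no entries instead of being re-downloaded.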
-        tmp=feedparser.parse(self.url)
+        if proxy == None:
+            tmp=feedparser.parse(self.url, etag = self.etag, modified = self.modified)
+        else:
+            tmp=feedparser.parse(self.url, etag = self.etag, modified = self.modified, handlers = [proxy])
+        expiry = float(expiryTime) * 3600.
+
         # Check if the parse was successful (number of entries > 0; otherwise do nothing)
         if len(tmp["entries"])>0:
+           # Only update etag/modified when the feed returned entries; a 304 reply must not wipe them
+           try:
+               self.etag = tmp["etag"]
+           except KeyError:
+               self.etag = None
+           try:
+               self.modified = tmp["modified"]
+           except KeyError:
+               self.modified = None
+           if not isdir(configdir+self.uniqueId+".d"):
+               mkdir(configdir+self.uniqueId+".d")
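+           # Best-effort favicon fetch; failures are ignored.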
+           try:
+               f = urllib2.urlopen(urljoin(tmp["feed"]["link"],"/favicon.ico"))
+               data = f.read()
+               f.close()
+               outf = open(configdir+self.uniqueId+".d/favicon.ico", "w")
+               outf.write(data)
+               outf.close()
+               del data
+           except:
+               #import traceback
+               #traceback.print_exc()
+               pass
+
+
            #reversedEntries = self.getEntries()
            #reversedEntries.reverse()
+
+           currentTime = time.time()
+           tmpEntries = {}
            tmpIds = []
            for entry in tmp["entries"]:
-               tmpIds.append(self.getUniqueId(-1, entry))
-           for entry in self.getEntries():
-               currentTime = time.time()
-               expiry = float(expiryTime) * 3600.
-               if entry.has_key("updated_parsed"):
-                   articleTime = time.mktime(entry["updated_parsed"])
-                   if currentTime - articleTime < expiry:
-                       id = self.getUniqueId(-1, entry)
-                       if not id in tmpIds:
-                           tmp["entries"].append(entry)
-                   
-           self.entries = tmp["entries"]
-           self.countUnread = 0
-           # Initialize the new articles to unread
-           tmpReadItems = self.readItems
-           self.readItems = {}
-           for index in range(self.getNumberOfEntries()):
-               if not tmpReadItems.has_key(self.getUniqueId(index)):
-                   self.readItems[self.getUniqueId(index)] = False
+               (dateTuple, date) = self.extractDate(entry)
+               try:
+                   entry["title"]
+               except:
+                   entry["title"] = "No Title"
+               try:
+                   entry["link"]
+               except:
+                   entry["link"] = ""
+               tmpEntry = {"title":entry["title"], "content":self.extractContent(entry),
+                            "date":date, "dateTuple":dateTuple, "link":entry["link"], "images":[] }
+               id = self.generateUniqueId(tmpEntry)
+               
+               #articleTime = time.mktime(self.entries[id]["dateTuple"])
+               if not id in self.ids:
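+                   # New article: cache images when enabled and write the
+                   # rendered page to <id>.html in the feed directory.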
+                   soup = BeautifulSoup(self.getArticle(tmpEntry)) #tmpEntry["content"])
+                   images = soup('img')
+                   baseurl = tmpEntry["link"]
+                   if imageCache:
+                      for img in images:
+                          try:
+                              filename = self.addImage(configdir, self.uniqueId, baseurl, img['src'])
+                              img['src']=filename
+                              tmpEntry["images"].append(filename)
+                          except:
+                              print "Error downloading image %s" % img
+                   tmpEntry["contentLink"] = configdir+self.uniqueId+".d/"+id+".html"
+                   file = open(tmpEntry["contentLink"], "w")
+                   file.write(soup.prettify())
+                   file.close()
+                   tmpEntries[id] = tmpEntry
+                   tmpIds.append(id)
+                   if id not in self.readItems:
+                       self.readItems[id] = False
                else:
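+                   # Known article: touch its cached page and images so the
+                   # file sweep at the end of the update keeps them.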
-                   self.readItems[self.getUniqueId(index)] = tmpReadItems[self.getUniqueId(index)]
-               if self.readItems[self.getUniqueId(index)]==False:
-                  self.countUnread = self.countUnread + 1
+                   try:
+                       filename = configdir+self.uniqueId+".d/"+id+".html"
+                       file = open(filename,"a")
+                       utime(filename, None)
+                       file.close()
+                       for image in self.entries[id]["images"]:
+                            file = open(image,"a")
+                            utime(image, None)
+                            file.close()
+                   except:
+                       pass
+                   tmpEntries[id] = self.entries[id]
+                   tmpIds.append(id)
+            
+           oldIds = self.ids[:]
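+           # Articles that dropped out of the feed are kept until they expire:
+           # read items survive one expiry period, unread items two.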
+           for entryId in oldIds:
+                if not entryId in tmpIds:
+                    try:
+                        articleTime = time.mktime(self.entries[entryId]["dateTuple"])
+                        if (currentTime - articleTime > 2*expiry):
+                            self.removeEntry(entryId)
+                            continue
+                        if (currentTime - articleTime > expiry) and (self.isEntryRead(entryId)):
+                            # Entry is past its expiry time and has been read; drop it
+                            self.removeEntry(entryId)
+                            continue
+                        tmpEntries[entryId] = self.entries[entryId]
+                        tmpIds.append(entryId)
+                    except:
+                        print "Error purging old article %s" % entryId
+                        self.removeEntry(entryId)
+
+           self.entries = tmpEntries
+           self.ids = tmpIds
+           tmpUnread = 0
+           
+
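+           # Rebuild the unread count and drop read-state for vanished ids.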
+           ids = self.ids[:]
+           for id in ids:
+               if not self.readItems.has_key(id):
+                   self.readItems[id] = False
+               if self.readItems[id]==False:
+                  tmpUnread = tmpUnread + 1
+           keys = self.readItems.keys()
+           for id in keys:
+               if not id in self.ids:
+                   del self.readItems[id]
            del tmp
+           self.countUnread = tmpUnread
            self.updateTime = time.asctime()
            self.saveFeed(configdir)
-    
-    def setEntryRead(self, index):
-        if self.readItems[self.getUniqueId(index)]==False:
+           from glob import glob
+           from os import stat
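+           # Sweep the feed directory for cached files untouched for three
+           # expiry periods.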
+           for file in glob(configdir+self.uniqueId+".d/*"):
+                stats = stat(file)
+                # stats[8] is st_mtime, the file's last-modified time
+                lastmodDate = stats[8]
+                expDate = time.time()-expiry*3
+                if expDate > lastmodDate:
+                    try:
+                        remove(file)
+                    except OSError:
+                        print 'Could not remove', file
+           
+
+    def extractContent(self, entry):
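+        # Take summary, upgrade to full content when it is longer, and
+        # fall back to description when both are empty.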
+        content = ""
+        if entry.has_key('summary'):
+            content = entry.get('summary', '')
+        if entry.has_key('content'):
+            if len(entry.content[0].value) > len(content):
+                content = entry.content[0].value
+        if content == "":
+            content = entry.get('description', '')
+        return content
+        
+    def extractDate(self, entry):
+        if entry.has_key("updated_parsed"):
+            date1 = entry["updated_parsed"]
+            date = time.strftime("%a, %d %b %Y %H:%M:%S",entry["updated_parsed"])
+        elif entry.has_key("published_parsed"):
+            date1 = entry["published_parsed"]
+            date = time.strftime("%a, %d %b %Y %H:%M:%S", entry["published_parsed"])
+        else:
+            date1 = ""
+            date = ""
+        #print date1, date
+        return (date1, date)
+
+    def setEntryRead(self, id):
+        if self.readItems[id]==False:
             self.countUnread = self.countUnread - 1
-            self.readItems[self.getUniqueId(index)] = True
+            self.readItems[id] = True
             
-    def setEntryUnread(self, index):
-        if self.readItems[self.getUniqueId(index)]==True:
+    def setEntryUnread(self, id):
+        if self.readItems[id]==True:
             self.countUnread = self.countUnread + 1
-            self.readItems[self.getUniqueId(index)] = False
+            self.readItems[id] = False
     
-    def isEntryRead(self, index):
-        return self.readItems[self.getUniqueId(index)]
+    def isEntryRead(self, id):
+        # Check if an entry is read; return False if the read
+        # status of an entry is unknown (id not in readItems)
+        return self.readItems.get(id, False)
     
-    def getTitle(self, index):
-        return self.entries[index]["title"]
+    def getTitle(self, id):
+        return self.entries[id]["title"]
     
-    def getLink(self, index):
-        return self.entries[index]["link"]
+    def getContentLink(self, id):
+        if self.entries[id].has_key("contentLink"):
+            return self.entries[id]["contentLink"]
+        return self.entries[id]["link"]
     
-    def getDate(self, index):
-        try:
-            return self.entries[index]["updated_parsed"]
-        except:
-            return time.localtime()
+    def getExternalLink(self, id):
+        return self.entries[id]["link"]
     
-    def getUniqueId(self, index, entry=None):
-        if index >=0:
-            entry = self.entries[index]
-        if entry.has_key("updated_parsed"):
-            return getId(time.strftime("%a, %d %b %Y %H:%M:%S",entry["updated_parsed"]) + entry["title"])
-        elif entry.has_key("link"):
-            return getId(entry["link"] + entry["title"])
-        else:
-            return getId(entry["title"])
+    def getDate(self, id):
+        return self.entries[id]["date"]
+
+    def getDateTuple(self, id):
+        return self.entries[id]["dateTuple"]
+
+    def getUniqueId(self, index):
+        return self.ids[index]
+    
+    def generateUniqueId(self, entry):
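+        # md5 of date+title; undated entries sharing a title will collide.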
+        return getId(entry["date"] + entry["title"])
     
     def getUpdateTime(self):
         return self.updateTime
     
     def getEntries(self):
-        try:
-            return self.entries
-        except:
-            return []
+        return self.entries
+    
+    def getIds(self):
+        return self.ids
+    
+    def getNextId(self, id):
+        return self.ids[(self.ids.index(id)+1) % self.getNumberOfEntries()]
+    
+    def getPreviousId(self, id):
+        return self.ids[(self.ids.index(id)-1) % self.getNumberOfEntries()]
     
     def getNumberOfUnreadItems(self):
         return self.countUnread
     
     def getNumberOfEntries(self):
-        return len(self.entries)
+        return len(self.ids)
     
-    def getItem(self, index):
+    def getItem(self, id):
         try:
-            return self.entries[index]
+            return self.entries[id]
         except:
             return []
     
-    def getContent(self, index):
-        content = ""
-        entry = self.entries[index]
-        if entry.has_key('summary'):
-            content = entry.get('summary', '')
-        if entry.has_key('content'):
-            if len(entry.content[0].value) > len(content):
-                content = entry.content[0].value
-        if content == "":
-            content = entry.get('description', '')
-        return content
+    def getContent(self, id):
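+        # Prefer the page cached on disk; fall back to the in-memory content.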
+        if self.entries[id].has_key("contentLink"):
+            file = open(self.entries[id]["contentLink"])
+            content = file.read()
+            file.close()
+            return content
+        return self.entries[id]["content"]
     
-    def getArticle(self, index):
-        self.setEntryRead(index)
-        entry = self.entries[index]
-        title = entry.get('title', 'No title')
+    def removeEntry(self, id):
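+        # Drop the entry's cached page, its id, and its read-state bookkeeping.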
+        #try:
+        if self.entries.has_key(id):
+            entry = self.entries[id]
+            
+            if entry.has_key("contentLink"):
+                try:
+                    remove(entry["contentLink"])  #os.remove
+                except:
+                    print "File not found for deletion: %s" % entry["contentLink"]
+            del self.entries[id]
+        else:
+            print "Entries has no %s key" % id
+        if id in self.ids:
+            self.ids.remove(id)
+        else:
+            print "Ids has no %s key" % id
+        if self.readItems.has_key(id):
+            if self.readItems[id]==False:
+                self.countUnread = self.countUnread - 1
+            del self.readItems[id]
+        else:
+            print "ReadItems has no %s key" % id
+        #except:
+        #    print "Error removing entry %s" %id
+    
+    def getArticle(self, entry):
+        #self.setEntryRead(id)
+        #entry = self.entries[id]
+        title = entry['title']
         #content = entry.get('content', entry.get('summary_detail', {}))
-        content = self.getContent(index)
+        content = entry["content"]
+
+        link = entry['link']
+        date = entry["date"]
 
-        link = entry.get('link', 'NoLink')
-        if entry.has_key("updated_parsed"):
-            date = time.strftime("%a, %d %b %Y %H:%M:%S",entry["updated_parsed"])
-        elif entry.has_key("published_parsed"):
-            date = time.strftime("%a, %d %b %Y %H:%M:%S", entry["published_parsed"])
-        else:
-            date = ""
         #text = '''<div style="color: black; background-color: white;">'''
-        text = '<meta http-equiv="Content-Type" content="text/html; charset=UTF-8"/>'
-        text += '<head><style> body {-webkit-user-select: none;} </style></head>'
-        text += '<body><div><a href=\"' + link + '\">' + title + "</a>"
+        text = '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">'
+        text += "<html><head><title>" + title + "</title>"
+        text += '<meta http-equiv="Content-Type" content="text/html; charset=UTF-8"/>\n'
+        #text += '<style> body {-webkit-user-select: none;} </style>'
+        text += '</head><body><div><a href=\"' + link + '\">' + title + "</a>"
         text += "<BR /><small><i>Date: " + date + "</i></small></div>"
         text += "<BR /><BR />"
         text += content
-        text += "</body>"
+        text += "</body></html>"
         return text
-
-class ArchivedArticles(Feed):
+        
+class ArchivedArticles(Feed):    
     def addArchivedArticle(self, title, link, updated_parsed, configdir):
         entry = {}
         entry["title"] = title
         entry["link"] = link
-        entry["downloaded"] = False
         entry["summary"] = '<a href=\"' + link + '\">' + title + "</a>"
         entry["updated_parsed"] = updated_parsed
         entry["time"] = time.time()
-        self.entries.append(entry)
-        self.readItems[self.getUniqueId(len(self.entries)-1)] = False
+        #print entry
+        (dateTuple, date) = self.extractDate(entry)
+        tmpEntry = {"title":entry["title"], "content":self.extractContent(entry),
+                            "date":date, "dateTuple":dateTuple, "link":entry["link"], "images":[], "downloaded":False, "time":entry["time"] }
+        id = self.generateUniqueId(tmpEntry)
+        self.entries[id] = tmpEntry
+        self.ids.append(id)  
+        self.readItems[id] = False
         self.countUnread = self.countUnread + 1
         self.saveFeed(configdir)
-        #print entry
+        self.saveUnread(configdir)
         
-    def updateFeed(self, configdir, expiryTime=24):
-        index = 0
-        for entry in self.getEntries():
+    def updateFeed(self, configdir, expiryTime=24, proxy=None, imageCache=False):
+        for id in self.getIds():
+            entry = self.entries[id]
             if not entry["downloaded"]:
-                try:
+                #try:
                     f = urllib2.urlopen(entry["link"])
-                    entry["summary"] = f.read()
+                    #entry["content"] = f.read()
+                    html = f.read()
                     f.close()
-                    if len(entry["summary"]) > 0:
+                    soup = BeautifulSoup(html)
+                    images = soup('img')
+                    baseurl = entry["link"]
+                    for img in images:
+                        filename = self.addImage(configdir, self.uniqueId, baseurl, img['src'])
+                        img['src']=filename
+                    entry["contentLink"] = configdir+self.uniqueId+".d/"+id+".html"
+                    file = open(entry["contentLink"], "w")
+                    file.write(soup.prettify())
+                    file.close()
+                    if len(html) > 0:
                         entry["downloaded"] = True
                         entry["time"] = time.time()
-                        self.setEntryUnread(index)
-                except:
-                    pass
-            currentTime = time.time()
-            expiry = float(expiryTime) * 3600
-            if currentTime - entry["time"] > expiry:
-                self.entries.remove(entry)
-            index += 1
+                        self.setEntryUnread(id)
+                #except:
+                #    pass
+            #currentTime = time.time()
+            #expiry = float(expiryTime) * 3600
+            #if currentTime - entry["time"] > expiry:
+            #    if self.isEntryRead(id):
+            #        self.removeEntry(id)
+            #    else:
+            #        if currentTime - entry["time"] > 2*expiry:
+            #            self.removeEntry(id)
         self.updateTime = time.asctime()
         self.saveFeed(configdir)
+        
+    def purgeReadArticles(self):
+        ids = self.getIds()[:]   # copy; removeEntry() mutates self.ids
+        for id in ids:
+            if self.isEntryRead(id):
+                self.removeEntry(id)
+                
+    def removeArticle(self, id):
+        self.removeEntry(id)
 
     def getArticle(self, index):
         self.setEntryRead(index)
@@ -230,13 +502,13 @@ class Listing:
     # Lists all the feeds in a dictionary, and exposes the data
     def __init__(self, configdir):
         self.configdir = configdir
-        self.feeds = {}
+        #self.feeds = {}
         if isfile(self.configdir+"feeds.pickle"):
             file = open(self.configdir+"feeds.pickle")
             self.listOfFeeds = pickle.load(file)
             file.close()
         else:
-            self.listOfFeeds = {getId("Slashdot"):{"title":"Slashdot", "url":"http://rss.slashdot.org/Slashdot/slashdot"}, }
+            self.listOfFeeds = {getId("Maemo News"):{"title":"Maemo News", "url":"http://maemo.org/news/items.xml", "unread":0, "updateTime":"Never"}, }
         if self.listOfFeeds.has_key("font"):
             del self.listOfFeeds["font"]
         if self.listOfFeeds.has_key("feedingit-order"):
@@ -247,65 +519,106 @@ class Listing:
                 self.sortedKeys.remove("font")
             self.sortedKeys.sort(key=lambda obj: self.getFeedTitle(obj))
         list = self.sortedKeys[:]
-        for key in list:
-            try:
-                self.loadFeed(key)
-            except:
-                #import traceback
-                #if key.startswith('d8'):
-                #traceback.print_exc()
-                self.sortedKeys.remove(key)
-            #print key
-                #print key in self.sortedKeys
-        #print "d8eb3f07572892a7b5ed9c81c5bb21a2" in self.sortedKeys
-        #print self.listOfFeeds["d8eb3f07572892a7b5ed9c81c5bb21a2"]
-        self.closeCurrentlyDisplayedFeed()
-        #self.saveConfig()
+        #self.closeCurrentlyDisplayedFeed()
 
     def addArchivedArticle(self, key, index):
-        title = self.getFeed(key).getTitle(index)
-        link = self.getFeed(key).getLink(index)
-        date = self.getFeed(key).getDate(index)
-        if not self.listOfFeeds.has_key(getId("Archived Articles")):
-            self.listOfFeeds[getId("Archived Articles")] = {"title":"Archived Articles", "url":""}
-            self.sortedKeys.append(getId("Archived Articles"))
-            self.feeds[getId("Archived Articles")] = ArchivedArticles("Archived Articles", "")
+        feed = self.getFeed(key)
+        title = feed.getTitle(index)
+        link = feed.getExternalLink(index)
+        date = feed.getDateTuple(index)
+        if not self.listOfFeeds.has_key("ArchivedArticles"):
+            self.listOfFeeds["ArchivedArticles"] = {"title":"Archived Articles", "url":"", "unread":0, "updateTime":"Never"}
+            self.sortedKeys.append("ArchivedArticles")
+            #self.feeds["Archived Articles"] = ArchivedArticles("Archived Articles", "")
             self.saveConfig()
-            
-        self.getFeed(getId("Archived Articles")).addArchivedArticle(title, link, date, self.configdir)
+        archFeed = self.getFeed("ArchivedArticles")
+        archFeed.addArchivedArticle(title, link, date, self.configdir)
+        self.listOfFeeds["ArchivedArticles"]["unread"] = archFeed.getNumberOfUnreadItems()
         
     def loadFeed(self, key):
-            if isfile(self.configdir+key):
-                file = open(self.configdir+key)
-                self.feeds[key] = pickle.load(file)
+            if isfile(self.configdir+key+".d/feed"):
+                file = open(self.configdir+key+".d/feed")
+                feed = pickle.load(file)
                 file.close()
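+                # Backfill attributes missing from feeds pickled by older versions.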
+                try:
+                    feed.uniqueId
+                except AttributeError:
+                    feed.uniqueId = getId(feed.name)
+                try:
+                    del feed.imageHandler
+                except:
+                    pass
+                try:
+                    feed.etag
+                except AttributeError:
+                    feed.etag = None
+                try:
+                    feed.modified
+                except AttributeError:
+                    feed.modified = None
+                #feed.reloadUnread(self.configdir)
             else:
                 #print key
                 title = self.listOfFeeds[key]["title"]
                 url = self.listOfFeeds[key]["url"]
-                self.feeds[key] = Feed(title, url)
+                if key == "ArchivedArticles":
+                    feed = ArchivedArticles("ArchivedArticles", title, url)
+                else:
+                    feed = Feed(getId(title), title, url)
+            return feed
         
-    def updateFeeds(self, expiryTime=24):
+    def updateFeeds(self, expiryTime=24, proxy=None, imageCache=False):
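+        # Feeds are now loaded on demand rather than held in self.feeds;
+        # only unread counts and update times are cached in listOfFeeds.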
         for key in self.getListOfFeeds():
-            self.feeds[key].updateFeed(self.configdir, expiryTime)
+            feed = self.loadFeed(key)
+            feed.updateFeed(self.configdir, expiryTime, proxy, imageCache)
+            self.listOfFeeds[key]["unread"] = feed.getNumberOfUnreadItems()
+            self.listOfFeeds[key]["updateTime"] = feed.getUpdateTime()
             
-    def updateFeed(self, key, expiryTime=24):
-        self.feeds[key].updateFeed(self.configdir, expiryTime)
+    def updateFeed(self, key, expiryTime=24, proxy=None, imageCache=False):
+        feed = self.getFeed(key)
+        feed.updateFeed(self.configdir, expiryTime, proxy, imageCache)
+        self.listOfFeeds[key]["unread"] = feed.getNumberOfUnreadItems()
+        self.listOfFeeds[key]["updateTime"] = feed.getUpdateTime()
         
     def editFeed(self, key, title, url):
         self.listOfFeeds[key]["title"] = title
         self.listOfFeeds[key]["url"] = url
-        self.feeds[key].editFeed(url)
-            
+        feed = self.loadFeed(key)
+        feed.editFeed(url)
+        feed.saveFeed(self.configdir)    # persist the new url into the pickled feed
+
     def getFeed(self, key):
-        return self.feeds[key]
+        try:
+            feed = self.loadFeed(key)
+            feed.reloadUnread(self.configdir)
+        except:
+            # If the feed file gets corrupted, we need to reset the feed.
+            import traceback
+            traceback.print_exc()
+            import dbus
+            bus = dbus.SessionBus()
+            remote_object = bus.get_object("org.freedesktop.Notifications", # Connection name
+                               "/org/freedesktop/Notifications" # Object's path
+                              )
+            iface = dbus.Interface(remote_object, 'org.freedesktop.Notifications')
+            iface.SystemNoteInfoprint("Error opening feed %s, it has been reset." % self.getFeedTitle(key))
+            if isdir(self.configdir+key+".d/"):
+                rmtree(self.configdir+key+".d/")
+            feed = self.loadFeed(key)
+        return feed
     
     def getFeedUpdateTime(self, key):
         #print self.listOfFeeds.has_key(key)
-        return self.feeds[key].getUpdateTime()
+        if not self.listOfFeeds[key].has_key("updateTime"):
+            self.listOfFeeds[key]["updateTime"] = "Never"
+        return self.listOfFeeds[key]["updateTime"]
     
     def getFeedNumberOfUnreadItems(self, key):
-        return self.feeds[key].getNumberOfUnreadItems()
+        if not self.listOfFeeds[key].has_key("unread"):
+            self.listOfFeeds[key]["unread"] = 0
+        return self.listOfFeeds[key]["unread"]
+
+    def updateUnread(self, key, unreadItems):
+        self.listOfFeeds[key]["unread"] = unreadItems
    
     def getFeedTitle(self, key):
         return self.listOfFeeds[key]["title"]
@@ -316,12 +629,19 @@ class Listing:
     def getListOfFeeds(self):
         return self.sortedKeys
     
+    def getFavicon(self, key):
+        filename = self.configdir+key+".d/favicon.ico"
+        if isfile(filename):
+            return filename
+        else:
+            return False
+    
     def addFeed(self, title, url):
         if not self.listOfFeeds.has_key(getId(title)):
-            self.listOfFeeds[getId(title)] = {"title":title, "url":url}
+            self.listOfFeeds[getId(title)] = {"title":title, "url":url, "unread":0, "updateTime":"Never"}
             self.sortedKeys.append(getId(title))
             self.saveConfig()
-            self.feeds[getId(title)] = Feed(title, url)
+            #self.feeds[getId(title)] = Feed(title, url)
             return True
         else:
             return False
@@ -329,9 +649,9 @@ class Listing:
     def removeFeed(self, key):
         del self.listOfFeeds[key]
         self.sortedKeys.remove(key)
-        del self.feeds[key]
-        if isfile(self.configdir+key):
-           remove(self.configdir+key)
+        #del self.feeds[key]
+        if isdir(self.configdir+key+".d/"):
+           rmtree(self.configdir+key+".d/")
         self.saveConfig()
     
     def saveConfig(self):
@@ -350,13 +670,6 @@ class Listing:
         index2 = (index+1)%len(self.sortedKeys)
         self.sortedKeys[index] = self.sortedKeys[index2]
         self.sortedKeys[index2] = key
-        
-    def setCurrentlyDisplayedFeed(self, key):
-        self.currentlyDisplayedFeed = key
-    def closeCurrentlyDisplayedFeed(self):
-        self.currentlyDisplayedFeed = False
-    def getCurrentlyDisplayedFeed(self):
-        return self.currentlyDisplayedFeed
     
 if __name__ == "__main__":
     listing = Listing('/home/user/.feedingit/')
@@ -364,4 +677,4 @@ if __name__ == "__main__":
         #list.reverse()
     for key in list:
         if key.startswith('d8'):
-            print listing.getFeedUpdateTime(key)
\ No newline at end of file
+            print listing.getFeedUpdateTime(key)