import gtk
import csv
import urllib2
-import urllib
import string
import os
import osso
import datetime
import shutil
import sys
+import socket
+
+socket.setdefaulttimeout(30)
supports_alpha = False
# constants. dbfile is the location of the csv
# comiccache is the location of the images
-APP_VERSION = "0.3.2-2"
+APP_VERSION = "0.4.1-1"
basedbdir = "/opt/comic-widget/db/"
imagedir = "/opt/comic-widget/images/"
dbdir = "/home/user/.comic-widget/"
activecomics = dbdir + "activecomics.cfg"
comiccache = "/home/user/MyDocs/.comics/"
-comics = {"xkcd":{"name":"xkcd","link":"http://xkcd.org/","start":666,"dbfile":dbdir + "comicdb.xkcd.csv"},
- "sinfest":{"name":"Sinfest","link":"http://sinfest.com/","start":3400,"dbfile":dbdir + "comicdb.sinfest.csv"},
+defaultcomics = ['xkcd','wulff','sinfest']
+comics = {
+ "9_chickweed_lanecomicscom":{"name":"9 Chickweed Lane","link":"http://comics.com/9_chickweed_lane/","start":"2010-02-01","dbfile":dbdir + "comicdb.9chickweedlane.csv"},
+ "agnescomicscom":{"name":"Agnes","link":"http://comics.com/agnes/","start":"2010-02-01","dbfile":dbdir + "comicdb.agnes.csv"},
+ "andy_cappcomicscom":{"name":"Andy Capp","link":"http://comics.com/andy_capp/","start":"2010-02-01","dbfile":dbdir + "comicdb.andycapp.csv"},
+ "alley_oopcomicscom":{"name":"Alley Oop","link":"http://comics.com/alley_oop/","start":"2010-02-01","dbfile":dbdir + "comicdb.alleyoop.csv"},
+ "arlonjaniscomicscom":{"name":"Arlo and Janis","link":'http://comics.com/arlo&janis/',"start":"2010-02-01","dbfile":dbdir + "comicdb.arlonjanis.csv"},
+ "bccomicscom":{"name":"B.C.","link":"http://comics.com/bc/","start":"2010-02-01","dbfile":dbdir + "comicdb.bc.csv"},
+ "ballard_streetcomicscom":{"name":"Ballard Street","link":"http://comics.com/ballard_street/","start":"2010-02-01","dbfile":dbdir + "comicdb.ballardstreet.csv"},
+ "babyblues":{"name":"Baby Blues","link":"http://www.babyblues.com/","start":"01/19/2010","dbfile":dbdir + "comicdb.babyblues.csv"},
+ "bencomicscom":{"name":"Ben","link":'http://comics.com/ben/',"start":"2010-02-01","dbfile":dbdir + "comicdb.ben.csv"},
+ "bettycomicscom":{"name":"Betty","link":"http://comics.com/betty/","start":"2010-02-01","dbfile":dbdir + "comicdb.betty.csv"},
+ "big_natecomicscom":{"name":"Big Nate","link":"http://comics.com/big_nate/","start":"2010-02-01","dbfile":dbdir + "comicdb.bignate.csv"},
+ "brevitycomicscom":{"name":"Brevity","link":"http://comics.com/brevity/","start":"2010-02-01","dbfile":dbdir + "comicdb.brevity.csv"},
+ "candorvillecomicscom":{"name":"Candorville","link":"http://comics.com/candorville/","start":"2010-02-01","dbfile":dbdir + "comicdb.candorville.csv"},
+ "cheap_thrillscomicscom":{"name":"Cheap Thrills","link":"http://comics.com/cheap_thrills/","start":"2010-02-01","dbfile":dbdir + "comicdb.cheapthrills.csv"},
+ "committedcomicscom":{"name":"Committed","link":"http://comics.com/committed/","start":"2010-02-01","dbfile":dbdir + "comicdb.committed.csv"},
+ "cownboycomicscom":{"name":"Cow and Boy","link":'http://comics.com/cow&boy/',"start":"2010-02-01","dbfile":dbdir + "comicdb.cownboy.csv"},
+ "cyanide":{"name":"Cyanide and Happiness","link":"http://explosm.com/","start":"1920","dbfile":dbdir + "comicdb.cyanide.csv"},
+ "daddyshomecomicscom":{"name":"Daddy's Home","link":"http://comics.com/daddys_home/","start":"2010-02-01","dbfile":dbdir + "comicdb.daddyshome.csv"},
+ "dilbert":{"name":"Dilbert","link":"http://dilbert.com/","start":"2010-01-01","dbfile":dbdir + "comicdb.dilbert.csv"},
+ "dog_eat_dougcomicscom":{"name":"Dog eat Doug","link":"http://comics.com/dog_eat_doug/","start":"2010-02-01","dbfile":dbdir + "comicdb.dogeatdoug.csv"},
+ "drabblecomicscom":{"name":"Drabble","link":"http://comics.com/drabble/","start":"2010-02-01","dbfile":dbdir + "comicdb.drabble.csv"},
+ "f_minuscomicscom":{"name":"F Minus","link":"http://comics.com/f_minus/","start":"2010-02-01","dbfile":dbdir + "comicdb.f_minus.csv"},
+ "family_treecomicscom":{"name":"Family Tree","link":"http://comics.com/family_tree/","start":"2010-02-01","dbfile":dbdir + "comicdb.familytree.csv"},
+ "farcuscomicscom":{"name":"Farcus","link":'http://comics.com/farcus/',"start":"2010-02-01","dbfile":dbdir + "comicdb.farcus.csv"},
+ "fat_catscomicscom":{"name":"Fat Cats","link":'http://comics.com/fat_cats_classics/',"start":"2010-02-01","dbfile":dbdir + "comicdb.fatcats.csv"},
+ "ferdnandcomicscom":{"name":"Ferd'nand","link":'http://comics.com/ferdnand/',"start":"2010-02-01","dbfile":dbdir + "comicdb.ferdnand.csv"},
+ "flight_deckcomicscom":{"name":"Flight_Deck","link":'http://comics.com/flight_deck/',"start":"2010-02-01","dbfile":dbdir + "comicdb.flightdeck.csv"},
+ "flonfriendscomicscom":{"name":"Flo and Friends","link":'http://comics.com/flo&friends/',"start":"2010-02-01","dbfile":dbdir + "comicdb.flonfriends.csv"},
+ "fort_knoxcomicscom":{"name":"Fort Knox","link":'http://comics.com/fort_knox/',"start":"2010-02-01","dbfile":dbdir + "comicdb.fortknox.csv"},
+ "franknernestcomicscom":{"name":"Frank and Ernest","link":'http://comics.com/frank&ernest/',"start":"2010-02-01","dbfile":dbdir + "comicdb.franknernest.csv"},
+ "frazzcomicscom":{"name":"Frazz","link":"http://comics.com/frazz/","start":"2010-02-01","dbfile":dbdir + "comicdb.frazz.csv"},
+ "free_rangecomicscom":{"name":"Free Range","link":'http://comics.com/free_range/',"start":"2010-02-01","dbfile":dbdir + "comicdb.freerange.csv"},
+ "geechcomicscom":{"name":"Geech","link":'http://comics.com/geech_classics/',"start":"2010-02-01","dbfile":dbdir + "comicdb.geech.csv"},
+ "getfuzzycomicscom":{"name":"Get Fuzzy","link":"http://comics.com/get_fuzzy/","start":"2010-02-01","dbfile":dbdir + "comicdb.getfuzzy.csv"},
+ "girlsnsportscomicscom":{"name":"Girls and Sports","link":'http://comics.com/girls&sports/',"start":"2010-02-01","dbfile":dbdir + "comicdb.gitlsnsports.csv"},
+ "graffiticomicscom":{"name":"Graffiti","link":'http://comics.com/graffiti/',"start":"2010-02-01","dbfile":dbdir + "comicdb.graffiti.csv"},
+ "grand_avenuecomicscom":{"name":"Grand Avenue","link":'http://comics.com/grand_avenue/',"start":"2010-02-01","dbfile":dbdir + "comicdb.grandavenue.csv"},
+ "heathcliffcomicscom":{"name":"Heathcliff","link":'http://comics.com/heathcliff/',"start":"2010-02-01","dbfile":dbdir + "comicdb.heathcliff.csv"},
+ "herb_and_jamaalcomicscom":{"name":"Herb and Jamaal","link":'http://comics.com/herb_and_jamaal/',"start":"2010-02-01","dbfile":dbdir + "comicdb.herbandjamaal.csv"},
+ "hermancomicscom":{"name":"Herman","link":'http://comics.com/herman/',"start":"2010-02-01","dbfile":dbdir + "comicdb.herman.csv"},
+ "home_and_awaycomicscom":{"name":"Home and Away","link":'http://comics.com/home_and_away/',"start":"2010-02-01","dbfile":dbdir + "comicdb.homeandaway.csv"},
+ "its_all_about_youcomicscom":{"name":"It's All About You","link":'http://comics.com/its_all_about_you/',"start":"2010-02-01","dbfile":dbdir + "comicdb.itsallaboutyou.csv"},
+ "janes_worldcomicscom":{"name":"Jane's World","link":'http://comics.com/janes_world/',"start":"2010-02-01","dbfile":dbdir + "comicdb.janesworld.csv"},
+ "jump_startcomicscom":{"name":"Jump Start","link":'http://comics.com/jump_start/',"start":"2010-02-01","dbfile":dbdir + "comicdb.jumpstart.csv"},
+ "kit_n_carlylecomicscom":{"name":"Kit 'n' Carlyle","link":'http://comics.com/kit_n_carlyle/',"start":"2010-02-01","dbfile":dbdir + "comicdb.kitncarlyle.csv"},
+ "lil_abnercomicscom":{"name":"Li'l Abner","link":'http://comics.com/lil_abner_classics/',"start":"2010-02-01","dbfile":dbdir + "comicdb.lilabner.csv"},
+ "liberty_meadowscomicscom":{"name":"Liberty Meadows","link":'http://comics.com/liberty_meadows/',"start":"2010-02-01","dbfile":dbdir + "comicdb.libertymeadows.csv"},
+ "little_dog_lostcomicscom":{"name":"Little Dog Lost","link":'http://comics.com/little_dog_lost/',"start":"2010-02-01","dbfile":dbdir + "comicdb.littledoglost.csv"},
+ "lolacomicscom":{"name":"Lola","link":'http://comics.com/lola/',"start":"2010-02-01","dbfile":dbdir + "comicdb.lola.csv"},
+ "luanncomicscom":{"name":"Luann","link":'http://comics.com/luann/',"start":"2010-02-01","dbfile":dbdir + "comicdb.luann.csv"},
+ "marmadukecomicscom":{"name":"Marmaduke","link":'http://comics.com/marmaduke/',"start":"2010-02-01","dbfile":dbdir + "comicdb.marmaduke.csv"},
+ "megcomicscom":{"name":"Meg","link":'http://comics.com/meg_classics/',"start":"2010-02-01","dbfile":dbdir + "comicdb.meg.csv"},
+ "minimum_securitycomicscom":{"name":"Minimum Security","link":'http://comics.com/minimum_security/',"start":"2010-02-01","dbfile":dbdir + "comicdb.minimumsecurity.csv"},
+ "moderately_confusedcomicscom":{"name":"Moderately Confused","link":'http://comics.com/moderately_confused/',"start":"2010-02-01","dbfile":dbdir + "comicdb.moderatelyconfused.csv"},
+ "mommacomicscom":{"name":"Momma","link":'http://comics.com/momma/',"start":"2010-02-01","dbfile":dbdir + "comicdb.momma.csv"},
+ "motleycomicscom":{"name":"Motley","link":'http://comics.com/motley_classics/',"start":"2010-02-01","dbfile":dbdir + "comicdb.motley.csv"},
+ "nancycomicscom":{"name":"Nancy","link":'http://comics.com/nancy/',"start":"2010-02-01","dbfile":dbdir + "comicdb.nancy.csv"},
+ "natural_selectioncomicscom":{"name":"Natural Selection","link":'http://comics.com/natural_selection/',"start":"2010-02-01","dbfile":dbdir + "comicdb.naturalselection.csv"},
+ "nest_headscomicscom":{"name":"Nest Heads","link":'http://comics.com/nest_heads/',"start":"2010-02-01","dbfile":dbdir + "comicdb.nestheads.csv"},
+ "off_the_markcomicscom":{"name":"Off the mark","link":"http://comics.com/off_the_mark/","start":"2010-02-01","dbfile":dbdir + "comicdb.offthemark.csv"},
+ "on_a_claire_daycomicscom":{"name":"On A Claire Day","link":'http://comics.com/on_a_claire_day/',"start":"2010-02-01","dbfile":dbdir + "comicdb.onaclaireday.csv"},
+ "one_big_happycomicscom":{"name":"One Big Happy","link":'http://comics.com/one_big_happy_classics/',"start":"2010-02-01","dbfile":dbdir + "comicdb.onebighappy.csv"},
+ "over_the_hedgecomicscom":{"name":"Over the Hedge","link":'http://comics.com/over_the_hedge/',"start":"2010-02-01","dbfile":dbdir + "comicdb.overthehedge.csv"},
+ "pc_and_pixelcomicscom":{"name":"PC and Pixel","link":'http://comics.com/pc_and_pixel/',"start":"2010-02-01","dbfile":dbdir + "comicdb.pcandpixel.csv"},
+ "peanutscomicscom":{"name":"Peanuts","link":"http://comics.com/peanuts/","start":"2010-02-01","dbfile":dbdir + "comicdb.peanuts.csv"},
+ "pearls_before_swinecomicscom":{"name":"Pearls Before Swine","link":'http://comics.com/pearls_before_swine/',"start":"2010-02-01","dbfile":dbdir + "comicdb.pearlsbeforeswine.csv"},
"phd":{"name":"PHD Comics","link":"http://www.phdcomics.com/","start":1240,"dbfile":dbdir + "comicdb.phd.csv"},
- "dilbert":{"name":"Dilbert","link":"http://dilbert.com/","start":"2009-01-01","dbfile":dbdir + "comicdb.dilbert.csv"},
- "cyanide":{"name":"C and H","link":"http://explosm.com/","start":"1920","dbfile":dbdir + "comicdb.cyanide.csv"},
+ "picklescomicscom":{"name":"Pickles","link":'http://comics.com/pickles/',"start":"2010-02-01","dbfile":dbdir + "comicdb.pickles.csv"},
+ "prickly_citycomicscom":{"name":"Prickly City","link":'http://comics.com/prickly_city/',"start":"2010-02-01","dbfile":dbdir + "comicdb.pricklycity.csv"},
+ "raising_duncancomicscom":{"name":"Raising Duncan","link":'http://comics.com/raising_duncan_classics/',"start":"2010-02-01","dbfile":dbdir + "comicdb.raisingduncan.csv"},
+ "reality_checkcomicscom":{"name":"Reality Check","link":'http://comics.com/reality_check/',"start":"2010-02-01","dbfile":dbdir + "comicdb.realitycheck.csv"},
+ "rednrovercomicscom":{"name":"Red and Rover","link":'http://comics.com/red&rover/',"start":"2010-02-01","dbfile":dbdir + "comicdb.rednrover.csv"},
+ "rip_haywirecomicscom":{"name":"Rip Haywire","link":'http://comics.com/rip_haywire/',"start":"2010-02-01","dbfile":dbdir + "comicdb.riphaywire.csv"},
+ "ripleys_believe_it_or_notcomicscom":{"name":"Ripley's Believe it or not","link":'http://comics.com/ripleys_believe_it_or_not/',"start":"2010-02-01","dbfile":dbdir + "comicdb.ripleysbelieveitornot.csv"},
+ "rose_is_rosecomicscom":{"name":"Rose Is Rose","link":'http://comics.com/rose_is_rose/',"start":"2010-02-01","dbfile":dbdir + "comicdb.roseisrose.csv"},
+ "rubescomicscom":{"name":"Rubes","link":'http://comics.com/rubes/',"start":"2010-02-01","dbfile":dbdir + "comicdb.rubes.csv"},
+ "rudy_parkcomicscom":{"name":"Rudy Park","link":'http://comics.com/rudy_park/',"start":"2010-02-01","dbfile":dbdir + "comicdb.rudypark.csv"},
+ "scary_garycomicscom":{"name":"Scary Gary","link":'http://comics.com/scary_gary/',"start":"2010-02-01","dbfile":dbdir + "comicdb.scarygary.csv"},
+ "shirley_and_soncomicscom":{"name":"Shirley and Son","link":'http://comics.com/shirley_and_son_classics/',"start":"2010-02-01","dbfile":dbdir + "comicdb.shirleyandson.csv"},
+ "sinfest":{"name":"Sinfest","link":"http://sinfest.com/","start":3400,"dbfile":dbdir + "comicdb.sinfest.csv"},
+ "soup_to_nutzcomicscom":{"name":"Soup to Nutz","link":'http://comics.com/soup_to_nutz/',"start":"2010-02-01","dbfile":dbdir + "comicdb.souptonutz.csv"},
+ "speed_bumpcomicscom":{"name":"Speed Bump","link":'http://comics.com/speed_bump/',"start":"2010-02-01","dbfile":dbdir + "comicdb.speedbump.csv"},
+ "spot_the_frogcomicscom":{"name":"Spot the Frog","link":'http://comics.com/spot_the_frog/',"start":"2010-02-01","dbfile":dbdir + "comicdb.spotthefrog.csv"},
+ "state_of_the_unioncomicscom":{"name":"State of the Union","link":'http://comics.com/state_of_the_union/',"start":"2010-02-01","dbfile":dbdir + "comicdb.stateoftheunion.csv"},
+ "strange_brewcomicscom":{"name":"Strange Brew","link":'http://comics.com/strange_brew/',"start":"2010-02-01","dbfile":dbdir + "comicdb.strangebrew.csv"},
+ "tarzancomicscom":{"name":"Tarzan","link":'http://comics.com/tarzan_classics/',"start":"2010-02-01","dbfile":dbdir + "comicdb.tarzan.csv"},
+ "thats_lifecomicscom":{"name":"That's Life","link":'http://comics.com/thats_life/',"start":"2010-02-01","dbfile":dbdir + "comicdb.thatslife.csv"},
+ "the_barncomicscom":{"name":"The Barn","link":'http://comics.com/the_barn/',"start":"2010-02-01","dbfile":dbdir + "comicdb.the_barn.csv"},
+ "the_born_losercomicscom":{"name":"The Born Loser","link":'http://comics.com/the_born_loser/',"start":"2010-02-01","dbfile":dbdir + "comicdb.thebornloser.csv"},
+ "the_bucketscomicscom":{"name":"The Buckets","link":'http://comics.com/the_buckets/',"start":"2010-02-01","dbfile":dbdir + "comicdb.thebuckets.csv"},
+ "the_dinette_setcomicscom":{"name":"The Dinette Set","link":'http://comics.com/the_dinette_set/',"start":"2010-02-01","dbfile":dbdir + "comicdb.thedinetteset.csv"},
+ "the_grizzwellscomicscom":{"name":"The Grizzwells","link":'http://comics.com/the_grizzwells/',"start":"2010-02-01","dbfile":dbdir + "comicdb.thegrizzwells.csv"},
+ "the_humble_stumblecomicscom":{"name":"The Humble Stumble","link":'http://comics.com/the_humble_stumble/',"start":"2010-02-01","dbfile":dbdir + "comicdb.thehumblestumble.csv"},
+ "the_knight_lifecomicscom":{"name":"The Knight Life","link":'http://comics.com/the_knight_life/',"start":"2010-02-01","dbfile":dbdir + "comicdb.theknightlife.csv"},
+ "the_meaning_of_lilacomicscom":{"name":"The Meaning of Lila","link":'http://comics.com/the_meaning_of_lila/',"start":"2010-02-01","dbfile":dbdir + "comicdb.themeaningoflila.csv"},
+ "the_other_coastcomicscom":{"name":"The Other Coast","link":'http://comics.com/the_other_coast/',"start":"2010-02-01","dbfile":dbdir + "comicdb.theothercoast.csv"},
+ "the_sunshine_clubcomicscom":{"name":"the Sunshine Club","link":'http://comics.com/the_sunshine_club/',"start":"2010-02-01","dbfile":dbdir + "comicdb.thesunshineclub.csv"},
+ "unstrange_phenomenacomicscom":{"name":"Unstrange Phenomena","link":'http://comics.com/unstrange_phenomena/',"start":"2010-02-01","dbfile":dbdir + "comicdb.unstrangephenomena.csv"},
+ "watch_your_headcomicscom":{"name":"Watch Your Head","link":'http://comics.com/watch_your_head/',"start":"2010-02-01","dbfile":dbdir + "comicdb.watchyourhead.csv"},
+ "wizard_of_idcomicscom":{"name":"Wizard of Id","link":'http://comics.com/wizard_of_id/',"start":"2010-02-01","dbfile":dbdir + "comicdb.wizardofid.csv"},
+ "working_dazecomicscom":{"name":"Working Daze","link":'http://comics.com/working_daze/',"start":"2010-02-01","dbfile":dbdir + "comicdb.workingdaze.csv"},
+ "working_it_outcomicscom":{"name":"Working It Out","link":'http://comics.com/working_it_out/',"start":"2010-02-01","dbfile":dbdir + "comicdb.workingitout.csv"},
+ "wulff":{"name":"Wulffmorgenthaler","link":"http://wulffmorgenthaler.com/","start":"edd3411b-96ca-4d93-bd5f-0cf1deb67c8a","dbfile":dbdir + "comicdb.wulff.csv"},
+ "xkcd":{"name":"xkcd","link":"http://xkcd.org/","start":666,"dbfile":dbdir + "comicdb.xkcd.csv"},
+ "zack_hillcomicscom":{"name":"Zack Hill","link":'http://comics.com/zack_hill/',"start":"2010-02-01","dbfile":dbdir + "comicdb.zackhill.csv"},
+
+
+
}
defaults = {'width':480,'height':230}
#defaults = {'width':480,'height':240}
self.fetch_newer(self.comic, fetchid)
self.refresh()
if len(self.db) < (self.currentcomic + 1):
+ print "Empty db"
self.currentcomic = len(self.db) - 1
if len(self.db) > 0:
fetchid = self.db[self.currentcomic]['id']
print "created dir"
except:
print "comic db creation failed on mkdir"
- urllib.urlretrieve(dbrow['url'], filename)
+ try:
+ f = open(filename, "wb")
+ print "writing to " + filename
+ req = urllib2.Request(dbrow['url'])
+ req.add_header('Referer', dbrow['link'])
+ tmpimg = urllib2.urlopen(req)
+ f.write(tmpimg.read())
+ tmpimg = None
+ f.close()
+
+# urllib.urlretrieve(dbrow['url'], filename)
+ except:
+ if os.path.isfile(filename):
+ os.remove(filename)
print "success\n"
return filename
for row in dbr:
self.db.insert(0,row)
dbf.close()
+ if len(self.db) == 0:
+ self.fetch_earlier(self.comic, self.start)
+ dbf = open(self.dbfile, 'r')
+ dbr = csv.DictReader(dbf)
+ self.db = []
+ for row in dbr:
+ self.db.insert(0,row)
+ dbf.close()
# fetch earlier
def fetch_earlier(self, comic, earliest):
print "fetch before, " + comic + " earliest"
- if comic == "cyanide":
+ print comic[-9:]
+ if comic == "cyanide" or comic == "wulff" or comic == "babyblues" or comic[-9:] == "comicscom":
+ print "getting get_prev_id..."
comicid = self.get_prev_id(comic,earliest)
+ if not comicid:
+ print "already at first comic"
+ return
print "got " + comicid + " as the one before current..."
- elif len(earliest) == 10:
+ elif len(str(earliest)) == 10:
# date id.
dt = string.split(earliest, "-")
d = datetime.date(int(dt[0]),int(dt[1]),int(dt[2]))
def fetch_newer(self, comic, newest):
- if comic == "cyanide":
+ if comic == "cyanide" or comic == "wulff" or comic == "babyblues" or comic[-9:] == "comicscom":
comicid = self.get_next_id(comic,newest)
+ if not comicid:
+ print "already at last comic"
+ return
elif len(newest) == 10:
# date id.
dt = string.split(newest, "-")
def get_next_id(self, comic, number):
+ if comic == 'babyblues':
+ link = "http://www.babyblues.com/archive/index.php?formname=getstrip&GoToDay=" + str(number)
+ print "link: " + link
+ try:
+ f = urllib2.urlopen(link)
+ hcode = f.code
+ except:
+ hcode = 404
+ print "got hcode = " + str(hcode) + "\n"
+ if (hcode != 200):
+ return False
+ else:
+ print "checking next Babyblues date.."
+ s = f.read()
+ f.close()
+ # title:
+ splt = string.split(s, 'nextStripLink', 1)
+ if len(splt) < 2:
+ print "no 'prev' found"
+ return False
+ else:
+ next = splt[1][48:58]
+ print "got next: " + next
+
+ return next
+
if comic == 'cyanide':
link = "http://www.explosm.net/comics/" + str(number) + "/"
print "link: " + link
else:
print "got next: " + splt[0]
return splt[0]
+ if comic == 'wulff':
+ link = "http://wulffmorgenthaler.com/default.aspx?id=" + number
+ print "link: " + link
+ try:
+ f = urllib2.urlopen(link)
+ hcode = f.code
+ except:
+ hcode = 404
+ print "got hcode = " + str(hcode) + "\n"
+ if (hcode != 200):
+ return False
+ else:
+ s = f.read()
+ f.close()
+ # title:
+ splt = string.split(s, '<a href="/default.aspx?id=', 1)
+ if not splt[1][36:69] == '" id="ctl00_content_Strip1_aPrev"':
+ print "no previous found"
+ if not splt[1][36:69] == '" id="ctl00_content_Strip1_aNext"':
+ print "no next found!"
+ return False
+ else:
+ splt = string.split(splt[1], '<a href="/default.aspx?id=', 1)
+ if len(splt) < 2:
+ print "no next found! At newest comic!"
+ return False
+ if not splt[1][36:69] == '" id="ctl00_content_Strip1_aNext"':
+ print "no next found!"
+ return False
+ else:
+ return splt[1][:36]
+ elif comic[-9:] == "comicscom":
+ link = comics[comic]['link'] + str(number) + "/"
+ print "link: " + link
+ try:
+ f = urllib2.urlopen(link)
+ hcode = f.code
+ except:
+ hcode = 404
+ print "got hcode = " + str(hcode) + "\n"
+ if (hcode != 200):
+ return False
+ else:
+ s = f.read()
+ f.close()
+ return self.parse_comics_com(comic, s, 'next')
+
+
+
+
+
def get_prev_id(self, comic, number):
+ if comic == 'babyblues':
+ link = 'http://www.babyblues.com/archive/index.php?formname=getstrip&GoToDay=' + str(number)
+ print "link: " + link
+ try:
+ f = urllib2.urlopen(link)
+ hcode = f.code
+ except:
+ hcode = 404
+ print "got hcode = " + str(hcode) + "\n"
+ if (hcode != 200):
+ return False
+ else:
+ print "checking prev Babyblues date.."
+ s = f.read()
+ f.close()
+ # title:
+ splt = string.split(s, 'prevStripLink', 1)
+ if len(splt) < 2:
+ print "no 'prev' found"
+ exit()
+ return False
+ else:
+ prev = splt[1][48:58]
+ print "got previous: " + prev
+ return prev
+
if comic == 'cyanide':
link = "http://www.explosm.net/comics/" + str(number) + "/"
print "link: " + link
return False
else:
print "got previous: " + splt[0]
- return splt[0]
-
+ return splt[0]
+ elif comic == 'wulff':
+ link = "http://wulffmorgenthaler.com/default.aspx?id=" + number
+ print "link: " + link
+ try:
+ f = urllib2.urlopen(link)
+ hcode = f.code
+ except:
+ hcode = 404
+ print "got hcode = " + str(hcode) + "\n"
+ if (hcode != 200):
+ return False
+ else:
+ s = f.read()
+ f.close()
+ # title:
+ splt = string.split(s, '<a href="/default.aspx?id=', 1)
+ if not splt[1][36:69] == '" id="ctl00_content_Strip1_aPrev"':
+ print splt[1][36:69]
+ print splt[1][:36]
+ print "no previous found"
+ return False
+ else:
+ return splt[1][:36]
+ elif comic[-9:] == "comicscom":
+ print "fetch prev, still in function..."
+ link = comics[comic]['link'] + str(number) + "/"
+ print "link: " + link
+ try:
+ f = urllib2.urlopen(link)
+ hcode = f.code
+ except:
+ hcode = 404
+ print "got hcode = " + str(hcode) + "\n"
+ if (hcode != 200):
+ return False
+ else:
+ s = f.read()
+ f.close()
+ return self.parse_comics_com(comic, s, 'prev')
link = "http://www.phdcomics.com/comics/archive.php?comicid=" + str(number)
elif comic == 'cyanide':
link = "http://www.explosm.net/comics/" + str(number) + "/"
+ elif comic == 'wulff':
+ link = "http://wulffmorgenthaler.com/default.aspx?id=" + str(number)
+ elif comic == 'babyblues':
+ link = "http://www.babyblues.com/archive/index.php?formname=getstrip&GoToDay=" + str(number)
+ elif comic[-9:] == 'comicscom':
+ link = comics[comic]['link'] + str(number) + "/"
else:
return False
s = f.read()
f.close()
# This should be done with regex but...
- splt = string.split(s, "<h3>Image URL (for hotlinking/embedding): ", 1)
- splt2 = string.split(splt[1], "</h3>", 1)
- url = splt2[0]
- splt = string.split(splt[0], "<h1>", 1)
- splt = string.split(splt[1], "</h1>", 1)
-
+ splt = string.split(s, 'png" title="', 1)
+ splt = string.split(splt[1], '" alt="', 1)
title = splt[0]
+ splt = string.split(splt[1], "<h3>Image URL (for hotlinking/embedding): ", 1)
+ splt = string.split(splt[1], "</h3>", 1)
+ url = splt[0]
+
elif comic == 'sinfest':
s = f.read()
filename = splt2[1]
irow = [comic,number,link,url,filename,title,next,prev]
return irow
-
+
+
+ elif comic == 'babyblues':
+ # babyblues is type .gif
+ s = f.read()
+ f.close()
+ splt = string.split(s, 'http://est.rbma.com/content/Baby_Blues?date=', 1)
+ flnm = splt[1][:8]
+ # check filename...
+ datesplt = string.split(number, "/")
+ flncheck = datesplt[2] + datesplt[0] + datesplt[1]
+ if not flnm == flncheck:
+ print "incorrect filename, end of line. Break break break!"
+ return False
+ url = 'http://est.rbma.com/content/Baby_Blues?date=' + flnm
+ title = number + " (2 weeks delay)"
+ filename = flnm + ".gif"
+ irow = [comic,number,link,url,filename,title]
+ return irow
+
+
+ elif comic == 'wulff':
+ # wulf is type .gif
+ s = f.read()
+ f.close()
+ splt = string.split(s, '<img src="pics/dates/month/', 1)
+ if not splt[1][1:2] == ".":
+ title = splt[1][:2]
+ else:
+ title = "0" + splt[1][:1]
+ splt = string.split(splt[1], '<img src="pics/dates/day/', 1)
+ if not splt[1][1:2] == ".":
+ title = title + "-" + splt[1][:2]
+ else:
+ title = title + "-0" + splt[1][:1]
+ splt = string.split(splt[1], '<img src="pics/dates/year/', 1)
+ title = splt[1][:4] + "-" + title
+ # title done...
+ splt = string.split(splt[1], 'id="ctl00_content_Strip1_imgStrip" class="strip" src="striphandler.ashx?stripid=', 1)
+ number = splt[1][:36]
+ url = "http://wulffmorgenthaler.com/striphandler.ashx?stripid=" + number
+ filename = number + ".gif"
+ irow = [comic,number,link,url,filename,title]
+ return irow
+ elif comic[-9:] == 'comicscom':
+ url = self.parse_comics_com(comic, f.read(), 'url')
+ f.close()
+ title = number
+
splt2 = string.rsplit(url, "/", 1)
filename = splt2[1]
- if filename == self.db[0]['filename']:
- print "already exists! Break break break!"
- return False
+ try:
+ if filename == self.db[0]['filename']:
+ print "already exists! Break break break!"
+ return False
+ except:
+ pass
irow = [comic,number,link,url,filename,title]
return irow
+    def parse_comics_com(self, comicname, source, get_what):
+        """Extract data from a comics.com strip page.
+
+        comicname -- key into the comics dict (used for logging only)
+        source    -- raw HTML of the strip page
+        get_what  -- 'url'  -> CDN URL of the full-size strip image
+                     'next' -> date id (YYYY-MM-DD) of the next strip
+                     'prev' -> date id (YYYY-MM-DD) of the previous strip
+        Returns the requested string, or False when it cannot be found.
+        """
+        # The page embeds a rel attribute such as:
+        # rel="{StripID:309486, ComicID:36, Type:'Comic', DateStrip:'2010-02-06', URL_Comic: 'girls&sports', Link_Previous: '/girls&sports/2010-02-05/', Link_Next: '/girls&sports/2010-02-07/'}"
+        if get_what == "url":
+            # StripID maps directly onto the CDN image filename.
+            splt = string.split(source, 'StripID:', 1)
+            splt = string.split(splt[1], ', ComicID:', 1)
+            url = "http://c0389161.cdn.cloudfiles.rackspacecloud.com/dyn/str_strip/" + splt[0] + ".full.gif"
+            return url
+        elif get_what == "next":
+            splt = string.split(source, "Link_Next: '", 1)
+            splt = string.split(splt[1], "'", 1)
+            # The link looks like '/name/2010-02-07/'; splitting on '/'
+            # puts the date id in field 2.
+            splt = string.split(splt[0], "/")
+            if len(splt) < 3:
+                return False
+            else:
+                return splt[2]
+        elif get_what == "prev":
+            print "getting comics com prev: " + comicname
+            splt = string.split(source, "Link_Previous: '", 1)
+            splt = string.split(splt[1], "'", 1)
+            splt = string.split(splt[0], "/")
+            # BUG FIX: string.split never returns an empty list, so the old
+            # "len(splt) == 0" guard could not fire and splt[2] below could
+            # raise IndexError on a malformed link; guard on the field we
+            # actually use, mirroring the 'next' branch.
+            if len(splt) < 3:
+                return False
+            else:
+                # Only accept a well-formed YYYY-MM-DD date id.
+                if len(splt[2]) == 10:
+                    return splt[2]
+                else:
+                    return False
# ------------UI
class ComicHomePlugin(hildondesktop.HomePluginItem):
self.keypointer = 0
if len(self.active_comics) < 1:
self.active_comics = ['xkcd']
+
self.db = ComicDb(self.active_comics[self.keypointer])
self.comicname = comics[self.active_comics[self.keypointer]]['name']
self.set_name = "comicwidget"
self.label = gtk.Label()
self.label.set_alignment(xalign = 0, yalign = 0.5)
self.label.set_use_markup(True)
- titl = str(self.db.db[self.db.currentcomic]['title'])
- if len(titl) > 22:
- titl = titl[:19] + "..."
- self.label.set_markup('<span size="12000" face="monospace"> <b>' + self.comicname + '</b> ' + str(self.db.db[self.db.currentcomic]['id']) + '\n ' + titl + '</span>')
+ self.label.set_markup(self.get_markup())
self.e_goweb = gtk.EventBox()
self.e_goweb.set_name('goweb')
- self.e_goweb.set_size_request(220, 48)
+ self.e_goweb.set_size_request(self.sizes['width'] - 250, 48)
# e_open is the eventbox for the image. Does nothing, but it's convenient to have it.
# it will change to be a cairo thing when I have the time.
self.e_open = gtk.EventBox()
self.vbox.pack_end(self.hbox,False,False,0)
self.hbox.pack_start(self.e_switch,False,False,0)
- self.hbox.pack_start(self.e_goweb,False,False,10)
+ self.hbox.pack_start(self.e_goweb,False,False,5)
self.hbox.pack_end(self.e_next,False,False,0)
self.hbox.pack_end(self.e_prev,False,False,0)
self.hbox.pack_end(self.e_down,False,False,0)
print "pressed: " + func
if func == 'up':
self.e_open.remove(self.comic_image)
- self.imgvpos = self.imgvpos - 80
+ self.imgvpos = self.imgvpos - ((self.sizes['height'] - 48) / 2.1)
if self.imgvpos < 0:
self.imgvpos = 0
self.comic_image = self.get_resized_pixmap(self.db.get_comic(), self.imgvpos)
elif func == 'down':
self.e_open.remove(self.comic_image)
- self.imgvpos = self.imgvpos + 80
+ self.imgvpos = self.imgvpos + ((self.sizes['height'] - 48) / 2.1)
self.comic_image = self.get_resized_pixmap(self.db.get_comic(), self.imgvpos)
self.e_open.add(self.comic_image)
self.e_open.show_all()
titl = str(self.db.db[self.db.currentcomic]['title'])
if len(titl) > 22:
titl = titl[:19] + "..."
- self.label.set_markup('<span size="12000" face="monospace"> <b>' + self.comicname + '</b> ' + str(self.db.db[self.db.currentcomic]['id']) + '\n ' + titl + '</span>')
+ self.label.set_markup(self.get_markup())
self.e_goweb.add(self.label)
self.e_goweb.show_all()
titl = str(self.db.db[self.db.currentcomic]['title'])
if len(titl) > 22:
titl = titl[:19] + "..."
- self.label.set_markup('<span size="12000" face="monospace"> <b>' + self.comicname + '</b> ' + str(self.db.db[self.db.currentcomic]['id']) + '\n ' + titl + '</span>')
+ self.label.set_markup(self.get_markup())
self.e_goweb.add(self.label)
self.e_goweb.show_all()
titl = str(self.db.db[self.db.currentcomic]['title'])
if len(titl) > 22:
titl = titl[:19] + "..."
- self.label.set_markup('<span size="12000" face="monospace"> <b>' + self.comicname + '</b> ' + str(self.db.db[self.db.currentcomic]['id']) + '\n ' + titl + '</span>')
+ self.label.set_markup(self.get_markup())
self.e_goweb.add(self.label)
self.e_goweb.show_all()
self.e_open.add(self.comic_image)
print "error storing settings"
exit()
dbw = csv.writer(dbf)
- dbw.writerow(comics.keys())
- print comics.keys()
+ dbw.writerow(defaultcomics)
dbf.close()
- ret = comics.keys()
+ ret = defaultcomics
return ret
-
+    def get_markup(self):
+        """Build the Pango markup for the widget's header label.
+
+        Shows the bold comic name plus the current strip's id on the first
+        line and the strip title on the second.
+        """
+        # When name + id fit in under 20 characters show both; otherwise
+        # drop the id so the first line does not overflow the label.
+        if len(self.comicname + " " + str(self.db.db[self.db.currentcomic]['id'])) < 20:
+            return '<span size="12000" face="arial"> <b>' + self.comicname + '</b> ' + str(self.db.db[self.db.currentcomic]['id']) + '\n ' + str(self.db.db[self.db.currentcomic]['title']) + '</span>'
+        else:
+            return '<span size="12000" face="arial"> <b>' + self.comicname + '</b>' + '\n ' + str(self.db.db[self.db.currentcomic]['title']) + '</span>'
+
#check if settings file exists
dialog.run()
dialog.destroy()
-    def show_comics(self, widget):
+    def show_comics(self, widget, data = None):
+        """Show the comic-chooser dialog: a pannable treeview listing every
+        known comic; tapping a row toggles it active (see pick_comic)."""
        print "in comics dialog! yay!"
-        dialog = gtk.Dialog("Configure Search Engines", None, gtk.DIALOG_DESTROY_WITH_PARENT | gtk.DIALOG_NO_SEPARATOR)
+        dialog = gtk.Dialog("Choose comics", None, gtk.DIALOG_DESTROY_WITH_PARENT | gtk.DIALOG_NO_SEPARATOR)
        comiclist = comics.keys()
        comiclist.sort()
        buttonlist = {}
        self.connlist = {}
+        # NOTE(review): buttonlist/self.connlist look left over from the old
+        # per-comic-button UI removed below -- candidates for deletion.
+        # ListStore columns: display label, comic id, currently-active flag.
+        self.liststore = gtk.ListStore(str,str,bool)
        for comicid in comiclist:
-            buttonlist[comicid] = hildon.Button(gtk.HILDON_SIZE_AUTO_WIDTH | gtk.HILDON_SIZE_FINGER_HEIGHT, hildon.BUTTON_ARRANGEMENT_VERTICAL)
-            print buttonlist[comicid].get_name()
-
-            print buttonlist[comicid].set_alignment(0, 0.5, 0, 0.5)
            active = False
            for acomic in self.active_comics:
                if acomic == comicid:
                    active = True
                    break
            if active:
-                buttonlist[comicid].set_text(comics[comicid]['name'] + " *", comicid + ": click to remove")
-                self.connlist[comicid] = buttonlist[comicid].connect("clicked", self.remove_comic)
+                self.liststore.append(["* " + comics[comicid]['name'],comicid, active])
            else:
-                buttonlist[comicid].set_text(comics[comicid]['name'], comicid + ": click to add")
-                self.connlist[comicid] = buttonlist[comicid].connect("clicked", self.add_comic)
-            dialog.vbox.pack_start(buttonlist[comicid], True, True, 0)
+                self.liststore.append(["  " + comics[comicid]['name'],comicid, active])
+        self.treeview = hildon.GtkTreeView(gtk.HILDON_UI_MODE_EDIT)
+        self.treeview.set_model(self.liststore)
+        self.treeview.set_reorderable(True)
+        # Single text column rendering the label (column 0) of the model.
+        tvcolumn = gtk.TreeViewColumn('Column 0')
+        self.treeview.append_column(tvcolumn)
+        cell = gtk.CellRendererText()
+        tvcolumn.pack_start(cell, True)
+        tvcolumn.add_attribute(cell, 'text', 0)
+
+        # Wrap the treeview in a finger-scrollable pannable area.
+        ts = hildon.PannableArea()
+
+        ts.set_size_request(-1, 280)
+        ts.add(self.treeview)
+        selection = self.treeview.get_selection()
-
+        dialog.vbox.pack_start(ts,True,True,0)
        dialog.show_all()
+        # Row taps arrive via the selection's "changed" signal.
+        selection.connect("changed", self.pick_comic, comiclist)
        dialog.run()
        dialog.destroy()
- def add_comic(self, widget):
- print "add " + widget.get_title()
- splt = string.split(widget.get_value(), ':', 1)
- comicid = splt[0]
+    def pick_comic(self, widget, data):
+        """Selection-changed handler for the comic chooser list.
+
+        widget -- the gtk.TreeSelection of the chooser treeview
+        data   -- extra signal payload (the sorted comic-id list); unused
+        Toggles the selected comic between active and inactive.
+        """
+        # get_selected() returns (model, iter); iter is None when nothing
+        # is selected.  Liststore columns: 0 = display label ('*'/' ' marker
+        # plus name), 1 = comic id, 2 = currently-active flag.
+        selected = widget.get_selected()
+        if selected == None:
+            return False
+
+        print selected[0].get_value(selected[1],0) + ", " + selected[0].get_value(selected[1],1) + ", " + str(selected[0].get_value(selected[1],2))
+        if selected[0].get_value(selected[1],2):
+            # Currently active: deactivate and swap the '*' marker for a space.
+            self.remove_comic(selected[0].get_value(selected[1],1))
+            splt = selected[0].get_value(selected[1],0)[1:]
+            selected[0].set_value(selected[1],0," " + splt)
+            selected[0].set_value(selected[1],2,False)
+        else:
+            # Currently inactive: activate and mark the row with '*'.
+            self.add_comic(selected[0].get_value(selected[1],1))
+            splt = selected[0].get_value(selected[1],0)[1:]
+            selected[0].set_value(selected[1],0,"*" + splt)
+            selected[0].set_value(selected[1],2,True)
+
+
+ def add_comic(self, comicid):
if os.path.isfile(activecomics) == True:
print "added " + comicid + " to " + str(self.active_comics)
try:
dbw = csv.writer(dbf)
dbw.writerow(self.active_comics)
dbf.close()
- widget.set_text(comics[comicid]['name'] + " *", comicid + ": click to remove")
- widget.disconnect(self.connlist[comicid])
- self.connlist[comicid] = widget.connect("clicked", self.remove_comic)
-
- def remove_comic(self, widget):
- print "remove " + widget.get_title()
- if len(self.active_comics) == 1:
- return
- splt = string.split(widget.get_value(), ':', 1)
- comicid = splt[0]
+
+ def remove_comic(self, comicid):
if os.path.isfile(activecomics) == True:
print "removing " + comicid + " from " + str(self.active_comics)
try:
dbw = csv.writer(dbf)
dbw.writerow(self.active_comics)
dbf.close()
- widget.set_text(comics[comicid]['name'], comicid + ": click to add")
- widget.disconnect(self.connlist[comicid])
- self.connlist[comicid] = widget.connect("clicked", self.add_comic)
+
def get_size_settings(self):
# defaults = {'width':480,'height':230}