X-Git-Url: http://dolda2000.com/gitweb/?p=automanga.git;a=blobdiff_plain;f=manga%2Fmangafox.py;h=30329c2db8170961d9148845e452a9f26700e6b9;hp=e06acfa2a2ee4b54055f2ed2bc430e58cb7f4805;hb=d2f58cfc0a7804bb3aad1cbe190c6b83a2a5cde3;hpb=f3ad0817587482b5a726db4c2f82072e191355e1

diff --git a/manga/mangafox.py b/manga/mangafox.py
index e06acfa..30329c2 100644
--- a/manga/mangafox.py
+++ b/manga/mangafox.py
@@ -3,20 +3,26 @@ import BeautifulSoup
 import lib, htcache
 soup = BeautifulSoup.BeautifulSoup
 
-class imgstream(object):
+class imgstream(lib.imgstream):
     def __init__(self, url):
         self.bk = urllib.urlopen(url)
-        self.ctype = self.bk.info()["Content-Type"]
+        ok = False
+        try:
+            if self.bk.getcode() != 200:
+                raise IOError("Server error: " + str(self.bk.getcode()))
+            self.ctype = self.bk.info()["Content-Type"]
+            self.clen = int(self.bk.info()["Content-Length"])
+            ok = True
+        finally:
+            if not ok:
+                self.bk.close()
+
+    def fileno(self):
+        return self.bk.fileno()
 
     def close(self):
         self.bk.close()
 
-    def __enter__(self):
-        return self
-
-    def __exit__(self, *exc_info):
-        self.close()
-
     def read(self, sz = None):
         if sz is None:
             return self.bk.read()
@@ -24,11 +30,14 @@
             return self.bk.read(sz)
 
 class page(lib.page):
-    def __init__(self, chapter, n, url):
+    def __init__(self, chapter, stack, n, url):
+        self.stack = stack
         self.chapter = chapter
         self.volume = self.chapter.volume
         self.manga = self.volume.manga
         self.n = n
+        self.id = str(n)
+        self.name = u"Page %s" % n
         self.url = url
         self.ciurl = None
 
@@ -41,10 +50,18 @@
     def open(self):
         return imgstream(self.iurl())
 
+    def __str__(self):
+        return self.name
+
+    def __repr__(self):
+        return "<mangafox page %s.%s.%s.%s>" % (self.manga.name, self.volume.name, self.chapter.name, self.name)
+
 class chapter(lib.pagelist):
-    def __init__(self, volume, name, url):
+    def __init__(self, volume, stack, id, name, url):
+        self.stack = stack
         self.volume = volume
         self.manga = volume.manga
+        self.id = id
         self.name = name
         self.url = url
         self.cpag = None
@@ -64,7 +81,7 @@
             m = l.contents[2].strip()
             if m[:3] != u"of ":
                 raise Exception("parse error: weird page list for %r" % self)
-            self.cpag = [page(self, n + 1, self.url + ("%i.html" % (n + 1))) for n in xrange(int(m[3:]))]
+            self.cpag = [page(self, self.stack + [(self, n)], n + 1, self.url + ("%i.html" % (n + 1))) for n in xrange(int(m[3:]))]
         return self.cpag
 
     def __str__(self):
@@ -74,8 +91,10 @@
         return "<mangafox chapter %s.%s.%s>" % (self.manga.name, self.volume.name, self.name)
 
 class volume(lib.pagelist):
-    def __init__(self, manga, name):
+    def __init__(self, manga, stack, id, name):
+        self.stack = stack
         self.manga = manga
+        self.id = id
         self.name = name
         self.ch = []
 
@@ -98,11 +117,13 @@ def nextel(el):
             return el
 
 class manga(lib.manga):
-    def __init__(self, lib, name, url):
+    def __init__(self, lib, id, name, url):
         self.lib = lib
+        self.id = id
         self.name = name
         self.url = url
         self.cvol = None
+        self.stack = []
 
     def __getitem__(self, i):
         return self.vols()[i]
@@ -114,15 +135,18 @@
         if self.cvol is None:
             page = soup(htcache.fetch(self.url))
             vls = page.find("div", id="chapters").findAll("div", attrs={"class": "slide"})
-            self.cvol = []
-            for i in xrange(len(vls)):
-                vol = volume(self, vls[i].find("h3", attrs={"class": "volume"}).contents[0].strip())
-                cls = nextel(vls[i])
+            cvol = []
+            for i, vn in enumerate(reversed(vls)):
+                name = vn.find("h3", attrs={"class": "volume"}).contents[0].strip()
+                vid = name.encode("utf8")
+                vol = volume(self, [(self, i)], vid, name)
+                cls = nextel(vn)
                 if cls.name != u"ul" or cls["class"] != u"chlist":
                     raise Exception("parse error: weird volume list for %r" % self)
-                for ch in cls.findAll("li"):
+                for o, ch in enumerate(reversed(cls.findAll("li"))):
                     n = ch.div.h3 or ch.div.h4
                     name = n.a.string
+                    chid = name.encode("utf8")
                     for span in ch("span"):
                         try:
                             if u" title " in (u" " + span["class"] + u" "):
@@ -130,10 +154,15 @@
                         except KeyError:
                             pass
                     url = n.a["href"].encode("us-ascii")
-                    if url[-7:] != "/1.html":
+                    if url[-7:] == "/1.html":
+                        url = url[:-6]
+                    elif url[-1:] == "/":
+                        pass
+                    else:
                         raise Exception("parse error: unexpected chapter URL for %r: %s" % (self, url))
-                    vol.ch.insert(0, chapter(vol, name, url[:-6]))
-                self.cvol.insert(0, vol)
+                    vol.ch.append(chapter(vol, vol.stack + [(vol, o)], chid, name, url))
+                cvol.append(vol)
+            self.cvol = cvol
         return self.cvol
 
     def __str__(self):
@@ -147,17 +176,20 @@ def libalphacmp(a, b):
 
 class library(lib.library):
     def __init__(self):
-        self.base = "http://www.mangafox.com/"
+        self.base = "http://mangafox.me/"
 
     def alphapage(self, pno):
         page = soup(htcache.fetch(self.base + ("directory/%i.htm?az" % pno)))
         ls = page.find("div", id="mangalist").find("ul", attrs={"class": "list"}).findAll("li")
         ret = []
+        ubase = self.base + "manga/"
         for m in ls:
             t = m.find("div", attrs={"class": "manga_text"}).find("a", attrs={"class": "title"})
             name = t.string
             url = t["href"].encode("us-ascii")
-            ret.append(manga(self, name, url))
+            if url[:len(ubase)] != ubase or url.find('/', len(ubase)) != (len(url) - 1):
+                raise Exception("parse error: unexpected manga URL for %r: %s" % (name, url))
+            ret.append(manga(self, url[len(ubase):-1], name, url))
         return ret
 
     def alphapages(self):
@@ -198,3 +230,15 @@
                 pno += 1
                 ls = self.alphapage(pno)
                 i = 0
+
+    def byid(self, id):
+        url = self.base + ("manga/%s/" % id)
+        page = soup(htcache.fetch(url))
+        if page.find("div", id="title") is None:
+            # Assume we got the search page
+            raise KeyError(id)
+        name = page.find("div", id="series_info").find("div", attrs={"class": "cover"}).img["alt"]
+        return manga(self, id, name, url)
+
+    def __iter__(self):
+        raise NotImplementedError("mangafox iterator")
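
A brief, hedged usage sketch follows (not part of the patch): it exercises the interfaces this commit introduces, namely library.byid(), the new id/stack attributes, and the extended imgstream. The import path, the manga id "bleach", and the traversal helpers (vols(), .ch, pagelist indexing) are assumptions drawn from the surrounding module rather than anything this diff shows in full.

# Hedged usage sketch (Python 2, matching the module); illustrative only.
# The id "bleach" and the package-style import are assumptions.
from manga import mangafox

lib = mangafox.library()
mng = lib.byid("bleach")            # hypothetical id; raises KeyError if the title page is missing
print mng.id, mng.name, mng.stack   # a manga's stack starts out as []

vol = mng[0]                        # manga.__getitem__ -> self.vols()[i]
ch = vol.ch[0]                      # volumes collect their chapters in .ch
pg = ch[0]                          # pagelist indexing, assumed from lib.pagelist
print pg.id, pg.name                # e.g. "1", u"Page 1"

strm = pg.open()                    # imgstream; now raises IOError on a non-200 response
try:
    print strm.ctype, strm.clen     # Content-Type and Content-Length headers
    data = strm.read()
finally:
    strm.close()

The stack attribute added throughout records the (container, index) path down from the manga, which is why volume, chapter and page constructions pass along values like vol.stack + [(vol, o)].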