[automanga.git] manga/mangafox.py
Commit: Extracted the standard imgstream from mrnet and mangafox and put it in lib.
# Python 2 scraper backend for mangafox.me, built on the shared automanga
# helpers: lib provides the base classes, htcache the page fetching.
# Uses the BeautifulSoup 3.x API.
import urllib, re
import BeautifulSoup, json
import lib, htcache
soup = BeautifulSoup.BeautifulSoup

class page(lib.page):
    def __init__(self, chapter, stack, n, url):
        self.stack = stack
        self.chapter = chapter
        self.volume = self.chapter.volume
        self.manga = self.volume.manga
        self.n = n
        self.id = str(n)
        self.name = u"Page %s" % n
        self.url = url
        self.ciurl = None

    def iurl(self):
        # Fetch the reader page lazily and cache the image URL it embeds.
        if self.ciurl is None:
            page = soup(htcache.fetch(self.url))
            self.ciurl = page.find("div", id="viewer").find("img", id="image")["src"]
        return self.ciurl

    def open(self):
        return lib.stdimgstream(self.iurl())

    def __str__(self):
        return self.name

    def __repr__(self):
        return "<mangafox.page %r.%r.%r.%r>" % (self.manga.name, self.volume.name, self.chapter.name, self.name)

class chapter(lib.pagelist):
    def __init__(self, volume, stack, id, name, url):
        self.stack = stack
        self.volume = volume
        self.manga = volume.manga
        self.id = id
        self.name = name
        self.url = url
        self.cpag = None

    def __getitem__(self, i):
        return self.pages()[i]

    def __len__(self):
        return len(self.pages())

    def pages(self):
        if self.cpag is None:
            # The pager in the top bar ends with an "of N" text node;
            # N is the number of pages in the chapter.
            pg = soup(htcache.fetch(self.url + "1.html"))
            l = pg.find("form", id="top_bar").find("div", attrs={"class": "l"})
            if len(l.contents) != 3:
                raise Exception("parse error: weird page list for %r" % self)
            m = l.contents[2].strip()
            if m[:3] != u"of ":
                raise Exception("parse error: weird page list for %r" % self)
            self.cpag = [page(self, self.stack + [(self, n)], n + 1, self.url + ("%i.html" % (n + 1))) for n in xrange(int(m[3:]))]
        return self.cpag

    def __str__(self):
        return self.name

    def __repr__(self):
        return "<mangafox.chapter %r.%r.%r>" % (self.manga.name, self.volume.name, self.name)

class volume(lib.pagelist):
    def __init__(self, manga, stack, id, name):
        self.stack = stack
        self.manga = manga
        self.id = id
        self.name = name
        self.ch = []

    def __getitem__(self, i):
        return self.ch[i]

    def __len__(self):
        return len(self.ch)

    def __str__(self):
        return self.name

    def __repr__(self):
        return "<mangafox.volume %r.%r>" % (self.manga.name, self.name)

def nextel(el):
    # Skip text nodes and return the next sibling that is an actual tag.
    while True:
        el = el.nextSibling
        if isinstance(el, BeautifulSoup.Tag):
            return el

class manga(lib.manga):
    # Matches chapter URLs that already end in the chapter directory,
    # e.g. .../c012.5/
    cure = re.compile(r"/c[\d.]+/$")

    def __init__(self, lib, id, name, url):
        self.lib = lib
        self.id = id
        self.name = name
        self.url = url
        self.cvol = None
        self.stack = []

    def __getitem__(self, i):
        return self.vols()[i]

    def __len__(self):
        return len(self.vols())

    def vols(self):
        if self.cvol is None:
            page = soup(htcache.fetch(self.url))
            vls = page.find("div", id="chapters").findAll("div", attrs={"class": "slide"})
            cvol = []
            for i, vn in enumerate(reversed(vls)):
                name = vn.find("h3", attrs={"class": "volume"}).contents[0].strip()
                vid = name.encode("utf8")
                vol = volume(self, [(self, i)], vid, name)
                cls = nextel(vn)
                if cls.name != u"ul" or cls["class"] != u"chlist":
                    raise Exception("parse error: weird volume list for %r" % self)
                for o, ch in enumerate(reversed(cls.findAll("li"))):
                    n = ch.div.h3 or ch.div.h4
                    name = n.a.string
                    chid = name.encode("utf8")
                    # A span with the "title" class carries the chapter
                    # title; append it to the chapter name.
                    for span in ch("span"):
                        try:
                            if u" title " in (u" " + span["class"] + u" "):
                                name += " " + span.string
                        except KeyError:
                            pass
                    # Normalize chapter URLs to the chapter directory,
                    # stripping a trailing 1.html.
                    url = n.a["href"].encode("us-ascii")
                    if url[-7:] == "/1.html":
                        url = url[:-6]
                    elif self.cure.search(url) is not None:
                        pass
                    else:
                        raise Exception("parse error: unexpected chapter URL for %r: %s" % (self, url))
                    vol.ch.append(chapter(vol, vol.stack + [(vol, o)], chid, name, url))
                cvol.append(vol)
            self.cvol = cvol
        return self.cvol

    def __str__(self):
        return self.name

    def __repr__(self):
        return "<mangafox.manga %r>" % self.name

def libalphacmp(a, b):
    return cmp(a.upper(), b.upper())

class library(lib.library):
    def __init__(self):
        self.base = "http://mangafox.me/"

    def alphapage(self, pno):
        # Parse one page of the alphabetically sorted directory into manga objects.
        page = soup(htcache.fetch(self.base + ("directory/%i.htm?az" % pno)))
        ls = page.find("div", id="mangalist").find("ul", attrs={"class": "list"}).findAll("li")
        ret = []
        ubase = self.base + "manga/"
        for m in ls:
            t = m.find("div", attrs={"class": "manga_text"}).find("a", attrs={"class": "title"})
            name = t.string
            url = t["href"].encode("us-ascii")
            if url[:len(ubase)] != ubase or url.find('/', len(ubase)) != (len(url) - 1):
                raise Exception("parse error: unexpected manga URL for %r: %s" % (name, url))
            ret.append(manga(self, url[len(ubase):-1], name, url))
        return ret

    def alphapages(self):
        # Total number of pages in the alphabetical directory, read from its navigation bar.
        page = soup(htcache.fetch(self.base + "directory/?az"))
        ls = page.find("div", id="mangalist").find("div", id="nav").find("ul").findAll("li")
        return int(ls[-2].find("a").string)

    def byname(self, prefix):
        if not isinstance(prefix, unicode):
            prefix = prefix.decode("utf8")
        # Binary-search the sorted directory for a page whose first and
        # last entries bracket the prefix.
        l = 1
        r = self.alphapages()
        while True:
            if l > r:
                return
            c = l + ((r + 1 - l) // 2)
            ls = self.alphapage(c)
            if libalphacmp(ls[0].name, prefix) > 0:
                r = c - 1
            elif libalphacmp(ls[-1].name, prefix) < 0:
                l = c + 1
            else:
                pno = c
                break
        # Skip to the first entry at or after the prefix, then yield
        # matches, continuing onto following pages as long as they match.
        i = 0
        while i < len(ls):
            m = ls[i]
            if libalphacmp(m.name, prefix) >= 0:
                break
            i += 1
        while True:
            while i < len(ls):
                m = ls[i]
                if not m.name[:len(prefix)].upper() == prefix.upper():
                    return
                yield m
                i += 1
            pno += 1
            ls = self.alphapage(pno)
            i = 0

    def search(self, expr):
        # The ajax search endpoint returns JSON rows of the form
        # (num, name, id, genres, author).
        resp = urllib.urlopen(self.base + ("ajax/search.php?term=%s" % urllib.quote(expr)))
        try:
            rc = json.load(resp)
        finally:
            resp.close()
        return [manga(self, id.encode("utf8"), name, self.base + ("manga/%s/" % id.encode("utf8"))) for num, name, id, genres, author in rc]

    def byid(self, id):
        url = self.base + ("manga/%s/" % id)
        page = soup(htcache.fetch(url))
        if page.find("div", id="title") is None:
            # Assume we got the search page
            raise KeyError(id)
        name = page.find("div", id="series_info").find("div", attrs={"class": "cover"}).img["alt"]
        return manga(self, id, name, url)

    def __iter__(self):
        raise NotImplementedError("mangafox iterator")
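
# Usage sketch (not part of the original module): shows how the classes above
# compose.  It needs the automanga helper modules (lib, htcache) importable
# and live network access to mangafox.me; the search term is illustrative only.
if __name__ == "__main__":
    lbr = library()
    for m in lbr.search("one piece")[:1]:
        print repr(m)
        if len(m) > 0 and len(m[0]) > 0 and len(m[0][0]) > 0:
            pg = m[0][0][0]             # first volume -> chapter -> page
            print "first page image:", pg.iurl()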