# manga/mangafox.py — mangafox backend for automanga.
# History note: added alternative mangafox chapter URLs.
import urllib

import BeautifulSoup

import lib
import htcache

# Shorthand for constructing a BeautifulSoup parse tree.
soup = BeautifulSoup.BeautifulSoup
class imgstream(lib.imgstream):
    """A stream of image data fetched over HTTP from a mangafox image URL.

    Exposes `ctype` (the Content-Type header) and `clen` (the integer
    Content-Length) of the response.
    """

    def __init__(self, url):
        self.bk = urllib.urlopen(url)
        ok = False
        try:
            code = self.bk.getcode()
            if code != 200:
                raise IOError("Server error: " + str(code))
            headers = self.bk.info()
            self.ctype = headers["Content-Type"]
            self.clen = int(headers["Content-Length"])
            ok = True
        finally:
            # Don't leak the connection if header validation fails.
            if not ok:
                self.bk.close()

    def fileno(self):
        return self.bk.fileno()

    def close(self):
        self.bk.close()

    def read(self, sz=None):
        """Read `sz` bytes from the stream, or everything if `sz` is None."""
        return self.bk.read() if sz is None else self.bk.read(sz)
class page(lib.page):
    """A single page of a chapter, able to resolve and open its image."""

    def __init__(self, chapter, stack, n, url):
        self.stack = stack
        self.chapter = chapter
        self.volume = chapter.volume
        self.manga = self.volume.manga
        self.n = n
        self.id = str(n)
        self.name = u"Page %s" % n
        self.url = url
        # Cached image URL; resolved lazily by iurl().
        self.ciurl = None

    def iurl(self):
        """Return the URL of the page's image, fetching it on first use."""
        if self.ciurl is None:
            doc = soup(htcache.fetch(self.url))
            self.ciurl = doc.find("div", id="viewer").find("img", id="image")["src"]
        return self.ciurl

    def open(self):
        """Open an imgstream for this page's image data."""
        return imgstream(self.iurl())

    def __str__(self):
        return self.name

    def __repr__(self):
        return "<mangafox.page %r.%r.%r.%r>" % (self.manga.name, self.volume.name, self.chapter.name, self.name)
class chapter(lib.pagelist):
    """A single chapter: a lazily-parsed, ordered list of pages."""

    def __init__(self, volume, stack, id, name, url):
        self.stack = stack
        self.volume = volume
        self.manga = volume.manga
        self.id = id
        self.name = name
        self.url = url
        # Cached page list; filled in by pages().
        self.cpag = None

    def __getitem__(self, i):
        return self.pages()[i]

    def __len__(self):
        return len(self.pages())

    def pages(self):
        """Parse and cache the page list from the chapter's first page."""
        if self.cpag is None:
            doc = soup(htcache.fetch(self.url + "1.html"))
            sel = doc.find("form", id="top_bar").find("div", attrs={"class": "l"})
            if len(sel.contents) != 3:
                raise Exception("parse error: weird page list for %r" % self)
            total = sel.contents[2].strip()
            # The trailing text is expected to read "of N", N being the page count.
            if total[:3] != u"of ":
                raise Exception("parse error: weird page list for %r" % self)
            npages = int(total[3:])
            self.cpag = [page(self, self.stack + [(self, n)], n + 1,
                              self.url + ("%i.html" % (n + 1)))
                         for n in xrange(npages)]
        return self.cpag

    def __str__(self):
        return self.name

    def __repr__(self):
        return "<mangafox.chapter %r.%r.%r>" % (self.manga.name, self.volume.name, self.name)
class volume(lib.pagelist):
    """A single volume: an ordered collection of chapters."""

    def __init__(self, manga, stack, id, name):
        self.stack = stack
        self.manga = manga
        self.id = id
        self.name = name
        # Chapter list, populated by manga.vols() during parsing.
        self.ch = []

    def __getitem__(self, i):
        return self.ch[i]

    def __len__(self):
        return len(self.ch)

    def __str__(self):
        return self.name

    def __repr__(self):
        return "<mangafox.volume %r.%r>" % (self.manga.name, self.name)
def nextel(el):
    """Return the next sibling of `el` that is a tag, skipping text nodes."""
    sib = el.nextSibling
    while not isinstance(sib, BeautifulSoup.Tag):
        sib = sib.nextSibling
    return sib
class manga(lib.manga):
    """A single manga: a lazily-parsed, ordered list of volumes."""

    def __init__(self, lib, id, name, url):
        self.lib = lib
        self.id = id
        self.name = name
        self.url = url
        # Cached volume list; filled in by vols().
        self.cvol = None
        self.stack = []

    def __getitem__(self, i):
        return self.vols()[i]

    def __len__(self):
        return len(self.vols())

    def vols(self):
        """Parse and cache the volume/chapter tree from the manga's front page."""
        if self.cvol is None:
            doc = soup(htcache.fetch(self.url))
            slides = doc.find("div", id="chapters").findAll("div", attrs={"class": "slide"})
            vlist = []
            # The page lists volumes and chapters newest-first; reverse both.
            for i, slide in enumerate(reversed(slides)):
                vname = slide.find("h3", attrs={"class": "volume"}).contents[0].strip()
                vol = volume(self, [(self, i)], vname.encode("utf8"), vname)
                chls = nextel(slide)
                if chls.name != u"ul" or chls["class"] != u"chlist":
                    raise Exception("parse error: weird volume list for %r" % self)
                for o, chel in enumerate(reversed(chls.findAll("li"))):
                    hdr = chel.div.h3 or chel.div.h4
                    name = hdr.a.string
                    chid = name.encode("utf8")
                    # Append any subtitle spans (class containing "title") to the name.
                    for span in chel("span"):
                        try:
                            if u" title " in (u" " + span["class"] + u" "):
                                name += " " + span.string
                        except KeyError:
                            pass
                    url = hdr.a["href"].encode("us-ascii")
                    # Chapter URLs appear either with an explicit first-page part
                    # ("…/1.html", which is stripped to the directory) or as a
                    # bare directory ("…/"); anything else is a parse error.
                    if url[-7:] == "/1.html":
                        url = url[:-6]
                    elif url[-1:] == "/":
                        pass
                    else:
                        raise Exception("parse error: unexpected chapter URL for %r: %s" % (self, url))
                    vol.ch.append(chapter(vol, vol.stack + [(vol, o)], chid, name, url))
                vlist.append(vol)
            self.cvol = vlist
        return self.cvol

    def __str__(self):
        return self.name

    def __repr__(self):
        return "<mangafox.manga %r>" % self.name
def libalphacmp(a, b):
    """Compare two manga names case-insensitively, cmp-style.

    Returns a negative, zero or positive integer as `a` sorts before,
    equal to or after `b`.  Implemented with the `(a > b) - (a < b)`
    idiom instead of the Python-2-only `cmp` builtin; the result is
    identical for all callers (which only test the sign).
    """
    a = a.upper()
    b = b.upper()
    return (a > b) - (a < b)
class library(lib.library):
    """The mangafox.me library, accessed through its alphabetic directory."""

    def __init__(self):
        self.base = "http://mangafox.me/"

    def alphapage(self, pno):
        """Fetch page `pno` of the alphabetic directory and return its mangas."""
        doc = soup(htcache.fetch(self.base + ("directory/%i.htm?az" % pno)))
        entries = doc.find("div", id="mangalist").find("ul", attrs={"class": "list"}).findAll("li")
        ubase = self.base + "manga/"
        ret = []
        for ent in entries:
            link = ent.find("div", attrs={"class": "manga_text"}).find("a", attrs={"class": "title"})
            name = link.string
            url = link["href"].encode("us-ascii")
            # Manga URLs are expected to look like BASE/manga/ID/.
            if url[:len(ubase)] != ubase or url.find('/', len(ubase)) != (len(url) - 1):
                raise Exception("parse error: unexpected manga URL for %r: %s" % (name, url))
            ret.append(manga(self, url[len(ubase):-1], name, url))
        return ret

    def alphapages(self):
        """Return the number of pages in the alphabetic directory."""
        doc = soup(htcache.fetch(self.base + "directory/?az"))
        nav = doc.find("div", id="mangalist").find("div", id="nav").find("ul").findAll("li")
        return int(nav[-2].find("a").string)

    def byname(self, prefix):
        """Iterate the mangas whose names start with `prefix`, case-insensitively."""
        if not isinstance(prefix, unicode):
            prefix = prefix.decode("utf8")
        # Binary-search the directory pages for one whose name range
        # could contain the prefix.
        lo = 1
        hi = self.alphapages()
        while True:
            if lo > hi:
                return
            mid = lo + ((hi + 1 - lo) // 2)
            ls = self.alphapage(mid)
            if libalphacmp(ls[0].name, prefix) > 0:
                hi = mid - 1
            elif libalphacmp(ls[-1].name, prefix) < 0:
                lo = mid + 1
            else:
                pno = mid
                break
        # Skip the leading entries that sort strictly before the prefix.
        i = 0
        while i < len(ls):
            if libalphacmp(ls[i].name, prefix) >= 0:
                break
            i += 1
        # Yield matches, continuing onto subsequent pages until the
        # names no longer match the prefix.
        while True:
            while i < len(ls):
                m = ls[i]
                if not m.name[:len(prefix)].upper() == prefix.upper():
                    return
                yield m
                i += 1
            pno += 1
            ls = self.alphapage(pno)
            i = 0

    def byid(self, id):
        """Return the manga with directory ID `id`; raise KeyError if unknown."""
        url = self.base + ("manga/%s/" % id)
        doc = soup(htcache.fetch(url))
        if doc.find("div", id="title") is None:
            # Assume we got the search page
            raise KeyError(id)
        name = doc.find("div", id="series_info").find("div", attrs={"class": "cover"}).img["alt"]
        return manga(self, id, name, url)

    def __iter__(self):
        raise NotImplementedError("mangafox iterator")