Raise a well-defined error for directories that are probably invalid.
[automanga.git] / manga / mangafox.py
CommitLineData
f3ad0817
FT
1import urllib
2import BeautifulSoup
3import lib, htcache
4soup = BeautifulSoup.BeautifulSoup
5
class imgstream(lib.imgstream):
    """Image byte stream backed by a plain HTTP request.

    Exposes the Content-Type (`ctype`) and Content-Length (`clen`)
    reported by the server, and delegates reading to the underlying
    urllib connection.
    """

    def __init__(self, url):
        # Open the remote image and capture its HTTP metadata up front.
        self.bk = urllib.urlopen(url)
        info = self.bk.info()
        self.ctype = info["Content-Type"]
        self.clen = int(info["Content-Length"])

    def fileno(self):
        # Expose the socket's file descriptor (e.g. for select()).
        return self.bk.fileno()

    def close(self):
        self.bk.close()

    def read(self, sz = None):
        # Read `sz` bytes, or everything remaining when no size is given.
        return self.bk.read() if sz is None else self.bk.read(sz)
23
class page(lib.page):
    """A single page of a MangaFox chapter.

    The direct image URL is scraped lazily from the viewer page and
    cached in `ciurl` so repeated opens do not refetch the HTML.
    """

    def __init__(self, chapter, stack, n, url):
        self.stack = stack
        self.chapter = chapter
        self.volume = chapter.volume
        self.manga = self.volume.manga
        self.n = n
        self.id = str(n)
        self.name = u"Page %s" % n
        self.url = url
        # Cached image URL; populated on first call to iurl().
        self.ciurl = None

    def iurl(self):
        # Resolve (and cache) the direct image URL from the viewer markup.
        if self.ciurl is None:
            doc = soup(htcache.fetch(self.url))
            img = doc.find("div", id="viewer").find("img", id="image")
            self.ciurl = img["src"]
        return self.ciurl

    def open(self):
        # Open a byte stream over the page's image.
        return imgstream(self.iurl())

    def __str__(self):
        return self.name

    def __repr__(self):
        return "<mangafox.page %r.%r.%r.%r>" % (self.manga.name, self.volume.name, self.chapter.name, self.name)
50
class chapter(lib.pagelist):
    """One chapter of a volume; its page list is scraped on first access."""

    def __init__(self, volume, stack, id, name, url):
        self.stack = stack
        self.volume = volume
        self.manga = volume.manga
        self.id = id
        self.name = name
        self.url = url
        # Cached list of page objects; built lazily by pages().
        self.cpag = None

    def __getitem__(self, i):
        return self.pages()[i]

    def __len__(self):
        return len(self.pages())

    def pages(self):
        """Scrape the pager on the chapter's first page to enumerate pages."""
        if self.cpag is None:
            doc = soup(htcache.fetch(self.url + "1.html"))
            pager = doc.find("form", id="top_bar").find("div", attrs={"class": "l"})
            if len(pager.contents) != 3:
                raise Exception("parse error: weird page list for %r" % self)
            # The third node reads "of <N>", giving the page count.
            count = pager.contents[2].strip()
            if count[:3] != u"of ":
                raise Exception("parse error: weird page list for %r" % self)
            self.cpag = [page(self, self.stack + [(self, i)], i + 1, self.url + ("%i.html" % (i + 1)))
                         for i in xrange(int(count[3:]))]
        return self.cpag

    def __str__(self):
        return self.name

    def __repr__(self):
        return "<mangafox.chapter %r.%r.%r>" % (self.manga.name, self.volume.name, self.name)
84
class volume(lib.pagelist):
    """A volume: an ordered list of chapters, populated by manga.vols()."""

    def __init__(self, manga, stack, id, name):
        self.stack = stack
        self.manga = manga
        self.id = id
        self.name = name
        # Chapters are appended by the manga parser, not fetched here.
        self.ch = []

    def __getitem__(self, i):
        return self.ch[i]

    def __len__(self):
        return len(self.ch)

    def __str__(self):
        return self.name

    def __repr__(self):
        return "<mangafox.volume %r.%r>" % (self.manga.name, self.name)
104
def nextel(el):
    """Return the next sibling of `el` that is an actual tag.

    Skips over intervening non-tag nodes (text, comments) in the
    BeautifulSoup parse tree.
    """
    el = el.nextSibling
    while not isinstance(el, BeautifulSoup.Tag):
        el = el.nextSibling
    return el
110
class manga(lib.manga):
    """A manga title on MangaFox; volumes and chapters are scraped lazily."""

    def __init__(self, lib, id, name, url):
        # `lib` here is the owning library instance (shadows the module name).
        self.lib = lib
        self.id = id
        self.name = name
        self.url = url
        # Cached volume list; built on first vols() call.
        self.cvol = None
        self.stack = []

    def __getitem__(self, i):
        return self.vols()[i]

    def __len__(self):
        return len(self.vols())

    def vols(self):
        """Scrape and cache the volume/chapter tree from the title page.

        Volumes and chapters are listed newest-first on the site, so both
        levels are iterated in reverse to get ascending order.  Raises a
        plain Exception on any unexpected page structure.
        """
        if self.cvol is None:
            page = soup(htcache.fetch(self.url))
            vls = page.find("div", id="chapters").findAll("div", attrs={"class": "slide"})
            cvol = []
            for i, vn in enumerate(reversed(vls)):
                # Volume id is the UTF-8 encoded heading text.
                name = vn.find("h3", attrs={"class": "volume"}).contents[0].strip()
                vid = name.encode("utf8")
                vol = volume(self, [(self, i)], vid, name)
                # The chapter list is the next tag sibling: <ul class="chlist">.
                cls = nextel(vn)
                if cls.name != u"ul" or cls["class"] != u"chlist":
                    raise Exception("parse error: weird volume list for %r" % self)
                for o, ch in enumerate(reversed(cls.findAll("li"))):
                    n = ch.div.h3 or ch.div.h4
                    name = n.a.string
                    # Chapter id is taken BEFORE the title suffix is appended below.
                    chid = name.encode("utf8")
                    for span in ch("span"):
                        # Append the chapter's title span, if any; spans without
                        # a class attribute raise KeyError and are skipped.
                        try:
                            if u" title " in (u" " + span["class"] + u" "):
                                name += " " + span.string
                        except KeyError:
                            pass
                    url = n.a["href"].encode("us-ascii")
                    if url[-7:] != "/1.html":
                        raise Exception("parse error: unexpected chapter URL for %r: %s" % (self, url))
                    # Strip the "1.html" suffix, keeping the trailing slash.
                    vol.ch.append(chapter(vol, vol.stack + [(vol, o)], chid, name, url[:-6]))
                cvol.append(vol)
            self.cvol = cvol
        return self.cvol

    def __str__(self):
        return self.name

    def __repr__(self):
        return "<mangafox.manga %r>" % self.name
161
def libalphacmp(a, b):
    """Case-insensitive three-way comparison of two titles.

    Returns a negative, zero, or positive integer as `a` sorts before,
    equal to, or after `b`, ignoring case — the same contract as the
    Python 2 `cmp` builtin this previously relied on.  Using the
    (x > y) - (x < y) idiom keeps the helper self-contained and
    removes the dependence on the Python-2-only builtin.
    """
    au, bu = a.upper(), b.upper()
    return (au > bu) - (au < bu)
164
class library(lib.library):
    """The MangaFox site as a whole: directory paging, prefix search, id lookup."""

    def __init__(self):
        # Root URL; all directory and manga pages are built from this.
        self.base = "http://mangafox.me/"

    def alphapage(self, pno):
        """Fetch one page of the alphabetical directory and return its mangas."""
        page = soup(htcache.fetch(self.base + ("directory/%i.htm?az" % pno)))
        ls = page.find("div", id="mangalist").find("ul", attrs={"class": "list"}).findAll("li")
        ret = []
        ubase = self.base + "manga/"
        for m in ls:
            t = m.find("div", attrs={"class": "manga_text"}).find("a", attrs={"class": "title"})
            name = t.string
            url = t["href"].encode("us-ascii")
            # Expect exactly <base>manga/<id>/ — anything else is a parse error.
            if url[:len(ubase)] != ubase or url.find('/', len(ubase)) != (len(url) - 1):
                raise Exception("parse error: unexpected manga URL for %r: %s" % (name, url))
            ret.append(manga(self, url[len(ubase):-1], name, url))
        return ret

    def alphapages(self):
        """Return the total number of pages in the alphabetical directory."""
        page = soup(htcache.fetch(self.base + "directory/?az"))
        ls = page.find("div", id="mangalist").find("div", id="nav").find("ul").findAll("li")
        # Second-to-last nav entry holds the last page number.
        return int(ls[-2].find("a").string)

    def byname(self, prefix):
        """Yield all mangas whose name starts with `prefix` (case-insensitive).

        Binary-searches the sorted directory pages for one that straddles
        the prefix, then scans forward page by page, yielding matches
        until the first non-matching title.
        """
        if not isinstance(prefix, unicode):
            prefix = prefix.decode("utf8")
        l = 1
        r = self.alphapages()
        while True:
            if l > r:
                # No page can contain the prefix.
                return
            c = l + ((r + 1 - l) // 2)
            ls = self.alphapage(c)
            if libalphacmp(ls[0].name, prefix) > 0:
                r = c - 1
            elif libalphacmp(ls[-1].name, prefix) < 0:
                l = c + 1
            else:
                # Page c's range covers the prefix; start scanning here.
                pno = c
                break
        # Skip entries on this page that sort before the prefix.
        i = 0
        while i < len(ls):
            m = ls[i]
            if libalphacmp(m.name, prefix) >= 0:
                break
            i += 1
        while True:
            while i < len(ls):
                m = ls[i]
                if not m.name[:len(prefix)].upper() == prefix.upper():
                    return
                yield m
                i += 1
            # NOTE(review): if matches run to the very last directory page,
            # this fetches one page past the end — presumably alphapage then
            # fails on the missing page rather than ending cleanly; verify.
            pno += 1
            ls = self.alphapage(pno)
            i = 0

    def byid(self, id):
        """Return the manga with directory id `id`, or raise KeyError."""
        url = self.base + ("manga/%s/" % id)
        page = soup(htcache.fetch(url))
        if page.find("div", id="title") is None:
            # Assume we got the search page
            raise KeyError(id)
        name = page.find("div", id="series_info").find("div", attrs={"class": "cover"}).img["alt"]
        return manga(self, id, name, url)

    def __iter__(self):
        # Full-site iteration is deliberately unsupported.
        raise NotImplementedError("mangafox iterator")