Used the pagetree stacks to extend the pageiter to a general page cursor.
[automanga.git] / manga / mangafox.py
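"""Scraper module for www.mangafox.com, exposing the site's titles as
automanga manga/volume/chapter/page trees."""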
import urllib
import BeautifulSoup
import lib, htcache
soup = BeautifulSoup.BeautifulSoup

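# Thin wrapper around a urllib response object, exposing it through the
# automanga imgstream interface (content type, read, close).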
class imgstream(lib.imgstream):
    def __init__(self, url):
        self.bk = urllib.urlopen(url)
        self.ctype = self.bk.info()["Content-Type"]

    def close(self):
        self.bk.close()

    def read(self, sz = None):
        if sz is None:
            return self.bk.read()
        else:
            return self.bk.read(sz)

class page(lib.page):
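    # ``stack`` is the path from the manga root down to this page, kept as
    # a list of (parent, index) pairs; the pagetree code can walk it as a
    # general page cursor.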
    def __init__(self, chapter, stack, n, url):
        self.stack = stack
        self.chapter = chapter
        self.volume = self.chapter.volume
        self.manga = self.volume.manga
        self.n = n
        self.url = url
        self.ciurl = None

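    # Fetch the reader page lazily and cache the direct image URL it embeds.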
    def iurl(self):
        if self.ciurl is None:
            page = soup(htcache.fetch(self.url))
            self.ciurl = page.find("div", id="viewer").find("img", id="image")["src"]
        return self.ciurl

    def open(self):
        return imgstream(self.iurl())

class chapter(lib.pagelist):
    def __init__(self, volume, stack, name, url):
        self.stack = stack
        self.volume = volume
        self.manga = volume.manga
        self.name = name
        self.url = url
        self.cpag = None

    def __getitem__(self, i):
        return self.pages()[i]

    def __len__(self):
        return len(self.pages())

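    # The page count is read from the "of N" text in the reader's top bar;
    # one page object per reader page is then built up front.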
    def pages(self):
        if self.cpag is None:
            pg = soup(htcache.fetch(self.url + "1.html"))
            l = pg.find("form", id="top_bar").find("div", attrs={"class": "l"})
            if len(l.contents) != 3:
                raise Exception("parse error: weird page list for %r" % self)
            m = l.contents[2].strip()
            if m[:3] != u"of ":
                raise Exception("parse error: weird page list for %r" % self)
            self.cpag = [page(self, self.stack + [(self, n)], n + 1, self.url + ("%i.html" % (n + 1))) for n in xrange(int(m[3:]))]
        return self.cpag

    def __str__(self):
        return self.name

    def __repr__(self):
        return "<mangafox.chapter %r.%r.%r>" % (self.manga.name, self.volume.name, self.name)

class volume(lib.pagelist):
    def __init__(self, manga, stack, name):
        self.stack = stack
        self.manga = manga
        self.name = name
        self.ch = []

    def __getitem__(self, i):
        return self.ch[i]

    def __len__(self):
        return len(self.ch)

    def __str__(self):
        return self.name

    def __repr__(self):
        return "<mangafox.volume %r.%r>" % (self.manga.name, self.name)

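# Skip over whitespace and text nodes to the next sibling that is a real tag.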
def nextel(el):
    while True:
        el = el.nextSibling
        if isinstance(el, BeautifulSoup.Tag):
            return el

class manga(lib.manga):
    def __init__(self, lib, name, url):
        self.lib = lib
        self.name = name
        self.url = url
        self.cvol = None
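        # The manga object is the root of the pagetree, so its path stack is empty.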
        self.stack = []

    def __getitem__(self, i):
        return self.vols()[i]

    def __len__(self):
        return len(self.vols())

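    # The site lists volumes and chapters newest first; both lists are
    # reversed here so that indexes run in reading order.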
    def vols(self):
        if self.cvol is None:
            page = soup(htcache.fetch(self.url))
            vls = page.find("div", id="chapters").findAll("div", attrs={"class": "slide"})
            self.cvol = []
            for i, vn in enumerate(reversed(vls)):
                vol = volume(self, [(self, i)], vn.find("h3", attrs={"class": "volume"}).contents[0].strip())
                cls = nextel(vn)
                if cls.name != u"ul" or cls["class"] != u"chlist":
                    raise Exception("parse error: weird volume list for %r" % self)
                for o, ch in enumerate(reversed(cls.findAll("li"))):
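                    # The heading link gives the chapter's base name; a
                    # sibling <span> whose class contains "title" carries
                    # the chapter's subtitle, which is appended to the name.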
                    n = ch.div.h3 or ch.div.h4
                    name = n.a.string
                    for span in ch("span"):
                        try:
                            if u" title " in (u" " + span["class"] + u" "):
                                name += " " + span.string
                        except KeyError:
                            pass
                    url = n.a["href"].encode("us-ascii")
                    if url[-7:] != "/1.html":
                        raise Exception("parse error: unexpected chapter URL for %r: %s" % (self, url))
                    vol.ch.append(chapter(vol, vol.stack + [(vol, o)], name, url[:-6]))
                self.cvol.append(vol)
        return self.cvol

    def __str__(self):
        return self.name

    def __repr__(self):
        return "<mangafox.manga %r>" % self.name

def libalphacmp(a, b):
    return cmp(a.upper(), b.upper())

class library(lib.library):
    def __init__(self):
        self.base = "http://www.mangafox.com/"

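    # One page of the site's alphabetically sorted ("?az") directory listing.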
    def alphapage(self, pno):
        page = soup(htcache.fetch(self.base + ("directory/%i.htm?az" % pno)))
        ls = page.find("div", id="mangalist").find("ul", attrs={"class": "list"}).findAll("li")
        ret = []
        for m in ls:
            t = m.find("div", attrs={"class": "manga_text"}).find("a", attrs={"class": "title"})
            name = t.string
            url = t["href"].encode("us-ascii")
            ret.append(manga(self, name, url))
        return ret

    def alphapages(self):
        page = soup(htcache.fetch(self.base + "directory/?az"))
        ls = page.find("div", id="mangalist").find("div", id="nav").find("ul").findAll("li")
        return int(ls[-2].find("a").string)

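    # Binary-search the alphabetical directory pages for the one that could
    # contain ``prefix``, then scan forward, yielding every manga whose name
    # starts with the prefix (compared case-insensitively).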
    def byname(self, prefix):
        if not isinstance(prefix, unicode):
            prefix = prefix.decode("utf8")
        l = 1
        r = self.alphapages()
        while True:
            if l > r:
                return
            c = l + ((r + 1 - l) // 2)
            ls = self.alphapage(c)
            if libalphacmp(ls[0].name, prefix) > 0:
                r = c - 1
            elif libalphacmp(ls[-1].name, prefix) < 0:
                l = c + 1
            else:
                pno = c
                break
        i = 0
        while i < len(ls):
            m = ls[i]
            if libalphacmp(m.name, prefix) >= 0:
                break
            i += 1
        while True:
            while i < len(ls):
                m = ls[i]
                if not m.name[:len(prefix)].upper() == prefix.upper():
                    return
                yield m
                i += 1
            pno += 1
            ls = self.alphapage(pno)
            i = 0

    def __iter__(self):
        raise NotImplementedError("mangafox iterator")
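
# The ``stack`` attributes built above record the path from the manga root
# down to each node as (parent, index) pairs.  The function below is NOT
# part of this module; it is a minimal sketch, assuming only the classes
# above and automanga's lib.page base class, of how such a stack can act as
# a general page cursor that steps to the next page across chapter and
# volume boundaries.
def nextpage(pg):
    for node, idx in reversed(pg.stack):
        if idx + 1 < len(node):
            nxt = node[idx + 1]
            # Descend along first children until an actual page is reached.
            while not isinstance(nxt, lib.page):
                nxt = nxt[0]
            return nxt
    return None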