[automanga.git] / manga / mangafox.py
Commit f3ad0817: Documented the behavior of the basic library classes.
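
# MangaFox backend for automanga: implements the lib.library, lib.manga,
# lib.pagelist and lib.page interfaces on top of www.mangafox.com, fetching
# HTML through htcache and parsing it with BeautifulSoup.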
import urllib
import BeautifulSoup
import lib, htcache
soup = BeautifulSoup.BeautifulSoup

class imgstream(object):
    def __init__(self, url):
        self.bk = urllib.urlopen(url)
        self.ctype = self.bk.info()["Content-Type"]

    def close(self):
        self.bk.close()

    def read(self, sz = None):
        if sz is None:
            return self.bk.read()
        else:
            return self.bk.read(sz)

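# A minimal usage sketch of imgstream (the URL is a placeholder): read the
# image in fixed-size chunks and make sure the HTTP connection is closed.
#
#     strm = imgstream(some_image_url)
#     try:
#         while True:
#             blk = strm.read(8192)
#             if blk == "":
#                 break
#             # ... process blk ...
#     finally:
#         strm.close()
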
class page(lib.page):
    def __init__(self, chapter, n, url):
        self.chapter = chapter
        self.volume = self.chapter.volume
        self.manga = self.volume.manga
        self.n = n
        self.url = url
        self.ciurl = None

    def iurl(self):
        # The image URL is resolved lazily from the viewer page and cached.
        if self.ciurl is None:
            page = soup(htcache.fetch(self.url))
            self.ciurl = page.find("div", id="viewer").find("img", id="image")["src"]
        return self.ciurl

    def open(self):
        return imgstream(self.iurl())

class chapter(lib.pagelist):
    def __init__(self, volume, name, url):
        self.volume = volume
        self.manga = volume.manga
        self.name = name
        self.url = url
        self.cpag = None

    def __getitem__(self, i):
        return self.pages()[i]

    def __len__(self):
        return len(self.pages())

    def pages(self):
        if self.cpag is None:
            # The page count is read from the "of N" text in the top bar
            # of the chapter's first page.
            pg = soup(htcache.fetch(self.url + "1.html"))
            l = pg.find("form", id="top_bar").find("div", attrs={"class": "l"})
            if len(l.contents) != 3:
                raise Exception("parse error: weird page list for %r" % self)
            m = l.contents[2].strip()
            if m[:3] != u"of ":
                raise Exception("parse error: weird page list for %r" % self)
            self.cpag = [page(self, n + 1, self.url + ("%i.html" % (n + 1))) for n in xrange(int(m[3:]))]
        return self.cpag

    def __str__(self):
        return self.name

    def __repr__(self):
        return "<mangafox.chapter %r.%r.%r>" % (self.manga.name, self.volume.name, self.name)

class volume(lib.pagelist):
    def __init__(self, manga, name):
        self.manga = manga
        self.name = name
        self.ch = []

    def __getitem__(self, i):
        return self.ch[i]

    def __len__(self):
        return len(self.ch)

    def __str__(self):
        return self.name

    def __repr__(self):
        return "<mangafox.volume %r.%r>" % (self.manga.name, self.name)

def nextel(el):
    # Skip over whitespace and other non-tag siblings to the next tag.
    while True:
        el = el.nextSibling
        if isinstance(el, BeautifulSoup.Tag):
            return el

class manga(lib.manga):
    def __init__(self, lib, name, url):
        self.lib = lib
        self.name = name
        self.url = url
        self.cvol = None

    def __getitem__(self, i):
        return self.vols()[i]

    def __len__(self):
        return len(self.vols())

    def vols(self):
        if self.cvol is None:
            page = soup(htcache.fetch(self.url))
            vls = page.find("div", id="chapters").findAll("div", attrs={"class": "slide"})
            self.cvol = []
            for i in xrange(len(vls)):
                vol = volume(self, vls[i].find("h3", attrs={"class": "volume"}).contents[0].strip())
                cls = nextel(vls[i])
                if cls.name != u"ul" or cls["class"] != u"chlist":
                    raise Exception("parse error: weird volume list for %r" % self)
                for ch in cls.findAll("li"):
                    n = ch.div.h3 or ch.div.h4
                    name = n.a.string
                    for span in ch("span"):
                        try:
                            if u" title " in (u" " + span["class"] + u" "):
                                name += " " + span.string
                        except KeyError:
                            pass
                    url = n.a["href"].encode("us-ascii")
                    if url[-7:] != "/1.html":
                        raise Exception("parse error: unexpected chapter URL for %r: %s" % (self, url))
                    # insert(0, ...) reverses the order in which the site lists
                    # the chapters and volumes.
                    vol.ch.insert(0, chapter(vol, name, url[:-6]))
                self.cvol.insert(0, vol)
        return self.cvol

    def __str__(self):
        return self.name

    def __repr__(self):
        return "<mangafox.manga %r>" % self.name

def libalphacmp(a, b):
    return cmp(a.upper(), b.upper())

class library(lib.library):
    def __init__(self):
        self.base = "http://www.mangafox.com/"

    def alphapage(self, pno):
        page = soup(htcache.fetch(self.base + ("directory/%i.htm?az" % pno)))
        ls = page.find("div", id="mangalist").find("ul", attrs={"class": "list"}).findAll("li")
        ret = []
        for m in ls:
            t = m.find("div", attrs={"class": "manga_text"}).find("a", attrs={"class": "title"})
            name = t.string
            url = t["href"].encode("us-ascii")
            ret.append(manga(self, name, url))
        return ret

    def alphapages(self):
        page = soup(htcache.fetch(self.base + "directory/?az"))
        ls = page.find("div", id="mangalist").find("div", id="nav").find("ul").findAll("li")
        return int(ls[-2].find("a").string)

    def byname(self, prefix):
        if not isinstance(prefix, unicode):
            prefix = prefix.decode("utf8")
        # Binary-search the alphabetically sorted directory pages for the one
        # whose range of titles covers the prefix.
        l = 1
        r = self.alphapages()
        while True:
            if l > r:
                return
            c = l + ((r + 1 - l) // 2)
            ls = self.alphapage(c)
            if libalphacmp(ls[0].name, prefix) > 0:
                r = c - 1
            elif libalphacmp(ls[-1].name, prefix) < 0:
                l = c + 1
            else:
                pno = c
                break
        # Skip to the first entry at or after the prefix on that page, then
        # yield matches, moving on to subsequent pages while the prefix holds.
        i = 0
        while i < len(ls):
            m = ls[i]
            if libalphacmp(m.name, prefix) >= 0:
                break
            i += 1
        while True:
            while i < len(ls):
                m = ls[i]
                if not m.name[:len(prefix)].upper() == prefix.upper():
                    return
                yield m
                i += 1
            pno += 1
            ls = self.alphapage(pno)
            i = 0

    def __iter__(self):
        raise NotImplementedError("mangafox iterator")
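
# A minimal usage sketch of the library class (the search prefix is made up);
# byname() fetches one or more directory pages, so this hits the network:
#
#     lbr = library()
#     for m in lbr.byname("some prefix"):
#         ch = m[0][0]           # first volume, first chapter
#         strm = ch[0].open()    # imgstream for the first page
#         try:
#             data = strm.read()
#         finally:
#             strm.close()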