# local: Treat dots/periods as digits in destructuring directories.
# [automanga.git] / manga / nelo.py
import json
import urllib
import urllib.request
from urllib.parse import urljoin

import bs4

from . import lib, htcache
# Short alias for the parser class used throughout this module.
soup = bs4.BeautifulSoup

def soupify(cont):
    """Parse HTML content into a BeautifulSoup tree using the stdlib html.parser backend."""
    return soup(cont, "html.parser")
6
class page(lib.page):
    """A single page image belonging to a chapter."""

    def __init__(self, chapter, stack, n, url):
        self.chapter = chapter
        self.manga = chapter.manga
        self.stack = stack
        self.iurl = url
        self.n = n
        self.id = str(n)
        self.name = f"Page {n + 1}"

    def open(self):
        """Open an image stream for this page, passing the chapter URL as referer."""
        return lib.stdimgstream(self.iurl, referer=self.chapter.url)

    def __str__(self):
        return self.name

    def __repr__(self):
        return f"<nelo.page {self.manga.name!r}.{self.chapter.name!r}.{self.name!r}>"
25
class chapter(lib.pagelist):
    """One chapter of a manga; lazily resolves its list of page images."""

    def __init__(self, manga, stack, id, name, url):
        self.stack = stack
        self.manga = manga
        self.id = id
        self.name = name
        self.url = url
        self.cpag = None  # page cache, filled on first call to pages()

    def __getitem__(self, idx):
        return self.pages()[idx]

    def __len__(self):
        return len(self.pages())

    def pages(self):
        """Fetch and parse the chapter page on first use; return cached page objects."""
        if self.cpag is None:
            tree = soupify(htcache.fetch(self.url))
            reader = tree.find("div", attrs={"class": "container-chapter-reader"})
            self.cpag = [page(self, self.stack + [(self, idx)], idx,
                              urljoin(self.url, img["src"]))
                         for idx, img in enumerate(reader.findAll("img"))]
        return self.cpag

    def __str__(self):
        return self.name

    def __repr__(self):
        return "<nelo.chapter %r.%r>" % (self.manga.name, self.name)
56
class manga(lib.manga):
    """A manga title on the site, holding a lazily-built chapter list."""

    def __init__(self, lib, id, name, url):
        self.lib = lib
        self.id = id
        self.name = name
        self.url = url
        self.cch = None  # chapter cache, filled on first call to ch()
        self.stack = []

    def __getitem__(self, i):
        return self.ch()[i]

    def __len__(self):
        return len(self.ch())

    def ch(self):
        """Fetch and parse the manga info page on first use; return the chapter list.

        Raises Exception if a chapter link does not have the expected
        .../<manga-id>/<chapter-id> path shape.
        """
        if self.cch is None:
            # Renamed from `page` to avoid shadowing the module-level page
            # class (matches the `pg` convention in chapter.pages()).
            pg = soupify(htcache.fetch(self.url))
            cls = pg.find("ul", attrs={"class": "row-content-chapter"})
            cch = []
            # The site lists chapters newest-first; reverse for ascending order.
            for row in reversed(cls.findAll("li", attrs={"class": "a-h"})):
                link = row.find("a", attrs={"class": "chapter-name"})
                url = urljoin(self.url, link["href"])
                # Destructure the URL path: the last component is the chapter
                # id, and the one before it must match this manga's id.
                p1 = url.rfind("/")
                # End bound is p1 (exclusive), so a slash immediately before
                # p1 (empty id segment) is found and rejected by the check.
                p2 = url.rfind("/", 0, p1)
                if p1 < 0 or p2 < 0 or url[p2 + 1 : p1] != self.id:
                    raise Exception("unexpected chapter url: %s" % (url,))
                cid = url[p1 + 1:]
                if len(cid) < 1:
                    raise Exception("unexpected chapter url: %s" % (url,))
                name = link.string
                cch.append(chapter(self, [(self, len(cch))], cid, name, url))
            self.cch = cch
        return self.cch

    def __str__(self):
        return self.name

    def __repr__(self):
        return "<nelo.manga %r>" % self.name
97
class library(lib.library):
    """Top-level accessor for the Manganelo site."""

    def __init__(self):
        self.base = "https://manganelo.com/"

    def byid(self, id):
        """Return a manga object for the given URL id.

        Raises KeyError if the fetched page does not look like a manga
        info page (missing the expected info divs or title).
        """
        url = urljoin(self.base + "manga/", id)
        # Renamed from `page` to avoid shadowing the module-level page class.
        pg = soupify(htcache.fetch(url))
        div1 = pg.find("div", attrs={"class": "panel-story-info"})
        if div1 is None: raise KeyError(id)
        div2 = div1.find("div", attrs={"class": "story-info-right"})
        if div2 is None: raise KeyError(id)
        name = div2.h1
        if name is None: raise KeyError(id)
        name = name.string
        return manga(self, id, name, url)

    def search(self, expr):
        """Query the site's search endpoint and yield matching manga objects.

        Results whose id no longer resolves (byid raises KeyError) are
        silently skipped, keeping the search best-effort.
        """
        # Fix: the module never imported urllib.request (only urllib and
        # urllib.parse), so urllib.request.Request would raise
        # AttributeError; submodules must be imported explicitly.
        import urllib.request
        values = {"searchword": expr}
        req = urllib.request.Request(self.base + "getstorysearchjson",
                                     urllib.parse.urlencode(values).encode("ascii"),
                                     headers={"User-Agent": "automanga/1"})
        with urllib.request.urlopen(req) as resp:
            rc = json.loads(resp.read().decode("utf-8"))
        for obj in rc:
            if "nameunsigned" in obj:
                try:
                    yield self.byid(obj["nameunsigned"])
                except KeyError:
                    pass