Use "standard" user-agent for Batoto.
diff --git a/manga/batoto.py b/manga/batoto.py
index b626dc1..8c25789 100644
@@ -38,7 +38,9 @@ def nextel(el):
             return el
 
 def fetchreader(lib, readerid, page):
-    pg = soupify(lib.sess.fetch(lib.base + "areader?" + urllib.parse.urlencode({"id": readerid, "p": str(page)}),
+    pg = soupify(lib.sess.fetch(lib.base + "areader?" + urllib.parse.urlencode({"id": readerid,
+                                                                                "p": str(page),
+                                                                                "supress_webtoon": "t"}),
                                 headers={"Referer": "http://bato.to/reader"}))
     return pg
 
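The added supress_webtoon parameter (the misspelling matches the query key the diff sends to Batoto's reader endpoint) presumably keeps the reader in paged mode so each page can be fetched on its own. A minimal sketch of the URL fetchreader now builds, with hypothetical base and readerid values:

    # Sketch of the request URL built above; the base URL and readerid
    # value here are hypothetical examples, not taken from the diff.
    import urllib.parse

    base = "http://bato.to/"
    readerid = "abcdef0123456789"  # hypothetical reader hash
    page = 1
    url = base + "areader?" + urllib.parse.urlencode({"id": readerid,
                                                      "p": str(page),
                                                      "supress_webtoon": "t"})
    print(url)  # http://bato.to/areader?id=abcdef0123456789&p=1&supress_webtoon=t
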
@@ -286,6 +288,7 @@ class session(object):
         values["rememberMe"] = "1"
         values["anonymous"] = "1"
         req = urllib.request.Request(form["action"], urllib.parse.urlencode(values).encode("ascii"))
+        req.add_header("User-Agent", self.useragent)
         with self.web.open(req) as hs:
             page = soupify(hs.read())
         for resp in page.findAll("p", attrs={"class": "message"}):
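
urllib identifies itself as Python-urllib/3.x unless a User-Agent is set explicitly, which is what the added add_header call overrides for the login POST. A standalone sketch of the same pattern, with a placeholder action URL and form values; the real code goes through the session's own opener (self.web) rather than urlopen:

    # Sketch of a POST carrying an explicit User-Agent, as in the login
    # code above; the URL and the form fields are placeholders.
    import urllib.parse
    import urllib.request

    values = {"rememberMe": "1", "anonymous": "1"}  # plus the real login fields
    req = urllib.request.Request("http://bato.to/forums/index.php",  # placeholder action URL
                                 urllib.parse.urlencode(values).encode("ascii"))
    req.add_header("User-Agent", "Mozilla/5.0 ...")  # abbreviated; the session.useragent string
    with urllib.request.urlopen(req) as hs:
        page = hs.read()
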
@@ -298,8 +301,10 @@ class session(object):
     def open(self, url):
         return self.web.open(url)
 
+    useragent = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.22 (KHTML, like Gecko) Chrome/25.0.1364.160 Safari/537.22"
     def fetch(self, url, headers=None):
         req = urllib.request.Request(url)
+        req.add_header("User-Agent", self.useragent)
         if headers is not None:
             for k, v in headers.items():
                 req.add_header(k, v)
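
Setting the header on each Request works; urllib also lets an opener carry default headers via OpenerDirector.addheaders, which are applied to any HTTP request that does not already set the header. A sketch of that alternative, assuming self.web is a standard urllib opener:

    # Alternative sketch: install the User-Agent once on the opener, so
    # every request made through it gets the header by default.
    import urllib.request

    ua = ("Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.22"
          " (KHTML, like Gecko) Chrome/25.0.1364.160 Safari/537.22")
    opener = urllib.request.build_opener()
    opener.addheaders = [("User-Agent", ua)]
    with opener.open("http://bato.to/") as hs:
        data = hs.read()

Headers set explicitly on a Request still win, since addheaders are only applied when the header is absent.
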
@@ -336,7 +341,9 @@ class library(lib.library):
         while True:
             _pars = dict(pars)
             _pars["p"] = str(p)
-            resp = urllib.request.urlopen(self.base + "search?" + urllib.parse.urlencode(_pars))
+            req = urllib.request.Request(self.base + "search?" + urllib.parse.urlencode(_pars))
+            req.add_header("User-Agent", session.useragent)
+            resp = urllib.request.urlopen(req)
             try:
                 page = soupify(resp.read())
             finally:
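
Because useragent is a plain class attribute on session, the search code can read it as session.useragent without holding a session instance. Note that search uses the module-level urllib.request.urlopen rather than the session opener, so the header has to be attached to the Request by hand. A self-contained sketch of one iteration of the paginated loop, with a stand-in session class and hypothetical search parameters:

    # Sketch of one paginated search request; the search parameters and
    # the stand-in session class are illustrative only.
    import urllib.parse
    import urllib.request

    class session:  # stand-in for the real session class above
        useragent = ("Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.22"
                     " (KHTML, like Gecko) Chrome/25.0.1364.160 Safari/537.22")

    base = "http://bato.to/"    # assumed library base URL
    pars = {"name": "example"}  # hypothetical search parameters
    p = 1
    _pars = dict(pars)
    _pars["p"] = str(p)
    req = urllib.request.Request(base + "search?" + urllib.parse.urlencode(_pars))
    req.add_header("User-Agent", session.useragent)
    resp = urllib.request.urlopen(req)
    try:
        page = resp.read()
    finally:
        resp.close()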