From b6425713a88e6ab3e71324118ba91ae98428bc65 Mon Sep 17 00:00:00 2001
From: simon
Date: Fri, 8 Nov 2019 21:01:33 -0500
Subject: [PATCH] add hispachan

---
 chan/chan.py           | 22 +++++++++++++--
 chan/hispachan_html.py | 64 ++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 83 insertions(+), 3 deletions(-)
 create mode 100644 chan/hispachan_html.py

diff --git a/chan/chan.py b/chan/chan.py
index 347bfe8..fd9ccf4 100644
--- a/chan/chan.py
+++ b/chan/chan.py
@@ -6,6 +6,7 @@ from chan.desuchan_html import DesuChanHtmlChanHelper
 from chan.doushio_html import DoushioHtmlChanHelper
 from chan.endchan_html import EndchanHtmlChanHelper
 from chan.fchan_html import FChanHtmlChanHelper
+from chan.hispachan_html import HispachanHtmlHelper
 from chan.iichan_html import IichanHtmlChanHelper
 from chan.json import JsonChanHelper
 from chan.json_kun import JsonKunChanHelper
@@ -381,7 +382,7 @@ CHANS = {
         rps=1 / 60
     ),
     "8kun": JsonKunChanHelper(
-        28,
+        29,
         "https://8kun.net/",
         "https://media.8kun.net/",
         "/res/",
@@ -443,6 +444,21 @@ CHANS = {
             "wx",
             "x",
         ),
-        rps=2/3
-    )
+        rps=1
+    ),
+    "hispachan": HispachanHtmlHelper(
+        30,
+        "https://www.hispachan.org/",
+        "https://www.hispachan.org/",
+        "/res/",
+        "/src/",
+        (
+            "a", "ac", "c", "di", "f", "g", "hu", "k", "m", "mu",
+            "p", "pol", "q", "r", "t", "tv", "v", "ar", "bo", "cc",
+            "cl", "co", "ec", "es", "mx", "pe", "py", "uy", "ve", "d",
+            "h", "o", "s", "sar", "scl", "sco", "ses", "smx", "spe", "sve",
+        ),
+        rps=1/20
+    ),
+
 }
diff --git a/chan/hispachan_html.py b/chan/hispachan_html.py
new file mode 100644
index 0000000..60b5186
--- /dev/null
+++ b/chan/hispachan_html.py
@@ -0,0 +1,64 @@
+import datetime
+import re
+from urllib.parse import urljoin
+
+from bs4 import BeautifulSoup
+
+from chan.desuchan_html import DesuChanHtmlChanHelper
+
+
+class HispachanHtmlHelper(DesuChanHtmlChanHelper):
+
+    def item_urls(self, item, board):
+        return [
+            x for
+            x in super().item_urls(item, board)
+            if "google.com" not in x and "javascript:" not in x  # drop external and JS pseudo-links
+        ]
+
+    def parse_threads_list(self, r):
+        soup = BeautifulSoup(r.content.decode('utf-8', 'ignore'), "html.parser")
+
+        threads = []
+
+        for thread_el in soup.find_all("div", id=lambda tid: tid and tid[6:7].isdigit()):  # ids like "thread123"
+            omit = thread_el.find("span", class_="typecount")  # "R: <n> ..." reply counter
+            threads.append({
+                "id": int(re.search("thread([0-9]+)[a-zA-Z]*", thread_el.get("id")).group(1)),
+                "omit": int(re.match(r"R:\s+([0-9]+).*", omit.text).group(1)) if omit else 0
+            })
+
+        next_url = soup.find("a", attrs={"rel": "next"})
+        if next_url:
+            return threads, urljoin(r.url, next_url.get("href"))
+        return threads, None
+
+    @staticmethod
+    def parse_thread(r):
+        soup = BeautifulSoup(r.content.decode('utf-8', 'ignore'), "html.parser")
+
+        op_el = soup.find("div", class_="thread")
+
+        posts = []
+        for post_el in op_el.find_all("table", recursive=False):  # each reply is its own <table>
+            time = post_el.find("a", attrs={"data-date": lambda x: x}).get("data-date")
+            posts.append({
+                "id": int(post_el.find("td", class_="reply").get("id")[5:]),  # "reply12345" -> 12345
+                "type": "post",
+                "html": str(post_el),
+                "time": int(datetime.datetime.strptime(time, "%d/%m/%y %H:%M UTC").timestamp())
+            })
+            post_el.decompose()
+
+        time = op_el.find("a", attrs={"data-date": lambda x: x}).get("data-date")
+        tid = int(op_el.find("a", attrs={"name": lambda x: x and x.isdigit()}).get("name"))
+        yield {
+            "id": tid,
+            "type": "thread",
+            "html": str(op_el),
+            "time": int(datetime.datetime.strptime(time, "%d/%m/%y %H:%M UTC").timestamp())
+        }
+
+        for post in posts:
+            post["parent"] = tid
+            yield post
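
A few review notes on the new helper follow. First, a hypothetical end-to-end driver to show how the two parsers compose. The constructor arguments mirror the CHANS registration above; the thread-page URL layout (board + "/res/" + id + ".html") is inferred from the "/res/" argument and may not match the real site exactly:

    import requests

    from chan.hispachan_html import HispachanHtmlHelper

    helper = HispachanHtmlHelper(
        30,
        "https://www.hispachan.org/",
        "https://www.hispachan.org/",
        "/res/",
        "/src/",
        ("a",),  # single board, for brevity
        rps=1/20,
    )

    r = requests.get("https://www.hispachan.org/a/")
    threads, next_page = helper.parse_threads_list(r)  # next_page is None on the last index page

    for thread in threads:
        tr = requests.get("https://www.hispachan.org/a/res/%d.html" % thread["id"])
        for item in HispachanHtmlHelper.parse_thread(tr):
            # The OP is yielded first ("type": "thread"), then each reply
            # ("type": "post") with item["parent"] set to the thread id.
            print(item["type"], item["id"], item["time"])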
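
The thread-list parser keys off two markup conventions: container divs with ids of the form "thread<digits>" (the tid[6:7].isdigit() check skips divs like "navigation", since "thread" is six characters long), and a "R: <n>" counter in span.typecount. A self-contained check against a synthetic snippet (the HTML is invented to match those conventions, not captured from the site):

    import re

    from bs4 import BeautifulSoup

    html = """
    <div id="thread123abc"><span class="typecount">R: 42 / I: 7</span></div>
    <div id="thread456"></div>
    <div id="navigation"></div>
    """

    soup = BeautifulSoup(html, "html.parser")
    threads = []
    for thread_el in soup.find_all("div", id=lambda tid: tid and tid[6:7].isdigit()):
        omit = thread_el.find("span", class_="typecount")
        threads.append({
            "id": int(re.search("thread([0-9]+)[a-zA-Z]*", thread_el.get("id")).group(1)),
            "omit": int(re.match(r"R:\s+([0-9]+).*", omit.text).group(1)) if omit else 0,
        })

    print(threads)  # [{'id': 123, 'omit': 42}, {'id': 456, 'omit': 0}]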
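
One caveat on the date handling: strptime with "%d/%m/%y %H:%M UTC" returns a naive datetime, and .timestamp() on a naive datetime interprets it in the machine's local zone, so the stored epochs are only correct when the scraper runs with TZ=UTC. If that matters, the fix is one .replace() call (the sample value below is invented, in the data-date format the code expects):

    import datetime

    date_str = "08/11/19 21:01 UTC"

    naive = datetime.datetime.strptime(date_str, "%d/%m/%y %H:%M UTC")
    aware = naive.replace(tzinfo=datetime.timezone.utc)  # pin to UTC before converting

    print(int(naive.timestamp()))  # shifts with the local zone
    print(int(aware.timestamp()))  # 1573246860 everywhere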
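
Finally, on the registration values: rps reads as requests per second, so rps=1/20 throttles hispachan to one request every 20 seconds (versus rps=1 for 8kun). The helper classes don't show the limiter itself, so this is only a minimal sketch of that contract, not the project's actual implementation:

    import time

    class RateLimiter:
        # Allow at most `rps` requests per second; rps=1/20 -> one per 20 s.
        def __init__(self, rps):
            self.min_interval = 1 / rps
            self.last_request = float("-inf")

        def wait(self):
            delay = self.min_interval - (time.monotonic() - self.last_request)
            if delay > 0:
                time.sleep(delay)
            self.last_request = time.monotonic()

    limiter = RateLimiter(rps=1/20)
    for url in ("https://www.hispachan.org/a/", "https://www.hispachan.org/g/"):
        limiter.wait()  # the second iteration blocks for ~20 s
        ...              # fetch(url) would go here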