# 2ch.sc / KakoIndex.py
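# Dumps the 2ch.sc "kako" (archived-thread) logs: walks every board linked from
# bbsmenu.html, reads each archive's subject.txt index, downloads the thread
# .dat files, and writes the parsed threads out as JSON Lines.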
import asyncio
from io import BytesIO
import pathlib
import re
from urllib.parse import urljoin, urlparse
import httpx
import msgspec
import typer
import tqdm
from bs4 import BeautifulSoup
from loguru import logger
from dateutil.parser import parse
from dateutil import tz
import aiofile
app = typer.Typer()
JST = tz.gettz("Asia/Tokyo")
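# Wire formats: ThreadData mirrors one subject.txt entry, Message mirrors one .dat
# line, and FilledThreadData is the JSONL record that ultimately gets written out.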
class ThreadData(msgspec.Struct):
id: int
title: str
region: str # Root Server
city: str # Channel/Category/etc.
replies: int
class Message(msgspec.Struct):
name: str
mail: str | None
dateid: str | None
body: str
title: str | None
class BrokenMessage(msgspec.Struct):
    blocks: list[str]
class FilledThreadData(ThreadData):
messages: list[Message]
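# Weekday parentheticals such as "(月)" or "(Sat)" appear in 2ch dates and confuse
# dateutil, so they are stripped before parsing.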
date_refixer = re.compile(
r"\((?:[月火水木金土日]|Sat|Sun|Mon|Tue|Wed|Thr|Fri)\)", flags=re.IGNORECASE
)
def parse_date_id(dateid_string: str) -> tuple[float, str | None]:
"""
Parses a string to extract a timestamp and message IDs.
Args:
dateid_string: The input string potentially containing date, ID, and BE ID.
Returns:
        A tuple of the Unix timestamp (float, -1 when the date cannot be parsed)
        and the formatted message ID string, or None when no ID is present.
"""
# Standardize the input string
cleaned_id = dateid_string.removesuffix(".net").strip()
# Use regular expressions to find all ID parts
# This is more robust than splitting
id_parts = re.findall(r"(ID|BE):([\w\.\/]+)", cleaned_id)
message_ids = [f"{kind}:{value}" for kind, value in id_parts]
message_id_str = " | ".join(message_ids) if message_ids else None
# Extract the date part by removing the ID parts
    date_part = re.sub(r"\s*(ID|BE):[\w\.\/+\-=?!()★]+", "", cleaned_id).strip()
timestamp = -1
if date_part:
        # Strip the weekday parenthetical, e.g. "(月)" or "(Sat)", so dateutil can
        # parse the remaining date string.
try:
clean_date_str = date_refixer.sub("", date_part) + " JST"
timestamp = parse(clean_date_str, tzinfos={"JST": JST}).timestamp()
except ValueError as e:
logger.error(f"{clean_date_str}|{dateid_string}|{e}")
# Handle cases where parsing might fail
timestamp = -1
return timestamp, message_id_str
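# A .dat line is "<>"-separated: name<>mail<>date/ID<>body, with the thread title
# appended as an extra field on the thread's opening post.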
def parse_splitter(line: str):
if not line:
return
blocks = line.split("<>")
if len(blocks) > 5:
        logger.warning(f"{blocks}\nBlock count validation failed. Please check.")
return BrokenMessage(blocks)
name, email, dateid, body, *rest = blocks
    if body == "<em>■ このスレッドは過去ログ倉庫に格納されています</em>":  # "This thread is stored in the archive."
return
rest = [i for i in rest if i]
    if dateid.lower().startswith(("停止", "あぼーん", "over 1000 ", "移転")):  # stopped / deleted / over-1000 / moved markers
return
    # XXX: date/ID parsing is not used here and is to be processed separately,
    # because as it turns out, it's kind of hard!
# _, _ = parse_date_id(dateid)
# logger.debug(f"{jp_date, message_id}")
return Message(
name,
email if email else None,
dateid,
body,
"<>".join(rest) if rest else None,
)
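# Parse an entire .dat payload line by line; lines that fail to split are logged
# and skipped.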
def parse_messages(text: str) -> list[Message]:
messages = []
for message in text.split("\n"):
try:
            specmsg = parse_splitter(message)
            if specmsg is not None:
                messages.append(specmsg)
except Exception as e:
logger.exception(e)
return messages
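# GET with unlimited retries and a flat 5-second back-off; used for the archive
# subject indexes.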
async def retry_error(session: httpx.AsyncClient, url: str):
while True:
try:
response = await session.get(url)
response.raise_for_status()
return response
except Exception as e:
logger.warning(f"Failed to fetch {url} | {e} | Trying again")
await asyncio.sleep(5)
async def main(output_dump: pathlib.Path):
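    """Crawl every board linked from bbsmenu.html and dump its archived threads to
    per-board JSON Lines files."""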
session = httpx.AsyncClient()
session.headers["user-agent"] = ""
loop = asyncio.get_running_loop()
boards = await session.get("https://2ch.sc/bbsmenu.html")
boards.raise_for_status()
soup = BeautifulSoup(boards.text, "lxml")
encoder = msgspec.json.Encoder()
# with BytesIO() as fout:
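    # Walk every anchor in bbsmenu.html, skipping the portal/BE/headline/search/IRC
    # links and anything that is not a board on *.2ch.sc.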
for links in soup.find_all("a", attrs={"href": True}):
link = links["href"]
if (
"/be/" in link
or "//be." in link
or "//be." in link
or link
in [
"//2ch.sc/",
"//info.2ch.sc/guide/",
"//sp.2ch.sc/",
"//sweet.2ch.sc/headline/",
"//find.2ch.sc/",
"//irc.2ch.sc/",
]
or link.endswith(".html")
):
continue
if "2ch.sc" not in link:
continue
        if not isinstance(link, str):
            raise TypeError(f"Unexpected href value: {link!r}")
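        # Derive the dump key from the board URL, e.g. //<server>.2ch.sc/<board>/
        # gives region "<server>" and city "<board>".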
decomposed = urlparse(link)
city = decomposed.path.strip("/")
region = decomposed.hostname.replace(".2ch.sc", "")
dumpFile = output_dump.with_stem(f"{output_dump.stem}-{region}_{city}")
if dumpFile.is_file() or dumpFile.with_suffix(".jsonl.zst").is_file():
continue
resolved_base = urljoin("https://", link)
warehouse_catalog = urljoin(resolved_base, "kako/subject.txt")
warehouses = await session.get(warehouse_catalog, timeout=None)
if warehouses.status_code == 404:
logger.warning(f"{warehouse_catalog} returned 404")
continue
warehouses.raise_for_status()
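        # The board-level kako/subject.txt lists the per-era archive directories;
        # only lines starting with "o" appear to carry usable directory ids.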
warehouse_ids = [
i.split("<>")[0] for i in warehouses.text.split("\n") if i.startswith("o")
]
threadsDump = []
concurrent = asyncio.Semaphore(128)
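        # Fetch one archive directory's subject.txt and turn every entry into a
        # ThreadData record; the semaphore caps this at 128 concurrent requests.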
async def fetch_subjects(warehouse_id: str):
async with concurrent:
subject_id = urljoin(resolved_base, f"kako/{warehouse_id}/subject.txt")
subjects = [
i
for i in (await retry_error(session, subject_id)).text.split("\n")
if i
]
for subject in subjects:
subject = subject.strip()
thread_dat, *rest = subject.split("<>")
*thread_title, replies = "<>".join(rest).split("(")
thread = ThreadData(
int(thread_dat.split(".")[0]),
"[".join(thread_title).rstrip(" "),
region,
city,
int(replies.rstrip(")")),
)
threadsDump.append(thread)
threads = [
asyncio.create_task(fetch_subjects(subject_id))
for subject_id in warehouse_ids
]
for completed in tqdm.tqdm(
asyncio.as_completed(threads), total=len(warehouse_ids)
):
            await completed
if len(threadsDump) <= 1_000_000:
            logger.warning(f"{warehouse_catalog} has 1,000,000 or fewer archived threads. Skipping.")
continue
logger.info("Requesting threads.")
taskQueue = asyncio.Queue(maxsize=1024 * 128)
responseQueue = asyncio.Queue(maxsize=1024 * 128)
totalThreads = len(threadsDump)
logger.info(f"total: {totalThreads}")
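        # Writer: drains responseQueue and appends one JSON line per thread,
        # rotating to a new dump file whenever the current one exceeds 10 GiB.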
async def writer_task():
nonlocal dumpFile
rootStem = dumpFile.stem
i = 0
with tqdm.tqdm(total=totalThreads) as pbar:
fout = await aiofile.async_open(dumpFile, "wb")
while True:
tid = await responseQueue.get()
if tid is None:
break
                    data = await loop.run_in_executor(None, encoder.encode_lines, [tid])
# logger.debug(data)
await fout.write(data)
pbar.update(1)
if fout.tell() > (1024**3) * 10:
await fout.close()
i += 1
dumpFile = dumpFile.with_stem(f"{rootStem}-{i}")
                        fout = await aiofile.async_open(dumpFile, "wb")
                # Close whichever dump chunk is still open once the sentinel arrives.
                await fout.close()
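        # Fetch worker: pulls ThreadData items off taskQueue, downloads dat/<id>.dat
        # with up to 10 attempts, parses it off the event loop, and hands the result
        # to the writer. A None item is the shutdown sentinel.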
async def fetch_thread_task():
while True:
thread_data = await taskQueue.get()
if thread_data is None:
break
thread_dat = urljoin(resolved_base, f"dat/{thread_data.id}.dat")
                response = None
                tries = 10
                while tries > 0:
                    try:
                        response = await session.get(thread_dat)
                        response.raise_for_status()
                        break
                    except Exception as e:
                        tries -= 1
                        response = None
                        logger.warning(
                            f"Failed to fetch {thread_dat} | {e} | Trying again"
                        )
                        await asyncio.sleep(5)
                if response is None:
                    logger.error(f"Giving up on {thread_dat} after 10 failed attempts.")
                    continue
await responseQueue.put(
FilledThreadData(
thread_data.id,
thread_data.title,
thread_data.region,
thread_data.city,
thread_data.replies,
await asyncio.to_thread(parse_messages, response.text),
)
)
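        # 128 fetch workers feed a single writer; one None sentinel per worker drains
        # taskQueue, and a final None on responseQueue stops the writer.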
logger.info("Starting workers.")
workers: list[asyncio.Task] = [
loop.create_task(fetch_thread_task()) for _ in range(128)
]
writer = loop.create_task(writer_task())
for thread in threadsDump:
await taskQueue.put(thread)
for _ in range(128):
await taskQueue.put(None)
        # Wait for every fetch worker to finish before signalling the writer.
        await asyncio.gather(*workers, return_exceptions=True)
await responseQueue.put(None)
await writer
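# Typer CLI entry point; the given path is used as the stem for the per-board dump
# files (e.g. <stem>-<region>_<city>, plus numbered 10 GiB chunks).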
@app.command()
def dump_2ch_kako(output_catalogs: pathlib.Path):
asyncio.run(main(output_catalogs))
if __name__ == "__main__":
app()