Mirror of https://github.com/simon987/hexlib.git (synced 2025-12-19 01:29:02 +00:00)

Compare commits: 75bf2c2d85...master (58 commits)
Commits (author and date columns were not captured by the mirror page):
4c8b74bd8f, d82e1bccee, b1a1da3bac, a047366926, 24230cdc1e, 3bd9f03996, e267bbf1c8,
42e33b72b2, 5275c332cc, a7b1a6e1ec, 826312115c, 372abb0076, 78c04ef6f3, a51ad2cbb4,
4befc3973d, c9fac7151a, 084acbe184, d578be3218, cd5a1ac50c, 62e74ed292, 428c82bcfd,
4b3583358b, 90d434ec73, 55fd4a66d2, 3677815d57, 1ce795a759, e1537297d7, 8d8f9e8751,
18ba0024ea, 408735a926, 2f6c2822b6, d85ad919b3, ed9d148411, 5e00ddccdb, 7ecd55a1c6,
b746a91281, 333083e8b9, c295b5d30b, da0e117550, c3fef7e7f8, 9bd1f4b799, c560cc2010,
f4a5e6cf53, 71cd00c063, 7349c9a5f1, d19442b00e, 4711cd1b66, 7e0ffafb8c, 60273fb6bd,
67c09cc10c, a7bf5b2d15, 31b35e3a32, 4cff343370, 4d6c8018df, db3e191983, 33e9734991,
3238f92e4d, f8e93354a4
6 .gitignore (vendored)

```diff
@@ -1,3 +1,7 @@
 *.iml
 .idea/
-*.db
+*.db
+*.png
+hexlib.egg-info
+build/
+dist/
```
````diff
@@ -1,5 +1,5 @@
 Misc utility methods in Python
 
 ```
-git+git://github.com/simon987/hexlib.git
+git+https://github.com/simon987/hexlib.git
 ```
````
27 bench/text.py (new file)

```python
from timeit import timeit

t = bytes.maketrans(b".,;:\"!?/()|*=>", b"              ")  # 14 punctuation bytes -> 14 spaces


def translate(x: str):
    arr = x.encode("utf8")

    return arr.translate(t).decode("utf8")


if __name__ == '__main__':
    res = timeit(
        setup='t = str.maketrans(".,;:\\"!?/()|*=>", "              ")',  # 14 spaces
        stmt='x = "Hello, world %123 & *".translate(t)'
    )

    # 0.865953s
    print("translate = %fs" % res)

    res = timeit(
        setup='from text import translate',
        stmt='x = translate("Hello, world %123 & *")'
    )

    # 0.865953s
    print("custom = %fs" % res)
```
```diff
@@ -1,9 +1,139 @@
-from queue import Queue, Empty
+from multiprocessing import Process
+from multiprocessing import Queue as MPQueue
+from queue import Queue, Empty
+from threading import Thread
+
+from hexlib.misc import ichunks
 
 
-def queue_iter(q: Queue, **get_args):
+class StatelessStreamWorker:
+
+    def __init__(self):
+        self._q_out = None
+
+    def run(self, q: Queue, q_out: Queue):
+        self._q_out: Queue = q_out
+
+        for chunk in queue_iter(q, joinable=False, timeout=10):
+            self._process_chunk(chunk)
+
+    def _process_chunk(self, chunk):
+        results = []
+
+        for item in chunk:
+            result = self.process(item)
+            if result is not None:
+                results.append(result)
+
+        if results:
+            self._q_out.put(results)
+
+    def process(self, item):
+        raise NotImplementedError
+
+
+class StatelessStreamProcessor:
+    def __init__(self, worker_factory, chunk_size=128, processes=1, timeout=60):
+        self._chunk_size = 128
+        self._queue = MPQueue(maxsize=chunk_size)
+        self._queue_out = MPQueue(maxsize=processes * 2)
+        self._process_count = processes
+        self._processes = []
+        self._factory = worker_factory
+        self._workers = []
+        self._timeout = timeout
+
+        if processes > 1:
+            for _ in range(processes):
+                worker = self._factory()
+                p = Process(target=worker.run, args=(self._queue, self._queue_out))
+                p.start()
+
+                self._processes.append(p)
+                self._workers.append(worker)
+        else:
+            self._workers.append(self._factory())
+
+    def _ingest(self, iterable):
+        if self._process_count > 1:
+            for chunk in ichunks(iterable, self._chunk_size):
+                self._queue.put(chunk)
+        else:
+            for item in iterable:
+                self._workers[0].process(item)
+
+    def ingest(self, iterable):
+        ingest_thread = Thread(target=self._ingest, args=(iterable,))
+        ingest_thread.start()
+
+        for results in queue_iter(self._queue_out, joinable=False, timeout=self._timeout):
+            yield from results
+
+        ingest_thread.join()
+
+
+class StatefulStreamWorker:
+
+    def __init__(self):
+        pass
+
+    def run(self, q: Queue, q_out: Queue):
+        for chunk in queue_iter(q, joinable=False, timeout=3):
+            self._process_chunk(chunk)
+
+        q_out.put(self.results())
+
+    def _process_chunk(self, chunk):
+        for item in chunk:
+            self.process(item)
+
+    def process(self, item) -> None:
+        raise NotImplementedError
+
+    def results(self):
+        raise NotImplementedError
+
+
+class StatefulStreamProcessor:
+    def __init__(self, worker_factory, chunk_size=128, processes=1):
+        self._chunk_size = 128
+        self._queue = MPQueue(maxsize=chunk_size)
+        self._queue_out = MPQueue()
+        self._process_count = processes
+        self._processes = []
+        self._factory = worker_factory
+        self._workers = []
+
+        if processes > 1:
+            for _ in range(processes):
+                worker = self._factory()
+                p = Process(target=worker.run, args=(self._queue, self._queue_out))
+                p.start()
+
+                self._processes.append(p)
+                self._workers.append(worker)
+        else:
+            self._workers.append(self._factory())
+
+    def ingest(self, iterable):
+        if self._process_count > 1:
+            for chunk in ichunks(iterable, self._chunk_size):
+                self._queue.put(chunk)
+        else:
+            for item in iterable:
+                self._workers[0].process(item)
+
+    def get_results(self):
+        for _ in range(self._process_count):
+            yield self._queue_out.get()
+        for p in self._processes:
+            p.join()
+
+
+def queue_iter(q: Queue, joinable=True, **get_args):
     while True:
         try:
             task = q.get(**get_args)
@@ -12,7 +142,8 @@ def queue_iter(q: Queue, **get_args):
                 break
 
             yield task
-            q.task_done()
+            if joinable:
+                q.task_done()
         except Empty:
             break
         except KeyboardInterrupt:
```
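For orientation, a minimal sketch of how the new stateful stream classes fit together. The worker below is hypothetical, and the import path is an assumption (this page does not show the module's file name):

```python
# Hypothetical worker built on the classes added above; the import path is
# an assumption -- the file name is not captured on this page.
from hexlib.concurrency import StatefulStreamProcessor, StatefulStreamWorker


class CountingWorker(StatefulStreamWorker):
    """Counts the items it sees; each child process reports one total."""

    def __init__(self):
        super().__init__()
        self.count = 0

    def process(self, item) -> None:
        self.count += 1

    def results(self):
        return self.count


if __name__ == "__main__":
    # The factory is just the class itself; each process gets its own worker.
    # (fork-style multiprocessing assumed)
    proc = StatefulStreamProcessor(CountingWorker, chunk_size=64, processes=4)
    proc.ingest(range(10_000))
    print(sum(proc.get_results()))  # 10000
```

Note that workers exit once queue_iter times out (3 s of queue inactivity for the stateful variant), which is what lets get_results() eventually unblock and join the processes.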
150 hexlib/db.py

```diff
@@ -1,26 +1,24 @@
 import base64
 import sqlite3
 import traceback
+from datetime import datetime
+from enum import Enum
 
 import psycopg2
 import umsgpack
 from psycopg2.errorcodes import UNIQUE_VIOLATION
+from pydantic import BaseModel
 
 from hexlib.env import get_redis
 
 
-class PersistentState:
-    """Quick and dirty persistent dict-like SQLite wrapper"""
-
-    def __init__(self, dbfile="state.db", logger=None, **dbargs):
-        self.dbfile = dbfile
-        self.logger = logger
-        if dbargs is None:
-            dbargs = {"timeout": 30000}
-        self.dbargs = dbargs
-
-    def __getitem__(self, table):
-        return Table(self, table)
+def _json_encoder(x):
+    if isinstance(x, datetime):
+        return x.isoformat()
+    if isinstance(x, Enum):
+        return x.value
+
+    raise Exception(f"I don't know how to JSON encode {x} ({type(x)})")
 
 
 class VolatileState:
@@ -36,6 +34,9 @@ class VolatileState:
     def __getitem__(self, table):
        return RedisTable(self, table, self._sep)
 
+    def __delitem__(self, key):
+        self.rdb.delete(f"{self.prefix}{self._sep}{key}")
+
 
 class VolatileQueue:
     """Quick and dirty volatile queue-like redis wrapper"""
@@ -68,6 +69,9 @@ class VolatileBooleanState:
     def __getitem__(self, table):
         return RedisBooleanTable(self, table, self._sep)
 
+    def __delitem__(self, table):
+        self.rdb.delete(f"{self.prefix}{self._sep}{table}")
+
 
 class RedisTable:
     def __init__(self, state, table, sep=""):
@@ -89,9 +93,9 @@ class RedisTable:
         self._state.rdb.hdel(self._key, str(key))
 
     def __iter__(self):
-        val = self._state.rdb.hgetall(self._key)
-        if val:
-            return ((k, umsgpack.loads(v)) for k, v in val.items())
+        for val in self._state.rdb.hscan(self._key):
+            if val:
+                return ((k, umsgpack.loads(v)) for k, v in val.items())
 
 
 class RedisBooleanTable:
@@ -114,7 +118,7 @@ class RedisBooleanTable:
         self._state.rdb.srem(self._key, str(key))
 
     def __iter__(self):
-        return iter(self._state.rdb.smembers(self._key))
+        yield from self._state.rdb.sscan_iter(self._key)
 
 
 class Table:
@@ -122,32 +126,54 @@ class Table:
         self._state = state
         self._table = table
 
-    def sql(self, where_clause, *params):
-        with sqlite3.connect(self._state.dbfile, **self._state.dbargs) as conn:
-            conn.row_factory = sqlite3.Row
-            try:
-                cur = conn.execute("SELECT * FROM %s %s" % (self._table, where_clause), params)
-                for row in cur:
-                    yield dict(row)
-            except:
-                return None
-
-    def __iter__(self):
-        with sqlite3.connect(self._state.dbfile, **self._state.dbargs) as conn:
-            conn.row_factory = sqlite3.Row
-            try:
-                cur = conn.execute("SELECT * FROM %s" % (self._table,))
-                for row in cur:
-                    yield dict(row)
-            except:
-                return None
-
-    def __getitem__(self, item):
+    def _sql_dict(self, where_clause, *params):
         with sqlite3.connect(self._state.dbfile, **self._state.dbargs) as conn:
             conn.row_factory = sqlite3.Row
             try:
-                cur = conn.execute("SELECT * FROM %s WHERE id=?" % (self._table,), (item,))
+                col_types = conn.execute("PRAGMA table_info(%s)" % self._table).fetchall()
+                cur = conn.execute("SELECT * FROM %s %s" % (self._table, where_clause), params)
+                for row in cur:
+                    yield dict(
+                        (col[0], _deserialize(row[col[0]], col_types[i]["type"]))
+                        for i, col in enumerate(cur.description)
+                    )
+            except:
+                return None
+
+    def sql(self, where_clause, *params):
+        for row in self._sql_dict(where_clause, *params):
+            if row and "__pydantic" in row:
+                yield self._deserialize_pydantic(row)
+            else:
+                yield row
+
+    def _iter_dict(self):
+        with sqlite3.connect(self._state.dbfile, **self._state.dbargs) as conn:
+            conn.row_factory = sqlite3.Row
+            try:
+                col_types = conn.execute("PRAGMA table_info(%s)" % self._table).fetchall()
+                cur = conn.execute("SELECT * FROM %s" % (self._table,))
+                for row in cur:
+                    yield dict(
+                        (col[0], _deserialize(row[col[0]], col_types[i]["type"]))
+                        for i, col in enumerate(cur.description)
+                    )
+            except:
+                return None
+
+    def __iter__(self):
+        for row in self._iter_dict():
+            if row and "__pydantic" in row:
+                yield self._deserialize_pydantic(row)
+            else:
+                yield row
+
+    def _getitem_dict(self, key):
+        with sqlite3.connect(self._state.dbfile, **self._state.dbargs) as conn:
+            conn.row_factory = sqlite3.Row
+            try:
+                col_types = conn.execute("PRAGMA table_info(%s)" % self._table).fetchall()
+                cur = conn.execute("SELECT * FROM %s WHERE id=?" % (self._table,), (key,))
 
                 row = cur.fetchone()
                 if row:
@@ -158,8 +184,32 @@
             except:
                 return None
 
+    @staticmethod
+    def _deserialize_pydantic(row):
+        module = __import__(row["__module"])
+        cls = getattr(module, row["__class"])
+        return cls.parse_raw(row["json"])
+
+    def __getitem__(self, key):
+        row = self._getitem_dict(key)
+        if row and "__pydantic" in row:
+            return self._deserialize_pydantic(row)
+        return row
+
+    def setitem_pydantic(self, key, value: BaseModel):
+        self.__setitem__(key, {
+            "json": value.json(encoder=_json_encoder, indent=2),
+            "__class": value.__class__.__name__,
+            "__module": value.__class__.__module__,
+            "__pydantic": 1
+        })
+
     def __setitem__(self, key, value):
 
+        if isinstance(value, BaseModel):
+            self.setitem_pydantic(key, value)
+            return
+
         with sqlite3.connect(self._state.dbfile, **self._state.dbargs) as conn:
             conn.row_factory = sqlite3.Row
@@ -217,11 +267,33 @@ def _serialize(value):
 
 
 def _deserialize(value, col_type):
-    if col_type == "blob":
+    if col_type.lower() == "blob":
         return base64.b64decode(value)
     return value
 
 
+class PersistentState:
+    """Quick and dirty persistent dict-like SQLite wrapper"""
+
+    def __init__(self, dbfile="state.db", logger=None, table_factory=Table, **dbargs):
+        self.dbfile = dbfile
+        self.logger = logger
+        if dbargs is None or dbargs == {}:
+            dbargs = {"timeout": 30000}
+        self.dbargs = dbargs
+        self._table_factory = table_factory
+
+    def __getitem__(self, table):
+        return self._table_factory(self, table)
+
+    def __delitem__(self, key):
+        with sqlite3.connect(self.dbfile, **self.dbargs) as conn:
+            try:
+                conn.execute(f"DROP TABLE {key}")
+            except:
+                pass
+
+
 def pg_fetch_cursor_all(cur, name, batch_size=1000):
     while True:
         cur.execute("FETCH FORWARD %d FROM %s" % (batch_size, name))
@@ -244,10 +316,10 @@ class PgConn:
     def __init__(self, logger=None, **kwargs):
         self._conn_args = kwargs
         self.conn = psycopg2.connect(**kwargs)
-        self.cur = self.conn.cursor()
         self._logger = logger
 
     def __enter__(self):
+        self.cur = self.conn.cursor()
         return self
 
     def exec(self, query_string, args=None):
```
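In short, the refactored Table now transparently round-trips pydantic models and blob columns. A minimal sketch of the dict-style API (the model and key names are illustrative, not from the diff):

```python
from pydantic import BaseModel

from hexlib.db import PersistentState


class User(BaseModel):  # hypothetical model for illustration
    name: str
    age: int


s = PersistentState("state.db")

s["users"]["u1"] = User(name="alice", age=30)   # routed to setitem_pydantic()
s["kv"][0] = {"x": b"abc"}                      # blob columns survive the round trip

print(s["users"]["u1"].name)   # "alice", rebuilt via cls.parse_raw(row["json"])
print(s["kv"][0]["x"])         # b"abc", decoded by _deserialize()
del s["users"]                 # drops the whole table
```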
```diff
@@ -37,13 +37,15 @@ def redis_publish(rdb, item, item_project, item_type, item_subproject=None, item
 def get_web(session=None):
     ua = UserAgent()
 
+    retry_codes = os.environ.get("RETRY_CODES", "")
+
     web = Web(
         session=session,
         proxy=os.environ.get("PROXY", None),
         rps=os.environ.get("RPS", 1),
         logger=stdout_logger,
         cookie_file=os.environ.get("COOKIE_FILE", None),
-        retry_codes=set(int(x) if x else None for x in os.environ.get("RETRY_CODES", "").split(",")),
+        retry_codes=set(int(x) for x in retry_codes.split(",")) if retry_codes else None,
         retries=int(os.environ.get("RETRIES", 3)),
         retry_sleep=int(os.environ.get("RETRY_SLEEP", 0)),
         ua=ua[os.environ.get("USER_AGENT")] if os.environ.get("USER_AGENT", None) is not None else None
```
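The retry_codes change fixes the empty-variable case: `"".split(",")` yields `[""]`, so the old expression silently produced `{None}` when RETRY_CODES was unset, overriding Web's default retry set. A quick sketch of the two behaviors:

```python
import os

os.environ.pop("RETRY_CODES", None)
retry_codes = os.environ.get("RETRY_CODES", "")

# old expression: "".split(",") == [""], so the set became {None}
old = set(int(x) if x else None for x in retry_codes.split(","))
assert old == {None}  # truthy, so Web never fell back to its defaults

# new expression: unset/empty -> None, so Web applies its default code set
new = set(int(x) for x in retry_codes.split(",")) if retry_codes else None
assert new is None

os.environ["RETRY_CODES"] = "429,503"
retry_codes = os.environ.get("RETRY_CODES", "")
assert (set(int(x) for x in retry_codes.split(",")) if retry_codes else None) == {429, 503}
```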
```diff
@@ -62,6 +62,16 @@ COMPRESSION_GZIP = "gz"
 COMPRESSION_ZSTD = "zstd"
 
 
+class NDJsonLine:
+    __slots__ = "text"
+
+    def __init__(self, text):
+        self.text = text
+
+    def json(self):
+        return json.loads(self.text)
+
+
 def ndjson_iter(*files, compression=""):
     for file in files:
         cleanup = None
@@ -75,7 +85,7 @@ def ndjson_iter(*files, compression=""):
             line_iter = BufferedReader(gzip.open(file))
         elif compression == COMPRESSION_ZSTD:
             fp = open(file, "rb")
-            dctx = zstandard.ZstdDecompressor()
+            dctx = zstandard.ZstdDecompressor(max_window_size=2147483648)
             reader = dctx.stream_reader(fp)
             line_iter = BufferedReader(reader)
 
@@ -90,7 +100,6 @@ def ndjson_iter(*files, compression=""):
             line_iter.close()
 
         for line in line_iter:
-            yield json.loads(line)
+            yield NDJsonLine(line)
         if cleanup:
             cleanup()
```
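Callers of ndjson_iter now receive NDJsonLine wrappers instead of pre-parsed dicts, so JSON decoding is deferred until .json() is called. A sketch (the sample file name is hypothetical, and the import path is assumed since this hunk does not name its file):

```python
from hexlib.misc import ndjson_iter  # module path assumed; not shown on this page

# Lines are decoded lazily: cheap raw-bytes filters can run before paying
# the json.loads() cost for each line.
for line in ndjson_iter("posts.ndjson.zst", compression="zstd"):
    if b"hexlib" not in line.text:
        continue
    doc = line.json()
    print(doc["_id"])
```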
```diff
@@ -1,4 +1,5 @@
 import atexit
+import itertools
 import os
 import sys
 import time
@@ -33,6 +34,15 @@ def chunks(lst: list, chunk_len: int):
         yield lst[i:i + chunk_len]
 
 
+def ichunks(iterable, chunk_len: int):
+    it = iter(iterable)
+    while True:
+        chunk = tuple(itertools.islice(it, chunk_len))
+        if not chunk:
+            break
+        yield chunk
+
+
 def rate_limit(per_second):
     min_interval = 1.0 / float(per_second)
```
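ichunks is the lazy counterpart to the existing list-based chunks(): it works on any iterable, which is what lets the stream processors above feed generators into a multiprocessing queue in fixed-size batches. A quick sketch:

```python
from hexlib.misc import ichunks

gen = (n * n for n in range(7))   # has no len() and cannot be sliced
print(list(ichunks(gen, 3)))
# [(0, 1, 4), (9, 16, 25), (36,)]  -- the last chunk is simply shorter
```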
```diff
@@ -1,22 +1,29 @@
 import logging
 import traceback
+from abc import ABC
 
 from influxdb import InfluxDBClient
 
 from hexlib.misc import buffered
 
 
-class Monitoring:
-    def __init__(self, db, host="localhost", logger=logging.getLogger("default"), batch_size=1, flush_on_exit=False):
-        self._db = db
-        self._client = InfluxDBClient(host, 8086, "", "", db)
+class Monitoring(ABC):
+    def log(self, points):
+        raise NotImplementedError()
+
+
+class BufferedInfluxDBMonitoring(Monitoring):
+    def __init__(self, db_name, host="localhost", port=8086, logger=None, batch_size=1, flush_on_exit=False):
+        self._db = db_name
+        self._client = InfluxDBClient(host, port, "", "", db_name)
         self._logger = logger
 
-        self._init()
+        if not self.db_exists(self._db):
+            self._client.create_database(self._db)
 
         @buffered(batch_size, flush_on_exit)
         def log(points):
             self._log(points)
 
         self.log = log
 
     def db_exists(self, name):
@@ -25,14 +32,16 @@ class Monitoring:
                 return True
         return False
 
-    def _init(self):
-        if not self.db_exists(self._db):
-            self._client.create_database(self._db)
+    def log(self, points):
+        # Is overwritten in __init__()
+        pass
 
     def _log(self, points):
         try:
             self._client.write_points(points)
-            self._logger.debug("InfluxDB: Wrote %d points" % len(points))
+            if self._logger:
+                self._logger.debug("InfluxDB: Wrote %d points" % len(points))
         except Exception as e:
-            self._logger.debug(traceback.format_exc())
-            self._logger.error(str(e))
+            if self._logger:
+                self._logger.debug(traceback.format_exc())
+                self._logger.error(str(e))
```
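The split makes Monitoring an abstract interface, with the InfluxDB specifics (and the now-optional logger) moved to BufferedInfluxDBMonitoring. A usage sketch, assuming a reachable InfluxDB instance and the usual module path; the exact batching behavior of the @buffered decorator is not shown in this diff:

```python
from hexlib.monitoring import BufferedInfluxDBMonitoring  # path assumed

mon = BufferedInfluxDBMonitoring("crawler", host="localhost", port=8086,
                                 batch_size=100, flush_on_exit=True)

# points use the influxdb client's usual dict format
mon.log([{
    "measurement": "scrape",
    "fields": {"items": 42},
}])
# with batch_size=100, writes are deferred until enough points accumulate
# (or until process exit, via flush_on_exit)
```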
161 hexlib/mq.py (new file)

```python
import json
from collections import namedtuple
from functools import partial
from itertools import islice
from time import sleep, time

from orjson import orjson
from redis import Redis

RoutingKeyParts = namedtuple(
    "RoutingKeyParts",
    ["arc_list", "project", "subproject", "type", "category"]
)


def parse_routing_key(key):
    tokens = key.split(".")

    if len(tokens) == 4:
        arc_list, project, type_, category = tokens
        return RoutingKeyParts(
            arc_list=arc_list,
            project=project,
            subproject=None,
            type=type_,
            category=category
        )
    else:
        arc_list, project, subproject, type_, category = tokens
        return RoutingKeyParts(
            arc_list=arc_list,
            project=project,
            subproject=subproject,
            type=type_,
            category=category
        )


class MessageQueue:
    def read_messages(self, topics):
        raise NotImplementedError()

    def publish(self, item, item_project, item_type, item_subproject, item_category):
        raise NotImplementedError()


class RedisMQ(MessageQueue):
    _MAX_KEYS = 30

    def __init__(self, rdb, consumer_name="redis_mq", sep=".", max_pending_time=120, logger=None, publish_channel=None,
                 arc_lists=None, wait=1):
        self._rdb: Redis = rdb
        self._key_cache = None
        self._consumer_id = consumer_name
        self._pending_list = f"pending{sep}{consumer_name}"
        self._max_pending_time = max_pending_time
        self._logger = logger
        self._publish_channel = publish_channel
        self._arc_lists = arc_lists
        self._wait = wait

    def _get_keys(self, pattern):
        if self._key_cache:
            return self._key_cache

        keys = list(islice(
            self._rdb.scan_iter(match=pattern, count=RedisMQ._MAX_KEYS), RedisMQ._MAX_KEYS
        ))
        self._key_cache = keys

        return keys

    def _get_pending_tasks(self):
        for task_id, pending_task in self._rdb.hscan_iter(self._pending_list):

            pending_task_json = orjson.loads(pending_task)

            if time() >= pending_task_json["resubmit_at"]:
                yield pending_task_json["topic"], pending_task_json["task"], partial(self._ack, task_id)

    def _ack(self, task_id):
        self._rdb.hdel(self._pending_list, task_id)

    def read_messages(self, topics):
        """
        Assumes json-encoded tasks with an _id field

        Tasks are automatically put into a pending list until ack() is called.
        When a task has been in the pending list for at least max_pending_time seconds, it
        gets submitted again
        """

        assert len(topics) == 1, "RedisMQ only supports 1 topic pattern"

        pattern = topics[0]
        counter = 0

        if self._logger:
            self._logger.info(f"MQ>Listening for new messages in {pattern}")

        while True:
            counter += 1

            if counter % 1000 == 0:
                yield from self._get_pending_tasks()

            keys = self._get_keys(pattern)
            if not keys:
                sleep(self._wait)
                self._key_cache = None
                continue

            result = self._rdb.blpop(keys, timeout=1)
            if not result:
                self._key_cache = None
                continue

            topic, task = result

            task_json = orjson.loads(task)
            topic = topic.decode()

            if "_id" not in task_json or not task_json["_id"]:
                raise ValueError(f"Task doesn't have _id field: {task}")

            # Immediately put in pending queue
            self._rdb.hset(
                self._pending_list, task_json["_id"],
                orjson.dumps({
                    "resubmit_at": time() + self._max_pending_time,
                    "topic": topic,
                    "task": task_json
                })
            )

            yield topic, task_json, partial(self._ack, task_json["_id"])

    def publish(self, item, item_project, item_type, item_subproject=None, item_category="x"):

        if "_id" not in item:
            raise ValueError("_id field must be set for item")

        item = json.dumps(item, separators=(',', ':'), ensure_ascii=False, sort_keys=True)

        item_project = item_project.replace(".", "-")
        item_subproject = item_subproject.replace(".", "-") if item_subproject else None

        item_source = item_project if not item_subproject else f"{item_project}.{item_subproject}"

        item_type = item_type.replace(".", "-")
        item_category = item_category.replace(".", "-")

        # If specified, fan-out to pub/sub channel
        if self._publish_channel is not None:
            routing_key = f"{self._publish_channel}.{item_source}.{item_type}.{item_category}"
            self._rdb.publish(routing_key, item)

        # Save to list
        for arc_list in self._arc_lists:
            routing_key = f"{arc_list}.{item_source}.{item_type}.{item_category}"
            self._rdb.lpush(routing_key, item)
```
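The test file later in this diff exercises the same flow; condensed, the publish/consume/ack cycle looks like this (project and payload names are illustrative):

```python
from hexlib.env import get_redis
from hexlib.mq import RedisMQ

rdb = get_redis()
mq = RedisMQ(rdb, consumer_name="demo", max_pending_time=120, arc_lists=["arc"])

# routing key becomes "arc.myproj.msg.x"; the _id field is mandatory
mq.publish({"_id": 1, "body": "hello"}, item_project="myproj", item_type="msg")

for topic, task, ack in mq.read_messages(topics=["arc.myproj.*"]):
    print(topic, task["body"])
    ack()  # removes the task from the pending hash so it is not resubmitted
    break
```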
```diff
@@ -3,4 +3,5 @@ import re
 LINK_RE = re.compile(r"(https?://[\w\-_.]+\.[a-z]{2,4}([^\s<'\"]*|$))")
 HTML_HREF_RE = re.compile(r"href=\"([^\"]+)\"")
 WHITESPACE_RE = re.compile(r"\s+")
-PUNCTUATION_RE = re.compile(r"[.,;:\"!?/()|*=]+")
+PUNCTUATION_RE = re.compile(r"[.,;:\"“!?/()|*=>]+")
+XML_ENTITY_RE = re.compile(r"&[a-z]+;")
```
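The widened PUNCTUATION_RE now also matches “ and >, and the new XML_ENTITY_RE strips leftover entities. A quick standalone demonstration of the two patterns:

```python
import re

# the changed/added patterns from the diff above
PUNCTUATION_RE = re.compile(r"[.,;:\"“!?/()|*=>]+")
XML_ENTITY_RE = re.compile(r"&[a-z]+;")

print(PUNCTUATION_RE.sub(" ", "wow!? (ok) >quote"))  # 'wow   ok   quote'
print(XML_ENTITY_RE.sub(" ", "a&nbsp;b &amp; c"))    # 'a b   c'
```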
115 hexlib/text.py

```diff
@@ -1,15 +1,20 @@
 from functools import partial
 from multiprocessing.pool import Pool
+import re
+from itertools import chain, repeat
 
 import nltk.corpus
 from lxml import etree
+from nltk import word_tokenize
 from nltk.corpus import stopwords
 from nltk.stem import WordNetLemmatizer
 
-from .regex import WHITESPACE_RE, PUNCTUATION_RE, LINK_RE
+from .regex_util import LINK_RE
 
 get_text = etree.XPath("//text()")
 
+nltk.download("stopwords", quiet=True)
+nltk.download("wordnet", quiet=True)
+nltk.download("punkt", quiet=True)
+
 stop_words_en = set(stopwords.words("english"))
 
 extra_stop_words_en = [
@@ -18,74 +23,106 @@ extra_stop_words_en = [
 
 stop_words_en.update(extra_stop_words_en)
 
-nltk.download("stopwords", quiet=True)
-nltk.download("wordnet", quiet=True)
-
 lemmatizer = WordNetLemmatizer()
 
 
 def clean_multicore(texts, processes, **kwargs):
     pool = Pool(processes=processes)
     return pool.map(
         func=partial(preprocess, **kwargs),
         iterable=texts,
     )
 
 
 def _transform_bigram(ngram_seq, ngrams):
     for ngram in ngram_seq:
         if ngram in ngrams:
-            yield "_".join(ngram)
+            yield ngram[0] + "_" + ngram[1]
 
-            ngram_seq.__next__()
+            next(ngram_seq)
         else:
             yield ngram[0]
 
 
-def preprocess(text, lowercase=False, clean_html=False, strip=False, remove_punctuation=False,
-               remove_stopwords_en=False, lemmatize=False, fix_single_quotes=False, strip_quotes=False,
-               remove_urls=False, bigrams: set = None):
+def _transform_trigram(ngram_seq, ngrams):
+    for ngram in ngram_seq:
+        if ngram in ngrams:
+            # yield ngram[0] + "_" + ngram[1] + "_" + ngram[2]
+            yield "_".join(ngram)
+
+            next(ngram_seq)
+            next(ngram_seq)
+        else:
+            yield ngram[0]
+
+
+SINGLE_QUOTES = ("’", "`", "‘")
+SINGLE_QUOTE_TRANS = str.maketrans("".join(SINGLE_QUOTES), "".join(repeat("'", len(SINGLE_QUOTES))))
+
+DASHES = ("–", "⸺", "–", "—")
+DASHES_TRANS = str.maketrans("".join(DASHES), "".join(repeat("-", len(DASHES))))
+
+DASHES_RE = re.compile(r"-+")
+
+SPECIAL_PUNCTUATION = ";:\"/()|*=>"
+SPECIAL_PUNCTUATION_TRANS = str.maketrans(SPECIAL_PUNCTUATION, " " * len(SPECIAL_PUNCTUATION))
+
+PUNCTUATION = ".,!?"
+PUNCTUATION_TRANS = str.maketrans(PUNCTUATION, " " * len(PUNCTUATION))
+
+
+def preprocess(text, lowercase=False, clean_html=False, remove_punctuation=False, remove_special_punctuation=False,
+               remove_stopwords_en=False, lemmatize=False, fix_single_quotes=False, strip_quotes=False,
+               strip_dashes=False,
+               remove_urls=False, bigrams: set = None, trigrams: set = None, remove_numbers=False,
+               use_nltk_tokenizer=False):
     if lowercase:
         text = text.lower()
 
     if fix_single_quotes:
-        text = text.replace("`", "'")
+        text = text.translate(SINGLE_QUOTE_TRANS)
+
+    text = text.translate(DASHES_TRANS)
 
+    if strip_dashes:
+        text = DASHES_RE.sub("-", text)
+
     if remove_urls:
         text = LINK_RE.sub(" ", text)
 
     if clean_html:
         try:
-            root = etree.fromstring(text.replace("&", ""))
-            text = "".join(get_text(root))
+            text = "<root>" + text + "</root>"
+
+            parser = etree.XMLParser(recover=True)
+            root = etree.fromstring(text, parser)
+
+            text = " ".join(get_text(root))
         except:
             pass
 
     if remove_punctuation:
-        text = PUNCTUATION_RE.sub(" ", text)
+        text = text.translate(PUNCTUATION_TRANS)
 
-    text = WHITESPACE_RE.sub(" ", text)
+    if remove_special_punctuation:
+        text = text.translate(SPECIAL_PUNCTUATION_TRANS)
 
+    if use_nltk_tokenizer:
+        words = word_tokenize(text, language="english")
+    else:
+        words = text.split()
+
     if strip_quotes:
-        words = text.split(" ")
-        text = " ".join(w.strip("\"'") for w in words)
+        words = map(lambda w: w.strip("\"'“”"), words)
+
+    if strip_dashes:
+        words = map(lambda w: w.strip("-"), words)
 
     if bigrams:
-        words = text.split(" ")
-        words.append("*")
-        text = " ".join(_transform_bigram(nltk.bigrams(words), bigrams))
+        words = _transform_bigram(nltk.bigrams(chain(words, ("*",))), bigrams)
 
-    if remove_stopwords_en or lemmatize:
-        words = text.split(" ")
+    if trigrams:
+        words = _transform_trigram(nltk.trigrams(chain(words, ("*", "*"))), trigrams)
 
-        if lemmatize and remove_stopwords_en:
-            text = " ".join(lemmatizer.lemmatize(w) for w in words if w not in stop_words_en)
-        elif not lemmatize and remove_stopwords_en:
-            text = " ".join(w for w in words if w not in stop_words_en)
-        elif lemmatize and not remove_stopwords_en:
-            text = " ".join(lemmatizer.lemmatize(w) for w in words)
+    if remove_numbers:
+        words = filter(lambda w: not w.isnumeric(), words)
 
-    if strip:
-        text = text.strip()
+    if lemmatize:
+        words = map(lambda w: lemmatizer.lemmatize(w), words)
 
-    return text
+    if remove_stopwords_en:
+        words = filter(lambda w: w not in stop_words_en, words)
+
+    return filter(lambda w: w != "", words)
```
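The key behavioral change: preprocess() is now a lazy token pipeline that returns an iterator of words rather than a joined string, which is why every test below switches to `" ".join(cleaned)`. A minimal sketch mirroring one of the test cases:

```python
from hexlib.text import preprocess

tokens = preprocess(
    "<div>Hello, <strong>Worlds!</strong></div>",
    clean_html=True,
    lowercase=True,
    remove_punctuation=True,
    lemmatize=True,
)

print(" ".join(tokens))  # "hello world" -- note: tokens is a one-shot iterator
```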
```diff
@@ -81,6 +81,14 @@ def cookiejar_filter(cj, pattern):
     return filtered_cj
 
 
+def cookiejar_filter_name(cj, pattern):
+    filtered_cj = RequestsCookieJar()
+    for c in cj:
+        if re.match(pattern, c.name):
+            filtered_cj.set_cookie(c)
+    return filtered_cj
+
+
 def url_query_value(url, arg, as_list=False):
     qs = urlparse(url).query
     parsed_qs = parse_qs(qs)
@@ -118,6 +126,7 @@ def download_file(url, destination, session=None, headers=None, overwrite=False,
                     "url": url,
                     "timestamp": datetime.utcnow().replace(microsecond=0).isoformat()
                 }))
+            r.close()
             break
         except Exception as e:
             if err_cb:
@@ -134,7 +143,7 @@ class Web:
         self._logger = logger
         self._current_req = None
         if retry_codes is None or not retry_codes:
-            retry_codes = {502, 504, 522, 524, 429}
+            retry_codes = {500, 502, 503, 504, 520, 522, 524, 429}
         self._retry_codes = retry_codes
 
         if session is None:
@@ -166,8 +175,21 @@ class Web:
 
         self._get = get
 
+        @rate_limit(rps)
+        @retry(retries, callback=self._error_callback, retry_sleep=retry_sleep)
+        def post(url, **kwargs):
+            self._current_req = "POST", url, kwargs
+            r = self._session.post(url, **kwargs)
+
+            if r.status_code in self._retry_codes:
+                raise Exception(f"HTTP {r.status_code}")
+            return r
+
+        self._post = post
+
     def _error_callback(self, e):
-        self._logger.critical(f"{self._format_url(*self._current_req)}: {e}")
+        if self._logger:
+            self._logger.critical(f"{self._format_url(*self._current_req)}: {e}")
 
     def _format_url(self, method, url, kwargs, r=None):
         if "params" in kwargs and kwargs["params"]:
@@ -188,6 +210,18 @@ class Web:
             self._logger.debug(self._format_url("GET", url, kwargs, r) + " %.2fs" % (time() - time_start))
         return r
 
+    def post(self, url, **kwargs):
+
+        time_start = time()
+        r = self._post(url, **kwargs)
+
+        if self._cookie_file:
+            save_cookiejar(self._session.cookies, self._cookie_file)
+
+        if self._logger and r is not None:
+            self._logger.debug(self._format_url("POST", url, kwargs, r) + " %.2fs" % (time() - time_start))
+        return r
+
     def get_soup(self, url, **kwargs):
         r = self.get(url, **kwargs)
         if not r:
```
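With the new symmetric post() wrapper, POST requests get the same rate limiting, retries, and cookie persistence as GET. A usage sketch (the target URL and payload are illustrative):

```python
from hexlib.web import Web

web = Web(rps=1, retries=3, cookie_file="cookies.txt")

r = web.get("https://example.com/api/items")
r = web.post("https://example.com/api/items", json={"name": "test"})
# both calls share the session, honor rps, and retry on the expanded
# status set {500, 502, 503, 504, 520, 522, 524, 429}
print(r.status_code)
```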
8 setup.py

```diff
@@ -2,7 +2,7 @@ from setuptools import setup
 
 setup(
     name="hexlib",
-    version="1.42",
+    version="1.89",
     description="Misc utility methods",
     author="simon987",
     author_email="me@simon987.net",
@@ -12,7 +12,9 @@ setup(
         "data/*"
     ]},
     install_requires=[
-        "ImageHash", "influxdb", "siphash", "python-dateutil", "redis", "orjson", "zstandard",
-        "u-msgpack-python", "psycopg2-binary", "fake-useragent", "bs4", "lxml", "nltk"
+        "influxdb", "siphash", "python-dateutil", "redis", "orjson", "zstandard",
+        "u-msgpack-python", "psycopg2-binary", "bs4", "lxml", "nltk", "numpy",
+        "matplotlib", "fake-useragent @ git+https://github.com/Jordan9675/fake-useragent",
+        "requests", "pydantic==1.10.13"
     ]
 )
```
0 test/__init__.py (new file)

```diff
@@ -110,3 +110,34 @@ class TestPersistentState(TestCase):
             del s["a"][456]
         except Exception as e:
             self.fail(e)
+
+    def test_deserialize_get_set(self):
+        s = PersistentState()
+
+        s["a"][0] = {"x": b'abc'}
+
+        self.assertEqual(s["a"][0]["x"], b'abc')
+
+    def test_deserialize_sql(self):
+        s = PersistentState()
+
+        s["a"][0] = {"x": b'abc'}
+
+        self.assertEqual(list(s["a"].sql("WHERE 1=1"))[0]["x"], b'abc')
+
+    def test_deserialize_iter(self):
+        s = PersistentState()
+
+        s["a"][0] = {"x": b'abc'}
+
+        self.assertEqual(list(s["a"])[0]["x"], b'abc')
+
+    def test_drop_table(self):
+        s = PersistentState()
+
+        s["a"][0] = {"x": 1}
+        s["a"][1] = {"x": 2}
+        self.assertEqual(len(list(s["a"])), 2)
+
+        del s["a"]
+        self.assertEqual(len(list(s["a"])), 0)
```
110 test/test_PydanticTable.py (new file)

```python
import os
from datetime import datetime
from enum import Enum
from typing import Optional
from unittest import TestCase

from pydantic import BaseModel
from pydantic.types import List

from hexlib.db import PersistentState


class Status(Enum):
    yes = "yes"
    no = "no"


class Point(BaseModel):
    x: int
    y: int


class Polygon(BaseModel):
    points: List[Point] = []
    created_date: datetime
    status: Status = Status("yes")


class TestPydanticTable(TestCase):
    def tearDown(self) -> None:
        if os.path.exists("state.db"):
            os.remove("state.db")

    def setUp(self) -> None:
        if os.path.exists("state.db"):
            os.remove("state.db")

    def test_get_set(self):
        s = PersistentState()

        val = Polygon(
            created_date=datetime(year=2000, day=1, month=1),
            points=[
                Point(x=1, y=2),
                Point(x=3, y=4),
            ],
        )

        s["a"]["1"] = val

        self.assertEqual(s["a"]["1"].points[0].x, 1)
        self.assertEqual(s["a"]["1"].status, Status("yes"))
        self.assertEqual(s["a"]["1"].points[1].x, 3)
        self.assertEqual(s["a"]["1"].created_date.year, 2000)

    def test_update(self):
        s = PersistentState()

        val = Polygon(
            created_date=datetime(year=2000, day=1, month=1),
            points=[
                Point(x=1, y=2),
                Point(x=3, y=4),
            ]
        )

        s["a"]["1"] = val

        self.assertEqual(s["a"]["1"].points[0].x, 1)

        val.points[0].x = 2
        s["a"]["1"] = val
        self.assertEqual(s["a"]["1"].points[0].x, 2)

    def test_sql(self):
        s = PersistentState()

        s["b"]["1"] = Polygon(
            created_date=datetime(year=2000, day=1, month=1),
            points=[]
        )
        s["b"]["2"] = Polygon(
            created_date=datetime(year=2010, day=1, month=1),
            points=[]
        )

        result = list(s["b"].sql(
            "WHERE json->>'created_date' LIKE '2000-%'"
        ))

        self.assertEqual(len(result), 1)
        self.assertEqual(result[0].created_date.year, 2000)

    def test_iterate(self):
        s = PersistentState()

        s["b"]["1"] = Polygon(
            created_date=datetime(year=2000, day=1, month=1),
            points=[]
        )
        s["b"]["2"] = Polygon(
            created_date=datetime(year=2010, day=1, month=1),
            points=[]
        )

        result = list(s["b"])

        self.assertEqual(len(result), 2)
        self.assertEqual(result[0].created_date.year, 2000)
        self.assertEqual(result[1].created_date.year, 2010)
```
```diff
@@ -1,10 +1,15 @@
 from unittest import TestCase
 
 from hexlib.db import VolatileState, VolatileBooleanState, VolatileQueue
+from hexlib.env import get_redis
 
 
 class TestVolatileState(TestCase):
 
+    def setUp(self) -> None:
+        rdb = get_redis()
+        rdb.delete("test1a", "test1b", "test1c", "test1:a", "test2b")
+
     def test_get_set(self):
         s = VolatileState(prefix="test1")
         val = {
@@ -53,6 +58,10 @@ class TestVolatileState(TestCase):
 
 class TestVolatileBoolState(TestCase):
 
+    def setUp(self) -> None:
+        rdb = get_redis()
+        rdb.delete("test1a", "test1b", "test1c", "test1:a", "test2b")
+
     def test_get_set(self):
         s = VolatileBooleanState(prefix="test1")
```
```diff
@@ -2,12 +2,16 @@ import os
 from unittest import TestCase
 
 from hexlib.web import download_file
+import warnings
 
 
 class TestDownloadFile(TestCase):
 
+    def setUp(self) -> None:
+        warnings.filterwarnings(action="ignore", category=ResourceWarning)
+
     def test_download_file(self):
-        download_file("http://ovh.net/files/10Mb.dat", "/tmp/10Mb.dat")
+        download_file("https://github.com/simon987/hexlib/raw/master/10MB.bin", "/tmp/10Mb.dat")
         self.assertTrue(os.path.exists("/tmp/10Mb.dat"))
         os.remove("/tmp/10Mb.dat")
 
@@ -22,8 +26,8 @@ class TestDownloadFile(TestCase):
         self.assertEqual(len(exceptions), 3)
 
     def test_download_file_meta(self):
-        download_file("http://ovh.net/files/10Mb.dat", "/tmp/10Mb.dat", save_meta=True)
+        download_file("https://github.com/simon987/hexlib/raw/master/10MB.bin", "/tmp/10Mb.dat", save_meta=True)
         self.assertTrue(os.path.exists("/tmp/10Mb.dat"))
         self.assertTrue(os.path.exists("/tmp/10Mb.dat.meta"))
         os.remove("/tmp/10Mb.dat")
-        # os.remove("/tmp/10Mb.dat.meta")
+        os.remove("/tmp/10Mb.dat.meta")
```
62 test/test_redis_mq.py (new file)

```python
from unittest import TestCase

from hexlib.env import get_redis
from hexlib.mq import RedisMQ, parse_routing_key, RoutingKeyParts


class TestRedisMQ(TestCase):

    def setUp(self) -> None:
        self.rdb = get_redis()
        self.rdb.delete("pending.test", "test_mq", "arc.test.msg.x")

    def test_ack(self):
        mq = RedisMQ(self.rdb, consumer_name="test", max_pending_time=2, arc_lists=["arc"])

        mq.publish({"_id": 1}, item_project="test", item_type="msg")

        topic1, msg1, ack1 = next(mq.read_messages(topics=["arc.*"]))

        self.assertEqual(self.rdb.hlen("pending.test"), 1)

        ack1()

        self.assertEqual(self.rdb.hlen("pending.test"), 0)

    def test_pending_timeout(self):
        mq = RedisMQ(self.rdb, consumer_name="test", max_pending_time=0.5, arc_lists=["arc"], wait=0)

        mq.publish({"_id": 1}, item_project="test", item_type="msg")

        topic1, msg1, ack1 = next(mq.read_messages(topics=["arc.test.*"]))

        self.assertEqual(self.rdb.hlen("pending.test"), 1)

        # msg1 will timeout after 0.5s, next iteration takes ceil(0.5)s
        topic1_, msg1_, ack1_ = next(mq.read_messages(topics=["arc.test.*"]))
        self.assertEqual(self.rdb.hlen("pending.test"), 1)

        ack1_()

        self.assertEqual(self.rdb.hlen("pending.test"), 0)

        self.assertEqual(msg1, msg1_)

    def test_no_id_field(self):
        mq = RedisMQ(self.rdb, consumer_name="test", max_pending_time=0.5, arc_lists=["arc"], wait=0)

        with self.assertRaises(ValueError):
            mq.publish({"a": 1}, item_project="test", item_type="msg")


class TestRoutingKey(TestCase):

    def test1(self):
        key = "arc.chan.4chan.post.b"
        parts = parse_routing_key(key)
        self.assertEqual(parts, RoutingKeyParts("arc", "chan", "4chan", "post", "b"))

    def test2(self):
        key = "arc.reddit.submission.birdpics"
        parts = parse_routing_key(key)
        self.assertEqual(parts, RoutingKeyParts("arc", "reddit", None, "submission", "birdpics"))
```
```diff
@@ -13,7 +13,7 @@ class TestText(TestCase):
         )
         expected = ""
 
-        self.assertEqual(cleaned, expected)
+        self.assertEqual(" ".join(cleaned), expected)
 
     def test_html_1(self):
         text = "<div>Hello, <strong>world</strong></div>"
@@ -23,7 +23,7 @@ class TestText(TestCase):
         )
         expected = "Hello, world"
 
-        self.assertEqual(cleaned, expected)
+        self.assertEqual(" ".join(cleaned), expected)
 
     def test_html_2(self):
         text = "<div>Hello, <strong>world</strong></div>"
@@ -34,18 +34,7 @@ class TestText(TestCase):
         )
         expected = "hello, world"
 
-        self.assertEqual(cleaned, expected)
-
-    def test_html_3(self):
-        text = "<div>\n Hello, \t\n<strong> world </strong>\n\t</div>"
-        cleaned = preprocess(
-            text,
-            clean_html=True,
-            lowercase=True,
-        )
-        expected = " hello, world "
-
-        self.assertEqual(cleaned, expected)
+        self.assertEqual(" ".join(cleaned), expected)
 
     def test_html_4(self):
         text = "<div>\n Hello, \t\n<strong> world </strong>\n\t</div>"
@@ -53,11 +42,10 @@ class TestText(TestCase):
             text,
             clean_html=True,
             lowercase=True,
-            strip=True
         )
         expected = "hello, world"
 
-        self.assertEqual(cleaned, expected)
+        self.assertEqual(" ".join(cleaned), expected)
 
     def test_html_5(self):
         text = "<div>\n Hello, \t\n<strong> world </strong>\n\t</div>"
@@ -65,12 +53,11 @@ class TestText(TestCase):
             text,
             clean_html=True,
             lowercase=True,
-            strip=True,
             remove_punctuation=True
         )
         expected = "hello world"
 
-        self.assertEqual(cleaned, expected)
+        self.assertEqual(" ".join(cleaned), expected)
 
     def test_html_6(self):
         text = "<div>\n Hello, \t\n<strong>a the world </strong>\n\t</div>"
@@ -79,12 +66,11 @@ class TestText(TestCase):
             clean_html=True,
             lowercase=True,
             remove_punctuation=True,
-            strip=True,
             remove_stopwords_en=True
         )
         expected = "hello world"
 
-        self.assertEqual(cleaned, expected)
+        self.assertEqual(" ".join(cleaned), expected)
 
     def test_html_7(self):
         text = "<div>\n Hello, \t\n<strong>a the worlds </strong>\n\t</div>"
@@ -93,13 +79,12 @@ class TestText(TestCase):
             clean_html=True,
             lowercase=True,
             remove_punctuation=True,
-            strip=True,
             remove_stopwords_en=True,
             lemmatize=True
         )
         expected = "hello world"
 
-        self.assertEqual(cleaned, expected)
+        self.assertEqual(" ".join(cleaned), expected)
 
     def test_html_8(self):
         text = "<div>\n Hello, \t\n<strong>a the worlds! </strong>\n\t</div>"
@@ -108,13 +93,12 @@ class TestText(TestCase):
             clean_html=True,
             lowercase=True,
             remove_punctuation=True,
-            strip=True,
             remove_stopwords_en=True,
             lemmatize=True
         )
         expected = "hello world"
 
-        self.assertEqual(cleaned, expected)
+        self.assertEqual(" ".join(cleaned), expected)
 
     def test_html_9(self):
         text = "<div>\n Hello, \t\n<strong>world! it's it`s </strong>\n\t</div>"
@@ -123,13 +107,23 @@ class TestText(TestCase):
             clean_html=True,
             lowercase=True,
             remove_punctuation=True,
-            strip=True,
             lemmatize=True,
             fix_single_quotes=True
         )
         expected = "hello world it's it's"
 
-        self.assertEqual(cleaned, expected)
+        self.assertEqual(" ".join(cleaned), expected)
+
+    def test_single_quote(self):
+        text = "it's it`s it’s"
+        cleaned = preprocess(
+            text,
+            lowercase=True,
+            fix_single_quotes=True
+        )
+        expected = "it's it's it's"
+
+        self.assertEqual(" ".join(cleaned), expected)
 
     def test_html_10(self):
         text = "<div>\n Hello, \t\n<strong>world! it's it`s https://google.ca/test/abc.pdf </strong>\n\t</div>"
@@ -138,14 +132,13 @@ class TestText(TestCase):
             clean_html=True,
             lowercase=True,
             remove_punctuation=True,
-            strip=True,
             lemmatize=True,
             fix_single_quotes=True,
             remove_urls=True
         )
         expected = "hello world it's it's"
 
-        self.assertEqual(cleaned, expected)
+        self.assertEqual(" ".join(cleaned), expected)
 
     def test_html_11(self):
         text = "<div>\n Hello, \t\n<strong>world! it's it`s & | </strong>\n\t</div>"
@@ -154,15 +147,66 @@ class TestText(TestCase):
             clean_html=True,
             lowercase=True,
             remove_punctuation=True,
-            strip=True,
             lemmatize=True,
             fix_single_quotes=True,
             remove_stopwords_en=True,
             remove_urls=True
         )
-        expected = "hello world"
+        expected = "hello world |"
 
-        self.assertEqual(cleaned, expected)
+        self.assertEqual(" ".join(cleaned), expected)
+
+    def test_html_no_root(self):
+        text = "<a href=\"#p217709510\" class=\"quotelink\">>>217709510</a><br>Is there a<wbr>servant that is against civilization and humanity?<br>Literally instant summon."
+
+        cleaned = preprocess(
+            text,
+            clean_html=True,
+            lowercase=True,
+            remove_punctuation=True,
+            lemmatize=False,
+            fix_single_quotes=True,
+            remove_stopwords_en=False,
+            remove_urls=False
+        )
+
+        expected = ">>217709510 is there a servant that is against civilization and humanity literally instant summon"
+        self.assertEqual(" ".join(cleaned), expected)
+
+    def test_html_entity(self):
+        text = "doesn't"
+
+        cleaned = preprocess(
+            text,
+            clean_html=True,
+            lowercase=True,
+            remove_punctuation=True,
+            lemmatize=False,
+            fix_single_quotes=True,
+            remove_stopwords_en=False,
+            remove_urls=False
+        )
+
+        expected = "doesn't"
+        self.assertEqual(" ".join(cleaned), expected)
+
+    def test_html_invalid_attribute(self):
+        text = '<root><iframe width="560" height="315" src=" " title="youtube video player" frameborder="0" allowfullscreen></iframe></root>'
+
+        cleaned = preprocess(
+            text,
+            clean_html=True,
+            lowercase=True,
+            remove_punctuation=True,
+            lemmatize=False,
+            fix_single_quotes=True,
+            remove_stopwords_en=False,
+            remove_urls=False
+        )
+
+        expected = ""
+
+        self.assertEqual(" ".join(cleaned), expected)
 
     def test_bigrams(self):
         text = "x A b c d e f g h"
@@ -177,4 +221,59 @@ class TestText(TestCase):
         )
         expected = "x a_b c_d e f_g h"
 
-        self.assertEqual(cleaned, expected)
+        self.assertEqual(" ".join(cleaned), expected)
+
+    def test_trigrams(self):
+        text = "x A b c d e f g h"
+        cleaned = preprocess(
+            text,
+            lowercase=True,
+            trigrams={
+                ("a", "b", "c"),
+                ("e", "f", "g"),
+            }
+        )
+        expected = "x a_b_c d e_f_g h"
+
+        self.assertEqual(" ".join(cleaned), expected)
+
+    def test_remove_numbers(self):
+        text = "Hello1 test1124test 12 1 1111111 world"
+        cleaned = preprocess(
+            text,
+            lowercase=True,
+            remove_numbers=True
+        )
+        expected = "hello1 test1124test world"
+
+        self.assertEqual(" ".join(cleaned), expected)
+
+    def test_strip_quotes(self):
+        text = "'hi' “test” 'hello\""
+        cleaned = preprocess(
+            text,
+            strip_quotes=True
+        )
+        expected = "hi test hello"
+
+        self.assertEqual(" ".join(cleaned), expected)
+
+    def test_strip_dashes(self):
+        text = "yes -But something-something -- hello aa--bb"
+        cleaned = preprocess(
+            text,
+            strip_dashes=True
+        )
+        expected = "yes But something-something hello aa-bb"
+
+        self.assertEqual(" ".join(cleaned), expected)
+
+    def test_word_tokenize(self):
+        text = "i cannot believe'"
+        cleaned = preprocess(
+            text,
+            use_nltk_tokenizer=True
+        )
+        expected = "i can not believe '"
+
+        self.assertEqual(" ".join(cleaned), expected)
```