mirror of
https://github.com/simon987/hexlib.git
synced 2025-04-10 14:06:43 +00:00
Compare commits
5 Commits
18cd59fc4a
...
32119535ae
Author | SHA1 | Date | |
---|---|---|---|
32119535ae | |||
2ffaa4a5b3 | |||
067a20f7a8 | |||
00323ea576 | |||
45b5803c40 |
@ -3,4 +3,4 @@ import re
|
||||
# Matches http/https URLs: scheme, host of word chars/dots/hyphens, a 2-4 char
# TLD, then everything up to whitespace, '<', or a quote character.
LINK_RE = re.compile(r"(https?://[\w\-_.]+\.[a-z]{2,4}([^\s<'\"]*|$))")
|
||||
# Captures the double-quoted value of an HTML href attribute.
HTML_HREF_RE = re.compile(r"href=\"([^\"]+)\"")
|
||||
# One or more whitespace characters; used both to collapse runs of whitespace
# and to split text into words.
WHITESPACE_RE = re.compile(r"\s+")
|
||||
# NOTE(review): two PUNCTUATION_RE assignments appear here (diff before/after
# versions); the second, broader character class is the one that takes effect.
PUNCTUATION_RE = re.compile(r"[.,;:\"']+")
|
||||
PUNCTUATION_RE = re.compile(r"[.,;:\"!?/()|*=]+")
|
||||
|
@ -1,43 +1,65 @@
|
||||
from functools import partial
|
||||
from multiprocessing.pool import Pool
|
||||
|
||||
import nltk.corpus
|
||||
from lxml import etree
|
||||
from nltk.corpus import stopwords
|
||||
from nltk.stem import WordNetLemmatizer
|
||||
|
||||
from .regex import WHITESPACE_RE, PUNCTUATION_RE
|
||||
from .regex import WHITESPACE_RE, PUNCTUATION_RE, LINK_RE
|
||||
|
||||
get_text = etree.XPath("//text()")
|
||||
|
||||
stop_words_en = set(stopwords.words("english"))
|
||||
|
||||
extra_stop_words_en = [
|
||||
"u", "&", "-", "--"
|
||||
]
|
||||
|
||||
stop_words_en.update(extra_stop_words_en)
|
||||
|
||||
nltk.download("stopwords", quiet=True)
|
||||
nltk.download("wordnet", quiet=True)
|
||||
|
||||
lemmatizer = WordNetLemmatizer()
|
||||
|
||||
|
||||
def clean(text, compress_whitespace=False, lowercase=False, clean_html=False, strip=False, remove_punctuation=False,
|
||||
remove_stopwords_en=False, lemmatize=False):
|
||||
if compress_whitespace and remove_stopwords_en:
|
||||
raise ValueError("Redundant flags: remove_stopwords implies compress_whitespace")
|
||||
def clean_multicore(texts, processes, **kwargs):
    """Clean every text in *texts* in parallel.

    Args:
        texts: Iterable of strings, each passed to ``clean``.
        processes: Number of worker processes in the pool.
        **kwargs: Cleaning flags forwarded verbatim to ``clean`` for every text.

    Returns:
        list: Cleaned texts, in the same order as the input.
    """
    # Use the pool as a context manager so its worker processes are
    # terminated even if clean() raises; the previous version created the
    # Pool and never close()d/join()ed it, leaking worker processes.
    with Pool(processes=processes) as pool:
        return pool.map(
            func=partial(clean, **kwargs),
            iterable=texts,
        )
|
||||
|
||||
|
||||
def clean(text, lowercase=False, clean_html=False, strip=False, remove_punctuation=False,
|
||||
remove_stopwords_en=False, lemmatize=False, fix_single_quotes=False, strip_quotes=False,
|
||||
remove_urls=False):
|
||||
if fix_single_quotes:
|
||||
text = text.replace("`", "'")
|
||||
|
||||
if remove_urls:
|
||||
text = LINK_RE.sub(" ", text)
|
||||
|
||||
if clean_html:
|
||||
try:
|
||||
root = etree.fromstring(text)
|
||||
root = etree.fromstring(text.replace("&", ""))
|
||||
text = "".join(get_text(root))
|
||||
except:
|
||||
pass
|
||||
|
||||
if remove_punctuation:
|
||||
text = PUNCTUATION_RE.sub(" ", text)
|
||||
|
||||
if lowercase:
|
||||
text = text.lower()
|
||||
|
||||
if compress_whitespace:
|
||||
if not remove_stopwords_en or not lemmatize or not strip_quotes:
|
||||
text = WHITESPACE_RE.sub(" ", text)
|
||||
|
||||
if strip:
|
||||
text = text.strip()
|
||||
|
||||
if remove_punctuation:
|
||||
text = PUNCTUATION_RE.sub("", text)
|
||||
if strip_quotes:
|
||||
words = WHITESPACE_RE.split(text)
|
||||
text = " ".join(w.strip("\"'") for w in words)
|
||||
|
||||
if remove_stopwords_en or lemmatize:
|
||||
words = WHITESPACE_RE.split(text)
|
||||
@ -49,4 +71,7 @@ def clean(text, compress_whitespace=False, lowercase=False, clean_html=False, st
|
||||
elif lemmatize and not remove_stopwords_en:
|
||||
text = " ".join(lemmatizer.lemmatize(w) for w in words)
|
||||
|
||||
if strip:
|
||||
text = text.strip()
|
||||
|
||||
return text
|
||||
|
@ -42,7 +42,6 @@ class TestText(TestCase):
|
||||
text,
|
||||
clean_html=True,
|
||||
lowercase=True,
|
||||
compress_whitespace=True
|
||||
)
|
||||
expected = " hello, world "
|
||||
|
||||
@ -54,7 +53,6 @@ class TestText(TestCase):
|
||||
text,
|
||||
clean_html=True,
|
||||
lowercase=True,
|
||||
compress_whitespace=True,
|
||||
strip=True
|
||||
)
|
||||
expected = "hello, world"
|
||||
@ -67,7 +65,6 @@ class TestText(TestCase):
|
||||
text,
|
||||
clean_html=True,
|
||||
lowercase=True,
|
||||
compress_whitespace=True,
|
||||
strip=True,
|
||||
remove_punctuation=True
|
||||
)
|
||||
@ -103,3 +100,66 @@ class TestText(TestCase):
|
||||
expected = "hello world"
|
||||
|
||||
self.assertEqual(cleaned, expected)
|
||||
|
||||
def test_html_8(self):
    """Stop-word removal + lemmatization leaves only lemmatized content words."""
    markup = "<div>\n Hello, \t\n<strong>a the worlds! </strong>\n\t</div>"
    options = dict(
        clean_html=True,
        lowercase=True,
        remove_punctuation=True,
        strip=True,
        remove_stopwords_en=True,
        lemmatize=True,
    )
    result = clean(markup, **options)
    self.assertEqual(result, "hello world")
|
||||
|
||||
def test_html_9(self):
    """Backtick quotes are normalized to apostrophes and survive punctuation removal."""
    markup = "<div>\n Hello, \t\n<strong>world! it's it`s </strong>\n\t</div>"
    options = dict(
        clean_html=True,
        lowercase=True,
        remove_punctuation=True,
        strip=True,
        lemmatize=True,
        fix_single_quotes=True,
    )
    result = clean(markup, **options)
    self.assertEqual(result, "hello world it's it's")
|
||||
|
||||
def test_html_10(self):
    """URLs are stripped entirely when remove_urls is set."""
    markup = "<div>\n Hello, \t\n<strong>world! it's it`s https://google.ca/test/abc.pdf </strong>\n\t</div>"
    options = dict(
        clean_html=True,
        lowercase=True,
        remove_punctuation=True,
        strip=True,
        lemmatize=True,
        fix_single_quotes=True,
        remove_urls=True,
    )
    result = clean(markup, **options)
    self.assertEqual(result, "hello world it's it's")
|
||||
|
||||
def test_html_11(self):
    """Extra stop words ('u', '&') and stray symbols vanish with all flags on."""
    markup = "<div>\n Hello, \t\n<strong>world! it's it`s u & | </strong>\n\t</div>"
    options = dict(
        clean_html=True,
        lowercase=True,
        remove_punctuation=True,
        strip=True,
        lemmatize=True,
        fix_single_quotes=True,
        remove_stopwords_en=True,
        remove_urls=True,
    )
    result = clean(markup, **options)
    self.assertEqual(result, "hello world")
|
||||
|
Loading…
x
Reference in New Issue
Block a user