From 23775ec126bd2f3e32261d6ed7bbbce30e5927dd Mon Sep 17 00:00:00 2001
From: simon
Date: Mon, 5 Feb 2018 22:05:07 -0500
Subject: [PATCH] Fixed bugs, enhanced parser

---
 crawler.py                                    |  43 ++++-
 parser.py                                     | 182 ++++++++++++------
 reddit_bot.py                                 |  57 +++++-
 reports.py                                    |   2 +
 spec/Crawler_spec.py                          |  32 +++
 spec/Parser_spec.py                           |  67 ++++++-
 spec/RedditBot_spec.py                        |  61 +++++-
 ...est_apache_root.html => test_apache1.html} |   0
 spec/test_apache3.html                        |  32 +++
 spec/test_apache4.html                        |  38 ++++
 ...{test_nginx_root.html => test_nginx1.html} |   0
 static/js/report.js                           |  25 ++-
 12 files changed, 459 insertions(+), 80 deletions(-)
 rename spec/{test_apache_root.html => test_apache1.html} (100%)
 create mode 100644 spec/test_apache3.html
 create mode 100644 spec/test_apache4.html
 rename spec/{test_nginx_root.html => test_nginx1.html} (100%)

diff --git a/crawler.py b/crawler.py
index 0c898bc..e577422 100644
--- a/crawler.py
+++ b/crawler.py
@@ -2,19 +2,41 @@ import requests
 from parser import NginxParser, ApacheParser
 from reports import ReportSaver, ReportBuilder
 
-headers = {
-    'User-Agent': "Mozilla/5.0 (X11; Linux x86_64; rv:52.0) Gecko/20100101 Firefox/52.0",
-    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"
-}
-
 
 class Crawler:
-    def __init__(self, url):
-        self.parser = NginxParser()
+    def __init__(self, url, test_url):
         self.files = []
         self.base_url = url
 
+        if test_url:
+            # Test url
+            r = requests.get(self.base_url, timeout=30)
+
+            self.parser = self.guess_parser(r.text, r.headers)()
+
+            print("Using " + self.parser.__class__.__name__ + " as parser")
+
+        else:
+            self.parser = None
+
+    @staticmethod
+    def guess_parser(text, headers):
+
+        server = headers["Server"] if "Server" in headers else ""
+
+        # try nginx
+        parser = NginxParser()
+        if parser.page_is_valid(text):
+            return NginxParser
+
+        # Try apache
+        parser = ApacheParser()
+        if parser.page_is_valid(text):
+            return ApacheParser
+
+        return None
+
     def crawl(self, address=None):
 
         if address is None:
@@ -53,6 +75,7 @@ class Crawler:
             f.write(report_saver.to_link_list())
 
 
-c = Crawler("http://dl.upload8.in/files/Serial/Altered%20Carbon/")
-c.crawl()
-c.store_report("000002")
+if __name__ == "__main__":
+    c = Crawler("https://repo.zenk-security.com/", True)
+    c.crawl()
+    c.store_report("000007")
diff --git a/parser.py b/parser.py
index c30a6b6..e71a30a 100644
--- a/parser.py
+++ b/parser.py
@@ -1,14 +1,43 @@
-from bs4 import BeautifulSoup
-from urllib.parse import urljoin
 import os
 import re
+from urllib.parse import urljoin
+
 import humanfriendly
+from bs4 import BeautifulSoup
 
 
 class PageParser:
+
+    def __init__(self):
+        self.col_start = None
+        self.col_end = None
+        self.size_unknown = True
+
     def get_links(self, text: str, base_url: str):
         raise NotImplementedError()
 
+    @staticmethod
+    def get_size_columns(cols):
+
+        for i in range(len(cols)):
+
+            if i == len(cols) - 1:
+                try:
+                    humanfriendly.parse_size(cols[i])
+                    return tuple([i, i])
+                except humanfriendly.InvalidSize:
+                    return None
+
+            try:
+                humanfriendly.parse_size(cols[i] + cols[i + 1])
+                return tuple([i, i + 1])
+            except humanfriendly.InvalidSize:
+                try:
+                    humanfriendly.parse_size(cols[i])
+                    return tuple([i, i])
+                except humanfriendly.InvalidSize:
+                    continue
+
     @staticmethod
     def get_parser_type(headers):
         """Get appropriate parser type for a a server based on its header"""
@@ -26,26 +55,82 @@ class PageParser:
 
     @staticmethod
     def file_type(link):
-        return "d" if link.endswith("/") else "f"
+
+        if link.endswith("/") or link.startswith("?"):
+            return "d"
+        return "f"
+
+
+    @staticmethod
+    def clean_page(text):
+        text = text.replace("", "")
+
+        return text
+
+    def get_size(self, cols):
+
+        # Figure out which column(s) is the size one
+        size_cols = self.get_size_columns(cols)
+        if size_cols is not None:
+            col_start, col_end = size_cols
+            self.size_unknown = False
+
+            size_human = cols[col_start] if col_start == col_end else cols[col_start] + cols[col_end]
+
+            try:
+                size = humanfriendly.parse_size(size_human)
+            except humanfriendly.InvalidSize:
+                size = 0
+        else:
+            size = 0
+
+        return size
 
 
 class NginxParser(PageParser):
 
     def get_links(self, text, base_url: str):
         links = dict()
 
-        soup = BeautifulSoup(text, "html.parser")
-        # Handle weird character formats and tag names
-        text = text.replace("
+            if len(self.tasks) > 0:
+                t = self.tasks.pop()
+                self.update_file()
+            else:
+                t = None
+
+        return t
+
+    def update_file(self):
+        with open(self.file, "w") as f:
+            json.dump(self.tasks, f, default=dumper)
+
+    def is_queued(self, post_id):
+
+        for task in self.tasks:
+            if task.post_id == post_id:
+                return True
+
+        return False
+
+
+def dumper(obj):
+    return obj.__dict__
 
 
 class RedditBot:
@@ -11,8 +65,7 @@ class RedditBot:
             self.crawled = []
         else:
             with open(log_file, "r") as f:
-                self.crawled = f.read().split("\n")
-                self.crawled = list(filter(None, self.crawled))
+                self.crawled = list(filter(None, f.read().split("\n")))
 
     def log_crawl(self, post_id):
 
diff --git a/reports.py b/reports.py
index 58607a2..b2cfa8e 100644
--- a/reports.py
+++ b/reports.py
@@ -91,6 +91,7 @@ class ReportSaver:
         out["ext_sizes"] = self.builder.get_ext_sizes()
         out["ext_sizes_formatted"] = self.builder.get_ext_sizes_formatted()
         out["report_time"] = str(self.builder.report_time)
+        out["total_count"] = len(self.builder.files)
 
         return json.dumps(out)
 
@@ -103,6 +104,7 @@ class ReportSaver:
         out["ext_count"] = self.builder.get_ext_counts()
         out["ext_sizes"] = self.builder.get_ext_sizes()
         out["report_time"] = str(self.builder.report_time)
+        out["total_count"] = len(self.builder.files)
 
         return json.dumps(out)
 
diff --git a/spec/Crawler_spec.py b/spec/Crawler_spec.py
index e69de29..22713f9 100644
--- a/spec/Crawler_spec.py
+++ b/spec/Crawler_spec.py
@@ -0,0 +1,32 @@
+from unittest import TestCase
+
+from parser import ApacheParser, NginxParser
+from crawler import Crawler
+
+
+class CrawlerTest(TestCase):
+
+    def test_guess_parser1(self):
+
+        with open("test_apache1.html", "r") as f:
+            text = f.read()
+
+        c = Crawler("http://some.website/", False)
+
+        self.assertEqual(c.guess_parser(text, {}), ApacheParser)
+
+    def test_guess_parser2(self):
+        with open("test_nginx1.html", "r") as f:
+            text = f.read()
+
+        c = Crawler("http://some.website", False)
+
+        self.assertEqual(c.guess_parser(text, {}), NginxParser)
+
+    def test_guess_parser3(self):
+        with open("test_invalid.html", "r") as f:
+            text = f.read()
+
+        c = Crawler("http://some.website", False)
+
+        self.assertEqual(c.guess_parser(text, {}), None)
\ No newline at end of file
diff --git a/spec/Parser_spec.py b/spec/Parser_spec.py
index 1c9122a..e952759 100644
--- a/spec/Parser_spec.py
+++ b/spec/Parser_spec.py
@@ -18,7 +18,7 @@ class NginxParserTest(TestCase):
     def setUp(self):
         self.parser = NginxParser()
 
-        root_page_file = open("test_nginx_root.html", "r")
+        root_page_file = open("test_nginx1.html", "r")
         self.root_page = root_page_file.read()
         root_page_file.close()
 
@@ -57,7 +57,7 @@ class ApacheParserTest(TestCase):
     def setUp(self):
         self.parser = ApacheParser()
 
-        root_page_file = open("test_apache_root.html", "r")
+        root_page_file = open("test_apache1.html", "r")
         self.root_page = root_page_file.read()
         root_page_file.close()
 
@@ -76,7 +76,7 @@ class ApacheParserTest(TestCase):
         result = self.parser.get_links(self.root_page, "https://keisari.net/videos/")
 
         self.assertEqual(result["happyday.mp4"]["size"], 772000)
-        self.assertEqual(result["alex_räjähtää.mp4"]["size"], 715000)
+        self.assertEqual(result["alex_r%c3%a4j%c3%a4ht%c3%a4%c3%a4.mp4"]["size"], 715000)
 
     def test_link_type(self):
         result = self.parser.get_links(self.root_page, "https://keisari.net/videos/")
@@ -109,16 +109,67 @@ class ApacheParserTest2(TestCase):
     def test_link_size(self):
         result = self.parser.get_links(self.root_page, self.base_url)
 
-        self.assertEqual(result["ƒhƒ‰ƒSƒ“ƒ{[ƒ‹Z.‘æ020˜b.u‚æ‚Ý‚ª‚¦‚éƒTƒCƒ„l“`àIŒå‹ó‚̃‹[ƒcv.wmv"]["size"], 179721000)
-        self.assertEqual(result["ƒhƒ‰ƒSƒ“ƒ{[ƒ‹Z.‘æ225˜b.u‹­‚¢‚ºƒ`ƒrƒbƒRII‚P‚W†‘å‹êíIHv.wmv"]["size"], 347507000)
+        self.assertEqual(result["ƒhƒ‰ƒSƒ“ƒ{[ƒ‹Z.‘æ011˜b.u‰F’ˆˆê‚Ì‹­íŽmƒTƒCƒ„l‚ß‚´‚ß‚éIv.wmv"]["size"], 232185000)
+        self.assertEqual(result["ƒhƒ‰ƒSƒ“ƒ{[ƒ‹Z.‘æ019˜b.ud—͂Ƃ̐킢Iƒoƒuƒ‹ƒXŒN‚ð‚‚©‚Ü‚¦‚ëv.wmv"]["size"], 185385000)
 
     def test_link_type(self):
         result = self.parser.get_links(self.root_page, self.base_url)
 
-        self.assertEqual(result["ƒhƒ‰ƒSƒ“ƒ{[ƒ‹Z.‘æ225˜b.u‹­‚¢‚ºƒ`ƒrƒbƒRII‚P‚W†‘å‹êíIHv.wmv"]["type"], "f")
-        self.assertEqual(result["ƒhƒ‰ƒSƒ“ƒ{[ƒ‹Z jpg/"]["type"], "d")
+        self.assertEqual(result["ƒhƒ‰ƒSƒ“ƒ{[ƒ‹Z.‘æ011˜b.u‰F’ˆˆê‚Ì‹­íŽmƒTƒCƒ„l‚ß‚´‚ß‚éIv.wmv"]["type"], "f")
+        self.assertEqual(result["ƒhƒ‰ƒSƒ“ƒ{[ƒ‹Z%20jpg/"]["type"], "d")
 
     def test_link_extension(self):
         result = self.parser.get_links(self.root_page, self.base_url)
 
-        self.assertEqual(result["ƒhƒ‰ƒSƒ“ƒ{[ƒ‹Z.‘æ225˜b.u‹­‚¢‚ºƒ`ƒrƒbƒRII‚P‚W†‘å‹êíIHv.wmv"]["ext"], "wmv")
\ No newline at end of file
+        self.assertEqual(result["ƒhƒ‰ƒSƒ“ƒ{[ƒ‹Z.‘æ011˜b.u‰F’ˆˆê‚Ì‹­íŽmƒTƒCƒ„l‚ß‚´‚ß‚éIv.wmv"]["ext"], "wmv")
+
+
+class ApacheParserTest3(TestCase):
+
+    def setUp(self):
+        self.parser = ApacheParser()
+
+        root_page_file = open("test_apache3.html", "r")
+        self.root_page = root_page_file.read()
+        self.base_url = "http://files.duspectacle.com/mp3/Jardinets/"
+        root_page_file.close()
+
+    def test_link_count(self):
+
+        result = self.parser.get_links(self.root_page, self.base_url)
+
+        self.assertEqual(len(result), 21)
+
+    def test_link_size(self):
+        result = self.parser.get_links(self.root_page, self.base_url)
+
+        self.assertEqual(result["15%20Woodkid%20-%20Iron%20(Remix%20By%20Gucci%20Vump).mp3"]["size"], 9300000)
+        self.assertEqual(result["16%20Yellow%20Ostrich%20-%20WHALE.mp3"]["size"], 7100000)
+
+    def test_link_type(self):
+        result = self.parser.get_links(self.root_page, self.base_url)
+
+        self.assertEqual(result["15%20Woodkid%20-%20Iron%20(Remix%20By%20Gucci%20Vump).mp3"]["type"], "f")
+        self.assertEqual(result["01%20Jean%20Rochefort%20-%20Winnie%20et%20ses%20amis%20(introduction)/"]["type"], "d")
+
+    def test_link_extension(self):
+        result = self.parser.get_links(self.root_page, self.base_url)
+
+        self.assertEqual(result["15%20Woodkid%20-%20Iron%20(Remix%20By%20Gucci%20Vump).mp3"]["ext"], "mp3")
+
+
+class ApacheParserTest4(TestCase):
+
+    def setUp(self):
+        self.parser = ApacheParser()
+
+        root_page_file = open("test_apache4.html", "r")
+        self.root_page = root_page_file.read()
+        self.base_url = "http://jenserserver.no-ip.biz/movieserver/serien/bigbangtheorie/S3/"
+        root_page_file.close()
+
+    def test_link_size(self):
+        result = self.parser.get_links(self.root_page, self.base_url)
+
+        self.assertEqual(result["The.Big.Bang.Theory.S03E06.Football.fuer.Nerds.German.WS.DVDRip.XviD-DELiCiOUS.avi"]["size"], 175000000)
+        self.assertEqual(result["The.Big.Bang.Theory.S03E03.Sex.oder.Pralinen.German.WS.DVDRip.XviD-DELiCiOUS.avi"]["size"], 0)
\ No newline at end of file
diff --git a/spec/RedditBot_spec.py b/spec/RedditBot_spec.py
index ff0d667..a534fb0 100644
--- a/spec/RedditBot_spec.py
+++ b/spec/RedditBot_spec.py
@@ -1,5 +1,5 @@
 from unittest import TestCase
-from reddit_bot import RedditBot
+from reddit_bot import RedditBot, TaskQueue, CrawTask
 
 import os
 
@@ -33,3 +33,62 @@ class RedditBotTest(TestCase):
 
         self.assertTrue(bot.has_crawled("000000"))
 
+
+class TaskQueueTest(TestCase):
+
+    def tearDown(self):
+        if os.path.isfile("task_queue_test.txt"):
+            os.remove("task_queue_test.txt")
+
+    def test_push_pop_test(self):
+
+        if os.path.isfile("task_queue_test.txt"):
+            os.remove("task_queue_test.txt")
+
+        tq = TaskQueue("task_queue_test.txt")
+        tq.push(CrawTask("http://awebsite.com/", "postid", "a title"))
+
+        task1 = tq.pop()
+
+        self.assertEqual(tq.pop(), None)
+        self.assertEqual(task1.url, "http://awebsite.com/")
+        self.assertEqual(task1.post_id, "postid")
+
+    def test_persistence(self):
+
+        if os.path.isfile("task_queue_test.txt"):
+            os.remove("task_queue_test.txt")
+
+        tq = TaskQueue("task_queue_test.txt")
+        tq.push(CrawTask("http://awebsite.com/", "postid", "a title"))
+
+        tq2 = TaskQueue("task_queue_test.txt")
+        task = tq2.pop()
+
+        self.assertEqual(task.url, "http://awebsite.com/")
+        self.assertEqual(task.post_id, "postid")
+
+    def test_multiple_tasks(self):
+        if os.path.isfile("task_queue_test.txt"):
+            os.remove("task_queue_test.txt")
+
+        tq = TaskQueue("task_queue_test.txt")
+
+        tq.push(CrawTask("http://awebsite.com/", "postid", "a title"))
+        tq.push(CrawTask("http://awebsite.com/", "postid", "a title"))
+        tq.push(CrawTask("http://awebsite.com/", "postid", "a title"))
+
+        self.assertIsNotNone(tq.pop())
+        self.assertIsNotNone(tq.pop())
+        self.assertIsNotNone(tq.pop())
+        self.assertIsNone(tq.pop())
+
+    def test_is_queued(self):
+        if os.path.isfile("task_queue_test.txt"):
+            os.remove("task_queue_test.txt")
+
+        tq = TaskQueue("task_queue_test.txt")
+
+        tq.push(CrawTask("http://awebsite.com/", "postid", "a title"))
+
+        self.assertTrue(tq.is_queued("postid"))
+        self.assertFalse(tq.is_queued("123456"))
\ No newline at end of file
diff --git a/spec/test_apache_root.html b/spec/test_apache1.html
similarity index 100%
rename from spec/test_apache_root.html
rename to spec/test_apache1.html
diff --git a/spec/test_apache3.html b/spec/test_apache3.html
new file mode 100644
index 0000000..eb551e8
--- /dev/null
+++ b/spec/test_apache3.html
@@ -0,0 +1,32 @@
+
+
+
+  Index of /mp3/Jardinets
+
+

+Index of /mp3/Jardinets
+
+
+ Icon  Name                    Last modified      Size  Description
+[PARENTDIR] Parent Directory -
+[SND] 01 Jean Rochefort - ..> 2017-12-04 16:33 -
+[SND] 02 Krisma - Amore.mp3 2017-12-04 16:32 11M
+[SND] 03 Bernard Estardy -..> 2017-12-04 16:32 3.5M
+[SND] 04 Jamie Woon - Stre..> 2017-12-04 16:32 5.0M
+[SND] 05 DyE - Fantasy.mp3 2017-12-04 16:33 6.9M
+[SND] 06 Games - Planet Pa..> 2017-12-04 16:33 5.6M
+[SND] 07 Yeasayer - Swallo..> 2017-12-04 16:33 11M
+[SND] 08 Pacific! - Venus ..> 2017-12-04 16:32 5.7M
+[SND] 09 Jacky Chalard - S..> 2017-12-04 16:33 11M
+[SND] 10 Piry - Heroi Mode..> 2017-12-04 16:32 4.1M
+[SND] 11 Bahamas - Bahamas..> 2017-12-04 16:32 7.9M
+[SND] 12 Aeroplane - Fish ..> 2017-12-04 16:32 7.6M
+[SND] 13 Discodeine - Sync..> 2017-12-04 16:33 6.8M
+[SND] 14 Lykke Li - I Foll..> 2017-12-04 16:33 7.3M
+[SND] 15 Woodkid - Iron (R..> 2017-12-04 16:33 9.3M
+[SND] 16 Yellow Ostrich - ..> 2017-12-04 16:33 7.1M
+[SND] 17 Connan Mockasin -..> 2017-12-04 16:32 6.3M
+[SND] 18 Bruce Haack - May..> 2017-12-04 16:33 5.4M
+[IMG] cover-small.jpg 2017-12-04 16:32 97K
+[IMG] cover.jpg 2017-12-04 16:33 466K
+[TXT] playlist.txt 2017-12-04 16:33 955
+
+
diff --git a/spec/test_apache4.html b/spec/test_apache4.html
new file mode 100644
index 0000000..07ed408
--- /dev/null
+++ b/spec/test_apache4.html
@@ -0,0 +1,38 @@
+
+
+
+  Index of /movieserver/serien/bigbangtheorie/S3
+
+
+
+Index of /movieserver/serien/bigbangtheorie/S3
+
+ [ICO]  Name  Last modified  Size  Description
+ [PARENTDIR]  Parent Directory  -
+ [VID]  The.Big.Bang.Theory.S03E01.Der.Nordpol.Plan.German.WS.DVDRip.XviD-DELiCiOUS.avi  2017-01-17 18:52  6.8M
+ [VID]  The.Big.Bang.Theory.S03E02.Die.Grillenwette.German.WS.DVDRip.XviD-DELiCiOUS.avi  2014-05-16 17:14  175M
+ [VID]  The.Big.Bang.Theory.S03E03.Sex.oder.Pralinen.German.WS.DVDRip.XviD-DELiCiOUS.avi  2017-01-17 19:38  0
+ [VID]  The.Big.Bang.Theory.S03E04.Fuer.ihn.oder.mit.ihm.German.WS.DVDRip.XviD-DELiCiOUS.avi  2014-05-16 17:16  175M
+ [VID]  The.Big.Bang.Theory.S03E05.Der.Mann.der.seine.Omi.liebte.German.WS.DVDRip.XviD-DELiCiOUS.avi  2014-05-16 17:17  174M
+ [VID]  The.Big.Bang.Theory.S03E06.Football.fuer.Nerds.German.WS.DVDRip.XviD-DELiCiOUS.avi  2014-05-16 17:17  175M
+ [VID]  The.Big.Bang.Theory.S03E07.Der.Gitarrist.auf.der.Couch.German.WS.DVDRip.XviD-DELiCiOUS.avi  2014-05-16 17:18  175M
+ [VID]  The.Big.Bang.Theory.S03E08.Das.Suppentattoo.German.WS.DVDRip.XviD-DELiCiOUS.avi  2014-05-16 17:18  174M
+ [VID]  The.Big.Bang.Theory.S03E09.Die.Racheformel.German.WS.DVDRip.XviD-DELiCiOUS.avi  2014-05-16 17:20  174M
+ [VID]  The.Big.Bang.Theory.S03E10.Das.Gorilla.Projekt.German.WS.DVDRip.XviD-DELiCiOUS.avi  2014-05-16 17:20  174M
+ [VID]  The.Big.Bang.Theory.S03E11.Maedels.an.der.Bar.German.WS.DVDRip.XviD-DELiCiOUS.avi  2014-05-16 17:21  175M
+ [VID]  The.Big.Bang.Theory.S03E12.Howards.Phasen.German.WS.DVDRip.XviD-DELiCiOUS.avi  2014-05-16 17:21  175M
+ [VID]  The.Big.Bang.Theory.S03E13.Terror.in.der.Oestadt.der.Rosen.German.WS.DVDRip.XviD-DELiCiOUS.avi  2014-05-16 17:22  175M
+ [VID]  The.Big.Bang.Theory.S03E14.Fast.wie.Einstein.German.WS.DVDRip.XviD-DELiCiOUS.avi  2014-05-16 17:23  175M
+ [VID]  The.Big.Bang.Theory.S03E15.Freiflug.nach.Genf.German.WS.DVDRip.XviD-DELiCiOUS.avi  2014-05-16 17:24  175M
+ [VID]  The.Big.Bang.Theory.S03E16.Sheldon.pro.se.German.WS.DVDRip.XviD-DELiCiOUS.avi  2014-05-16 17:24  175M
+ [VID]  The.Big.Bang.Theory.S03E17.Die.Herren.des.Rings.German.WS.DVDRip.XviD-DELiCiOUS.avi  2014-05-16 17:25  175M
+ [VID]  The.Big.Bang.Theory.S03E18.Die.dunkle.Seite.des.Mondes.German.WS.DVDRip.XviD-DELiCiOUS.avi  2014-05-16 17:25  175M
+ [VID]  The.Big.Bang.Theory.S03E19.Das.L.Wort.German.WS.DVDRip.XviD-DELiCiOUS.avi  2014-05-16 17:27  175M
+ [VID]  The.Big.Bang.Theory.S03E20.Spaghetti.mit.Wuerstchen.German.WS.DVDRip.XviD-DELiCiOUS.avi  2014-05-16 17:27  175M
+ [VID]  The.Big.Bang.Theory.S03E21.Vierer.ohne.Sheldon.German.WS.DVDRip.XviD-DELiCiOUS.avi  2014-05-16 17:28  175M
+ [VID]  The.Big.Bang.Theory.S03E22.Die.Wahrheit.ueber.den.Fahrstuhl.German.WS.DVDRip.XviD-DELiCiOUS.avi  2014-05-16 17:28  175M
+ [VID]  The.Big.Bang.Theory.S03E23.Nie.mehr.dumme.Typen.German.WS.DVDRip.XviD-DELiCiOUS.avi  2014-05-16 17:29  174M
+
+
+Apache/2.4.10 (Debian) Server at jenserserver.no-ip.biz Port 80
+
diff --git a/spec/test_nginx_root.html b/spec/test_nginx1.html
similarity index 100%
rename from spec/test_nginx_root.html
rename to spec/test_nginx1.html
diff --git a/static/js/report.js b/static/js/report.js
index 1efecf5..efb7fe7 100644
--- a/static/js/report.js
+++ b/static/js/report.js
@@ -23,7 +23,7 @@ function drawCharts(rData) {
     for(var ext in rData["ext_sizes"]) {
 
         //Ignore file sizes below 0.5%
-        if (rData["ext_sizes"][ext] < 0.005 * rData["total_size"]) {
+        if (!isRelevant(rData, ext)) {
 
             otherSize += rData["ext_sizes"][ext];
             otherCount += rData["ext_count"][ext];
@@ -40,6 +40,7 @@ function drawCharts(rData) {
         colors.push(getRandomColor());
         labels.push("other x" + otherCount + " (" + humanFileSize(otherSize) + ")");
         dataSetSize.push(otherSize);
+        dataSetCount.push(otherCount);
     }
 
     var ctx = document.getElementById('typesChart').getContext('2d');
@@ -64,6 +65,23 @@ function drawCharts(rData) {
     });
 }
 
+
+function isRelevant(rData, ext) {
+
+    console.log("Checking + " + ext);
+    console.log("total + " + rData["total_size"]);
+    console.log("size + " + rData["ext_count"][ext]);
+    console.log("min + " + 0.03 * rData["total_count"]);
+
+    if(rData["total_size"] === 0) {
+        return rData["ext_count"][ext] > 0.03 * rData["total_count"]
+    } else {
+        return rData["ext_sizes"][ext] > 0.005 * rData["total_size"]
+    }
+
+
+}
+
 /**
  * https://stackoverflow.com/questions/1484506
  */
@@ -80,6 +98,11 @@ function getRandomColor() {
  * https://stackoverflow.com/questions/10420352
  */
 function humanFileSize(bytes) {
+
+    if(bytes === 0) {
+        return "? B"
+    }
+
     var thresh = 1000;
     if(Math.abs(bytes) < thresh) {
         return bytes + ' B';