25 Commits

Author SHA1 Message Date
Richard Patel
8f6f8fd17f fasthttp uri 2018-11-16 04:10:45 +01:00
Richard Patel
3c39f0d621 Random hacks 2018-11-16 03:22:51 +01:00
Richard Patel
50952791c5 Almost done 2018-11-16 03:12:26 +01:00
Richard Patel
30bf98ad34 Fix tests 2018-11-16 03:02:10 +01:00
Richard Patel
ccaf758e90 Remove URL.Opaque 2018-11-16 01:53:16 +01:00
Richard Patel
f668365edb Add tests 2018-11-16 01:51:34 +01:00
Richard Patel
1db8ff43bb Bump version 2018-11-16 00:25:11 +01:00
Richard Patel
82234f949e Less tokenizer allocations 2018-11-16 00:22:40 +01:00
Richard Patel
084b3a5903 Optimizing with hexa :P 2018-11-15 23:51:31 +01:00
Richard Patel
ac0b8d2d0b Blacklist all paths with a query parameter 2018-11-15 23:36:41 +01:00
Richard Patel
ffde1a9e5d Timeout and results saving 2018-11-15 20:14:31 +01:00
Richard Patel
a268c6dbcf Reduce WaitQueue usage 2018-11-12 00:38:22 +01:00
Richard Patel
4c071171eb Exclude dups in dir instead of keeping hashes of links 2018-11-11 23:11:30 +01:00
Richard Patel
9c8174dd8d Fix header parsing 2018-11-11 18:53:17 +01:00
Richard Patel
93272e1da1 Update README.md 2018-11-06 02:41:20 +01:00
Richard Patel
0344a120ff fasturl: Remove path escape 2018-11-06 02:15:09 +01:00
Richard Patel
6e6afd771e fasturl: Remove query 2018-11-06 02:11:22 +01:00
Richard Patel
a8c27b2d21 Hash links 2018-11-06 02:01:53 +01:00
Richard Patel
ed5e35f005 Performance improvements 2018-11-06 00:34:22 +01:00
Richard Patel
a12bca01c8 fasturl: Discard UserInfo 2018-11-06 00:33:57 +01:00
Richard Patel
ba9c818461 fasturl: Don't parse username and password 2018-11-06 00:28:42 +01:00
Richard Patel
9cf31b1d81 fasturl: Remove fragment 2018-11-06 00:17:10 +01:00
Richard Patel
ed0d9c681f fasturl: Replace scheme with enum 2018-11-06 00:15:12 +01:00
Richard Patel
b88d45fc21 fasturl: Remove allocs from Parse 2018-11-05 23:05:21 +01:00
Richard Patel
4989adff9f Add net/url package 2018-11-05 22:57:57 +01:00
10 changed files with 736 additions and 215 deletions

BIN
.github/stress.png vendored Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 369 KiB

View File

@@ -1,2 +1,12 @@
# oddb Go crawler # oddb Go crawler 🚀
> by terorie 2018 :P > by terorie 2018 :P
* Crawls HTTP open directories (standard Web Server Listings)
* Gets name, path, size and modification time of all files
* Soon: Will work as a crawler for [OD-Database](https://github.com/simon987/od-database)!
Stress test crawling [pandoradir](https://github.com/terorie/pandoradir)
on an average laptop (~10K requests per second, 4 connections):
![image](.github/stress.png)
Memory usage is being optimized :P

View File

@@ -13,6 +13,7 @@ var config struct {
Token string Token string
Retries int Retries int
Workers int Workers int
Timeout time.Duration
Tasks int32 Tasks int32
CrawlStats time.Duration CrawlStats time.Duration
AllocStats time.Duration AllocStats time.Duration
@@ -25,6 +26,7 @@ const (
ConfTasks = "crawl.tasks" ConfTasks = "crawl.tasks"
ConfRetries = "crawl.retries" ConfRetries = "crawl.retries"
ConfWorkers = "crawl.connections" ConfWorkers = "crawl.connections"
ConfTimeout = "crawl.timeout"
ConfCrawlStats = "output.crawl_stats" ConfCrawlStats = "output.crawl_stats"
ConfAllocStats = "output.resource_stats" ConfAllocStats = "output.resource_stats"
ConfVerbose = "output.verbose" ConfVerbose = "output.verbose"
@@ -34,6 +36,7 @@ func prepareConfig() {
viper.SetDefault(ConfRetries, 5) viper.SetDefault(ConfRetries, 5)
viper.SetDefault(ConfWorkers, 2) viper.SetDefault(ConfWorkers, 2)
viper.SetDefault(ConfTasks, 3) viper.SetDefault(ConfTasks, 3)
viper.SetDefault(ConfTimeout, 10 * time.Second)
viper.SetDefault(ConfCrawlStats, 3 * time.Second) viper.SetDefault(ConfCrawlStats, 3 * time.Second)
viper.SetDefault(ConfAllocStats, 0) viper.SetDefault(ConfAllocStats, 0)
viper.SetDefault(ConfVerbose, false) viper.SetDefault(ConfVerbose, false)
@@ -73,6 +76,8 @@ func readConfig() {
configOOB(ConfTasks, int(config.Tasks)) configOOB(ConfTasks, int(config.Tasks))
} }
config.Timeout = viper.GetDuration(ConfTimeout)
config.CrawlStats = viper.GetDuration(ConfCrawlStats) config.CrawlStats = viper.GetDuration(ConfCrawlStats)
config.AllocStats = viper.GetDuration(ConfAllocStats) config.AllocStats = viper.GetDuration(ConfAllocStats)

View File

@@ -24,3 +24,5 @@ crawl:
# How often to retry getting data # How often to retry getting data
# from the site before giving up # from the site before giving up
retries: 5 retries: 5
# Time before discarding a network request
timeout: 10s

217
crawl.go
View File

@@ -2,14 +2,11 @@ package main
import ( import (
"bytes" "bytes"
"encoding/base64"
"fmt" "fmt"
"github.com/sirupsen/logrus" "github.com/terorie/oddb-go/ds/redblackhash"
"github.com/valyala/fasthttp" "github.com/valyala/fasthttp"
"golang.org/x/crypto/blake2b" "golang.org/x/crypto/blake2b"
"golang.org/x/net/html" "golang.org/x/net/html"
"golang.org/x/net/html/atom"
"net/url"
"path" "path"
"strconv" "strconv"
"strings" "strings"
@@ -18,23 +15,20 @@ import (
var client fasthttp.Client var client fasthttp.Client
func GetDir(j *Job, f *File) (links []url.URL, err error) { func GetDir(j *Job, f *File) (links []fasthttp.URI, err error) {
f.IsDir = true f.IsDir = true
f.Name = path.Base(j.Uri.Path) f.Name = path.Base(string(j.Uri.Path()))
req := fasthttp.AcquireRequest() req := fasthttp.AcquireRequest()
req.SetRequestURI(j.Uri.String()) req.SetRequestURI(j.UriStr)
res := fasthttp.AcquireResponse() res := fasthttp.AcquireResponse()
defer fasthttp.ReleaseResponse(res) defer fasthttp.ReleaseResponse(res)
err = client.Do(req, res) err = client.DoTimeout(req, res, config.Timeout)
fasthttp.ReleaseRequest(req) fasthttp.ReleaseRequest(req)
if err != nil { if err != nil { return }
logrus.Error(err)
return
}
err = checkStatusCode(res.StatusCode()) err = checkStatusCode(res.StatusCode())
if err != nil { return } if err != nil { return }
@@ -43,66 +37,58 @@ func GetDir(j *Job, f *File) (links []url.URL, err error) {
doc := html.NewTokenizer(bytes.NewReader(body)) doc := html.NewTokenizer(bytes.NewReader(body))
var linkHref string var linkHref string
var linkTexts []string
for { for {
tokenType := doc.Next() tokenType := doc.Next()
token := doc.Token()
if tokenType == html.ErrorToken { if tokenType == html.ErrorToken {
break break
} }
switch tokenType { switch tokenType {
case html.StartTagToken: case html.StartTagToken:
if token.DataAtom == atom.A { name, hasAttr := doc.TagName()
for _, attr := range token.Attr { if len(name) == 1 && name[0] == 'a' {
if attr.Key == "href" { for hasAttr {
linkHref = attr.Val var ks, vs []byte
ks, vs, hasAttr = doc.TagAttr()
if bytes.Equal(ks, []byte("href")) {
// TODO Check escape
linkHref = string(vs)
break break
} }
} }
} }
case html.TextToken:
if linkHref != "" {
linkTexts = append(linkTexts, token.Data)
}
case html.EndTagToken: case html.EndTagToken:
if linkHref != "" && token.DataAtom == atom.A { name, _ := doc.TagName()
if len(name) == 1 && name[0] == 'a' {
// Copy params // Copy params
href := linkHref href := linkHref
linkText := strings.Join(linkTexts, " ")
// Reset params // Reset params
linkHref = "" linkHref = ""
linkTexts = nil
// TODO Optimized decision tree if strings.LastIndexByte(href, '?') != -1 {
for _, entry := range urlBlackList { goto nextToken
if href == entry {
goto nextToken
}
}
for _, entry := range urlPartBlackList {
if strings.Contains(href, entry) {
goto nextToken
}
}
for _, entry := range fileNameBlackList {
if strings.Contains(linkText, entry) {
goto nextToken
}
} }
subref, err := url.Parse(href) switch href {
case "", " ", ".", "..", "/":
goto nextToken
}
if strings.Contains(href, "../") {
goto nextToken
}
var link fasthttp.URI
j.Uri.CopyTo(&link)
link.Update(href)
if err != nil { continue } if err != nil { continue }
link := *j.Uri.ResolveReference(subref) if !bytes.Equal(link.Scheme(), j.Uri.Scheme()) ||
!bytes.Equal(link.Host(), j.Uri.Host()) ||
if link.Scheme != j.Uri.Scheme || bytes.Equal(link.Path(), j.Uri.Path()) ||
link.Host != j.Uri.Host || !bytes.HasPrefix(link.Path(), j.Uri.Path()) {
link.Path == j.Uri.Path ||
!strings.HasPrefix(link.Path, j.Uri.Path) {
continue continue
} }
@@ -116,11 +102,12 @@ func GetDir(j *Job, f *File) (links []url.URL, err error) {
return return
} }
func GetFile(u url.URL, f *File) (err error) { func GetFile(u fasthttp.URI, f *File) (err error) {
f.IsDir = false f.IsDir = false
u.Path = path.Clean(u.Path) cleanPath := path.Clean(string(u.Path()))
f.Name = path.Base(u.Path) u.SetPath(cleanPath)
f.Path = strings.Trim(u.Path, "/") f.Name = path.Base(cleanPath)
f.Path = strings.Trim(cleanPath, "/")
req := fasthttp.AcquireRequest() req := fasthttp.AcquireRequest()
req.Header.SetMethod("HEAD") req.Header.SetMethod("HEAD")
@@ -130,7 +117,7 @@ func GetFile(u url.URL, f *File) (err error) {
res.SkipBody = true res.SkipBody = true
defer fasthttp.ReleaseResponse(res) defer fasthttp.ReleaseResponse(res)
err = client.Do(req, res) err = client.DoTimeout(req, res, config.Timeout)
fasthttp.ReleaseRequest(req) fasthttp.ReleaseRequest(req)
if err != nil { return } if err != nil { return }
@@ -138,83 +125,41 @@ func GetFile(u url.URL, f *File) (err error) {
err = checkStatusCode(res.StatusCode()) err = checkStatusCode(res.StatusCode())
if err != nil { return } if err != nil { return }
// TODO Inefficient af f.applyContentLength(string(res.Header.Peek("content-length")))
header := res.Header.Header() f.applyLastModified(string(res.Header.Peek("last-modified")))
f.ParseHeader(header)
return nil return nil
} }
func (f *File) HashDir(links []url.URL) string { func (f *File) HashDir(links []fasthttp.URI) (o redblackhash.Key) {
h, _ := blake2b.New256(nil) h, _ := blake2b.New256(nil)
h.Write([]byte(f.Name)) h.Write([]byte(f.Name))
for _, link := range links { for _, link := range links {
fileName := path.Base(link.Path) h.Write(link.Path())
h.Write([]byte(fileName))
} }
sum := h.Sum(nil) sum := h.Sum(nil)
b64sum := base64.StdEncoding.EncodeToString(sum) copy(o[:redblackhash.KeySize], sum)
return b64sum return
} }
func (f *File) ParseHeader(h []byte) { func (f *File) applyContentLength(v string) {
var k1, k2 int if v == "" { return }
var v1, v2 int size, err := strconv.ParseInt(v, 10, 64)
if err != nil { return }
// Simple finite state machine if size < 0 { return }
state := 0 f.Size = size
for i, b := range h {
switch state {
case 0:
if b == byte(':') {
state = 1
k2 = i
}
case 1:
state = 2
case 2:
state = 3
v1 = i
case 3:
if b == byte('\r') {
state = 4
}
case 4:
state = 0
v2 = i - 1
key := string(h[k1:k2])
val := string(h[v1:v2])
k1 = i
f.applyHeader(key, val)
}
}
} }
func (f *File) applyHeader(k, v string) { func (f *File) applyLastModified(v string) {
switch k { if v == "" { return }
case "content-length": var err error
size, err := strconv.ParseInt(v, 10, 64) f.MTime, err = time.Parse(time.RFC1123, v)
if err != nil { break } if err == nil { return }
if size < 0 { break } f.MTime, err = time.Parse(time.RFC850, v)
f.Size = size if err == nil { return }
// TODO Parse asctime
case "last-modified": f.MTime, err = time.Parse("2006-01-02", v[:10])
var err error if err == nil { return }
f.MTime, err = time.Parse(time.RFC1123, v)
if err == nil { break }
f.MTime, err = time.Parse(time.RFC850, v)
if err == nil { break }
// TODO Parse asctime
f.MTime, err = time.Parse("2006-01-02", v[:10])
if err == nil { break }
}
} }
func checkStatusCode(status int) error { func checkStatusCode(status int) error {
@@ -233,41 +178,3 @@ func checkStatusCode(status int) error {
return fmt.Errorf("got HTTP status %d", status) return fmt.Errorf("got HTTP status %d", status)
} }
} }
var urlBlackList = [...]string {
"",
" ",
".",
"..",
"/",
}
var urlPartBlackList = [...]string {
"?C=N&O=D",
"?C=M&O=A",
"?C=S&O=A",
"?C=D&O=A",
"?C=N;O=D",
"?C=M;O=A",
"?C=M&O=D",
"?C=S;O=A",
"?C=S&O=D",
"?C=D;O=A",
"?MA",
"?SA",
"?DA",
"?ND",
"?C=N&O=A",
"?C=N&O=A",
"?M=A",
"?N=D",
"?S=A",
"?D=A",
}
var fileNameBlackList = [...]string {
"Parent Directory",
" Parent Directory",
"../",
}

521
ds/redblackhash/redblack.go Normal file
View File

@@ -0,0 +1,521 @@
// Copyright (c) 2015, Emir Pasic. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Modifications by terorie
// Package redblackhash implements a red-black tree keyed by fixed-size hashes.
//
// Used by TreeSet and TreeMap.
//
// Structure is not thread safe.
//
// References: http://en.wikipedia.org/wiki/Red%E2%80%93black_tree
package redblackhash
import (
"fmt"
)
const (
	// black and red are the two node colors of a red-black tree.
	black, red color = true, false
	// KeySize is the fixed length of a Key in bytes.
	KeySize = 64
)

// color marks a node as either black or red.
type color bool

// Key is a fixed-size byte array used as the tree's ordering key.
type Key [KeySize]byte
// Tree holds elements of the red-black tree
type Tree struct {
	Root *Node // root node; nil when the tree is empty
	size int   // number of nodes currently stored
}

// Node is a single element within the tree
type Node struct {
	Key    Key
	color  color // black or red; maintained by the insert/delete cases
	Left   *Node
	Right  *Node
	Parent *Node // nil only for the root
}
// Compare lexicographically compares k with o byte by byte.
// It returns -1 if k sorts before o, 1 if k sorts after o,
// and 0 if both keys are equal.
//
// NOTE: a previously commented-out word-at-a-time variant was removed;
// its byte packing used right shifts where left shifts were intended
// and would not have produced a correct ordering.
func (k *Key) Compare(o *Key) int {
	for i := 0; i < KeySize; i++ {
		switch {
		case k[i] < o[i]:
			return -1
		case k[i] > o[i]:
			return 1
		}
	}
	return 0
}
// Put inserts node into the tree.
// Key should adhere to the comparator's type assertion, otherwise method panics.
func (tree *Tree) Put(key *Key) {
	var insertedNode *Node
	if tree.Root == nil {
		// Assert key is of comparator's type for initial tree
		// First node: becomes the root (recolored black by insertCase1).
		tree.Root = &Node{Key: *key, color: red}
		insertedNode = tree.Root
	} else {
		// Walk down to the insertion point.
		node := tree.Root
		loop := true
		for loop {
			compare := key.Compare(&node.Key)
			switch {
			case compare == 0:
				// Key already present: overwrite in place, tree unchanged.
				node.Key = *key
				return
			case compare < 0:
				if node.Left == nil {
					node.Left = &Node{Key: *key, color: red}
					insertedNode = node.Left
					loop = false
				} else {
					node = node.Left
				}
			case compare > 0:
				if node.Right == nil {
					node.Right = &Node{Key: *key, color: red}
					insertedNode = node.Right
					loop = false
				} else {
					node = node.Right
				}
			}
		}
		insertedNode.Parent = node
	}
	// Restore red-black invariants starting from the new red node.
	tree.insertCase1(insertedNode)
	tree.size++
}
// Get reports whether the given key is present in the tree.
func (tree *Tree) Get(key *Key) (found bool) {
	return tree.lookup(key) != nil
}
// Remove remove the node from the tree by key.
// Key should adhere to the comparator's type assertion, otherwise method panics.
func (tree *Tree) Remove(key *Key) {
	var child *Node
	node := tree.lookup(key)
	if node == nil {
		return
	}
	if node.Left != nil && node.Right != nil {
		// Two children: copy the in-order predecessor's key into node,
		// then delete the predecessor (which has at most one child).
		pred := node.Left.maximumNode()
		node.Key = pred.Key
		node = pred
	}
	if node.Left == nil || node.Right == nil {
		// At most one child; splice it into node's place.
		if node.Right == nil {
			child = node.Left
		} else {
			child = node.Right
		}
		if node.color == black {
			// Deleting a black node can break the black-height
			// invariant; repair before unlinking.
			node.color = nodeColor(child)
			tree.deleteCase1(node)
		}
		tree.replaceNode(node, child)
		if node.Parent == nil && child != nil {
			// The root must always be black.
			child.color = black
		}
	}
	tree.size--
}
// Empty reports whether the tree currently holds no nodes.
func (tree *Tree) Empty() bool {
	return tree.Size() == 0
}

// Size reports how many nodes the tree currently contains.
func (tree *Tree) Size() int {
	return tree.size
}
// Left returns the left-most (minimum) node, or nil if the tree is empty.
func (tree *Tree) Left() *Node {
	if tree.Root == nil {
		return nil
	}
	node := tree.Root
	for node.Left != nil {
		node = node.Left
	}
	return node
}

// Right returns the right-most (maximum) node, or nil if the tree is empty.
func (tree *Tree) Right() *Node {
	if tree.Root == nil {
		return nil
	}
	node := tree.Root
	for node.Right != nil {
		node = node.Right
	}
	return node
}
// Floor returns the largest node whose key is smaller than or equal to
// the given key, plus a flag telling whether such a node exists.
// No floor exists when the tree is empty or every stored key is larger
// than the argument.
func (tree *Tree) Floor(key *Key) (floor *Node, found bool) {
	node := tree.Root
	for node != nil {
		c := key.Compare(&node.Key)
		if c == 0 {
			return node, true
		}
		if c < 0 {
			node = node.Left
		} else {
			// node is a candidate floor; a closer one may sit to the right.
			floor, found = node, true
			node = node.Right
		}
	}
	if !found {
		return nil, false
	}
	return floor, true
}
// Ceiling returns the smallest node whose key is larger than or equal to
// the given key, plus a flag telling whether such a node exists.
// No ceiling exists when the tree is empty or every stored key is smaller
// than the argument.
func (tree *Tree) Ceiling(key *Key) (ceiling *Node, found bool) {
	node := tree.Root
	for node != nil {
		c := key.Compare(&node.Key)
		if c == 0 {
			return node, true
		}
		if c > 0 {
			node = node.Right
		} else {
			// node is a candidate ceiling; a closer one may sit to the left.
			ceiling, found = node, true
			node = node.Left
		}
	}
	if !found {
		return nil, false
	}
	return ceiling, true
}
// Clear drops every node, leaving an empty tree.
func (tree *Tree) Clear() {
	tree.Root, tree.size = nil, 0
}
// String renders the whole tree as a multi-line ASCII diagram.
func (tree *Tree) String() string {
	out := "RedBlackTree\n"
	if tree.Empty() {
		return out
	}
	output(tree.Root, "", true, &out)
	return out
}
// String renders the node's key in fmt's default format.
func (node *Node) String() string {
	return fmt.Sprint(node.Key)
}
// output recursively appends an ASCII-art rendering of the subtree rooted
// at node to str. prefix carries the indentation accumulated so far and
// isTail says whether node is printed as the last child of its parent
// (this selects the branch glyphs). The right subtree is emitted first so
// the printed tree reads with greater keys on top.
func output(node *Node, prefix string, isTail bool, str *string) {
	if node.Right != nil {
		newPrefix := prefix
		if isTail {
			newPrefix += "│   "
		} else {
			newPrefix += "    "
		}
		output(node.Right, newPrefix, false, str)
	}
	*str += prefix
	if isTail {
		*str += "└── "
	} else {
		*str += "┌── "
	}
	*str += node.String() + "\n"
	if node.Left != nil {
		newPrefix := prefix
		if isTail {
			newPrefix += "    "
		} else {
			newPrefix += "│   "
		}
		output(node.Left, newPrefix, true, str)
	}
}
// lookup walks from the root following the key ordering and returns the
// node holding key, or nil when the key is absent.
func (tree *Tree) lookup(key *Key) *Node {
	cur := tree.Root
	for cur != nil {
		switch c := key.Compare(&cur.Key); {
		case c < 0:
			cur = cur.Left
		case c > 0:
			cur = cur.Right
		default:
			return cur
		}
	}
	return nil
}
// grandparent returns the parent of node's parent, or nil when either
// link is missing.
func (node *Node) grandparent() *Node {
	if node == nil || node.Parent == nil {
		return nil
	}
	return node.Parent.Parent
}
// uncle returns the sibling of node's parent, or nil when node has no
// grandparent.
func (node *Node) uncle() *Node {
	if node.grandparent() == nil {
		return nil
	}
	return node.Parent.sibling()
}
// sibling returns the other child of node's parent, or nil for the root
// or a nil node.
func (node *Node) sibling() *Node {
	if node == nil || node.Parent == nil {
		return nil
	}
	p := node.Parent
	if node == p.Left {
		return p.Right
	}
	return p.Left
}
// rotateLeft rotates the subtree rooted at node to the left: node's right
// child takes node's position and node becomes its left child. The right
// child's former left subtree is reattached as node's new right subtree.
func (tree *Tree) rotateLeft(node *Node) {
	right := node.Right
	tree.replaceNode(node, right)
	node.Right = right.Left
	if right.Left != nil {
		right.Left.Parent = node
	}
	right.Left = node
	node.Parent = right
}

// rotateRight is the mirror of rotateLeft: node's left child takes node's
// position and node becomes its right child.
func (tree *Tree) rotateRight(node *Node) {
	left := node.Left
	tree.replaceNode(node, left)
	node.Left = left.Right
	if left.Right != nil {
		left.Right.Parent = node
	}
	left.Right = node
	node.Parent = left
}
// replaceNode splices repl into the position currently held by old,
// updating the parent's child pointer (or the tree root) and repl's
// Parent link. repl may be nil, which simply detaches old.
// (The second parameter was renamed from `new`, which shadowed the
// predeclared Go identifier.)
func (tree *Tree) replaceNode(old *Node, repl *Node) {
	if old.Parent == nil {
		tree.Root = repl
	} else if old == old.Parent.Left {
		old.Parent.Left = repl
	} else {
		old.Parent.Right = repl
	}
	if repl != nil {
		repl.Parent = old.Parent
	}
}
// insertCase1: the new node is the root — recolor it black.
func (tree *Tree) insertCase1(node *Node) {
	if node.Parent == nil {
		node.color = black
	} else {
		tree.insertCase2(node)
	}
}

// insertCase2: parent is black — no invariant is violated, done.
func (tree *Tree) insertCase2(node *Node) {
	if nodeColor(node.Parent) == black {
		return
	}
	tree.insertCase3(node)
}

// insertCase3: red parent and red uncle — recolor both black, make the
// grandparent red, and restart the repair from the grandparent.
func (tree *Tree) insertCase3(node *Node) {
	uncle := node.uncle()
	if nodeColor(uncle) == red {
		node.Parent.color = black
		uncle.color = black
		node.grandparent().color = red
		tree.insertCase1(node.grandparent())
	} else {
		tree.insertCase4(node)
	}
}

// insertCase4: node sits on the "inner" side of its grandparent — rotate
// the parent so the violation moves to the outer side, then fall through
// to case 5 (note node is reassigned to keep pointing at the lower node).
func (tree *Tree) insertCase4(node *Node) {
	grandparent := node.grandparent()
	if node == node.Parent.Right && node.Parent == grandparent.Left {
		tree.rotateLeft(node.Parent)
		node = node.Left
	} else if node == node.Parent.Left && node.Parent == grandparent.Right {
		tree.rotateRight(node.Parent)
		node = node.Right
	}
	tree.insertCase5(node)
}

// insertCase5: outer-side red-red violation — recolor parent black and
// grandparent red, then rotate the grandparent away from node's side.
func (tree *Tree) insertCase5(node *Node) {
	node.Parent.color = black
	grandparent := node.grandparent()
	grandparent.color = red
	if node == node.Parent.Left && node.Parent == grandparent.Left {
		tree.rotateRight(grandparent)
	} else if node == node.Parent.Right && node.Parent == grandparent.Right {
		tree.rotateLeft(grandparent)
	}
}
// maximumNode returns the right-most node of the subtree rooted at node,
// or nil when node itself is nil.
func (node *Node) maximumNode() *Node {
	if node == nil {
		return nil
	}
	cur := node
	for cur.Right != nil {
		cur = cur.Right
	}
	return cur
}
// deleteCase1: node is the root — the whole tree lost one black level
// uniformly, nothing to repair.
func (tree *Tree) deleteCase1(node *Node) {
	if node.Parent == nil {
		return
	}
	tree.deleteCase2(node)
}

// deleteCase2: red sibling — recolor and rotate so node gains a black
// sibling, then continue with the remaining cases.
func (tree *Tree) deleteCase2(node *Node) {
	sibling := node.sibling()
	if nodeColor(sibling) == red {
		node.Parent.color = red
		sibling.color = black
		if node == node.Parent.Left {
			tree.rotateLeft(node.Parent)
		} else {
			tree.rotateRight(node.Parent)
		}
	}
	tree.deleteCase3(node)
}

// deleteCase3: parent, sibling, and both of the sibling's children are
// black — repaint the sibling red and push the deficit up to the parent.
func (tree *Tree) deleteCase3(node *Node) {
	sibling := node.sibling()
	if nodeColor(node.Parent) == black &&
		nodeColor(sibling) == black &&
		nodeColor(sibling.Left) == black &&
		nodeColor(sibling.Right) == black {
		sibling.color = red
		tree.deleteCase1(node.Parent)
	} else {
		tree.deleteCase4(node)
	}
}

// deleteCase4: red parent with an all-black sibling family — swap the
// colors of parent and sibling, which restores the black height locally.
func (tree *Tree) deleteCase4(node *Node) {
	sibling := node.sibling()
	if nodeColor(node.Parent) == red &&
		nodeColor(sibling) == black &&
		nodeColor(sibling.Left) == black &&
		nodeColor(sibling.Right) == black {
		sibling.color = red
		node.Parent.color = black
	} else {
		tree.deleteCase5(node)
	}
}

// deleteCase5: the sibling's red child is on the "inner" side — rotate
// the sibling so the red child moves to the outer side, setting up case 6.
func (tree *Tree) deleteCase5(node *Node) {
	sibling := node.sibling()
	if node == node.Parent.Left &&
		nodeColor(sibling) == black &&
		nodeColor(sibling.Left) == red &&
		nodeColor(sibling.Right) == black {
		sibling.color = red
		sibling.Left.color = black
		tree.rotateRight(sibling)
	} else if node == node.Parent.Right &&
		nodeColor(sibling) == black &&
		nodeColor(sibling.Right) == red &&
		nodeColor(sibling.Left) == black {
		sibling.color = red
		sibling.Right.color = black
		tree.rotateLeft(sibling)
	}
	tree.deleteCase6(node)
}

// deleteCase6: black sibling with an outer red child — rotate the parent
// toward node and recolor, which restores all invariants.
func (tree *Tree) deleteCase6(node *Node) {
	sibling := node.sibling()
	sibling.color = nodeColor(node.Parent)
	node.Parent.color = black
	if node == node.Parent.Left && nodeColor(sibling.Right) == red {
		sibling.Right.color = black
		tree.rotateLeft(node.Parent)
	} else if nodeColor(sibling.Left) == red {
		sibling.Left.color = black
		tree.rotateRight(node.Parent)
	}
}
// nodeColor returns the color of node, treating nil leaves as black.
func nodeColor(node *Node) color {
	if node != nil {
		return node.color
	}
	return black
}

25
main.go
View File

@@ -4,10 +4,10 @@ import (
"context" "context"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"github.com/urfave/cli" "github.com/urfave/cli"
"github.com/valyala/fasthttp"
"log" "log"
"net/http" "net/http"
_ "net/http/pprof" _ "net/http/pprof"
"net/url"
"os" "os"
"strings" "strings"
"time" "time"
@@ -16,7 +16,7 @@ import (
var app = cli.App { var app = cli.App {
Name: "oddb-go", Name: "oddb-go",
Usage: "OD-Database Go crawler", Usage: "OD-Database Go crawler",
Version: "0.1", Version: "0.2",
BashComplete: cli.DefaultAppComplete, BashComplete: cli.DefaultAppComplete,
Writer: os.Stdout, Writer: os.Stdout,
Compiled: buildDate, Compiled: buildDate,
@@ -55,12 +55,19 @@ func cmdCrawler(clic *cli.Context) error {
if !strings.Contains(arg, "://") { if !strings.Contains(arg, "://") {
arg = "http://" + arg arg = "http://" + arg
} }
u, err := url.Parse(arg) var u fasthttp.URI
if !strings.HasSuffix(u.Path, "/") { u.Parse(nil, []byte(arg))
u.Path += "/" uPath := string(u.Path())
if !strings.HasSuffix(uPath, "/") {
u.SetPath(uPath + "/")
}
remotes[i] = &OD {
Task: &Task{
WebsiteId: 0,
Url: u.String(),
},
BaseUri: u,
} }
if err != nil { return err }
remotes[i] = &OD{ BaseUri: *u }
} }
c := context.Background() c := context.Background()
@@ -82,6 +89,6 @@ func cmdCrawler(clic *cli.Context) error {
} }
var buildDate = time.Date( var buildDate = time.Date(
2018, 10, 28, 2018, 11, 15,
17, 10, 0, 0, 23, 24, 0, 0,
time.UTC) time.UTC)

View File

@@ -1,26 +1,28 @@
package main package main
import ( import (
"net/url" "github.com/terorie/oddb-go/ds/redblackhash"
"github.com/valyala/fasthttp"
"sync" "sync"
"time" "time"
) )
type Job struct { type Job struct {
OD *OD OD *OD
Uri url.URL Uri fasthttp.URI
UriStr string UriStr string
Fails int Fails int
LastError error LastError error
} }
type OD struct { type OD struct {
Task *Task
Wait sync.WaitGroup Wait sync.WaitGroup
BaseUri url.URL BaseUri fasthttp.URI
lock sync.Mutex
Files []File
WCtx WorkerContext WCtx WorkerContext
Scanned sync.Map Scanned redblackhash.Tree
lock sync.Mutex
} }
type File struct { type File struct {
@@ -30,3 +32,14 @@ type File struct {
Path string `json:"path"` Path string `json:"path"`
IsDir bool `json:"-"` IsDir bool `json:"-"`
} }
// LoadOrStoreKey checks under the OD lock whether k was already scanned
// and, if not, records it. It returns true when the key was already present.
func (o *OD) LoadOrStoreKey(k *redblackhash.Key) (exists bool) {
	o.lock.Lock()
	defer o.lock.Unlock()
	if o.Scanned.Get(k) {
		return true
	}
	o.Scanned.Put(k)
	return false
}

View File

@@ -2,7 +2,11 @@ package main
import ( import (
"context" "context"
"encoding/json"
"fmt"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"os"
"path"
"sync/atomic" "sync/atomic"
) )
@@ -12,38 +16,36 @@ var totalBuffered int64
func Schedule(c context.Context, remotes <-chan *OD) { func Schedule(c context.Context, remotes <-chan *OD) {
go Stats(c) go Stats(c)
for { for remote := range remotes {
select { logrus.WithField("url", remote.BaseUri.String()).
case <-c.Done(): Info("Starting crawler")
return
case remote := <-remotes: // Collect results
logrus.WithField("url", remote.BaseUri.String()). results := make(chan File)
Info("Starting crawler")
// Spawn workers // Spawn workers
remote.WCtx.in, remote.WCtx.out = makeJobBuffer(c) remote.WCtx.in, remote.WCtx.out = makeJobBuffer(c)
for i := 0; i < config.Workers; i++ { for i := 0; i < config.Workers; i++ {
go remote.WCtx.Worker() go remote.WCtx.Worker(results)
}
// Enqueue initial job
atomic.AddInt32(&activeTasks, 1)
remote.WCtx.queueJob(Job{
OD: remote,
Uri: remote.BaseUri,
UriStr: remote.BaseUri.String(),
Fails: 0,
})
globalWait.Done()
// Upload result when ready
go remote.Watch()
} }
// Enqueue initial job
atomic.AddInt32(&activeTasks, 1)
remote.WCtx.queueJob(Job{
OD: remote,
Uri: remote.BaseUri,
UriStr: remote.BaseUri.String(),
Fails: 0,
})
// Upload result when ready
go remote.Watch(results)
} }
} }
func (r *OD) Watch() { func (r *OD) Watch(results chan File) {
go r.Task.Collect(results)
// Wait for all jobs on remote to finish // Wait for all jobs on remote to finish
r.Wait.Wait() r.Wait.Wait()
close(r.WCtx.in) close(r.WCtx.in)
@@ -51,6 +53,42 @@ func (r *OD) Watch() {
logrus.WithField("url", r.BaseUri.String()). logrus.WithField("url", r.BaseUri.String()).
Info("Crawler finished") Info("Crawler finished")
globalWait.Done()
close(results)
}
func (t *Task) Collect(results chan File) {
err := t.collect(results)
if err != nil {
logrus.WithError(err).
Error("Failed saving crawl results")
}
}
func (t *Task) collect(results chan File) error {
err := os.MkdirAll("crawled", 0755)
if err != nil { return err }
f, err := os.OpenFile(
path.Join("crawled", fmt.Sprintf("%d.json", t.WebsiteId)),
os.O_CREATE | os.O_WRONLY | os.O_TRUNC,
0755,
)
if err != nil { return err }
defer f.Close()
for result := range results {
resJson, err := json.Marshal(result)
if err != nil { panic(err) }
_, err = f.Write(resJson)
if err != nil { return err }
_, err = f.Write([]byte{'\n'})
if err != nil { return err }
}
return nil
} }
func makeJobBuffer(c context.Context) (chan<- Job, <-chan Job) { func makeJobBuffer(c context.Context) (chan<- Job, <-chan Job) {

View File

@@ -1,9 +1,10 @@
package main package main
import ( import (
"bytes"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"math" "math"
"strings" "sort"
"sync" "sync"
"sync/atomic" "sync/atomic"
"time" "time"
@@ -18,13 +19,13 @@ type WorkerContext struct {
numRateLimits int numRateLimits int
} }
func (w WorkerContext) Worker() { func (w WorkerContext) Worker(results chan<- File) {
for job := range w.out { for job := range w.out {
w.step(job) w.step(results, job)
} }
} }
func (w WorkerContext) step(job Job) { func (w WorkerContext) step(results chan<- File, job Job) {
defer w.finishJob(&job) defer w.finishJob(&job)
var f File var f File
@@ -63,16 +64,20 @@ func (w WorkerContext) step(job Job) {
w.queueJob(job) w.queueJob(job)
} }
job.OD.Files = append(job.OD.Files, f) if !f.IsDir {
results <- f
}
} }
func DoJob(job *Job, f *File) (newJobs []Job, err error) { func DoJob(job *Job, f *File) (newJobs []Job, err error) {
if strings.HasSuffix(job.Uri.Path, "/") { uriPath := job.Uri.Path()
if len(uriPath) == 0 { return }
if uriPath[len(uriPath)-1] == '/' {
// Load directory // Load directory
links, err := GetDir(job, f) links, err := GetDir(job, f)
if err != nil { if err != nil {
logrus.WithError(err). logrus.WithError(err).
WithField("url", job.Uri.String()). WithField("url", job.UriStr).
Error("Failed getting dir") Error("Failed getting dir")
return nil, err return nil, err
} }
@@ -81,33 +86,48 @@ func DoJob(job *Job, f *File) (newJobs []Job, err error) {
hash := f.HashDir(links) hash := f.HashDir(links)
// Skip symlinked dirs // Skip symlinked dirs
if _, old := job.OD.Scanned.LoadOrStore(hash, true); old { if job.OD.LoadOrStoreKey(&hash) {
return nil, ErrKnown return nil, ErrKnown
} }
// Sort by path
sort.Slice(links, func(i, j int) bool {
return bytes.Compare(links[i].Path(), links[j].Path()) < 0
})
var newJobCount int
var lastLink string
for _, link := range links { for _, link := range links {
// Skip already queued links uriStr := link.String()
if _, old := job.OD.Scanned.LoadOrStore(link, true); old {
// Ignore dupes
if uriStr == lastLink {
continue continue
} }
lastLink = uriStr
job.OD.Wait.Add(1) job.OD.Wait.Add(1)
newJobs = append(newJobs, Job{ newJobs = append(newJobs, Job{
OD: job.OD, OD: job.OD,
Uri: link, Uri: link,
UriStr: link.String(), UriStr: uriStr,
Fails: 0, Fails: 0,
}) })
newJobCount++
}
if config.Verbose {
logrus.WithFields(logrus.Fields{
"url": job.UriStr,
"files": newJobCount,
}).Debug("Listed")
} }
logrus.WithFields(logrus.Fields{
"url": job.UriStr,
"files": len(links),
}).Debug("Listed")
} else { } else {
// Load file // Load file
err := GetFile(job.Uri, f) err := GetFile(job.Uri, f)
if err != nil { if err != nil {
logrus.WithError(err). logrus.WithError(err).
WithField("url", job.Uri.String()). WithField("url", job.UriStr).
Error("Failed getting file") Error("Failed getting file")
return nil, err return nil, err
} }
@@ -117,7 +137,6 @@ func DoJob(job *Job, f *File) (newJobs []Job, err error) {
func (w WorkerContext) queueJob(job Job) { func (w WorkerContext) queueJob(job Job) {
job.OD.Wait.Add(1) job.OD.Wait.Add(1)
globalWait.Add(1)
if w.numRateLimits > 0 { if w.numRateLimits > 0 {
if time.Since(w.lastRateLimit) > 5 * time.Second { if time.Since(w.lastRateLimit) > 5 * time.Second {
@@ -134,5 +153,4 @@ func (w WorkerContext) queueJob(job Job) {
func (w WorkerContext) finishJob(job *Job) { func (w WorkerContext) finishJob(job *Job) {
job.OD.Wait.Done() job.OD.Wait.Done()
globalWait.Done()
} }