Mirror of https://github.com/simon987/sist2.git (synced 2025-12-12 15:08:53 +00:00)
Compare commits
11 Commits
* 86840b46f4
* e57f9916eb
* 565ba6ee76
* d83fc2c373
* d4da28249e
* 483a454c8d
* 018ac86640
* 398f1aead4
* d19a75926b
* 1ac8b40e3d
* a8505cb8c1
CMakeLists.txt

@@ -26,6 +26,7 @@ add_executable(
        src/parsing/arc.c src/parsing/arc.h
        src/parsing/doc.c src/parsing/doc.h
        src/log.c src/log.h
+        src/parsing/cbr.h src/parsing/cbr.c

        # argparse
        argparse/argparse.h argparse/argparse.c
@@ -136,6 +137,8 @@ TARGET_LINK_LIBRARIES(
        ${PROJECT_SOURCE_DIR}/lib/libcrypto.a
        ${PROJECT_SOURCE_DIR}/lib/libssl.a
        dl
+
+        pcre
)

add_custom_target(
Dockerfile

@@ -3,7 +3,7 @@ MAINTAINER simon987 <me@simon987.net>

RUN apt update
RUN apt install -y libglib2.0-0 libcurl4 libmagic1 libharfbuzz-bin libopenjp2-7 libarchive13 liblzma5 libzstd1 liblz4-1 \
-    curl libtiff5 libpng16-16
+    curl libtiff5 libpng16-16 libpcre3

RUN mkdir -p /usr/share/tessdata && \
    cd /usr/share/tessdata/ && \
README.md
@@ -8,9 +8,12 @@ sist2 (Simple incremental search tool)

*Warning: sist2 is in early development*

![sist2.png](sist2.png)

## Features

* Fast, low memory usage, multi-threaded
* Mobile-friendly Web interface
* Portable (all its features are packaged in a single executable)
* Extracts text from common file types \*
* Generates thumbnails \*
@@ -26,11 +29,27 @@ sist2 (Simple incremental search tool)

## Getting Started

-1. Have an [Elasticsearch](https://www.elastic.co/downloads/elasticsearch) instance running
+1. Have an Elasticsearch (>= 6.X.X) instance running
+    1. Download [from official website](https://www.elastic.co/downloads/elasticsearch)
+    1. *(or)* Run using docker:
+        ```bash
+        docker run -d --name es1 --net sist2_net -p 9200:9200 \
+            -e "discovery.type=single-node" elasticsearch:7.5.2
+        ```
+    1. *(or)* Run using docker-compose:
+        ```yaml
+        elasticsearch:
+          image: docker.elastic.co/elasticsearch/elasticsearch:7.5.2
+          environment:
+            - discovery.type=single-node
+            - "ES_JAVA_OPTS=-Xms1G -Xmx2G"
+        ```
-1. Download sist2 executable
+1. Download the [latest sist2 release](https://github.com/simon987/sist2/releases) *
+    1. *(or)* Download a [development snapshot](https://files.simon987.net/artifacts/Sist2/Build/) *(Not recommended!)*
+    1. *(or)* `docker pull simon987/sist2:latest`
+1. See [Usage guide](USAGE.md)

\* *Windows users*: **sist2** runs under [WSL](https://en.wikipedia.org/wiki/Windows_Subsystem_for_Linux)
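To confirm that the Elasticsearch instance from step 1 is reachable before indexing, a quick health check helps; this is only a sketch, assuming the default host and port used above:

```bash
# A "green" or "yellow" status means the node is ready to accept documents
curl -s "http://localhost:9200/_cluster/health?pretty"
```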
@@ -39,60 +58,18 @@ sist2 (Simple incremental search tool)

## Example usage

+See [Usage guide](USAGE.md) for more details

![sist2.gif](sist2.gif)

-See help page `sist2 --help` for more details.
-
-**Scan a directory**
-```bash
-sist2 scan ~/Documents -o ./orig_idx/
-sist2 scan --threads 4 --content-size 16384 /mnt/Pictures
-sist2 scan --incremental ./orig_idx/ -o ./updated_idx/ ~/Documents
-```
-
-**Push index to Elasticsearch or file**
-```bash
-sist2 index --force-reset ./my_idx
-sist2 index --print ./my_idx > raw_documents.ndjson
-```
-
-**Start web interface**
-```bash
-sist2 web --bind 0.0.0.0 --port 4321 ./my_idx1 ./my_idx2 ./my_idx3
-```
-
-### Use sist2 with docker
-
-**scan**
-```bash
-docker run -it \
-    -v /path/to/files/:/files \
-    -v $PWD/out/:/out \
-    simon987/sist2 scan -t 4 /files -o /out/my_idx1
-```
-**index**
-```bash
-docker run -it --network host \
-    -v $PWD/out/:/out \
-    simon987/sist2 index /out/my_idx1
-```
-
-**web**
-```bash
-docker run --rm --network host -d --name sist2 \
-    -v $PWD/out/my_idx:/idx \
-    -v $PWD/my/files:/files \
-    simon987/sist2 web --bind 0.0.0.0 /idx
-docker stop sist2
-```
+1. Scan a directory: `sist2 scan ~/Documents -o ./docs_idx`
+1. Push index to Elasticsearch: `sist2 index ./docs_idx`
+1. Start web interface: `sist2 web ./docs_idx`

## Format support

File type | Library | Content | Thumbnail | Metadata
:---|:---|:---|:---|:---
-pdf,xps,cbz,fb2,epub | MuPDF | text+ocr | yes, `png` | title |
+pdf,xps,cbz,cbr,fb2,epub | MuPDF | text+ocr | yes, `png` | title |
`audio/*` | ffmpeg | - | yes, `jpeg` | ID3 tags |
`video/*` | ffmpeg | - | yes, `jpeg` | title, comment, artist |
`image/*` | ffmpeg | - | yes, `jpeg` | [Common EXIF tags](https://github.com/simon987/sist2/blob/efdde2734eca9b14a54f84568863b7ffd59bdba3/src/parsing/media.c#L190) |
@@ -120,7 +97,7 @@ To check if a media file can be parsed without *seek*, execute `cat file.mp4 | f

### OCR

-You can enable OCR support for pdf,xps,cbz,fb2,epub file types with the
+You can enable OCR support for pdf,xps,cbz,cbr,fb2,epub file types with the
`--ocr <lang>` option. Download the language data files with your
package manager (`apt install tesseract-ocr-eng`) or directly [from Github](https://github.com/tesseract-ocr/tesseract/wiki/Data-Files).
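For example, once the English data files are installed, an OCR-enabled scan could look like this (the paths are illustrative):

```bash
# Extract text from scanned documents using the "eng" Tesseract model
sist2 scan --ocr eng ~/Scans -o ./scans_idx/
```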
@@ -145,8 +122,9 @@ binaries.

```bash
apt install git cmake pkg-config libglib2.0-dev \
    libssl-dev uuid-dev python3 libmagic-dev libfreetype6-dev \
-    libcurl-dev libbz2-dev yasm libharfbuzz-dev ragel \
-    libarchive-dev libtiff5 libpng16-16 libpango1.0-dev
+    libcurl4-openssl-dev libbz2-dev yasm libharfbuzz-dev ragel \
+    libarchive-dev libtiff5 libpng16-16 libpango1.0-dev \
+    libxml2-dev
```

2. Build
USAGE.md (new file)

@@ -0,0 +1,275 @@
# Usage

*More examples (specifically with docker/compose) are in progress*

* [scan](#scan)
    * [options](#scan-options)
    * [examples](#scan-examples)
    * [index format](#index-format)
* [index](#index)
    * [options](#index-options)
    * [examples](#index-examples)
* [web](#web)
    * [options](#web-options)
    * [examples](#web-examples)
    * [rewrite_url](#rewrite_url)
    * [link to specific indices](#link-to-specific-indices)
```
Usage: sist2 scan [OPTION]... PATH
   or: sist2 index [OPTION]... INDEX
   or: sist2 web [OPTION]... INDEX...
Lightning-fast file system indexer and search tool.

    -h, --help            show this help message and exit
    -v, --version         Show version and exit
    --verbose             Turn on logging
    --very-verbose        Turn on debug messages

Scan options
    -t, --threads=<int>   Number of threads. DEFAULT=1
    -q, --quality=<flt>   Thumbnail quality, on a scale of 1.0 to 31.0, 1.0 being the best. DEFAULT=5
    --size=<int>          Thumbnail size, in pixels. Use negative value to disable. DEFAULT=500
    --content-size=<int>  Number of bytes to be extracted from text documents. Use negative value to disable. DEFAULT=32768
    --incremental=<str>   Reuse an existing index and only scan modified files.
    -o, --output=<str>    Output directory. DEFAULT=index.sist2/
    --rewrite-url=<str>   Serve files from this url instead of from disk.
    --name=<str>          Index display name. DEFAULT: (name of the directory)
    --depth=<int>         Scan up to DEPTH subdirectories deep. Use 0 to only scan files in PATH. DEFAULT: -1
    --archive=<str>       Archive file mode (skip|list|shallow|recurse). skip: Don't parse, list: only get file names as text, shallow: Don't parse archives inside archives. DEFAULT: recurse
    --ocr=<str>           Tesseract language (use tesseract --list-langs to see which are installed on your machine)
    -e, --exclude=<str>   Files that match this regex will not be scanned
    --fast                Only index file names & mime type

Index options
    --es-url=<str>        Elasticsearch url with port. DEFAULT=http://localhost:9200
    -p, --print           Just print JSON documents to stdout.
    --script-file=<str>   Path to user script.
    --batch-size=<int>    Index batch size. DEFAULT: 100
    -f, --force-reset     Reset Elasticsearch mappings and settings. (You must use this option the first time you use the index command)

Web options
    --es-url=<str>        Elasticsearch url. DEFAULT=http://localhost:9200
    --bind=<str>          Listen on this address. DEFAULT=localhost
    --port=<str>          Listen on this port. DEFAULT=4090
    --auth=<str>          Basic auth in user:password format

Made by simon987 <me@simon987.net>. Released under GPL-3.0
```
## Scan

### Scan options

* `-t, --threads`
    Number of threads for file parsing. **Do not set a number higher than `$(nproc)`!**
* `-q, --quality`
    Thumbnail quality, on a scale of 1.0 to 31.0, 1.0 being the best. *Does not affect PDF thumbnail quality.*
* `--size`
    Thumbnail size in pixels.
* `--content-size`
    Number of bytes of text to be extracted from the content of files (plain text and PDFs).
    Repeated whitespace and special characters do not count toward this limit.
* `--incremental`
    Specify an existing index. Information about files in this index that were not modified (based on the *mtime* attribute)
    will be copied to the new index and will not be parsed again.
* `-o, --output` Output directory.
* `--rewrite-url` Set the `rewrite_url` option for the web module (See [rewrite_url](#rewrite_url))
* `--name` Set the `name` option for the web module
* `--depth` Maximum scan depth. Set to 0 to only scan files directly in the root directory; set to -1 for infinite depth
* `--archive` Archive file mode.
    * skip: Don't parse
    * list: Only get file names as text
    * shallow: Don't parse archives inside archives
    * recurse: Scan archives recursively (default)
* `--ocr` See [OCR](README.md#OCR)
* `-e, --exclude` Regex pattern to exclude files. A file is excluded if the pattern matches any
    part of the full absolute path.

    Examples:
    * `-e ".*\.ttf"`: Ignore ttf files
    * `-e ".*\.(ttf|rar)"`: Ignore ttf and rar files
    * `-e "^/mnt/backups/"`: Ignore all files in the `/mnt/backups/` directory
    * `-e "^/mnt/Data[12]/"`: Ignore all files in the `/mnt/Data1/` and `/mnt/Data2/` directories
    * `-e "(^/usr/)|(^/var/)|(^/media/DRIVE-A/tmp/)|(^/media/DRIVE-B/Trash/)"`: Exclude the
      `/usr`, `/var`, `/media/DRIVE-A/tmp` and `/media/DRIVE-B/Trash` directories
* `--fast` Only index file names and mime type
### Scan examples

Simple scan
```bash
sist2 scan ~/Documents

sist2 scan \
    --threads 4 --content-size 16000000 --quality 1.0 --archive shallow \
    --name "My Documents" --rewrite-url "http://nas.domain.local/My Documents/" \
    ~/Documents -o ./documents.idx/
```

Incremental scan
```bash
sist2 scan --incremental ./orig_idx/ -o ./updated_idx/ ~/Documents
```
### Index format

A typical `binary` type index structure looks like this:
```
documents.idx/
├── descriptor.json
├── _index_139965416830720
├── _index_139965425223424
├── _index_139965433616128
├── _index_139965442008832
└── thumbs
    ├── data.mdb
    └── lock.mdb
```

The `_index_*` files contain the raw binary index data and are not meant to be
read by other applications. The format is generally compatible across different
sist2 versions.

The `thumbs/` folder is an [LMDB](https://en.wikipedia.org/wiki/Lightning_Memory-Mapped_Database)
database containing the thumbnails.

The `descriptor.json` file contains general information about the index. The
following fields are safe to modify manually: `root`, `name`, [rewrite_url](#rewrite_url) and `timestamp`.
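Since `descriptor.json` is plain JSON, the safe-to-modify fields can be inspected from the shell; a minimal sketch, assuming `jq` is installed:

```bash
# Show only the fields that are safe to edit by hand
jq '{root, name, rewrite_url, timestamp}' documents.idx/descriptor.json
```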
*Advanced usage*

Instead of using the `scan` module, you can also import an index generated
by a third party application. The 'external' index must have the following format:

```
my_index/
├── descriptor.json
├── _index_0
└── thumbs
    ├── data.mdb
    └── lock.mdb
```

*descriptor.json*:
```json
{
  "uuid": "<valid UUID4>",
  "version": "_external_v1",
  "root": "(optional)",
  "name": "<name>",
  "rewrite_url": "(optional)",
  "type": "json",
  "timestamp": 1578971024
}
```

*_index_0*: NDJSON format (one JSON object per line)

```json
{
  "_id": "unique uuid for the file",
  "index": "index uuid4 (same one as descriptor.json!)",
  "mime": "application/x-cbz",
  "size": 14341204,
  "mtime": 1578882996,
  "extension": "cbz",
  "name": "my_book",
  "path": "path/to/books",
  "content": "text contents of the book",
  "title": "Title of the book",
  "tag": ["genre.fiction", "author.someguy", "etc..."],
  "_keyword": [
    {"k": "ISBN", "v": "ABCD34789231"}
  ],
  "_text": [
    {"k": "other", "v": "This will be indexed as text"}
  ]
}
```

You can find the full list of supported fields [here](src/io/serialize.c#L90).

The `_keyword.*` items will be indexed and searchable as **keyword** fields (only full matches allowed).
The `_text.*` items will be indexed and searchable as **text** fields (fuzzy searching allowed).

*thumbs/*:

LMDB key-value store. Keys are **binary** 128-bit UUID4s (`_id` field)
and values are raw image bytes.

Importing an external `binary` type index is technically possible but
it is currently unsupported and has no guarantees of back/forward compatibility.
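Before pushing a hand-built external index, it can save a round-trip to syntax-check the NDJSON file; a minimal sketch, assuming `jq` is installed and the layout above:

```bash
# jq parses the stream of JSON objects and exits non-zero on the first malformed line
jq -c . my_index/_index_0 > /dev/null && echo "NDJSON OK"
```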
## Index

### Index options

* `--es-url`
    Elasticsearch url and port. If you are using docker, make sure that both containers are on the
    same network.
* `-p, --print`
    Print the index in JSON format to stdout.
* `--script-file`
    Path to user script. See [Scripting](scripting/README.md).
* `--batch-size=<int>`
    Index batch size. Indexing is generally faster with larger batches, but payloads that
    are too large will fail, and the additional overhead of retrying with smaller sizes may slow
    down the process.
* `-f, --force-reset`
    Reset Elasticsearch mappings and settings.
    **(You must use this option the first time you use the index command)**
### Index examples

**Push to Elasticsearch**
```bash
sist2 index --force-reset --batch-size 1000 --es-url http://localhost:9200 ./my_index/
sist2 index ./my_index/
```

**Save index in JSON format**
```bash
sist2 index --print ./my_index/ > my_index.ndjson
```

**Inspect contents of an index**
```bash
sist2 index --print ./my_index/ | jq | less
```
## Web

### Web options
* `--es-url=<str>` Elasticsearch url.
* `--bind=<str>` Listen on this address.
* `--port=<str>` Listen on this port.
* `--auth=<str>` Basic auth in user:password format

### Web examples

**Single index**
```bash
sist2 web --auth admin:hunter2 --bind 0.0.0.0 --port 8888 my_index
```

**Multiple indices**
```bash
# Indices will be displayed in this order in the web interface
sist2 web index1 index2 index3 index4
```
### rewrite_url

When the `rewrite_url` field is not empty, the web module ignores the `root`
field and returns an HTTP redirect to `<rewrite_url><path>/<name><extension>`
instead of serving the file from disk.
Both the `root` and `rewrite_url` fields are safe to modify manually in the
`descriptor.json` file.
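As a worked example with hypothetical values (and assuming the extension is joined with a `.` separator): if `rewrite_url` is `http://nas.local/files/` and a document has `path` `books/scifi`, `name` `dune` and `extension` `epub`, the web module redirects to `http://nas.local/files/books/scifi/dune.epub` instead of reading the file from `root`.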
### Link to specific indices

To link to specific indices, you can add a comma-separated list of index names to
the URL: `?i=<name>,<name>`. By default, indices with `"(nsfw)"` in their name are
not displayed.
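For example, with the web module from the multiple-indices example above listening on the default port, `http://localhost:4090/?i=index1,index2` displays results from `index1` and `index2` only.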
scripting/README.md

@@ -54,6 +54,11 @@ script.painless.regex.enabled: true
```
Or, if you're using docker, add `-e "script.painless.regex.enabled=true"`

**Tag color**

You can specify the color for an individual tag by appending a
hexadecimal color code (`#RRGGBBAA`) to the tag name.

### Examples

If `(20XX)` is in the file name, add the `year.<year>` tag:
@@ -115,3 +120,33 @@ if (ctx._source.path != "") {
    tags.add("studio." + names[names.length-1]);
}
```

Parse `EXIF:F Number` tag
```Java
if (ctx._source?.exif_fnumber != null) {
    String[] values = ctx._source.exif_fnumber.splitOnToken(' ');
    String aperture = String.valueOf(Float.parseFloat(values[0]) / Float.parseFloat(values[1]));
    if (aperture == "NaN") {
        aperture = "0,0";
    }
    tags.add("Aperture.f/" + aperture.replace(".", ","));
}
```

Display year and month from `EXIF:DateTime` tag
```Java
if (ctx._source?.exif_datetime != null) {
    SimpleDateFormat parser = new SimpleDateFormat("yyyy:MM:dd HH:mm:ss");
    Date date = parser.parse(ctx._source.exif_datetime);

    SimpleDateFormat yp = new SimpleDateFormat("yyyy");
    SimpleDateFormat mp = new SimpleDateFormat("MMMMMMMMM");

    String year = yp.format(date);
    String month = mp.format(date);

    tags.add("Month." + month);
    tags.add("Year." + year);
}
```
src/cli.c
@@ -162,6 +162,26 @@ int scan_args_validate(scan_args_t *args, int argc, const char **argv) {
        args->tesseract_path = path;
    }

+    if (args->exclude_regex != NULL) {
+        const char *error;
+        int error_offset;
+
+        pcre *re = pcre_compile(args->exclude_regex, 0, &error, &error_offset, 0);
+        if (error != NULL) {
+            LOG_FATALF("cli.c", "pcre_compile returned error: %s (offset:%d)", error, error_offset)
+        }
+
+        pcre_extra *re_extra = pcre_study(re, 0, &error);
+        if (error != NULL) {
+            LOG_FATALF("cli.c", "pcre_study returned error: %s", error)
+        }
+
+        ScanCtx.exclude = re;
+        ScanCtx.exclude_extra = re_extra;
+    } else {
+        ScanCtx.exclude = NULL;
+    }
+
    LOG_DEBUGF("cli.c", "arg quality=%f", args->quality)
    LOG_DEBUGF("cli.c", "arg size=%d", args->size)
    LOG_DEBUGF("cli.c", "arg content_size=%d", args->content_size)

@@ -175,6 +195,8 @@ int scan_args_validate(scan_args_t *args, int argc, const char **argv) {
    LOG_DEBUGF("cli.c", "arg archive=%s", args->archive)
    LOG_DEBUGF("cli.c", "arg tesseract_lang=%s", args->tesseract_lang)
    LOG_DEBUGF("cli.c", "arg tesseract_path=%s", args->tesseract_path)
+    LOG_DEBUGF("cli.c", "arg exclude=%s", args->exclude_regex)
+    LOG_DEBUGF("cli.c", "arg fast=%d", args->fast)

    return 0;
}
src/cli.h

@@ -18,10 +18,14 @@ typedef struct scan_args {
    archive_mode_t archive_mode;
    char *tesseract_lang;
    const char *tesseract_path;
+    char *exclude_regex;
+    int fast;
} scan_args_t;

scan_args_t *scan_args_create();

void scan_args_destroy(scan_args_t *args);

int scan_args_validate(scan_args_t *args, int argc, const char **argv);

typedef struct index_args {

@@ -45,12 +49,15 @@ typedef struct web_args {
} web_args_t;

index_args_t *index_args_create();

void index_args_destroy(index_args_t *args);

web_args_t *web_args_create();

void web_args_destroy(web_args_t *args);

int index_args_validate(index_args_t *args, int argc, const char **argv);

int web_args_validate(web_args_t *args, int argc, const char **argv);

#endif
src/ctx.h

@@ -29,6 +29,9 @@ struct {
    pthread_mutex_t mupdf_mu;
    char * tesseract_lang;
    const char * tesseract_path;
+    pcre *exclude;
+    pcre_extra *exclude_extra;
+    int fast;
} ScanCtx;

struct {
src/elastic.c

@@ -20,6 +20,8 @@ typedef struct es_indexer {

static es_indexer_t *Indexer;

+void delete_queue(int max);
+
void print_json(cJSON *document, const char uuid_str[UUID_STR_LEN]) {

    cJSON *line = cJSON_CreateObject();

@@ -64,7 +66,7 @@ void execute_update_script(const char *script, const char index_id[UUID_STR_LEN]
    cJSON *term_obj = cJSON_AddObjectToObject(query, "term");
    cJSON_AddStringToObject(term_obj, "index", index_id);

-    char * str = cJSON_Print(body);
+    char *str = cJSON_Print(body);

    char bulk_url[4096];
    snprintf(bulk_url, 4096, "%s/sist2/_update_by_query?pretty", Indexer->es_url);

@@ -87,24 +89,18 @@ void execute_update_script(const char *script, const char index_id[UUID_STR_LEN]
    cJSON_Delete(resp);
}

-void elastic_flush() {
-
-    if (Indexer == NULL) {
-        Indexer = create_indexer(IndexCtx.es_url);
-    }
-
+void *create_bulk_buffer(int max, int *count, size_t *buf_len) {
    es_bulk_line_t *line = Indexer->line_head;

-    int count = 0;
+    *count = 0;

    size_t buf_size = 0;
    size_t buf_cur = 0;
    char *buf = malloc(1);

-    while (line != NULL) {
+    while (line != NULL && *count < max) {
        char action_str[512];
        snprintf(action_str, 512,
                 "{\"index\":{\"_id\":\"%s\", \"_type\":\"_doc\", \"_index\":\"sist2\"}}\n", line->uuid_str);
        size_t action_str_len = strlen(action_str);

        size_t line_len = strlen(line->line);

@@ -116,17 +112,20 @@ void elastic_flush() {
        memcpy(buf + buf_cur, line->line, line_len);
        buf_cur += line_len;

        es_bulk_line_t *tmp = line;
        line = line->next;
        free(tmp);
-        count++;
+        (*count)++;
    }
    buf = realloc(buf, buf_size + 1);
-    *(buf+buf_cur) = '\0';
+    *(buf + buf_cur) = '\0';

-    Indexer->line_head = NULL;
-    Indexer->line_tail = NULL;
-    Indexer->queued = 0;
+    *buf_len = buf_cur;
+    return buf;
+}
+
+void _elastic_flush(int max) {
+    size_t buf_len;
+    int count;
+    void *buf = create_bulk_buffer(max, &count, &buf_len);

    char bulk_url[4096];
    snprintf(bulk_url, 4096, "%s/sist2/_bulk?pipeline=tie", Indexer->es_url);

@@ -136,15 +135,33 @@
        LOG_FATALF("elastic.c", "Could not connect to %s, make sure that elasticsearch is running!\n", IndexCtx.es_url)
    }

-    LOG_INFOF("elastic.c", "Indexed %d documents (%zukB) <%d>", count, buf_cur / 1024, r->status_code);
-    if (r->status_code != 200 && r->status_code != 413) {
+    if (r->status_code == 413) {
+
+        if (max <= 1) {
+            LOG_ERRORF("elastic.c", "Single document too large, giving up: {%s}", Indexer->line_head->uuid_str)
+            free_response(r);
+            free(buf);
+            delete_queue(1);
+            if (Indexer->queued != 0) {
+                elastic_flush();
+            }
+            return;
+        }
+
+        LOG_WARNINGF("elastic.c", "Payload too large, retrying (%d documents)", count);
+
+        free_response(r);
+        free(buf);
+        _elastic_flush(max / 2);
+        return;
+
+    } else if (r->status_code != 200) {
        cJSON *ret_json = cJSON_Parse(r->body);
        if (cJSON_GetObjectItem(ret_json, "errors")->valueint != 0) {
            cJSON *err;
            cJSON_ArrayForEach(err, cJSON_GetObjectItem(ret_json, "items")) {
                if (cJSON_GetObjectItem(cJSON_GetObjectItem(err, "index"), "status")->valueint != 201) {
-                    char* str = cJSON_Print(err);
+                    char *str = cJSON_Print(err);
                    LOG_ERRORF("elastic.c", "%s\n", str);
                    cJSON_free(str);
                }

@@ -152,12 +169,44 @@
        }

        cJSON_Delete(ret_json);
+        delete_queue(Indexer->queued);
+
+    } else {
+        LOG_INFOF("elastic.c", "Indexed %d documents (%zukB) <%d>", count, buf_len / 1024, r->status_code);
+
+        delete_queue(max);
+
+        if (Indexer->queued != 0) {
+            elastic_flush();
+        }
    }

    free_response(r);
    free(buf);
}

+void delete_queue(int max) {
+    for (int i = 0; i < max; i++) {
+        es_bulk_line_t *tmp = Indexer->line_head;
+        Indexer->line_head = tmp->next;
+        if (Indexer->line_head == NULL) {
+            Indexer->line_tail = NULL;
+        } else {
+            free(tmp);
+        }
+        Indexer->queued -= 1;
+    }
+}
+
+void elastic_flush() {
+
+    if (Indexer == NULL) {
+        Indexer = create_indexer(IndexCtx.es_url);
+    }
+
+    _elastic_flush(Indexer->queued);
+}
+
void elastic_index_line(es_bulk_line_t *line) {

    if (Indexer == NULL) {

@@ -194,7 +243,7 @@ es_indexer_t *create_indexer(const char *url) {
    return indexer;
}

-void destroy_indexer(char * script, char index_id[UUID_STR_LEN]) {
+void destroy_indexer(char *script, char index_id[UUID_STR_LEN]) {

    char url[4096];

@@ -285,7 +334,7 @@ cJSON *elastic_get_document(const char *uuid_str) {
char *elastic_get_status() {
    char url[4096];
    snprintf(url, 4096,
             "%s/_cluster/state/metadata/sist2?filter_path=metadata.indices.*.state", WebCtx.es_url);

    response_t *r = web_get(url);
    cJSON *json = NULL;
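For reference, the bulk payload that `create_bulk_buffer` assembles alternates action and document lines in Elasticsearch's bulk NDJSON format, so a two-document flush looks roughly like this (illustrative `_id` values; the document bodies come from `line->line`):

```
{"index":{"_id":"<uuid-1>", "_type":"_doc", "_index":"sist2"}}
{"mime": "application/pdf", "size": 1024, "...": "..."}
{"index":{"_id":"<uuid-2>", "_type":"_doc", "_index":"sist2"}}
{"mime": "image/jpeg", "size": 2048, "...": "..."}
```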
src/io/walk.c

@@ -28,8 +28,18 @@ parse_job_t *create_fs_parse_job(const char *filepath, const struct stat *info,
    return job;
}

+int sub_strings[30];
+#define EXCLUDED(str) (pcre_exec(ScanCtx.exclude, ScanCtx.exclude_extra, filepath, strlen(filepath), 0, 0, sub_strings, sizeof(sub_strings)) >= 0)
+
int handle_entry(const char *filepath, const struct stat *info, int typeflag, struct FTW *ftw) {
-    if (ftw->level <= ScanCtx.depth && typeflag == FTW_F && S_ISREG(info->st_mode)) {
+    if (typeflag == FTW_F && S_ISREG(info->st_mode) && ftw->level <= ScanCtx.depth) {
+
+        if (ScanCtx.exclude != NULL && EXCLUDED(filepath)) {
+            LOG_DEBUGF("walk.c", "Excluded: %s", filepath)
+            return 0;
+        }

        parse_job_t *job = create_fs_parse_job(filepath, info, ftw->base);
        tpool_add_work(ScanCtx.pool, parse, job);
    }
src/main.c
@@ -6,7 +6,7 @@
#define EPILOG "Made by simon987 <me@simon987.net>. Released under GPL-3.0"


-static const char *const Version = "1.2.12";
+static const char *const Version = "1.2.16";
static const char *const usage[] = {
        "sist2 scan [OPTION]... PATH",
        "sist2 index [OPTION]... INDEX",

@@ -53,12 +53,15 @@ void sist2_scan(scan_args_t *args) {
    ScanCtx.index.desc.root_len = (short) strlen(ScanCtx.index.desc.root);
    ScanCtx.tesseract_lang = args->tesseract_lang;
    ScanCtx.tesseract_path = args->tesseract_path;
+    ScanCtx.fast = args->fast;

    init_dir(ScanCtx.index.path);

    ScanCtx.mime_table = mime_get_mime_table();
    ScanCtx.ext_table = mime_get_ext_table();

+    cbr_init();
+
    char store_path[PATH_MAX];
    snprintf(store_path, PATH_MAX, "%sthumbs", ScanCtx.index.path);
    mkdir(store_path, S_IWUSR | S_IRUSR | S_IXUSR);

@@ -238,6 +241,8 @@ int main(int argc, const char *argv[]) {
                   "shallow: Don't parse archives inside archives. DEFAULT: recurse"),
        OPT_STRING(0, "ocr", &scan_args->tesseract_lang, "Tesseract language (use tesseract --list-langs to see "
                                                         "which are installed on your machine)"),
+        OPT_STRING('e', "exclude", &scan_args->exclude_regex, "Files that match this regex will not be scanned"),
+        OPT_BOOLEAN(0, "fast", &scan_args->fast, "Only index file names & mime type"),

        OPT_GROUP("Index options"),
        OPT_STRING(0, "es-url", &common_es_url, "Elasticsearch url with port. DEFAULT=http://localhost:9200"),

@@ -284,9 +289,7 @@ int main(int argc, const char *argv[]) {
        }
        sist2_scan(scan_args);

-    }
-
-    else if (strcmp(argv[0], "index") == 0) {
+    } else if (strcmp(argv[0], "index") == 0) {

        int err = index_args_validate(index_args, argc, argv);
        if (err != 0) {

@@ -302,8 +305,7 @@ int main(int argc, const char *argv[]) {
        }
        sist2_web(web_args);

-    }
-    else {
+    } else {
        fprintf(stderr, "Invalid command: '%s'\n", argv[0]);
        argparse_usage(&argparse);
        return 1;
src/parsing/arc.c

@@ -1,8 +1,6 @@
#include "arc.h"
#include "src/ctx.h"

-#define ARC_BUF_SIZE 8192
-
int should_parse_filtered_file(const char *filepath, int ext) {
    char tmp[PATH_MAX * 2];
src/parsing/arc.h

@@ -2,6 +2,7 @@
#define SIST2_ARC_H

#include "src/sist.h"
+#define ARC_BUF_SIZE 8192

int should_parse_filtered_file(const char *filepath, int ext);
src/parsing/cbr.c (new file)
@@ -0,0 +1,52 @@
#import "cbr.h"
#import "src/ctx.h"

unsigned int cbr_mime;
unsigned int cbz_mime;

void cbr_init() {
    cbr_mime = mime_get_mime_by_string(ScanCtx.mime_table, "application/x-cbr");
    cbz_mime = mime_get_mime_by_string(ScanCtx.mime_table, "application/x-cbz");
}

int is_cbr(unsigned int mime) {
    return mime == cbr_mime;
}

void parse_cbr(void *buf, size_t buf_len, document_t *doc) {
    char *out_buf = malloc(buf_len * 2);
    size_t out_buf_used = 0;

    struct archive *rar_in = archive_read_new();
    archive_read_support_filter_none(rar_in);
    archive_read_support_format_rar(rar_in);

    archive_read_open_memory(rar_in, buf, buf_len);

    struct archive *zip_out = archive_write_new();
    archive_write_set_format_zip(zip_out);
    archive_write_open_memory(zip_out, out_buf, buf_len * 2, &out_buf_used);

    struct archive_entry *entry;
    while (archive_read_next_header(rar_in, &entry) == ARCHIVE_OK) {
        archive_write_header(zip_out, entry);

        char arc_buf[ARC_BUF_SIZE];
        int len = archive_read_data(rar_in, arc_buf, ARC_BUF_SIZE);
        while (len > 0) {
            archive_write_data(zip_out, arc_buf, len);
            len = archive_read_data(rar_in, arc_buf, ARC_BUF_SIZE);
        }
    }

    archive_write_close(zip_out);
    archive_write_free(zip_out);

    archive_read_close(rar_in);
    archive_read_free(rar_in);

    doc->mime = cbz_mime;
    parse_pdf(out_buf, out_buf_used, doc);
    doc->mime = cbr_mime;
    free(out_buf);
}
src/parsing/cbr.h (new file)
@@ -0,0 +1,12 @@
#ifndef SIST2_CBR_H
#define SIST2_CBR_H

#include "src/sist.h"

void cbr_init();

int is_cbr(unsigned int mime);

void parse_cbr(void *buf, size_t buf_len, document_t *doc);

#endif
src/parsing/mime.h

@@ -8,7 +8,7 @@
#define MIME_EMPTY 1

#define DONT_PARSE 0x80000000
-#define SHOULD_PARSE(mime_id) (mime_id & DONT_PARSE) != DONT_PARSE && mime_id != 0
+#define SHOULD_PARSE(mime_id) (ScanCtx.fast == 0 && (mime_id & DONT_PARSE) != DONT_PARSE && mime_id != 0)

#define PDF_MASK 0x40000000
#define IS_PDF(mime_id) (mime_id & PDF_MASK) == PDF_MASK
src/parsing/parse.c

@@ -149,6 +149,13 @@ void parse(void *arg) {
        if (doc_buf != buf && doc_buf != NULL) {
            free(doc_buf);
        }
+    } else if (is_cbr(doc.mime)) {
+        void *cbr_buf = read_all(job, (char *) buf, bytes_read);
+        parse_cbr(cbr_buf, doc.size, &doc);
+
+        if (cbr_buf != buf && cbr_buf != NULL) {
+            free(cbr_buf);
+        }
    }

    //Parent meta
src/sist.h

@@ -35,6 +35,7 @@
#include <libxml/xmlstring.h>
#define BOOL int
#include <tesseract/capi.h>
+#include <pcre.h>

#include <onion/onion.h>
#include <onion/handler.h>

@@ -59,6 +60,7 @@
#include "parsing/font.h"
#include "parsing/arc.h"
#include "parsing/doc.h"
+#include "parsing/cbr.h"
#include "cli.h"
#include "log.h"
#include "utf8.h/utf8.h"

@@ -95,7 +95,6 @@ typedef int (*read_func_t)(struct vfile *, void *buf, size_t size);
typedef void (*close_func_t)(struct vfile *);

typedef struct vfile {
    union {
        int fd;
        struct archive *arc;
@@ -91,7 +91,7 @@ text_buffer_t text_buffer_create(int max_size) {
}

void text_buffer_terminate_string(text_buffer_t *buf) {
-    if (*(buf->dyn_buffer.buf + buf->dyn_buffer.cur - 1) == ' ') {
+    if (buf->dyn_buffer.cur > 0 && *(buf->dyn_buffer.buf + buf->dyn_buffer.cur - 1) == ' ') {
        *(buf->dyn_buffer.buf + buf->dyn_buffer.cur - 1) = '\0';
    } else {
        dyn_buffer_write_char(&buf->dyn_buffer, '\0');

@@ -259,8 +259,10 @@ char *abspath(const char *path) {
    if (abs == NULL) {
        return NULL;
    }
-    abs = realloc(abs, strlen(abs) + 2);
-    strcat(abs, "/");
+    if (strlen(abs) > 1) {
+        abs = realloc(abs, strlen(abs) + 2);
+        strcat(abs, "/");
+    }

    wordfree(&w);
    return abs;
File diff suppressed because one or more lines are too long
@@ -102,6 +102,7 @@ body {
    border-bottom: none;
    border-left: none;
    border-right: none;
+    padding: .25rem 0.5rem;
}

.list-group-item:first-child {

@@ -199,7 +200,7 @@ body {
    max-width: 100%;
    max-height: 175px;
    margin: 0 auto 0;
-    padding: 3px 3px 0 3px;
+    padding: 3px 3px 0;
    width: auto;
    height: auto;
}

@@ -208,7 +209,7 @@ body {
    display: block;
    max-width: 64px;
    max-height: 64px;
-    margin: 0 auto 0;
+    margin: 0 auto;
    width: auto;
    height: auto;
}

@@ -391,10 +392,6 @@ option {
    margin-top: 1em;
}

-.list-group-item {
-    padding: .25rem 0.5rem;
-}
-
.wrapper-sm {
    min-width: 64px;
}
@@ -183,7 +183,7 @@ function infoButtonCb(hit) {
        "bitrate", "artist", "album", "album_artist", "genre", "title", "font_name", "tag"
    ]);
    Object.keys(doc)
        .filter(key => key.startsWith("_keyword.") || key.startsWith("_text.") || displayFields.has(key) || key.startsWith("exif_"))
        .forEach(key => {
            tbody.append($("<tr>")
                .append($("<td>").text(key))

@@ -377,6 +377,7 @@ function makeThumbnail(mimeCategory, hit, imgWrapper, small) {
        || hit["_source"]["mime"] === "application/pdf"
        || hit["_source"]["mime"] === "application/epub+zip"
        || hit["_source"]["mime"] === "application/x-cbz"
+        || hit["_source"]["mime"] === "application/x-cbr"
        || hit["_source"].hasOwnProperty("font_name")
    ) {
        thumbnail = document.createElement("img");
@@ -11,7 +11,7 @@

<nav class="navbar navbar-expand-lg">
    <a class="navbar-brand" href="/">sist2</a>
-    <span class="badge badge-pill version">v1.2.12</span>
+    <span class="badge badge-pill version">v1.2.16</span>
    <span class="tagline">Lightning-fast file system indexer and search tool </span>
    <a style="margin-left: auto" id="theme" class="btn" title="Toggle theme" href="/">Theme</a>
</nav>