Mirror of https://github.com/simon987/sist2.git (synced 2025-12-12 15:08:53 +00:00)
Compare commits
5 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 018ac86640 | |
| | 398f1aead4 | |
| | d19a75926b | |
| | 1ac8b40e3d | |
| | a8505cb8c1 | |
@@ -26,6 +26,7 @@ add_executable(
src/parsing/arc.c src/parsing/arc.h
src/parsing/doc.c src/parsing/doc.h
src/log.c src/log.h
src/parsing/cbr.h src/parsing/cbr.c

# argparse
argparse/argparse.h argparse/argparse.c
@@ -92,7 +92,7 @@ docker stop sist2

File type | Library | Content | Thumbnail | Metadata
:---|:---|:---|:---|:---
pdf,xps,cbz,fb2,epub | MuPDF | text+ocr | yes, `png` | title |
pdf,xps,cbz,cbr,fb2,epub | MuPDF | text+ocr | yes, `png` | title |
`audio/*` | ffmpeg | - | yes, `jpeg` | ID3 tags |
`video/*` | ffmpeg | - | yes, `jpeg` | title, comment, artist |
`image/*` | ffmpeg | - | yes, `jpeg` | [Common EXIF tags](https://github.com/simon987/sist2/blob/efdde2734eca9b14a54f84568863b7ffd59bdba3/src/parsing/media.c#L190) |
@@ -120,7 +120,7 @@ To check if a media file can be parsed without *seek*, execute `cat file.mp4 | ffprobe -`

### OCR

You can enable OCR support for pdf,xps,cbz,fb2,epub file types with the
You can enable OCR support for pdf,xps,cbz,cbr,fb2,epub file types with the
`--ocr <lang>` option. Download the language data files with your
package manager (`apt install tesseract-ocr-eng`) or directly [from Github](https://github.com/tesseract-ocr/tesseract/wiki/Data-Files).
@@ -20,6 +20,8 @@ typedef struct es_indexer {

static es_indexer_t *Indexer;

void delete_queue(int max);

void print_json(cJSON *document, const char uuid_str[UUID_STR_LEN]) {

cJSON *line = cJSON_CreateObject();
@@ -64,7 +66,7 @@ void execute_update_script(const char *script, const char index_id[UUID_STR_LEN]
cJSON *term_obj = cJSON_AddObjectToObject(query, "term");
cJSON_AddStringToObject(term_obj, "index", index_id);

char * str = cJSON_Print(body);
char *str = cJSON_Print(body);

char bulk_url[4096];
snprintf(bulk_url, 4096, "%s/sist2/_update_by_query?pretty", Indexer->es_url);
@@ -87,24 +89,18 @@ void execute_update_script(const char *script, const char index_id[UUID_STR_LEN]
cJSON_Delete(resp);
}

void elastic_flush() {

if (Indexer == NULL) {
Indexer = create_indexer(IndexCtx.es_url);
}

void *create_bulk_buffer(int max, int *count, size_t *buf_len) {
es_bulk_line_t *line = Indexer->line_head;

int count = 0;
*count = 0;

size_t buf_size = 0;
size_t buf_cur = 0;
char *buf = malloc(1);

while (line != NULL) {
while (line != NULL && *count < max) {
char action_str[512];
snprintf(action_str, 512,
"{\"index\":{\"_id\":\"%s\", \"_type\":\"_doc\", \"_index\":\"sist2\"}}\n", line->uuid_str);
"{\"index\":{\"_id\":\"%s\", \"_type\":\"_doc\", \"_index\":\"sist2\"}}\n", line->uuid_str);
size_t action_str_len = strlen(action_str);

size_t line_len = strlen(line->line);
@@ -116,17 +112,20 @@ void elastic_flush() {
memcpy(buf + buf_cur, line->line, line_len);
buf_cur += line_len;

es_bulk_line_t *tmp = line;
line = line->next;
free(tmp);
count++;
(*count)++;
}
buf = realloc(buf, buf_size + 1);
*(buf+buf_cur) = '\0';
*(buf + buf_cur) = '\0';

Indexer->line_head = NULL;
Indexer->line_tail = NULL;
Indexer->queued = 0;
*buf_len = buf_cur;
return buf;
}

void _elastic_flush(int max) {
size_t buf_len;
int count;
void *buf = create_bulk_buffer(max, &count, &buf_len);

char bulk_url[4096];
snprintf(bulk_url, 4096, "%s/sist2/_bulk?pipeline=tie", Indexer->es_url);
@@ -136,15 +135,33 @@ void elastic_flush() {
LOG_FATALF("elastic.c", "Could not connect to %s, make sure that elasticsearch is running!\n", IndexCtx.es_url)
}

LOG_INFOF("elastic.c", "Indexed %d documents (%zukB) <%d>", count, buf_cur / 1024, r->status_code);
if (r->status_code == 413) {

if (r->status_code != 200 && r->status_code != 413) {
if (max <= 1) {
LOG_ERRORF("elastic.c", "Single document too large, giving up: {%s}", Indexer->line_head->uuid_str)
free_response(r);
free(buf);
delete_queue(1);
if (Indexer->queued != 0) {
elastic_flush();
}
return;
}

LOG_WARNINGF("elastic.c", "Payload too large, retrying (%d documents)", count);

free_response(r);
free(buf);
_elastic_flush(max / 2);
return;

} else if (r->status_code != 200) {
cJSON *ret_json = cJSON_Parse(r->body);
if (cJSON_GetObjectItem(ret_json, "errors")->valueint != 0) {
cJSON *err;
cJSON_ArrayForEach(err, cJSON_GetObjectItem(ret_json, "items")) {
if (cJSON_GetObjectItem(cJSON_GetObjectItem(err, "index"), "status")->valueint != 201) {
char* str = cJSON_Print(err);
char *str = cJSON_Print(err);
LOG_ERRORF("elastic.c", "%s\n", str);
cJSON_free(str);
}
@@ -152,12 +169,44 @@ void elastic_flush() {
}

cJSON_Delete(ret_json);
delete_queue(Indexer->queued);

} else {
LOG_INFOF("elastic.c", "Indexed %d documents (%zukB) <%d>", count, buf_len / 1024, r->status_code);

delete_queue(max);

if (Indexer->queued != 0) {
elastic_flush();
}
}

free_response(r);
free(buf);
}

void delete_queue(int max) {
for (int i = 0; i < max; i++) {
es_bulk_line_t *tmp = Indexer->line_head;
Indexer->line_head = tmp->next;
if (Indexer->line_head == NULL) {
Indexer->line_tail = NULL;
} else {
free(tmp);
}
Indexer->queued -= 1;
}
}

void elastic_flush() {

if (Indexer == NULL) {
Indexer = create_indexer(IndexCtx.es_url);
}

_elastic_flush(Indexer->queued);
}

void elastic_index_line(es_bulk_line_t *line) {

if (Indexer == NULL) {
@@ -194,7 +243,7 @@ es_indexer_t *create_indexer(const char *url) {
return indexer;
}

void destroy_indexer(char * script, char index_id[UUID_STR_LEN]) {
void destroy_indexer(char *script, char index_id[UUID_STR_LEN]) {

char url[4096];

@@ -285,7 +334,7 @@ cJSON *elastic_get_document(const char *uuid_str) {
char *elastic_get_status() {
char url[4096];
snprintf(url, 4096,
"%s/_cluster/state/metadata/sist2?filter_path=metadata.indices.*.state", WebCtx.es_url);
"%s/_cluster/state/metadata/sist2?filter_path=metadata.indices.*.state", WebCtx.es_url);

response_t *r = web_get(url);
cJSON *json = NULL;
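For context on the payload that `create_bulk_buffer` assembles in the hunks above: each document queued with `elastic_index_line` contributes two newline-terminated lines to the body sent to the `sist2/_bulk` endpoint, an action line naming the target index and document id, followed by the document JSON itself. A minimal self-contained sketch of that pairing (the sample id and document below are made up; the buffer growth, queue handling, and 413 retry logic stay in the diff above):

```c
#include <stdio.h>

/* Prints one action/document pair in the NDJSON format expected by
 * POST <es_url>/sist2/_bulk. The values passed in main() are hypothetical. */
static void print_bulk_pair(const char *uuid_str, const char *doc_json) {
    printf("{\"index\":{\"_id\":\"%s\", \"_type\":\"_doc\", \"_index\":\"sist2\"}}\n", uuid_str);
    printf("%s\n", doc_json);
}

int main(void) {
    print_bulk_pair("00000000-0000-0000-0000-000000000000",
                    "{\"name\":\"example.pdf\",\"size\":1024}");
    return 0;
}
```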
@@ -6,7 +6,7 @@
#define EPILOG "Made by simon987 <me@simon987.net>. Released under GPL-3.0"

static const char *const Version = "1.2.12";
static const char *const Version = "1.2.14";
static const char *const usage[] = {
"sist2 scan [OPTION]... PATH",
"sist2 index [OPTION]... INDEX",
@@ -59,6 +59,8 @@ void sist2_scan(scan_args_t *args) {
ScanCtx.mime_table = mime_get_mime_table();
ScanCtx.ext_table = mime_get_ext_table();

cbr_init();

char store_path[PATH_MAX];
snprintf(store_path, PATH_MAX, "%sthumbs", ScanCtx.index.path);
mkdir(store_path, S_IWUSR | S_IRUSR | S_IXUSR);
@@ -1,8 +1,6 @@
#include "arc.h"
#include "src/ctx.h"

#define ARC_BUF_SIZE 8192

int should_parse_filtered_file(const char *filepath, int ext) {
char tmp[PATH_MAX * 2];
@@ -2,6 +2,7 @@
#define SIST2_ARC_H

#include "src/sist.h"
#define ARC_BUF_SIZE 8192

int should_parse_filtered_file(const char *filepath, int ext);
52 src/parsing/cbr.c Normal file
@@ -0,0 +1,52 @@
#import "cbr.h"
#import "src/ctx.h"

unsigned int cbr_mime;
unsigned int cbz_mime;

void cbr_init() {
cbr_mime = mime_get_mime_by_string(ScanCtx.mime_table, "application/x-cbr");
cbz_mime = mime_get_mime_by_string(ScanCtx.mime_table, "application/x-cbz");
}

int is_cbr(unsigned int mime) {
return mime == cbr_mime;
}

void parse_cbr(void *buf, size_t buf_len, document_t *doc) {
char *out_buf = malloc(buf_len * 2);
size_t out_buf_used = 0;

struct archive *rar_in = archive_read_new();
archive_read_support_filter_none(rar_in);
archive_read_support_format_rar(rar_in);

archive_read_open_memory(rar_in, buf, buf_len);

struct archive *zip_out = archive_write_new();
archive_write_set_format_zip(zip_out);
archive_write_open_memory(zip_out, out_buf, buf_len * 2, &out_buf_used);

struct archive_entry *entry;
while (archive_read_next_header(rar_in, &entry) == ARCHIVE_OK) {
archive_write_header(zip_out, entry);

char arc_buf[ARC_BUF_SIZE];
int len = archive_read_data(rar_in, arc_buf, ARC_BUF_SIZE);
while (len > 0) {
archive_write_data(zip_out, arc_buf, len);
len = archive_read_data(rar_in, arc_buf, ARC_BUF_SIZE);
}
}

archive_write_close(zip_out);
archive_write_free(zip_out);

archive_read_close(rar_in);
archive_read_free(rar_in);

doc->mime = cbz_mime;
parse_pdf(out_buf, out_buf_used, doc);
doc->mime = cbr_mime;
free(out_buf);
}
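The new `parse_cbr` above repacks a RAR comic archive into an in-memory ZIP so the existing cbz path (MuPDF via `parse_pdf`) can handle it. A self-contained sketch of the same libarchive read/copy pattern follows; the source archive here is a ZIP built in memory rather than a real .cbr, the entry name and buffer sizes are arbitrary, and error handling is omitted just as in the diff (compile with -larchive):

```c
#include <archive.h>
#include <archive_entry.h>
#include <stdio.h>
#include <string.h>

int main(void) {
    static char zip_a[8192], zip_b[16384];
    size_t a_used = 0, b_used = 0;
    const char *payload = "hello";

    /* Build a tiny source archive in zip_a. */
    struct archive *w = archive_write_new();
    archive_write_set_format_zip(w);
    archive_write_open_memory(w, zip_a, sizeof(zip_a), &a_used);
    struct archive_entry *e = archive_entry_new();
    archive_entry_set_pathname(e, "page1.txt");
    archive_entry_set_size(e, (la_int64_t) strlen(payload));
    archive_entry_set_filetype(e, AE_IFREG);
    archive_entry_set_perm(e, 0644);
    archive_write_header(w, e);
    archive_write_data(w, payload, strlen(payload));
    archive_entry_free(e);
    archive_write_close(w);
    archive_write_free(w);

    /* Copy every entry of zip_a into a second in-memory archive,
     * the same loop structure parse_cbr uses for RAR -> ZIP. */
    struct archive *in = archive_read_new();
    archive_read_support_format_zip(in);
    archive_read_open_memory(in, zip_a, a_used);

    struct archive *out = archive_write_new();
    archive_write_set_format_zip(out);
    archive_write_open_memory(out, zip_b, sizeof(zip_b), &b_used);

    struct archive_entry *entry;
    while (archive_read_next_header(in, &entry) == ARCHIVE_OK) {
        archive_write_header(out, entry);
        char buf[4096];
        la_ssize_t len = archive_read_data(in, buf, sizeof(buf));
        while (len > 0) {
            archive_write_data(out, buf, (size_t) len);
            len = archive_read_data(in, buf, sizeof(buf));
        }
    }
    archive_write_close(out);
    archive_write_free(out);
    archive_read_close(in);
    archive_read_free(in);

    printf("copied archive is %zu bytes\n", b_used);
    return 0;
}
```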
12 src/parsing/cbr.h Normal file
@@ -0,0 +1,12 @@
#ifndef SIST2_CBR_H
#define SIST2_CBR_H

#include "src/sist.h"

void cbr_init();

int is_cbr(unsigned int mime);

void parse_cbr(void *buf, size_t buf_len, document_t *doc);

#endif
@@ -149,6 +149,13 @@ void parse(void *arg) {
if (doc_buf != buf && doc_buf != NULL) {
free(doc_buf);
}
} else if (is_cbr(doc.mime)) {
void *cbr_buf = read_all(job, (char *) buf, bytes_read);
parse_cbr(cbr_buf, doc.size, &doc);

if (cbr_buf != buf && cbr_buf != NULL) {
free(cbr_buf);
}
}

//Parent meta
@@ -59,6 +59,7 @@
#include "parsing/font.h"
#include "parsing/arc.h"
#include "parsing/doc.h"
#include "parsing/cbr.h"
#include "cli.h"
#include "log.h"
#include "utf8.h/utf8.h"
@@ -95,7 +95,6 @@ typedef int (*read_func_t)(struct vfile *, void *buf, size_t size);
typedef void (*close_func_t)(struct vfile *);

typedef struct vfile {

union {
int fd;
struct archive *arc;
@@ -91,7 +91,7 @@ text_buffer_t text_buffer_create(int max_size) {
}

void text_buffer_terminate_string(text_buffer_t *buf) {
if (*(buf->dyn_buffer.buf + buf->dyn_buffer.cur - 1) == ' ') {
if (buf->dyn_buffer.cur > 0 && *(buf->dyn_buffer.buf + buf->dyn_buffer.cur - 1) == ' ') {
*(buf->dyn_buffer.buf + buf->dyn_buffer.cur - 1) = '\0';
} else {
dyn_buffer_write_char(&buf->dyn_buffer, '\0');
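The `text_buffer_terminate_string` change above adds a `buf->dyn_buffer.cur > 0` guard so that an empty buffer no longer reads the byte before the start of its allocation when deciding how to terminate the string. A small self-contained illustration of the guarded logic, using a stand-in struct rather than the project's `dyn_buffer` type:

```c
#include <stdio.h>
#include <string.h>

/* Stand-in for the project's dynamic buffer: fixed storage is enough
 * to illustrate the termination logic. */
typedef struct {
    char buf[32];
    size_t cur;
} toy_buffer_t;

/* Mirrors the guarded termination: only look at the previous byte when
 * the buffer is non-empty; otherwise just append the terminator. */
static void terminate_string(toy_buffer_t *b) {
    if (b->cur > 0 && b->buf[b->cur - 1] == ' ') {
        b->buf[b->cur - 1] = '\0';
    } else {
        b->buf[b->cur++] = '\0';
    }
}

int main(void) {
    toy_buffer_t empty = {.cur = 0};
    terminate_string(&empty);           /* no out-of-bounds read on empty input */

    toy_buffer_t word = {.buf = "word ", .cur = 5};
    terminate_string(&word);            /* trailing space replaced by '\0' */

    printf("'%s' '%s'\n", empty.buf, word.buf);
    return 0;
}
```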
File diff suppressed because one or more lines are too long
@@ -102,6 +102,7 @@ body {
border-bottom: none;
border-left: none;
border-right: none;
padding: .25rem 0.5rem;
}

.list-group-item:first-child {
@@ -199,7 +200,7 @@ body {
max-width: 100%;
max-height: 175px;
margin: 0 auto 0;
padding: 3px 3px 0 3px;
padding: 3px 3px 0;
width: auto;
height: auto;
}
@@ -208,7 +209,7 @@ body {
display: block;
max-width: 64px;
max-height: 64px;
margin: 0 auto 0;
margin: 0 auto;
width: auto;
height: auto;
}
@@ -391,10 +392,6 @@ option {
margin-top: 1em;
}

.list-group-item {
padding: .25rem 0.5rem;
}

.wrapper-sm {
min-width: 64px;
}
@@ -183,7 +183,7 @@ function infoButtonCb(hit) {
"bitrate", "artist", "album", "album_artist", "genre", "title", "font_name", "tag"
]);
Object.keys(doc)
.filter(key => key.startsWith("_keyword.") || key.startsWith("_text.") || displayFields.has(key) || key.startsWith("exif_"))
.filter(key => key.startsWith("_keyword.") || key.startsWith("_text.") || displayFields.has(key) || key.startsWith("exif_"))
.forEach(key => {
tbody.append($("<tr>")
.append($("<td>").text(key))
@@ -377,6 +377,7 @@ function makeThumbnail(mimeCategory, hit, imgWrapper, small) {
|| hit["_source"]["mime"] === "application/pdf"
|| hit["_source"]["mime"] === "application/epub+zip"
|| hit["_source"]["mime"] === "application/x-cbz"
|| hit["_source"]["mime"] === "application/x-cbr"
|| hit["_source"].hasOwnProperty("font_name")
) {
thumbnail = document.createElement("img");
@@ -11,7 +11,7 @@

<nav class="navbar navbar-expand-lg">
<a class="navbar-brand" href="/">sist2</a>
<span class="badge badge-pill version">v1.2.12</span>
<span class="badge badge-pill version">v1.2.14</span>
<span class="tagline">Lightning-fast file system indexer and search tool </span>
<a style="margin-left: auto" id="theme" class="btn" title="Toggle theme" href="/">Theme</a>
</nav>