Mirror of https://github.com/simon987/sist2.git
Remove UUID dep, fix incremental scan, use MD5(path) as unique id, version bump
Commit 050c1283a3 (parent c6e1ba03bc)

.gitignore (vendored, 1 line changed)
@@ -1,6 +1,5 @@
.idea
thumbs
test
*.cbp
CMakeCache.txt
CMakeFiles
@@ -40,7 +40,6 @@ find_package(lmdb CONFIG REQUIRED)
find_package(cJSON CONFIG REQUIRED)
find_package(unofficial-glib CONFIG REQUIRED)
find_package(unofficial-mongoose CONFIG REQUIRED)
find_library(UUID_LIB NAMES uuid)
find_package(CURL CONFIG REQUIRED)

#find_package(OpenSSL REQUIRED)

@@ -68,7 +67,8 @@ if (SIST_DEBUG)
-fstack-protector
-fno-omit-frame-pointer
-fsanitize=address
-O2
-fno-inline
# -O2
)
target_link_options(
sist2

@@ -81,7 +81,6 @@ if (SIST_DEBUG)
OUTPUT_NAME sist2_debug
)
else ()
# set(VCPKG_BUILD_TYPE release)
target_compile_options(
sist2
PRIVATE

@@ -108,10 +107,11 @@ target_link_libraries(
unofficial::mongoose::mongoose
CURL::libcurl

${UUID_LIB}
pthread
magic

c

scan
)
@@ -127,7 +127,7 @@ binaries (GCC 7+ required).
1. Install compile-time dependencies

```bash
vcpkg install lmdb cjson glib libarchive[core,bzip2,libxml2,lz4,lzma,lzo] pthread tesseract libxml2 ffmpeg zstd gtest mongoose libuuid libmagic libraw curl[core,ssl] jbig2dec brotli libmupdf
vcpkg install lmdb cjson glib libarchive[core,bzip2,libxml2,lz4,lzma,lzo] pthread tesseract libxml2 ffmpeg zstd gtest mongoose libmagic libraw curl[core,ssl] jbig2dec brotli libmupdf
```

2. Build

@@ -241,9 +241,11 @@ The `_text.*` items will be indexed and searchable as **text** fields (fuzzy sea

*thumbs/*:

LMDB key-value store. Keys are **binary** 128-bit UUID4s (`_id` field)
LMDB key-value store. Keys are **binary** 16-byte md5 hash* (`_id` field)
and values are raw image bytes.

*\* Hash is calculated from the full path of the file, including the extension, relative to the index root*

Importing an external `binary` type index is technically possible but
it is currently unsupported and has no guaranties of back/forward compatibility.
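To make the key scheme described above concrete, here is a minimal standalone sketch (not part of the commit): it derives the 16-byte LMDB key and its 32-character hex form from a hypothetical path relative to the index root, assuming OpenSSL's libcrypto is available (the same `MD5()` call the new scanner code uses; link with `-lcrypto`).

```c
// Illustrative sketch only: derive the binary thumbnail key and its hex _id
// from a relative path, the way the new sist2 code does with MD5(path).
#include <openssl/md5.h>
#include <stdio.h>
#include <string.h>

int main(void) {
    const char *rel_path = "photos/cat.jpg";   // hypothetical path relative to the index root

    unsigned char key[MD5_DIGEST_LENGTH];      // 16 raw bytes: the LMDB key
    MD5((const unsigned char *) rel_path, strlen(rel_path), key);

    char hex[MD5_DIGEST_LENGTH * 2 + 1];       // 32 hex chars + NUL, i.e. MD5_STR_LENGTH (33)
    for (int i = 0; i < MD5_DIGEST_LENGTH; i++) {
        sprintf(hex + i * 2, "%02x", key[i]);
    }
    printf("_id = %s\n", hex);                 // hex string used as the Elasticsearch _id
    return 0;
}
```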
@@ -30,6 +30,10 @@
"mime": {
"type": "keyword"
},
"parent": {
"type": "keyword",
"index": false
},
"thumbnail": {
"type": "keyword",
"index": false
@@ -30,11 +30,11 @@ void elastic_cleanup() {
}
}

void print_json(cJSON *document, const char uuid_str[UUID_STR_LEN]) {
void print_json(cJSON *document, const char id_str[MD5_STR_LENGTH]) {

cJSON *line = cJSON_CreateObject();

cJSON_AddStringToObject(line, "_id", uuid_str);
cJSON_AddStringToObject(line, "_id", id_str);
cJSON_AddStringToObject(line, "_index", IndexCtx.es_index);
cJSON_AddStringToObject(line, "_type", "_doc");
cJSON_AddItemReferenceToObject(line, "_source", document);

@@ -52,13 +52,13 @@ void index_json_func(void *arg) {
elastic_index_line(line);
}

void index_json(cJSON *document, const char uuid_str[UUID_STR_LEN]) {
void index_json(cJSON *document, const char index_id_str[MD5_STR_LENGTH]) {
char *json = cJSON_PrintUnformatted(document);

size_t json_len = strlen(json);
es_bulk_line_t *bulk_line = malloc(sizeof(es_bulk_line_t) + json_len + 2);
memcpy(bulk_line->line, json, json_len);
memcpy(bulk_line->uuid_str, uuid_str, UUID_STR_LEN);
memcpy(bulk_line->path_md5_str, index_id_str, MD5_STR_LENGTH);
*(bulk_line->line + json_len) = '\n';
*(bulk_line->line + json_len + 1) = '\0';
bulk_line->next = NULL;

@@ -67,7 +67,7 @@ void index_json(cJSON *document, const char uuid_str[UUID_STR_LEN]) {
tpool_add_work(IndexCtx.pool, index_json_func, bulk_line);
}

void execute_update_script(const char *script, int async, const char index_id[UUID_STR_LEN]) {
void execute_update_script(const char *script, int async, const char index_id[MD5_STR_LENGTH]) {

if (Indexer == NULL) {
Indexer = create_indexer(IndexCtx.es_url, IndexCtx.es_index);

@@ -129,9 +129,9 @@ void *create_bulk_buffer(int max, int *count, size_t *buf_len) {
while (line != NULL && *count < max) {
char action_str[256];
snprintf(
action_str, 256,
action_str, sizeof(action_str),
"{\"index\":{\"_id\":\"%s\",\"_type\":\"_doc\",\"_index\":\"%s\"}}\n",
line->uuid_str, Indexer->es_index
line->path_md5_str, Indexer->es_index
);

size_t action_str_len = strlen(action_str);

@@ -220,7 +220,7 @@ void _elastic_flush(int max) {
if (r->status_code == 413) {

if (max <= 1) {
LOG_ERRORF("elastic.c", "Single document too large, giving up: {%s}", Indexer->line_head->uuid_str)
LOG_ERRORF("elastic.c", "Single document too large, giving up: {%s}", Indexer->line_head->path_md5_str)
free_response(r);
free(buf);
delete_queue(1);

@@ -408,9 +408,9 @@ void elastic_init(int force_reset, const char* user_mappings, const char* user_s
}
}

cJSON *elastic_get_document(const char *uuid_str) {
cJSON *elastic_get_document(const char *id_str) {
char url[4096];
snprintf(url, sizeof(url), "%s/%s/_doc/%s", WebCtx.es_url, WebCtx.es_index, uuid_str);
snprintf(url, sizeof(url), "%s/%s/_doc/%s", WebCtx.es_url, WebCtx.es_index, id_str);

response_t *r = web_get(url, 3);
cJSON *json = NULL;
@@ -5,7 +5,7 @@

typedef struct es_bulk_line {
struct es_bulk_line *next;
char uuid_str[UUID_STR_LEN];
char path_md5_str[MD5_STR_LENGTH];
char line[0];
} es_bulk_line_t;

@@ -16,9 +16,9 @@ typedef struct es_indexer es_indexer_t;

void elastic_index_line(es_bulk_line_t *line);

void print_json(cJSON *document, const char uuid_str[UUID_STR_LEN]);
void print_json(cJSON *document, const char index_id_str[MD5_STR_LENGTH]);

void index_json(cJSON *document, const char uuid_str[UUID_STR_LEN]);
void index_json(cJSON *document, const char index_id_str[MD5_STR_LENGTH]);

es_indexer_t *create_indexer(const char *url, const char *index);

@@ -27,10 +27,10 @@ void finish_indexer(char *script, int async_script, char *index_id);

void elastic_init(int force_reset, const char* user_mappings, const char* user_settings);

cJSON *elastic_get_document(const char *uuid_str);
cJSON *elastic_get_document(const char *id_str);

char *elastic_get_status();

void execute_update_script(const char *script, int async, const char index_id[UUID_STR_LEN]);
void execute_update_script(const char *script, int async, const char index_id[MD5_STR_LENGTH]);

#endif
File diff suppressed because one or more lines are too long
@@ -6,13 +6,13 @@
static __thread int index_fd = -1;

typedef struct {
unsigned char uuid[16];
unsigned long ino;
unsigned char path_md5[MD5_DIGEST_LENGTH];
unsigned long size;
unsigned int mime;
int mtime;
short base;
short ext;
char has_parent;
} line_t;

void skip_meta(FILE *file) {

@@ -32,7 +32,7 @@ void skip_meta(FILE *file) {

void write_index_descriptor(char *path, index_descriptor_t *desc) {
cJSON *json = cJSON_CreateObject();
cJSON_AddStringToObject(json, "uuid", desc->uuid);
cJSON_AddStringToObject(json, "id", desc->id);
cJSON_AddStringToObject(json, "version", desc->version);
cJSON_AddStringToObject(json, "root", desc->root);
cJSON_AddStringToObject(json, "name", desc->name);

@@ -82,7 +82,7 @@ index_descriptor_t read_index_descriptor(char *path) {
strcpy(descriptor.rewrite_url, cJSON_GetObjectItem(json, "rewrite_url")->valuestring);
descriptor.root_len = (short) strlen(descriptor.root);
strcpy(descriptor.version, cJSON_GetObjectItem(json, "version")->valuestring);
strcpy(descriptor.uuid, cJSON_GetObjectItem(json, "uuid")->valuestring);
strcpy(descriptor.id, cJSON_GetObjectItem(json, "id")->valuestring);
if (cJSON_GetObjectItem(json, "type") == NULL) {
strcpy(descriptor.type, INDEX_TYPE_BIN);
} else {

@@ -219,7 +219,7 @@ void read_index_bin(const char *path, const char *index_id, index_func func) {
dyn_buffer_t buf = dyn_buffer_create();

FILE *file = fopen(path, "rb");
while (1) {
while (TRUE) {
buf.cur = 0;
size_t _ = fread((void *) &line, 1, sizeof(line_t), file);
if (feof(file)) {

@@ -229,8 +229,8 @@ void read_index_bin(const char *path, const char *index_id, index_func func) {
cJSON *document = cJSON_CreateObject();
cJSON_AddStringToObject(document, "index", index_id);

char uuid_str[UUID_STR_LEN];
uuid_unparse(line.uuid, uuid_str);
char path_md5_str[MD5_STR_LENGTH];
buf2hex(line.path_md5, sizeof(line.path_md5), path_md5_str);

const char *mime_text = mime_get_mime_text(line.mime);
if (mime_text == NULL) {

@@ -247,9 +247,6 @@ void read_index_bin(const char *path, const char *index_id, index_func func) {
}
dyn_buffer_write_char(&buf, '\0');

char full_filename[PATH_MAX];
strcpy(full_filename, buf.buf);

cJSON_AddStringToObject(document, "extension", buf.buf + line.ext);
if (*(buf.buf + line.ext - 1) == '.') {
*(buf.buf + line.ext - 1) = '\0';

@@ -331,7 +328,7 @@ void read_index_bin(const char *path, const char *index_id, index_func func) {

cJSON *meta_obj = NULL;
if (IndexCtx.meta != NULL) {
const char *meta_string = g_hash_table_lookup(IndexCtx.meta, full_filename);
const char *meta_string = g_hash_table_lookup(IndexCtx.meta, path_md5_str);
if (meta_string != NULL) {
meta_obj = cJSON_Parse(meta_string);

@@ -346,7 +343,7 @@ void read_index_bin(const char *path, const char *index_id, index_func func) {
}

if (IndexCtx.tags != NULL) {
const char *tags_string = g_hash_table_lookup(IndexCtx.tags, full_filename);
const char *tags_string = g_hash_table_lookup(IndexCtx.tags, path_md5_str);
if (tags_string != NULL) {
cJSON *tags_arr = cJSON_Parse(tags_string);
cJSON_DeleteItemFromObject(document, "tag");

@@ -354,7 +351,7 @@ void read_index_bin(const char *path, const char *index_id, index_func func) {
}
}

func(document, uuid_str);
func(document, path_md5_str);
cJSON_Delete(document);
if (meta_obj) {
cJSON_Delete(meta_obj);

@@ -382,7 +379,7 @@ const char *json_type_array_fields[] = {
void read_index_json(const char *path, UNUSED(const char *index_id), index_func func) {

FILE *file = fopen(path, "r");
while (1) {
while (TRUE) {
char *line = NULL;
size_t len;
size_t read = getline(&line, &len, file);

@@ -402,7 +399,7 @@ void read_index_json(const char *path, UNUSED(const char *index_id), index_func
}

cJSON *document = cJSON_CreateObject();
const char *uuid_str = cJSON_GetObjectItem(input, "_id")->valuestring;
const char *id_str = cJSON_GetObjectItem(input, "_id")->valuestring;

for (int i = 0; i < (sizeof(json_type_copy_fields) / sizeof(json_type_copy_fields[0])); i++) {
cJSON *value = cJSON_GetObjectItem(input, json_type_copy_fields[i]);

@@ -430,7 +427,7 @@ void read_index_json(const char *path, UNUSED(const char *index_id), index_func
}
}

func(document, uuid_str);
func(document, id_str);
cJSON_Delete(document);
cJSON_Delete(input);

@@ -438,7 +435,7 @@ void read_index_json(const char *path, UNUSED(const char *index_id), index_func
fclose(file);
}

void read_index(const char *path, const char index_id[UUID_STR_LEN], const char *type, index_func func) {
void read_index(const char *path, const char index_id[MD5_STR_LENGTH], const char *type, index_func func) {

if (strcmp(type, INDEX_TYPE_BIN) == 0) {
read_index_bin(path, index_id, func);

@@ -451,13 +448,15 @@ void incremental_read(GHashTable *table, const char *filepath) {
FILE *file = fopen(filepath, "rb");
line_t line;

LOG_DEBUGF("serialize.c", "Incremental read %s", filepath)

while (1) {
size_t ret = fread((void *) &line, 1, sizeof(line_t), file);
size_t ret = fread((void *) &line, sizeof(line_t), 1, file);
if (ret != 1 || feof(file)) {
break;
}

incremental_put(table, line.ino, line.mtime);
incremental_put(table, line.path_md5, line.mtime);

while ((getc(file))) {}
skip_meta(file);

@@ -475,33 +474,47 @@ void incremental_copy(store_t *store, store_t *dst_store, const char *filepath,
FILE *dst_file = fopen(dst_filepath, "ab");
line_t line;

while (1) {
size_t ret = fread((void *) &line, 1, sizeof(line_t), file);
LOG_DEBUGF("serialize.c", "Incremental copy %s", filepath)

while (TRUE) {
size_t ret = fread((void *) &line, sizeof(line_t), 1, file);
if (ret != 1 || feof(file)) {
break;
}

if (incremental_get(copy_table, line.ino)) {
// Assume that files with parents still exist.
// One way to "fix" this would be to check if the parent is marked for copy but it would consistently
// delete files with grandparents, which is a side-effect worse than having orphaned files
if (line.has_parent || incremental_get(copy_table, line.path_md5)) {
fwrite(&line, sizeof(line), 1, dst_file);

size_t buf_len;
char *buf = store_read(store, (char *) line.uuid, 16, &buf_len);
store_write(dst_store, (char *) line.uuid, 16, buf, buf_len);
free(buf);

// Copy filepath
char filepath_buf[PATH_MAX];
char c;
char *ptr = filepath_buf;
while ((c = (char) getc(file))) {
fwrite(&c, sizeof(c), 1, dst_file);
*ptr++ = c;
}
*ptr = '\0';
fwrite(filepath_buf, (ptr - filepath_buf) + 1, 1, dst_file);

// Copy tn store contents
size_t buf_len;
char path_md5[MD5_DIGEST_LENGTH];
MD5((unsigned char *) filepath_buf, (ptr - filepath_buf), (unsigned char *) path_md5);
char *buf = store_read(store, path_md5, sizeof(path_md5), &buf_len);
if (buf_len != 0) {
store_write(dst_store, path_md5, sizeof(path_md5), buf, buf_len);
free(buf);
}
fwrite("\0", sizeof(c), 1, dst_file);

enum metakey key;
while (1) {
key = getc(file);
fwrite(&key, sizeof(char), 1, dst_file);
if (key == '\n') {
break;
}
fwrite(&key, sizeof(char), 1, dst_file);

if (IS_META_INT(key)) {
int val;

@@ -517,14 +530,12 @@ void incremental_copy(store_t *store, store_t *dst_store, const char *filepath,
}
fwrite("\0", sizeof(c), 1, dst_file);
}

if (ret != 1) {
break;
}
}
} else {
while ((getc(file))) {}
skip_meta(file);
}
}
fclose(file);
fclose(dst_file);
}
@@ -7,14 +7,14 @@
#include <sys/syscall.h>
#include <glib.h>

typedef void(*index_func)(cJSON *, const char[UUID_STR_LEN]);
typedef void(*index_func)(cJSON *, const char[MD5_STR_LENGTH]);

void incremental_copy(store_t *store, store_t *dst_store, const char *filepath,
const char *dst_filepath, GHashTable *copy_table);

void write_document(document_t *doc);

void read_index(const char *path, const char[UUID_STR_LEN], const char *type, index_func);
void read_index(const char *path, const char[MD5_STR_LENGTH], const char *type, index_func);

void incremental_read(GHashTable *table, const char *filepath);
@@ -40,13 +40,17 @@ void store_destroy(store_t *store) {
free(store);
}

void store_flush(store_t *store) {
mdb_env_sync(store->env, TRUE);
}

void store_write(store_t *store, char *key, size_t key_len, char *buf, size_t buf_len) {

if (LogCtx.very_verbose) {
if (key_len == 16) {
char uuid_str[UUID_STR_LEN] = {0, };
uuid_unparse((unsigned char *) key, uuid_str);
LOG_DEBUGF("store.c", "Store write {%s} %lu bytes", uuid_str, buf_len)
if (key_len == MD5_DIGEST_LENGTH) {
char path_md5_str[MD5_STR_LENGTH];
buf2hex((unsigned char *) key, MD5_DIGEST_LENGTH, path_md5_str);
LOG_DEBUGF("store.c", "Store write {%s} %lu bytes", path_md5_str, buf_len)
} else {
LOG_DEBUGF("store.c", "Store write {%s} %lu bytes", key, buf_len)
}
@@ -24,6 +24,8 @@ void store_destroy(store_t *store);

void store_write(store_t *store, char *key, size_t key_len, char *buf, size_t buf_len);

void store_flush(store_t *store);

char *store_read(store_t *store, char *key, size_t key_len, size_t *ret_vallen);

GHashTable *store_read_all(store_t *store);
@@ -20,7 +20,7 @@ parse_job_t *create_fs_parse_job(const char *filepath, const struct stat *info,

job->vfile.info = *info;

memset(job->parent, 0, 16);
memset(job->parent, 0, MD5_DIGEST_LENGTH);

job->vfile.filepath = job->filepath;
job->vfile.read = fs_read;
src/main.c (25 lines changed)
@@ -21,7 +21,7 @@
#define EPILOG "Made by simon987 <me@simon987.net>. Released under GPL-3.0"


static const char *const Version = "2.8.5";
static const char *const Version = "2.9.0";
static const char *const usage[] = {
"sist2 scan [OPTION]... PATH",
"sist2 index [OPTION]... INDEX",

@@ -34,9 +34,10 @@ void init_dir(const char *dirpath) {
char path[PATH_MAX];
snprintf(path, PATH_MAX, "%sdescriptor.json", dirpath);

uuid_t uuid;
uuid_generate(uuid);
uuid_unparse(uuid, ScanCtx.index.desc.uuid);
unsigned char index_md5[MD5_DIGEST_LENGTH];
MD5((unsigned char *) ScanCtx.index.desc.name, strlen(ScanCtx.index.desc.name), index_md5);
buf2hex(index_md5, MD5_DIGEST_LENGTH, ScanCtx.index.desc.id);

time(&ScanCtx.index.desc.timestamp);
strcpy(ScanCtx.index.desc.version, Version);
strcpy(ScanCtx.index.desc.type, INDEX_TYPE_BIN);

@@ -218,7 +219,7 @@ void sist2_scan(scan_args_t *args) {
while ((de = readdir(dir)) != NULL) {
if (strncmp(de->d_name, "_index_", sizeof("_index_") - 1) == 0) {
char file_path[PATH_MAX];
snprintf(file_path, PATH_MAX, "%s/%s", args->incremental, de->d_name);
snprintf(file_path, PATH_MAX, "%s%s", args->incremental, de->d_name);
incremental_read(ScanCtx.original_table, file_path);
}
}

@@ -233,8 +234,6 @@ void sist2_scan(scan_args_t *args) {
tpool_wait(ScanCtx.pool);
tpool_destroy(ScanCtx.pool);

generate_stats(&ScanCtx.index, args->treemap_threshold, ScanCtx.index.path);

if (args->incremental != NULL) {
char dst_path[PATH_MAX];
snprintf(store_path, PATH_MAX, "%sthumbs", args->incremental);

@@ -250,7 +249,7 @@ void sist2_scan(scan_args_t *args) {
while ((de = readdir(dir)) != NULL) {
if (strncmp(de->d_name, "_index_", sizeof("_index_") - 1) == 0) {
char file_path[PATH_MAX];
snprintf(file_path, PATH_MAX, "%s/%s", args->incremental, de->d_name);
snprintf(file_path, PATH_MAX, "%s%s", args->incremental, de->d_name);
incremental_copy(source, ScanCtx.index.store, file_path, dst_path, ScanCtx.copy_table);
}
}

@@ -265,6 +264,8 @@ void sist2_scan(scan_args_t *args) {
store_destroy(source_tags);
}

generate_stats(&ScanCtx.index, args->treemap_threshold, ScanCtx.index.path);

store_destroy(ScanCtx.index.store);
}

@@ -327,7 +328,7 @@ void sist2_index(index_args_t *args) {
if (strncmp(de->d_name, "_index_", sizeof("_index_") - 1) == 0) {
char file_path[PATH_MAX];
snprintf(file_path, PATH_MAX, "%s/%s", args->index_path, de->d_name);
read_index(file_path, desc.uuid, desc.type, f);
read_index(file_path, desc.id, desc.type, f);
}
}
closedir(dir);

@@ -337,7 +338,7 @@ void sist2_index(index_args_t *args) {
tpool_destroy(IndexCtx.pool);

if (!args->print) {
finish_indexer(args->script, args->async_script, desc.uuid);
finish_indexer(args->script, args->async_script, desc.id);
}

store_destroy(IndexCtx.tag_store);

@@ -357,7 +358,7 @@ void sist2_exec_script(exec_args_t *args) {

LOG_DEBUGF("main.c", "descriptor version %s (%s)", desc.version, desc.type)

execute_update_script(args->script, args->async_script, desc.uuid);
execute_update_script(args->script, args->async_script, desc.id);
free(args->script);
}

@@ -533,7 +534,7 @@ int main(int argc, const char *argv[]) {
}
sist2_web(web_args);

} else if (strcmp(argv[0], "exec-script") == 0) {
} else if (strcmp(argv[0], "exec-script") == 0) {

int err = exec_args_validate(exec_args, argc, argv);
if (err != 0) {
@@ -46,29 +46,31 @@ void parse(void *arg) {
parse_job_t *job = arg;
document_t doc;

int inc_ts = incremental_get(ScanCtx.original_table, job->vfile.info.st_ino);
if (inc_ts != 0 && inc_ts == job->vfile.info.st_mtim.tv_sec) {
incremental_mark_file_for_copy(ScanCtx.copy_table, job->vfile.info.st_ino);
return;
}

doc.filepath = job->filepath;
doc.ext = (short) job->ext;
doc.base = (short) job->base;

char *rel_path = doc.filepath + ScanCtx.index.desc.root_len;
MD5((unsigned char *) rel_path, strlen(rel_path), doc.path_md5);

doc.meta_head = NULL;
doc.meta_tail = NULL;
doc.mime = 0;
doc.size = job->vfile.info.st_size;
doc.ino = job->vfile.info.st_ino;
doc.mtime = job->vfile.info.st_mtim.tv_sec;

uuid_generate(doc.uuid);
int inc_ts = incremental_get(ScanCtx.original_table, doc.path_md5);
if (inc_ts != 0 && inc_ts == job->vfile.info.st_mtim.tv_sec) {
incremental_mark_file_for_copy(ScanCtx.copy_table, doc.path_md5);
return;
}

char *buf[MAGIC_BUF_SIZE];

if (LogCtx.very_verbose) {
char uuid_str[UUID_STR_LEN];
uuid_unparse(doc.uuid, uuid_str);
LOG_DEBUGF(job->filepath, "Starting parse job {%s}", uuid_str)
char path_md5_str[MD5_STR_LENGTH];
buf2hex(doc.path_md5, MD5_DIGEST_LENGTH, path_md5_str);
LOG_DEBUGF(job->filepath, "Starting parse job {%s}", path_md5_str)
}

if (job->vfile.info.st_size == 0) {

@@ -86,7 +88,8 @@ void parse(void *arg) {

// Get mime type with libmagic
if (!job->vfile.is_fs_file) {
LOG_WARNING(job->filepath, "Guessing mime type with libmagic inside archive files is not currently supported");
LOG_WARNING(job->filepath,
"Guessing mime type with libmagic inside archive files is not currently supported");
goto abort;
}

@@ -169,11 +172,15 @@ void parse(void *arg) {
abort:

//Parent meta
if (!uuid_is_null(job->parent)) {
meta_line_t *meta_parent = malloc(sizeof(meta_line_t) + UUID_STR_LEN + 1);
if (!md5_digest_is_null(job->parent)) {
meta_line_t *meta_parent = malloc(sizeof(meta_line_t) + MD5_STR_LENGTH);
meta_parent->key = MetaParent;
uuid_unparse(job->parent, meta_parent->str_val);
buf2hex(job->parent, MD5_DIGEST_LENGTH, meta_parent->str_val);
APPEND_META((&doc), meta_parent)

doc.has_parent = TRUE;
} else {
doc.has_parent = FALSE;
}

write_document(&doc);
@@ -7,7 +7,7 @@ void parse_sidecar(vfile_t *vfile, document_t *doc) {
LOG_DEBUGF("sidecar.c", "Parsing sidecar file %s", vfile->filepath)

size_t size;
char* buf = read_all(vfile, &size);
char *buf = read_all(vfile, &size);
if (buf == NULL) {
LOG_ERRORF("sidecar.c", "Read error for %s", vfile->filepath)
return;

@@ -23,11 +23,11 @@ void parse_sidecar(vfile_t *vfile, document_t *doc) {
}
char *json_str = cJSON_PrintUnformatted(json);

char filepath[PATH_MAX];
memcpy(filepath, vfile->filepath + ScanCtx.index.desc.root_len, doc->ext - 1 - ScanCtx.index.desc.root_len);
*(filepath + doc->ext - 1) = '\0';
unsigned char path_md5[MD5_DIGEST_LENGTH];
MD5((unsigned char *) vfile->filepath + ScanCtx.index.desc.root_len, doc->ext - 1 - ScanCtx.index.desc.root_len,
path_md5);

store_write(ScanCtx.index.meta_store, filepath, doc->ext, json_str, strlen(json_str) + 1);
store_write(ScanCtx.index.meta_store, (char *) path_md5, sizeof(path_md5), json_str, strlen(json_str) + 1);

cJSON_Delete(json);
free(json_str);
@@ -23,9 +23,10 @@
#undef ABS
#define ABS(a) (((a) < 0) ? -(a) : (a))

#define UUID_STR_LEN 37
#define UNUSED(x) __attribute__((__unused__)) x

#define MD5_STR_LENGTH 33

#include "util.h"
#include "log.h"
#include "types.h"

@@ -47,5 +48,4 @@
#include <errno.h>
#include <ctype.h>


#endif
@@ -22,7 +22,7 @@ function gifOver(thumbnail, hit) {
thumbnail.addEventListener("mouseout", function () {
//Reset timer
thumbnail.mouseStayedOver = false;
thumbnail.setAttribute("src", `t/${hit["_source"]["index"]}/${hit["_id"]}`);
thumbnail.setAttribute("src", `t/${hit["_source"]["index"]}/${hit["_path_md5"]}`);
})
}

@@ -419,7 +419,7 @@ function makeThumbnail(mimeCategory, hit, imgWrapper, small) {
thumbnail.setAttribute("class", "card-img-top fit");
}
}
thumbnail.setAttribute("src", `t/${hit["_source"]["index"]}/${hit["_id"]}`);
thumbnail.setAttribute("src", `t/${hit["_source"]["index"]}/${hit["_path_md5"]}`);

if (shouldDisplayRawImage(hit)) {
thumbnail.addEventListener("click", () => {

@@ -174,7 +174,7 @@ function saveTag(tag, hit) {
delete: false,
name: tag,
doc_id: hit["_id"],
relpath: relPath
path_md5: md5(relPath)
}).then(() => {
tagBar.blur();
$("#tagModal").modal("hide");

@@ -604,6 +604,7 @@ function search(after = null) {
hits.forEach(hit => {
hit["_source"]["name"] = strUnescape(hit["_source"]["name"]);
hit["_source"]["path"] = strUnescape(hit["_source"]["path"]);
hit["_path_md5"] = md5(hit["_source"]["path"] + (hit["_source"]["path"] ? "/" : "") + hit["_source"]["name"] + ext(hit));
});

if (!after) {
@@ -12,7 +12,7 @@

<nav class="navbar navbar-expand-lg">
<a class="navbar-brand" href="/">sist2</a>
<span class="badge badge-pill version">2.8.5</span>
<span class="badge badge-pill version">2.9.0</span>
<span class="tagline">Lightning-fast file system indexer and search tool </span>
<a class="btn ml-auto" href="stats">Stats</a>
<button class="btn" type="button" data-toggle="modal" data-target="#settings" onclick="loadSettings()">Settings

@@ -10,7 +10,7 @@

<nav class="navbar navbar-expand-lg">
<a class="navbar-brand" href="/">sist2</a>
<span class="badge badge-pill version">2.8.5</span>
<span class="badge badge-pill version">2.9.0</span>
<span class="tagline">Lightning-fast file system indexer and search tool </span>
<a style="margin-left: auto" class="btn" href="/">Back</a>
<button class="btn" type="button" data-toggle="modal" data-target="#settings"
@@ -2,8 +2,6 @@
#include "io/serialize.h"
#include "ctx.h"

#include <glib.h>

static GHashTable *FlatTree;
static GHashTable *BufferTable;

@@ -22,7 +20,7 @@ typedef struct {
long count;
} agg_t;

void fill_tables(cJSON *document, UNUSED(const char uuid_str[UUID_STR_LEN])) {
void fill_tables(cJSON *document, UNUSED(const char index_id[MD5_STR_LENGTH])) {

if (cJSON_GetObjectItem(document, "parent") != NULL) {
return;

@@ -103,8 +101,8 @@ void read_index_into_tables(index_t *index) {
while ((de = readdir(dir)) != NULL) {
if (strncmp(de->d_name, "_index_", sizeof("_index_") - 1) == 0) {
char file_path[PATH_MAX];
snprintf(file_path, PATH_MAX, "%s/%s", index->path, de->d_name);
read_index(file_path, index->desc.uuid, index->desc.type, fill_tables);
snprintf(file_path, PATH_MAX, "%s%s", index->path, de->d_name);
read_index(file_path, index->desc.id, index->desc.type, fill_tables);
}
}
closedir(dir);
@@ -6,7 +6,7 @@
#define INDEX_VERSION_EXTERNAL "_external_v1"

typedef struct index_descriptor {
char uuid[UUID_STR_LEN];
char id[MD5_STR_LENGTH];
char version[64];
long timestamp;
char root[PATH_MAX];
@@ -2,7 +2,6 @@
#include "src/ctx.h"

#include <wordexp.h>
#include <glib.h>

#define PBSTR "========================================"
#define PBWIDTH 40

@@ -125,7 +124,7 @@ void progress_bar_print(double percentage, size_t tn_size, size_t index_size) {
}

GHashTable *incremental_get_table() {
GHashTable *file_table = g_hash_table_new(g_direct_hash, g_direct_equal);
GHashTable *file_table = g_hash_table_new_full(g_str_hash, g_str_equal, free, NULL);
return file_table;
}
src/util.h (112 lines changed)
@@ -10,6 +10,8 @@
#include "third-party/utf8.h/utf8.h"
#include "libscan/scan.h"

#define MD5_STR_LENGTH 33


char *abspath(const char *path);

@@ -21,25 +23,6 @@ void progress_bar_print(double percentage, size_t tn_size, size_t index_size);

GHashTable *incremental_get_table();

__always_inline
static void incremental_put(GHashTable *table, unsigned long inode_no, int mtime) {
g_hash_table_insert(table, (gpointer) inode_no, GINT_TO_POINTER(mtime));
}

__always_inline
static int incremental_get(GHashTable *table, unsigned long inode_no) {
if (table != NULL) {
return GPOINTER_TO_INT(g_hash_table_lookup(table, (gpointer) inode_no));
} else {
return 0;
}
}

__always_inline
static int incremental_mark_file_for_copy(GHashTable *table, unsigned long inode_no) {
return g_hash_table_insert(table, GINT_TO_POINTER(inode_no), GINT_TO_POINTER(1));
}


const char *find_file_in_paths(const char **paths, const char *filename);

@@ -48,4 +31,95 @@ void str_escape(char *dst, const char *str);

void str_unescape(char *dst, const char *str);

static int hex2buf(const char *str, int len, unsigned char *bytes) {
static const uint8_t hashmap[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
0x08, 0x09, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};

for (int pos = 0; pos < len; pos += 2) {
int idx0 = (uint8_t) str[pos + 0];
int idx1 = (uint8_t) str[pos + 1];
bytes[pos / 2] = (uint8_t) (hashmap[idx0] << 4) | hashmap[idx1];
}
return TRUE;
}

__always_inline
static void buf2hex(const unsigned char *buf, size_t buflen, char *hex_string) {
static const char hexdig[] = "0123456789abcdef";

const unsigned char *p;
size_t i;

char *s = hex_string;
for (i = 0, p = buf; i < buflen; i++, p++) {
*s++ = hexdig[(*p >> 4) & 0x0f];
*s++ = hexdig[*p & 0x0f];
}
*s = '\0';
}


__always_inline
static int md5_digest_is_null(const unsigned char digest[MD5_DIGEST_LENGTH]) {
return (*(int64_t *) digest) == 0 && (*((int64_t *) digest + 1)) == 0;
}


__always_inline
static void incremental_put(GHashTable *table, unsigned char path_md5[MD5_DIGEST_LENGTH], int mtime) {
char *ptr = malloc(MD5_STR_LENGTH);
buf2hex(path_md5, MD5_DIGEST_LENGTH, ptr);
g_hash_table_insert(table, ptr, GINT_TO_POINTER(mtime));
}

__always_inline
static int incremental_get(GHashTable *table, unsigned char path_md5[MD5_DIGEST_LENGTH]) {
if (table != NULL) {
char md5_str[MD5_STR_LENGTH];
buf2hex(path_md5, MD5_DIGEST_LENGTH, md5_str);
return GPOINTER_TO_INT(g_hash_table_lookup(table, md5_str));
} else {
return 0;
}
}

__always_inline
static int incremental_mark_file_for_copy(GHashTable *table, unsigned char path_md5[MD5_DIGEST_LENGTH]) {
char *ptr = malloc(MD5_STR_LENGTH);
buf2hex(path_md5, MD5_DIGEST_LENGTH, ptr);
return g_hash_table_insert(table, ptr, GINT_TO_POINTER(1));
}

#endif
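The helpers above are easiest to read with a small round-trip. The following standalone sketch is not part of the commit: `to_hex`, `from_hex`, `DIGEST_LEN` and `HEX_LEN` are stand-ins mirroring `buf2hex`/`hex2buf`/`MD5_DIGEST_LENGTH`/`MD5_STR_LENGTH`, and the sample digest is simply MD5("").

```c
// Standalone sketch: round-trip a 16-byte digest through the same hex encoding
// scheme that buf2hex()/hex2buf() above implement.
#include <stdio.h>
#include <string.h>

#define DIGEST_LEN 16                  // mirrors MD5_DIGEST_LENGTH
#define HEX_LEN (DIGEST_LEN * 2 + 1)   // mirrors MD5_STR_LENGTH (32 hex chars + NUL)

static void to_hex(const unsigned char *buf, size_t len, char *out) {
    static const char hexdig[] = "0123456789abcdef";
    for (size_t i = 0; i < len; i++) {
        *out++ = hexdig[(buf[i] >> 4) & 0x0f];
        *out++ = hexdig[buf[i] & 0x0f];
    }
    *out = '\0';
}

static void from_hex(const char *hex, size_t hex_len, unsigned char *out) {
    for (size_t i = 0; i < hex_len; i += 2) {
        unsigned int byte;
        sscanf(hex + i, "%2x", &byte);  // simpler stand-in for the lookup table above
        out[i / 2] = (unsigned char) byte;
    }
}

int main(void) {
    const unsigned char digest[DIGEST_LEN] = {0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00,
                                              0xb2, 0x04, 0xe9, 0x80, 0x09, 0x98,
                                              0xec, 0xf8, 0x42, 0x7e};  // MD5 of ""
    char hex[HEX_LEN];
    unsigned char back[DIGEST_LEN];

    to_hex(digest, DIGEST_LEN, hex);      // what buf2hex() produces
    from_hex(hex, DIGEST_LEN * 2, back);  // what hex2buf() recovers

    printf("%s -> round-trip %s\n", hex,
           memcmp(digest, back, DIGEST_LEN) == 0 ? "ok" : "mismatch");
    return 0;
}
```

Because the table keys are now heap-allocated hex strings rather than inode numbers cast to pointers, `incremental_get_table()` (util.c above) switches from `g_direct_hash`/`g_direct_equal` to `g_hash_table_new_full(g_str_hash, g_str_equal, free, NULL)`, so lookups hash the string contents and keys are freed together with the table.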
@@ -36,7 +36,7 @@ static void send_response_line(struct mg_connection *nc, int status_code, int le

index_t *get_index_by_id(const char *index_id) {
for (int i = WebCtx.index_count; i >= 0; i--) {
if (strcmp(index_id, WebCtx.indices[i].desc.uuid) == 0) {
if (strncmp(index_id, WebCtx.indices[i].desc.id, MD5_STR_LENGTH) == 0) {
return &WebCtx.indices[i];
}
}

@@ -73,17 +73,17 @@ void stats(struct mg_connection *nc) {

void stats_files(struct mg_connection *nc, struct http_message *hm, struct mg_str *path) {

if (path->len != UUID_STR_LEN + 4) {
if (path->len != MD5_STR_LENGTH + 4) {
mg_http_send_error(nc, 404, NULL);
nc->flags |= MG_F_SEND_AND_CLOSE;
return;
}

char arg_uuid[UUID_STR_LEN];
memcpy(arg_uuid, hm->uri.p + 3, UUID_STR_LEN);
*(arg_uuid + UUID_STR_LEN - 1) = '\0';
char arg_md5[MD5_STR_LENGTH];
memcpy(arg_md5, hm->uri.p + 3, MD5_STR_LENGTH);
*(arg_md5 + MD5_STR_LENGTH - 1) = '\0';

index_t *index = get_index_by_id(arg_uuid);
index_t *index = get_index_by_id(arg_md5);
if (index == NULL) {
mg_http_send_error(nc, 404, NULL);
nc->flags |= MG_F_SEND_AND_CLOSE;

@@ -91,7 +91,7 @@ void stats_files(struct mg_connection *nc, struct http_message *hm, struct mg_st
}

const char *file;
switch (atoi(hm->uri.p + 3 + UUID_STR_LEN)) {
switch (atoi(hm->uri.p + 3 + MD5_STR_LENGTH)) {
case 1:
file = "treemap.csv";
break;

@@ -179,29 +179,23 @@ void img_sprite_skin_flat(struct mg_connection *nc, struct http_message *hm) {

void thumbnail(struct mg_connection *nc, struct http_message *hm, struct mg_str *path) {

if (path->len != UUID_STR_LEN * 2 + 2) {
if (path->len != 68) {
LOG_DEBUGF("serve.c", "Invalid thumbnail path: %.*s", (int) path->len, path->p)
mg_http_send_error(nc, 404, NULL);
nc->flags |= MG_F_SEND_AND_CLOSE;
return;
}

char arg_uuid[UUID_STR_LEN];
char arg_index[UUID_STR_LEN];
char arg_file_md5[MD5_STR_LENGTH];
char arg_index[MD5_STR_LENGTH];

memcpy(arg_index, hm->uri.p + 3, UUID_STR_LEN);
*(arg_index + UUID_STR_LEN - 1) = '\0';
memcpy(arg_uuid, hm->uri.p + 3 + UUID_STR_LEN, UUID_STR_LEN);
*(arg_uuid + UUID_STR_LEN - 1) = '\0';
memcpy(arg_index, hm->uri.p + 3, MD5_STR_LENGTH);
*(arg_index + MD5_STR_LENGTH - 1) = '\0';
memcpy(arg_file_md5, hm->uri.p + 3 + MD5_STR_LENGTH, MD5_STR_LENGTH);
*(arg_file_md5 + MD5_STR_LENGTH - 1) = '\0';

uuid_t uuid;
int ret = uuid_parse(arg_uuid, uuid);
if (ret != 0) {
LOG_DEBUGF("serve.c", "Invalid thumbnail UUID: %s", arg_uuid)
mg_http_send_error(nc, 404, NULL);
nc->flags |= MG_F_SEND_AND_CLOSE;
return;
}
unsigned char md5_buf[MD5_DIGEST_LENGTH];
hex2buf(arg_file_md5, MD5_STR_LENGTH - 1, md5_buf);

store_t *store = get_store(arg_index);
if (store == NULL) {

@@ -212,7 +206,7 @@ void thumbnail(struct mg_connection *nc, struct http_message *hm, struct mg_str
}

size_t data_len = 0;
char *data = store_read(store, (char *) uuid, sizeof(uuid_t), &data_len);
char *data = store_read(store, (char *) md5_buf, sizeof(md5_buf), &data_len);
if (data_len != 0) {
send_response_line(nc, 200, data_len, "Content-Type: image/jpeg");
mg_send(nc, data, data_len);

@@ -305,7 +299,7 @@ void index_info(struct mg_connection *nc) {
cJSON *idx_json = cJSON_CreateObject();
cJSON_AddStringToObject(idx_json, "name", idx->desc.name);
cJSON_AddStringToObject(idx_json, "version", idx->desc.version);
cJSON_AddStringToObject(idx_json, "id", idx->desc.uuid);
cJSON_AddStringToObject(idx_json, "id", idx->desc.id);
cJSON_AddNumberToObject(idx_json, "timestamp", (double) idx->desc.timestamp);
cJSON_AddItemToArray(arr, idx_json);
}

@@ -323,18 +317,18 @@ void index_info(struct mg_connection *nc) {

void document_info(struct mg_connection *nc, struct http_message *hm, struct mg_str *path) {

if (path->len != UUID_STR_LEN + 2) {
if (path->len != MD5_STR_LENGTH + 2) {
LOG_DEBUGF("serve.c", "Invalid document_info path: %.*s", (int) path->len, path->p)
mg_http_send_error(nc, 404, NULL);
nc->flags |= MG_F_SEND_AND_CLOSE;
return;
}

char arg_uuid[UUID_STR_LEN];
memcpy(arg_uuid, hm->uri.p + 3, UUID_STR_LEN);
*(arg_uuid + UUID_STR_LEN - 1) = '\0';
char arg_md5[MD5_STR_LENGTH];
memcpy(arg_md5, hm->uri.p + 3, MD5_STR_LENGTH);
*(arg_md5 + MD5_STR_LENGTH - 1) = '\0';

cJSON *doc = elastic_get_document(arg_uuid);
cJSON *doc = elastic_get_document(arg_md5);
cJSON *source = cJSON_GetObjectItem(doc, "_source");

cJSON *index_id = cJSON_GetObjectItem(source, "index");

@@ -364,18 +358,18 @@ void document_info(struct mg_connection *nc, struct http_message *hm, struct mg_

void file(struct mg_connection *nc, struct http_message *hm, struct mg_str *path) {

if (path->len != UUID_STR_LEN + 2) {
if (path->len != MD5_STR_LENGTH + 2) {
LOG_DEBUGF("serve.c", "Invalid file path: %.*s", (int) path->len, path->p)
mg_http_send_error(nc, 404, NULL);
nc->flags |= MG_F_SEND_AND_CLOSE;
return;
}

char arg_uuid[UUID_STR_LEN];
memcpy(arg_uuid, hm->uri.p + 3, UUID_STR_LEN);
*(arg_uuid + UUID_STR_LEN - 1) = '\0';
char arg_md5[MD5_STR_LENGTH];
memcpy(arg_md5, hm->uri.p + 3, MD5_STR_LENGTH);
*(arg_md5 + MD5_STR_LENGTH - 1) = '\0';

const char *next = arg_uuid;
const char *next = arg_md5;
cJSON *doc = NULL;
cJSON *index_id = NULL;
cJSON *source = NULL;

@@ -430,7 +424,7 @@ void status(struct mg_connection *nc) {
typedef struct {
char *name;
int delete;
char *relpath;
char *path_md5_str;
char *doc_id;
} tag_req_t;

@@ -450,8 +444,9 @@ tag_req_t *parse_tag_request(cJSON *json) {
return NULL;
}

cJSON *arg_relpath = cJSON_GetObjectItem(json, "relpath");
if (arg_relpath == NULL || !cJSON_IsString(arg_relpath)) {
cJSON *arg_path_md5 = cJSON_GetObjectItem(json, "path_md5");
if (arg_path_md5 == NULL || !cJSON_IsString(arg_path_md5) ||
strlen(arg_path_md5->valuestring) != MD5_STR_LENGTH - 1) {
return NULL;
}

@@ -463,23 +458,23 @@ tag_req_t *parse_tag_request(cJSON *json) {
tag_req_t *req = malloc(sizeof(tag_req_t));
req->delete = arg_delete->valueint;
req->name = arg_name->valuestring;
req->relpath = arg_relpath->valuestring;
req->path_md5_str = arg_path_md5->valuestring;
req->doc_id = arg_doc_id->valuestring;

return req;
}

void tag(struct mg_connection *nc, struct http_message *hm, struct mg_str *path) {
if (path->len != UUID_STR_LEN + 4) {
if (path->len != MD5_STR_LENGTH + 4) {
LOG_DEBUGF("serve.c", "Invalid tag path: %.*s", (int) path->len, path->p)
mg_http_send_error(nc, 404, NULL);
nc->flags |= MG_F_SEND_AND_CLOSE;
return;
}

char arg_index[UUID_STR_LEN];
memcpy(arg_index, hm->uri.p + 5, UUID_STR_LEN);
*(arg_index + UUID_STR_LEN - 1) = '\0';
char arg_index[MD5_STR_LENGTH];
memcpy(arg_index, hm->uri.p + 5, MD5_STR_LENGTH);
*(arg_index + MD5_STR_LENGTH - 1) = '\0';

if (hm->body.len < 2 || hm->method.len != 4 || memcmp(&hm->method, "POST", 4) == 0) {
LOG_DEBUG("serve.c", "Invalid tag request")

@@ -514,7 +509,7 @@ void tag(struct mg_connection *nc, struct http_message *hm, struct mg_str *path)
cJSON *arr = NULL;

size_t data_len = 0;
const char *data = store_read(store, arg_req->relpath, strlen(arg_req->relpath), &data_len);
const char *data = store_read(store, arg_req->path_md5_str, MD5_STR_LENGTH, &data_len);
if (data_len == 0) {
arr = cJSON_CreateArray();
} else {

@@ -574,7 +569,8 @@ void tag(struct mg_connection *nc, struct http_message *hm, struct mg_str *path)
}

char *json_str = cJSON_PrintUnformatted(arr);
store_write(store, arg_req->relpath, strlen(arg_req->relpath) + 1, json_str, strlen(json_str) + 1);
store_write(store, arg_req->path_md5_str, MD5_STR_LENGTH, json_str, strlen(json_str) + 1);
store_flush(store);

free(arg_req);
free(json_str);
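For reference (an observation, not text from the diff): with MD5_STR_LENGTH = 33 (32 hex characters plus the NUL terminator), the hard-coded 68 in the thumbnail handler above matches the new route layout read at `hm->uri.p + 3` — a 3-character prefix, a 32-character index id, a separator, and a 32-character file hash, so 3 + 32 + 1 + 32 = 68. The old check, UUID_STR_LEN * 2 + 2 = 76, followed the same arithmetic with 36-character UUIDs.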
File diff suppressed because one or more lines are too long
tests/test_scan.py (new file, 75 lines)
@@ -0,0 +1,75 @@
import unittest
import subprocess
import shutil
import json
import os

TEST_FILES = "third-party/libscan/libscan-test-files/test_files"


def copy_files(files):
    base = os.path.basename(files)
    new_path = os.path.join("/tmp/sist2_test/", base)

    shutil.rmtree(new_path, ignore_errors=True)
    shutil.copytree(files, new_path)
    return new_path


def sist2(*args):
    return subprocess.check_output(
        args=["./sist2_debug", *args],
    )


def sist2_index(files, *args):
    path = copy_files(files)

    shutil.rmtree("i", ignore_errors=True)
    sist2("scan", path, "-o", "i", *args)
    return iter(sist2_index_to_dict("i"))


def sist2_incremental_index(files, func=None, *args):
    path = copy_files(files)

    if func:
        func(path)

    shutil.rmtree("i_inc", ignore_errors=True)
    sist2("scan", path, "-o", "i_inc", "--incremental", "i", *args)
    return iter(sist2_index_to_dict("i_inc"))


def sist2_index_to_dict(index):
    res = subprocess.check_output(
        args=["./sist2_debug", "index", "--print", index],
    )

    for line in res.splitlines():
        if line:
            yield json.loads(line)


class ScanTest(unittest.TestCase):

    def test_incremental1(self):
        def remove_files(path):
            os.remove(os.path.join(path, "msdoc/test1.doc"))
            os.remove(os.path.join(path, "msdoc/test2.doc"))

        def add_files(path):
            with open(os.path.join(path, "newfile1"), "w"):
                pass
            with open(os.path.join(path, "newfile2"), "w"):
                pass
            with open(os.path.join(path, "newfile3"), "w"):
                pass

        file_count = sum(1 for _ in sist2_index(TEST_FILES))
        self.assertEqual(sum(1 for _ in sist2_incremental_index(TEST_FILES, remove_files)), file_count - 2)
        self.assertEqual(sum(1 for _ in sist2_incremental_index(TEST_FILES, add_files)), file_count + 3)


if __name__ == "__main__":
    unittest.main()
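Usage note (an assumption, not stated in the diff): since the suite shells out to `./sist2_debug` and uses paths relative to the working directory, it is presumably meant to be run from the repository root after building the debug binary, e.g. `python3 tests/test_scan.py`.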
third-party/libscan (vendored, 2 lines changed)
@@ -1 +1 @@
Subproject commit 6b47b4dfbb28490f0bb2d30c0ca75ac945db2160
Subproject commit ae9fadec473e6e4ade05259fe359c5366c3f3af6