import logging
import os
import shutil
import tempfile

import tqdm
from asgiref.sync import async_to_sync
from channels.layers import get_channel_layer
from django.conf import settings
from django.db.models.signals import post_save
from documents import index
from documents import sanity_checker
from documents.classifier import DocumentClassifier
from documents.classifier import load_classifier
from documents.consumer import Consumer
from documents.consumer import ConsumerError
from documents.models import Correspondent
from documents.models import Document
from documents.models import DocumentType
from documents.models import Tag
from documents.sanity_checker import SanityCheckFailedException
from pdf2image import convert_from_path
from pikepdf import Pdf
from pyzbar import pyzbar
from whoosh.writing import AsyncWriter

logger = logging.getLogger("paperless.tasks")


def index_optimize():
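    """
    Optimize the search index by committing it with optimize=True,
    which merges the index segments.
    """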
    ix = index.open_index()
    writer = AsyncWriter(ix)
    writer.commit(optimize=True)


def index_reindex(progress_bar_disable=False):
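    """
    Recreate the search index and add all documents to it again.
    """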
    documents = Document.objects.all()

    ix = index.open_index(recreate=True)

    with AsyncWriter(ix) as writer:
        for document in tqdm.tqdm(documents, disable=progress_bar_disable):
            index.update_document(writer, document)


def train_classifier():
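    """
    Train the automatic document classifier.

    Does nothing if no tags, document types or correspondents use
    automatic matching. The model is only saved if the training data
    has changed.
    """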
    if (
        not Tag.objects.filter(matching_algorithm=Tag.MATCH_AUTO).exists()
        and not DocumentType.objects.filter(matching_algorithm=Tag.MATCH_AUTO).exists()
        and not Correspondent.objects.filter(matching_algorithm=Tag.MATCH_AUTO).exists()
    ):

        return

    classifier = load_classifier()

    if not classifier:
        classifier = DocumentClassifier()

    try:
        if classifier.train():
            logger.info(
                "Saving updated classifier model to {}...".format(settings.MODEL_FILE),
            )
            classifier.save()
        else:
            logger.debug("Training data unchanged.")

    except Exception as e:
        logger.warning("Classifier error: " + str(e))


def barcode_reader(image) -> list[str]:
    """
    Read any barcodes contained in image
    Returns a list containing all found barcodes
    """
    barcodes = []
    # Decode the barcode image
    detected_barcodes = pyzbar.decode(image)

    if detected_barcodes:
        # Traverse through all the detected barcodes in image
        for barcode in detected_barcodes:
            if barcode.data:
                decoded_barcode = barcode.data.decode("utf-8")
                barcodes.append(decoded_barcode)
                logger.debug(
                    f"Barcode of type {str(barcode.type)} found: {decoded_barcode}",
                )
    return barcodes


def scan_file_for_separating_barcodes(filepath: str) -> list[int]:
    """
    Scan the provided file for page separating barcodes
    Returns a list of page numbers, which separate the file
    """
    separator_page_numbers = []
    separator_barcode = str(settings.CONSUMER_BARCODE_STRING)
    # use a temporary directory in case the file is too big to handle in memory
    with tempfile.TemporaryDirectory() as path:
        pages_from_path = convert_from_path(filepath, output_folder=path)
        for current_page_number, page in enumerate(pages_from_path):
            current_barcodes = barcode_reader(page)
            if separator_barcode in current_barcodes:
                separator_page_numbers.append(current_page_number)
    return separator_page_numbers


def separate_pages(filepath: str, pages_to_split_on: list[int]) -> list[str]:
    """
    Separate the provided file on the pages_to_split_on.
    The pages which are defined by pages_to_split_on will be removed.
    Returns a list of (temporary) filepaths to consume.
    These will need to be deleted later.
    """
    os.makedirs(settings.SCRATCH_DIR, exist_ok=True)
    tempdir = tempfile.mkdtemp(prefix="paperless-", dir=settings.SCRATCH_DIR)
    fname = os.path.splitext(os.path.basename(filepath))[0]
    pdf = Pdf.open(filepath)
    document_paths = []
    logger.debug(f"Temp dir is {str(tempdir)}")
    if not pages_to_split_on:
        logger.warning("No pages to split on!")
    else:
        # go from the first page to the first separator page
        dst = Pdf.new()
        for n, page in enumerate(pdf.pages):
            if n < pages_to_split_on[0]:
                dst.pages.append(page)
        output_filename = "{}_document_0.pdf".format(fname)
        savepath = os.path.join(tempdir, output_filename)
        with open(savepath, "wb") as out:
            dst.save(out)
        document_paths = [savepath]
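
        # every following chunk starts on the page after a separator barcode
        # and runs up to (but not including) the next separator page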
        for count, page_number in enumerate(pages_to_split_on):
            logger.debug(f"Count: {str(count)} page_number: {str(page_number)}")
            dst = Pdf.new()
            try:
                next_page = pages_to_split_on[count + 1]
            except IndexError:
                next_page = len(pdf.pages)
            # skip the first page_number. This contains the barcode page
            for page in range(page_number + 1, next_page):
                logger.debug(f"page_number: {str(page_number)} next_page: {str(next_page)}")
                dst.pages.append(pdf.pages[page])
            output_filename = "{}_document_{}.pdf".format(fname, str(count + 1))
            logger.debug(f"pdf no:{str(count)} has {str(len(dst.pages))} pages")
            savepath = os.path.join(tempdir, output_filename)
            with open(savepath, "wb") as out:
                dst.save(out)
            document_paths.append(savepath)
    logger.debug(f"Temp files are {str(document_paths)}")
    return document_paths


def save_to_dir(
    filepath: str,
    newname: str = None,
    target_dir: str = settings.CONSUMPTION_DIR,
):
    """
    Copies filepath to target_dir.
    Optionally rename the file.
    """
    if os.path.isfile(filepath) and os.path.isdir(target_dir):
        dst = shutil.copy(filepath, target_dir)
        logger.debug(f"saved {str(filepath)} to {str(dst)}")
        if newname:
            dst_new = os.path.join(target_dir, newname)
            logger.debug(f"moving {str(dst)} to {str(dst_new)}")
            os.rename(dst, dst_new)
    else:
        logger.warning(f"{str(filepath)} or {str(target_dir)} don't exist.")


def consume_file(
    path,
    override_filename=None,
    override_title=None,
    override_correspondent_id=None,
    override_document_type_id=None,
    override_tag_ids=None,
    task_id=None,
):
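    """
    Consume a single file.

    If barcode support is enabled and separator barcodes are found, the file
    is split into one part per section, the parts are written back to the
    consumption directory and the original file is deleted. Otherwise the
    file is handed to the Consumer with the given overrides.
    """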
    # check for separators in current document
    if settings.CONSUMER_ENABLE_BARCODES:
        separators = []
        document_list = []
        separators = scan_file_for_separating_barcodes(path)
        if separators:
            logger.debug(f"Pages with separators found in: {str(path)}")
            document_list = separate_pages(path, separators)
        if document_list:
            for n, document in enumerate(document_list):
                # save to consumption dir
                # rename it to the original filename with number prefix,
                # falling back to the file's own name if no override was given
                newname = f"{str(n)}_" + (override_filename or os.path.basename(path))
                save_to_dir(document, newname=newname)
            # if we got here, the document was successfully split
            # and can safely be deleted
            logger.debug("Deleting file {}".format(path))
            os.unlink(path)
            # notify the sender, otherwise the progress bar
            # in the UI stays stuck
            payload = {
                "filename": override_filename,
                "task_id": task_id,
                "current_progress": 100,
                "max_progress": 100,
                "status": "SUCCESS",
                "message": "finished",
            }
            async_to_sync(get_channel_layer().group_send)(
                "status_updates",
                {"type": "status_update", "data": payload},
            )
            return "File successfully split"

    # continue with consumption if no barcode was found
    document = Consumer().try_consume_file(
        path,
        override_filename=override_filename,
        override_title=override_title,
        override_correspondent_id=override_correspondent_id,
        override_document_type_id=override_document_type_id,
        override_tag_ids=override_tag_ids,
        task_id=task_id,
    )

    if document:
        return "Success. New document id {} created".format(document.pk)
    else:
        raise ConsumerError(
            "Unknown error: Returned document was null, but "
            "no error message was given.",
        )


def sanity_check():
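    """
    Run the sanity checker, log all of its messages and report the result.
    """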
    messages = sanity_checker.check_sanity()

    messages.log_messages()

    if messages.has_error():
        raise SanityCheckFailedException("Sanity check failed with errors. See log.")
    elif messages.has_warning():
        return "Sanity check exited with warnings. See log."
    elif len(messages) > 0:
        return "Sanity check exited with infos. See log."
    else:
        return "No issues detected."


def bulk_update_documents(document_ids):
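    """
    Re-run the post_save handlers and refresh the search index entries for
    the given document ids.
    """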
    documents = Document.objects.filter(id__in=document_ids)

    ix = index.open_index()
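
    # re-send the post_save signal so that any connected handlers
    # (e.g. filename handling) run again for each updated document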
    for doc in documents:
        post_save.send(Document, instance=doc, created=False)

    with AsyncWriter(ix) as writer:
        for doc in documents:
            index.update_document(writer, doc)
|