import datetime
import hashlib
import logging
import os
import re
import uuid

from asgiref.sync import async_to_sync
from channels.layers import get_channel_layer
from django.conf import settings
from django.db import transaction
from django.utils import timezone

from paperless.db import GnuPG

from .classifier import DocumentClassifier, IncompatibleClassifierVersionError
from .models import Document, FileInfo
from .parsers import ParseError, get_parser_class
from .signals import (
    document_consumption_finished,
    document_consumption_started
)


class ConsumerError(Exception):
    pass


class Consumer:
    """
    Loop over every file found in CONSUMPTION_DIR and:
      1. Parse it with a matching document parser (thumbnail, text, date)
      2. Store the document in the MEDIA_ROOT with optional encryption
      3. Store the extracted text in the database
      4. Delete the original file
    """

    def _send_progress(self, filename, current_progress, max_progress,
                       status, message, document_id=None):
        payload = {
            'filename': os.path.basename(filename),
            'current_progress': current_progress,
            'max_progress': max_progress,
            'status': status,
            'message': message,
            'document_id': document_id
        }
        async_to_sync(self.channel_layer.group_send)(
            "status_updates", {'type': 'status_update', 'data': payload})

    def __init__(self, consume=settings.CONSUMPTION_DIR,
                 scratch=settings.SCRATCH_DIR):

        self.logger = logging.getLogger(__name__)
        self.logging_group = None

        self.consume = consume
        self.scratch = scratch

        self.classifier = DocumentClassifier()

        self.channel_layer = get_channel_layer()

        os.makedirs(self.scratch, exist_ok=True)

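        # Stored documents are GPG-encrypted when a PASSPHRASE is configured;
        # otherwise they are written to MEDIA_ROOT unencrypted.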
        self.storage_type = Document.STORAGE_TYPE_UNENCRYPTED
        if settings.PASSPHRASE:
            self.storage_type = Document.STORAGE_TYPE_GPG

        if not self.consume:
            raise ConsumerError(
                "The CONSUMPTION_DIR settings variable does not appear to be "
                "set."
            )

        if not os.path.exists(self.consume):
            raise ConsumerError(
                "Consumption directory {} does not exist".format(self.consume))

    def log(self, level, message):
        getattr(self.logger, level)(message, extra={
            "group": self.logging_group
        })

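    # Consumes a single file inside one database transaction: parse it,
    # store the resulting document and notify the status channel and the
    # consumption signals.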
    @transaction.atomic
    def try_consume_file(self, file):
        """
        Return True if file was consumed
        """

        self.logging_group = uuid.uuid4()

        if not re.match(FileInfo.REGEXES["title"], file):
            return False

        doc = file

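        # Skip files whose checksum matches a document that was already
        # consumed.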
        if self._is_duplicate(doc):
            self.log(
                "warning",
                "Skipping {} as it appears to be a duplicate".format(doc)
            )
            return False

        self.log("info", "Consuming {}".format(doc))

        parser_class = get_parser_class(doc)
        if not parser_class:
            self.log(
                "error", "No parsers could be found for {}".format(doc))
            return False
        else:
            self.log("info", "Parser: {}".format(parser_class.__name__))

        self._send_progress(file, 0, 100, 'WORKING', 'Consumption started')

        document_consumption_started.send(
            sender=self.__class__,
            filename=doc,
            logging_group=self.logging_group
        )

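        # The parser reports its own progress; map it onto the 20-80 band of
        # the overall progress, since the surrounding steps use the rest of
        # the range.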
        def progress_callback(current_progress, max_progress, message):
            # recalculate progress to be within 20 and 80
            p = int((current_progress / max_progress) * 60 + 20)
            self._send_progress(file, p, 100, "WORKING", message)

        document_parser = parser_class(doc, self.logging_group, progress_callback)

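        # Parsing happens in stages (thumbnail, text, date); each stage is
        # reported as progress before the parsed document is stored.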
        try:
            self.log("info", "Generating thumbnail for {}...".format(doc))
            self._send_progress(file, 10, 100, 'WORKING',
                                'Generating thumbnail...')
            thumbnail = document_parser.get_optimised_thumbnail()
            self._send_progress(file, 20, 100, 'WORKING',
                                'Getting text from document...')
            text = document_parser.get_text()
            self._send_progress(file, 80, 100, 'WORKING',
                                'Getting date from document...')
            date = document_parser.get_date()
            self._send_progress(file, 85, 100, 'WORKING',
                                'Storing the document...')
            document = self._store(
                text,
                doc,
                thumbnail,
                date
            )
        except ParseError as e:
            self.log("fatal", "PARSE FAILURE for {}: {}".format(doc, e))
            self._send_progress(file, 100, 100, 'FAILED',
                                "Failed: {}".format(e))

            document_parser.cleanup()
            return False
        else:
            document_parser.cleanup()
            self._cleanup_doc(doc)

            self.log(
                "info",
                "Document {} consumption finished".format(document)
            )

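            # Reload the document classifier and pass it along with the
            # post-consumption signal; if no trained model is available,
            # consumption continues without automatic classification.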
            classifier = None

            try:
                self.classifier.reload()
                classifier = self.classifier
            except (FileNotFoundError, IncompatibleClassifierVersionError) as e:
                logging.getLogger(__name__).warning(
                    "Cannot classify documents: {}.".format(e))

            self._send_progress(file, 90, 100, 'WORKING',
                                'Performing post-consumption tasks...')

            document_consumption_finished.send(
                sender=self.__class__,
                document=document,
                logging_group=self.logging_group,
                classifier=classifier
            )
            self._send_progress(file, 100, 100, 'SUCCESS',
                                'Finished.', document.id)
            return True

    def _store(self, text, doc, thumbnail, date):

        file_info = FileInfo.from_path(doc)

        stats = os.stat(doc)

        self.log("debug", "Saving record to database")

        created = file_info.created or date or timezone.make_aware(
            datetime.datetime.fromtimestamp(stats.st_mtime))

        with open(doc, "rb") as f:
            document = Document.objects.create(
                correspondent=file_info.correspondent,
                title=file_info.title,
                content=text,
                file_type=file_info.extension,
                checksum=hashlib.md5(f.read()).hexdigest(),
                created=created,
                modified=created,
                storage_type=self.storage_type
            )

        relevant_tags = set(file_info.tags)
        if relevant_tags:
            tag_names = ", ".join([t.slug for t in relevant_tags])
            self.log("debug", "Tagging with {}".format(tag_names))
            document.tags.add(*relevant_tags)

        self._write(document, doc, document.source_path)
        self._write(document, thumbnail, document.thumbnail_path)

        # TODO: why do we need to save the document again?
        document.save()

        return document

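    # Copies a source file to the target path, encrypting the content with
    # GnuPG when the document's storage type is GPG.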
    def _write(self, document, source, target):
        with open(source, "rb") as read_file:
            with open(target, "wb") as write_file:
                if document.storage_type == Document.STORAGE_TYPE_UNENCRYPTED:
                    write_file.write(read_file.read())
                    return
                self.log("debug", "Encrypting")
                write_file.write(GnuPG.encrypted(read_file))

    def _cleanup_doc(self, doc):
        self.log("debug", "Deleting document {}".format(doc))
        os.unlink(doc)

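    # A file counts as a duplicate if a document with the same MD5 checksum
    # already exists in the database.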
    @staticmethod
    def _is_duplicate(doc):
        with open(doc, "rb") as f:
            checksum = hashlib.md5(f.read()).hexdigest()
        return Document.objects.filter(checksum=checksum).exists()