# paperless-ngx/src/documents/consumer.py


import datetime
import hashlib
import logging
import os

import magic
from django.conf import settings
from django.db import transaction
from django.utils import timezone

from .classifier import DocumentClassifier, IncompatibleClassifierVersionError
from .file_handling import generate_filename, create_source_path_directory
from .loggers import LoggingMixin
from .models import Document, FileInfo, Correspondent, DocumentType, Tag
from .parsers import ParseError, get_parser_class_for_mime_type
from .signals import (
    document_consumption_finished,
    document_consumption_started
)


class ConsumerError(Exception):
    pass


class Consumer(LoggingMixin):

    def __init__(self):
        super().__init__()

        self.path = None
        self.filename = None
        self.override_title = None
        self.override_correspondent_id = None
        self.override_tag_ids = None
        self.override_document_type_id = None
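
    # Pre-consumption check: abort unless the given path points to an
    # actual file on disk.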
    def pre_check_file_exists(self):
        if not os.path.isfile(self.path):
            raise ConsumerError("Cannot consume {}: It is not a file".format(
                self.path))
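
    # Pre-consumption check: refuse to consume a file whose MD5 checksum
    # already exists in the database. If CONSUMER_DELETE_DUPLICATES is set,
    # the duplicate file is deleted from disk before the error is raised.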
    def pre_check_duplicate(self):
        with open(self.path, "rb") as f:
            checksum = hashlib.md5(f.read()).hexdigest()
        if Document.objects.filter(checksum=checksum).exists():
            if settings.CONSUMER_DELETE_DUPLICATES:
                os.unlink(self.path)
            raise ConsumerError(
                "Not consuming {}: It is a duplicate.".format(self.filename)
            )
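
    # Pre-consumption check: make sure the scratch, thumbnail and originals
    # directories exist before any files are written.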
    def pre_check_directories(self):
        os.makedirs(settings.SCRATCH_DIR, exist_ok=True)
        os.makedirs(settings.THUMBNAIL_DIR, exist_ok=True)
        os.makedirs(settings.ORIGINALS_DIR, exist_ok=True)
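
    # Main entry point: run the pre-checks, parse the file with a parser
    # chosen by mime type, then store the document and move the files into
    # place inside a single database transaction.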
    def try_consume_file(self,
                         path,
                         override_filename=None,
                         override_title=None,
                         override_correspondent_id=None,
                         override_document_type_id=None,
                         override_tag_ids=None):
        """
        Return the document object if it was successfully created.
        """

        self.path = path
        self.filename = override_filename or os.path.basename(path)
        self.override_title = override_title
        self.override_correspondent_id = override_correspondent_id
        self.override_document_type_id = override_document_type_id
        self.override_tag_ids = override_tag_ids

        # this is for grouping logging entries for this particular file
        # together.
        self.renew_logging_group()

        # Make sure that preconditions for consuming the file are met.
        self.pre_check_file_exists()
        self.pre_check_directories()
        self.pre_check_duplicate()

        self.log("info", "Consuming {}".format(self.filename))

        # Determine the parser class.
        mime_type = magic.from_file(self.path, mime=True)

        parser_class = get_parser_class_for_mime_type(mime_type)
        if not parser_class:
            raise ConsumerError(f"No parsers available for {self.filename}")
        else:
            self.log("debug",
                     f"Parser: {parser_class.__name__} "
                     f"based on mime type {mime_type}")

        # Notify all listeners that we're going to do some work.
        document_consumption_started.send(
            sender=self.__class__,
            filename=self.path,
            logging_group=self.logging_group
        )

        # This doesn't parse the document yet, but gives us a parser.
        document_parser = parser_class(self.path, self.logging_group)

        # However, this already created working directories which we have to
        # clean up.

        # Parse the document. This may take some time.
        try:
            self.log("debug", f"Generating thumbnail for {self.filename}...")
            thumbnail = document_parser.get_optimised_thumbnail()

            self.log("debug", "Parsing {}...".format(self.filename))
            text = document_parser.get_text()
            date = document_parser.get_date()
        except ParseError as e:
            document_parser.cleanup()
            raise ConsumerError(e)

        # Prepare the document classifier.

        # TODO: I don't really like to do this here, but this way we avoid
        #  reloading the classifier multiple times, since there are multiple
        #  post-consume hooks that all require the classifier.
        try:
            classifier = DocumentClassifier()
            classifier.reload()
        except (FileNotFoundError, IncompatibleClassifierVersionError) as e:
            logging.getLogger(__name__).warning(
                "Cannot classify documents: {}.".format(e))
            classifier = None

        # now that everything is done, we can start to store the document
        # in the system. This will be a transaction and reasonably fast.
        try:
            with transaction.atomic():

                # store the document.
                document = self._store(
                    text=text,
                    date=date,
                    mime_type=mime_type
                )

                # If we get here, it was successful. Proceed with post-consume
                # hooks. If they fail, nothing will get changed.
                document_consumption_finished.send(
                    sender=self.__class__,
                    document=document,
                    logging_group=self.logging_group,
                    classifier=classifier
                )

                # After everything is in the database, copy the files into
                # place. If this fails, we'll also rollback the transaction.
                create_source_path_directory(document.source_path)
                self._write(document, self.path, document.source_path)
                self._write(document, thumbnail, document.thumbnail_path)

                # Delete the file only if it was successfully consumed
                self.log("debug", "Deleting file {}".format(self.path))
                os.unlink(self.path)
        except Exception as e:
            raise ConsumerError(e)
        finally:
            document_parser.cleanup()

        self.log(
            "info",
            "Document {} consumption finished".format(document)
        )

        return document
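
    # Create the Document database record from the parsed text, the parsed
    # date and metadata derived from the filename, then apply tags and any
    # overrides and work out the final storage filename. Runs inside the
    # transaction opened by try_consume_file.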
    def _store(self, text, date, mime_type):

        # If someone gave us the original filename, it was stored in
        # self.filename, so metadata is extracted from that rather than from
        # the path on disk.
        file_info = FileInfo.from_filename(self.filename)

        stats = os.stat(self.path)

        self.log("debug", "Saving record to database")

        created = file_info.created or date or timezone.make_aware(
            datetime.datetime.fromtimestamp(stats.st_mtime))

        storage_type = Document.STORAGE_TYPE_UNENCRYPTED

        with open(self.path, "rb") as f:
            document = Document.objects.create(
                correspondent=file_info.correspondent,
                title=file_info.title,
                content=text,
                mime_type=mime_type,
                checksum=hashlib.md5(f.read()).hexdigest(),
                created=created,
                modified=created,
                storage_type=storage_type
            )

        relevant_tags = set(file_info.tags)
        if relevant_tags:
            tag_names = ", ".join([t.slug for t in relevant_tags])
            self.log("debug", "Tagging with {}".format(tag_names))
            document.tags.add(*relevant_tags)

        self.apply_overrides(document)

        document.filename = generate_filename(document)

        # We need to save the document twice, since we need the PK of the
        # document in order to create its filename above.
        document.save()

        return document
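
    # Apply any title, correspondent, document type or tag overrides that
    # were passed to try_consume_file.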
    def apply_overrides(self, document):
        if self.override_title:
            document.title = self.override_title

        if self.override_correspondent_id:
            document.correspondent = Correspondent.objects.get(
                pk=self.override_correspondent_id)

        if self.override_document_type_id:
            document.document_type = DocumentType.objects.get(
                pk=self.override_document_type_id)

        if self.override_tag_ids:
            for tag_id in self.override_tag_ids:
                document.tags.add(Tag.objects.get(pk=tag_id))
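
    # Copy the file contents from source to target; used for both the
    # original document and its thumbnail. The document argument is
    # currently unused here.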
    def _write(self, document, source, target):
        with open(source, "rb") as read_file:
            with open(target, "wb") as write_file:
                write_file.write(read_file.read())
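

# Typical usage (a sketch; in paperless-ngx the consumer is normally invoked
# by the background task runner rather than called directly, and the path and
# override values below are hypothetical):
#
#     document = Consumer().try_consume_file(
#         "/path/to/incoming/scan.pdf",
#         override_title="Electricity bill",
#         override_tag_ids=[1, 2],
#     )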