paperless-ngx/src/documents/classifier.py


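"""Automatic document classification for paperless.

preprocess_content() normalizes document text, and DocumentClassifier learns
to suggest tags, a correspondent and a document type for a document. Only
objects whose matching algorithm is MATCH_AUTO contribute training labels,
and documents carrying an inbox tag are excluded from training.
"""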
import logging
import os
import pickle
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import MultiLabelBinarizer, LabelBinarizer
from documents.models import Document, MatchingModel
from paperless import settings


def preprocess_content(content):
    content = content.lower()
    content = content.strip()
    content = content.replace("\n", " ")
    content = content.replace("\r", " ")
    # Collapse runs of whitespace into single spaces.
    while content.find("  ") > -1:
        content = content.replace("  ", " ")
    return content
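

# DocumentClassifier bundles three independent scikit-learn models (tags,
# correspondent and document type) behind a shared CountVectorizer. All of
# them are pickled to settings.MODEL_FILE; classifier_version tracks that
# file's mtime so that reload() only re-reads the pickle after it changes.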
class DocumentClassifier(object):

    def __init__(self):
        self.classifier_version = 0

        self.data_vectorizer = None

        self.tags_binarizer = None
        self.correspondent_binarizer = None
        self.document_type_binarizer = None

        self.tags_classifier = None
        self.correspondent_classifier = None
        self.document_type_classifier = None

    def reload(self):
        # Only re-read the pickled models when the model file has been
        # updated since the last (re)load.
        if os.path.getmtime(settings.MODEL_FILE) > self.classifier_version:
            logging.getLogger(__name__).info("Reloading classifier models")
            with open(settings.MODEL_FILE, "rb") as f:
                self.data_vectorizer = pickle.load(f)
                self.tags_binarizer = pickle.load(f)
                self.correspondent_binarizer = pickle.load(f)
                self.document_type_binarizer = pickle.load(f)
                self.tags_classifier = pickle.load(f)
                self.correspondent_classifier = pickle.load(f)
                self.document_type_classifier = pickle.load(f)
            self.classifier_version = os.path.getmtime(settings.MODEL_FILE)

    def save_classifier(self):
        with open(settings.MODEL_FILE, "wb") as f:
            pickle.dump(self.data_vectorizer, f)
            pickle.dump(self.tags_binarizer, f)
            pickle.dump(self.correspondent_binarizer, f)
            pickle.dump(self.document_type_binarizer, f)
            pickle.dump(self.tags_classifier, f)
            pickle.dump(self.correspondent_classifier, f)
            pickle.dump(self.document_type_classifier, f)

    def train(self):
        data = list()
        labels_tags = list()
        labels_correspondent = list()
        labels_document_type = list()

        # Step 1: Extract and preprocess training data from the database.
        logging.getLogger(__name__).info("Gathering data from database...")
        for doc in Document.objects.exclude(tags__is_inbox_tag=True):
            data.append(preprocess_content(doc.content))

            # -1 acts as the "no label" placeholder for documents whose
            # document type or correspondent is not matched automatically.
            y = -1
            if doc.document_type:
                if doc.document_type.matching_algorithm == MatchingModel.MATCH_AUTO:
                    y = doc.document_type.pk
            labels_document_type.append(y)

            y = -1
            if doc.correspondent:
                if doc.correspondent.matching_algorithm == MatchingModel.MATCH_AUTO:
                    y = doc.correspondent.pk
            labels_correspondent.append(y)

            tags = [tag.pk for tag in doc.tags.filter(
                matching_algorithm=MatchingModel.MATCH_AUTO
            )]
            labels_tags.append(tags)

        if not data:
            raise ValueError("No training data available.")

        labels_tags_unique = set([tag for tags in labels_tags for tag in tags])
        logging.getLogger(__name__).info(
            "{} documents, {} tag(s), {} correspondent(s), "
            "{} document type(s).".format(
                len(data),
                len(labels_tags_unique),
                len(set(labels_correspondent)),
                len(set(labels_document_type))
            )
        )

        # Step 2: vectorize data
        logging.getLogger(__name__).info("Vectorizing data...")
        # Character 3- to 5-grams; n-grams that appear in less than 10% of
        # the documents are ignored.
        self.data_vectorizer = CountVectorizer(
            analyzer="char",
            ngram_range=(3, 5),
            min_df=0.1
        )
        data_vectorized = self.data_vectorizer.fit_transform(data)

        self.tags_binarizer = MultiLabelBinarizer()
        labels_tags_vectorized = self.tags_binarizer.fit_transform(labels_tags)

        self.correspondent_binarizer = LabelBinarizer()
        labels_correspondent_vectorized = \
            self.correspondent_binarizer.fit_transform(labels_correspondent)

        self.document_type_binarizer = LabelBinarizer()
        labels_document_type_vectorized = \
            self.document_type_binarizer.fit_transform(labels_document_type)

        # Step 3: train the classifiers
        if len(self.tags_binarizer.classes_) > 0:
            logging.getLogger(__name__).info("Training tags classifier...")
            self.tags_classifier = MLPClassifier(verbose=True)
            self.tags_classifier.fit(data_vectorized, labels_tags_vectorized)
        else:
            self.tags_classifier = None
            logging.getLogger(__name__).info(
                "There are no tags. Not training tags classifier."
            )

        if len(self.correspondent_binarizer.classes_) > 0:
            logging.getLogger(__name__).info(
                "Training correspondent classifier..."
            )
            self.correspondent_classifier = MLPClassifier(verbose=True)
            self.correspondent_classifier.fit(
                data_vectorized,
                labels_correspondent_vectorized
            )
        else:
            self.correspondent_classifier = None
            logging.getLogger(__name__).info(
                "There are no correspondents. Not training correspondent "
                "classifier."
            )

        if len(self.document_type_binarizer.classes_) > 0:
            logging.getLogger(__name__).info(
                "Training document type classifier..."
            )
            self.document_type_classifier = MLPClassifier(verbose=True)
            self.document_type_classifier.fit(
                data_vectorized,
                labels_document_type_vectorized
            )
        else:
            self.document_type_classifier = None
            logging.getLogger(__name__).info(
                "There are no document types. Not training document type "
                "classifier."
            )

    def predict_correspondent(self, content):
        if self.correspondent_classifier:
            X = self.data_vectorizer.transform([preprocess_content(content)])
            y = self.correspondent_classifier.predict(X)
            correspondent_id = \
                self.correspondent_binarizer.inverse_transform(y)[0]
            if correspondent_id != -1:
                return correspondent_id
            else:
                return None
        else:
            return None

    def predict_document_type(self, content):
        if self.document_type_classifier:
            X = self.data_vectorizer.transform([preprocess_content(content)])
            y = self.document_type_classifier.predict(X)
            document_type_id = \
                self.document_type_binarizer.inverse_transform(y)[0]
            if document_type_id != -1:
                return document_type_id
            else:
                return None
        else:
            return None

    def predict_tags(self, content):
        if self.tags_classifier:
            X = self.data_vectorizer.transform([preprocess_content(content)])
            y = self.tags_classifier.predict(X)
            tags_ids = self.tags_binarizer.inverse_transform(y)[0]
            return tags_ids
        else:
            return []
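

# A minimal usage sketch, assuming a configured Django environment (so the
# Document queryset and settings.MODEL_FILE resolve) and at least one
# document with MATCH_AUTO tags, correspondents or document types;
# some_document below stands for any Document instance:
#
#     clf = DocumentClassifier()
#     clf.train()
#     clf.save_classifier()
#
#     clf = DocumentClassifier()
#     clf.reload()
#     tag_ids = clf.predict_tags(some_document.content)
#     correspondent_id = clf.predict_correspondent(some_document.content)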