Mirror of https://github.com/paperless-ngx/paperless-ngx.git, synced 2025-12-20 05:26:53 +01:00
Better encapsulate backends, use llama_index OpenAI
parent 42e3684211
commit e2eec6dc71
3 changed files with 111 additions and 48 deletions
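The commit title also mentions switching to llama_index's own OpenAI wrapper; those hunks are in the other changed files and are not shown in this excerpt. As a minimal sketch only, assuming the stock llama-index-llms-openai package (the helper name and parameters below are illustrative, not paperless-ngx code), the OpenAI backend would be constructed roughly like this:

    # Sketch, not from the commit: assumes llama-index-llms-openai is installed.
    from llama_index.llms.openai import OpenAI

    def get_openai_llm(api_key: str, model: str = "gpt-4o-mini") -> OpenAI:
        # llama_index's OpenAI class implements the same LLM interface that
        # the hand-rolled OllamaLLM below implements, so callers can treat
        # both backends interchangeably.
        return OpenAI(model=model, api_key=api_key)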
src/paperless/ai/llms.py (new file, 64 lines)
@@ -0,0 +1,64 @@
import httpx
from llama_index.core.base.llms.types import ChatMessage
from llama_index.core.base.llms.types import ChatResponse
from llama_index.core.base.llms.types import ChatResponseGen
from llama_index.core.base.llms.types import CompletionResponse
from llama_index.core.base.llms.types import CompletionResponseGen
from llama_index.core.base.llms.types import LLMMetadata
from llama_index.core.llms.llm import LLM
from pydantic import Field


class OllamaLLM(LLM):
    model: str = Field(default="llama3")
    base_url: str = Field(default="http://localhost:11434")

    @property
    def metadata(self) -> LLMMetadata:
        return LLMMetadata(
            model_name=self.model,
            is_chat_model=False,
            context_window=4096,
            num_output=512,
            is_function_calling_model=False,
        )

    def complete(self, prompt: str, **kwargs) -> CompletionResponse:
        with httpx.Client(timeout=120.0) as client:
            response = client.post(
                f"{self.base_url}/api/generate",
                json={
                    "model": self.model,
                    "prompt": prompt,
                    "stream": False,
                },
            )
            response.raise_for_status()
            data = response.json()
            return CompletionResponse(text=data["response"])

    # -- Required stubs for ABC:
    def stream_complete(self, prompt: str, **kwargs) -> CompletionResponseGen:
        raise NotImplementedError("stream_complete not supported")

    def chat(self, messages: list[ChatMessage], **kwargs) -> ChatResponse:
        raise NotImplementedError("chat not supported")

    def stream_chat(self, messages: list[ChatMessage], **kwargs) -> ChatResponseGen:
        raise NotImplementedError("stream_chat not supported")

    async def achat(self, messages: list[ChatMessage], **kwargs) -> ChatResponse:
        raise NotImplementedError("async chat not supported")

    async def astream_chat(
        self,
        messages: list[ChatMessage],
        **kwargs,
    ) -> ChatResponseGen:
        raise NotImplementedError("async stream_chat not supported")

    async def acomplete(self, prompt: str, **kwargs) -> CompletionResponse:
        raise NotImplementedError("async complete not supported")

    async def astream_complete(self, prompt: str, **kwargs) -> CompletionResponseGen:
        raise NotImplementedError("async stream_complete not supported")
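For context, a minimal usage sketch of the new backend, assuming a local Ollama server with the llama3 model pulled (the prompt text is illustrative only):

    # Sketch, not from the commit: requires a reachable Ollama instance.
    from paperless.ai.llms import OllamaLLM

    llm = OllamaLLM(model="llama3", base_url="http://localhost:11434")

    # complete() posts to Ollama's /api/generate endpoint and wraps the
    # "response" field in a llama_index CompletionResponse.
    result = llm.complete("Summarise this document in one sentence: ...")
    print(result.text)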