main.py updated
main.py (20 changed lines)
@@ -5,7 +5,8 @@ import subprocess
 import sqlite3
 import asyncio
 import openai
-import google.generativeai as genai
+from google import genai
+from google.genai import types
 import json
 from fastapi import FastAPI, WebSocket, BackgroundTasks, Request, Form, WebSocketDisconnect
 from fastapi.responses import RedirectResponse
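The import change above swaps the legacy Gemini SDK for the new unified one. A minimal sketch of what the swap amounts to, assuming the legacy package is "google-generativeai" and the new one "google-genai" (the project's dependency pins are not part of this diff); the placeholder key is hypothetical:

# Legacy SDK (pip package "google-generativeai"):  import google.generativeai as genai
# New SDK (pip package "google-genai"): the imports used in the updated file
from google import genai
from google.genai import types

# Hypothetical placeholder key; the real app presumably loads GOOGLE_API_KEY from its config
client = genai.Client(api_key="PLACEHOLDER_KEY")
print(type(client).__name__, types.GenerateContentConfig.__name__)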
@@ -43,7 +44,7 @@ async def get_ai_response(user_input):

     elif AI_PROVIDER == "ollama":
         # Ollama uses the OpenAI format but does not need a key
-        client = openai.OpenAI(base_url=OLLAMA_BASE_URL, api_key="ollama")
+        client = openai.OpenAI(base_url=OLLAMA_BASE_URL, api_key="ollama", timeout=20.0)
         response = client.chat.completions.create(
             model="llama3", # Or your preferred model
             messages=[{"role": "system", "content": SYSTEM_PROMPT}, {"role": "user", "content": user_input}]
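The only change in this hunk is the added timeout=20.0, so a hung Ollama server fails the request after 20 seconds instead of blocking indefinitely. A self-contained sketch of this branch, assuming Ollama's default OpenAI-compatible endpoint at http://localhost:11434/v1; OLLAMA_BASE_URL, SYSTEM_PROMPT and the ask_ollama helper here are stand-ins for the app's real definitions:

import openai

OLLAMA_BASE_URL = "http://localhost:11434/v1"   # assumption: default local Ollama endpoint
SYSTEM_PROMPT = "You are a helpful assistant."  # placeholder for the app's real prompt

def ask_ollama(user_input: str) -> str:
    # timeout=20.0 mirrors the change above: fail fast instead of hanging on a stuck server
    client = openai.OpenAI(base_url=OLLAMA_BASE_URL, api_key="ollama", timeout=20.0)
    response = client.chat.completions.create(
        model="llama3",
        messages=[
            {"role": "system", "content": SYSTEM_PROMPT},
            {"role": "user", "content": user_input},
        ],
    )
    return response.choices[0].message.content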
@@ -51,10 +52,17 @@ async def get_ai_response(user_input):
|
|||||||
return response.choices[0].message.content
|
return response.choices[0].message.content
|
||||||
|
|
||||||
elif AI_PROVIDER == "google":
|
elif AI_PROVIDER == "google":
|
||||||
genai.configure(api_key=GOOGLE_API_KEY)
|
# Initialisierung des neuen Clients
|
||||||
model = genai.GenerativeModel('gemini-1.5-flash')
|
client = genai.Client(api_key=GOOGLE_API_KEY)
|
||||||
# Gemini braucht einen etwas anderen Aufbau für System Prompts
|
|
||||||
response = model.generate_content(f"{SYSTEM_PROMPT}\n\nNutzer: {user_input}")
|
# Generierung mit dem neuen SDK
|
||||||
|
response = client.models.generate_content(
|
||||||
|
model='gemini-2.5-flash',
|
||||||
|
contents=user_input,
|
||||||
|
config=types.GenerateContentConfig(
|
||||||
|
system_instruction=SYSTEM_PROMPT
|
||||||
|
)
|
||||||
|
)
|
||||||
return response.text
|
return response.text
|
||||||
|
|
||||||
return "Fehler: Kein KI-Provider konfiguriert."
|
return "Fehler: Kein KI-Provider konfiguriert."
|
||||||
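For reference, a self-contained sketch of the migrated Google branch using the new SDK calls shown above; reading GOOGLE_API_KEY from the environment, the SYSTEM_PROMPT value, and the ask_gemini helper are assumptions for illustration, not part of the diff:

import os
from google import genai
from google.genai import types

GOOGLE_API_KEY = os.environ.get("GOOGLE_API_KEY", "")   # assumption: key comes from the environment
SYSTEM_PROMPT = "You are a helpful assistant."           # placeholder for the app's real prompt

def ask_gemini(user_input: str) -> str:
    client = genai.Client(api_key=GOOGLE_API_KEY)
    # The new SDK passes the system prompt via config instead of prepending it to the user text
    response = client.models.generate_content(
        model="gemini-2.5-flash",
        contents=user_input,
        config=types.GenerateContentConfig(system_instruction=SYSTEM_PROMPT),
    )
    return response.text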