Webseite überarbeitet und Telegram Bot funktion hinzugefügt #1
27
main.py
27
main.py
@@ -34,11 +34,13 @@ ENV_FILE = os.path.join(os.path.dirname(__file__), ".env")
|
||||
AI_PROVIDER = os.getenv("AI_PROVIDER", "google").lower()
|
||||
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", "")
|
||||
GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY", "")
|
||||
NVIDIA_API_KEY = os.getenv("NVIDIA_API_KEY", "")
|
||||
OLLAMA_BASE_URL = os.getenv("OLLAMA_BASE_URL", "http://127.0.0.1:11434/v1")
|
||||
|
||||
GOOGLE_MODEL = os.getenv("GOOGLE_MODEL", "gemini-2.0-flash")
|
||||
OPENAI_MODEL = os.getenv("OPENAI_MODEL", "gpt-4o")
|
||||
OLLAMA_MODEL = os.getenv("OLLAMA_MODEL", "llama3")
|
||||
NVIDIA_MODEL = os.getenv("NVIDIA_MODEL", "moonshotai/kimi-k2.5")
|
||||
|
||||
# --- DATENBANK INITIALISIERUNG (ERWEITERT) ---
|
||||
def init_db():
|
||||
@@ -364,6 +366,7 @@ async def get_settings():
|
||||
"provider": AI_PROVIDER,
|
||||
"google_model": GOOGLE_MODEL,
|
||||
"openai_model": OPENAI_MODEL,
|
||||
"nvidia_model": NVIDIA_MODEL,
|
||||
"ollama_model": OLLAMA_MODEL,
|
||||
"ollama_base_url": OLLAMA_BASE_URL # URL ans Frontend schicken
|
||||
}
|
||||
@@ -371,7 +374,7 @@ async def get_settings():
|
||||
@app.post("/api/settings")
|
||||
async def update_settings(request: Request):
|
||||
# WICHTIG: OLLAMA_BASE_URL als global deklarieren
|
||||
global AI_PROVIDER, GOOGLE_MODEL, OPENAI_MODEL, OLLAMA_MODEL, OLLAMA_BASE_URL
|
||||
global AI_PROVIDER, GOOGLE_MODEL, OPENAI_MODEL, NVIDIA_MODEL, OLLAMA_MODEL, OLLAMA_BASE_URL
|
||||
|
||||
data = await request.json()
|
||||
provider = data.get("provider")
|
||||
@@ -388,6 +391,9 @@ async def update_settings(request: Request):
|
||||
elif provider == "openai" and model:
|
||||
OPENAI_MODEL = model
|
||||
set_key(ENV_FILE, "OPENAI_MODEL", model)
|
||||
elif provider == "nvidia" and model:
|
||||
NVIDIA_MODEL = model
|
||||
set_key(ENV_FILE, "NVIDIA_MODEL", model)
|
||||
elif provider == "ollama" and model:
|
||||
OLLAMA_MODEL = model
|
||||
set_key(ENV_FILE, "OLLAMA_MODEL", model)
|
||||
@@ -421,7 +427,26 @@ async def get_models(provider: str, url: str = None):
|
||||
# Nur GPT-Modelle filtern, um die Liste sauber zu halten
|
||||
models = [m.id for m in response.data if "gpt" in m.id or "o1" in m.id]
|
||||
models.sort()
|
||||
|
||||
elif provider == "nvidia":
|
||||
if not NVIDIA_API_KEY or "hier" in NVIDIA_API_KEY: return {"models": []}
|
||||
|
||||
# NVIDIA nutzt das OpenAI SDK!
|
||||
import openai
|
||||
client = openai.AsyncOpenAI(
|
||||
api_key=NVIDIA_API_KEY,
|
||||
base_url="https://integrate.api.nvidia.com/v1"
|
||||
)
|
||||
|
||||
try:
|
||||
response = await client.models.list()
|
||||
# Bei NVIDIA heißen die Modelle oft nach dem Schema "nvidia/llama-3.1-405b"
|
||||
models = [m.id for m in response.data]
|
||||
models.sort()
|
||||
except Exception as e:
|
||||
print(f"Fehler bei NVIDIA Abfrage: {e}")
|
||||
return {"models": []}
|
||||
|
||||
elif provider == "google":
|
||||
if not GOOGLE_API_KEY:
|
||||
return {"models": ["API-Key fehlt"]}
|
||||
|
||||
Reference in New Issue
Block a user