diff --git a/main.py b/main.py
index 1c30d4e..4e3c6df 100644
--- a/main.py
+++ b/main.py
@@ -13,7 +13,7 @@ from fastapi import FastAPI, WebSocket, BackgroundTasks, Request, Form, WebSocke
 from fastapi.responses import RedirectResponse
 from fastapi.templating import Jinja2Templates
 from fastapi.staticfiles import StaticFiles
-from dotenv import load_dotenv
+from dotenv import load_dotenv, set_key
 
 # Lade Umgebungsvariablen aus der .env Datei
 load_dotenv()
@@ -27,7 +27,7 @@ SSH_KEY = os.path.expanduser("~/.ssh/id_rsa")
 DB_PATH = "cluster.db"
 chat_history = []
 PROMPT_FILE = "system_prompt.txt"
-
+ENV_FILE = os.path.join(os.path.dirname(__file__), ".env")  # NEW: .env path used by set_key to persist settings
 # --- KI KONFIGURATION (Werte aus .env laden) ---
 AI_PROVIDER = os.getenv("AI_PROVIDER", "google").lower()
 OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", "")
@@ -210,6 +210,57 @@ async def remove_node(node_id: int):
     conn.close()
     return RedirectResponse(url="/", status_code=303)
 
+@app.get("/api/settings")
+async def get_settings():
+    """Return the active AI provider and the configured model name per provider."""
+    return {
+        "provider": AI_PROVIDER,
+        "google_model": GOOGLE_MODEL,
+        "openai_model": OPENAI_MODEL,
+        "ollama_model": OLLAMA_MODEL
+    }
+
+# Maps each known provider to the .env key holding its model name.
+_MODEL_ENV_KEYS = {
+    "google": "GOOGLE_MODEL",
+    "openai": "OPENAI_MODEL",
+    "ollama": "OLLAMA_MODEL",
+}
+
+@app.post("/api/settings")
+async def update_settings(request: Request):
+    """Switch the AI provider and/or its model and persist the choice to .env.
+
+    Expects a JSON body {"provider": ..., "model": ...}; both keys are optional.
+    Unknown provider names are rejected so arbitrary client input is never
+    written into the .env file or assigned to the runtime configuration.
+    """
+    # Declare the globals so the running process picks up the change immediately.
+    global AI_PROVIDER, GOOGLE_MODEL, OPENAI_MODEL, OLLAMA_MODEL
+
+    data = await request.json()
+    provider = data.get("provider")
+    model = data.get("model")
+
+    if provider:
+        if provider not in _MODEL_ENV_KEYS:
+            # Reject instead of persisting an unusable provider string.
+            return {"status": "error", "detail": f"unknown provider: {provider}"}
+        AI_PROVIDER = provider
+        set_key(ENV_FILE, "AI_PROVIDER", provider)
+
+    if model and provider in _MODEL_ENV_KEYS:
+        # Update both the live global and the persisted .env entry.
+        if provider == "google":
+            GOOGLE_MODEL = model
+        elif provider == "openai":
+            OPENAI_MODEL = model
+        else:
+            OLLAMA_MODEL = model
+        set_key(ENV_FILE, _MODEL_ENV_KEYS[provider], model)
+
+    return {"status": "success"}
+
 
 # --- WebSockets ---
 @app.websocket("/ws/install_logs")