main.py aktualisiert

This commit is contained in:
2026-03-06 13:32:28 +00:00
parent a69aceac28
commit 3697857ce1

379
main.py
View File

@@ -7,7 +7,6 @@ import asyncio
import openai import openai
import re import re
import httpx import httpx
import subprocess
from google import genai from google import genai
from google.genai import types from google.genai import types
import json import json
@@ -15,7 +14,6 @@ from fastapi import FastAPI, WebSocket, BackgroundTasks, Request, Form, WebSocke
from fastapi.responses import RedirectResponse from fastapi.responses import RedirectResponse
from fastapi.templating import Jinja2Templates from fastapi.templating import Jinja2Templates
from fastapi.staticfiles import StaticFiles from fastapi.staticfiles import StaticFiles
from contextlib import asynccontextmanager
from dotenv import load_dotenv, set_key from dotenv import load_dotenv, set_key
# Lade Umgebungsvariablen aus der .env Datei # Lade Umgebungsvariablen aus der .env Datei
@@ -30,69 +28,100 @@ SSH_KEY = os.path.expanduser("~/.ssh/id_rsa")
DB_PATH = "cluster.db" DB_PATH = "cluster.db"
chat_history = [] chat_history = []
PROMPT_FILE = "system_prompt.txt" PROMPT_FILE = "system_prompt.txt"
ENV_FILE = os.path.join(os.path.dirname(__file__), ".env") ENV_FILE = os.path.join(os.path.dirname(__file__), ".env") # NEU
# --- KI KONFIGURATION (Werte aus .env laden) ---
# --- KI KONFIGURATION ---
AI_PROVIDER = os.getenv("AI_PROVIDER", "google").lower() AI_PROVIDER = os.getenv("AI_PROVIDER", "google").lower()
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", "") OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", "")
GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY", "") GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY", "")
OLLAMA_BASE_URL = os.getenv("OLLAMA_BASE_URL", "http://127.0.0.1:11434/v1") OLLAMA_BASE_URL = os.getenv("OLLAMA_BASE_URL", "http://127.0.0.1:11434/v1")
# Modelle aus .env laden (mit Standardwerten als Fallback)
GOOGLE_MODEL = os.getenv("GOOGLE_MODEL", "gemini-2.5-flash") GOOGLE_MODEL = os.getenv("GOOGLE_MODEL", "gemini-2.5-flash")
OPENAI_MODEL = os.getenv("OPENAI_MODEL", "gpt-4o") OPENAI_MODEL = os.getenv("OPENAI_MODEL", "gpt-4o")
OLLAMA_MODEL = os.getenv("OLLAMA_MODEL", "llama3") OLLAMA_MODEL = os.getenv("OLLAMA_MODEL", "llama3")
def get_system_prompt(): def get_system_prompt():
# 1. Node Info aus DB holen
conn = get_db() conn = get_db()
nodes = conn.execute('SELECT * FROM nodes').fetchall() nodes = conn.execute('SELECT * FROM nodes').fetchall()
conn.close() conn.close()
node_info = "" node_info = ""
for n in nodes: for n in nodes:
node_info += f"- Name: {n['name']}, IP: {n['ip']}, User: {n['user']}, OS: {n['os']}, Arch: {n['arch']}, Docker: {'Ja' if n['docker'] else 'Nein'}\n" node_info += f"- Name: {n['name']}, IP: {n['ip']}, User: {n['user']}\n"
# 2. Versuche den Prompt aus der Datei zu laden
if os.path.exists(PROMPT_FILE): if os.path.exists(PROMPT_FILE):
with open(PROMPT_FILE, "r", encoding="utf-8") as f: with open(PROMPT_FILE, "r", encoding="utf-8") as f:
template = f.read() template = f.read()
else: else:
# Fallback falls Datei fehlt
template = "Du bist ein Helfer. Nodes:\n{node_info}\nNutze <EXECUTE target=\"IP\">cmd</EXECUTE>" template = "Du bist ein Helfer. Nodes:\n{node_info}\nNutze <EXECUTE target=\"IP\">cmd</EXECUTE>"
print(f"⚠️ Warnung: {PROMPT_FILE} nicht gefunden. Nutze Fallback.") print(f"⚠️ Warnung: {PROMPT_FILE} nicht gefunden. Nutze Fallback.")
return template.replace("{node_info}", node_info) return template.replace("{node_info}", node_info)
# --- KI FUNKTIONEN --- # --- KI FUNKTIONEN ---
async def get_ai_response(user_input, system_prompt): async def get_ai_response(user_input, system_prompt):
global chat_history global chat_history
# 1. Die neue User-Nachricht dem Gedächtnis hinzufügen
chat_history.append({"role": "user", "content": user_input}) chat_history.append({"role": "user", "content": user_input})
# 2. Gedächtnis auf die letzten 30 Nachrichten begrenzen
chat_history = chat_history[-30:] chat_history = chat_history[-30:]
ai_msg = "" ai_msg = ""
try: try:
if AI_PROVIDER == "openai" or AI_PROVIDER == "ollama": if AI_PROVIDER == "openai" or AI_PROVIDER == "ollama":
messages = [{"role": "system", "content": system_prompt}] + chat_history messages = [{"role": "system", "content": system_prompt}] + chat_history
# Sicherstellen, dass die URL für Ollama korrekt endet
if AI_PROVIDER == "ollama": if AI_PROVIDER == "ollama":
url = OLLAMA_BASE_URL url = OLLAMA_BASE_URL
if not url.endswith('/v1') and not url.endswith('/v1/'): url = url.rstrip('/') + '/v1' if not url.endswith('/v1') and not url.endswith('/v1/'):
url = url.rstrip('/') + '/v1'
key = "ollama" key = "ollama"
model_to_use = OLLAMA_MODEL model_to_use = OLLAMA_MODEL
else: else:
url = None url = None # Benutzt Standard OpenAI URL
key = OPENAI_API_KEY key = OPENAI_API_KEY
model_to_use = OPENAI_MODEL model_to_use = OPENAI_MODEL
client = openai.OpenAI(base_url=url, api_key=key) client = openai.OpenAI(base_url=url, api_key=key)
response = client.chat.completions.create(model=model_to_use, messages=messages) response = client.chat.completions.create(
model=model_to_use,
messages=messages
)
ai_msg = response.choices[0].message.content ai_msg = response.choices[0].message.content
elif AI_PROVIDER == "google": elif AI_PROVIDER == "google":
if not GOOGLE_API_KEY: return "Fehler: GOOGLE_API_KEY fehlt in der .env Datei!" # Für Google Gemini
if not GOOGLE_API_KEY:
return "Fehler: GOOGLE_API_KEY fehlt in der .env Datei!"
client = genai.Client(api_key=GOOGLE_API_KEY) client = genai.Client(api_key=GOOGLE_API_KEY)
# Wir müssen unser Array in das spezielle Google-Format umwandeln
google_history = [] google_history = []
# Alle Nachrichten AUSSER der allerletzten (die aktuelle User-Frage) in die History packen
for msg in chat_history[:-1]: for msg in chat_history[:-1]:
role = "user" if msg["role"] == "user" else "model" role = "user" if msg["role"] == "user" else "model"
google_history.append(types.Content(role=role, parts=[types.Part.from_text(text=msg["content"])])) google_history.append(
types.Content(role=role, parts=[types.Part.from_text(text=msg["content"])])
)
chat = client.chats.create(model=GOOGLE_MODEL, config=types.GenerateContentConfig(system_instruction=system_prompt), history=google_history) # Chat MIT dem übersetzten Gedächtnis starten
chat = client.chats.create(
model=GOOGLE_MODEL,
config=types.GenerateContentConfig(system_instruction=system_prompt),
history=google_history
)
# Jetzt erst die neue Nachricht an den Chat mit Gedächtnis schicken
response = chat.send_message(user_input) response = chat.send_message(user_input)
ai_msg = response.text ai_msg = response.text
@@ -100,38 +129,23 @@ async def get_ai_response(user_input, system_prompt):
ai_msg = f"Fehler bei der KI-Anfrage: {e}" ai_msg = f"Fehler bei der KI-Anfrage: {e}"
print(f"KI Fehler: {e}") print(f"KI Fehler: {e}")
# 3. Die Antwort der KI ebenfalls ins Gedächtnis aufnehmen
chat_history.append({"role": "assistant", "content": ai_msg}) chat_history.append({"role": "assistant", "content": ai_msg})
return ai_msg return ai_msg
# --- DATENBANK INITIALISIERUNG & MIGRATION --- # --- DATENBANK INITIALISIERUNG ---
def init_db(): def init_db():
conn = sqlite3.connect(DB_PATH) conn = sqlite3.connect(DB_PATH)
# Basis-Tabelle erstellen
conn.execute(''' conn.execute('''
CREATE TABLE IF NOT EXISTS nodes ( CREATE TABLE IF NOT EXISTS nodes (
id INTEGER PRIMARY KEY AUTOINCREMENT, id INTEGER PRIMARY KEY AUTOINCREMENT,
name TEXT, ip TEXT UNIQUE, user TEXT, status TEXT name TEXT,
ip TEXT UNIQUE,
user TEXT,
status TEXT
) )
''') ''')
# Prüfen, ob die neuen Spalten existieren, und sie ansonsten anhängen (Migration)
cursor = conn.cursor()
cursor.execute("PRAGMA table_info(nodes)")
columns = [info[1] for info in cursor.fetchall()]
new_columns = {
"sudo_pass": "TEXT DEFAULT ''",
"os": "TEXT DEFAULT 'Unbekannt'",
"arch": "TEXT DEFAULT 'Unbekannt'",
"docker": "INTEGER DEFAULT 0",
"vnc": "INTEGER DEFAULT 0"
}
for col, dtype in new_columns.items():
if col not in columns:
conn.execute(f"ALTER TABLE nodes ADD COLUMN {col} {dtype}")
print(f"Datenbank aktualisiert: Spalte '{col}' hinzugefügt.")
conn.commit() conn.commit()
conn.close() conn.close()
@@ -142,7 +156,7 @@ def get_db():
conn.row_factory = sqlite3.Row conn.row_factory = sqlite3.Row
return conn return conn
# --- WebSocket Manager --- # --- WebSocket Manager für Logs & Chat ---
class ConnectionManager: class ConnectionManager:
def __init__(self): def __init__(self):
self.active_connections: list[WebSocket] = [] self.active_connections: list[WebSocket] = []
@@ -154,117 +168,28 @@ class ConnectionManager:
self.active_connections.remove(websocket) self.active_connections.remove(websocket)
async def broadcast(self, message: str): async def broadcast(self, message: str):
for connection in self.active_connections: for connection in self.active_connections:
try: await connection.send_text(message) try:
except: pass await connection.send_text(message)
except:
pass
manager = ConnectionManager() manager = ConnectionManager()
# --- SSH Handshake --- # --- SSH Handshake (Nur Key kopieren) ---
async def bootstrap_ssh_only(ip, user, password, sudo_pass=""): async def bootstrap_ssh_only(ip, user, password):
await manager.broadcast(f"🔑 Initialisiere SSH-Handshake für {ip}...") await manager.broadcast(f"🔑 Initialisiere SSH-Handshake für {ip}...")
# Nutzt sshpass um den Key einmalig mit Passwort zu hinterlegen
ssh_copy_cmd = f"sshpass -p '{password}' ssh-copy-id -o StrictHostKeyChecking=no -i {SSH_KEY}.pub {user}@{ip}" ssh_copy_cmd = f"sshpass -p '{password}' ssh-copy-id -o StrictHostKeyChecking=no -i {SSH_KEY}.pub {user}@{ip}"
process = subprocess.Popen(ssh_copy_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True) process = subprocess.Popen(ssh_copy_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True)
for line in process.stdout: for line in process.stdout:
await manager.broadcast(f"SSH: {line.strip()}") await manager.broadcast(f"SSH: {line.strip()}")
# Nach dem Handshake sofort die Node-Infos (OS, Arch) abrufen
conn = get_db() conn = get_db()
node = conn.execute("SELECT id FROM nodes WHERE ip=?", (ip,)).fetchone() conn.execute('UPDATE nodes SET status = "Bereit (Kein Docker)" WHERE ip = ?', (ip,))
conn.commit()
conn.close() conn.close()
await manager.broadcast(f"✅ Node {ip} ist verbunden. KI kann nun Befehle senden.")
if node:
await check_and_update_node(node['id'])
await manager.broadcast(f"✅ Node {ip} verbunden und analysiert.")
# --- AUTO-REFRESH (every 60 seconds) ---
@asynccontextmanager
async def lifespan(app: FastAPI):
    """FastAPI lifespan hook: start the background status-refresh task on
    startup and cancel it cleanly on shutdown."""
    # Everything before `yield` runs at application startup.
    refresh_task = asyncio.create_task(auto_refresh_loop())
    yield
    # Everything after `yield` runs at application shutdown.
    refresh_task.cancel()

app = FastAPI(lifespan=lifespan)
async def auto_refresh_loop():
    """Background task: every 60 seconds, ping all registered nodes and
    refresh their stored status (full SSH info check on reachable nodes,
    'Offline' on unreachable ones)."""
    print("🚀 Auto-Refresh Task gestartet...")
    while True:
        try:
            conn = get_db()
            nodes = conn.execute('SELECT id, ip FROM nodes').fetchall()
            conn.close()
            for n in nodes:
                # Quick reachability probe: single ping, 1-second timeout.
                proc = await asyncio.create_subprocess_exec(
                    "ping", "-c", "1", "-W", "1", n['ip'],
                    stdout=asyncio.subprocess.DEVNULL,
                    stderr=asyncio.subprocess.DEVNULL
                )
                await proc.wait()
                if proc.returncode == 0:
                    # Reachable: run the detailed SSH check (os/arch/docker/vnc).
                    await check_and_update_node(n['id'])
                else:
                    # Unreachable: mark the node offline in the DB.
                    conn = get_db()
                    conn.execute("UPDATE nodes SET status='Offline' WHERE id=?", (n['id'],))
                    conn.commit()
                    conn.close()
                    print(f"⚠️ Auto-Refresh: Node {n['ip']} ist per Ping nicht erreichbar.")
        except Exception as e:
            # Never let one bad round kill the loop.
            print(f"🚨 Schwerer Fehler im Auto-Refresh Loop: {e}")
        # Sleep only at the end of a full round.
        await asyncio.sleep(60)
# Helper for the SSH-based node info check
async def check_and_update_node(node_id: int):
    """Probe one node over SSH and persist its arch, docker flag, OS name
    and VNC flag in the DB; on any failure mark it 'Offline/Fehler'.

    Returns None in all paths; callers only observe the DB side effect.
    """
    conn = get_db()
    node = conn.execute('SELECT * FROM nodes WHERE id = ?', (node_id,)).fetchone()
    if not node:
        conn.close()
        return
    # One remote shell pipeline that prints "arch|docker|os|vnc".
    check_cmd = (
        'arch=$(uname -m); '
        'dock=$(command -v docker >/dev/null 2>&1 && echo 1 || echo 0); '
        'os=$(grep "^ID=" /etc/os-release 2>/dev/null | cut -d= -f2 | tr -d \'"\'); '
        '[ -z "$os" ] && os=$(uname -s); '
        'vnc=$(command -v vncserver >/dev/null 2>&1 && echo 1 || echo 0); '
        'echo "$arch|$dock|$os|$vnc"'
    )
    try:
        # Asynchronous subprocess call so the event loop is not blocked.
        proc = await asyncio.create_subprocess_exec(
            "ssh", "-o", "StrictHostKeyChecking=no", "-o", "ConnectTimeout=3",
            f"{node['user']}@{node['ip']}", check_cmd,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE
        )
        stdout, stderr = await proc.communicate()
        if proc.returncode == 0:
            output = stdout.decode().strip()
            # Parse the pipe-separated probe output produced above.
            arch, dock, os_name, vnc = output.split('|')
            status = "Docker Aktiv" if dock == "1" else "Bereit (Kein Docker)"
            conn.execute('''
                UPDATE nodes SET status=?, arch=?, docker=?, os=?, vnc=? WHERE id=?
            ''', (status, arch, int(dock), os_name, int(vnc), node_id))
            conn.commit()
            print(f"✅ Auto-Refresh: Node {node['ip']} ist online ({status})")
        else:
            # Non-zero SSH exit: surface stderr via the except branch below.
            raise Exception(stderr.decode())
    except Exception as e:
        print(f"❌ Auto-Refresh Fehler bei {node['ip']}: {e}")
        conn.execute("UPDATE nodes SET status='Offline/Fehler' WHERE id=?", (node_id,))
        conn.commit()
    finally:
        conn.close()
# --- Routen --- # --- Routen ---
@@ -276,15 +201,12 @@ async def index(request: Request):
return templates.TemplateResponse("index.html", {"request": request, "nodes": nodes}) return templates.TemplateResponse("index.html", {"request": request, "nodes": nodes})
@app.post("/add_node") @app.post("/add_node")
async def add_node(background_tasks: BackgroundTasks, name: str = Form(...), ip: str = Form(...), user: str = Form(...), password: str = Form(...), sudo_pass: str = Form(default="")): async def add_node(background_tasks: BackgroundTasks, name: str = Form(...), ip: str = Form(...), user: str = Form(...), password: str = Form(...)):
conn = get_db() conn = get_db()
try: try:
conn.execute(''' conn.execute('INSERT INTO nodes (name, ip, user, status) VALUES (?, ?, ?, ?)', (name, ip, user, "Kopplung..."))
INSERT INTO nodes (name, ip, user, sudo_pass, status)
VALUES (?, ?, ?, ?, ?)
''', (name, ip, user, sudo_pass, "Kopplung..."))
conn.commit() conn.commit()
background_tasks.add_task(bootstrap_ssh_only, ip, user, password, sudo_pass) background_tasks.add_task(bootstrap_ssh_only, ip, user, password)
except sqlite3.IntegrityError: pass except sqlite3.IntegrityError: pass
finally: conn.close() finally: conn.close()
return RedirectResponse(url="/", status_code=303) return RedirectResponse(url="/", status_code=303)
@@ -297,48 +219,6 @@ async def remove_node(node_id: int):
conn.close() conn.close()
return RedirectResponse(url="/", status_code=303) return RedirectResponse(url="/", status_code=303)
# --- Endpoint to fetch a node's data (for the edit dialog) ---
@app.get("/api/node/{node_id}")
async def get_node_details(node_id: int):
    """Return id, name, ip, user and sudo_pass of one node as JSON."""
    conn = get_db()
    node = conn.execute('SELECT id, name, ip, user, sudo_pass FROM nodes WHERE id = ?', (node_id,)).fetchone()
    conn.close()
    if node:
        # NOTE(review): this sends the stored sudo password in plain text to
        # the frontend — confirm the app is never exposed beyond localhost.
        return dict(node)
    return {"error": "Node nicht gefunden"}
# --- Endpoint to save edited node data ---
@app.post("/edit_node/{node_id}")
async def edit_node(node_id: int, name: str = Form(...), ip: str = Form(...), user: str = Form(...), sudo_pass: str = Form(default="")):
    """Update a node's editable fields from the form and redirect home."""
    conn = get_db()
    conn.execute('''
        UPDATE nodes SET name=?, ip=?, user=?, sudo_pass=? WHERE id=?
    ''', (name, ip, user, sudo_pass, node_id))
    conn.commit()
    conn.close()
    # 303 forces a GET after the POST (post/redirect/get pattern).
    return RedirectResponse(url="/", status_code=303)
@app.get("/refresh_status/{node_id}")
async def refresh_status_endpoint(node_id: int):
    """Re-probe one node over SSH and return its freshly stored fields."""
    # Runs the full SSH info check (updates status, os, arch, docker, vnc).
    # NOTE(review): check_and_update_node has no return value, so
    # new_status is always None and is never used below.
    new_status = await check_and_update_node(node_id)
    # Re-read the row so the frontend gets the just-updated values.
    conn = get_db()
    node = conn.execute('SELECT * FROM nodes WHERE id=?', (node_id,)).fetchone()
    conn.close()
    if node:
        return {
            "status": node["status"],
            "os": node["os"],
            "arch": node["arch"],
            "docker": node["docker"],
            "vnc": node["vnc"]
        }
    return {"status": "Offline/Fehler"}
# --- EINSTELLUNGEN & KI MODELLE ---
@app.get("/api/settings") @app.get("/api/settings")
async def get_settings(): async def get_settings():
return { return {
@@ -346,20 +226,23 @@ async def get_settings():
"google_model": GOOGLE_MODEL, "google_model": GOOGLE_MODEL,
"openai_model": OPENAI_MODEL, "openai_model": OPENAI_MODEL,
"ollama_model": OLLAMA_MODEL, "ollama_model": OLLAMA_MODEL,
"ollama_base_url": OLLAMA_BASE_URL "ollama_base_url": OLLAMA_BASE_URL # URL ans Frontend schicken
} }
@app.post("/api/settings") @app.post("/api/settings")
async def update_settings(request: Request): async def update_settings(request: Request):
# WICHTIG: OLLAMA_BASE_URL als global deklarieren
global AI_PROVIDER, GOOGLE_MODEL, OPENAI_MODEL, OLLAMA_MODEL, OLLAMA_BASE_URL global AI_PROVIDER, GOOGLE_MODEL, OPENAI_MODEL, OLLAMA_MODEL, OLLAMA_BASE_URL
data = await request.json() data = await request.json()
provider = data.get("provider") provider = data.get("provider")
model = data.get("model") model = data.get("model")
ollama_url = data.get("ollama_base_url") ollama_url = data.get("ollama_base_url") # URL vom Frontend empfangen
if provider: if provider:
AI_PROVIDER = provider AI_PROVIDER = provider
set_key(ENV_FILE, "AI_PROVIDER", provider) set_key(ENV_FILE, "AI_PROVIDER", provider)
if provider == "google" and model: if provider == "google" and model:
GOOGLE_MODEL = model GOOGLE_MODEL = model
set_key(ENV_FILE, "GOOGLE_MODEL", model) set_key(ENV_FILE, "GOOGLE_MODEL", model)
@@ -369,6 +252,8 @@ async def update_settings(request: Request):
elif provider == "ollama" and model: elif provider == "ollama" and model:
OLLAMA_MODEL = model OLLAMA_MODEL = model
set_key(ENV_FILE, "OLLAMA_MODEL", model) set_key(ENV_FILE, "OLLAMA_MODEL", model)
# Wenn eine Ollama-URL mitgeschickt wurde, speichern wir sie
if ollama_url: if ollama_url:
OLLAMA_BASE_URL = ollama_url OLLAMA_BASE_URL = ollama_url
set_key(ENV_FILE, "OLLAMA_BASE_URL", ollama_url) set_key(ENV_FILE, "OLLAMA_BASE_URL", ollama_url)
@@ -379,7 +264,9 @@ async def update_settings(request: Request):
async def get_models(provider: str, url: str = None): async def get_models(provider: str, url: str = None):
try: try:
models = [] models = []
if provider == "ollama" and url: if provider == "ollama" and url:
# Das Backend hat keine CORS-Probleme und fragt Ollama direkt
clean_url = url.replace("/v1", "").rstrip("/") clean_url = url.replace("/v1", "").rstrip("/")
async with httpx.AsyncClient() as client: async with httpx.AsyncClient() as client:
response = await client.get(f"{clean_url}/api/tags", timeout=5.0) response = await client.get(f"{clean_url}/api/tags", timeout=5.0)
@@ -388,31 +275,44 @@ async def get_models(provider: str, url: str = None):
elif provider == "openai": elif provider == "openai":
if not OPENAI_API_KEY or "hier" in OPENAI_API_KEY: return {"models": []} if not OPENAI_API_KEY or "hier" in OPENAI_API_KEY: return {"models": []}
# Hier greift das Backend auf deinen OpenAI API-Key zu
import openai import openai
client = openai.AsyncOpenAI(api_key=OPENAI_API_KEY) client = openai.AsyncOpenAI(api_key=OPENAI_API_KEY)
response = await client.models.list() response = await client.models.list()
# Nur GPT-Modelle filtern, um die Liste sauber zu halten
models = [m.id for m in response.data if "gpt" in m.id or "o1" in m.id] models = [m.id for m in response.data if "gpt" in m.id or "o1" in m.id]
models.sort() models.sort()
elif provider == "google": elif provider == "google":
if not GOOGLE_API_KEY: return {"models": ["API-Key fehlt"]} if not GOOGLE_API_KEY:
return {"models": ["API-Key fehlt"]}
client = genai.Client(api_key=GOOGLE_API_KEY) client = genai.Client(api_key=GOOGLE_API_KEY)
models = []
# Im neuen SDK (google-genai) heißt das Feld 'supported_actions'
for m in client.models.list(): for m in client.models.list():
if 'generateContent' in m.supported_actions: if 'generateContent' in m.supported_actions:
models.append(m.name.replace("models/", "")) # Wir nehmen den Namen und entfernen das 'models/' Präfix
model_name = m.name.replace("models/", "")
models.append(model_name)
models.sort() models.sort()
return {"models": models} return {"models": models}
except Exception as e: except Exception as e:
print(f"Fehler: {str(e)}") print(f"Fehler beim Abrufen der Modelle für {provider}: {str(e)}")
return {"models": []} return {"models": []} # Gibt eine leere Liste zurück -> Frontend nutzt Fallback
# --- WebSockets ---
# --- WebSockets Logs & Terminal (Bleibt identisch) ---
@app.websocket("/ws/install_logs") @app.websocket("/ws/install_logs")
async def log_websocket(websocket: WebSocket): async def log_websocket(websocket: WebSocket):
await manager.connect(websocket) await manager.connect(websocket)
try: try:
while True: await websocket.receive_text() while True:
await websocket.receive_text()
except WebSocketDisconnect: except WebSocketDisconnect:
manager.disconnect(websocket) manager.disconnect(websocket)
@@ -428,6 +328,7 @@ async def terminal_websocket(websocket: WebSocket, ip: str):
await websocket.close() await websocket.close()
return return
# Pseudo-Terminal für interaktive SSH-Session
master_fd, slave_fd = pty.openpty() master_fd, slave_fd = pty.openpty()
proc = await asyncio.create_subprocess_exec( proc = await asyncio.create_subprocess_exec(
"ssh", "-o", "StrictHostKeyChecking=no", "-t", f"{node['user']}@{ip}", "ssh", "-o", "StrictHostKeyChecking=no", "-t", f"{node['user']}@{ip}",
@@ -435,6 +336,7 @@ async def terminal_websocket(websocket: WebSocket, ip: str):
) )
async def pty_to_ws(): async def pty_to_ws():
# Setzt den Master-FD auf non-blocking
fl = fcntl.fcntl(master_fd, fcntl.F_GETFL) fl = fcntl.fcntl(master_fd, fcntl.F_GETFL)
fcntl.fcntl(master_fd, fcntl.F_SETFL, fl | os.O_NONBLOCK) fcntl.fcntl(master_fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
try: try:
@@ -442,8 +344,10 @@ async def terminal_websocket(websocket: WebSocket, ip: str):
await asyncio.sleep(0.01) await asyncio.sleep(0.01)
try: try:
data = os.read(master_fd, 1024).decode(errors='ignore') data = os.read(master_fd, 1024).decode(errors='ignore')
if data: await websocket.send_text(data) if data:
except BlockingIOError: continue await websocket.send_text(data)
except BlockingIOError:
continue
except Exception: pass except Exception: pass
async def ws_to_pty(): async def ws_to_pty():
@@ -461,14 +365,16 @@ async def terminal_websocket(websocket: WebSocket, ip: str):
os.close(slave_fd) os.close(slave_fd)
# --- WEBSOCKET CHAT UPDATE --- # --- WEBSOCKET CHAT UPDATE ---
@app.websocket("/ws/chat") @app.websocket("/ws/chat")
async def chat_endpoint(websocket: WebSocket): async def chat_endpoint(websocket: WebSocket):
await websocket.accept() await websocket.accept()
# Check ob Key vorhanden ist
if AI_PROVIDER == "google" and not GOOGLE_API_KEY: if AI_PROVIDER == "google" and not GOOGLE_API_KEY:
await websocket.send_text("⚠️ Kein GOOGLE_API_KEY in der `.env` gefunden!") await websocket.send_text("⚠️ **Konfigurationsfehler:** Kein GOOGLE_API_KEY in der `.env` gefunden!")
elif AI_PROVIDER == "openai" and not OPENAI_API_KEY: elif AI_PROVIDER == "openai" and not OPENAI_API_KEY:
await websocket.send_text("⚠️ Kein OPENAI_API_KEY in der `.env` gefunden!") await websocket.send_text("⚠️ **Konfigurationsfehler:** Kein OPENAI_API_KEY in der `.env` gefunden!")
try: try:
while True: while True:
@@ -476,55 +382,112 @@ async def chat_endpoint(websocket: WebSocket):
sys_prompt = get_system_prompt() sys_prompt = get_system_prompt()
ai_response = await get_ai_response(user_msg, sys_prompt) ai_response = await get_ai_response(user_msg, sys_prompt)
# Befehle extrahieren
commands_to_run = re.findall(r'<EXECUTE target="(.*?)">(.*?)</EXECUTE>', ai_response, re.IGNORECASE | re.DOTALL) commands_to_run = re.findall(r'<EXECUTE target="(.*?)">(.*?)</EXECUTE>', ai_response, re.IGNORECASE | re.DOTALL)
clean_chat_msg = re.sub(r'<EXECUTE.*?>.*?</EXECUTE>', '', ai_response, flags=re.IGNORECASE | re.DOTALL).strip() clean_chat_msg = re.sub(r'<EXECUTE.*?>.*?</EXECUTE>', '', ai_response, flags=re.IGNORECASE | re.DOTALL).strip()
if clean_chat_msg: await websocket.send_text(clean_chat_msg) if clean_chat_msg:
await websocket.send_text(clean_chat_msg)
if commands_to_run: if commands_to_run:
# Liste für alle laufenden Tasks erstellen
tasks = [] tasks = []
for target_ip, cmd in commands_to_run: for target_ip, cmd in commands_to_run:
conn = get_db() conn = get_db()
node = conn.execute('SELECT * FROM nodes WHERE ip = ? OR name = ?', (target_ip.strip(), target_ip.strip())).fetchone() node = conn.execute('SELECT * FROM nodes WHERE ip = ? OR name = ?', (target_ip.strip(), target_ip.strip())).fetchone()
conn.close() conn.close()
if node: if node:
# KI soll sudo mit dem hinterlegten Passwort nutzen # Wir erstellen den Task, starten ihn aber noch nicht separat
if "sudo " in cmd and node['sudo_pass']:
# Ersetze `sudo Befehl` mit `echo 'passwort' | sudo -S Befehl`
cmd = cmd.replace("sudo ", f"echo '{node['sudo_pass']}' | sudo -S ")
tasks.append(run_remote_task(node['ip'], node['user'], cmd.strip(), "KI-Kommando")) tasks.append(run_remote_task(node['ip'], node['user'], cmd.strip(), "KI-Kommando"))
if tasks: if tasks:
# Dem Nutzer im Chat kurz Bescheid geben
await websocket.send_text(" *Warte auf Rückmeldungen der Nodes...*") await websocket.send_text(" *Warte auf Rückmeldungen der Nodes...*")
# Jetzt werden alle SSH-Befehle gleichzeitig gestartet und abgewartet
await asyncio.gather(*tasks) await asyncio.gather(*tasks)
# Sobald asyncio.gather fertig ist, geht es hier weiter mit dem Follow-up:
follow_up_prompt = "Die Befehle wurden ausgeführt. Bitte fasse die Ergebnisse kurz zusammen." follow_up_prompt = "Die Befehle wurden ausgeführt. Bitte fasse die Ergebnisse kurz zusammen."
ai_summary = await get_ai_response(follow_up_prompt, sys_prompt) ai_summary = await get_ai_response(follow_up_prompt, sys_prompt)
await websocket.send_text("--- Ergebnis ---")
await websocket.send_text("--- Ergebnis-Zusammenfassung ---")
await websocket.send_text(ai_summary) await websocket.send_text(ai_summary)
except Exception as e: print(f"Chat Fehler: {e}") except Exception as e:
print(f"Chat Fehler: {e}")
async def run_remote_task(ip, user, cmd, task_name): async def run_remote_task(ip, user, cmd, task_name):
global chat_history global chat_history # Zugriff auf das Gedächtnis der KI
await manager.broadcast(f"🚀 KI-Task gestartet: {cmd} auf {ip}") await manager.broadcast(f"🚀 KI-Task gestartet: {cmd} auf {ip}")
ssh_cmd = f"ssh -o StrictHostKeyChecking=no {user}@{ip} '{cmd}'" ssh_cmd = f"ssh -o StrictHostKeyChecking=no {user}@{ip} '{cmd}'"
process = await asyncio.create_subprocess_shell(ssh_cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.STDOUT)
process = await asyncio.create_subprocess_shell(
ssh_cmd,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.STDOUT
)
# Hier speichern wir die komplette Terminal-Ausgabe
full_output = "" full_output = ""
while True: while True:
line = await process.stdout.readline() line = await process.stdout.readline()
if not line: break if not line:
break
decoded_line = line.decode('utf-8', errors='ignore').strip() decoded_line = line.decode('utf-8', errors='ignore').strip()
if decoded_line: if decoded_line:
await manager.broadcast(f"🛠️ {decoded_line}") await manager.broadcast(f"🛠️ {decoded_line}")
full_output += decoded_line + "\n" full_output += decoded_line + "\n"
# Gib dem Event-Loop kurz Zeit, andere Tasks (wie WebSockets) zu bedienen
await asyncio.sleep(0.001) await asyncio.sleep(0.001)
await process.wait() await process.wait()
if not full_output.strip(): full_output = "Befehl wurde ohne Ausgabe ausgeführt."
system_report = f"[SYSTEM] Befehl '{cmd}' auf {ip} beendet. Ausgabe:\n{full_output}" # --- NEU: Feedback an die KI ---
# Wir bereiten den Bericht vor
if not full_output.strip():
full_output = "Befehl wurde ohne Ausgabe ausgeführt (Exit Code 0)."
system_report = f"[SYSTEM-RÜCKMELDUNG] Der Befehl '{cmd}' auf Node {ip} wurde beendet. Ausgabe des Terminals:\n{full_output}"
# Wir schmuggeln den Bericht als "User"-Nachricht in den Verlauf,
# damit die KI beim nächsten Mal weiß, was passiert ist.
chat_history.append({"role": "user", "content": system_report}) chat_history.append({"role": "user", "content": system_report})
# -------------------------------
if "docker" in cmd.lower() and "install" in cmd.lower():
# Kleiner Bonus: Nur updaten, wenn wirklich installiert wird
await manager.broadcast(f"✨ Bitte aktualisiere den Status für {ip} manuell über das Refresh-Icon.")
await manager.broadcast(f"✅ Befehl auf {ip} abgeschlossen.") await manager.broadcast(f"✅ Befehl auf {ip} abgeschlossen.")
# --- New endpoint: manual refresh check ---
@app.get("/refresh_status/{node_id}")
async def refresh_status(node_id: int):
    """Check one node over SSH and return only its new status string."""
    conn = get_db()
    node = conn.execute('SELECT * FROM nodes WHERE id = ?', (node_id,)).fetchone()
    if node:
        # Quick SSH check: does the docker binary exist on the node?
        check_cmd = "command -v docker >/dev/null 2>&1 && echo 'Docker Aktiv' || echo 'Bereit (Kein Docker)'"
        ssh_cmd = f"ssh -o StrictHostKeyChecking=no -o ConnectTimeout=2 {node['user']}@{node['ip']} \"{check_cmd}\""
        try:
            # Run the command and take its stdout as the new status.
            # NOTE(review): subprocess.check_output is a blocking call inside
            # an async handler (stalls the event loop), and `import subprocess`
            # was removed from this file's imports in the same commit —
            # confirm the name is still in scope.
            new_status = subprocess.check_output(ssh_cmd, shell=True).decode().strip()
        except Exception:
            new_status = "Offline/Fehler"
        conn.execute('UPDATE nodes SET status = ? WHERE id = ?', (new_status, node_id))
        conn.commit()
        conn.close()
        return {"status": new_status}  # only the status string is returned
    conn.close()
    return {"status": "Unbekannt"}
if __name__ == "__main__": if __name__ == "__main__":
import uvicorn import uvicorn