Files
PiDoBot/pi_admin/main.py

106 lines
3.9 KiB
Python

import json
import os
from fastapi import FastAPI, Request, WebSocket
from fastapi.templating import Jinja2Templates
from fastapi.staticfiles import StaticFiles
import paramiko
from python_on_whales import DockerClient
# FastAPI application instance; all routes below are registered on it.
app = FastAPI()
# Jinja2 renderer; expects an "index.html" under ./templates.
templates = Jinja2Templates(directory="templates")
# --- Node persistence (simple JSON file store) ---
NODES_FILE = "nodes.json"


def load_nodes() -> dict:
    """Return the saved node registry, or {} when missing or unreadable.

    The registry maps node IP -> {"name": ..., "status": ...}.
    A truncated/corrupt nodes.json must not take the whole app down,
    so JSON/IO errors degrade to an empty registry.
    """
    if not os.path.exists(NODES_FILE):
        return {}
    try:
        with open(NODES_FILE, "r", encoding="utf-8") as f:
            return json.load(f)
    except (json.JSONDecodeError, OSError):
        return {}


def save_nodes(nodes: dict) -> None:
    """Persist the node registry to NODES_FILE as UTF-8 JSON."""
    with open(NODES_FILE, "w", encoding="utf-8") as f:
        json.dump(nodes, f, ensure_ascii=False, indent=2)
@app.get("/")
async def index(request: Request):
    """Render the dashboard template with the currently known nodes."""
    context = {"request": request, "nodes": load_nodes()}
    return templates.TemplateResponse("index.html", context)
# --- Node Management ---
@app.post("/add_node")
async def add_node(data: dict):
    """Register a new worker node.

    Expects a JSON body with "ip" and "name"; the node is stored with a
    "connecting" status. Missing keys now yield an error payload instead
    of an unhandled KeyError (HTTP 500).
    """
    ip = data.get("ip")
    name = data.get("name")
    if not ip or not name:
        return {"status": "error", "detail": "'ip' and 'name' are required"}
    nodes = load_nodes()
    nodes[ip] = {"name": name, "status": "connecting"}
    save_nodes(nodes)
    # TODO: kick off the node bootstrap process in the background here.
    return {"status": "added"}
# --- SSH & Command Logic ---
def run_ssh_cmd(ip, user, password, cmd):
    """Run *cmd* on a remote host via SSH with non-interactive sudo.

    Returns the command's stdout, with stderr appended when present so
    remote failures are visible. Connection/exec errors are returned as
    text rather than raised, so chat handlers can relay them directly.
    """
    ssh = paramiko.SSHClient()
    # NOTE(review): auto-accepting unknown host keys is convenient on a
    # private LAN but defeats MITM protection — consider a known_hosts file.
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    try:
        ssh.connect(ip, username=user, password=password, timeout=10)
        # "sudo -n" fails fast instead of hanging on a password prompt.
        _, stdout, stderr = ssh.exec_command(f"sudo -n {cmd}")
        output = stdout.read().decode()
        err = stderr.read().decode()
        if err:
            output = f"{output}\n[stderr] {err}" if output else f"[stderr] {err}"
        return output
    except Exception as e:
        return str(e)
    finally:
        # Always release the connection — the original leaked it when
        # connect() or exec_command() raised.
        ssh.close()
# --- Ollama installation Logic ---
def install_ollama(ip, user, password, is_local=False):
    """Install Ollama on the master Pi (is_local=True) or on a worker via SSH.

    Returns a human-readable result string; errors are returned as text
    rather than raised so chat handlers can relay them to the user.
    """
    install_cmd = "curl -fsSL https://ollama.com/install.sh | sh"
    if not is_local:
        # Worker node: reuse the generic SSH runner.
        return run_ssh_cmd(ip, user, password, install_cmd)
    # Local install on the master Pi itself.
    import subprocess
    try:
        proc = subprocess.run(
            install_cmd, shell=True, capture_output=True, text=True
        )
        if proc.returncode != 0:
            # Surface stderr so a failed install is not reported as success
            # (the original ignored both the exit code and stderr).
            return f"Lokal Fehler (exit {proc.returncode}): {proc.stderr}"
        return f"Lokal: {proc.stdout}"
    except Exception as e:
        return f"Lokal Fehler: {str(e)}"
# --- Chat & AI Logic ---
@app.websocket("/ws/chat")
async def chat_endpoint(websocket: WebSocket):
    """Chat loop: receive user text and trigger install actions.

    NOTE: the "AI" here is keyword matching — a placeholder for a real
    LLM-driven intent parser.
    """
    await websocket.accept()
    while True:
        user_msg = await websocket.receive_text()
        msg = user_msg.lower()
        if "installiere docker" in msg:
            # Placeholder: the target IP would be extracted by the LLM.
            ip = "192.168.1.10"
            await websocket.send_text(f"🤖 Starte Docker-Installation auf {ip}...")
            result = run_ssh_cmd(ip, "pi", "raspberry", "curl -sSL https://get.docker.com | sh")
            await websocket.send_text(f"✅ Ergebnis: {result[:100]}...")
        elif "installiere ollama" in msg:
            # Match the target Pi by name or IP against the saved registry.
            # (Bugfix: `nodes` was previously referenced without ever being
            # loaded, raising NameError; it was also only reachable after
            # the generic fallback reply had already been sent.)
            nodes = load_nodes()
            target_ip = None
            for node_ip, info in nodes.items():
                if info["name"].lower() in msg or node_ip in user_msg:
                    target_ip = node_ip
                    break
            if target_ip:
                await websocket.send_text(
                    f"🤖 Starte Ollama-Installation auf {nodes[target_ip]['name']} ({target_ip})..."
                )
                # TODO: proper credential handling — do not hard-code passwords.
                result = install_ollama(target_ip, "pi", "DEIN_PASSWORT")
                # Relay the actual result instead of unconditionally
                # claiming success (the original discarded `result`).
                await websocket.send_text(f"✅ Ergebnis auf {target_ip}: {result[:100]}")
            else:
                await websocket.send_text("🤖 Auf welchem Pi soll ich Ollama installieren? (Nenne Name oder IP)")
        else:
            await websocket.send_text(f"🤖 Ich habe empfangen: '{user_msg}'. Wie kann ich helfen?")
if __name__ == "__main__":
    # Dev entry point: serve on all interfaces, port 8000.
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=8000)