main.py hinzugefügt
This commit is contained in:
164
main.py
Normal file
164
main.py
Normal file
@@ -0,0 +1,164 @@
|
||||
import json
import os
import pty
import select
import shlex
import subprocess
import threading

import paramiko
from fastapi import FastAPI, WebSocket, BackgroundTasks, Request
from fastapi.responses import HTMLResponse
from fastapi.staticfiles import StaticFiles
from fastapi.templating import Jinja2Templates
from python_on_whales import DockerClient
|
||||
|
||||
app = FastAPI()

templates = Jinja2Templates(directory="templates")

# Path to the SSH key used for passwordless access to worker nodes.
SSH_KEY = os.path.expanduser("~/.ssh/id_rsa")

# Storage for registered nodes (simple JSON file).
NODES_FILE = "nodes.json"
|
||||
def load_nodes():
    """Return the registered nodes as a dict, or {} if no registry exists yet.

    The registry is persisted in NODES_FILE as JSON mapping
    ip -> {"name": ..., "status": ...}.
    """
    # EAFP: open directly instead of exists()+open(), avoiding the race
    # between the check and the read; also pin the text encoding.
    try:
        with open(NODES_FILE, "r", encoding="utf-8") as f:
            return json.load(f)
    except FileNotFoundError:
        return {}
|
||||
|
||||
def save_nodes(nodes):
    """Persist the node registry dict to NODES_FILE as JSON."""
    # Pin the encoding so the file round-trips with load_nodes on any locale.
    with open(NODES_FILE, "w", encoding="utf-8") as f:
        json.dump(nodes, f)
|
||||
|
||||
@app.get("/")
async def index(request: Request):
    """Render the dashboard template with the currently registered nodes."""
    return templates.TemplateResponse("index.html", {"request": request, "nodes": load_nodes()})
|
||||
|
||||
# --- Node Management ---
|
||||
|
||||
@app.post("/add_node")
async def add_node(data: dict):
    """Register a new worker node.

    Expects a JSON body with "ip" and "name"; the node is stored with an
    initial "connecting" status. The actual bootstrap of the node would
    be started in the background here.
    """
    ip = data.get("ip")
    name = data.get("name")
    # Reject malformed payloads explicitly instead of failing with a
    # KeyError (HTTP 500) on the raw dict access.
    if not ip or not name:
        return {"status": "error", "detail": "'ip' and 'name' are required"}
    nodes = load_nodes()
    nodes[ip] = {"name": name, "status": "connecting"}
    save_nodes(nodes)
    return {"status": "added"}
|
||||
|
||||
# --- SSH & Command Logic ---
|
||||
|
||||
def run_ssh_cmd(ip, user, password, cmd):
    """Run *cmd* with passwordless sudo on *ip* via SSH and return its stdout.

    On any failure (connect, auth, exec) the exception text is returned as a
    string instead of raising, so callers can stream it to the chat UI.
    """
    ssh = paramiko.SSHClient()
    # Lab convenience: auto-accept unknown host keys. Not safe on untrusted
    # networks (enables MITM) — acceptable only for a closed cluster.
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    try:
        ssh.connect(ip, username=user, password=password, timeout=10)
        stdin, stdout, stderr = ssh.exec_command(f"sudo -n {cmd}")
        return stdout.read().decode()
    except Exception as e:
        return str(e)
    finally:
        # BUGFIX: close was skipped on the exception path, leaking the
        # connection; a finally releases it unconditionally.
        ssh.close()
|
||||
|
||||
# --- Ollama Installation Logic ---
|
||||
|
||||
def install_ollama(ip, user, password, is_local=False):
    """Install Ollama on this host (is_local=True) or on a remote node via SSH.

    Returns the installer's stdout as a string, or an error description on
    failure (mirrors run_ssh_cmd's never-raise contract).
    """
    install_cmd = "curl -fsSL https://ollama.com/install.sh | sh"

    if is_local:
        # Installation on the master Pi itself; shell=True is needed for the
        # pipe in the command string. (The redundant local `import subprocess`
        # was removed — the module is already imported at file level.)
        try:
            process = subprocess.Popen(install_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            stdout, stderr = process.communicate()
            return f"Lokal: {stdout.decode()}"
        except Exception as e:
            return f"Lokal Fehler: {str(e)}"
    else:
        # Installation on a worker node via SSH.
        return run_ssh_cmd(ip, user, password, install_cmd)
|
||||
|
||||
# --- File Management Logic ---
|
||||
|
||||
def read_pi_file(ip, path, user="pi", password="pass"):
    """Read a file (e.g. a docker-compose.yml) from a Pi and return its content.

    Credentials were hard-coded; they are now overridable parameters with the
    original values as backward-compatible defaults.
    """
    return run_ssh_cmd(ip, user, password, f"cat {path}")
|
||||
|
||||
def write_pi_file(ip, path, content, user="pi", password="pass"):
    """Write *content* to *path* on a Pi (important for AI-generated configs).

    Credentials were hard-coded; they are now overridable parameters with the
    original values as backward-compatible defaults.

    NOTE(review): *content* is embedded in a single-quoted shell string — any
    single quote inside it breaks the command and allows shell injection.
    Consider SFTP (paramiko open_sftp) for a robust write path.
    """
    cmd = f"echo '{content}' | sudo tee {path}"
    return run_ssh_cmd(ip, user, password, cmd)
|
||||
|
||||
# --- Chat & AI Logic ---
|
||||
|
||||
@app.websocket("/ws/chat")
async def chat_endpoint(websocket: WebSocket):
    """Chat websocket: matches keywords in user messages and triggers actions.

    The AI logic is simulated with simple substring checks; a real LLM call
    would decide which command to run on which node.
    """
    await websocket.accept()
    while True:
        user_msg = await websocket.receive_text()

        # SIMULATED AI LOGIC (the real LLM call goes here).
        # The AI would decide e.g.: "install Docker on node 192.168.1.10".
        if "installiere docker" in user_msg.lower():
            # Example flow with a hard-coded target.
            ip = "192.168.1.10"  # would be extracted by the AI
            await websocket.send_text(f"🤖 Starte Docker-Installation auf {ip}...")
            # NOTE(review): run_ssh_cmd is blocking and stalls the event loop
            # for the whole install; consider loop.run_in_executor.
            result = run_ssh_cmd(ip, "pi", "raspberry", "curl -sSL https://get.docker.com | sh")
            await websocket.send_text(f"✅ Ergebnis: {result[:100]}...")
        else:
            await websocket.send_text(f"🤖 Ich habe empfangen: '{user_msg}'. Wie kann ich helfen?")

        if "installiere ollama" in user_msg:
            # BUGFIX: `nodes` was referenced without ever being assigned,
            # raising NameError and killing the websocket on this branch.
            nodes = load_nodes()
            # Naive detection of the target Pi by name or IP in the message.
            target_ip = None
            for ip, info in nodes.items():
                if info['name'].lower() in user_msg or ip in user_msg:
                    target_ip = ip
                    break

            if target_ip:
                await websocket.send_text(f"🤖 Starte Ollama-Installation auf {nodes[target_ip]['name']} ({target_ip})...")
                # Trigger the installation (mind the password handling!).
                # NOTE(review): the success message below is sent regardless of
                # `result`; surface failures to the user once real creds exist.
                result = install_ollama(target_ip, "pi", "DEIN_PASSWORT")
                await websocket.send_text(f"✅ Ollama erfolgreich installiert auf {target_ip}.")
            else:
                await websocket.send_text("🤖 Auf welchem Pi soll ich Ollama installieren? (Nenne Name oder IP)")
|
||||
|
||||
def ensure_ssh_key():
    """Generate an RSA key pair at SSH_KEY if none exists yet."""
    if os.path.exists(SSH_KEY):
        return
    # Non-interactive keygen: empty passphrase (-N ""), fixed output path.
    subprocess.run(["ssh-keygen", "-t", "rsa", "-N", "", "-f", SSH_KEY])
|
||||
|
||||
async def deploy_key_and_install(ip, user, password, ws: WebSocket):
    """Bootstrap a node: copy our SSH key, then install Docker and Ollama.

    Progress lines are streamed to the browser over *ws*.

    NOTE(review): the Popen/stdout reads below are blocking and run directly
    on the event loop, stalling all other websocket traffic while the install
    runs — move them to a thread/executor or use asyncio.subprocess.
    """
    ensure_ssh_key()
    await ws.send_text(f"📦 Initialisiere Node {ip}...")

    # 1. Copy the SSH key (automated with sshpass).
    # SECURITY FIX: the password was interpolated into a single-quoted shell
    # string, allowing shell injection; shlex.quote escapes it safely.
    cmd_copy = f"sshpass -p {shlex.quote(password)} ssh-copy-id -o StrictHostKeyChecking=no {user}@{ip}"
    process = subprocess.Popen(cmd_copy, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True)

    for line in process.stdout:
        await ws.send_text(f"🔑 SSH-Key: {line.strip()}")

    # 2. Docker & Ollama installation, streaming installer output line by line.
    await ws.send_text(f"🚀 Starte Setup auf {ip}...")
    cmd_install = f"ssh {user}@{ip} 'curl -sSL https://get.docker.com | sh && curl -fsSL https://ollama.com/install.sh | sh'"

    process = subprocess.Popen(cmd_install, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True)
    for line in process.stdout:
        await ws.send_text(f"🛠️ Install: {line.strip()}")
|
||||
|
||||
# --- WEB TERMINAL LOGIK (Vereinfacht) ---
|
||||
@app.websocket("/ws/terminal/{ip}")
|
||||
async def terminal_websocket(websocket: WebSocket, ip: str):
|
||||
await websocket.accept()
|
||||
|
||||
# Öffne eine echte Shell-Sitzung zum Ziel-Node
|
||||
(master_fd, slave_fd) = pty.openpty()
|
||||
p = subprocess.Popen(["ssh", f"pi@{ip}"], stdin=slave_fd, stdout=slave_fd, stderr=slave_fd, text=True)
|
||||
|
||||
async def pty_to_ws():
|
||||
while True:
|
||||
await websocket.receive_text() # Input vom Browser empfangen (vereinfacht)
|
||||
# Hier müsste xterm.js Input an master_fd geschrieben werden
|
||||
|
||||
# Hinweis: Ein volles Terminal braucht bidirektionales Streaming von Bytes.
|
||||
await websocket.send_text(f"Verbunden mit {ip}. Terminal-Session gestartet.")
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Dev entry point: serve on all interfaces, port 8000.
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000)
|
||||
Reference in New Issue
Block a user