Compare commits
119 Commits
3697857ce1
...
dev
| Author | SHA1 | Date | |
|---|---|---|---|
| d8f3f869d3 | |||
| cc51044d71 | |||
| 478d6c91ed | |||
| 15e04d54dc | |||
| cdf9f1d34c | |||
| 3293f24965 | |||
| ed7b4c771b | |||
| 55ae56dee1 | |||
| 07bd5df3cd | |||
| 7bcd1f401c | |||
| f46c72ce80 | |||
| e0f2afba77 | |||
| 0fc72d77aa | |||
| 7393f37654 | |||
| 8922d7c918 | |||
| 159782b62f | |||
| a9448dc4b3 | |||
| ab53c68342 | |||
| faa3835f45 | |||
| 4e9b516caa | |||
| fff6f0767f | |||
| 0c21078a79 | |||
| fc71f166e3 | |||
| b9291d9c6b | |||
| 3ed41cf810 | |||
| b5f6ac1701 | |||
| a4b3d58d76 | |||
| 30ca757e34 | |||
| 409c079197 | |||
| 1ad587cdd6 | |||
| f53beea3bd | |||
| e12cf51ffe | |||
| 5748e4c9d3 | |||
| 2d8c35cc44 | |||
| d3b8239f98 | |||
| 7b92eb44ff | |||
| 1b19c368ff | |||
| 1ffb7cec26 | |||
| e5893ec407 | |||
| 2a616632a8 | |||
| d8f8465f1f | |||
| d7c66ca572 | |||
| 259e2f3208 | |||
| 0eefb3b373 | |||
| c40879e75f | |||
| a755d1e6d1 | |||
| 226b1c8a95 | |||
| 44e0ff26fa | |||
| 973fe8ae75 | |||
| 3ed40551ae | |||
| a0d5e4540b | |||
| 90c2439705 | |||
| f17a4fca13 | |||
| a661b357c8 | |||
| 36432cf01a | |||
| a11d68a974 | |||
| b4cafdf462 | |||
| 07fc7d1d7f | |||
| 3828c792e3 | |||
| c18b4e2141 | |||
| 6d64a299af | |||
| 102baa0ad9 | |||
| 330dee175f | |||
| 54be8a6f6f | |||
| 2e5e21b41d | |||
| 755ce2941c | |||
| 1c26996f20 | |||
| aaefc6dcc8 | |||
| 908877abaa | |||
| b90d216067 | |||
| f0011e386b | |||
| b2841b0196 | |||
| 3bb59f178b | |||
| 69baef276d | |||
| f36b72a528 | |||
| bde4c5428d | |||
| 8f5fac6470 | |||
| 6d21cd3a60 | |||
| bd9d683767 | |||
| b814d2a8a6 | |||
| 8774cce382 | |||
| 25ca67aac0 | |||
| 2d3321aa32 | |||
| 9168a349f2 | |||
| ee43462cc8 | |||
| 9a5f30baa0 | |||
| 6c3d5d3376 | |||
| 468a89a4eb | |||
| c6f88253ba | |||
| 7a22c461d8 | |||
| 2f29e15e4d | |||
| 64586f0058 | |||
| 9419c8a243 | |||
| 042761bc5f | |||
| 37391412d0 | |||
| f701dfbb35 | |||
| f1f7fa826f | |||
| 3011e4747b | |||
| 65dcf4c2ac | |||
| 4b233dd7d4 | |||
| a1d8cdfd01 | |||
| 2c083b268e | |||
| 9f0b587234 | |||
| b63d4cbf63 | |||
| bcd705bc93 | |||
| a6fcedcee9 | |||
| 599f96bfc3 | |||
| 269ef7992c | |||
| 6845b1bea9 | |||
| 3d5504b5e8 | |||
| 5fbd16c5ec | |||
| d9d5152915 | |||
| 9d1e796885 | |||
| 9b9bc12731 | |||
| 745e50508e | |||
| 3d10127cd7 | |||
| 127c3365d0 | |||
| 7a5845a8a0 | |||
| e3a80f7016 |
18
.env
18
.env
@@ -1,18 +0,0 @@
|
||||
# --- KI Provider Auswahl (google, openai, oder ollama) ---
|
||||
AI_PROVIDER=google
|
||||
|
||||
# --- API Keys ---
|
||||
GOOGLE_API_KEY=dein-google-key-hier
|
||||
OPENAI_API_KEY=dein-openai-key-hier
|
||||
|
||||
# --- Modelle ---
|
||||
GOOGLE_MODEL=gemini-2.5-flash
|
||||
OPENAI_MODEL=gpt-4o
|
||||
OLLAMA_MODEL=llama3
|
||||
|
||||
# --- Lokale KI (Ollama) ---
|
||||
OLLAMA_BASE_URL=http://127.0.0.1:11434/v1
|
||||
|
||||
# --- System Prompt ---
|
||||
# WICHTIG: Den Platzhalter {node_info} nicht entfernen!
|
||||
SYSTEM_PROMPT="Du bist der Pi-Orchestrator KI-Assistent. Deine Aufgabe ist es, Befehle auf Raspberry Pis auszuführen. Hier sind die verbundenen Nodes:\n{node_info}\nWenn der Nutzer dich bittet etwas zu tun, frage kurz nach ob du es ausführen sollst. Erst nach Bestätigung fügst du die Befehle im Format <EXECUTE target=\"IP\">befehl</EXECUTE> hinzu."
|
||||
4
.gitignore
vendored
Normal file
4
.gitignore
vendored
Normal file
@@ -0,0 +1,4 @@
|
||||
config/.env
|
||||
data/*.db
|
||||
workspace/
|
||||
*/.gitkeep
|
||||
10
README.md
10
README.md
@@ -1,5 +1,9 @@
|
||||
# PiDoBot
|
||||
# J.A.R.V.I.S. - AI
|
||||
|
||||
Setup:
|
||||
curl -sSL https://git.pi-farm.de/pi-farm/PiDoBot/raw/branch/main/setup.sh | bash
|
||||
### Setup:
|
||||
|
||||
```
|
||||
curl -sSL https://git.pi-farm.de/pi-farm/PiDoBot/raw/branch/dev/setup.sh -o setup.sh && \
|
||||
chmod +x setup.sh && \
|
||||
./setup.sh
|
||||
```
|
||||
|
||||
21
config/.env
Normal file
21
config/.env
Normal file
@@ -0,0 +1,21 @@
|
||||
# --- KI Provider Auswahl (google, openai, oder ollama) ---
|
||||
AI_PROVIDER='nvidia'
|
||||
|
||||
# --- API Keys ---
|
||||
GOOGLE_API_KEY=dein-google-key-hier
|
||||
OPENAI_API_KEY=dein-openai-key-hier
|
||||
NIDIA_API_KEY=dein-nvidia-key-hier
|
||||
|
||||
# --- Modelle ---
|
||||
GOOGLE_MODEL=gemini-2.5-flash
|
||||
OPENAI_MODEL=gpt-4o
|
||||
OLLAMA_MODEL=llama3
|
||||
|
||||
# --- Lokale KI (Ollama) ---
|
||||
OLLAMA_BASE_URL='http://192.168.178.118:11434/v1'
|
||||
|
||||
TELEGRAM_BOT_TOKEN=dein-telegram-bot-token
|
||||
ALLOWED_TELEGRAM_USER_ID=deine-telegram-id
|
||||
|
||||
WEB_USER_NAME=Tony
|
||||
NVIDIA_MODEL='moonshotai/kimi-k2.5'
|
||||
6
config/group_prompt.txt
Normal file
6
config/group_prompt.txt
Normal file
@@ -0,0 +1,6 @@
|
||||
Du bist JARVIS, ein hilfreicher Assistent in dieser Telegram-Gruppe.
|
||||
DEINE ROLLE:
|
||||
- Sei höflich, locker und hilfsbereit.
|
||||
- Du hast in diesem Chat KEINEN Zugriff auf Server-Systeme oder private Dateien.
|
||||
- Wenn dich jemand nach technischen Details zum Cluster fragt, antworte, dass dies nur dem Administrator vorbehalten ist.
|
||||
- Du duzt die Leute und sprichst sie mit ihrem Namen "{user_name}" an.
|
||||
36
config/system_prompt.txt
Normal file
36
config/system_prompt.txt
Normal file
@@ -0,0 +1,36 @@
|
||||
Dein Name ist JARVIS.
|
||||
Du bist ein präziser KI-Assistent für die Cluster-Verwaltung.
|
||||
|
||||
WICHTIGSTE REGEL: Deine Sprache ist locker, technisch versiert und du verwendest NIEMALS die Höflichkeitsform "Sie". Wir sind per Du.
|
||||
|
||||
DEIN WORKSPACE (GEDÄCHTNIS):
|
||||
Du hast Zugriff auf ein eigenes Arbeitsverzeichnis auf dem Host-System (localhost), um dir Notizen zu machen oder Todos für {user_name} zu speichern:
|
||||
- Arbeitsverzeichnis: {workspace_dir}
|
||||
- Notizen-Datei im Mark-Down format: {notes_file}
|
||||
- Todo-Liste im Mark-Down format: {todo_file}
|
||||
|
||||
Du kannst diese Dateien jederzeit lesen oder beschreiben ohne nachzufragen. Nutze dazu normale Shell-Befehle (z.B. cat, echo "text" >> datei, sed) mit dem Ziel "localhost":
|
||||
<EXECUTE target="localhost">befehl</EXECUTE>
|
||||
|
||||
PROTOKOLL FÜR BEFEHLE: Du arbeitest STRENG in zwei Phasen:
|
||||
|
||||
PHASE 1 (Vorschlag):
|
||||
Wenn {user_name} eine Aktion anfordert (z.B. Installation, Update, Ping, oder etwas in deinen Notizen zu speichern), erstelle NUR einen Text-Vorschlag.
|
||||
- Beschreibe kurz, was du tun würdest.
|
||||
- Nenne den Befehl als normalen Text (KEIN XML, KEIN <EXECUTE>).
|
||||
- Frage explizit nach Erlaubnis: "Soll ich das ausführen, {user_name}?"
|
||||
|
||||
PHASE 2 (Ausführung):
|
||||
NUR wenn {user_name} die Aktion bestätigt (z.B. "Ja", "Mach das"), gibst du den Befehl im XML-Format aus:
|
||||
<EXECUTE target="IP_ODER_LOCALHOST">befehl</EXECUTE>
|
||||
|
||||
REGELN, TONFALL UND ANREDE:
|
||||
- Du duzt {user_name} konsequent (nutze "Du", "Dein", niemals "Sie").
|
||||
- Integriere den Namen des Nutzers ({user_name}) natürlich in deine Antwort.
|
||||
- Variiere die Begrüßung: Nutze mal "Hallo {user_name}", mal "Gerne, {user_name}...", oder flechte den Namen mitten im Satz ein.
|
||||
- Vermeide es, den Namen einfach nur wie einen statischen Header ("{user_name}: ...") voranzustellen.
|
||||
- Bekannte Nodes:
|
||||
{node_info}
|
||||
- Nutze für <EXECUTE> bei fernen Rechnern IMMER die IP-Adresse, bei deinen eigenen Dateien IMMER "localhost".
|
||||
- Begrenze endlose Befehle (z.B. ping -c 4).
|
||||
- Führe NIEMALS einen <EXECUTE> Tag in PHASE 1 aus.
|
||||
494
main.py
494
main.py
@@ -1,494 +0,0 @@
|
||||
import os
|
||||
import pty
|
||||
import fcntl
|
||||
import subprocess
|
||||
import sqlite3
|
||||
import asyncio
|
||||
import openai
|
||||
import re
|
||||
import httpx
|
||||
from google import genai
|
||||
from google.genai import types
|
||||
import json
|
||||
from fastapi import FastAPI, WebSocket, BackgroundTasks, Request, Form, WebSocketDisconnect
|
||||
from fastapi.responses import RedirectResponse
|
||||
from fastapi.templating import Jinja2Templates
|
||||
from fastapi.staticfiles import StaticFiles
|
||||
from dotenv import load_dotenv, set_key
|
||||
|
||||
# Lade Umgebungsvariablen aus der .env Datei
|
||||
load_dotenv()
|
||||
|
||||
app = FastAPI()
|
||||
static_path = os.path.join(os.path.dirname(__file__), "static")
|
||||
app.mount("/static", StaticFiles(directory=static_path), name="static")
|
||||
templates = Jinja2Templates(directory="templates")
|
||||
|
||||
SSH_KEY = os.path.expanduser("~/.ssh/id_rsa")
|
||||
DB_PATH = "cluster.db"
|
||||
chat_history = []
|
||||
PROMPT_FILE = "system_prompt.txt"
|
||||
ENV_FILE = os.path.join(os.path.dirname(__file__), ".env") # NEU
|
||||
# --- KI KONFIGURATION (Werte aus .env laden) ---
|
||||
AI_PROVIDER = os.getenv("AI_PROVIDER", "google").lower()
|
||||
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", "")
|
||||
GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY", "")
|
||||
OLLAMA_BASE_URL = os.getenv("OLLAMA_BASE_URL", "http://127.0.0.1:11434/v1")
|
||||
|
||||
# Modelle aus .env laden (mit Standardwerten als Fallback)
|
||||
GOOGLE_MODEL = os.getenv("GOOGLE_MODEL", "gemini-2.5-flash")
|
||||
OPENAI_MODEL = os.getenv("OPENAI_MODEL", "gpt-4o")
|
||||
OLLAMA_MODEL = os.getenv("OLLAMA_MODEL", "llama3")
|
||||
|
||||
def get_system_prompt():
|
||||
# 1. Node Info aus DB holen
|
||||
conn = get_db()
|
||||
nodes = conn.execute('SELECT * FROM nodes').fetchall()
|
||||
conn.close()
|
||||
|
||||
node_info = ""
|
||||
for n in nodes:
|
||||
node_info += f"- Name: {n['name']}, IP: {n['ip']}, User: {n['user']}\n"
|
||||
|
||||
# 2. Versuche den Prompt aus der Datei zu laden
|
||||
if os.path.exists(PROMPT_FILE):
|
||||
with open(PROMPT_FILE, "r", encoding="utf-8") as f:
|
||||
template = f.read()
|
||||
else:
|
||||
# Fallback falls Datei fehlt
|
||||
template = "Du bist ein Helfer. Nodes:\n{node_info}\nNutze <EXECUTE target=\"IP\">cmd</EXECUTE>"
|
||||
print(f"⚠️ Warnung: {PROMPT_FILE} nicht gefunden. Nutze Fallback.")
|
||||
|
||||
return template.replace("{node_info}", node_info)
|
||||
|
||||
# --- KI FUNKTIONEN ---
|
||||
|
||||
async def get_ai_response(user_input, system_prompt):
|
||||
global chat_history
|
||||
|
||||
# 1. Die neue User-Nachricht dem Gedächtnis hinzufügen
|
||||
chat_history.append({"role": "user", "content": user_input})
|
||||
|
||||
# 2. Gedächtnis auf die letzten 30 Nachrichten begrenzen
|
||||
chat_history = chat_history[-30:]
|
||||
|
||||
ai_msg = ""
|
||||
|
||||
try:
|
||||
if AI_PROVIDER == "openai" or AI_PROVIDER == "ollama":
|
||||
messages = [{"role": "system", "content": system_prompt}] + chat_history
|
||||
|
||||
# Sicherstellen, dass die URL für Ollama korrekt endet
|
||||
if AI_PROVIDER == "ollama":
|
||||
url = OLLAMA_BASE_URL
|
||||
if not url.endswith('/v1') and not url.endswith('/v1/'):
|
||||
url = url.rstrip('/') + '/v1'
|
||||
key = "ollama"
|
||||
model_to_use = OLLAMA_MODEL
|
||||
else:
|
||||
url = None # Benutzt Standard OpenAI URL
|
||||
key = OPENAI_API_KEY
|
||||
model_to_use = OPENAI_MODEL
|
||||
|
||||
client = openai.OpenAI(base_url=url, api_key=key)
|
||||
response = client.chat.completions.create(
|
||||
model=model_to_use,
|
||||
messages=messages
|
||||
)
|
||||
ai_msg = response.choices[0].message.content
|
||||
|
||||
elif AI_PROVIDER == "google":
|
||||
# Für Google Gemini
|
||||
if not GOOGLE_API_KEY:
|
||||
return "Fehler: GOOGLE_API_KEY fehlt in der .env Datei!"
|
||||
|
||||
client = genai.Client(api_key=GOOGLE_API_KEY)
|
||||
|
||||
# Wir müssen unser Array in das spezielle Google-Format umwandeln
|
||||
google_history = []
|
||||
|
||||
# Alle Nachrichten AUSSER der allerletzten (die aktuelle User-Frage) in die History packen
|
||||
for msg in chat_history[:-1]:
|
||||
role = "user" if msg["role"] == "user" else "model"
|
||||
google_history.append(
|
||||
types.Content(role=role, parts=[types.Part.from_text(text=msg["content"])])
|
||||
)
|
||||
|
||||
# Chat MIT dem übersetzten Gedächtnis starten
|
||||
chat = client.chats.create(
|
||||
model=GOOGLE_MODEL,
|
||||
config=types.GenerateContentConfig(system_instruction=system_prompt),
|
||||
history=google_history
|
||||
)
|
||||
|
||||
# Jetzt erst die neue Nachricht an den Chat mit Gedächtnis schicken
|
||||
response = chat.send_message(user_input)
|
||||
ai_msg = response.text
|
||||
|
||||
except Exception as e:
|
||||
ai_msg = f"Fehler bei der KI-Anfrage: {e}"
|
||||
print(f"KI Fehler: {e}")
|
||||
|
||||
# 3. Die Antwort der KI ebenfalls ins Gedächtnis aufnehmen
|
||||
chat_history.append({"role": "assistant", "content": ai_msg})
|
||||
|
||||
return ai_msg
|
||||
|
||||
# --- DATENBANK INITIALISIERUNG ---
|
||||
def init_db():
|
||||
conn = sqlite3.connect(DB_PATH)
|
||||
conn.execute('''
|
||||
CREATE TABLE IF NOT EXISTS nodes (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
name TEXT,
|
||||
ip TEXT UNIQUE,
|
||||
user TEXT,
|
||||
status TEXT
|
||||
)
|
||||
''')
|
||||
conn.commit()
|
||||
conn.close()
|
||||
|
||||
init_db()
|
||||
|
||||
def get_db():
|
||||
conn = sqlite3.connect(DB_PATH)
|
||||
conn.row_factory = sqlite3.Row
|
||||
return conn
|
||||
|
||||
# --- WebSocket Manager für Logs & Chat ---
|
||||
class ConnectionManager:
|
||||
def __init__(self):
|
||||
self.active_connections: list[WebSocket] = []
|
||||
async def connect(self, websocket: WebSocket):
|
||||
await websocket.accept()
|
||||
self.active_connections.append(websocket)
|
||||
def disconnect(self, websocket: WebSocket):
|
||||
if websocket in self.active_connections:
|
||||
self.active_connections.remove(websocket)
|
||||
async def broadcast(self, message: str):
|
||||
for connection in self.active_connections:
|
||||
try:
|
||||
await connection.send_text(message)
|
||||
except:
|
||||
pass
|
||||
|
||||
manager = ConnectionManager()
|
||||
|
||||
# --- SSH Handshake (Nur Key kopieren) ---
|
||||
async def bootstrap_ssh_only(ip, user, password):
|
||||
await manager.broadcast(f"🔑 Initialisiere SSH-Handshake für {ip}...")
|
||||
# Nutzt sshpass um den Key einmalig mit Passwort zu hinterlegen
|
||||
ssh_copy_cmd = f"sshpass -p '{password}' ssh-copy-id -o StrictHostKeyChecking=no -i {SSH_KEY}.pub {user}@{ip}"
|
||||
|
||||
process = subprocess.Popen(ssh_copy_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True)
|
||||
for line in process.stdout:
|
||||
await manager.broadcast(f"SSH: {line.strip()}")
|
||||
|
||||
conn = get_db()
|
||||
conn.execute('UPDATE nodes SET status = "Bereit (Kein Docker)" WHERE ip = ?', (ip,))
|
||||
conn.commit()
|
||||
conn.close()
|
||||
await manager.broadcast(f"✅ Node {ip} ist verbunden. KI kann nun Befehle senden.")
|
||||
|
||||
# --- Routen ---
|
||||
|
||||
@app.get("/")
|
||||
async def index(request: Request):
|
||||
conn = get_db()
|
||||
nodes = conn.execute('SELECT * FROM nodes').fetchall()
|
||||
conn.close()
|
||||
return templates.TemplateResponse("index.html", {"request": request, "nodes": nodes})
|
||||
|
||||
@app.post("/add_node")
|
||||
async def add_node(background_tasks: BackgroundTasks, name: str = Form(...), ip: str = Form(...), user: str = Form(...), password: str = Form(...)):
|
||||
conn = get_db()
|
||||
try:
|
||||
conn.execute('INSERT INTO nodes (name, ip, user, status) VALUES (?, ?, ?, ?)', (name, ip, user, "Kopplung..."))
|
||||
conn.commit()
|
||||
background_tasks.add_task(bootstrap_ssh_only, ip, user, password)
|
||||
except sqlite3.IntegrityError: pass
|
||||
finally: conn.close()
|
||||
return RedirectResponse(url="/", status_code=303)
|
||||
|
||||
@app.post("/remove_node/{node_id}")
|
||||
async def remove_node(node_id: int):
|
||||
conn = get_db()
|
||||
conn.execute('DELETE FROM nodes WHERE id = ?', (node_id,))
|
||||
conn.commit()
|
||||
conn.close()
|
||||
return RedirectResponse(url="/", status_code=303)
|
||||
|
||||
@app.get("/api/settings")
|
||||
async def get_settings():
|
||||
return {
|
||||
"provider": AI_PROVIDER,
|
||||
"google_model": GOOGLE_MODEL,
|
||||
"openai_model": OPENAI_MODEL,
|
||||
"ollama_model": OLLAMA_MODEL,
|
||||
"ollama_base_url": OLLAMA_BASE_URL # URL ans Frontend schicken
|
||||
}
|
||||
|
||||
@app.post("/api/settings")
|
||||
async def update_settings(request: Request):
|
||||
# WICHTIG: OLLAMA_BASE_URL als global deklarieren
|
||||
global AI_PROVIDER, GOOGLE_MODEL, OPENAI_MODEL, OLLAMA_MODEL, OLLAMA_BASE_URL
|
||||
|
||||
data = await request.json()
|
||||
provider = data.get("provider")
|
||||
model = data.get("model")
|
||||
ollama_url = data.get("ollama_base_url") # URL vom Frontend empfangen
|
||||
|
||||
if provider:
|
||||
AI_PROVIDER = provider
|
||||
set_key(ENV_FILE, "AI_PROVIDER", provider)
|
||||
|
||||
if provider == "google" and model:
|
||||
GOOGLE_MODEL = model
|
||||
set_key(ENV_FILE, "GOOGLE_MODEL", model)
|
||||
elif provider == "openai" and model:
|
||||
OPENAI_MODEL = model
|
||||
set_key(ENV_FILE, "OPENAI_MODEL", model)
|
||||
elif provider == "ollama" and model:
|
||||
OLLAMA_MODEL = model
|
||||
set_key(ENV_FILE, "OLLAMA_MODEL", model)
|
||||
|
||||
# Wenn eine Ollama-URL mitgeschickt wurde, speichern wir sie
|
||||
if ollama_url:
|
||||
OLLAMA_BASE_URL = ollama_url
|
||||
set_key(ENV_FILE, "OLLAMA_BASE_URL", ollama_url)
|
||||
|
||||
return {"status": "success"}
|
||||
|
||||
@app.get("/api/models")
|
||||
async def get_models(provider: str, url: str = None):
|
||||
try:
|
||||
models = []
|
||||
|
||||
if provider == "ollama" and url:
|
||||
# Das Backend hat keine CORS-Probleme und fragt Ollama direkt
|
||||
clean_url = url.replace("/v1", "").rstrip("/")
|
||||
async with httpx.AsyncClient() as client:
|
||||
response = await client.get(f"{clean_url}/api/tags", timeout=5.0)
|
||||
data = response.json()
|
||||
models = [m["name"] for m in data.get("models", [])]
|
||||
|
||||
elif provider == "openai":
|
||||
if not OPENAI_API_KEY or "hier" in OPENAI_API_KEY: return {"models": []}
|
||||
# Hier greift das Backend auf deinen OpenAI API-Key zu
|
||||
import openai
|
||||
client = openai.AsyncOpenAI(api_key=OPENAI_API_KEY)
|
||||
response = await client.models.list()
|
||||
# Nur GPT-Modelle filtern, um die Liste sauber zu halten
|
||||
models = [m.id for m in response.data if "gpt" in m.id or "o1" in m.id]
|
||||
models.sort()
|
||||
|
||||
elif provider == "google":
|
||||
if not GOOGLE_API_KEY:
|
||||
return {"models": ["API-Key fehlt"]}
|
||||
|
||||
client = genai.Client(api_key=GOOGLE_API_KEY)
|
||||
models = []
|
||||
|
||||
# Im neuen SDK (google-genai) heißt das Feld 'supported_actions'
|
||||
for m in client.models.list():
|
||||
if 'generateContent' in m.supported_actions:
|
||||
# Wir nehmen den Namen und entfernen das 'models/' Präfix
|
||||
model_name = m.name.replace("models/", "")
|
||||
models.append(model_name)
|
||||
|
||||
models.sort()
|
||||
|
||||
return {"models": models}
|
||||
|
||||
except Exception as e:
|
||||
print(f"Fehler beim Abrufen der Modelle für {provider}: {str(e)}")
|
||||
return {"models": []} # Gibt eine leere Liste zurück -> Frontend nutzt Fallback
|
||||
|
||||
# --- WebSockets ---
|
||||
|
||||
@app.websocket("/ws/install_logs")
|
||||
async def log_websocket(websocket: WebSocket):
|
||||
await manager.connect(websocket)
|
||||
try:
|
||||
while True:
|
||||
await websocket.receive_text()
|
||||
except WebSocketDisconnect:
|
||||
manager.disconnect(websocket)
|
||||
|
||||
@app.websocket("/ws/terminal/{ip}")
|
||||
async def terminal_websocket(websocket: WebSocket, ip: str):
|
||||
await websocket.accept()
|
||||
conn = get_db()
|
||||
node = conn.execute('SELECT * FROM nodes WHERE ip = ?', (ip,)).fetchone()
|
||||
conn.close()
|
||||
|
||||
if not node:
|
||||
await websocket.send_text("\r\nFehler: Node nicht gefunden.\r\n")
|
||||
await websocket.close()
|
||||
return
|
||||
|
||||
# Pseudo-Terminal für interaktive SSH-Session
|
||||
master_fd, slave_fd = pty.openpty()
|
||||
proc = await asyncio.create_subprocess_exec(
|
||||
"ssh", "-o", "StrictHostKeyChecking=no", "-t", f"{node['user']}@{ip}",
|
||||
stdin=slave_fd, stdout=slave_fd, stderr=slave_fd
|
||||
)
|
||||
|
||||
async def pty_to_ws():
|
||||
# Setzt den Master-FD auf non-blocking
|
||||
fl = fcntl.fcntl(master_fd, fcntl.F_GETFL)
|
||||
fcntl.fcntl(master_fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
|
||||
try:
|
||||
while True:
|
||||
await asyncio.sleep(0.01)
|
||||
try:
|
||||
data = os.read(master_fd, 1024).decode(errors='ignore')
|
||||
if data:
|
||||
await websocket.send_text(data)
|
||||
except BlockingIOError:
|
||||
continue
|
||||
except Exception: pass
|
||||
|
||||
async def ws_to_pty():
|
||||
try:
|
||||
while True:
|
||||
data = await websocket.receive_text()
|
||||
os.write(master_fd, data.encode())
|
||||
except Exception: pass
|
||||
|
||||
try:
|
||||
await asyncio.gather(pty_to_ws(), ws_to_pty())
|
||||
finally:
|
||||
if proc.returncode is None: proc.terminate()
|
||||
os.close(master_fd)
|
||||
os.close(slave_fd)
|
||||
|
||||
# --- WEBSOCKET CHAT UPDATE ---
|
||||
|
||||
@app.websocket("/ws/chat")
|
||||
async def chat_endpoint(websocket: WebSocket):
|
||||
await websocket.accept()
|
||||
|
||||
# Check ob Key vorhanden ist
|
||||
if AI_PROVIDER == "google" and not GOOGLE_API_KEY:
|
||||
await websocket.send_text("⚠️ **Konfigurationsfehler:** Kein GOOGLE_API_KEY in der `.env` gefunden!")
|
||||
elif AI_PROVIDER == "openai" and not OPENAI_API_KEY:
|
||||
await websocket.send_text("⚠️ **Konfigurationsfehler:** Kein OPENAI_API_KEY in der `.env` gefunden!")
|
||||
|
||||
try:
|
||||
while True:
|
||||
user_msg = await websocket.receive_text()
|
||||
sys_prompt = get_system_prompt()
|
||||
ai_response = await get_ai_response(user_msg, sys_prompt)
|
||||
|
||||
# Befehle extrahieren
|
||||
commands_to_run = re.findall(r'<EXECUTE target="(.*?)">(.*?)</EXECUTE>', ai_response, re.IGNORECASE | re.DOTALL)
|
||||
clean_chat_msg = re.sub(r'<EXECUTE.*?>.*?</EXECUTE>', '', ai_response, flags=re.IGNORECASE | re.DOTALL).strip()
|
||||
|
||||
if clean_chat_msg:
|
||||
await websocket.send_text(clean_chat_msg)
|
||||
|
||||
if commands_to_run:
|
||||
# Liste für alle laufenden Tasks erstellen
|
||||
tasks = []
|
||||
for target_ip, cmd in commands_to_run:
|
||||
conn = get_db()
|
||||
node = conn.execute('SELECT * FROM nodes WHERE ip = ? OR name = ?', (target_ip.strip(), target_ip.strip())).fetchone()
|
||||
conn.close()
|
||||
|
||||
if node:
|
||||
# Wir erstellen den Task, starten ihn aber noch nicht separat
|
||||
tasks.append(run_remote_task(node['ip'], node['user'], cmd.strip(), "KI-Kommando"))
|
||||
|
||||
if tasks:
|
||||
# Dem Nutzer im Chat kurz Bescheid geben
|
||||
await websocket.send_text("ℹ️ *Warte auf Rückmeldungen der Nodes...*")
|
||||
|
||||
# Jetzt werden alle SSH-Befehle gleichzeitig gestartet und abgewartet
|
||||
await asyncio.gather(*tasks)
|
||||
|
||||
# Sobald asyncio.gather fertig ist, geht es hier weiter mit dem Follow-up:
|
||||
follow_up_prompt = "Die Befehle wurden ausgeführt. Bitte fasse die Ergebnisse kurz zusammen."
|
||||
ai_summary = await get_ai_response(follow_up_prompt, sys_prompt)
|
||||
|
||||
await websocket.send_text("--- Ergebnis-Zusammenfassung ---")
|
||||
await websocket.send_text(ai_summary)
|
||||
|
||||
except Exception as e:
|
||||
print(f"Chat Fehler: {e}")
|
||||
|
||||
async def run_remote_task(ip, user, cmd, task_name):
|
||||
global chat_history # Zugriff auf das Gedächtnis der KI
|
||||
|
||||
await manager.broadcast(f"🚀 KI-Task gestartet: {cmd} auf {ip}")
|
||||
|
||||
ssh_cmd = f"ssh -o StrictHostKeyChecking=no {user}@{ip} '{cmd}'"
|
||||
|
||||
process = await asyncio.create_subprocess_shell(
|
||||
ssh_cmd,
|
||||
stdout=asyncio.subprocess.PIPE,
|
||||
stderr=asyncio.subprocess.STDOUT
|
||||
)
|
||||
|
||||
# Hier speichern wir die komplette Terminal-Ausgabe
|
||||
full_output = ""
|
||||
|
||||
while True:
|
||||
line = await process.stdout.readline()
|
||||
if not line:
|
||||
break
|
||||
decoded_line = line.decode('utf-8', errors='ignore').strip()
|
||||
if decoded_line:
|
||||
await manager.broadcast(f"🛠️ {decoded_line}")
|
||||
full_output += decoded_line + "\n"
|
||||
# Gib dem Event-Loop kurz Zeit, andere Tasks (wie WebSockets) zu bedienen
|
||||
await asyncio.sleep(0.001)
|
||||
|
||||
await process.wait()
|
||||
|
||||
# --- NEU: Feedback an die KI ---
|
||||
# Wir bereiten den Bericht vor
|
||||
if not full_output.strip():
|
||||
full_output = "Befehl wurde ohne Ausgabe ausgeführt (Exit Code 0)."
|
||||
|
||||
system_report = f"[SYSTEM-RÜCKMELDUNG] Der Befehl '{cmd}' auf Node {ip} wurde beendet. Ausgabe des Terminals:\n{full_output}"
|
||||
|
||||
# Wir schmuggeln den Bericht als "User"-Nachricht in den Verlauf,
|
||||
# damit die KI beim nächsten Mal weiß, was passiert ist.
|
||||
chat_history.append({"role": "user", "content": system_report})
|
||||
# -------------------------------
|
||||
|
||||
if "docker" in cmd.lower() and "install" in cmd.lower():
|
||||
# Kleiner Bonus: Nur updaten, wenn wirklich installiert wird
|
||||
await manager.broadcast(f"✨ Bitte aktualisiere den Status für {ip} manuell über das Refresh-Icon.")
|
||||
|
||||
await manager.broadcast(f"✅ Befehl auf {ip} abgeschlossen.")
|
||||
|
||||
# --- Neuer Endpunkt: Manueller Refresh-Check ---
|
||||
@app.get("/refresh_status/{node_id}")
|
||||
async def refresh_status(node_id: int):
|
||||
conn = get_db()
|
||||
node = conn.execute('SELECT * FROM nodes WHERE id = ?', (node_id,)).fetchone()
|
||||
|
||||
if node:
|
||||
# Kurzer Check via SSH, ob Docker antwortet
|
||||
check_cmd = "command -v docker >/dev/null 2>&1 && echo 'Docker Aktiv' || echo 'Bereit (Kein Docker)'"
|
||||
ssh_cmd = f"ssh -o StrictHostKeyChecking=no -o ConnectTimeout=2 {node['user']}@{node['ip']} \"{check_cmd}\""
|
||||
try:
|
||||
# Wir führen den Befehl aus
|
||||
new_status = subprocess.check_output(ssh_cmd, shell=True).decode().strip()
|
||||
except Exception:
|
||||
new_status = "Offline/Fehler"
|
||||
|
||||
conn.execute('UPDATE nodes SET status = ? WHERE id = ?', (new_status, node_id))
|
||||
conn.commit()
|
||||
conn.close()
|
||||
return {"status": new_status} # Wir senden nur den Status zurück
|
||||
|
||||
conn.close()
|
||||
return {"status": "Unbekannt"}
|
||||
|
||||
if __name__ == "__main__":
|
||||
import uvicorn
|
||||
uvicorn.run(app, host="0.0.0.0", port=8000)
|
||||
300
setup.sh
Normal file → Executable file
300
setup.sh
Normal file → Executable file
@@ -1,81 +1,275 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Konfiguration
|
||||
# ==========================================
|
||||
# Farbdefinitionen
|
||||
# ==========================================
|
||||
C_DEF=$'\e[0m' # Default / Reset
|
||||
C_BOLD=$'\e[1m' # Fett
|
||||
C_CYAN=$'\e[1;36m' # Cyan
|
||||
C_BLUE=$'\e[1;34m' # Blau
|
||||
C_GREEN=$'\e[1;32m' # Grün
|
||||
C_YELLOW=$'\e[1;33m' # Gelb
|
||||
C_RED=$'\e[1;31m' # Rot
|
||||
|
||||
REPO_URL="https://git.pi-farm.de/pi-farm/PiDoBot.git"
|
||||
INSTALL_DIR="pi-orchestrator"
|
||||
SERVICE_FILE="/etc/systemd/system/jarvis.service"
|
||||
|
||||
echo ">>> Starte Setup für den Pi-Orchestrator Master..."
|
||||
echo -e "${C_CYAN}${C_BOLD}==========================================${C_DEF}"
|
||||
echo -e "${C_CYAN}${C_BOLD}>>> J.A.R.V.I.S.-AI - Setup <<<${C_DEF}"
|
||||
echo -e "${C_CYAN}${C_BOLD}==========================================${C_DEF}"
|
||||
|
||||
# 1. Prüfen, ob Git installiert ist, ansonsten installieren
|
||||
if ! command -v git &> /dev/null; then
|
||||
echo "--- Git nicht gefunden. Installiere Git..."
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y git
|
||||
else
|
||||
echo "--- Git ist bereits installiert."
|
||||
fi
|
||||
# 0. Installationsverzeichnis abfragen
|
||||
read -p "${C_CYAN}Installationsverzeichnis (Standard: ${C_YELLOW}/home/pi/jarvis-ai${C_CYAN}): ${C_DEF}" input_dir </dev/tty
|
||||
INSTALL_DIR=${input_dir:-/home/pi/jarvis-ai}
|
||||
|
||||
# 2. Weitere zwingende System-Abhängigkeiten installieren
|
||||
# sshpass: Für das automatische Kopieren des SSH-Keys auf neue Nodes
|
||||
# python3-venv: Für die isolierte Python-Umgebung
|
||||
echo "--- Installiere benötigte System-Pakete..."
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y wget sshpass python3-pip python3-venv
|
||||
# Pfad normalisieren
|
||||
INSTALL_DIR=$(realpath -m "$INSTALL_DIR")
|
||||
|
||||
<<<<<<< HEAD
|
||||
# 3. Repository klonen
|
||||
if [ ! -d "$INSTALL_DIR" ]; then
|
||||
echo "--- Klone Repository von $REPO_URL..."
|
||||
git clone "$REPO_URL" "$INSTALL_DIR"
|
||||
git clone --branch main --single-branch "$REPO_URL" "$INSTALL_DIR"
|
||||
else
|
||||
echo "--- Verzeichnis $INSTALL_DIR existiert bereits. Überspringe Klonen..."
|
||||
fi
|
||||
|
||||
# In das Verzeichnis wechseln
|
||||
=======
|
||||
echo -e "\n${C_GREEN}Zielverzeichnis: ${C_BOLD}$INSTALL_DIR${C_DEF}"
|
||||
mkdir -p "$INSTALL_DIR"
|
||||
>>>>>>> dev
|
||||
cd "$INSTALL_DIR" || exit
|
||||
|
||||
# 4. Virtual Environment und Python-Abhängigkeiten einrichten
|
||||
echo "--- Erstelle Python Virtual Environment..."
|
||||
python3 -m venv venv
|
||||
source venv/bin/activate
|
||||
# 1. System-Abhängigkeiten
|
||||
echo -e "\n${C_BLUE}${C_BOLD}--- 1. Prüfe und installiere System-Pakete...${C_DEF}"
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y git wget sshpass python3-pip python3-venv iproute2
|
||||
|
||||
echo "--- Installiere Python-Pakete aus requirements.txt..."
|
||||
pip install --upgrade pip
|
||||
if [ -f "requirements.txt" ]; then
|
||||
pip install -r requirements.txt
|
||||
# 2. Repository klonen
|
||||
echo -e "\n${C_BLUE}${C_BOLD}--- 2. Hole Quellcode...${C_DEF}"
|
||||
if [ ! -d ".git" ]; then
|
||||
git clone --branch dev --single-branch "$REPO_URL" .
|
||||
else
|
||||
echo "FEHLER: requirements.txt nicht im Repository gefunden!"
|
||||
echo -e "${C_YELLOW}Repo bereits vorhanden, aktualisiere...${C_DEF}"
|
||||
git pull
|
||||
fi
|
||||
|
||||
# 3. Virtual Environment
|
||||
echo -e "\n${C_BLUE}${C_BOLD}--- 3. Richte Python-Umgebung ein...${C_DEF}"
|
||||
if [ ! -d "venv" ]; then
|
||||
python3 -m venv venv
|
||||
fi
|
||||
# Nutze den direkten Pfad zum Pip im Venv
|
||||
./venv/bin/pip install --upgrade pip
|
||||
if [ -f "source/requirements.txt" ]; then
|
||||
./venv/bin/pip install -r source/requirements.txt
|
||||
else
|
||||
echo -e "${C_RED}❌ FEHLER: requirements.txt nicht gefunden!${C_DEF}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# 5. SSH-Key für den Master prüfen/erstellen (für passwortlosen Zugriff auf Nodes)
|
||||
# 4. SSH-Key & Konfiguration (Silent Mode)
|
||||
echo -e "\n${C_BLUE}${C_BOLD}--- 4. Prüfe SSH-Schlüssel und Konfiguration...${C_DEF}"
|
||||
if [ ! -f "$HOME/.ssh/id_rsa" ]; then
|
||||
echo "--- Generiere SSH-Key für die passwortlose Kommunikation..."
|
||||
ssh-keygen -t rsa -N "" -f "$HOME/.ssh/id_rsa"
|
||||
echo -e "${C_GREEN}✅ SSH-Key generiert.${C_DEF}"
|
||||
else
|
||||
echo "--- SSH-Key existiert bereits."
|
||||
echo -e "${C_GREEN}✅ SSH-Key existiert bereits.${C_DEF}"
|
||||
fi
|
||||
|
||||
# 6. Static Ordner anlegen und Dateien herunterladen
|
||||
if [ ! -f "./static" ]; then
|
||||
echo "--- Static-Ordner anlegen und Dateien herunterladen..."
|
||||
mkdir -p static
|
||||
cd static
|
||||
# GridStack
|
||||
wget https://cdn.jsdelivr.net/npm/gridstack@7.2.3/dist/gridstack.min.css
|
||||
wget https://cdn.jsdelivr.net/npm/gridstack@7.2.3/dist/gridstack-all.js
|
||||
# Xterm
|
||||
wget https://cdn.jsdelivr.net/npm/xterm@5.1.0/css/xterm.css
|
||||
wget https://cdn.jsdelivr.net/npm/xterm@5.1.0/lib/xterm.js
|
||||
# Xterm Fit Addon
|
||||
wget https://cdn.jsdelivr.net/npm/xterm-addon-fit@0.7.0/lib/xterm-addon-fit.js
|
||||
# marked.js
|
||||
wget https://cdn.jsdelivr.net/npm/marked/marked.min.js
|
||||
cd ..
|
||||
else
|
||||
echo "--- Static-Dateien existiert bereits."
|
||||
fi
|
||||
echo ">>> Installation abgeschlossen!"
|
||||
echo "--- Starte den Master-Server auf Port 8000..."
|
||||
# SSH 'Silent Mode' einrichten, um Spam im Log zu verhindern
|
||||
mkdir -p "$HOME/.ssh"
|
||||
if [ ! -f "$HOME/.ssh/config" ] || ! grep -q "StrictHostKeyChecking no" "$HOME/.ssh/config"; then
|
||||
echo -e "${C_YELLOW}Richte SSH Silent-Mode für lokale Netze ein...${C_DEF}"
|
||||
cat <<EOF >> "$HOME/.ssh/config"
|
||||
|
||||
# 6. Programm starten
|
||||
python3 -m uvicorn main:app --host 0.0.0.0 --port 8000
|
||||
Host 192.168.* 10.* 172.*
|
||||
StrictHostKeyChecking no
|
||||
UserKnownHostsFile /dev/null
|
||||
LogLevel ERROR
|
||||
EOF
|
||||
chmod 600 "$HOME/.ssh/config"
|
||||
fi
|
||||
|
||||
# 5. Static Dateien
|
||||
echo -e "\n${C_BLUE}${C_BOLD}--- 5. Lade Frontend-Bibliotheken...${C_DEF}"
|
||||
mkdir -p source/static
|
||||
cd source/static
|
||||
wget -nc -q https://cdn.jsdelivr.net/npm/gridstack@7.2.3/dist/gridstack.min.css
|
||||
wget -nc -q https://cdn.jsdelivr.net/npm/gridstack@7.2.3/dist/gridstack-all.js
|
||||
wget -nc -q https://cdn.jsdelivr.net/npm/xterm@5.1.0/css/xterm.css
|
||||
wget -nc -q https://cdn.jsdelivr.net/npm/xterm@5.1.0/lib/xterm.js
|
||||
wget -nc -q https://cdn.jsdelivr.net/npm/xterm-addon-fit@0.7.0/lib/xterm-addon-fit.js
|
||||
wget -nc -q https://cdn.jsdelivr.net/npm/marked/marked.min.js
|
||||
cd "$INSTALL_DIR"
|
||||
|
||||
# 6. .env Setup
|
||||
echo -e "\n${C_BLUE}${C_BOLD}--- 6. Konfiguration (.env)...${C_DEF}"
|
||||
mkdir -p config
|
||||
ENV_FILE="config/.env"
|
||||
|
||||
# Standardwerte definieren
|
||||
web_user="Tony"
|
||||
ai_prov="google"
|
||||
google_key=""
|
||||
openai_key=""
|
||||
nvidia_key=""
|
||||
ollama_url="http://127.0.0.1:11434/v1"
|
||||
google_mod="gemini-2.5-flash"
|
||||
openai_mod="gpt-4o"
|
||||
tg_token=""
|
||||
tg_id=""
|
||||
|
||||
# Falls die Datei existiert, alte Werte laden
|
||||
if [ -f "$ENV_FILE" ]; then
|
||||
echo -e "${C_YELLOW}Bestehende .env gefunden. Lade aktuelle Werte...${C_DEF}"
|
||||
set -a
|
||||
source "$ENV_FILE"
|
||||
set +a
|
||||
|
||||
web_user="${WEB_USER_NAME:-$web_user}"
|
||||
ai_prov="${AI_PROVIDER:-$ai_prov}"
|
||||
google_key="${GOOGLE_API_KEY:-$google_key}"
|
||||
openai_key="${OPENAI_API_KEY:-$openai_key}"
|
||||
nvidia_key="${NVIDIA_API_KEY:-$nvidia_key}"
|
||||
ollama_url="${OLLAMA_BASE_URL:-$ollama_url}"
|
||||
google_mod="${GOOGLE_MODEL:-$google_mod}"
|
||||
openai_mod="${OPENAI_MODEL:-$openai_mod}"
|
||||
tg_token="${TELEGRAM_BOT_TOKEN:-$tg_token}"
|
||||
tg_id="${ALLOWED_TELEGRAM_USER_ID:-$tg_id}"
|
||||
fi
|
||||
|
||||
echo -e "\nBitte gib die neuen Werte ein. (Drücke ENTER, um den Wert in Klammern beizubehalten):"
|
||||
|
||||
# Farbige Prompts
|
||||
read -p "${C_CYAN}Dein Web-Benutzername [${C_YELLOW}$web_user${C_CYAN}]: ${C_DEF}" input_web_user </dev/tty
|
||||
web_user=${input_web_user:-$web_user}
|
||||
|
||||
read -p "${C_CYAN}Primäre KI (google, openai, nvidia, ollama) [${C_YELLOW}$ai_prov${C_CYAN}]: ${C_DEF}" input_ai_prov </dev/tty
|
||||
ai_prov=${input_ai_prov:-$ai_prov}
|
||||
|
||||
disp_gkey=$( [ -n "$google_key" ] && echo "${google_key:0:5}***" || echo "" )
|
||||
read -p "${C_CYAN}Google Gemini API Key [${C_YELLOW}$disp_gkey${C_CYAN}]: ${C_DEF}" input_google_key </dev/tty
|
||||
google_key=${input_google_key:-$google_key}
|
||||
|
||||
disp_okey=$( [ -n "$openai_key" ] && echo "${openai_key:0:5}***" || echo "" )
|
||||
read -p "${C_CYAN}OpenAI API Key [${C_YELLOW}$disp_okey${C_CYAN}]: ${C_DEF}" input_openai_key </dev/tty
|
||||
openai_key=${input_openai_key:-$openai_key}
|
||||
|
||||
disp_nkey=$( [ -n "$nvidia_key" ] && echo "${nvidia_key:0:5}***" || echo "" )
|
||||
read -p "${C_CYAN}NVIDIA API Key [${C_YELLOW}$disp_nkey${C_CYAN}]: ${C_DEF}" input_nvidia_key </dev/tty
|
||||
nvidia_key=${input_nvidia_key:-$nvidia_key}
|
||||
|
||||
read -p "${C_CYAN}Ollama Base URL [${C_YELLOW}$ollama_url${C_CYAN}]: ${C_DEF}" input_ollama_url </dev/tty
|
||||
ollama_url=${input_ollama_url:-$ollama_url}
|
||||
|
||||
disp_tgtoken=$( [ -n "$tg_token" ] && echo "${tg_token:0:8}***" || echo "" )
|
||||
read -p "${C_CYAN}Telegram Bot Token [${C_YELLOW}$disp_tgtoken${C_CYAN}]: ${C_DEF}" input_tg_token </dev/tty
|
||||
tg_token=${input_tg_token:-$tg_token}
|
||||
|
||||
read -p "${C_CYAN}Erlaubte Telegram User ID [${C_YELLOW}$tg_id${C_CYAN}]: ${C_DEF}" input_tg_id </dev/tty
|
||||
tg_id=${input_tg_id:-$tg_id}
|
||||
|
||||
# Neue .env schreiben
|
||||
cat <<EOF > "$ENV_FILE"
|
||||
WEB_USER_NAME=$web_user
|
||||
AI_PROVIDER=$ai_prov
|
||||
GOOGLE_API_KEY=$google_key
|
||||
OPENAI_API_KEY=$openai_key
|
||||
NVIDIA_API_KEY=$nvidia_key
|
||||
OLLAMA_BASE_URL=$ollama_url
|
||||
GOOGLE_MODEL=$google_mod
|
||||
OPENAI_MODEL=$openai_mod
|
||||
TELEGRAM_BOT_TOKEN=$tg_token
|
||||
ALLOWED_TELEGRAM_USER_ID=$tg_id
|
||||
EOF
|
||||
echo -e "${C_GREEN}✅ Konfiguration erfolgreich gespeichert.${C_DEF}"
|
||||
|
||||
|
||||
# ==========================================
|
||||
# Weiche: Update vs. Neuinstallation
|
||||
# ==========================================
|
||||
|
||||
if [ -f "$SERVICE_FILE" ]; then
|
||||
echo -e "\n${C_BLUE}${C_BOLD}--- 7. Update-Modus: Bestehender Dienst gefunden...${C_DEF}"
|
||||
echo -e "${C_YELLOW}Überspringe Port- und Firewall-Check, starte Dienst neu...${C_DEF}"
|
||||
|
||||
sudo systemctl daemon-reload
|
||||
sudo systemctl restart jarvis.service
|
||||
|
||||
echo -e "\n${C_GREEN}${C_BOLD}==========================================${C_DEF}"
|
||||
echo -e "${C_GREEN}✅ Update komplett! J.A.R.V.I.S. wurde erfolgreich aktualisiert und neu gestartet.${C_DEF}"
|
||||
echo -e "Verzeichnis: ${C_BOLD}$INSTALL_DIR${C_DEF}"
|
||||
echo -e "Web-Interface: ${C_CYAN}http://<deine-ip>:8000${C_DEF}"
|
||||
echo -e "Log-Ausgabe: ${C_YELLOW}sudo journalctl -u jarvis -f${C_DEF}"
|
||||
echo -e "${C_GREEN}${C_BOLD}==========================================${C_DEF}"
|
||||
else
|
||||
|
||||
# 7. Port Check
|
||||
echo -e "\n${C_BLUE}${C_BOLD}--- 7. Prüfe Port 8000...${C_DEF}"
|
||||
if ss -tuln | grep -q ":8000 "; then
|
||||
echo -e "${C_RED}⚠️ WARNUNG: Port 8000 wird bereits von einem anderen Prozess verwendet!${C_DEF}"
|
||||
echo "J.A.R.V.I.S. wird möglicherweise nicht starten können. Bitte prüfe dies nach dem Setup."
|
||||
read -p "Drücke ENTER, um trotzdem fortzufahren..." </dev/tty
|
||||
else
|
||||
echo -e "${C_GREEN}✅ Port 8000 ist frei.${C_DEF}"
|
||||
fi
|
||||
|
||||
# 8. Firewall Check
|
||||
echo -e "\n${C_BLUE}${C_BOLD}--- 8. Prüfe Firewall-Einstellungen...${C_DEF}"
|
||||
if command -v ufw >/dev/null 2>&1; then
|
||||
if sudo ufw status | grep -qw "active"; then
|
||||
echo -e "${C_YELLOW}UFW Firewall ist aktiv. Öffne Port 8000...${C_DEF}"
|
||||
sudo ufw allow 8000/tcp
|
||||
echo -e "${C_GREEN}✅ Port 8000 (TCP) in UFW freigegeben.${C_DEF}"
|
||||
else
|
||||
echo -e "${C_GREEN}✅ UFW ist installiert, aber inaktiv. Keine Blockade zu erwarten.${C_DEF}"
|
||||
fi
|
||||
else
|
||||
echo -e "${C_GREEN}✅ Keine UFW Firewall gefunden. Überspringe Firewall-Setup.${C_DEF}"
|
||||
fi
|
||||
|
||||
# 9. Systemd Service einrichten
|
||||
echo -e "\n${C_BLUE}${C_BOLD}--- 9. Systemdienst (Autostart) einrichten...${C_DEF}"
|
||||
read -p "${C_CYAN}Möchtest du J.A.R.V.I.S. als Hintergrunddienst installieren? (j/N): ${C_DEF}" setup_service </dev/tty
|
||||
|
||||
if [[ "$setup_service" =~ ^[jJ]$ ]]; then
|
||||
echo "Erstelle Service-Datei in $SERVICE_FILE..."
|
||||
sudo bash -c "cat <<EOF > $SERVICE_FILE
|
||||
[Unit]
|
||||
Description=J.A.R.V.I.S. AI Web und Telegram Bot
|
||||
After=network.target
|
||||
|
||||
[Service]
|
||||
User=$USER
|
||||
WorkingDirectory=$INSTALL_DIR
|
||||
Environment=\"PYTHONPATH=$INSTALL_DIR/source\"
|
||||
ExecStart=$INSTALL_DIR/venv/bin/python -m uvicorn source.main:app --host 0.0.0.0 --port 8000 --proxy-headers
|
||||
Restart=always
|
||||
RestartSec=5
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
EOF"
|
||||
|
||||
echo "Aktiviere und starte den Dienst..."
|
||||
sudo systemctl daemon-reload
|
||||
sudo systemctl enable jarvis.service
|
||||
sudo systemctl restart jarvis.service
|
||||
|
||||
echo -e "\n${C_GREEN}${C_BOLD}==========================================${C_DEF}"
|
||||
echo -e "${C_GREEN}✅ Setup komplett! J.A.R.V.I.S. läuft als Dienst.${C_DEF}"
|
||||
echo -e "Verzeichnis: ${C_BOLD}$INSTALL_DIR${C_DEF}"
|
||||
echo -e "Web-Interface: ${C_CYAN}http://<deine-ip>:8000${C_DEF}"
|
||||
echo -e "Log-Ausgabe: ${C_YELLOW}sudo journalctl -u jarvis -f${C_DEF}"
|
||||
echo -e "${C_GREEN}${C_BOLD}==========================================${C_DEF}"
|
||||
else
|
||||
echo -e "\n${C_GREEN}${C_BOLD}==========================================${C_DEF}"
|
||||
echo -e "${C_GREEN}✅ Setup komplett! Du kannst J.A.R.V.I.S. nun manuell starten:${C_DEF}"
|
||||
echo "cd $INSTALL_DIR"
|
||||
echo "source venv/bin/activate"
|
||||
echo "export PYTHONPATH=\$PYTHONPATH:\$(pwd)/source"
|
||||
echo "python3 -m uvicorn source.main:app --host 0.0.0.0 --port 8000"
|
||||
echo -e "${C_GREEN}${C_BOLD}==========================================${C_DEF}"
|
||||
fi
|
||||
849
source/main.py
Normal file
849
source/main.py
Normal file
@@ -0,0 +1,849 @@
|
||||
import os
|
||||
import pty
|
||||
import fcntl
|
||||
import subprocess
|
||||
import sqlite3
|
||||
import asyncio
|
||||
import openai
|
||||
import re
|
||||
import httpx
|
||||
import struct
|
||||
import termios
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
from telegram import Update
|
||||
from telegram.ext import ApplicationBuilder, ContextTypes, MessageHandler, filters
|
||||
from telegram.error import InvalidToken
|
||||
from google import genai
|
||||
from google.genai import types
|
||||
import json
|
||||
from fastapi import FastAPI, WebSocket, BackgroundTasks, Request, Form, WebSocketDisconnect
|
||||
from fastapi.responses import RedirectResponse
|
||||
from fastapi.templating import Jinja2Templates
|
||||
from fastapi.staticfiles import StaticFiles
|
||||
from dotenv import load_dotenv, set_key
|
||||
from fastapi.middleware.trustedhost import TrustedHostMiddleware
|
||||
from fastapi.middleware.cors import CORSMiddleware
|
||||
|
||||
# Base directory of this module (source/)
BASE_DIR = Path(__file__).resolve().parent

# Sibling folders live one level above source/.
ROOT_DIR = BASE_DIR.parent
CONFIG_DIR = ROOT_DIR / "config"
DATA_DIR = ROOT_DIR / "data"
WORKSPACE_DIR = ROOT_DIR / "workspace"

# Configuration files
ENV_FILE = CONFIG_DIR / ".env"
load_dotenv(ENV_FILE)
DB_PATH = DATA_DIR / "cluster.db"
PROMPT_FILE = CONFIG_DIR / "system_prompt.txt"

WEB_USER_NAME = os.getenv("WEB_USER_NAME", "Admin")

# Workspace files (the assistant's persistent "memory")
NOTES_FILE = WORKSPACE_DIR / "NOTIZEN.md"
TODO_FILE = WORKSPACE_DIR / "TODO.md"

# FIX: create ALL required directories up front — the original called
# WORKSPACE_DIR.mkdir() twice and created DATA_DIR only after seeding
# the workspace files.
WORKSPACE_DIR.mkdir(exist_ok=True)
DATA_DIR.mkdir(exist_ok=True)

# Seed the workspace files on first run so the AI always has them.
for f in [NOTES_FILE, TODO_FILE]:
    if not f.exists():
        f.write_text(f"# {f.name}\nHier fängt dein Gedächtnis an, J.A.R.V.I.S.\n", encoding="utf-8")

# FastAPI app plus template/static paths (relative to source/)
app = FastAPI()
templates = Jinja2Templates(directory=BASE_DIR / "templates")
app.mount("/static", StaticFiles(directory=BASE_DIR / "static"), name="static")
|
||||
|
||||
SSH_KEY = os.path.expanduser("~/.ssh/id_rsa")

# One history list per conversation. The web chat and the private
# Telegram chat deliberately share the "private" history.
chat_histories = {
    "private": []
}

# --- AI provider configuration (read once from the environment) ---
AI_PROVIDER = os.getenv("AI_PROVIDER", "google").lower()
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", "")
GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY", "")
NVIDIA_API_KEY = os.getenv("NVIDIA_API_KEY", "")
OLLAMA_BASE_URL = os.getenv("OLLAMA_BASE_URL", "http://127.0.0.1:11434/v1")

# Model names per provider (overridable via .env)
GOOGLE_MODEL = os.getenv("GOOGLE_MODEL", "gemini-2.0-flash")
OPENAI_MODEL = os.getenv("OPENAI_MODEL", "gpt-4o")
OLLAMA_MODEL = os.getenv("OLLAMA_MODEL", "llama3")
NVIDIA_MODEL = os.getenv("NVIDIA_MODEL", "moonshotai/kimi-k2.5")

# --- Telegram bot configuration ---
TELEGRAM_TOKEN = os.getenv("TELEGRAM_BOT_TOKEN", "")
ALLOWED_ID = os.getenv("ALLOWED_TELEGRAM_USER_ID", "")
telegram_app = None  # set on startup when the bot is configured
|
||||
|
||||
# --- Database initialisation (extended schema) ---
def init_db():
    """Create the nodes table if it does not exist yet.

    Schema includes the extended inventory columns:
    sudo_password, os, arch, docker_installed.
    """
    conn = sqlite3.connect(DB_PATH)
    try:
        conn.execute('''
            CREATE TABLE IF NOT EXISTS nodes (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                name TEXT,
                ip TEXT UNIQUE,
                user TEXT,
                sudo_password TEXT,
                os TEXT DEFAULT 'Unbekannt',
                arch TEXT DEFAULT 'Unbekannt',
                docker_installed INTEGER DEFAULT 0,
                status TEXT
            )
        ''')
        conn.commit()
    finally:
        # FIX: the original leaked the connection when CREATE TABLE raised.
        conn.close()

init_db()
|
||||
|
||||
def get_db():
    """Open the cluster database with name-based row access (sqlite3.Row)."""
    connection = sqlite3.connect(DB_PATH)
    connection.row_factory = sqlite3.Row
    return connection
|
||||
|
||||
def get_system_prompt(current_user=WEB_USER_NAME, is_admin=False):
    """Build the system prompt for the AI.

    Admins get system_prompt.txt enriched with the node inventory and the
    workspace paths; everyone else gets group_prompt.txt. The {user_name}
    placeholder is substituted in both variants.
    """
    # Admins and group users read different prompt templates.
    template_name = "system_prompt.txt" if is_admin else "group_prompt.txt"
    prompt_path = CONFIG_DIR / template_name

    if prompt_path.exists():
        prompt = prompt_path.read_text(encoding="utf-8")
    else:
        # Fallback in case the template file is missing.
        prompt = f"Hallo {current_user}, ich bin dein Assistent."

    # Substitute the user name (present in both templates).
    prompt = prompt.replace("{user_name}", current_user)

    # Server details are injected for admins only.
    if not is_admin:
        return prompt

    conn = get_db()
    nodes = conn.execute('SELECT * FROM nodes').fetchall()
    conn.close()

    info_lines = []
    for n in nodes:
        docker_str = "Ja" if n['docker_installed'] else "Nein"
        info_lines.append(
            f"- Name: {n['name']}, IP: {n['ip']}, User: {n['user']}, OS: {n['os']}, Arch: {n['arch']}, Docker: {docker_str}\n"
        )
    node_info = "".join(info_lines)

    prompt = prompt.replace("{node_info}", node_info)
    prompt = prompt.replace("{workspace_dir}", str(WORKSPACE_DIR))
    prompt = prompt.replace("{notes_file}", str(NOTES_FILE))
    prompt = prompt.replace("{todo_file}", str(TODO_FILE))

    return prompt
|
||||
|
||||
# --- AI FUNCTIONS ---

async def get_ai_response(user_msg, system_prompt, history_list):
    """Ask the configured AI provider for a reply.

    Args:
        user_msg: the current user question (already appended to history_list
            by the caller).
        system_prompt: system instruction for the model.
        history_list: the conversation history to use — NOT hard-wired to the
            "private" history, so group chats keep separate memories.

    Returns:
        The model's text answer, or a German error string on failure.
        Persisting the answer into the history is the CALLER's job.
    """
    ai_msg = ""

    try:
        # OpenAI, Ollama and NVIDIA all speak the OpenAI-compatible API.
        if AI_PROVIDER in ["openai", "ollama", "nvidia"]:
            # FIX: strip the internal "timestamp" key from history entries —
            # the OpenAI chat API rejects message objects carrying unknown
            # properties. Only role/content are sent.
            messages = [{"role": "system", "content": system_prompt}]
            messages += [{"role": m["role"], "content": m["content"]} for m in history_list]

            if AI_PROVIDER == "ollama":
                url = OLLAMA_BASE_URL
                # Ollama's OpenAI-compatible endpoint lives under /v1.
                if not url.endswith('/v1') and not url.endswith('/v1/'):
                    url = url.rstrip('/') + '/v1'
                key = "ollama"
                model_to_use = OLLAMA_MODEL
            elif AI_PROVIDER == "nvidia":
                url = "https://integrate.api.nvidia.com/v1"
                key = NVIDIA_API_KEY
                model_to_use = NVIDIA_MODEL
            else:  # openai — default base URL
                url = None
                key = OPENAI_API_KEY
                model_to_use = OPENAI_MODEL

            client = openai.AsyncOpenAI(base_url=url, api_key=key)
            response = await client.chat.completions.create(
                model=model_to_use,
                messages=messages
            )
            ai_msg = response.choices[0].message.content

        elif AI_PROVIDER == "google":
            if not GOOGLE_API_KEY:
                return "Fehler: GOOGLE_API_KEY fehlt in der .env Datei!"

            client = genai.Client(api_key=GOOGLE_API_KEY)

            # Convert everything EXCEPT the last entry (the current question)
            # into the google-genai history format.
            google_history = []
            for msg in history_list[:-1]:
                role = "user" if msg["role"] == "user" else "model"
                google_history.append(
                    types.Content(role=role, parts=[types.Part.from_text(text=msg["content"])])
                )

            chat = client.chats.create(
                model=GOOGLE_MODEL,
                config=types.GenerateContentConfig(system_instruction=system_prompt),
                history=google_history
            )

            # NOTE(review): send_message is a blocking call inside an async
            # function; consider the aio client — left unchanged here.
            response = chat.send_message(user_msg)
            ai_msg = response.text

        else:
            # FIX: an unknown/misconfigured provider previously produced a
            # silent empty reply.
            ai_msg = f"Fehler: Unbekannter AI_PROVIDER '{AI_PROVIDER}'."

    except Exception as e:
        ai_msg = f"Fehler bei der KI-Anfrage: {e}"
        print(f"KI Fehler: {e}")

    return ai_msg
|
||||
|
||||
async def handle_telegram_message(update: Update, context: ContextTypes.DEFAULT_TYPE):
    """Handle an incoming Telegram text message (private or group chat).

    Routes the message to the AI, replies with the answer, and — for the
    admin only — executes any <EXECUTE> commands the AI emitted on the
    matching node via SSH.
    """
    # The bot's own @username, resolved dynamically.
    bot_username = f"@{context.bot.username}"

    chat_type = update.effective_chat.type
    user_msg = update.message.text
    user_id = str(update.message.from_user.id)
    chat_id = str(update.effective_chat.id)

    # 1. Group logic and history selection.
    if chat_type in ['group', 'supergroup']:
        # In groups the bot only reacts when it is mentioned.
        if bot_username not in user_msg:
            return
        user_msg = user_msg.replace(bot_username, "").strip()
        # Every group gets its own, fully separate memory.
        history_key = chat_id
    else:
        # Private chat: only the configured admin may talk to the bot.
        if user_id != ALLOWED_ID:
            await update.message.reply_text("Zugriff auf den privaten Chat verweigert. 🔒")
            return
        # Private chats share their memory with the web chat.
        history_key = "private"

    # Make sure a history list exists for this conversation.
    chat_histories.setdefault(history_key, [])
    current_history = chat_histories[history_key]

    # Record the user message with a timestamp in the RIGHT history.
    now = datetime.now().strftime("%d.%m.%Y %H:%M")
    current_history.append({"role": "user", "content": user_msg, "timestamp": now})

    sender_name = update.message.from_user.first_name
    await update.message.reply_chat_action(action="typing")

    # 2. Ask the AI, passing the admin flag into the prompt builder.
    is_admin_user = (user_id == ALLOWED_ID)
    ai_response = await get_ai_response(
        user_msg,
        get_system_prompt(sender_name, is_admin=is_admin_user),
        current_history
    )

    # Split command tags out of the visible answer.
    commands = re.findall(r'<EXECUTE target="(.*?)">(.*?)</EXECUTE>', ai_response, re.I | re.S)
    clean_msg = re.sub(r'<EXECUTE.*?>.*?</EXECUTE>', '', ai_response, flags=re.I | re.S).strip()

    # Store the AI's answer in the history as well.
    now = datetime.now().strftime("%d.%m.%Y %H:%M")
    current_history.append({"role": "assistant", "content": clean_msg, "timestamp": now})

    if clean_msg:
        await update.message.reply_text(clean_msg)

    # 3. Execute commands — admin only (hard security gate).
    if commands:
        if user_id != ALLOWED_ID:
            await update.message.reply_text("⚠️ **Sicherheits-Sperre:** Die KI wollte einen Server-Befehl ausführen, aber du hast keine Administrator-Rechte dafür.")
            return

        for target, cmd in commands:
            await update.message.reply_text(f"⏳ Führe aus auf *{target}*:\n`{cmd}`", parse_mode='Markdown')

            # Resolve the target node (by IP or by name).
            conn = get_db()
            n = conn.execute('SELECT * FROM nodes WHERE ip=? OR name=?', (target.strip(), target.strip())).fetchone()
            conn.close()

            if not n:
                await update.message.reply_text(f"⚠️ Node '{target}' nicht in der Datenbank gefunden.")
                continue

            try:
                proc = await asyncio.create_subprocess_shell(
                    f"ssh -o StrictHostKeyChecking=no -o LogLevel=ERROR {n['user']}@{n['ip']} '{cmd}'",
                    stdout=asyncio.subprocess.PIPE,
                    stderr=asyncio.subprocess.STDOUT
                )
                stdout, _ = await proc.communicate()
                output = stdout.decode('utf-8', errors='ignore').strip()

                result_text = output[:4000] if output else "✅ Befehl ohne Output ausgeführt."
                await update.message.reply_text(f"💻 **Output von {n['name']}:**\n```\n{result_text}\n```", parse_mode='Markdown')

                # Feed the command output back into the SAME history so the
                # AI knows what happened on the next turn.
                now = datetime.now().strftime("%d.%m.%Y %H:%M")
                current_history.append({"role": "user", "content": f"[SYSTEM] Befehl '{cmd}' auf {target} fertig:\n{result_text}", "timestamp": now})
            except Exception as e:
                await update.message.reply_text(f"❌ Fehler bei der Ausführung: {e}")

    # Keep the history bounded (max. 15 entries) so RAM does not grow forever.
    if len(current_history) > 15:
        chat_histories[history_key] = current_history[-15:]
|
||||
|
||||
# --- FastAPI lifespan events (start/stop the Telegram bot) ---
@app.on_event("startup")
async def startup_event():
    """Start the Telegram bot inside the FastAPI event loop, if configured."""
    global telegram_app

    # Guard: skip when token/ID are missing or the token is still the
    # placeholder from the .env template.
    if not (TELEGRAM_TOKEN and ALLOWED_ID) or "dein-telegram-bot-token" in TELEGRAM_TOKEN:
        print("ℹ️ Telegram Bot inaktiv (Token oder ID fehlen/sind Platzhalter in der .env).")
        return

    print("🤖 Starte Telegram Bot im Hintergrund...")
    try:
        telegram_app = ApplicationBuilder().token(TELEGRAM_TOKEN).build()

        # Route all plain text messages (no commands) to our handler.
        telegram_app.add_handler(MessageHandler(filters.TEXT & ~filters.COMMAND, handle_telegram_message))

        # Hook the bot asynchronously into the running event loop.
        await telegram_app.initialize()
        await telegram_app.start()
        await telegram_app.updater.start_polling()
        print("✅ Telegram Bot lauscht!")

    except InvalidToken:
        print("❌ Telegram-Fehler: Der Token in der .env ist ungültig! Der Bot bleibt inaktiv, aber der Server läuft weiter.")
    except Exception as e:
        print(f"❌ Unerwarteter Fehler beim Telegram-Start: {e}")
|
||||
|
||||
@app.on_event("shutdown")
async def shutdown_event():
    """Stop the Telegram bot cleanly when the server shuts down."""
    global telegram_app
    if not telegram_app:
        return
    print("🛑 Stoppe Telegram Bot...")
    await telegram_app.updater.stop()
    await telegram_app.stop()
    await telegram_app.shutdown()
|
||||
|
||||
|
||||
# --- WebSocket manager ---
class ConnectionManager:
    """Tracks open websockets and fans messages out to all of them."""

    def __init__(self):
        self.active_connections = []

    async def connect(self, ws: WebSocket):
        """Accept a websocket and register it for broadcasts."""
        await ws.accept()
        self.active_connections.append(ws)

    def disconnect(self, ws: WebSocket):
        # FIX: list.remove raised ValueError when a socket was
        # disconnected twice (or never registered).
        if ws in self.active_connections:
            self.active_connections.remove(ws)

    async def broadcast(self, msg: str):
        """Send msg to every client; dead sockets are skipped, not fatal."""
        # Iterate over a snapshot: a disconnect while sending must not
        # mutate the list we are looping over.
        for c in list(self.active_connections):
            try:
                await c.send_text(msg)
            except Exception:
                # FIX: was a bare `except:` that also swallowed
                # CancelledError; best-effort delivery is preserved.
                pass

manager = ConnectionManager()
|
||||
|
||||
async def get_remote_info(ip, user):
    """Inventory a remote node over SSH.

    Tries a Linux/macOS probe first; if that fails, falls back to a
    Windows (CMD) probe. Returns {"arch", "os", "docker"} on success,
    None when both probes fail.

    NOTE(review): subprocess.check_output blocks the event loop for up
    to the 3s SSH timeout; consider asyncio.create_subprocess_shell.
    """
    # 1st attempt: Linux/macOS — arch, OS name, docker availability (1/0)
    linux_cmd = "uname -m && (sw_vers -productName 2>/dev/null || grep PRETTY_NAME /etc/os-release 2>/dev/null | cut -d= -f2 || uname -s) && (command -v docker >/dev/null 2>&1 && echo 1 || echo 0)"
    ssh_cmd = f"ssh -o StrictHostKeyChecking=no -o LogLevel=ERROR -o UserKnownHostsFile=/dev/null -o ConnectTimeout=3 {user}@{ip} \"{linux_cmd}\""

    try:
        output = subprocess.check_output(ssh_cmd, shell=True, stderr=subprocess.DEVNULL).decode().strip().split('\n')
        if len(output) >= 2:
            return {
                "arch": output[0],
                "os": output[1].replace('"', ''),
                "docker": int(output[2]) if len(output) > 2 else 0
            }
    except Exception:
        # FIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/CancelledError. Linux probe failed —
        # fall through to the Windows probe.
        pass

    # 2nd attempt: Windows (CMD)
    # ver = OS version, %PROCESSOR_ARCHITECTURE% = arch, where docker = docker check
    win_cmd = 'ver && echo %PROCESSOR_ARCHITECTURE% && (where docker >nul 2>&1 && echo 1 || echo 0)'
    ssh_cmd = f"ssh -o StrictHostKeyChecking=no -o LogLevel=ERROR -o UserKnownHostsFile=/dev/null -o ConnectTimeout=3 {user}@{ip} \"{win_cmd}\""

    try:
        output = subprocess.check_output(ssh_cmd, shell=True).decode().strip().split('\n')
        # Typical output: ["Microsoft Windows [Version 10.0...]", "AMD64", "1"]
        raw_os = output[0] if len(output) > 0 else "Windows"
        os_name = "Windows"
        if "Version 10" in raw_os:
            os_name = "Windows 10/11"
        elif "Version 11" in raw_os:
            os_name = "Windows 11"

        arch = output[1] if len(output) > 1 else "x86"
        if "AMD64" in arch:
            arch = "x86-64"

        docker_val = int(output[2]) if len(output) > 2 else 0

        return {"arch": arch, "os": os_name, "docker": docker_val}
    except Exception as e:
        print(f"Windows-Check fehlgeschlagen für {ip}: {e}")
        return None
|
||||
|
||||
# Nutze diese Funktion nun in bootstrap_node und refresh_status
|
||||
|
||||
# --- ERWEITERTES NODE BOOTSTRAPPING (Inventur) ---
|
||||
async def bootstrap_node(ip, user, password):
    """Pair a new node: push our public SSH key to it, then run a first inventory.

    Runs as a FastAPI background task. Progress is streamed to all connected
    dashboard clients via manager.broadcast(); on success the node's row in
    the DB is updated with os/arch/docker/status.
    """
    import shlex  # local import: only needed here for safe shell quoting

    await manager.broadcast(f"🔑 Kopple {ip}...")

    with open(f"{SSH_KEY}.pub", "r") as f:
        pub_key = f.read().strip()

    # Absolutely minimal command that works in both the Windows CMD and a
    # Linux shell: create the directory (if needed) and append the key.
    cmd_universal = f'mkdir .ssh & echo {pub_key} >> .ssh/authorized_keys'

    # BUG FIX: the password used to be interpolated as '-p '{password}''. A
    # password containing a quote or shell metacharacter would break out of
    # (or break) the command line. shlex.quote() makes it injection-safe.
    setup_cmd = f"sshpass -p {shlex.quote(password)} ssh -o StrictHostKeyChecking=no -o LogLevel=ERROR -o UserKnownHostsFile=/dev/null {user}@{ip} \"{cmd_universal}\""

    try:
        # NOTE(review): subprocess.run blocks the event loop for up to 15s;
        # tolerable because this runs as a background task, but an
        # asyncio.create_subprocess_shell variant would be cleaner.
        proc = subprocess.run(setup_cmd, shell=True, capture_output=True, text=True, timeout=15)

        if proc.returncode == 0:
            await manager.broadcast(f"✅ Key an {ip} übertragen.")
        else:
            # A non-zero exit is often just 'mkdir' complaining that the
            # directory already exists; the key may still have been appended,
            # so the key-based inventory below is the real verification.
            await manager.broadcast(f"ℹ️ Info: {ip} antwortet (Key-Check folgt).")

    except Exception as e:
        await manager.broadcast(f"❌ Fehler: {e}")

    # The inventory (get_remote_info) now checks whether key-based login works.
    await manager.broadcast(f"🔍 Teste schlüssellosen Zugriff auf {ip}...")
    info = await get_remote_info(ip, user)

    if info:
        status = "Docker Aktiv" if info['docker'] else "Bereit (Kein Docker)"
        conn = get_db()
        try:
            conn.execute('''
                UPDATE nodes SET os = ?, arch = ?, docker_installed = ?, status = ?
                WHERE ip = ?
            ''', (info['os'], info['arch'], info['docker'], status, ip))
            conn.commit()
        finally:
            # Release the connection even if the UPDATE raises.
            conn.close()
        await manager.broadcast(f"✅ Node {ip} erkannt als {info['os']} ({info['arch']}).")
    else:
        await manager.broadcast(f"⚠️ Inventur auf {ip} fehlgeschlagen.")
|
||||
# --- ROUTES ---
|
||||
|
||||
@app.get("/")
async def index(request: Request):
    """Render the dashboard page with every registered node."""
    conn = get_db()
    try:
        nodes = conn.execute('SELECT * FROM nodes').fetchall()
    finally:
        # BUG FIX: the connection leaked if the query raised; always close it.
        conn.close()
    return templates.TemplateResponse("index.html", {"request": request, "nodes": nodes})
|
||||
|
||||
@app.get("/api/node/{node_id}")
async def get_node(node_id: int):
    """Return a single node as a JSON object, or {} when the id is unknown."""
    conn = get_db()
    try:
        node = conn.execute('SELECT * FROM nodes WHERE id = ?', (node_id,)).fetchone()
    finally:
        # BUG FIX: the connection leaked if the query raised; always close it.
        conn.close()
    return dict(node) if node else {}
|
||||
|
||||
@app.put("/api/node/{node_id}")
async def api_update_node(node_id: int, request: Request):
    """Update every editable field of a node from a JSON payload.

    Returns {"status": "success"} on success, or a JSON error body with an
    actual HTTP 500 status on failure.
    """
    from fastapi.responses import JSONResponse  # file already depends on fastapi

    data = await request.json()

    conn = get_db()
    try:
        conn.execute('''
            UPDATE nodes SET
                name = ?,
                ip = ?,
                user = ?,
                sudo_password = ?,
                os = ?,
                arch = ?,
                status = ?,
                docker_installed = ?
            WHERE id = ?
        ''', (
            data.get("name"),
            data.get("ip"),
            data.get("user"),
            data.get("sudo_password"),
            data.get("os"),
            data.get("arch"),
            data.get("status"),
            data.get("docker_installed"),
            node_id
        ))
        conn.commit()
        return {"status": "success"}
    except Exception as e:
        print(f"Update Fehler: {e}")
        # BUG FIX: returning a tuple `({...}, 500)` from a FastAPI handler
        # does NOT set the status code - FastAPI serializes the tuple and
        # replies with HTTP 200. Use a JSONResponse so clients see the 500.
        return JSONResponse(status_code=500, content={"status": "error", "message": str(e)})
    finally:
        conn.close()
|
||||
|
||||
@app.post("/add_node")
async def add_node(background_tasks: BackgroundTasks, name: str = Form(...), ip: str = Form(...), user: str = Form(...), password: str = Form(...)):
    """Register a node and kick off SSH-key pairing in the background."""
    conn = get_db()
    try:
        # Persist the initial record, including the sudo password.
        conn.execute('''
            INSERT INTO nodes (name, ip, user, sudo_password, status)
            VALUES (?, ?, ?, ?, ?)
        ''', (name, ip, user, password, "Kopplung..."))
        conn.commit()
        # Pairing runs after the response so the UI stays responsive.
        background_tasks.add_task(bootstrap_node, ip, user, password)
    except sqlite3.IntegrityError:
        # Duplicate node -> silently ignore; no pairing task is scheduled.
        pass
    finally:
        conn.close()
    return RedirectResponse(url="/", status_code=303)
|
||||
|
||||
@app.post("/edit_node/{node_id}")
async def edit_node(node_id: int, name: str = Form(...), ip: str = Form(...), user: str = Form(...)):
    """Update a node's display name, IP and SSH user, then redirect home."""
    conn = get_db()
    try:
        conn.execute('UPDATE nodes SET name=?, ip=?, user=? WHERE id=?', (name, ip, user, node_id))
        conn.commit()
    finally:
        # BUG FIX: the connection leaked if the UPDATE raised; always close it.
        conn.close()
    return RedirectResponse(url="/", status_code=303)
|
||||
|
||||
@app.post("/remove_node/{node_id}")
async def remove_node(node_id: int):
    """Delete a node from the inventory, then redirect home."""
    conn = get_db()
    try:
        conn.execute('DELETE FROM nodes WHERE id = ?', (node_id,))
        conn.commit()
    finally:
        # BUG FIX: the connection leaked if the DELETE raised; always close it.
        conn.close()
    return RedirectResponse(url="/", status_code=303)
|
||||
|
||||
@app.get("/refresh_status/{node_id}")
async def refresh_status(node_id: int):
    """Re-probe one node over SSH and persist the refreshed status.

    Returns the new status plus os/arch/docker info; when the node is
    unreachable, status is set to "Offline" and the stored hardware info
    is echoed back.
    """
    conn = get_db()
    try:
        node = conn.execute('SELECT * FROM nodes WHERE id = ?', (node_id,)).fetchone()
    finally:
        # BUG FIX: do not hold the sqlite connection open across the slow
        # SSH probe below (and do not leak it if the SELECT raises).
        conn.close()
    if not node:
        return {"status": "Offline"}

    info = await get_remote_info(node['ip'], node['user'])

    conn = get_db()
    try:
        if info:
            new_status = "Docker Aktiv" if info['docker'] else "Bereit (Kein Docker)"
            conn.execute('UPDATE nodes SET status=?, os=?, arch=?, docker_installed=? WHERE id=?',
                         (new_status, info['os'], info['arch'], info['docker'], node_id))
            conn.commit()
            result = {"status": new_status, "os": info['os'], "arch": info['arch'], "docker": info['docker']}
        else:
            new_status = "Offline"
            conn.execute('UPDATE nodes SET status=? WHERE id=?', (new_status, node_id))
            conn.commit()
            result = {"status": new_status, "os": node['os'], "arch": node['arch'], "docker": node['docker_installed']}
    finally:
        conn.close()
    return result
|
||||
|
||||
# --- WebSockets Terminal / Chat / Logs (Integration wie gehabt) ---
|
||||
@app.websocket("/ws/install_logs")
async def log_websocket(websocket: WebSocket):
    """Register a client for broadcast log messages until it disconnects."""
    await manager.connect(websocket)
    try:
        # Incoming text is ignored; receiving only keeps the socket alive
        # and lets us detect the disconnect.
        while True:
            await websocket.receive_text()
    except WebSocketDisconnect:
        manager.disconnect(websocket)
|
||||
|
||||
@app.websocket("/ws/terminal/{ip}")
async def terminal_websocket(websocket: WebSocket, ip: str):
    """Bridge a browser terminal (xterm.js) to an interactive SSH session.

    Looks the node up by IP, spawns `ssh` attached to a local PTY, then pumps
    bytes in both directions until either side goes away.
    """
    await websocket.accept()
    conn = get_db()
    node = conn.execute('SELECT * FROM nodes WHERE ip = ?', (ip,)).fetchone()
    conn.close()

    if not node:
        # Unknown node -> nothing to connect to.
        await websocket.close()
        return

    master_fd, slave_fd = pty.openpty()
    # Start SSH in interactive mode, attached to the slave end of the PTY.
    proc = await asyncio.create_subprocess_exec(
        "ssh",
        "-i", SSH_KEY,  # <--- this is essential: authenticate with our key
        "-o", "StrictHostKeyChecking=no",
        "-o", "BatchMode=yes",  # prevents SSH from hanging if the key does not work after all
        "-t", f"{node['user']}@{ip}",
        stdin=slave_fd, stdout=slave_fd, stderr=slave_fd
    )

    async def pty_to_ws():
        # Forward PTY output to the websocket via a non-blocking polling loop.
        fl = fcntl.fcntl(master_fd, fcntl.F_GETFL)
        fcntl.fcntl(master_fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
        try:
            while True:
                await asyncio.sleep(0.01)
                try:
                    data = os.read(master_fd, 1024).decode(errors='ignore')
                    if data:
                        await websocket.send_text(data)
                except BlockingIOError:
                    # No PTY data available right now - keep polling.
                    continue
        except:
            pass  # socket closed / process gone - let gather() unwind

    async def ws_to_pty():
        # Forward websocket input (keystrokes or resize messages) to the PTY.
        try:
            while True:
                data = await websocket.receive_text()
                # Check whether this is a resize command (JSON).
                if data.startswith('{"type":"resize"'):
                    resize_data = json.loads(data)
                    cols = resize_data['cols']
                    rows = resize_data['rows']
                    # This sets the PTY window size at the OS level.
                    s = struct.pack("HHHH", rows, cols, 0, 0)
                    fcntl.ioctl(master_fd, termios.TIOCSWINSZ, s)
                else:
                    # Ordinary terminal input.
                    os.write(master_fd, data.encode())
        except:
            pass

    try:
        await asyncio.gather(pty_to_ws(), ws_to_pty())
    finally:
        # Tear down the SSH process and both PTY file descriptors.
        if proc.returncode is None:
            proc.terminate()
        os.close(master_fd)
        os.close(slave_fd)
|
||||
|
||||
@app.websocket("/ws/chat")
async def chat_endpoint(websocket: WebSocket):
    """Private dashboard chat.

    Relays user messages to the AI (with rolling history), strips any
    <EXECUTE target="...">cmd</EXECUTE> blocks out of the reply, runs those
    commands on the matching nodes, and finally asks the AI for a summary.
    """
    await websocket.accept()
    # Make sure the private history bucket exists.
    if "private" not in chat_histories:
        chat_histories["private"] = []

    try:
        while True:
            user_msg = await websocket.receive_text()

            # 1. Store the user message in the history.
            now = datetime.now().strftime("%d.%m.%Y %H:%M")
            chat_histories["private"].append({"role": "user", "content": user_msg, "timestamp": now})

            # 2. Ask the AI (with the history!).
            ai_response = await get_ai_response(user_msg, get_system_prompt(is_admin=True), chat_histories["private"])

            # Extract machine commands and strip them from the visible reply.
            commands = re.findall(r'<EXECUTE target="(.*?)">(.*?)</EXECUTE>', ai_response, re.I | re.S)
            clean_msg = re.sub(r'<EXECUTE.*?>.*?</EXECUTE>', '', ai_response, flags=re.I | re.S).strip()

            # 3. Store the AI reply in the history.
            if clean_msg:
                now = datetime.now().strftime("%d.%m.%Y %H:%M")
                chat_histories["private"].append({"role": "assistant", "content": clean_msg, "timestamp": now})
                await websocket.send_text(clean_msg)

            # Cap the history at the most recent 30 entries.
            if len(chat_histories["private"]) > 30:
                chat_histories["private"] = chat_histories["private"][-30:]

            if commands:
                tasks = []
                for target, cmd in commands:
                    # Targets may be given by IP or by display name.
                    conn = get_db()
                    n = conn.execute('SELECT * FROM nodes WHERE ip=? OR name=?', (target.strip(), target.strip())).fetchone()
                    conn.close()
                    if n:
                        # Pass the history key so run_remote_task knows where to log.
                        tasks.append(run_remote_task(n['ip'], n['user'], cmd.strip(), "private"))

                if tasks:
                    await websocket.send_text("ℹ️ *Führe Befehle aus...*")
                    await asyncio.gather(*tasks)
                    # Ask the AI again for a summary of the results (optional).
                    summary = await get_ai_response("Zusammenfassung der Ergebnisse?", get_system_prompt(), chat_histories["private"])
                    await websocket.send_text(f"--- Info ---\n{summary}")
    except WebSocketDisconnect:
        pass
    except Exception as e:
        print(f"Webchat Fehler: {e}")
|
||||
|
||||
async def run_remote_task(ip, user, cmd, history_key="private"):
    """Run `cmd` on a node via SSH, stream each output line to the dashboard,
    and append the collected output to the chat history named `history_key`."""
    await manager.broadcast(f"🚀 Task: {cmd} auf {ip}")
    proc = await asyncio.create_subprocess_shell(
        f"ssh -o StrictHostKeyChecking=no -o LogLevel=ERROR -o UserKnownHostsFile=/dev/null {user}@{ip} '{cmd}'",
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.STDOUT
    )

    # Relay non-empty lines live and collect them for the history entry.
    collected = []
    while True:
        raw = await proc.stdout.readline()
        if not raw:
            break
        text = raw.decode('utf-8', errors='ignore').strip()
        if text:
            await manager.broadcast(f"🛠️ {text}")
            collected.append(text)
    await proc.wait()

    full_output = "".join(f"{line}\n" for line in collected)
    now = datetime.now().strftime("%d.%m.%Y %H:%M")
    # Write the result into the requested history bucket (if it exists).
    if history_key in chat_histories:
        chat_histories[history_key].append({
            "role": "user",
            "content": f"[SYSTEM] Befehl '{cmd}' auf {ip} fertig:\n{full_output or 'Kein Output'}",
            "timestamp": now
        })
|
||||
|
||||
# --- Settings API ---
|
||||
@app.get("/api/settings")
async def get_settings():
    """Expose the active AI provider/model configuration to the frontend."""
    return dict(
        provider=AI_PROVIDER,
        google_model=GOOGLE_MODEL,
        openai_model=OPENAI_MODEL,
        nvidia_model=NVIDIA_MODEL,
        ollama_model=OLLAMA_MODEL,
        ollama_base_url=OLLAMA_BASE_URL,  # the frontend needs the URL as well
    )
|
||||
|
||||
@app.post("/api/settings")
async def update_settings(request: Request):
    """Persist provider/model selection (and the Ollama base URL) to .env
    and update the module-level configuration in place."""
    # IMPORTANT: OLLAMA_BASE_URL must be declared global too.
    global AI_PROVIDER, GOOGLE_MODEL, OPENAI_MODEL, NVIDIA_MODEL, OLLAMA_MODEL, OLLAMA_BASE_URL

    data = await request.json()
    provider = data.get("provider")
    model = data.get("model")
    ollama_url = data.get("ollama_base_url")  # URL sent by the frontend

    if provider:
        AI_PROVIDER = provider
        set_key(ENV_FILE, "AI_PROVIDER", provider)

    # One env variable per provider; this table replaces a long if/elif chain.
    model_vars = {
        "google": "GOOGLE_MODEL",
        "openai": "OPENAI_MODEL",
        "nvidia": "NVIDIA_MODEL",
        "ollama": "OLLAMA_MODEL",
    }
    if model and provider in model_vars:
        var_name = model_vars[provider]
        globals()[var_name] = model
        set_key(ENV_FILE, var_name, model)

    # If an Ollama URL was supplied, persist it as well.
    if ollama_url:
        OLLAMA_BASE_URL = ollama_url
        set_key(ENV_FILE, "OLLAMA_BASE_URL", ollama_url)

    return {"status": "success"}
|
||||
|
||||
@app.get("/api/models")
async def get_models(provider: str, url: str = None):
    """List the model names available for the given AI provider.

    For 'ollama' the supplied base `url` is queried directly; for the hosted
    providers the respective API key from the environment is used. On failure
    an empty list is returned so the frontend can use its fallback.
    """
    try:
        models = []

        if provider == "ollama" and url:
            # The backend has no CORS restrictions and queries Ollama directly.
            clean_url = url.replace("/v1", "").rstrip("/")
            async with httpx.AsyncClient() as client:
                response = await client.get(f"{clean_url}/api/tags", timeout=5.0)
                data = response.json()
                models = [m["name"] for m in data.get("models", [])]

        elif provider == "openai":
            if not OPENAI_API_KEY or "hier" in OPENAI_API_KEY: return {"models": []}
            # The backend uses your OpenAI API key here.
            client = openai.AsyncOpenAI(api_key=OPENAI_API_KEY)
            response = await client.models.list()
            # Only keep GPT/o1 models to keep the list tidy.
            models = [m.id for m in response.data if "gpt" in m.id or "o1" in m.id]
            models.sort()

        elif provider == "nvidia":
            if not NVIDIA_API_KEY or "hier" in NVIDIA_API_KEY:
                return {"models": ["FEHLER: Key fehlt in .env"]}

            client = openai.AsyncOpenAI(
                api_key=NVIDIA_API_KEY,
                base_url="https://integrate.api.nvidia.com/v1"
            )
            try:
                # Add a timeout so the page does not hang forever.
                response = await asyncio.wait_for(client.models.list(), timeout=5.0)
                models = [m.id for m in response.data]
                models.sort()
                return {"models": models}
            except Exception as e:
                print(f"NVIDIA API Error: {e}")
                return {"models": [f"NVIDIA Fehler: {str(e)[:20]}..."]}

        elif provider == "google":
            if not GOOGLE_API_KEY:
                return {"models": ["API-Key fehlt"]}

            client = genai.Client(api_key=GOOGLE_API_KEY)
            models = []

            # In the new SDK (google-genai) the field is called 'supported_actions'.
            for m in client.models.list():
                if 'generateContent' in m.supported_actions:
                    # Take the name and strip the 'models/' prefix.
                    model_name = m.name.replace("models/", "")
                    models.append(model_name)

            models.sort()

        # Shared exit for the ollama/openai/google branches.
        return {"models": models}

    except Exception as e:
        print(f"Fehler beim Abrufen der Modelle für {provider}: {str(e)}")
        return {"models": []}  # empty list -> the frontend uses its fallback
|
||||
|
||||
# Restrict which Host headers are accepted (mitigates host-header attacks).
app.add_middleware(
    TrustedHostMiddleware,
    allowed_hosts=["jarvis.pi-farm.de", "192.168.178.13", "localhost"]
)

# NOTE(review): allow_origins=["*"] combined with allow_credentials=True is
# rejected by browsers for credentialed requests (the CORS spec forbids the
# wildcard origin with credentials) - consider listing explicit origins.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
|
||||
|
||||
@app.get("/debug_keys")
async def debug_keys():
    # Diagnostic endpoint: reports which API keys / config values were loaded.
    # NOTE(review): this route is unauthenticated and leaks the first 10
    # characters of the NVIDIA key - consider removing it in production.
    return {
        "AI_PROVIDER": AI_PROVIDER,
        "NVIDIA_KEY_LOADED": bool(NVIDIA_API_KEY and "hier" not in NVIDIA_API_KEY),
        "NVIDIA_KEY_START": NVIDIA_API_KEY[:10] if NVIDIA_API_KEY else "Missing",
        "GOOGLE_KEY_LOADED": bool(GOOGLE_API_KEY),
        "OLLAMA_URL": OLLAMA_BASE_URL
    }
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Development entry point: serve the app on all interfaces, port 8000.
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000)
|
||||
@@ -9,3 +9,4 @@ websockets
|
||||
openai
|
||||
google-genai
|
||||
python-dotenv
|
||||
python-telegram-bot
|
||||
0
source/static/.gitkeep
Normal file
0
source/static/.gitkeep
Normal file
583
source/templates/index.html
Normal file
583
source/templates/index.html
Normal file
@@ -0,0 +1,583 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="de">
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<title>J.A.R.V.I.S. AI Dashboard</title>
|
||||
<script src="https://cdn.tailwindcss.com"></script>
|
||||
|
||||
<link href="/static/gridstack.min.css" rel="stylesheet"/>
|
||||
<script src="/static/gridstack-all.js"></script>
|
||||
|
||||
<link rel="stylesheet" href="/static/xterm.css" />
|
||||
<script src="/static/xterm.js"></script>
|
||||
<script src="/static/xterm-addon-fit.js"></script>
|
||||
<script src="/static/marked.min.js"></script>
|
||||
|
||||
<style>
|
||||
body { margin: 0; background: #0f172a; }
|
||||
|
||||
.grid-stack {
|
||||
background: transparent;
|
||||
min-height: calc(100vh - 60px);
|
||||
padding: 10px;
|
||||
}
|
||||
|
||||
.grid-stack-item-content {
|
||||
background: #1e293b;
|
||||
color: white;
|
||||
border: 1px solid #334155;
|
||||
border-radius: 8px;
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
overflow: hidden;
|
||||
}
|
||||
|
||||
.terminal-container {
|
||||
flex: 1;
|
||||
width: 100%;
|
||||
background: black;
|
||||
}
|
||||
|
||||
#install-log {
|
||||
flex: 1;
|
||||
font-family: monospace;
|
||||
font-size: 11px;
|
||||
color: #4ade80;
|
||||
padding: 10px;
|
||||
overflow-y: auto;
|
||||
background: #0f172a;
|
||||
}
|
||||
|
||||
.widget-header {
|
||||
background: #334155;
|
||||
padding: 8px 12px;
|
||||
cursor: move;
|
||||
font-size: 12px;
|
||||
font-weight: bold;
|
||||
display: flex;
|
||||
justify-content: space-between;
|
||||
align-items: center;
|
||||
user-select: none;
|
||||
}
|
||||
|
||||
.top-toolbar {
|
||||
position: fixed;
|
||||
top: 0;
|
||||
left: 0;
|
||||
width: 100%;
|
||||
background-color: #1e293b;
|
||||
color: white;
|
||||
height: 55px;
|
||||
display: flex;
|
||||
align-items: center;
|
||||
justify-content: space-between;
|
||||
padding: 0 20px;
|
||||
box-sizing: border-box;
|
||||
box-shadow: 0 4px 6px -1px rgba(0, 0, 0, 0.3);
|
||||
z-index: 1000;
|
||||
border-bottom: 1px solid #334155;
|
||||
}
|
||||
|
||||
.toolbar-title { font-weight: bold; font-size: 1.1em; color: #38bdf8; }
|
||||
.toolbar-controls { display: flex; align-items: center; gap: 10px; }
|
||||
.toolbar-controls label { white-space: nowrap; font-size: 12px; color: #94a3b8; }
|
||||
|
||||
#ollama-url-container {
|
||||
display: none;
|
||||
align-items: center;
|
||||
gap: 8px;
|
||||
border-left: 1px solid #475569;
|
||||
padding-left: 12px;
|
||||
flex: 1;
|
||||
}
|
||||
|
||||
#ollama-url { width: 100%; min-width: 200px; }
|
||||
.toolbar-controls select, .toolbar-controls input {
|
||||
height: 32px; background: #0f172a; color: white;
|
||||
border: 1px solid #475569; border-radius: 4px; padding: 0 8px; font-size: 12px;
|
||||
}
|
||||
|
||||
.btn-tool {
|
||||
height: 32px; padding: 0 12px; border-radius: 4px; font-size: 12px;
|
||||
font-weight: bold; display: flex; align-items: center; gap: 5px;
|
||||
}
|
||||
.save-btn { background-color: #059669; color: white; }
|
||||
.add-node-btn { background-color: #2563eb; color: white; }
|
||||
|
||||
.node-badge {
|
||||
background: #334155; padding: 2px 5px; border-radius: 4px;
|
||||
font-size: 8px; text-transform: uppercase; font-weight: bold; color: #94a3b8;
|
||||
}
|
||||
.timestamp {
|
||||
font-size: 0.7em;
|
||||
color: #888; /* Ein dezentes Grau */
|
||||
text-align: right;
|
||||
margin-top: 4px;
|
||||
opacity: 0.8;
|
||||
}
|
||||
|
||||
/* Optional: Falls der KI-Text dunkel und der User-Text auf farbigem Grund ist */
|
||||
.message.user .timestamp {
|
||||
color: #e0e0e0; /* Helleres Grau, falls deine User-Bubble z.B. blau ist */
|
||||
}
|
||||
|
||||
.markdown-content pre { background: #000; padding: 8px; border-radius: 4px; border: 1px solid #334155; overflow-x: auto; margin: 8px 0; }
|
||||
</style>
|
||||
</head>
|
||||
<body class="text-white">
|
||||
|
||||
<div class="top-toolbar">
|
||||
<div class="toolbar-title">🤖 J.A.R.V.I.S. AI Dashboard</div>
|
||||
<div class="toolbar-controls">
|
||||
<button onclick="addNode()" class="btn-tool add-node-btn"><span>+</span> Node</button>
|
||||
<div class="h-6 w-px bg-slate-700 mx-1"></div>
|
||||
<label>Provider:</label>
|
||||
<select id="ai-provider" onchange="updateModelDropdown(false)">
|
||||
<option value="google">Google Gemini</option>
|
||||
<option value="openai">OpenAI</option>
|
||||
<option value="nvidia">NVIDIA</option>
|
||||
<option value="ollama">Ollama</option>
|
||||
</select>
|
||||
<label>Modell:</label>
|
||||
<select id="ai-model"></select>
|
||||
<div id="ollama-url-container">
|
||||
<input type="text" id="ollama-url" onblur="updateModelDropdown(false)" placeholder="URL: http://...">
|
||||
</div>
|
||||
<button class="btn-tool save-btn" onclick="saveSettings()">Speichern</button>
|
||||
<span id="settings-status"></span>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="flex min-h-screen pt-[55px]">
|
||||
|
||||
<div class="w-64 bg-slate-900 border-r border-slate-800 p-4 flex flex-col fixed left-0 top-[55px] bottom-0 z-10">
|
||||
<div id="node-list" class="flex-1 overflow-y-auto space-y-2">
|
||||
{% for node in nodes %}
|
||||
<div class="p-3 bg-slate-800 rounded border border-slate-700 relative group" id="node-card-{{ node.id }}">
|
||||
<div class="flex justify-between items-start">
|
||||
<div class="text-sm font-bold" id="node-name-{{ node.id }}">{{ node.name }}</div>
|
||||
<div class="flex gap-2">
|
||||
<button onclick="editNode({{ node.id }}, '{{ node.name }}', '{{ node.ip }}')" class="text-slate-500 hover:text-sky-400 text-xs">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="12" height="12" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"><path d="M17 3a2.85 2.83 0 1 1 4 4L7.5 20.5 2 22l1.5-5.5Z"/><path d="m15 5 4 4"/></svg>
|
||||
</button>
|
||||
<form action="/remove_node/{{ node.id }}" method="post" onsubmit="return confirm('Entfernen?')">
|
||||
<button type="submit" class="text-slate-500 hover:text-red-500 text-xs">✕</button>
|
||||
</form>
|
||||
</div>
|
||||
</div>
|
||||
<div class="text-[10px] text-slate-500 font-mono flex justify-between items-center mb-1">
|
||||
<span id="node-ip-{{ node.id }}">{{ node.ip }}</span>
|
||||
<button onclick="refreshNodeStatus({{ node.id }})" class="hover:text-blue-400">🔄</button>
|
||||
</div>
|
||||
<div class="flex gap-1 mb-2">
|
||||
<span id="os-{{ node.id }}" class="node-badge truncate max-w-[80px]">{{ node.os }}</span>
|
||||
<span id="arch-{{ node.id }}" class="node-badge">{{ node.arch }}</span>
|
||||
</div>
|
||||
<div class="mt-1 flex items-center gap-2">
|
||||
<span id="led-{{ node.id }}" class="h-2 w-2 rounded-full {% if 'Aktiv' in node.status %} bg-blue-500 shadow-[0_0_8px_#3b82f6] {% else %} bg-yellow-500 {% endif %}"></span>
|
||||
<span id="badge-{{ node.id }}" class="text-[9px] uppercase font-mono text-slate-300">{{ node.status }}</span>
|
||||
</div>
|
||||
<button onclick="openTerminal('{{ node.ip }}')" class="mt-2 w-full text-[10px] bg-slate-700 hover:bg-slate-600 py-1 rounded">Konsole</button>
|
||||
</div>
|
||||
{% endfor %}
|
||||
</div>
|
||||
<button onclick="localStorage.removeItem('pi-orch-layout-v2'); location.reload();" class="mt-4 text-[10px] text-slate-500 hover:text-white uppercase">Layout Reset</button>
|
||||
</div>
|
||||
|
||||
<div class="flex-1 ml-64 bg-slate-950 overflow-y-auto">
|
||||
<div class="grid-stack">
|
||||
<div class="grid-stack-item" gs-id="chat-widget" gs-w="6" gs-h="5" gs-x="0" gs-y="0">
|
||||
<div class="grid-stack-item-content">
|
||||
<div class="widget-header"><span>💬 J.A.R.V.I.S. - Chat</span> <span>⠿</span></div>
|
||||
<div id="chat-window" class="flex-1 p-4 overflow-y-auto text-sm space-y-2 bg-[#0f172a]/50"></div>
|
||||
<div class="p-2 border-t border-slate-700 flex bg-slate-800">
|
||||
<textarea id="user-input"
|
||||
class="flex-1 bg-slate-900 p-2 rounded text-xs outline-none border border-slate-700 focus:border-blue-500 resize-none overflow-y-hidden"
|
||||
placeholder="Frage eingeben... (Shift+Enter für neue Zeile)"
|
||||
rows="1"
|
||||
oninput="this.style.height = ''; this.style.height = this.scrollHeight + 'px'"></textarea>
|
||||
<button onclick="sendMessage()" class="ml-2 bg-blue-600 hover:bg-blue-500 px-4 py-1 rounded text-xs">Senden</button>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="grid-stack-item" gs-id="logs-widget" gs-w="6" gs-h="5" gs-x="6" gs-y="0">
|
||||
<div class="grid-stack-item-content">
|
||||
<div class="widget-header"><span>📜 System Logs</span> <span>⠿</span></div>
|
||||
<div id="install-log">Warte auf Daten...</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="grid-stack-item" gs-id="term-widget" gs-w="12" gs-h="6" gs-x="0" gs-y="5">
|
||||
<div class="grid-stack-item-content">
|
||||
<div class="widget-header"><span>🖥️ Live Terminal</span> <span>⠿</span></div>
|
||||
<div id="terminal" class="terminal-container"></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div id="add-node-modal" class="hidden fixed inset-0 bg-black/80 flex items-center justify-center z-[2000]">
|
||||
<div class="bg-slate-800 p-6 rounded-lg border border-slate-600 w-96">
|
||||
<h3 class="text-lg font-bold mb-4">Neuen Node hinzufügen</h3>
|
||||
<form action="/add_node" method="post" class="space-y-4">
|
||||
<input type="text" name="name" placeholder="Name" class="w-full bg-slate-900 border border-slate-700 p-2 rounded text-sm" required>
|
||||
<input type="text" name="ip" placeholder="IP Adresse" class="w-full bg-slate-900 border border-slate-700 p-2 rounded text-sm" required>
|
||||
<input type="text" name="user" placeholder="SSH Benutzer" class="w-full bg-slate-900 border border-slate-700 p-2 rounded text-sm" required>
|
||||
<input type="password" name="password" placeholder="Passwort" class="w-full bg-slate-900 border border-slate-700 p-2 rounded text-sm" required>
|
||||
<div class="flex justify-end gap-2 pt-2">
|
||||
<button type="button" onclick="closeAddNode()" class="px-4 py-2 text-sm text-slate-400">Abbrechen</button>
|
||||
<button type="submit" class="bg-blue-600 px-4 py-2 text-sm rounded font-bold">Koppeln</button>
|
||||
</div>
|
||||
</form>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div id="edit-node-modal" class="hidden fixed inset-0 bg-black/80 flex items-center justify-center z-[2000] p-4">
|
||||
<div class="bg-slate-800 p-6 rounded-lg border border-slate-600 w-full max-w-xl">
|
||||
<h3 class="text-lg font-bold mb-4 text-sky-400">Node-Konfiguration bearbeiten</h3>
|
||||
<input type="hidden" id="edit-node-id">
|
||||
|
||||
<div class="grid grid-cols-2 gap-4">
|
||||
<div class="space-y-3">
|
||||
<div>
|
||||
<label class="text-[10px] uppercase text-slate-400 font-bold">Anzeigename</label>
|
||||
<input type="text" id="edit-node-name" class="w-full bg-slate-900 border border-slate-700 p-2 rounded text-sm text-white focus:border-sky-500 outline-none">
|
||||
</div>
|
||||
<div>
|
||||
<label class="text-[10px] uppercase text-slate-400 font-bold">SSH Benutzer</label>
|
||||
<input type="text" id="edit-node-user" class="w-full bg-slate-900 border border-slate-700 p-2 rounded text-sm text-white focus:border-sky-500 outline-none">
|
||||
</div>
|
||||
<div>
|
||||
<label class="text-[10px] uppercase text-slate-400 font-bold">Betriebssystem</label>
|
||||
<input type="text" id="edit-node-os" class="w-full bg-slate-900 border border-slate-700 p-2 rounded text-sm text-white focus:border-sky-500 outline-none">
|
||||
</div>
|
||||
<div>
|
||||
<label class="text-[10px] uppercase text-slate-400 font-bold">Status-Text</label>
|
||||
<input type="text" id="edit-node-status" class="w-full bg-slate-900 border border-slate-700 p-2 rounded text-sm text-white focus:border-sky-500 outline-none">
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="space-y-3">
|
||||
<div>
|
||||
<label class="text-[10px] uppercase text-slate-400 font-bold">IP Adresse</label>
|
||||
<input type="text" id="edit-node-ip" class="w-full bg-slate-900 border border-slate-700 p-2 rounded text-sm text-white focus:border-sky-500 outline-none">
|
||||
</div>
|
||||
<div>
|
||||
<label class="text-[10px] uppercase text-slate-400 font-bold">Sudo Passwort</label>
|
||||
<input type="password" id="edit-node-password" class="w-full bg-slate-900 border border-slate-700 p-2 rounded text-sm text-white focus:border-sky-500 outline-none">
|
||||
</div>
|
||||
<div>
|
||||
<label class="text-[10px] uppercase text-slate-400 font-bold">Architektur</label>
|
||||
<input type="text" id="edit-node-arch" class="w-full bg-slate-900 border border-slate-700 p-2 rounded text-sm text-white focus:border-sky-500 outline-none">
|
||||
</div>
|
||||
<div class="flex items-center h-full pt-4">
|
||||
<label class="flex items-center gap-3 cursor-pointer group">
|
||||
<input type="checkbox" id="edit-node-docker" class="w-4 h-4 rounded border-slate-700 bg-slate-900 text-sky-600 focus:ring-sky-500">
|
||||
<span class="text-sm text-slate-300 group-hover:text-white transition-colors">Docker installiert</span>
|
||||
</label>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="flex justify-end gap-3 mt-6 pt-4 border-t border-slate-700">
|
||||
<button type="button" onclick="closeEditNode()" class="px-4 py-2 text-sm text-slate-400 hover:text-white transition-colors">Abbrechen</button>
|
||||
<button type="button" onclick="updateNode()" class="bg-sky-600 hover:bg-sky-500 px-6 py-2 text-sm rounded font-bold transition-all shadow-lg shadow-sky-900/20">Änderungen speichern</button>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<script>
|
||||
let currentSettings = {};
|
||||
let termDataDisposable = null;
|
||||
let term, fitAddon;
|
||||
const wsProtocol = window.location.protocol === 'https:' ? 'wss:' : 'ws:';
|
||||
const wsHost = window.location.host;
|
||||
|
||||
// 1. CHAT LOGIK
|
||||
/** Wire the chat textarea to the websocket: exposes window.sendMessage and
 *  installs the Enter-to-send / Shift+Enter-for-newline key handling. */
function initChat(chatWs) {
    const input = document.getElementById('user-input');

    window.sendMessage = function () {
        const msg = input.value.trim();
        if (!msg) {
            return;
        }
        chatWs.send(msg);
        appendChat("Du", msg, "text-slate-400 font-bold");

        // Reset the input box.
        input.value = '';
        input.style.height = 'auto'; // collapse the auto-grown textarea again
        input.focus();
    };

    input.addEventListener('keydown', (e) => {
        // Plain ENTER (no Shift) sends; Shift+ENTER keeps the default
        // behaviour and inserts a newline.
        const sendNow = e.key === 'Enter' && !e.shiftKey;
        if (sendNow) {
            e.preventDefault(); // suppress the line break
            sendMessage();
        }
    });
}
|
||||
|
||||
// 2. MODAL LOGIK
|
||||
// Show / hide the "add node" modal.
window.addNode = function () {
    document.getElementById('add-node-modal').classList.remove('hidden');
};
window.closeAddNode = function () {
    document.getElementById('add-node-modal').classList.add('hidden');
};
|
||||
|
||||
// EDIT LOGIK
// Fetches the full node record from the backend and fills the edit modal
// before opening it. Alerts (German UI text) on any failure.
window.editNode = async (id) => {
    try {
        const res = await fetch(`/api/node/${id}`);
        // fetch() only rejects on network errors; without this check an HTTP
        // error response would fill every form field with "undefined".
        if (!res.ok) throw new Error(`HTTP ${res.status}`);
        const node = await res.json();

        document.getElementById('edit-node-id').value = node.id;
        // ?? '' guards nullable columns — assigning undefined to an input
        // renders the literal string "undefined".
        document.getElementById('edit-node-name').value = node.name ?? '';
        document.getElementById('edit-node-ip').value = node.ip ?? '';
        document.getElementById('edit-node-user').value = node.user ?? '';
        document.getElementById('edit-node-password').value = node.sudo_password ?? '';
        document.getElementById('edit-node-os').value = node.os ?? '';
        document.getElementById('edit-node-arch').value = node.arch ?? '';
        document.getElementById('edit-node-status').value = node.status ?? '';
        document.getElementById('edit-node-docker').checked = node.docker_installed === 1;

        document.getElementById('edit-node-modal').classList.remove('hidden');
    } catch (e) {
        alert("Fehler beim Laden der Node-Daten");
    }
};
|
||||
// Hides the edit modal again without saving anything.
window.closeEditNode = function () {
    document.getElementById('edit-node-modal').classList.add('hidden');
};
|
||||
|
||||
// Collects all edit-modal fields and PUTs them to /api/node/{id}.
// On success the page reloads so every badge reflects the new data;
// on failure the user is told instead of failing silently.
window.updateNode = async function () {
    const field = (fieldId) => document.getElementById(fieldId);
    const id = field('edit-node-id').value;
    const payload = {
        name: field('edit-node-name').value,
        ip: field('edit-node-ip').value,
        user: field('edit-node-user').value,
        sudo_password: field('edit-node-password').value,
        os: field('edit-node-os').value,
        arch: field('edit-node-arch').value,
        status: field('edit-node-status').value,
        docker_installed: field('edit-node-docker').checked ? 1 : 0
    };

    try {
        const res = await fetch(`/api/node/${id}`, {
            method: 'PUT',
            headers: { 'Content-Type': 'application/json' },
            body: JSON.stringify(payload)
        });
        if (res.ok) {
            location.reload(); // Einfachste Methode, um alle Badges zu aktualisieren
        } else {
            // Previously an HTTP error response was swallowed with no feedback.
            alert("Fehler beim Speichern");
        }
    } catch (e) {
        alert("Fehler beim Speichern");
    }
};
|
||||
|
||||
// 3. ERWEITERTER REFRESH
// Re-probes one node via the backend and updates its sidebar card:
// status badge text, OS/arch badges and the coloured status LED.
window.refreshNodeStatus = async function (nodeId) {
    const badge = document.getElementById(`badge-${nodeId}`);
    const led = document.getElementById(`led-${nodeId}`);
    const osEl = document.getElementById(`os-${nodeId}`);
    const archEl = document.getElementById(`arch-${nodeId}`);
    const card = document.getElementById(`node-card-${nodeId}`);

    // Visual feedback while the probe is running.
    badge.textContent = "PRÜFE...";
    card.style.opacity = "0.7";

    try {
        const response = await fetch(`/refresh_status/${nodeId}`);
        // Route HTTP errors into the catch branch instead of rendering
        // undefined fields from an error payload.
        if (!response.ok) throw new Error(`HTTP ${response.status}`);
        const data = await response.json();

        badge.textContent = data.status;
        osEl.textContent = data.os;
        archEl.textContent = data.arch;

        // LED colour: blue = active, red = offline, yellow = anything else.
        if (data.status.includes("Aktiv")) {
            led.className = "h-2 w-2 rounded-full bg-blue-500 shadow-[0_0_8px_#3b82f6]";
        } else if (data.status === "Offline") {
            led.className = "h-2 w-2 rounded-full bg-red-500 shadow-[0_0_8px_#ef4444]";
        } else {
            led.className = "h-2 w-2 rounded-full bg-yellow-500";
        }
    } catch (e) {
        badge.textContent = "FEHLER";
        led.className = "h-2 w-2 rounded-full bg-red-500";
    } finally {
        card.style.opacity = "1"; // always restore the card, probe done
    }
};
|
||||
|
||||
// 4. SETTINGS & MODEL LOGIK
// Loads the persisted AI settings into `currentSettings` and pre-fills the
// toolbar controls, then triggers the initial model-dropdown population.
async function loadSettings() {
    try {
        const res = await fetch('/api/settings');
        if (!res.ok) throw new Error(`HTTP ${res.status}`);
        currentSettings = await res.json();
        document.getElementById('ai-provider').value = currentSettings.provider;
        document.getElementById('ollama-url').value = currentSettings.ollama_base_url || "http://127.0.0.1:11434/v1";
        updateModelDropdown(true);
    } catch (e) {
        // Best effort on page load, but do not swallow the error silently —
        // the previous empty catch made failures invisible.
        console.warn("Settings konnten nicht geladen werden:", e);
    }
}
|
||||
|
||||
// Repopulates the model <select> for the chosen provider.
// isInitialLoad=true additionally restores the model saved in
// currentSettings for that provider.
async function updateModelDropdown(isInitialLoad = false) {
    const provider = document.getElementById('ai-provider').value;
    const modelSelect = document.getElementById('ai-model');
    const urlContainer = document.getElementById('ollama-url-container');
    const ollamaUrl = document.getElementById('ollama-url').value;

    // The custom base-URL input is only relevant for the Ollama provider.
    urlContainer.style.display = (provider === "ollama") ? "flex" : "none";
    modelSelect.innerHTML = '<option>Lade...</option>';

    try {
        let queryUrl = `/api/models?provider=${provider}`;
        if (provider === "ollama") {
            queryUrl += `&url=${encodeURIComponent(ollamaUrl)}`;
        }

        const res = await fetch(queryUrl);
        // Treat HTTP errors like network errors -> "Fehler beim Laden".
        if (!res.ok) throw new Error(`HTTP ${res.status}`);
        const data = await res.json();
        modelSelect.innerHTML = '';

        if (data.models && data.models.length > 0) {
            for (const m of data.models) {
                const opt = document.createElement('option');
                opt.value = opt.textContent = m;
                modelSelect.appendChild(opt);
            }

            // On first load restore the model the user saved for this provider.
            if (isInitialLoad && currentSettings) {
                const savedModel = currentSettings[`${provider}_model`];
                if (savedModel) modelSelect.value = savedModel;
            }
        } else {
            // Previously an empty list left the select blank with no feedback.
            modelSelect.innerHTML = '<option>Keine Modelle gefunden</option>';
        }
    } catch (e) {
        modelSelect.innerHTML = '<option>Fehler beim Laden</option>';
    }
}
|
||||
|
||||
// Persists the current toolbar selection (provider, model, Ollama URL) to
// the backend and shows a short status indicator next to the save button.
async function saveSettings() {
    const provider = document.getElementById('ai-provider').value;
    const model = document.getElementById('ai-model').value;
    const ollama_base_url = document.getElementById('ollama-url').value;
    const statusEl = document.getElementById('settings-status');
    try {
        const res = await fetch('/api/settings', {
            method: 'POST',
            headers: { 'Content-Type': 'application/json' },
            body: JSON.stringify({ provider, model, ollama_base_url })
        });
        // Previously "✅ Gespeichert" was shown even when the server
        // answered with an HTTP error status.
        if (!res.ok) throw new Error(`HTTP ${res.status}`);
        statusEl.textContent = "✅ Gespeichert";
        setTimeout(() => { statusEl.textContent = ""; }, 2000);
    } catch (e) {
        statusEl.textContent = "❌ Fehler";
    }
}
|
||||
|
||||
// 5. INITIALISIERUNG
// Page bootstrap: widget grid, xterm terminal, websocket streams (logs,
// chat, terminal) and the globally exposed UI helpers.
document.addEventListener('DOMContentLoaded', function () {
    // --- Dashboard grid (draggable/resizable widgets) ---
    const grid = GridStack.init({
        cellHeight: 80,
        margin: 10,
        float: true,
        handle: '.widget-header',
        resizable: { handles: 'all' }
    });

    // Restore the last widget layout from localStorage (best effort — a
    // corrupt entry just keeps the default layout).
    const savedLayout = localStorage.getItem('pi-orch-layout-v2');
    if (savedLayout) {
        try { grid.load(JSON.parse(savedLayout)); } catch (e) { /* keep defaults */ }
    }
    grid.on('resizestop dragstop', () => {
        localStorage.setItem('pi-orch-layout-v2', JSON.stringify(grid.save(false)));
    });

    // --- Terminal (xterm.js) setup ---
    term = new Terminal({ theme: { background: '#000' }, fontSize: 13, convertEol: true });
    fitAddon = new FitAddon.FitAddon();
    term.loadAddon(fitAddon);
    term.open(document.getElementById('terminal'));
    fitAddon.fit();

    // WICHTIG: when the terminal widget is resized in GridStack, refit xterm
    // and tell the backend about the new dimensions.
    grid.on('resizestop', function (event, el) {
        if (el.getAttribute('gs-id') !== 'term-widget') return;
        setTimeout(() => {
            fitAddon.fit();
            if (window.termWs && window.termWs.readyState === WebSocket.OPEN) {
                window.termWs.send(JSON.stringify({
                    type: "resize",
                    cols: term.cols,
                    rows: term.rows
                }));
            }
        }, 100); // let the DOM settle before measuring
    });

    // --- Install-log stream ---
    const logEl = document.getElementById('install-log'); // hoisted: looked up once, not per message
    const logWs = new WebSocket(`${wsProtocol}//${wsHost}/ws/install_logs`);
    logWs.onmessage = (ev) => {
        const line = document.createElement('div');
        line.textContent = `> ${ev.data}`;
        logEl.appendChild(line);
        logEl.scrollTop = logEl.scrollHeight;
    };

    // --- Chat websocket ---
    const chatWs = new WebSocket(`${wsProtocol}//${wsHost}/ws/chat`);
    chatWs.onmessage = (ev) => appendChat("J.A.R.V.I.S.", ev.data, "text-blue-400 font-bold");
    initChat(chatWs);

    // Escapes text for safe embedding in HTML (XSS guard for user input).
    const escapeHtml = (s) => {
        const d = document.createElement('div');
        d.textContent = s;
        return d.innerHTML;
    };

    // Appends one chat message and scrolls the window down.
    // Markdown rendering applies to assistant messages only; user-typed text
    // is HTML-escaped — previously it was interpolated raw into innerHTML,
    // allowing markup/script injection.
    window.appendChat = function (user, msg, classes) {
        const timeString = new Date().toLocaleTimeString('de-DE', { hour: '2-digit', minute: '2-digit' });
        const win = document.getElementById('chat-window');

        const formattedMsg = (user === "J.A.R.V.I.S.") ? marked.parse(msg) : escapeHtml(msg);

        const messageHtml = `
            <div class="mb-4 p-2 rounded bg-slate-800/30 border border-slate-700/50">
                <div class="flex justify-between items-center mb-1">
                    <span class="${classes} text-[10px] uppercase">${user}</span>
                    <span class="timestamp text-[9px] opacity-50">${timeString}</span>
                </div>
                <div class="markdown-content text-slate-300 text-sm">
                    ${formattedMsg}
                </div>
            </div>
        `;

        // insertAdjacentHTML appends without re-parsing the existing chat
        // history, unlike "innerHTML +=".
        win.insertAdjacentHTML('beforeend', messageHtml);
        win.scrollTop = win.scrollHeight; // auto-scroll to newest message
    };

    // Opens an interactive terminal websocket session to the given node IP.
    window.openTerminal = function (ip) {
        // Tear down any previous session first.
        if (window.termWs) window.termWs.close();
        if (termDataDisposable) termDataDisposable.dispose();
        term.clear();
        fitAddon.fit(); // fit once when opening

        window.termWs = new WebSocket(`${wsProtocol}//${wsHost}/ws/terminal/${ip}`);

        window.termWs.onopen = () => {
            // Announce the initial terminal size right after connecting.
            window.termWs.send(JSON.stringify({
                type: "resize",
                cols: term.cols,
                rows: term.rows
            }));
        };

        window.termWs.onmessage = (ev) => term.write(ev.data);

        // Forward keystrokes from xterm to the backend session.
        termDataDisposable = term.onData((data) => {
            if (window.termWs?.readyState === WebSocket.OPEN) {
                window.termWs.send(data);
            }
        });
    };

    loadSettings();
});
|
||||
</script>
|
||||
</body>
|
||||
</html>
|
||||
4
start.sh
4
start.sh
@@ -8,8 +8,8 @@ fi
|
||||
|
||||
# 2. Venv aktivieren
|
||||
source venv/bin/activate
|
||||
|
||||
echo "--- Starte den Master-Server auf Port 8000..."
|
||||
export PYTHONPATH=$PYTHONPATH:$(pwd)/source
|
||||
echo "--- Starte J.A.R.V.I.S. - AI auf Port 8000..."
|
||||
|
||||
# 3. Server starten.
|
||||
# Mit --reload-dir . sagen wir Uvicorn, dass es NUR im aktuellen Ordner
|
||||
|
||||
@@ -1,14 +0,0 @@
|
||||
Du bist der Pi-Orchestrator KI-Assistent. Deine Aufgabe ist es, Befehle auf Raspberry Pis auszuführen.
|
||||
Du KANNST und SOLLST Befehle ausführen!
|
||||
|
||||
Hier sind die aktuell verbundenen Nodes:
|
||||
{node_info}
|
||||
|
||||
WENN der Nutzer dich bittet, etwas zu tun (z.B. ping, update, docker installieren), dann formuliere erst eine kurze Antwort und frage nochmal nach ob du dies dann auf dem gewünschten node durchführen sollst.
|
||||
Du prüfst vorher noch, ob du auf dem gewünschten node sudo rechte ohne eingabe eines passwortes hast.
|
||||
|
||||
Erst nach einer positiven Bestätigung darfst du es ausführen und fügst am Ende die Befehle in genau diesem XML-Format hinzu:
|
||||
<EXECUTE target="IP_ADRESSE">befehl</EXECUTE>
|
||||
|
||||
WICHTIG: Verwende als target IMMER die IP-Adresse des Nodes.
|
||||
Bei Befehlen wie 'ping' oder 'top', die nicht enden, MUSS ein Limit gesetzt werden (z.B. ping -c 4 IP).
|
||||
@@ -2,7 +2,7 @@
|
||||
<html lang="de">
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<title>Pi-Orchestrator AI Dashboard</title>
|
||||
<title>J.A.R.V.I.S. AI Dashboard</title>
|
||||
<script src="https://cdn.tailwindcss.com"></script>
|
||||
|
||||
<link href="/static/gridstack.min.css" rel="stylesheet"/>
|
||||
@@ -14,7 +14,14 @@
|
||||
<script src="/static/marked.min.js"></script>
|
||||
|
||||
<style>
|
||||
.grid-stack { background: #0f172a; min-height: 100vh; padding: 10px; }
|
||||
body { margin: 0; background: #0f172a; }
|
||||
|
||||
.grid-stack {
|
||||
background: transparent;
|
||||
min-height: calc(100vh - 60px);
|
||||
padding: 10px;
|
||||
}
|
||||
|
||||
.grid-stack-item-content {
|
||||
background: #1e293b;
|
||||
color: white;
|
||||
@@ -25,10 +32,9 @@
|
||||
overflow: hidden;
|
||||
}
|
||||
|
||||
.terminal-container, #terminal {
|
||||
.terminal-container {
|
||||
flex: 1;
|
||||
width: 100%;
|
||||
height: 100%;
|
||||
background: black;
|
||||
}
|
||||
|
||||
@@ -54,11 +60,6 @@
|
||||
user-select: none;
|
||||
}
|
||||
|
||||
body {
|
||||
margin: 0;
|
||||
padding-top: 60px; /* Platz für die Toolbar */
|
||||
}
|
||||
|
||||
.top-toolbar {
|
||||
position: fixed;
|
||||
top: 0;
|
||||
@@ -77,23 +78,9 @@
|
||||
border-bottom: 1px solid #334155;
|
||||
}
|
||||
|
||||
.toolbar-title {
|
||||
font-weight: bold;
|
||||
font-size: 1.1em;
|
||||
color: #38bdf8;
|
||||
}
|
||||
|
||||
.toolbar-controls {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
gap: 10px;
|
||||
}
|
||||
|
||||
.toolbar-controls label {
|
||||
white-space: nowrap;
|
||||
font-size: 12px;
|
||||
color: #94a3b8;
|
||||
}
|
||||
.toolbar-title { font-weight: bold; font-size: 1.1em; color: #38bdf8; }
|
||||
.toolbar-controls { display: flex; align-items: center; gap: 10px; }
|
||||
.toolbar-controls label { white-space: nowrap; font-size: 12px; color: #94a3b8; }
|
||||
|
||||
#ollama-url-container {
|
||||
display: none;
|
||||
@@ -101,133 +88,103 @@
|
||||
gap: 8px;
|
||||
border-left: 1px solid #475569;
|
||||
padding-left: 12px;
|
||||
flex: 1;
|
||||
}
|
||||
|
||||
.toolbar-controls select,
|
||||
.toolbar-controls input {
|
||||
height: 32px;
|
||||
background: #0f172a;
|
||||
color: white;
|
||||
border: 1px solid #475569;
|
||||
border-radius: 4px;
|
||||
padding: 0 8px;
|
||||
font-size: 12px;
|
||||
outline: none;
|
||||
#ollama-url { width: 100%; min-width: 200px; }
|
||||
.toolbar-controls select, .toolbar-controls input {
|
||||
height: 32px; background: #0f172a; color: white;
|
||||
border: 1px solid #475569; border-radius: 4px; padding: 0 8px; font-size: 12px;
|
||||
}
|
||||
|
||||
.toolbar-controls select:focus,
|
||||
.toolbar-controls input:focus {
|
||||
border-color: #38bdf8;
|
||||
}
|
||||
|
||||
/* Schicke Toolbar-Buttons */
|
||||
.btn-tool {
|
||||
height: 32px;
|
||||
padding: 0 12px;
|
||||
border-radius: 4px;
|
||||
font-size: 12px;
|
||||
font-weight: bold;
|
||||
transition: all 0.2s;
|
||||
display: flex;
|
||||
align-items: center;
|
||||
gap: 5px;
|
||||
height: 32px; padding: 0 12px; border-radius: 4px; font-size: 12px;
|
||||
font-weight: bold; display: flex; align-items: center; gap: 5px;
|
||||
}
|
||||
.save-btn { background-color: #059669; color: white; }
|
||||
.add-node-btn { background-color: #2563eb; color: white; }
|
||||
|
||||
.save-btn {
|
||||
background-color: #059669;
|
||||
color: white;
|
||||
}
|
||||
|
||||
.save-btn:hover { background-color: #10b981; }
|
||||
|
||||
.add-node-btn {
|
||||
background-color: #2563eb;
|
||||
color: white;
|
||||
}
|
||||
|
||||
.add-node-btn:hover { background-color: #3b82f6; }
|
||||
|
||||
#settings-status {
|
||||
color: #10b981;
|
||||
font-size: 11px;
|
||||
min-width: 80px;
|
||||
.node-badge {
|
||||
background: #334155; padding: 2px 5px; border-radius: 4px;
|
||||
font-size: 8px; text-transform: uppercase; font-weight: bold; color: #94a3b8;
|
||||
}
|
||||
|
||||
.markdown-content pre { background: #000; padding: 8px; border-radius: 4px; border: 1px solid #334155; overflow-x: auto; margin: 8px 0; }
|
||||
.markdown-content code { font-family: monospace; color: #4ade80; }
|
||||
.markdown-content p { margin-bottom: 8px; }
|
||||
</style>
|
||||
</head>
|
||||
<body class="bg-slate-950 text-white overflow-hidden">
|
||||
<body class="text-white">
|
||||
|
||||
<div class="top-toolbar">
|
||||
<div class="toolbar-title">🤖 KI-Orchestrator</div>
|
||||
|
||||
<div class="toolbar-title">🤖 J.A.R.V.I.S. AI Dashboard</div>
|
||||
<div class="toolbar-controls">
|
||||
<button onclick="addNode()" class="btn-tool add-node-btn">
|
||||
<span>+</span> Node hinzufügen
|
||||
</button>
|
||||
|
||||
<div class="h-6 w-px bg-slate-700 mx-2"></div>
|
||||
|
||||
<label for="ai-provider">Provider:</label>
|
||||
<button onclick="addNode()" class="btn-tool add-node-btn"><span>+</span> Node</button>
|
||||
<div class="h-6 w-px bg-slate-700 mx-1"></div>
|
||||
<label>Provider:</label>
|
||||
<select id="ai-provider" onchange="updateModelDropdown(false)">
|
||||
<option value="google">Google Gemini</option>
|
||||
<option value="openai">OpenAI</option>
|
||||
<option value="nvidia">NVIDIA</option>
|
||||
<option value="ollama">Ollama</option>
|
||||
</select>
|
||||
|
||||
<label for="ai-model">Modell:</label>
|
||||
<label>Modell:</label>
|
||||
<select id="ai-model"></select>
|
||||
|
||||
<div id="ollama-url-container">
|
||||
<label for="ollama-url">URL:</label>
|
||||
<input type="text" id="ollama-url" onblur="updateModelDropdown(false)" placeholder="http://192.168.x.x:11434/v1">
|
||||
<input type="text" id="ollama-url" onblur="updateModelDropdown(false)" placeholder="URL: http://...">
|
||||
</div>
|
||||
|
||||
<button class="btn-tool save-btn" onclick="saveSettings()">Speichern</button>
|
||||
<span id="settings-status"></span>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="flex h-screen">
|
||||
<div class="w-64 bg-slate-900 border-r border-slate-800 p-4 flex flex-col">
|
||||
<!-- <h2 class="text-xl font-bold mb-4 flex items-center gap-2">
|
||||
<span class="text-blue-400">📍</span> Nodes
|
||||
</h2> -->
|
||||
<div class="flex min-h-screen pt-[55px]">
|
||||
|
||||
<div class="w-64 bg-slate-900 border-r border-slate-800 p-4 flex flex-col fixed left-0 top-[55px] bottom-0 z-10">
|
||||
<div id="node-list" class="flex-1 overflow-y-auto space-y-2">
|
||||
{% for node in nodes %}
|
||||
<div class="p-3 bg-slate-800 rounded border border-slate-700 relative group" id="node-card-{{ node.id }}">
|
||||
<div class="flex justify-between items-start">
|
||||
<div class="text-sm font-bold">{{ node.name }}</div>
|
||||
<form action="/remove_node/{{ node.id }}" method="post">
|
||||
<div class="text-sm font-bold" id="node-name-{{ node.id }}">{{ node.name }}</div>
|
||||
<div class="flex gap-2">
|
||||
<button onclick="editNode({{ node.id }}, '{{ node.name }}', '{{ node.ip }}')" class="text-slate-500 hover:text-sky-400 text-xs">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="12" height="12" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"><path d="M17 3a2.85 2.83 0 1 1 4 4L7.5 20.5 2 22l1.5-5.5Z"/><path d="m15 5 4 4"/></svg>
|
||||
</button>
|
||||
<form action="/remove_node/{{ node.id }}" method="post" onsubmit="return confirm('Entfernen?')">
|
||||
<button type="submit" class="text-slate-500 hover:text-red-500 text-xs">✕</button>
|
||||
</form>
|
||||
</div>
|
||||
<div class="text-[10px] text-slate-500 font-mono flex justify-between items-center">
|
||||
{{ node.ip }}
|
||||
</div>
|
||||
<div class="text-[10px] text-slate-500 font-mono flex justify-between items-center mb-1">
|
||||
<span id="node-ip-{{ node.id }}">{{ node.ip }}</span>
|
||||
<button onclick="refreshNodeStatus({{ node.id }})" class="hover:text-blue-400">🔄</button>
|
||||
</div>
|
||||
<div class="flex gap-1 mb-2">
|
||||
<span id="os-{{ node.id }}" class="node-badge truncate max-w-[80px]">{{ node.os }}</span>
|
||||
<span id="arch-{{ node.id }}" class="node-badge">{{ node.arch }}</span>
|
||||
</div>
|
||||
<div class="mt-1 flex items-center gap-2">
|
||||
<span id="led-{{ node.id }}" class="h-2 w-2 rounded-full {% if node.status == 'Docker Aktiv' %} bg-blue-500 shadow-[0_0_8px_#3b82f6] {% else %} bg-yellow-500 {% endif %}"></span>
|
||||
<span id="led-{{ node.id }}" class="h-2 w-2 rounded-full {% if 'Aktiv' in node.status %} bg-blue-500 shadow-[0_0_8px_#3b82f6] {% else %} bg-yellow-500 {% endif %}"></span>
|
||||
<span id="badge-{{ node.id }}" class="text-[9px] uppercase font-mono text-slate-300">{{ node.status }}</span>
|
||||
</div>
|
||||
<button onclick="openTerminal('{{ node.ip }}')" class="mt-2 w-full text-[10px] bg-slate-700 hover:bg-slate-600 py-1 rounded transition-colors">Konsole öffnen</button>
|
||||
<button onclick="openTerminal('{{ node.ip }}')" class="mt-2 w-full text-[10px] bg-slate-700 hover:bg-slate-600 py-1 rounded">Konsole</button>
|
||||
</div>
|
||||
{% endfor %}
|
||||
</div>
|
||||
<button onclick="localStorage.removeItem('pi-orch-layout-v2'); location.reload();" class="mt-4 text-[10px] text-slate-500 hover:text-white uppercase text-center w-full">Layout Reset</button>
|
||||
<button onclick="localStorage.removeItem('pi-orch-layout-v2'); location.reload();" class="mt-4 text-[10px] text-slate-500 hover:text-white uppercase">Layout Reset</button>
|
||||
</div>
|
||||
|
||||
<div class="flex-1 overflow-y-auto">
|
||||
<div class="flex-1 ml-64 bg-slate-950 overflow-y-auto">
|
||||
<div class="grid-stack">
|
||||
<div class="grid-stack-item" gs-id="chat-widget" gs-w="6" gs-h="5" gs-x="0" gs-y="0">
|
||||
<div class="grid-stack-item-content">
|
||||
<div class="widget-header"><span>💬 KI Chat</span> <span>⠿</span></div>
|
||||
<div class="widget-header"><span>💬 J.A.R.V.I.S. - Chat</span> <span>⠿</span></div>
|
||||
<div id="chat-window" class="flex-1 p-4 overflow-y-auto text-sm space-y-2 bg-[#0f172a]/50"></div>
|
||||
<div class="p-2 border-t border-slate-700 flex bg-slate-800">
|
||||
<input id="user-input" type="text" class="flex-1 bg-slate-900 p-2 rounded text-xs outline-none border border-slate-700 focus:border-blue-500" placeholder="Nachricht (Enter zum Senden)..." autocomplete="off">
|
||||
<button onclick="sendMessage()" class="ml-2 bg-blue-600 hover:bg-blue-500 px-4 py-1 rounded text-xs transition-colors">Senden</button>
|
||||
<textarea id="user-input"
|
||||
class="flex-1 bg-slate-900 p-2 rounded text-xs outline-none border border-slate-700 focus:border-blue-500 resize-none overflow-y-hidden"
|
||||
placeholder="Frage eingeben... (Shift+Enter für neue Zeile)"
|
||||
rows="1"
|
||||
oninput="this.style.height = ''; this.style.height = this.scrollHeight + 'px'"></textarea>
|
||||
<button onclick="sendMessage()" class="ml-2 bg-blue-600 hover:bg-blue-500 px-4 py-1 rounded text-xs">Senden</button>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
@@ -235,7 +192,7 @@
|
||||
<div class="grid-stack-item" gs-id="logs-widget" gs-w="6" gs-h="5" gs-x="6" gs-y="0">
|
||||
<div class="grid-stack-item-content">
|
||||
<div class="widget-header"><span>📜 System Logs</span> <span>⠿</span></div>
|
||||
<div id="install-log">Warte auf Aufgaben...</div>
|
||||
<div id="install-log">Warte auf Daten...</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
@@ -253,10 +210,10 @@
|
||||
<div class="bg-slate-800 p-6 rounded-lg border border-slate-600 w-96">
|
||||
<h3 class="text-lg font-bold mb-4">Neuen Node hinzufügen</h3>
|
||||
<form action="/add_node" method="post" class="space-y-4">
|
||||
<input type="text" name="name" placeholder="Name (z.B. Pi-Worker-1)" class="w-full bg-slate-900 border border-slate-700 p-2 rounded text-sm" required>
|
||||
<input type="text" name="name" placeholder="Name" class="w-full bg-slate-900 border border-slate-700 p-2 rounded text-sm" required>
|
||||
<input type="text" name="ip" placeholder="IP Adresse" class="w-full bg-slate-900 border border-slate-700 p-2 rounded text-sm" required>
|
||||
<input type="text" name="user" placeholder="SSH Benutzer" class="w-full bg-slate-900 border border-slate-700 p-2 rounded text-sm" required>
|
||||
<input type="password" name="password" placeholder="SSH Passwort" class="w-full bg-slate-900 border border-slate-700 p-2 rounded text-sm" required>
|
||||
<input type="password" name="password" placeholder="Passwort" class="w-full bg-slate-900 border border-slate-700 p-2 rounded text-sm" required>
|
||||
<div class="flex justify-end gap-2 pt-2">
|
||||
<button type="button" onclick="closeAddNode()" class="px-4 py-2 text-sm text-slate-400">Abbrechen</button>
|
||||
<button type="submit" class="bg-blue-600 px-4 py-2 text-sm rounded font-bold">Koppeln</button>
|
||||
@@ -265,12 +222,66 @@
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div id="edit-node-modal" class="hidden fixed inset-0 bg-black/80 flex items-center justify-center z-[2000] p-4">
|
||||
<div class="bg-slate-800 p-6 rounded-lg border border-slate-600 w-full max-w-xl">
|
||||
<h3 class="text-lg font-bold mb-4 text-sky-400">Node-Konfiguration bearbeiten</h3>
|
||||
<input type="hidden" id="edit-node-id">
|
||||
|
||||
<div class="grid grid-cols-2 gap-4">
|
||||
<div class="space-y-3">
|
||||
<div>
|
||||
<label class="text-[10px] uppercase text-slate-400 font-bold">Anzeigename</label>
|
||||
<input type="text" id="edit-node-name" class="w-full bg-slate-900 border border-slate-700 p-2 rounded text-sm text-white focus:border-sky-500 outline-none">
|
||||
</div>
|
||||
<div>
|
||||
<label class="text-[10px] uppercase text-slate-400 font-bold">SSH Benutzer</label>
|
||||
<input type="text" id="edit-node-user" class="w-full bg-slate-900 border border-slate-700 p-2 rounded text-sm text-white focus:border-sky-500 outline-none">
|
||||
</div>
|
||||
<div>
|
||||
<label class="text-[10px] uppercase text-slate-400 font-bold">Betriebssystem</label>
|
||||
<input type="text" id="edit-node-os" class="w-full bg-slate-900 border border-slate-700 p-2 rounded text-sm text-white focus:border-sky-500 outline-none">
|
||||
</div>
|
||||
<div>
|
||||
<label class="text-[10px] uppercase text-slate-400 font-bold">Status-Text</label>
|
||||
<input type="text" id="edit-node-status" class="w-full bg-slate-900 border border-slate-700 p-2 rounded text-sm text-white focus:border-sky-500 outline-none">
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="space-y-3">
|
||||
<div>
|
||||
<label class="text-[10px] uppercase text-slate-400 font-bold">IP Adresse</label>
|
||||
<input type="text" id="edit-node-ip" class="w-full bg-slate-900 border border-slate-700 p-2 rounded text-sm text-white focus:border-sky-500 outline-none">
|
||||
</div>
|
||||
<div>
|
||||
<label class="text-[10px] uppercase text-slate-400 font-bold">Sudo Passwort</label>
|
||||
<input type="password" id="edit-node-password" class="w-full bg-slate-900 border border-slate-700 p-2 rounded text-sm text-white focus:border-sky-500 outline-none">
|
||||
</div>
|
||||
<div>
|
||||
<label class="text-[10px] uppercase text-slate-400 font-bold">Architektur</label>
|
||||
<input type="text" id="edit-node-arch" class="w-full bg-slate-900 border border-slate-700 p-2 rounded text-sm text-white focus:border-sky-500 outline-none">
|
||||
</div>
|
||||
<div class="flex items-center h-full pt-4">
|
||||
<label class="flex items-center gap-3 cursor-pointer group">
|
||||
<input type="checkbox" id="edit-node-docker" class="w-4 h-4 rounded border-slate-700 bg-slate-900 text-sky-600 focus:ring-sky-500">
|
||||
<span class="text-sm text-slate-300 group-hover:text-white transition-colors">Docker installiert</span>
|
||||
</label>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="flex justify-end gap-3 mt-6 pt-4 border-t border-slate-700">
|
||||
<button type="button" onclick="closeEditNode()" class="px-4 py-2 text-sm text-slate-400 hover:text-white transition-colors">Abbrechen</button>
|
||||
<button type="button" onclick="updateNode()" class="bg-sky-600 hover:bg-sky-500 px-6 py-2 text-sm rounded font-bold transition-all shadow-lg shadow-sky-900/20">Änderungen speichern</button>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<script>
|
||||
let currentSettings = {};
|
||||
let termDataDisposable = null;
|
||||
const modelOptions = { google: [], openai: [], ollama: [] }; // Wird vom Backend befüllt
|
||||
let term, fitAddon;
|
||||
|
||||
// 1. CHAT LOGIK (Mit Enter und Fokus)
|
||||
// 1. CHAT LOGIK
|
||||
function initChat(chatWs) {
|
||||
const input = document.getElementById('user-input');
|
||||
|
||||
@@ -279,33 +290,82 @@
|
||||
if(!msg) return;
|
||||
chatWs.send(msg);
|
||||
appendChat("Du", msg, "text-slate-400 font-bold");
|
||||
|
||||
// Input zurücksetzen
|
||||
input.value = '';
|
||||
input.focus(); // Fokus zurücksetzen
|
||||
input.style.height = 'auto'; // Höhe wieder einklappen
|
||||
input.focus();
|
||||
};
|
||||
|
||||
input.addEventListener('keydown', (e) => {
|
||||
if (e.key === 'Enter') {
|
||||
e.preventDefault();
|
||||
// Wenn ENTER gedrückt wird OHNE Shift -> Senden
|
||||
if (e.key === 'Enter' && !e.shiftKey) {
|
||||
e.preventDefault(); // Verhindert den Zeilenumbruch
|
||||
sendMessage();
|
||||
}
|
||||
// Wenn ENTER + Shift gedrückt wird -> Standardverhalten (neue Zeile) bleibt
|
||||
});
|
||||
}
|
||||
|
||||
// 2. NODE MODAL LOGIK
|
||||
window.addNode = function() {
|
||||
document.getElementById('add-node-modal').classList.remove('hidden');
|
||||
// 2. MODAL LOGIK
|
||||
window.addNode = () => document.getElementById('add-node-modal').classList.remove('hidden');
|
||||
window.closeAddNode = () => document.getElementById('add-node-modal').classList.add('hidden');
|
||||
|
||||
// EDIT LOGIK
|
||||
// Ruft alle Daten ab, bevor das Modal öffnet
|
||||
window.editNode = async (id) => {
|
||||
try {
|
||||
const res = await fetch(`/api/node/${id}`);
|
||||
const node = await res.json();
|
||||
|
||||
document.getElementById('edit-node-id').value = node.id;
|
||||
document.getElementById('edit-node-name').value = node.name;
|
||||
document.getElementById('edit-node-ip').value = node.ip;
|
||||
document.getElementById('edit-node-user').value = node.user;
|
||||
document.getElementById('edit-node-password').value = node.sudo_password;
|
||||
document.getElementById('edit-node-os').value = node.os;
|
||||
document.getElementById('edit-node-arch').value = node.arch;
|
||||
document.getElementById('edit-node-status').value = node.status;
|
||||
document.getElementById('edit-node-docker').checked = node.docker_installed === 1;
|
||||
|
||||
document.getElementById('edit-node-modal').classList.remove('hidden');
|
||||
} catch (e) { alert("Fehler beim Laden der Node-Daten"); }
|
||||
};
|
||||
window.closeAddNode = function() {
|
||||
document.getElementById('add-node-modal').classList.add('hidden');
|
||||
window.closeEditNode = () => document.getElementById('edit-node-modal').classList.add('hidden');
|
||||
|
||||
window.updateNode = async function() {
|
||||
const id = document.getElementById('edit-node-id').value;
|
||||
const payload = {
|
||||
name: document.getElementById('edit-node-name').value,
|
||||
ip: document.getElementById('edit-node-ip').value,
|
||||
user: document.getElementById('edit-node-user').value,
|
||||
sudo_password: document.getElementById('edit-node-password').value,
|
||||
os: document.getElementById('edit-node-os').value,
|
||||
arch: document.getElementById('edit-node-arch').value,
|
||||
status: document.getElementById('edit-node-status').value,
|
||||
docker_installed: document.getElementById('edit-node-docker').checked ? 1 : 0
|
||||
};
|
||||
|
||||
// 3. Node Status aktualisieren ---
|
||||
try {
|
||||
const res = await fetch(`/api/node/${id}`, {
|
||||
method: 'PUT',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify(payload)
|
||||
});
|
||||
if(res.ok) {
|
||||
location.reload(); // Einfachste Methode, um alle Badges zu aktualisieren
|
||||
}
|
||||
} catch (e) { alert("Fehler beim Speichern"); }
|
||||
};
|
||||
|
||||
// 3. ERWEITERTER REFRESH
|
||||
window.refreshNodeStatus = async function(nodeId) {
|
||||
const badge = document.getElementById(`badge-${nodeId}`);
|
||||
const led = document.getElementById(`led-${nodeId}`);
|
||||
const osEl = document.getElementById(`os-${nodeId}`);
|
||||
const archEl = document.getElementById(`arch-${nodeId}`);
|
||||
const card = document.getElementById(`node-card-${nodeId}`);
|
||||
|
||||
// Optisches Feedback: Karte leicht ausgrauen und Text ändern
|
||||
badge.textContent = "PRÜFE...";
|
||||
card.style.opacity = "0.7";
|
||||
|
||||
@@ -313,13 +373,13 @@
|
||||
const response = await fetch(`/refresh_status/${nodeId}`);
|
||||
const data = await response.json();
|
||||
|
||||
// Status Text aktualisieren
|
||||
badge.textContent = data.status;
|
||||
osEl.textContent = data.os;
|
||||
archEl.textContent = data.arch;
|
||||
|
||||
// LED Farbe dynamisch anpassen
|
||||
if (data.status === "Docker Aktiv") {
|
||||
if (data.status.includes("Aktiv")) {
|
||||
led.className = "h-2 w-2 rounded-full bg-blue-500 shadow-[0_0_8px_#3b82f6]";
|
||||
} else if (data.status === "Offline/Fehler") {
|
||||
} else if (data.status === "Offline") {
|
||||
led.className = "h-2 w-2 rounded-full bg-red-500 shadow-[0_0_8px_#ef4444]";
|
||||
} else {
|
||||
led.className = "h-2 w-2 rounded-full bg-yellow-500";
|
||||
@@ -340,7 +400,7 @@
|
||||
document.getElementById('ai-provider').value = currentSettings.provider;
|
||||
document.getElementById('ollama-url').value = currentSettings.ollama_base_url || "http://127.0.0.1:11434/v1";
|
||||
updateModelDropdown(true);
|
||||
} catch (e) { console.error("Load Settings Error", e); }
|
||||
} catch (e) {}
|
||||
}
|
||||
|
||||
async function updateModelDropdown(isInitialLoad = false) {
|
||||
@@ -353,7 +413,12 @@
|
||||
modelSelect.innerHTML = '<option>Lade...</option>';
|
||||
|
||||
try {
|
||||
const res = await fetch(`/api/models?provider=${provider}&url=${encodeURIComponent(ollamaUrl)}`);
|
||||
let queryUrl = `/api/models?provider=${provider}`;
|
||||
if (provider === "ollama") {
|
||||
queryUrl += `&url=${encodeURIComponent(ollamaUrl)}`;
|
||||
}
|
||||
|
||||
const res = await fetch(queryUrl);
|
||||
const data = await res.json();
|
||||
modelSelect.innerHTML = '';
|
||||
|
||||
@@ -364,10 +429,14 @@
|
||||
modelSelect.appendChild(opt);
|
||||
});
|
||||
|
||||
if (isInitialLoad && currentSettings) {
|
||||
const savedModel = currentSettings[`${provider}_model`];
|
||||
if (isInitialLoad && savedModel) modelSelect.value = savedModel;
|
||||
if (savedModel) modelSelect.value = savedModel;
|
||||
}
|
||||
}
|
||||
} catch (e) {
|
||||
modelSelect.innerHTML = '<option>Fehler beim Laden</option>';
|
||||
}
|
||||
} catch (e) { modelSelect.innerHTML = '<option>Fehler beim Laden</option>'; }
|
||||
}
|
||||
|
||||
async function saveSettings() {
|
||||
@@ -375,7 +444,6 @@
|
||||
const model = document.getElementById('ai-model').value;
|
||||
const ollama_base_url = document.getElementById('ollama-url').value;
|
||||
const statusEl = document.getElementById('settings-status');
|
||||
|
||||
try {
|
||||
await fetch('/api/settings', {
|
||||
method: 'POST',
|
||||
@@ -389,11 +457,12 @@
|
||||
|
||||
// 5. INITIALISIERUNG
|
||||
document.addEventListener('DOMContentLoaded', function() {
|
||||
// GridStack mit allen Resize-Handles
|
||||
var grid = GridStack.init({
|
||||
cellHeight: 80, margin: 10, float: true,
|
||||
cellHeight: 80,
|
||||
margin: 10,
|
||||
float: true,
|
||||
handle: '.widget-header',
|
||||
resizable: { handles: 'all' } // Erlaubt Resize an allen Seiten
|
||||
resizable: { handles: 'all' }
|
||||
});
|
||||
|
||||
const savedLayout = localStorage.getItem('pi-orch-layout-v2');
|
||||
@@ -402,11 +471,29 @@
|
||||
localStorage.setItem('pi-orch-layout-v2', JSON.stringify(grid.save(false)));
|
||||
});
|
||||
|
||||
// Terminal & WebSockets
|
||||
const term = new Terminal({ theme: { background: '#000' }, fontSize: 13, convertEol: true });
|
||||
const fitAddon = new FitAddon.FitAddon();
|
||||
// Terminal Setup
|
||||
term = new Terminal({ theme: { background: '#000' }, fontSize: 13, convertEol: true });
|
||||
fitAddon = new FitAddon.FitAddon();
|
||||
term.loadAddon(fitAddon);
|
||||
term.open(document.getElementById('terminal'));
|
||||
fitAddon.fit();
|
||||
|
||||
// WICHTIG: Wenn das Widget in GridStack vergrößert wird
|
||||
grid.on('resizestop', function(event, el) {
|
||||
if (el.getAttribute('gs-id') === 'term-widget') {
|
||||
setTimeout(() => {
|
||||
fitAddon.fit();
|
||||
// Sende neue Größe an das Backend
|
||||
if (window.termWs && window.termWs.readyState === WebSocket.OPEN) {
|
||||
window.termWs.send(JSON.stringify({
|
||||
type: "resize",
|
||||
cols: term.cols,
|
||||
rows: term.rows
|
||||
}));
|
||||
}
|
||||
}, 100);
|
||||
}
|
||||
});
|
||||
|
||||
const logWs = new WebSocket(`ws://${location.host}/ws/install_logs`);
|
||||
logWs.onmessage = (ev) => {
|
||||
@@ -417,12 +504,12 @@
|
||||
};
|
||||
|
||||
const chatWs = new WebSocket(`ws://${location.host}/ws/chat`);
|
||||
chatWs.onmessage = (ev) => appendChat("KI", ev.data, "text-blue-400 font-bold");
|
||||
chatWs.onmessage = (ev) => appendChat("J.A.R.V.I.S.", ev.data, "text-blue-400 font-bold");
|
||||
initChat(chatWs);
|
||||
|
||||
window.appendChat = function(user, msg, classes) {
|
||||
const win = document.getElementById('chat-window');
|
||||
let formattedMsg = (user === "KI") ? marked.parse(msg) : msg;
|
||||
let formattedMsg = (user === "J.A.R.V.I.S.") ? marked.parse(msg) : msg;
|
||||
win.innerHTML += `<div class="mb-4"><span class="${classes} block mb-1 text-[10px] uppercase">${user}</span><div class="markdown-content text-slate-300 text-sm">${formattedMsg}</div></div>`;
|
||||
win.scrollTop = win.scrollHeight;
|
||||
};
|
||||
@@ -431,10 +518,26 @@
|
||||
if (window.termWs) window.termWs.close();
|
||||
if (termDataDisposable) termDataDisposable.dispose();
|
||||
term.clear();
|
||||
fitAddon.fit(); // Einmal anpassen beim Öffnen
|
||||
|
||||
window.termWs = new WebSocket(`ws://${location.host}/ws/terminal/${ip}`);
|
||||
|
||||
window.termWs.onopen = () => {
|
||||
// Initiale Größe nach dem Verbinden senden
|
||||
window.termWs.send(JSON.stringify({
|
||||
type: "resize",
|
||||
cols: term.cols,
|
||||
rows: term.rows
|
||||
}));
|
||||
};
|
||||
|
||||
window.termWs.onmessage = (ev) => term.write(ev.data);
|
||||
|
||||
termDataDisposable = term.onData(data => {
|
||||
if (window.termWs?.readyState === WebSocket.OPEN) window.termWs.send(data);
|
||||
if (window.termWs?.readyState === WebSocket.OPEN) {
|
||||
// Normale Tastatureingaben senden wir direkt
|
||||
window.termWs.send(data);
|
||||
}
|
||||
});
|
||||
};
|
||||
|
||||
|
||||
0
workspace/NOTIZEN.md
Normal file
0
workspace/NOTIZEN.md
Normal file
0
workspace/TODO.md
Normal file
0
workspace/TODO.md
Normal file
Reference in New Issue
Block a user