# Orchestrator web app (Flask): node management over SSH, AI chat, web terminal.
import os
|
|
import json
|
|
import threading
|
|
import time
|
|
from flask import Flask, render_template, request, jsonify, redirect, url_for
|
|
from flask_sqlalchemy import SQLAlchemy
|
|
from flask_sock import Sock
|
|
import paramiko
|
|
import requests
|
|
|
|
# AI Provider Imports
|
|
import openai
|
|
import google.generativeai as genai
|
|
|
|
# --- APP / EXTENSION SETUP ---
app = Flask(__name__)
# SQLite file in the instance folder; NOTE(review): fine for single-process
# development use, not for concurrent production deployments.
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///orchestrator.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
sock = Sock(app)  # WebSocket support via flask-sock
|
# --- DATABASE MODELS ---

class Node(db.Model):
    """A managed remote machine reachable over SSH."""
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(100), nullable=False)  # display name
    ip = db.Column(db.String(50), nullable=False)     # SSH host/IP
    user = db.Column(db.String(50), nullable=False)   # SSH username
    # NOTE(review): password is stored in plaintext in the database --
    # consider key-based auth or encryption at rest before production use.
    password = db.Column(db.String(100), nullable=False)
    status = db.Column(db.String(50), default="Unbekannt")  # "Online"/"Offline"/"Unbekannt"
    arch = db.Column(db.String(20), default="N/A")          # `uname -m` output
    os_type = db.Column(db.String(50), default="linux")     # ID= from /etc/os-release
    has_docker = db.Column(db.Boolean, default=False)       # set by refresh_status
    has_vnc = db.Column(db.Boolean, default=False)          # listener on port 590x

    def to_dict(self):
        """Return every column value as a plain dict (for JSON responses).

        Includes the plaintext password column as well -- see NOTE above.
        """
        return {c.name: getattr(self, c.name) for c in self.__table__.columns}
class Settings(db.Model):
    """Singleton row holding the AI provider configuration."""
    id = db.Column(db.Integer, primary_key=True)
    provider = db.Column(db.String(50), default="google")  # "google" | "openai" | "ollama"
    google_model = db.Column(db.String(100), default="gemini-1.5-flash")
    openai_model = db.Column(db.String(100), default="gpt-4o-mini")
    ollama_model = db.Column(db.String(100), default="llama3")
    # OpenAI-compatible base URL of the local Ollama instance.
    ollama_base_url = db.Column(db.String(200), default="http://localhost:11434/v1")
# Create tables on import and seed the singleton Settings row on first start.
with app.app_context():
    db.create_all()
    if not Settings.query.first():
        db.session.add(Settings())
        db.session.commit()
# --- HELPER FUNCTIONS ---

def get_ssh_client(node):
    """Open and return a connected paramiko SSH client for *node*.

    Uses password auth with a 5 s connect timeout.
    NOTE(review): AutoAddPolicy silently trusts unknown host keys (MITM
    risk) -- acceptable for a lab tool, not for hostile networks.
    """
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(node.ip, username=node.user, password=node.password, timeout=5)
    return ssh
def run_ssh_cmd(node, cmd):
    """Run *cmd* on *node* via SSH and return its stripped stdout.

    Returns None when the connection or command dispatch fails; callers
    use None as "node unreachable". Note that a command that merely exits
    non-zero still returns its (possibly empty) stdout, not None.

    Fixes vs. original: the bare `except:` is narrowed to Exception, and
    the client is closed in a finally block -- previously an error after
    connect() leaked the SSH connection.
    """
    client = None
    try:
        client = get_ssh_client(node)
        _stdin, stdout, _stderr = client.exec_command(cmd)
        return stdout.read().decode().strip()
    except Exception:
        # Best-effort helper: swallow errors, signal failure via None.
        return None
    finally:
        if client is not None:
            client.close()
# --- ROUTES ---

@app.route('/')
def index():
    """Render the dashboard listing every registered node."""
    all_nodes = Node.query.all()
    return render_template('index.html', nodes=all_nodes)
@app.route('/add_node', methods=['POST'])
def add_node():
    """Create a node from the submitted form and return to the dashboard.

    Missing required fields make request.form[...] raise, which Flask
    turns into a 400 response.
    """
    form = request.form
    node = Node(
        name=form['name'],
        ip=form['ip'],
        user=form['user'],
        password=form['password'],
    )
    db.session.add(node)
    db.session.commit()
    return redirect(url_for('index'))
@app.route('/edit_node/<int:id>', methods=['POST'])
def edit_node(id):
    """Update an existing node; the password changes only when non-empty."""
    node = Node.query.get_or_404(id)
    form = request.form
    node.name = form.get('name', node.name)
    node.ip = form.get('ip', node.ip)
    node.user = form.get('user', node.user)
    new_password = form.get('password')
    if new_password:
        # Empty/absent password field means "keep the stored one".
        node.password = new_password
    db.session.commit()
    return redirect(url_for('index'))
@app.route('/remove_node/<int:id>', methods=['POST'])
def remove_node(id):
    """Delete the node with the given id and return to the dashboard."""
    doomed = Node.query.get_or_404(id)
    db.session.delete(doomed)
    db.session.commit()
    return redirect(url_for('index'))
@app.route('/refresh_status/<int:id>')
def refresh_status(id):
    """Probe a node over SSH, refresh its status fields, return it as JSON.

    Fixes vs. original: run_ssh_cmd() swallows every error and returns
    None, so the old try/except never fired and every node was reported
    "Online". Reachability is now decided by the first probe (`uname -m`).
    Likewise, a failed `docker ps` yields "" (empty stdout), not None, so
    the old `is not None` check marked Docker present on every reachable
    node; we now require non-empty output (docker prints a header line).
    """
    node = Node.query.get_or_404(id)

    arch = run_ssh_cmd(node, "uname -m")
    if arch is None:
        # SSH connection failed: mark offline, keep last-known details.
        node.status = "Offline"
    else:
        node.status = "Online"
        node.arch = arch or "N/A"
        # OS type from /etc/os-release (e.g. ID=ubuntu -> "ubuntu").
        os_info = run_ssh_cmd(node, "cat /etc/os-release | grep ^ID=")
        node.os_type = os_info.split('=')[1].replace('"', '') if os_info else "linux"
        # `docker ps` prints a header when docker works; empty output or
        # None means no usable docker daemon.
        node.has_docker = bool(run_ssh_cmd(node, "docker ps"))
        # VNC: any listener on ports 5900-5909.
        node.has_vnc = bool(run_ssh_cmd(node, "netstat -tuln | grep :590"))

    db.session.commit()
    return jsonify(node.to_dict())
# --- SETTINGS API ---

@app.route('/api/settings', methods=['GET', 'POST'])
def handle_settings():
    """Read (GET) or update (POST) the singleton AI provider settings.

    POST body (JSON): optional "provider", "ollama_base_url", "model".
    Fixes vs. original: an unknown provider no longer setattr()'s an
    arbitrary "<name>_model" attribute (now a 400), and an omitted
    "model" key no longer clobbers the stored model with None.
    """
    s = Settings.query.first()
    if request.method == 'POST':
        data = request.json or {}
        provider = data.get('provider', s.provider)
        if provider not in ("google", "openai", "ollama"):
            return jsonify({"status": "error",
                            "message": f"unknown provider: {provider}"}), 400
        s.provider = provider
        s.ollama_base_url = data.get('ollama_base_url', s.ollama_base_url)
        # Store the model for the (now current) provider, if one was sent.
        model = data.get('model')
        if model:
            setattr(s, f"{s.provider}_model", model)
        db.session.commit()
        return jsonify({"status": "success"})
    return jsonify({
        "provider": s.provider,
        "google_model": s.google_model,
        "openai_model": s.openai_model,
        "ollama_model": s.ollama_model,
        "ollama_base_url": s.ollama_base_url
    })
@app.route('/api/models')
def get_models():
    """List selectable model names for the requested provider.

    google/openai return static lists; ollama queries the live instance
    and falls back to a static list on any failure so the UI stays usable.
    Fixes vs. original: the bare `except:` is narrowed to the errors the
    request/JSON handling can actually raise.
    """
    provider = request.args.get('provider')
    if provider == "google":
        return jsonify({"models": ["gemini-1.5-flash", "gemini-1.5-pro"]})
    if provider == "openai":
        return jsonify({"models": ["gpt-4o-mini", "gpt-4o", "gpt-3.5-turbo"]})
    if provider == "ollama":
        url = request.args.get('url', 'http://localhost:11434/v1')
        try:
            # The OpenAI-compatible base ends in /v1; the native model
            # list lives at /api/tags instead.
            r = requests.get(url.replace('/v1', '/api/tags'), timeout=2)
            names = [m['name'] for m in r.json().get('models', [])]
            return jsonify({"models": names})
        except (requests.RequestException, ValueError, KeyError, TypeError):
            # Ollama unreachable or unexpected payload -> static fallback.
            return jsonify({"models": ["llama3", "mistral", "codellama"]})
    return jsonify({"models": []})
# --- WEBSOCKETS ---

@sock.route('/ws/install_logs')
def install_logs(ws):
    # Dummy log stream for demo purposes: polls the client with a 1 s
    # timeout. Real background processes could push their install logs
    # through this channel.
    while True:
        data = ws.receive(timeout=1)
        # Here real background processes could send their logs
        pass
@sock.route('/ws/chat')
def chat_handler(ws):
    """Relay chat messages to the configured AI provider.

    Settings are read once per connection; each incoming WebSocket
    message is answered with the provider's completion text (or a
    German error string on failure).
    """
    s = Settings.query.first()
    while True:
        msg = ws.receive()
        if not msg: break

        # AI dispatch -- default reply if no provider branch matches.
        response = "Fehler: Provider nicht konfiguriert"
        try:
            if s.provider == "google":
                # API keys come from the environment, not from Settings.
                genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
                model = genai.GenerativeModel(s.google_model)
                response = model.generate_content(msg).text
            elif s.provider == "openai":
                client = openai.OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
                res = client.chat.completions.create(
                    model=s.openai_model,
                    messages=[{"role": "user", "content": msg}]
                )
                response = res.choices[0].message.content
            elif s.provider == "ollama":
                # Ollama exposes an OpenAI-compatible chat endpoint.
                r = requests.post(f"{s.ollama_base_url}/chat/completions", json={
                    "model": s.ollama_model,
                    "messages": [{"role": "user", "content": msg}],
                    "stream": False
                })
                response = r.json()['choices'][0]['message']['content']
        except Exception as e:
            # Boundary handler: surface provider errors to the chat
            # client instead of killing the socket.
            response = f"KI-Fehler: {str(e)}"

        ws.send(response)
@sock.route('/ws/terminal/<ip>')
def terminal_handler(ws, ip):
    """Bridge a browser WebSocket to an interactive SSH shell on *ip*.

    A daemon thread pumps shell output to the socket while this handler
    forwards incoming WebSocket messages as shell input. The reader
    thread dies with the process (daemon=True); NOTE(review): it keeps
    polling after the socket closes until ws.send() fails.
    """
    node = Node.query.filter_by(ip=ip).first()
    if not node: return

    try:
        client = get_ssh_client(node)
        chan = client.invoke_shell()

        def split_reader():
            # Reader thread: shell stdout -> WebSocket.
            while True:
                if chan.recv_ready():
                    out = chan.recv(1024).decode()
                    ws.send(out)
                time.sleep(0.01)  # avoid busy-spinning on an idle shell

        threading.Thread(target=split_reader, daemon=True).start()

        # Writer loop: WebSocket -> shell stdin; ends when client closes.
        while True:
            cmd = ws.receive()
            if not cmd: break
            chan.send(cmd)

    except Exception as e:
        # Report connection/shell failures inline in the terminal view.
        ws.send(f"\r\n[SSH FEHLER]: {str(e)}\r\n")
if __name__ == '__main__':
    # Make sure API keys are present in the environment (or hardcode here)
    # os.environ["GOOGLE_API_KEY"] = "YOUR_KEY"
    # NOTE(review): debug=True combined with host 0.0.0.0 exposes the
    # Werkzeug debugger to the whole network -- development only.
    app.run(host='0.0.0.0', port=5000, debug=True)