|
@@ -758,18 +758,54 @@ def api_upload_finish(upload_id):
|
|
|
["sudo", "rsync", tmp_archive, dest_path],
|
|
["sudo", "rsync", tmp_archive, dest_path],
|
|
|
capture_output=True, text=True,
|
|
capture_output=True, text=True,
|
|
|
)
|
|
)
|
|
|
- shutil.rmtree(tmp_dir, ignore_errors=True)
|
|
|
|
|
|
|
|
|
|
if result.returncode != 0:
|
|
if result.returncode != 0:
|
|
|
upload.status = "error"
|
|
upload.status = "error"
|
|
|
db.session.commit()
|
|
db.session.commit()
|
|
|
|
|
+ shutil.rmtree(tmp_dir, ignore_errors=True)
|
|
|
return jsonify({"error": result.stderr.strip()}), 500
|
|
return jsonify({"error": result.stderr.strip()}), 500
|
|
|
|
|
|
|
|
|
|
+ # .info.json optionnel transmis dans le body JSON
|
|
|
|
|
+ data = request.get_json(silent=True) or {}
|
|
|
|
|
+ info_json_str = data.get("info_json")
|
|
|
|
|
+ if info_json_str:
|
|
|
|
|
+ archive_base = upload.filename[:-4] if upload.filename.endswith(".tar") else upload.filename
|
|
|
|
|
+ tmp_info = os.path.join(tmp_dir, archive_base + ".info.json")
|
|
|
|
|
+ with open(tmp_info, "w") as f:
|
|
|
|
|
+ f.write(info_json_str)
|
|
|
|
|
+ subprocess.run(
|
|
|
|
|
+ ["sudo", "rsync", tmp_info,
|
|
|
|
|
+ os.path.join(backup_dir, archive_base + ".info.json")],
|
|
|
|
|
+ capture_output=True,
|
|
|
|
|
+ )
|
|
|
|
|
+
|
|
|
|
|
+ shutil.rmtree(tmp_dir, ignore_errors=True)
|
|
|
upload.status = "complete"
|
|
upload.status = "complete"
|
|
|
db.session.commit()
|
|
db.session.commit()
|
|
|
return jsonify({"status": "complete", "filename": upload.filename})
|
|
return jsonify({"status": "complete", "filename": upload.filename})
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@app.route("/api/v1/archives/<name>/download")
def api_archive_download(name):
    """Stream an archive as a raw download (used for inter-instance pulls).

    The archive lives in a root-owned backup directory, so it is read
    through ``sudo cat``.  The previous implementation buffered the whole
    tarball in memory (``capture_output=True``); archives can be several
    GB, so the subprocess output is now streamed chunk by chunk instead.
    NOTE(review): with streaming, a mid-read ``sudo cat`` failure can only
    truncate the response — the 200 status is already sent; the old 500
    path existed only for whole-buffer reads.
    """
    backup_dir = app.config["YUNOHOST_BACKUP_DIR"]
    archive_path = os.path.join(backup_dir, name + ".tar")

    from jobs.utils import sudo_exists
    if not sudo_exists(archive_path):
        return jsonify({"error": "archive introuvable"}), 404

    from flask import Response

    proc = subprocess.Popen(["sudo", "cat", archive_path], stdout=subprocess.PIPE)

    def generate():
        # Yield 1 MiB chunks so memory stays flat regardless of archive size.
        try:
            while True:
                chunk = proc.stdout.read(1024 * 1024)
                if not chunk:
                    break
                yield chunk
        finally:
            proc.stdout.close()
            proc.wait()

    return Response(
        generate(),
        mimetype="application/octet-stream",
        headers={"Content-Disposition": f'attachment; filename="{name}.tar"'},
    )
|
|
|
@app.route("/api/v1/archives/upload/<upload_id>", methods=["DELETE"])
|
|
@app.route("/api/v1/archives/upload/<upload_id>", methods=["DELETE"])
|
|
|
def api_upload_cancel(upload_id):
|
|
def api_upload_cancel(upload_id):
|
|
|
upload = db.get_or_404(Upload, upload_id)
|
|
upload = db.get_or_404(Upload, upload_id)
|
|
@@ -841,6 +877,176 @@ def remote_instance_sync(inst_id):
|
|
|
return redirect(url_for("remote_instances_list"))
|
|
return redirect(url_for("remote_instances_list"))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@app.route("/network")
def dashboard_network():
    """Render the federation dashboard: local jobs plus known remote instances."""
    rows = []
    for job in Job.query.order_by(Job.name).all():
        # Latest run (if any) supplies the status columns for this job.
        latest = (Run.query.filter_by(job_id=job.id)
                  .order_by(Run.started_at.desc())
                  .first())
        rows.append(_JobRow(
            job_id=job.id,
            name=job.name,
            type=job.type,
            last_run_at=getattr(latest, "started_at", None),
            last_status=getattr(latest, "status", None),
            last_archive_name=getattr(latest, "archive_name", None),
            last_size_bytes=getattr(latest, "size_bytes", None),
        ))
    remote = RemoteInstance.query.order_by(RemoteInstance.name).all()
    return render_template(
        "dashboard_network.html",
        local_jobs_data=rows,
        instances=remote,
        instances_for_push=remote,
    )
|
|
|
|
|
@app.route("/network/sync-all", methods=["POST"])
def network_sync_all():
    """Synchronise every registered remote instance, collecting per-instance failures."""
    from federation.client import sync_instance

    targets = RemoteInstance.query.all()
    failures = []
    for target in targets:
        try:
            sync_instance(target)
        except Exception as exc:
            # Keep going: one unreachable instance must not abort the sweep.
            failures.append(f"{target.name}: {exc}")

    if failures:
        flash("Synchronisation partielle — " + " | ".join(failures), "error")
    else:
        flash(f"{len(targets)} instance(s) synchronisée(s).", "success")
    return redirect(url_for("dashboard_network"))
|
|
|
|
|
@app.route("/remote-instances/<int:inst_id>/run-job/<int:job_id>", methods=["POST"])
def remote_job_run(inst_id, job_id):
    """Trigger a backup job on a remote instance through the federation API."""
    from federation.client import FederationClient

    inst = db.get_or_404(RemoteInstance, inst_id)
    try:
        FederationClient(inst).run_job(job_id)
    except Exception as exc:
        flash(f"Impossible de lancer le job sur « {inst.name} » : {exc}", "error")
    else:
        flash(f"Job déclenché sur « {inst.name} ».", "success")
    return redirect(url_for("dashboard_network"))
|
|
|
|
|
@app.route("/archives/<path:archive_name>/push/<int:inst_id>", methods=["POST"])
def archive_push(archive_name, inst_id):
    """Kick off a background push of a local archive to a remote instance."""
    inst = db.get_or_404(RemoteInstance, inst_id)
    # The transfer can take a long time, so it runs in a daemon thread.
    worker = threading.Thread(
        target=_do_push_archive,
        args=(archive_name, inst.id),
        daemon=True,
    )
    worker.start()
    flash(f"Envoi de « {archive_name} » vers « {inst.name} » démarré en arrière-plan.", "success")
    return redirect(request.referrer or url_for("index"))
|
|
|
|
|
@app.route("/remote-instances/<int:inst_id>/pull/<path:archive_name>", methods=["POST"])
def archive_pull(inst_id, archive_name):
    """Kick off a background download of a remote archive into the local backup dir."""
    inst = db.get_or_404(RemoteInstance, inst_id)
    # Long-running transfer: delegate to a daemon thread and return immediately.
    worker = threading.Thread(
        target=_do_pull_archive,
        args=(archive_name, inst.id),
        daemon=True,
    )
    worker.start()
    flash(f"Rapatriement de « {archive_name} » depuis « {inst.name} » démarré.", "success")
    return redirect(url_for("dashboard_network"))
|
|
|
|
|
def _do_push_archive(archive_name, inst_id):
    """Push a local archive to a remote instance over the chunked-upload HTTP API.

    Runs in a background daemon thread.  The archive sits in a root-owned
    backup directory, so it is first copied (via ``sudo rsync``) into a
    private temporary file the app can read, hashed with SHA-256, then
    streamed in 50 MiB chunks.  The optional ``.info.json`` metadata file
    is forwarded with the final "finish" call.  All errors are logged, not
    raised — the caller is a fire-and-forget thread.
    """
    import hashlib as _hashlib
    import tempfile
    from federation.client import FederationClient
    from jobs.utils import sudo_exists

    with app.app_context():
        inst = db.session.get(RemoteInstance, inst_id)
        if inst is None:
            # Instance deleted between the request and the thread starting.
            app.logger.error(f"Push {archive_name} : instance {inst_id} introuvable")
            return
        backup_dir = app.config["YUNOHOST_BACKUP_DIR"]
        archive_path = os.path.join(backup_dir, archive_name + ".tar")

        # Private temp file instead of a predictable /tmp name: avoids
        # collision/symlink issues and tolerates archive names containing
        # "/" (allowed by the <path:> route converter).
        fd, tmp_path = tempfile.mkstemp(prefix="backupmanager_push_", suffix=".tar")
        os.close(fd)
        try:
            result = subprocess.run(
                ["sudo", "rsync", archive_path, tmp_path],
                capture_output=True, text=True,
            )
            if result.returncode != 0:
                raise RuntimeError(f"Copie locale échouée : {result.stderr.strip()}")

            total_size = os.path.getsize(tmp_path)
            sha256 = _hashlib.sha256()
            chunk_size = 50 * 1024 * 1024
            with open(tmp_path, "rb") as f:
                while True:
                    data = f.read(65536)
                    if not data:
                        break
                    sha256.update(data)
            checksum = sha256.hexdigest()

            client = FederationClient(inst)
            upload_info = client.upload_start(archive_name + ".tar", total_size, checksum, chunk_size)
            upload_id = upload_info["upload_id"]

            # Stream the archive chunk by chunk (50 MiB per request).
            with open(tmp_path, "rb") as f:
                n = 0
                while True:
                    data = f.read(chunk_size)
                    if not data:
                        break
                    client.upload_chunk(upload_id, n, data)
                    n += 1

            # Finish, forwarding the YunoHost .info.json metadata if present.
            info_json_content = None
            info_path = os.path.join(backup_dir, archive_name + ".info.json")
            if sudo_exists(info_path):
                r = subprocess.run(["sudo", "cat", info_path], capture_output=True)
                if r.returncode == 0:
                    info_json_content = r.stdout.decode("utf-8", errors="replace")

            client.upload_finish_with_info(upload_id, info_json_content)
            app.logger.info(f"Push {archive_name} → {inst.name} OK")

        except Exception as exc:
            app.logger.error(f"Push {archive_name} → {inst.name} échoué : {exc}")
        finally:
            # Always reclaim the temp copy, even on failure.
            if os.path.exists(tmp_path):
                os.unlink(tmp_path)
|
|
|
|
|
def _do_pull_archive(archive_name, inst_id):
    """Pull an archive from a remote instance into the local backup directory.

    Runs in a background daemon thread.  The archive is fetched through the
    federation client, written to a private temporary file, then installed
    into the root-owned backup directory via ``sudo rsync``.  Errors are
    logged, not raised.  NOTE(review): ``download_archive`` appears to
    return the full payload in memory — confirm against federation.client
    before pulling very large archives.
    """
    import tempfile
    from federation.client import FederationClient

    with app.app_context():
        inst = db.session.get(RemoteInstance, inst_id)
        if inst is None:
            # Instance deleted between the request and the thread starting.
            app.logger.error(f"Pull {archive_name} : instance {inst_id} introuvable")
            return
        backup_dir = app.config["YUNOHOST_BACKUP_DIR"]
        tmp_path = None
        try:
            client = FederationClient(inst)
            archive_bytes = client.download_archive(archive_name)

            # Private temp file instead of a predictable /tmp name (which
            # also broke when archive_name contained "/" from the <path:>
            # route converter).
            fd, tmp_path = tempfile.mkstemp(prefix="backupmanager_pull_", suffix=".tar")
            with os.fdopen(fd, "wb") as f:
                f.write(archive_bytes)

            dest = os.path.join(backup_dir, archive_name + ".tar")
            subprocess.run(["sudo", "rsync", tmp_path, dest], check=True)
            app.logger.info(f"Pull {archive_name} ← {inst.name} OK")
        except Exception as exc:
            app.logger.error(f"Pull {archive_name} ← {inst.name} échoué : {exc}")
        finally:
            # Cleanup moved to finally: the old code leaked the temp file
            # when rsync (or the download) failed.
            if tmp_path and os.path.exists(tmp_path):
                os.unlink(tmp_path)
|
|
|
|
|
+class _JobRow:
|
|
|
|
|
+ """DTO pour le dashboard réseau (local et distant)."""
|
|
|
|
|
+ def __init__(self, job_id, name, type, last_run_at, last_status,
|
|
|
|
|
+ last_archive_name, last_size_bytes):
|
|
|
|
|
+ self.job_id = job_id
|
|
|
|
|
+ self.name = name
|
|
|
|
|
+ self.type = type
|
|
|
|
|
+ self.last_run_at = last_run_at
|
|
|
|
|
+ self.last_status = last_status
|
|
|
|
|
+ self.last_archive_name = last_archive_name
|
|
|
|
|
+ self.last_size_bytes = last_size_bytes
|
|
|
|
|
+
|
|
|
|
|
+ @property
|
|
|
|
|
+ def size_human(self):
|
|
|
|
|
+ from db import _size_human
|
|
|
|
|
+ return _size_human(self.last_size_bytes)
|
|
|
|
|
+
|
|
|
|
|
+
|
|
|
def _save_remote_instance(inst):
|
|
def _save_remote_instance(inst):
|
|
|
f = request.form
|
|
f = request.form
|
|
|
name = f.get("name", "").strip()
|
|
name = f.get("name", "").strip()
|