# db_dump.py — MySQL/PostgreSQL dump and restore jobs for BackupManager.
import json
import os
import shutil
import subprocess
import tarfile
import tempfile
import time
from datetime import datetime, timezone
  8. def run_db_dump(job, instance, backup_dir):
  9. """Point d'entrée commun mysql et postgresql."""
  10. if job.type == "mysql":
  11. return _run_mysql(job, instance, backup_dir)
  12. elif job.type == "postgresql":
  13. return _run_postgresql(job, instance, backup_dir)
  14. raise ValueError(f"Type inconnu pour db_dump : {job.type}")
  15. # ---------------------------------------------------------------------------
  16. # MySQL
  17. # ---------------------------------------------------------------------------
  18. def _run_mysql(job, instance, backup_dir):
  19. from flask import current_app
  20. cfg = json.loads(job.config_json or "{}")
  21. dbname = cfg.get("database", "")
  22. if not dbname:
  23. raise ValueError("Nom de base de données manquant dans la configuration du job.")
  24. archive_name = _archive_name(instance, "mysql", dbname)
  25. _abort_if_exists(archive_name, backup_dir)
  26. with tempfile.TemporaryDirectory() as tmpdir:
  27. dump_path = os.path.join(tmpdir, f"{dbname}.sql")
  28. result = subprocess.run(
  29. [
  30. "sudo", "mysqldump",
  31. "--single-transaction",
  32. "--routines",
  33. "--triggers",
  34. "--result-file", dump_path,
  35. dbname,
  36. ],
  37. capture_output=True,
  38. text=True,
  39. timeout=7200,
  40. )
  41. log = (result.stdout + result.stderr).strip()
  42. if result.returncode != 0:
  43. raise RuntimeError(f"mysqldump a échoué (code {result.returncode}) :\n{log}")
  44. _write_tar(tmpdir, dump_path, dbname, archive_name, backup_dir, job,
  45. instance, current_app.config.get("INSTANCE_URL", ""))
  46. return archive_name, log or "mysqldump terminé sans sortie."
  47. # ---------------------------------------------------------------------------
  48. # PostgreSQL
  49. # ---------------------------------------------------------------------------
  50. def _run_postgresql(job, instance, backup_dir):
  51. from flask import current_app
  52. cfg = json.loads(job.config_json or "{}")
  53. dbname = cfg.get("database", "")
  54. if not dbname:
  55. raise ValueError("Nom de base de données manquant dans la configuration du job.")
  56. archive_name = _archive_name(instance, "postgresql", dbname)
  57. _abort_if_exists(archive_name, backup_dir)
  58. with tempfile.TemporaryDirectory() as tmpdir:
  59. dump_path = os.path.join(tmpdir, f"{dbname}.sql")
  60. # pg_dump doit tourner en tant qu'utilisateur postgres
  61. result = subprocess.run(
  62. ["sudo", "-u", "postgres", "pg_dump", "--format=plain", dbname],
  63. capture_output=True,
  64. timeout=7200,
  65. )
  66. if result.returncode != 0:
  67. log = result.stderr.decode("utf-8", errors="replace").strip()
  68. raise RuntimeError(f"pg_dump a échoué (code {result.returncode}) :\n{log}")
  69. with open(dump_path, "wb") as f:
  70. f.write(result.stdout)
  71. log = result.stderr.decode("utf-8", errors="replace").strip()
  72. _write_tar(tmpdir, dump_path, dbname, archive_name, backup_dir, job,
  73. instance, current_app.config.get("INSTANCE_URL", ""))
  74. return archive_name, log or "pg_dump terminé sans sortie."
  75. # ---------------------------------------------------------------------------
  76. # Helpers partagés
  77. # ---------------------------------------------------------------------------
  78. def _archive_name(instance, db_type, dbname):
  79. date_str = datetime.utcnow().strftime("%Y%m%d")
  80. return f"{instance}_{db_type}_{dbname}_{date_str}"
  81. def _abort_if_exists(archive_name, backup_dir):
  82. from jobs.utils import sudo_exists
  83. path = os.path.join(backup_dir, archive_name + ".tar")
  84. if sudo_exists(path):
  85. raise RuntimeError(
  86. f"L'archive {archive_name}.tar existe déjà. "
  87. "Supprimez-la ou attendez le prochain cycle."
  88. )
  89. # ---------------------------------------------------------------------------
  90. # Restore
  91. # ---------------------------------------------------------------------------
  92. def restore_db_dump(archive_name, backup_dir):
  93. """Restauration d'une base MySQL ou PostgreSQL depuis une archive BackupManager."""
  94. archive_path = os.path.join(backup_dir, archive_name + ".tar")
  95. from jobs.utils import sudo_exists
  96. if not sudo_exists(archive_path):
  97. raise FileNotFoundError(f"Archive introuvable : {archive_path}")
  98. info = _read_backup_info(archive_path)
  99. db_type = info.get("type")
  100. dbname = info.get("database", "")
  101. if not dbname:
  102. raise ValueError("Nom de base de données introuvable dans backup_info.json.")
  103. with tempfile.TemporaryDirectory() as tmpdir:
  104. dump_path = os.path.join(tmpdir, f"{dbname}.sql")
  105. # Extraction du dump depuis l'archive
  106. with tarfile.open(archive_path) as tar:
  107. member = tar.getmember(f"db/{dbname}.sql")
  108. with tar.extractfile(member) as src, open(dump_path, "wb") as dst:
  109. dst.write(src.read())
  110. if db_type == "mysql":
  111. return _restore_mysql(dbname, dump_path)
  112. elif db_type == "postgresql":
  113. return _restore_postgresql(dbname, dump_path)
  114. else:
  115. raise ValueError(f"Type de base inconnu dans l'archive : {db_type}")
  116. def _restore_mysql(dbname, dump_path):
  117. log_lines = []
  118. # Suppression + recréation propre de la base
  119. result = subprocess.run(
  120. ["sudo", "mysql", "-e",
  121. f"DROP DATABASE IF EXISTS `{dbname}`; CREATE DATABASE `{dbname}` CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;"],
  122. capture_output=True, text=True, timeout=60,
  123. )
  124. if result.returncode != 0:
  125. raise RuntimeError(f"Impossible de recréer la base MySQL '{dbname}' :\n{result.stderr.strip()}")
  126. log_lines.append(f"Base MySQL '{dbname}' recréée.")
  127. # Restauration du dump
  128. with open(dump_path, "rb") as f:
  129. result = subprocess.run(
  130. ["sudo", "mysql", dbname],
  131. stdin=f,
  132. capture_output=True,
  133. timeout=7200,
  134. )
  135. log = result.stderr.decode("utf-8", errors="replace").strip()
  136. if result.returncode != 0:
  137. raise RuntimeError(f"mysql restore a échoué (code {result.returncode}) :\n{log}")
  138. log_lines.append(f"Dump restauré dans '{dbname}'.")
  139. if log:
  140. log_lines.append(log)
  141. return "\n".join(log_lines)
  142. def _restore_postgresql(dbname, dump_path):
  143. log_lines = []
  144. # Terminer les connexions actives puis drop + recreate
  145. subprocess.run(
  146. ["sudo", "-u", "postgres", "psql", "-c",
  147. f"SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE datname = '{dbname}' AND pid <> pg_backend_pid();"],
  148. capture_output=True, timeout=30,
  149. )
  150. subprocess.run(
  151. ["sudo", "-u", "postgres", "dropdb", "--if-exists", dbname],
  152. capture_output=True, timeout=60,
  153. )
  154. result = subprocess.run(
  155. ["sudo", "-u", "postgres", "createdb", dbname],
  156. capture_output=True, text=True, timeout=60,
  157. )
  158. if result.returncode != 0:
  159. raise RuntimeError(f"Impossible de recréer la base PostgreSQL '{dbname}' :\n{result.stderr.strip()}")
  160. log_lines.append(f"Base PostgreSQL '{dbname}' recréée.")
  161. # Restauration du dump
  162. with open(dump_path, "rb") as f:
  163. result = subprocess.run(
  164. ["sudo", "-u", "postgres", "psql", "-d", dbname, "-v", "ON_ERROR_STOP=1"],
  165. stdin=f,
  166. capture_output=True,
  167. timeout=7200,
  168. )
  169. log = result.stderr.decode("utf-8", errors="replace").strip()
  170. if result.returncode != 0:
  171. raise RuntimeError(f"psql restore a échoué (code {result.returncode}) :\n{log}")
  172. log_lines.append(f"Dump restauré dans '{dbname}'.")
  173. if log:
  174. log_lines.append(log)
  175. return "\n".join(log_lines)
  176. def _read_backup_info(archive_path):
  177. try:
  178. with tarfile.open(archive_path) as tar:
  179. member = tar.extractfile("backup_info.json")
  180. if member:
  181. return json.loads(member.read())
  182. except Exception:
  183. pass
  184. return {}
  185. def _write_tar(tmpdir, dump_path, dbname, archive_name, backup_dir, job, instance, instance_url):
  186. """Crée le .tar dans backup_dir et le .info.json YunoHost à côté."""
  187. archive_path = os.path.join(backup_dir, archive_name + ".tar")
  188. # backup_info.json embarqué dans le tar (métadonnées BackupManager)
  189. import json as _json
  190. info = {
  191. "instance_name": instance,
  192. "instance_url": instance_url,
  193. "type": job.type,
  194. "database": dbname,
  195. "created_at": datetime.utcnow().isoformat(),
  196. "backupmanager_version": "1.0.0",
  197. }
  198. info_path = os.path.join(tmpdir, "backup_info.json")
  199. with open(info_path, "w") as f:
  200. _json.dump(info, f, indent=2)
  201. with tarfile.open(archive_path, "w") as tar:
  202. tar.add(dump_path, arcname=f"db/{dbname}.sql")
  203. tar.add(info_path, arcname="backup_info.json")
  204. # .info.json YunoHost (hors tar, requis pour listing webadmin)
  205. size = os.path.getsize(archive_path)
  206. ynh_info = {
  207. "created_at": int(time.time()),
  208. "description": f"BackupManager: {job.type} {dbname}",
  209. "size": size,
  210. "from_before_upgrade": False,
  211. "apps": {},
  212. "system": {},
  213. }
  214. ynh_info_path = os.path.join(backup_dir, archive_name + ".info.json")
  215. with open(ynh_info_path, "w") as f:
  216. _json.dump(ynh_info, f, indent=2)