Initial upload: DotMigrate dotfiles migration tool with CI/CD
Some checks failed
CI / test (push) Has been cancelled
CI / release (push) Has been cancelled

This commit is contained in:
2026-02-04 09:53:03 +00:00
parent 79f03a66ab
commit a8123c83de

218
src/backup/mod.rs Normal file
View File

@@ -0,0 +1,218 @@
use crate::detect::DetectedDotfile;
use chrono::{DateTime, Local};
use std::fs::{self, File};
use std::io::{self, Write, BufReader, BufWriter};
use std::path::{Path, PathBuf};
use tar::Builder;
/// In-memory summary of one completed (or discovered) backup run.
///
/// Produced by `BackupManager::create_backup` and reconstructed by
/// `BackupManager::list_backups`; not serialized itself (see
/// `BackupManifest` for the on-disk form).
#[derive(Debug)]
pub struct BackupInfo {
/// Local wall-clock time the backup was created (or, for listed backups,
/// parsed back from the manifest's timestamp string).
pub timestamp: DateTime<Local>,
/// Path to the `backup_<timestamp>.tar.gz` archive.
pub archive_path: PathBuf,
/// Path to the sibling `backup_<timestamp>.yml` manifest.
pub manifest_path: PathBuf,
/// Number of dotfiles recorded in the manifest.
pub dotfile_count: usize,
/// Sum of the archived file sizes, in bytes.
pub total_size: u64,
}
/// On-disk manifest written as YAML next to each archive
/// (`backup_<timestamp>.yml`), listing every file the archive contains.
#[derive(Debug, serde::Serialize, serde::Deserialize)]
pub struct BackupManifest {
/// Timestamp formatted as `%Y-%m-%d_%H-%M-%S` — matches the archive and
/// manifest file names.
pub timestamp: String,
/// One entry per dotfile placed in the archive.
pub dotfiles: Vec<BackupEntry>,
/// Total size of all archived files, in bytes.
pub total_size: u64,
}
/// One file's record inside a `BackupManifest`.
#[derive(Debug, serde::Serialize, serde::Deserialize)]
pub struct BackupEntry {
/// Absolute path the file was backed up from.
pub original_path: PathBuf,
/// Name the file was stored under inside the tar archive.
pub archived_path: PathBuf,
/// File size in bytes at archive time (from `File::metadata`).
pub size: u64,
/// Content hash carried over from detection, if one was computed.
pub hash: Option<String>,
}
/// Creates, lists, restores, and prunes dotfile backups under a single
/// backup directory.
pub struct BackupManager {
// Directory that holds every `backup_*.tar.gz` / `backup_*.yml` pair;
// created on construction if missing.
backup_dir: PathBuf,
}
impl BackupManager {
pub fn new(backup_dir: &PathBuf) -> Result<Self, std::io::Error> {
if !backup_dir.exists() {
fs::create_dir_all(backup_dir)?;
}
Ok(Self {
backup_dir: backup_dir.to_path_buf(),
})
}
pub fn create_backup(&self, dotfiles: &[DetectedDotfile]) -> Result<BackupInfo, io::Error> {
let timestamp = Local::now();
let timestamp_str = timestamp.format("%Y-%m-%d_%H-%M-%S").to_string();
let archive_name = format!("backup_{}.tar.gz", timestamp_str);
let archive_path = self.backup_dir.join(&archive_name);
let manifest_name = format!("backup_{}.yml", timestamp_str);
let manifest_path = self.backup_dir.join(&manifest_name);
let archive_file = File::create(&archive_path)?;
let encoder = miniz_oxide::deflate::GzEncoder::new(
BufWriter::new(archive_file),
miniz_oxide::deflate::Compression::default()
);
let mut archive = Builder::new(encoder);
let mut manifest = BackupManifest {
timestamp: timestamp_str.clone(),
dotfiles: Vec::new(),
total_size: 0,
};
let mut total_size: u64 = 0;
for dotfile in dotfiles {
if !dotfile.path.exists() {
continue;
}
let entry = self.add_file_to_archive(&mut archive, dotfile)?;
manifest.dotfiles.push(entry);
total_size += dotfile.size;
}
let mut encoder = archive.into_inner()?;
encoder.finish()?;
manifest.total_size = total_size;
let manifest_content = serde_yaml::to_string(&manifest)?;
fs::write(&manifest_path, manifest_content)?;
Ok(BackupInfo {
timestamp,
archive_path,
manifest_path,
dotfile_count: manifest.dotfiles.len(),
total_size,
})
}
fn add_file_to_archive(
&self,
archive: &mut Builder<miniz_oxide::deflate::GzEncoder<BufWriter<File>>>,
dotfile: &DetectedDotfile,
) -> Result<BackupEntry, io::Error> {
let file = File::open(&dotfile.path)?;
let metadata = file.metadata()?;
let relative_name = dotfile.path.file_name()
.map(|n| format!(".{}", n.to_string_lossy()))
.unwrap_or_else(|| dotfile.path.to_string_lossy().to_string());
archive.append_file(&relative_name, &file)?;
Ok(BackupEntry {
original_path: dotfile.path.clone(),
archived_path: PathBuf::from(relative_name),
size: metadata.len(),
hash: dotfile.content_hash.clone(),
})
}
pub fn restore_backup(&self, archive_path: &Path, target_dir: &Path) -> Result<(), io::Error> {
let file = File::open(archive_path)?;
let decoder = miniz_oxide::inflate::GzDecoder::new(BufReader::new(file))?;
let mut archive = tar::Archive::new(decoder);
for entry in archive.entries()? {
let mut entry = entry?;
let path = entry.path()?.into_owned();
let full_path = target_dir.join(&path);
if let Some(parent) = full_path.parent() {
if !parent.exists() {
fs::create_dir_all(parent)?;
}
}
entry.unpack(&full_path)?;
log::debug!("Restored: {:?}", full_path);
}
Ok(())
}
pub fn get_manifest(&self, backup: &BackupInfo) -> BackupManifest {
let manifest_path = &backup.manifest_path;
if manifest_path.exists() {
let content = fs::read_to_string(manifest_path).unwrap_or_default();
serde_yaml::from_str(&content).unwrap_or_else(|_| BackupManifest {
timestamp: backup.timestamp.format("%Y-%m-%d_%H-%M-%S").to_string(),
dotfiles: Vec::new(),
total_size: backup.total_size,
})
} else {
BackupManifest {
timestamp: backup.timestamp.format("%Y-%m-%d_%H-%M-%S").to_string(),
dotfiles: Vec::new(),
total_size: backup.total_size,
}
}
}
pub fn list_backups(&self) -> Vec<BackupInfo> {
let mut backups = Vec::new();
if let Ok(entries) = fs::read_dir(&self.backup_dir) {
for entry in entries.filter_map(|e| e.ok()) {
let path = entry.path();
if path.extension().map(|e| e == "yml").unwrap_or(false) {
if let Ok(manifest) = self.read_manifest(&path) {
let archive_path = path.with_extension("tar.gz");
let timestamp = DateTime::parse_from_str(&manifest.timestamp, "%Y-%m-%d_%H-%M-%S")
.map(|t| t.with_timezone(&Local))
.unwrap_or_else(|_| Local::now());
backups.push(BackupInfo {
timestamp,
archive_path,
manifest_path: path,
dotfile_count: manifest.dotfiles.len(),
total_size: manifest.total_size,
});
}
}
}
}
backups.sort_by(|a, b| b.timestamp.cmp(&a.timestamp));
backups
}
fn read_manifest(&self, path: &Path) -> Result<BackupManifest, io::Error> {
let content = fs::read_to_string(path)?;
Ok(serde_yaml::from_str(&content)?)
}
pub fn cleanup_old_backups(&self, max_backups: usize) -> Result<Vec<PathBuf>, io::Error> {
let backups = self.list_backups();
let to_remove = if backups.len() > max_backups {
&backups[max_backups..]
} else {
&[]
};
let mut removed = Vec::new();
for backup in to_remove {
if backup.archive_path.exists() {
fs::remove_file(&backup.archive_path)?;
removed.push(backup.archive_path.clone());
}
if backup.manifest_path.exists() {
fs::remove_file(&backup.manifest_path)?;
removed.push(backup.manifest_path.clone());
}
}
Ok(removed)
}
}