#!/bin/bash
# =============================================================================
# PBS config backup to SMB share.
# Indexes SRC directories into a SQLite database, then backs up new/changed
# files to an SMB share as 7z chunks. Run with -u to restore the latest backup.
# =============================================================================

# === Config ===
SERVER_IP="192.168.2.194"
SHARE_NAME="PBS-back"
MOUNT_POINT="/mnt/smb-backup"
CREDENTIALS_FILE="/root/.smb-pbs-cred"
DATE=$(date +%F-%H%M)
BACKUP_NAME="pbs-config-$DATE"
TMP_BACKUP="/tmp/$BACKUP_NAME"
CHUNK_SIZE="10G"
MIN_SPACE_REQUIRED="5G"   # Minimum space required in /tmp
LOG="/var/log/pbs-smb-backup.log"
SRC="/mypool /etc"        # Only scan these directories (space-separated)
DB_DIR="/var/lib/pbs-backup"
DB_FILE="$DB_DIR/index.db"

# Global state used by the signal/exit handlers
CLEANUP_IN_PROGRESS=0
MONITOR_PID=0        # No longer used for background monitor, kept for potential future use
CURRENT_SCAN_DIR=""

# Exclusion patterns (names/globs that are never backed up).
# FIX: the original listed "*.pid" twice; the duplicate is removed.
EXCLUDE_PATTERNS=(
  "node_modules" ".cursor-server" ".git" ".vscode"
  "*.log" "*.tmp" "*.temp" "*.swp" "*.swo" "*.bak" "*.cache"
  "*.pid" "*.lock" "*.socket" "*.sock" "*.log.*"
  "*.gz" "*.zip" "*.tar" "*.7z" "*.rar" "*.iso" "*.img"
  "*.vmdk" "*.vhd" "*.qcow2" "*.raw" "*.vdi" "*.vbox" "*.ova" "*.ovf"
  "*.vmx" "*.vmsd" "*.vmsn" "*.vmss" "*.vmtm" "*.vmxf"
  "*.nvram" "*.vmem" "*.vswp"
)

# Emit the exclusion arguments for find(1) on stdout (consumed via eval).
# FIX: the original also wrote the patterns to "$TMP_BACKUP/exclude_patterns.txt",
# a file that may not exist yet and was never read; the dead write is removed.
build_find_exclusions() {
  echo "-type f -not -path '*/\.*' -not -path '*/node_modules/*' $(printf -- "-not -name '%s' " "${EXCLUDE_PATTERNS[@]}")"
}

# Handle SIGINT / SIGTSTP / EXIT: stop child processes, remove temp files,
# unmount the share, then exit.
# $1 = signal name, $2 = exit code to propagate.
handle_termination() {
  local signal=$1
  local exit_code=$2

  # Prevent re-entrant cleanup (a second signal arriving during cleanup).
  if [ "$CLEANUP_IN_PROGRESS" -eq 1 ]; then
    exit $exit_code
  fi
  CLEANUP_IN_PROGRESS=1

  # Clear the in-place progress line before printing the cleanup message.
  echo -ne "\r\033[K"
  echo "[$(date)] โš ๏ธ Received signal $signal. Cleaning up..." | tee -a "$LOG"

  # Kill child processes (stray find/stat/md5sum workers).
  pgrep -P $$ | xargs -r kill

  if [ -d "$TMP_BACKUP" ]; then
    echo "[$(date)] Cleaning up temporary files..." | tee -a "$LOG"
    rm -rf "$TMP_BACKUP"
  fi

  if mountpoint -q "$MOUNT_POINT"; then
    echo "[$(date)] Unmounting share..." | tee -a "$LOG"
    cleanup_processes "$MOUNT_POINT"
    unmount_share
  fi

  if [ "$signal" = "SIGTSTP" ]; then
    echo "[$(date)] โŒ Script terminated by Ctrl+Z" | tee -a "$LOG"
    kill -9 $$
  fi

  if [ "$signal" = "SIGINT" ]; then
    echo "[$(date)] ๐Ÿ‘‹ Script terminated by Ctrl+C" | tee -a "$LOG"
    exit $exit_code
  fi
}

# Signal handlers.
# NOTE(review): the EXIT trap registered here is overridden later by
# 'trap handle_exit EXIT'; only one EXIT trap can be active.
trap 'handle_termination SIGINT 130' SIGINT
trap 'handle_termination SIGTSTP 146' SIGTSTP
trap 'handle_termination EXIT $?' EXIT

# Insert one batch worth of file metadata into the index database.
# $1 = file containing one path per line, $2 = sqlite database file.
process_file_batch() {
  local batch_file="$1"
  local db_file="$2"
  local temp_sql="$TMP_BACKUP/temp_$(basename "$batch_file").sql"
  local batch_num
  batch_num=$(basename "$batch_file" | sed 's/batch_//')

  echo "[$(date)] ๐Ÿ“ฆ Processing batch $batch_num..." | tee -a "$LOG"

  # Collect all INSERTs into one transaction file, then execute it at once.
  echo "BEGIN TRANSACTION;" > "$temp_sql"
  local count=0
  local file size mtime hash
  while read -r file; do
    ((count++))
    size=$(stat -c %s "$file" 2>/dev/null)
    mtime=$(stat -c %Y "$file" 2>/dev/null)
    hash=$(md5sum "$file" 2>/dev/null | cut -d' ' -f1)
    # FIX: skip files that vanished between listing and stat; empty
    # $size/$mtime would otherwise produce syntactically invalid SQL.
    if [ -z "$size" ] || [ -z "$mtime" ]; then
      continue
    fi

    if ((count % 100 == 0)); then
      echo "[$(date)] ๐Ÿ“„ Processing: $file (${size} bytes)" | tee -a "$LOG"
    fi

    # Escape single quotes for the SQL string literal.
    file=$(echo "$file" | sed "s/'/''/g")
    echo "INSERT OR REPLACE INTO file_index (path, size, mtime, hash, file_exists) VALUES ('$file', $size, $mtime, '$hash', 1);" >> "$temp_sql"
  done < "$batch_file"
  echo "COMMIT;" >> "$temp_sql"

  echo "[$(date)] ๐Ÿ’พ Writing batch $batch_num to database ($count files)..." | tee -a "$LOG"
  sqlite3 "$db_file" < "$temp_sql"
  rm -f "$temp_sql"
  echo "[$(date)] โœ… Completed batch $batch_num" | tee -a "$LOG"
}
# Ensure the file_index table has the scan_time column (older databases
# created before this column was introduced lack it). Backs the DB file up
# first and restores the backup if the ALTER fails.
check_and_update_schema() {
  echo "[$(date)] ๐Ÿ” Checking database schema..." | tee -a "$LOG"

  # Probe for the column; the SELECT fails when scan_time is missing.
  if ! sqlite3 "$DB_FILE" "SELECT scan_time FROM file_index LIMIT 1;" > /dev/null 2>&1; then
    echo "[$(date)] โš ๏ธ Database schema needs update. Adding scan_time column..." | tee -a "$LOG"

    local backup_file="${DB_FILE}.bak.$(date +%s)"
    cp "$DB_FILE" "$backup_file"
    echo "[$(date)] ๐Ÿ“ฆ Created database backup: $backup_file" | tee -a "$LOG"

    # FIX: test the sqlite3 exit status directly rather than via a
    # separate [ $? -eq 0 ] check.
    if sqlite3 "$DB_FILE" << EOF
BEGIN TRANSACTION;
ALTER TABLE file_index ADD COLUMN scan_time INTEGER;
CREATE INDEX IF NOT EXISTS idx_scan_time ON file_index(scan_time);
COMMIT;
EOF
    then
      echo "[$(date)] โœ… Database schema updated successfully" | tee -a "$LOG"
    else
      echo "[$(date)] โŒ Failed to update schema. Restoring backup..." | tee -a "$LOG"
      mv "$backup_file" "$DB_FILE"
      exit 1
    fi
  fi
}

# Create the SQLite index database (and its directory) if missing, falling
# back to /root/.pbs-backup when DB_DIR cannot be created. Exits on failure.
# Mutates globals DB_DIR / DB_FILE when the fallback location is used.
init_database() {
  echo "[$(date)] ๐Ÿ“Š Initializing SQLite database..." | tee -a "$LOG"

  if [ ! -d "$DB_DIR" ]; then
    echo "[$(date)] Creating database directory $DB_DIR..." | tee -a "$LOG"
    mkdir -p "$DB_DIR" || {
      echo "[$(date)] โŒ Failed to create database directory. Trying alternative location..." | tee -a "$LOG"
      DB_DIR="/root/.pbs-backup"
      DB_FILE="$DB_DIR/index.db"
      mkdir -p "$DB_DIR" || {
        echo "[$(date)] โŒ Failed to create alternative database directory. Exiting." | tee -a "$LOG"
        exit 1
      }
    }
  fi

  # Restrict access: the index contains a full listing of backed-up paths.
  chmod 700 "$DB_DIR"

  if [ ! -f "$DB_FILE" ]; then
    echo "[$(date)] Creating new database file..." | tee -a "$LOG"
    sqlite3 "$DB_FILE" << EOF
CREATE TABLE IF NOT EXISTS file_index (
    path TEXT PRIMARY KEY,
    size INTEGER,
    mtime INTEGER,
    hash TEXT,
    last_backup TEXT,
    file_exists INTEGER DEFAULT 1,
    scan_time INTEGER
);
CREATE INDEX IF NOT EXISTS idx_mtime ON file_index(mtime);
CREATE INDEX IF NOT EXISTS idx_exists ON file_index(file_exists);
CREATE INDEX IF NOT EXISTS idx_scan_time ON file_index(scan_time);
EOF
    chmod 600 "$DB_FILE"
  else
    # Existing database: upgrade its schema in place when needed.
    check_and_update_schema
  fi

  # Sanity check that the database is readable before the scan starts.
  if ! sqlite3 "$DB_FILE" "SELECT 1;" > /dev/null 2>&1; then
    echo "[$(date)] โŒ Failed to access database. Please check permissions." | tee -a "$LOG"
    exit 1
  fi
  echo "[$(date)] โœ… Database initialized successfully at $DB_FILE" | tee -a "$LOG"
}
# Incrementally index the files listed in $1 (one absolute path per line)
# into the SQLite database. New/changed files are INSERTed in batches of
# $batch_size; unchanged files get scan_time/file_exists refreshed in one
# UPDATE at the end.
# $1 = file list produced by find; $2 (optional) = the directory being
#      scanned, used to preload that directory's rows from the DB.
scan_directory() {
  local file_list="$1"
  # FIX: the original derived the directory from the *file list path*
  # (i.e. $TMP_BACKUP), so the metadata preload below never matched any DB
  # rows and every file was treated as new. Callers may now pass the
  # scanned directory as $2; the old derivation remains the fallback.
  local dir="${2:-$(dirname "$file_list")}"
  local current_scan_time
  current_scan_time=$(date +%s)     # one timestamp for the whole directory
  local start_time=$current_scan_time
  local count=0
  local batch_size=1000
  local current_batch=0
  local changed_files=0 new_files=0 unchanged_files=0
  local total_files_in_dir
  total_files_in_dir=$(wc -l < "$file_list")
  local last_status_update_time=$start_time
  CURRENT_SCAN_DIR="$dir"

  echo "[$(date)] ๐Ÿ” Scanning directory: $dir ($total_files_in_dir files)" | tee -a "$LOG"

  mkdir -p "$TMP_BACKUP/batches"
  local current_batch_sql_file="$TMP_BACKUP/batches/batch_$current_batch.sql"
  > "$current_batch_sql_file"
  # FIX: unchanged paths are collected in ONE list. The original wrote
  # per-batch files named after $current_batch and only ever processed the
  # file matching the final batch number, leaking all earlier ones.
  local unchanged_list="$TMP_BACKUP/batches/unchanged_paths.txt"
  > "$unchanged_list"

  # --- Step 1: preload this directory's metadata from the database ---
  echo "[$(date)] ๐Ÿ“š Pre-loading existing file metadata from database for $dir..." | tee -a "$LOG"
  declare -A db_files_metadata
  local path size mtime hash
  while IFS='|' read -r path size mtime hash; do
    db_files_metadata["$path"]="$size|$mtime|$hash"
  done < <(sqlite3 -separator '|' "$DB_FILE" "SELECT path, size, mtime, hash FROM file_index WHERE path LIKE '$dir/%';")
  echo "[$(date)] โœ… Pre-loading complete. Loaded ${#db_files_metadata[@]} entries."

  # Helper: execute one batch SQL file inside a transaction, retrying on
  # "database is locked". Exits the script after 15 failed attempts.
  # FIX: the original passed "BEGIN TRANSACTION;\n...\nCOMMIT;" as a single
  # double-quoted string — bash does not expand \n there, so sqlite3 received
  # literal backslash-n garbage; feed real lines via stdin instead.
  _flush_batch() {
    local sql_file="$1" label="$2"
    local tries=0
    while (( tries < 15 )); do
      if { echo "BEGIN TRANSACTION;"; cat "$sql_file"; echo "COMMIT;"; } \
           | sqlite3 "$DB_FILE" 2>/dev/null; then
        return 0
      fi
      ((tries++))
      echo "[$(date)] โš ๏ธ Database locked, retry $tries..." | tee -a "$LOG"
      sleep 0.3
    done
    echo "[$(date)] โŒ Failed to write $label after multiple retries. Exiting." | tee -a "$LOG"
    exit 1
  }

  # Helper: redraw the single-line progress bar (reads the enclosing
  # function's counters via bash dynamic scoping).
  _draw_progress() {
    local now rate progress filled empty i bar db_sz db_sz_mb db_files
    now=$(date +%s)
    rate=$(( count / (now - start_time + 1) ))
    # Guard against an empty file list (division by zero).
    progress=$(( count * 100 / (total_files_in_dir > 0 ? total_files_in_dir : 1) ))
    (( progress > 100 )) && progress=100
    local bar_length=50
    filled=$(( progress * bar_length / 100 ))
    empty=$(( bar_length - filled ))
    bar="["
    for ((i = 0; i < filled; i++)); do bar+="#"; done
    for ((i = 0; i < empty; i++)); do bar+="-"; done
    bar+="]"
    db_sz=$(stat -c %s "$DB_FILE" 2>/dev/null)
    db_sz_mb=$(( ${db_sz:-0} / 1024 / 1024 ))
    db_files=$(sqlite3 "$DB_FILE" "SELECT COUNT(*) FROM file_index WHERE file_exists = 1;" 2>/dev/null)
    echo -ne "\r[$(date)] ๐Ÿ“Š $bar $progress% | Processed: $count (N:$new_files C:$changed_files U:$unchanged_files) | Rate: ${rate}/s | DB: ${db_sz_mb}MB, Indexed: $db_files"
    last_status_update_time=$now
  }

  # --- Step 2: walk the file list, comparing against preloaded metadata ---
  local file escaped_file db_data db_size db_mtime db_hash needs_batch_update
  local now batch_lines t0 t1
  while read -r file; do
    # Safety: the file may have vanished since find listed it.
    [ -f "$file" ] || continue
    ((count++))

    size=$(stat -c %s "$file" 2>/dev/null)
    mtime=$(stat -c %Y "$file" 2>/dev/null)
    hash=$(md5sum "$file" 2>/dev/null | cut -d' ' -f1)
    # Escape single quotes for the SQL string literal.
    escaped_file=$(echo "$file" | sed "s/'/''/g")
    needs_batch_update=0

    db_data=${db_files_metadata["$file"]}
    if [ -n "$db_data" ]; then
      IFS='|' read -r db_size db_mtime db_hash <<< "$db_data"
      if [ "$size" = "$db_size" ] && [ "$mtime" = "$db_mtime" ] && [ "$hash" = "$db_hash" ]; then
        # Unchanged: remember the path so scan_time and file_exists can be
        # refreshed in a single UPDATE after the loop.
        ((unchanged_files++))
        echo "$escaped_file" >> "$unchanged_list"
      else
        ((changed_files++))
        needs_batch_update=1
      fi
    else
      ((new_files++))
      needs_batch_update=1
    fi

    if [ $needs_batch_update -eq 1 ]; then
      echo "INSERT OR REPLACE INTO file_index (path, size, mtime, hash, file_exists, scan_time) VALUES ('$escaped_file', ${size:-0}, ${mtime:-0}, '$hash', 1, $current_scan_time);" >> "$current_batch_sql_file"
    fi

    # Periodic status update: every 100 files or every 2 seconds.
    now=$(date +%s)
    if (( count % 100 == 0 || now - last_status_update_time >= 2 )); then
      _draw_progress
    fi

    # Flush the batch once it has accumulated batch_size statements.
    batch_lines=$(wc -l < "$current_batch_sql_file")
    if (( batch_lines >= batch_size )); then
      t0=$(date +%s)
      echo -ne "\r\033[K"
      echo "[$(date)] ๐Ÿ’พ Writing batch $current_batch to database..." | tee -a "$LOG"
      _flush_batch "$current_batch_sql_file" "batch $current_batch"
      echo "[$(date)] โœ… Batch $current_batch written." | tee -a "$LOG"
      t1=$(date +%s)
      echo "[$(date)] โšก Batch Write Rate: $(( batch_lines / (t1 - t0 + 1) ))/s" | tee -a "$LOG"
      _draw_progress
      rm -f "$current_batch_sql_file"
      ((current_batch++))
      current_batch_sql_file="$TMP_BACKUP/batches/batch_$current_batch.sql"
      > "$current_batch_sql_file"
    fi
  done < "$file_list"

  # Flush any remaining statements in the last (partial) batch.
  batch_lines=$(wc -l < "$current_batch_sql_file" 2>/dev/null || echo 0)
  if (( batch_lines > 0 )); then
    echo -ne "\r\033[K"
    echo "[$(date)] ๐Ÿ’พ Writing final batch to database..." | tee -a "$LOG"
    t0=$(date +%s)
    _flush_batch "$current_batch_sql_file" "final batch"
    echo "[$(date)] โœ… Final batch written." | tee -a "$LOG"
    t1=$(date +%s)
    echo "[$(date)] โšก Final Batch Write Rate: $(( batch_lines / (t1 - t0 + 1) ))/s" | tee -a "$LOG"
  fi
  rm -f "$current_batch_sql_file"

  # --- Step 3: refresh unchanged files in one UPDATE ---
  if [ -s "$unchanged_list" ]; then
    echo "[$(date)] ๐Ÿ”„ Updating scan time for unchanged files in database..." | tee -a "$LOG"
    local in_list tries ok
    # Build a proper quoted list: 'p1','p2',... (paths are already
    # SQL-escaped). FIX: the original joined raw paths inside ONE pair of
    # quotes — IN ('p1,p2,...') — which matched nothing.
    # NOTE(review): a very large directory could exceed sqlite's statement
    # limits here — confirm against expected directory sizes.
    in_list=$(sed "s/.*/'&'/" "$unchanged_list" | paste -sd ',' -)
    tries=0 ok=0
    while (( tries < 15 )); do
      # FIX: also restore file_exists = 1 — it is reset to 0 before the scan
      # and unchanged files are in no INSERT batch, so without this they were
      # later deleted as "removed" files.
      if sqlite3 "$DB_FILE" "UPDATE file_index SET scan_time = $current_scan_time, file_exists = 1 WHERE path IN ($in_list);" 2>/dev/null; then
        ok=1
        break
      fi
      ((tries++))
      echo "[$(date)] โš ๏ธ Database locked during unchanged update, retry $tries..." | tee -a "$LOG"
      sleep 0.5
    done
    if (( ok == 0 )); then
      echo "[$(date)] โŒ Failed to update scan time for unchanged files. Exiting." | tee -a "$LOG"
      exit 1
    fi
    echo "[$(date)] โœ… Scan time updated for $unchanged_files unchanged files."
  fi
  rm -f "$unchanged_list"

  CURRENT_SCAN_DIR=""

  # Final per-directory statistics.
  echo -ne "\r\033[K"
  local total_in_db db_size_bytes db_size_mb
  total_in_db=$(sqlite3 "$DB_FILE" "SELECT COUNT(*) FROM file_index;")
  db_size_bytes=$(stat -c %s "$DB_FILE" 2>/dev/null)
  db_size_mb=$(( ${db_size_bytes:-0} / 1024 / 1024 ))
  echo "[$(date)] ๐Ÿ“Š Directory scan completed: $dir" | tee -a "$LOG"
  echo "[$(date)]   - Files processed: $count" | tee -a "$LOG"
  echo "[$(date)]   - New files: $new_files" | tee -a "$LOG"
  echo "[$(date)]   - Changed files: $changed_files" | tee -a "$LOG"
  echo "[$(date)]   - Unchanged files: $unchanged_files" | tee -a "$LOG"
  echo "[$(date)]   - Total files in database: $total_in_db" | tee -a "$LOG"
  echo "[$(date)]   - Database size: ${db_size_mb}MB" | tee -a "$LOG"
  # The batches directory is removed by update_file_list once all
  # directories have been scanned.
}
# Incremental scan driver: mark every indexed file as missing, rescan each
# SRC directory (re-marking files that are found), then delete rows still
# marked missing and report statistics.
update_file_list() {
  echo "[$(date)] ๐Ÿ”„ Starting incremental file system scan..." | tee -a "$LOG"

  # Mark all rows as not-found; the scan flips them back to 1.
  echo "[$(date)] ๐Ÿท๏ธ Marking existing files as potentially removed..." | tee -a "$LOG"
  # FIX: the original issued BEGIN/UPDATE/COMMIT as three separate sqlite3
  # invocations — each invocation is its own session, so the BEGIN/COMMIT
  # never wrapped the UPDATE (a single statement is already atomic).
  sqlite3 "$DB_FILE" "UPDATE file_index SET file_exists = 0;"

  # Temporary directory for find output and batch files.
  mkdir -p "$TMP_BACKUP"

  local dir find_output_file
  for dir in $SRC; do   # intentional word-splitting: SRC is space-separated
    if [ -d "$dir" ]; then
      find_output_file="$TMP_BACKUP/scan_$(basename "$dir").find"
      echo "[$(date)] ๐Ÿ“‹ Finding files in $dir..." | tee -a "$LOG"
      # Show find progress when pv is available.
      if command -v pv >/dev/null 2>&1; then
        find "$dir" -type f 2>/dev/null | pv -l -c -N "Finding files in $dir" > "$find_output_file"
      else
        find "$dir" -type f 2>/dev/null > "$find_output_file"
      fi
      if [ -s "$find_output_file" ]; then
        # Pass the scanned directory so scan_directory can preload its rows
        # (the extra argument is ignored by older implementations).
        scan_directory "$find_output_file" "$dir"
      else
        echo "[$(date)] โš ๏ธ No files found in $dir" | tee -a "$LOG"
      fi
      rm -f "$find_output_file"
    else
      echo "[$(date)] โš ๏ธ Directory not found: $dir" | tee -a "$LOG"
    fi
  done

  # Drop rows for files that were not seen by any scan.
  echo "[$(date)] ๐Ÿงน Cleaning up removed files..." | tee -a "$LOG"
  local removed_count
  removed_count=$(sqlite3 "$DB_FILE" "SELECT COUNT(*) FROM file_index WHERE file_exists = 0;")
  sqlite3 "$DB_FILE" "DELETE FROM file_index WHERE file_exists = 0;"
  echo "[$(date)] ๐Ÿ—‘๏ธ Removed $removed_count non-existent files" | tee -a "$LOG"

  # Final statistics.
  local total_files db_size_bytes db_size_mb
  total_files=$(sqlite3 "$DB_FILE" "SELECT COUNT(*) FROM file_index;")
  db_size_bytes=$(stat -c %s "$DB_FILE" 2>/dev/null)
  db_size_mb=$(( ${db_size_bytes:-0} / 1024 / 1024 ))
  echo "[$(date)] ๐Ÿ“Š Scan completed:" | tee -a "$LOG"
  echo "[$(date)]   - Total files: $total_files" | tee -a "$LOG"
  echo "[$(date)]   - Database size: ${db_size_mb}MB" | tee -a "$LOG"
  echo "[$(date)]   - Directories scanned: $SRC" | tee -a "$LOG"

  echo "[$(date)] ๐Ÿ” Verifying database integrity..." | tee -a "$LOG"
  if sqlite3 "$DB_FILE" "PRAGMA integrity_check;" | grep -q "ok"; then
    echo "[$(date)] โœ… Database integrity check passed" | tee -a "$LOG"
  else
    echo "[$(date)] โŒ Database integrity check failed!" | tee -a "$LOG"
  fi

  # Remove batch files, then the temp dir itself (only if now empty).
  rm -rf "$TMP_BACKUP/batches"
  rmdir "$TMP_BACKUP" 2>/dev/null
}
# List paths flagged as needing backup.
# NOTE(review): the schema created by init_database has NO needs_backup
# column, so this query errors and yields an empty list; the real backup
# list is rebuilt later via needs_backup() on a fresh find. Errors are
# silenced to keep the original best-effort behaviour.
get_files_to_backup() {
  sqlite3 "$DB_FILE" "SELECT path FROM file_index WHERE needs_backup = 1;" 2>/dev/null
}

# Return 0 ("needs backup") when $1 is absent from the index or its
# size/mtime/hash differ from the indexed values; return 1 when unchanged.
needs_backup() {
  local file="$1"
  local current_mtime current_size current_hash
  current_mtime=$(stat -c %Y "$file")
  current_size=$(stat -c %s "$file")
  current_hash=$(md5sum "$file" | cut -d' ' -f1)

  # FIX: escape single quotes so a path containing ' cannot break the query.
  local escaped
  escaped=$(echo "$file" | sed "s/'/''/g")
  local db_data
  db_data=$(sqlite3 "$DB_FILE" "SELECT size, mtime, hash FROM file_index WHERE path='$escaped';")
  if [ -z "$db_data" ]; then
    return 0   # not in the index yet
  fi

  local db_size db_mtime db_hash
  IFS='|' read -r db_size db_mtime db_hash <<< "$db_data"
  if [ "$current_mtime" -ne "$db_mtime" ] || [ "$current_size" -ne "$db_size" ] || [ "$current_hash" != "$db_hash" ]; then
    return 0   # changed on disk
  fi
  return 1     # unchanged
}

# Build the index, then derive the preliminary backup list.
init_database
update_file_list

echo "[$(date)] ๐Ÿ“‹ Creating backup list..." | tee -a "$LOG"
# FIX: update_file_list may have rmdir'd $TMP_BACKUP; recreate it before
# writing the file list into it.
mkdir -p "$TMP_BACKUP"
get_files_to_backup > "$TMP_BACKUP/filelist.txt"

# Chunk-building state (re-initialised again before the real backup pass).
chunk_num=1
current_chunk_size=0
current_chunk_files=()
total_files=$(wc -l < "$TMP_BACKUP/filelist.txt")
processed_files=0
skipped_files=0

echo "[$(date)] ๐Ÿ” Starting backup process (files to backup: $total_files)..." | tee -a "$LOG"

# === Cleanup function ===
# FIX: MAX_BACKUPS was used below but never defined; with it unset,
# 'tail -n +$((MAX_BACKUPS + 1))' became 'tail -n +1' and deleted EVERY
# backup on the share. Default to keeping the 5 most recent.
MAX_BACKUPS="${MAX_BACKUPS:-5}"

# Keep only the newest MAX_BACKUPS backups on the share, prune old logs
# and leftover temporary directories.
cleanup_old_backups() {
  echo "[$(date)] ๐Ÿงน Starting cleanup of old backups..." | tee -a "$LOG"

  if mountpoint -q "$MOUNT_POINT"; then
    echo "[$(date)] Keeping only the $MAX_BACKUPS most recent backups..." | tee -a "$LOG"
    # Subshell so the cd cannot change the script's working directory.
    (
      cd "$MOUNT_POINT" || exit 1
      ls -td pbs-config-* 2>/dev/null | tail -n +$((MAX_BACKUPS + 1)) | xargs -r rm -rf
      echo "[$(date)] Current backups:" | tee -a "$LOG"
      ls -lhd pbs-config-* 2>/dev/null | tee -a "$LOG"
    )
  fi

  echo "[$(date)] Cleaning up old log files..." | tee -a "$LOG"
  find /var/log -name "pbs-smb-backup.*.log" -mtime +7 -delete

  echo "[$(date)] Cleaning up old temporary files..." | tee -a "$LOG"
  rm -rf /tmp/pbs-config-*
  rm -rf /tmp/restore-*
}

# === Create mount point if needed ===
mkdir -p "$MOUNT_POINT"

# === Ensure credentials file exists ===
# NOTE(review): credentials are hardcoded in the script; pre-provision
# $CREDENTIALS_FILE (or use a secret store) instead of shipping a password.
if [ ! -f "$CREDENTIALS_FILE" ]; then
  echo "username=pbs" > "$CREDENTIALS_FILE"
  echo "password=2104" >> "$CREDENTIALS_FILE"
  chmod 600 "$CREDENTIALS_FILE"
fi

# === Helper functions ===
# Print the number of free bytes in /tmp.
get_available_space() {
  df -B1 /tmp | awk 'NR==2 {print $4}'
}

# Return 0 when /tmp has at least MIN_SPACE_REQUIRED free; warn otherwise.
check_space() {
  local required_space available_space
  required_space=$(numfmt --from=iec "$MIN_SPACE_REQUIRED")
  available_space=$(get_available_space)
  if [ "$available_space" -lt "$required_space" ]; then
    echo "[$(date)] โš ๏ธ Warning: Only $(numfmt --to=iec "$available_space") available in /tmp" | tee -a "$LOG"
    return 1
  fi
  return 0
}

# Block (polling every 10s) until /tmp has at least MIN_SPACE_REQUIRED free.
wait_for_space() {
  local required_space available_space
  required_space=$(numfmt --from=iec "$MIN_SPACE_REQUIRED")
  while true; do
    available_space=$(get_available_space)
    if [ "$available_space" -ge "$required_space" ]; then
      return 0
    fi
    echo "[$(date)] โš ๏ธ Waiting for more space in /tmp (need $MIN_SPACE_REQUIRED, have $(numfmt --to=iec "$available_space"))..." | tee -a "$LOG"
    sleep 10
  done
}

# Terminate processes holding $1 open (SIGTERM first, SIGKILL if needed),
# retrying up to 3 passes. Returns 1 if any process survives.
cleanup_processes() {
  local mount_point="$1"
  local max_attempts=3
  local attempt=1
  echo "[$(date)] Cleaning up processes using $mount_point..." | tee -a "$LOG"

  while [ $attempt -le $max_attempts ]; do
    # FIX: -x matches the whole PID line; plain 'grep -v "$$"' also dropped
    # unrelated PIDs that merely contain this script's PID as a substring.
    local pids
    pids=$(lsof -t "$mount_point" 2>/dev/null | grep -vx "$$")
    if [ -z "$pids" ]; then
      echo "[$(date)] No processes found using $mount_point" | tee -a "$LOG"
      return 0
    fi
    echo "[$(date)] Attempt $attempt: Found processes: $pids" | tee -a "$LOG"

    local pid
    for pid in $pids; do
      if [ -e "/proc/$pid" ]; then
        echo "[$(date)] Terminating process $pid..." | tee -a "$LOG"
        kill -15 "$pid" 2>/dev/null
        sleep 1
        if [ -e "/proc/$pid" ]; then
          echo "[$(date)] Process $pid still running, sending SIGKILL..." | tee -a "$LOG"
          kill -9 "$pid" 2>/dev/null
          sleep 1
        fi
      fi
    done

    pids=$(lsof -t "$mount_point" 2>/dev/null | grep -vx "$$")
    if [ -z "$pids" ]; then
      echo "[$(date)] Successfully cleaned up all processes" | tee -a "$LOG"
      return 0
    fi
    ((attempt++))
    sleep 2
  done

  echo "[$(date)] โš ๏ธ Warning: Could not clean up all processes after $max_attempts attempts" | tee -a "$LOG"
  return 1
}
# Mount the SMB share on $MOUNT_POINT, first clearing any stale mount, then
# trying progressively older SMB protocol versions. Exits the script when no
# version mounts.
mount_share() {
  echo "[$(date)] Mounting SMB share..." | tee -a "$LOG"

  # A leftover mount must be cleared first: lazy -> kill users -> normal -> force.
  if mountpoint -q "$MOUNT_POINT"; then
    echo "[$(date)] Share already mounted, attempting to unmount..." | tee -a "$LOG"
    echo "[$(date)] Attempting lazy unmount..." | tee -a "$LOG"
    umount -l "$MOUNT_POINT" 2>/dev/null
    sleep 2
    if mountpoint -q "$MOUNT_POINT"; then
      echo "[$(date)] Share still mounted, cleaning up processes..." | tee -a "$LOG"
      cleanup_processes "$MOUNT_POINT"
      umount "$MOUNT_POINT" 2>/dev/null
      sleep 2
      if mountpoint -q "$MOUNT_POINT"; then
        echo "[$(date)] Attempting force unmount..." | tee -a "$LOG"
        umount -f "$MOUNT_POINT" 2>/dev/null
        sleep 2
      fi
    fi
    # Still mounted after all attempts: abort with diagnostics.
    if mountpoint -q "$MOUNT_POINT"; then
      echo "[$(date)] โŒ Failed to unmount existing share. Please check for processes using $MOUNT_POINT" | tee -a "$LOG"
      echo "[$(date)] Running processes:" | tee -a "$LOG"
      lsof "$MOUNT_POINT" | grep -vw "$$" | tee -a "$LOG"
      exit 1
    fi
  fi

  # Ensure the mount point is empty before mounting.
  # FIX: ${MOUNT_POINT:?} aborts instead of expanding to "/*" should the
  # variable ever be empty.
  rm -rf "${MOUNT_POINT:?}"/*

  local smb_ver
  for smb_ver in "3.0" "2.1" "2.0" "1.0"; do
    echo "[$(date)] Attempting mount with SMB version $smb_ver..." | tee -a "$LOG"
    # FIX: test the mount exit status directly instead of via $?.
    if mount -t cifs "//$SERVER_IP/$SHARE_NAME" "$MOUNT_POINT" \
         -o "credentials=$CREDENTIALS_FILE,iocharset=utf8,vers=$smb_ver,sec=ntlmssp,uid=0,gid=0,file_mode=0644,dir_mode=0755"; then
      echo "[$(date)] Successfully mounted with SMB version $smb_ver" | tee -a "$LOG"
      return 0
    fi
    echo "[$(date)] Mount failed, checking dmesg for details..." | tee -a "$LOG"
    dmesg | tail -n 20 | tee -a "$LOG"
    sleep 2
  done

  echo "[$(date)] โŒ Failed to mount SMB share. Please check:" | tee -a "$LOG"
  echo "1. SMB server is running and accessible" | tee -a "$LOG"
  echo "2. Credentials are correct" | tee -a "$LOG"
  echo "3. Network connectivity to $SERVER_IP" | tee -a "$LOG"
  echo "4. SMB share '$SHARE_NAME' exists and is accessible" | tee -a "$LOG"
  exit 1
}

# Unmount the SMB share, escalating graceful -> force -> lazy -> cleanup+force.
# Warns (but does not exit) if the share cannot be unmounted.
unmount_share() {
  echo "[$(date)] Unmounting SMB share..." | tee -a "$LOG"
  if mountpoint -q "$MOUNT_POINT"; then
    # Stop anything still holding the mount open.
    cleanup_processes "$MOUNT_POINT"

    umount "$MOUNT_POINT" 2>/dev/null
    sleep 2

    if mountpoint -q "$MOUNT_POINT"; then
      echo "[$(date)] Force unmounting..." | tee -a "$LOG"
      umount -f "$MOUNT_POINT" 2>/dev/null
      sleep 2

      if mountpoint -q "$MOUNT_POINT"; then
        echo "[$(date)] Lazy unmounting..." | tee -a "$LOG"
        umount -l "$MOUNT_POINT" 2>/dev/null
        sleep 2

        # Last resort: clean up users again and force once more.
        if mountpoint -q "$MOUNT_POINT"; then
          cleanup_processes "$MOUNT_POINT"
          sleep 2
          umount -f "$MOUNT_POINT" 2>/dev/null
          sleep 2
        fi
      fi
    fi

    # Final check — warn but continue so cleanup paths can finish.
    if mountpoint -q "$MOUNT_POINT"; then
      echo "[$(date)] โš ๏ธ Warning: Could not unmount $MOUNT_POINT completely" | tee -a "$LOG"
      echo "[$(date)] Running processes:" | tee -a "$LOG"
      lsof "$MOUNT_POINT" | grep -vw "$$" | tee -a "$LOG"
    fi
  fi
}
# Ensure cleanup on every exit path.
# NOTE(review): this replaces the EXIT trap registered earlier for
# handle_termination — only one EXIT trap can be active.
trap 'handle_exit' EXIT

# EXIT trap: unmount the share and remove temp files exactly once.
handle_exit() {
  local exit_code=$?
  echo "[$(date)] Cleaning up on exit..." | tee -a "$LOG"
  # FIX: the original tested [ -z "$CLEANUP_IN_PROGRESS" ], but the variable
  # is initialised to 0 (a non-empty string), so this cleanup never ran.
  # Compare the value instead.
  if [ "$CLEANUP_IN_PROGRESS" != "1" ]; then
    CLEANUP_IN_PROGRESS=1
    cleanup_processes "$MOUNT_POINT"
    unmount_share
    rm -rf "$TMP_BACKUP"
  fi
  exit $exit_code
}

# === Handle -u option for restore ===
if [ "$1" == "-u" ]; then
  echo "[$(date)] ๐Ÿ” Restore mode selected. Starting cleanup..." | tee -a "$LOG"
  cleanup_old_backups
  mount_share

  # Newest backup directory on the share.
  LATEST_BACKUP_DIR=$(ls -td "$MOUNT_POINT"/pbs-config-* 2>/dev/null | head -n1)
  if [ -z "$LATEST_BACKUP_DIR" ]; then
    echo "[$(date)] โŒ No backup found on SMB share." | tee -a "$LOG"
    unmount_share
    exit 1
  fi

  echo "[$(date)] โฌ‡๏ธ Starting restore process from $LATEST_BACKUP_DIR..." | tee -a "$LOG"
  TMP_RESTORE_DIR="/tmp/restore-$(basename "$LATEST_BACKUP_DIR")"
  mkdir -p "$TMP_RESTORE_DIR"

  # Process chunks in numeric order (chunk names contain no whitespace).
  for chunk in $(ls -v "$LATEST_BACKUP_DIR"/chunk_* 2>/dev/null); do
    [ -f "$chunk" ] || continue
    echo "[$(date)] Processing chunk: $(basename "$chunk")" | tee -a "$LOG"
    # Copy locally first so extraction does not read over the network.
    rsync -ah --progress "$chunk" "$TMP_RESTORE_DIR/"
    echo "[$(date)] Extracting chunk..." | tee -a "$LOG"
    # NOTE(review): extracts archived absolute paths directly onto / — only
    # restore chunks produced by this script on a trusted share.
    7z x -y "$TMP_RESTORE_DIR/$(basename "$chunk")" -o/ | tee -a "$LOG"
    rm -f "$TMP_RESTORE_DIR/$(basename "$chunk")"
  done

  rm -rf "$TMP_RESTORE_DIR"
  unmount_share
  echo "[$(date)] โœ… Restore completed." | tee -a "$LOG"
  exit 0
fi

# === Regular backup mode ===
# Step 1: initial cleanup and space check.
echo "[$(date)] ๐Ÿงน Starting initial cleanup..." | tee -a "$LOG"
cleanup_old_backups
if ! check_space; then
  echo "[$(date)] โš ๏ธ Proceeding with caution due to limited space" | tee -a "$LOG"
fi

# Step 2: temporary directory for chunks.
mkdir -p "$TMP_BACKUP"

# Step 3: mount the share.
mount_share

# Step 4: destination directory on the share.
BACKUP_DIR="$MOUNT_POINT/$BACKUP_NAME"
mkdir -p "$BACKUP_DIR"

# Step 5: chunked backup with space management.
echo "[$(date)] ๐Ÿ—œ๏ธ Starting backup with space management..." | tee -a "$LOG"
echo "[$(date)] ๐Ÿ“‹ Creating file list (excluding unnecessary files)..." | tee -a "$LOG"

# Return 0 when pv is installed.
check_pv() {
  command -v pv >/dev/null 2>&1
}

# Pass stdin through while showing progress ($1 = expected line count).
# Falls back to a manual counter when pv is unavailable.
show_progress() {
  if check_pv; then
    pv -l -s "$1" -N "Scanning files"
  else
    local count=0
    local total="$1"
    local line
    while read -r line; do
      ((count++))
      # FIX: guard against division by zero when the expected total is 0.
      echo -ne "\r[$(date)] ๐Ÿ“‹ Scanning files: $count/$total ($((count * 100 / (total > 0 ? total : 1)))%)" | tee -a "$LOG"
      echo "$line"
    done
    echo
  fi
}

# Count candidate files first so progress can be reported.
echo "[$(date)] ๐Ÿ” Counting files to scan..." | tee -a "$LOG"
total_files=$(eval "find $SRC -type f $(build_find_exclusions) | wc -l")
echo "[$(date)] ๐Ÿ“Š Found $total_files files to process" | tee -a "$LOG"

echo "[$(date)] ๐Ÿ“‹ Creating file list with exclusions..." | tee -a "$LOG"
eval "find $SRC -type f $(build_find_exclusions)" | show_progress "$total_files" > "$TMP_BACKUP/filelist.txt"

# Initialize variables for chunk creation.
chunk_num=1
current_chunk_size=0
current_chunk_files=()
processed_files=0
skipped_files=0
# Perf: resolve the chunk-size limit once instead of per file.
chunk_limit=$(numfmt --from=iec "$CHUNK_SIZE")

echo "[$(date)] ๐Ÿ” Starting file analysis (total files: $total_files)..." | tee -a "$LOG"

# Compress current_chunk_files[] into chunk number $1 (labelled $2 in logs)
# and rsync it to $BACKUP_DIR. Exits the script on any failure.
# FIX: the original passed a bare '-v' to 7z; '-v' is the create-volumes
# switch and REQUIRES a size argument, so 7z rejected the command and every
# chunk "failed to create". The switch is removed.
create_and_transfer_chunk() {
  local num="$1" label="$2"
  local chunk_file="$TMP_BACKUP/chunk_$(printf "%03d" "$num").7z"
  echo "[$(date)] ๐Ÿ”„ Compressing files into $label..." | tee -a "$LOG"
  7z a -y -spf -t7z -m0=lzma2 -mx=5 "$chunk_file" "${current_chunk_files[@]}" | tee -a "$LOG"
  if [ ! -f "$chunk_file" ]; then
    echo "[$(date)] โŒ Failed to create $label" | tee -a "$LOG"
    exit 1
  fi
  echo "[$(date)] โฌ†๏ธ Transferring $label to SMB..." | tee -a "$LOG"
  # FIX: test the rsync exit status directly instead of via $?.
  if rsync -ah --progress "$chunk_file" "$BACKUP_DIR/"; then
    echo "[$(date)] โœ… $label transferred successfully" | tee -a "$LOG"
    rm -f "$chunk_file"
  else
    echo "[$(date)] โŒ Failed to transfer $label" | tee -a "$LOG"
    exit 1
  fi
}

# Walk the file list, grouping changed files into ~CHUNK_SIZE archives.
while IFS= read -r file; do
  # The file may have vanished since the list was generated.
  [ -f "$file" ] || continue
  ((processed_files++))
  echo -ne "[$(date)] ๐Ÿ“Š Progress: $processed_files/$total_files files processed (${skipped_files} skipped)\r" | tee -a "$LOG"

  # Skip files the index says are unchanged.
  if ! needs_backup "$file"; then
    ((skipped_files++))
    continue
  fi

  file_size=$(stat -c %s "$file")
  file_mtime=$(stat -c %Y "$file")
  file_hash=$(md5sum "$file" | cut -d' ' -f1)

  # FIX: escape single quotes so paths containing ' cannot break the SQL.
  # NOTE(review): the index is updated BEFORE the chunk holding this file is
  # transferred; if a later transfer fails and the script exits, the file
  # will not be retried on the next run.
  escaped_path=$(echo "$file" | sed "s/'/''/g")
  sqlite3 "$DB_FILE" << EOF
INSERT OR REPLACE INTO file_index (path, size, mtime, hash, file_exists)
VALUES ('$escaped_path', $file_size, $file_mtime, '$file_hash', 1);
EOF

  # Flush the current chunk before this file would push it past the limit.
  if [ $((current_chunk_size + file_size)) -gt "$chunk_limit" ]; then
    if [ ${#current_chunk_files[@]} -gt 0 ]; then
      echo -e "\n[$(date)] ๐Ÿ“ฆ Creating chunk $chunk_num with ${#current_chunk_files[@]} files..." | tee -a "$LOG"
      create_and_transfer_chunk "$chunk_num" "chunk $chunk_num"
      current_chunk_files=()
      current_chunk_size=0
      ((chunk_num++))
      # Pause for free space in /tmp before building the next chunk.
      if ! check_space; then
        wait_for_space
      fi
    fi
  fi

  current_chunk_files+=("$file")
  current_chunk_size=$((current_chunk_size + file_size))
done < "$TMP_BACKUP/filelist.txt"

echo -e "\n[$(date)] ๐Ÿ“Š Backup summary:" | tee -a "$LOG"
echo "[$(date)]   - Total files processed: $total_files" | tee -a "$LOG"
echo "[$(date)]   - Files skipped (unchanged): $skipped_files" | tee -a "$LOG"
echo "[$(date)]   - Files backed up: $((total_files - skipped_files))" | tee -a "$LOG"

# Flush the final partial chunk.
if [ ${#current_chunk_files[@]} -gt 0 ]; then
  echo "[$(date)] ๐Ÿ“ฆ Creating final chunk $chunk_num with ${#current_chunk_files[@]} files..." | tee -a "$LOG"
  create_and_transfer_chunk "$chunk_num" "final chunk"
fi

# Step 6: final cleanup and finish.
rm -rf "$TMP_BACKUP"
unmount_share
echo "[$(date)] โœ… Backup completed successfully: $BACKUP_NAME" | tee -a "$LOG"