backup-volumes.sh
#!/bin/sh

set -e

# Backup docker volumes to S3
# This script lists all docker volumes, creates tar.gz backups,
# uploads them to S3, and deletes local archives one at a time
# to minimize disk space usage.
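#
# Example usage (a sketch; the install path and log file below are
# assumptions, adjust to your setup):
#   ./backup-volumes.sh
# or nightly via cron:
#   0 3 * * * /usr/local/bin/backup-volumes.sh >>/var/log/volume-backup.log 2>&1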

export AWS_PROFILE="volume-backup"

# Configuration
S3_BUCKET="homelab-volume-backups-v2"
HOSTNAME=$(hostname -s)
BACKUP_PREFIX="volumes/$HOSTNAME"
TEMP_DIR=$(mktemp -d)
TIMESTAMP=$(date +%Y_%m_%d)
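
# The "volume-backup" profile is expected to exist in ~/.aws/credentials,
# e.g. (values are placeholders):
#   [volume-backup]
#   aws_access_key_id = ...
#   aws_secret_access_key = ...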

# Logging functions
log_info() {
    echo "[INFO] $1"
}

log_warn() {
    echo "[WARN] $1"
}

log_error() {
    echo "[ERROR] $1"
}

# Validate configuration
validate_config() {
    # Test AWS credentials
    if ! aws s3 ls "s3://$S3_BUCKET" >/dev/null 2>&1; then
        log_error "Cannot access S3 bucket: $S3_BUCKET"
        log_error "Please check your AWS credentials and bucket permissions"
        exit 1
    fi
}
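
# Assumed minimal bucket permissions for the calls this script makes
# (not verified against this bucket): s3:ListBucket for "aws s3 ls"
# and s3:PutObject for "aws s3 cp".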

# Cleanup temporary directory
cleanup() {
    if [ -d "$TEMP_DIR" ]; then
        rm -rf "$TEMP_DIR"
        log_info "Cleaned up temporary directory"
    fi
}

# With set -e the script can abort before the explicit cleanup calls run,
# so also remove the temp directory on any exit.
trap cleanup EXIT

# Backup a single volume
backup_volume() {
    local volume_name="$1"
    local archive_name="${volume_name}_${TIMESTAMP}.tar.gz"
    local archive_path="$TEMP_DIR/$archive_name"
    local s3_path="s3://$S3_BUCKET/$BACKUP_PREFIX/$archive_name"

    log_info "Starting backup of volume: $volume_name"

    # Create tar.gz archive of the volume
    # We use a temporary container to mount and backup the volume
    log_info "Creating archive: $archive_name"

    if docker run --rm \
        -v "$volume_name:/volume:ro" \
        -v "$TEMP_DIR:/backup" \
        alpine \
        tar czf "/backup/$archive_name" -C /volume . 2>/dev/null; then
        log_info "Archive created successfully"
    else
        log_error "Failed to create archive for volume: $volume_name"
        return 1
    fi

    # Get archive size for logging
    if [ -f "$archive_path" ]; then
        archive_size=$(du -h "$archive_path" | cut -f1)
        log_info "Archive size: $archive_size"

        # Upload to S3
        log_info "Uploading to S3: $s3_path"

        if aws s3 cp "$archive_path" "$s3_path"; then
            log_info "Successfully uploaded to S3"

            # Delete local archive
            rm -f "$archive_path"
            log_info "Deleted local archive"

            return 0
        else
            log_error "Failed to upload to S3"
            rm -f "$archive_path"
            return 1
        fi
    else
        log_error "Archive file not found: $archive_path"
        return 1
    fi
}
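
# Restore sketch (an assumed companion command, not part of this script):
# extract an archive back into a volume with the same throwaway container,
# e.g. for a volume named "myvolume":
#   docker run --rm -v myvolume:/volume -v "$PWD:/backup" alpine \
#       tar xzf "/backup/myvolume_2024_01_01.tar.gz" -C /volume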

# Main backup process
main() {
    log_info "Docker Volume Backup Script"
    log_info "==========================="
    validate_config
    log_info "Fetching list of Docker volumes..."
    volumes=$(docker volume ls -q)

    if [ -z "$volumes" ]; then
        log_warn "No Docker volumes found"
        cleanup
        exit 0
    fi

    volume_count=$(echo "$volumes" | wc -l | tr -d ' ')
    log_info "Found $volume_count volume(s) to backup"

    successful=0
    failed=0
    current=0
    for volume in $volumes; do
        current=$((current + 1))
        log_info "Processing volume $current/$volume_count"

        if backup_volume "$volume"; then
            successful=$((successful + 1))
        else
            failed=$((failed + 1))
        fi

        echo ""
    done

    # Cleanup
    cleanup

    # Print summary
    log_info "==========================="
    log_info "Backup Summary"
    log_info "==========================="
    log_info "Total volumes: $volume_count"
    log_info "Successful: $successful"
    log_info "Failed: $failed"

    if [ "$failed" -gt 0 ]; then
        log_warn "Some backups failed. Please check the logs above."
        exit 1
    else
        log_info "All backups completed successfully!"
        exit 0
    fi
}

# Run main function
main