Example 1
Score: 0

<?php 
// Load the S3 destination module and the bundled AWS SDK (v1-style AmazonS3 class).
require_once pb_backupbuddy::plugin_path() . '/destinations/s3/init.php';
require_once dirname(dirname(__FILE__)) . '/_s3lib/aws-sdk/sdk.class.php';
// Settings: look up the remote destination selected by the `destination_id` GET parameter.
if (isset(pb_backupbuddy::$options['remote_destinations'][pb_backupbuddy::_GET('destination_id')])) {
    $settings =& pb_backupbuddy::$options['remote_destinations'][pb_backupbuddy::_GET('destination_id')];
}
// NOTE(review): if the lookup above fails, $settings is undefined here and
// array_merge() will error — confirm callers always pass a valid destination_id.
$settings = array_merge(pb_backupbuddy_destination_s3::$default_settings, $settings);
$settings['bucket'] = strtolower($settings['bucket']);
// Buckets must be lowercase.
$remote_path = pb_backupbuddy_destination_s3::get_remote_path($settings['directory']);
// Resolve credentials (key, secret, optional session token) for this destination.
$manage_data = pb_backupbuddy_destination_s3::get_credentials($settings);
// Connect to S3.
$s3 = new AmazonS3($manage_data);
// Disable SSL when the destination is configured with ssl == '0'.
if ($settings['ssl'] == '0') {
    @$s3->disable_ssl(true);
}
// The bucket must exist and we must get its region to be able to proceed.
$region = '';
pb_backupbuddy::status('details', 'Getting region for bucket: `' . $settings['bucket'] . "`.");
$response = pb_backupbuddy_destination_s3::get_bucket_region($s3, $settings['bucket']);
if (!$response->isOK()) {
    $this_error = 'Bucket region could not be determined for management operation. Message details: `' . (string) $response->body->Message . '`.';
    pb_backupbuddy::status('error', $this_error);
} else {
    // An empty region body is treated as the default region, us-east-1.
    pb_backupbuddy::status('details', 'Bucket exists in region: ' . ($response->body === "" ? 'us-east-1' : $response->body));
Example 2
Score: 0
File: init.php — Project: russtx/tac
 /**
  * Remote housekeeping: abort stalled Amazon S3 multipart uploads left behind
  * by BackupBuddy.
  *
  * Lists in-progress multipart uploads (prefix `backup`) in the configured
  * bucket and aborts any BackupBuddy upload (key containing `backup-`) whose
  * Initiated timestamp is older than 72 hours. Newer uploads are left alone so
  * active chunked sends are not disturbed.
  *
  * @param array $settings Destination settings; must include `bucket` and `ssl`
  *                        plus whatever get_credentials() / _prepareBucketAndRegion() read.
  * @param bool  $lessLogs When true (default), suppress verbose per-upload logging.
  *
  * @return bool True on success; false when the bucket/region could not be
  *              prepared or the multipart-upload listing failed.
  */
 public static function multipart_cleanup($settings, $lessLogs = true)
 {
     // Amazon S3 bucket names must be lowercase.
     $settings['bucket'] = strtolower($settings['bucket']);
     // Seconds of max age (72 hours) to allow a stalled multipart upload.
     $max_age = 60 * 60 * 72;
     require_once dirname(dirname(__FILE__)) . '/_s3lib/aws-sdk/sdk.class.php';
     pb_backupbuddy::status('details', 'Amazon S3 Multipart Remote Housekeeping Starting ...');
     $manage_data = pb_backupbuddy_destination_s3::get_credentials($settings);
     // Create S3 instance from the resolved key / secret / token.
     pb_backupbuddy::status('details', 'Creating S3 instance.');
     $s3 = new AmazonS3($manage_data);
     if ($settings['ssl'] == 0) {
         // Loose comparison kept on purpose: stored value may be int 0 or string '0'.
         @$s3->disable_ssl(true);
     }
     pb_backupbuddy::status('details', 'S3 instance created. Listing in progress multipart uploads ...');
     // Verify bucket exists (do NOT create it) and point the SDK at its region.
     if (false === self::_prepareBucketAndRegion($s3, $settings, $createBucket = false)) {
         return false;
     }
     // Get the in-progress multipart uploads for BackupBuddy files.
     $response = $s3->list_multipart_uploads($settings['bucket'], array('prefix' => 'backup'));
     if (!$response->isOK()) {
         pb_backupbuddy::status('error', 'Error listing multipart uploads. Details: `' . print_r($response, true) . '`');
         // Was a bare `return;` (null): return false for consistency with the
         // bucket-preparation failure path above. Both are falsy to callers.
         return false;
     }
     if (true !== $lessLogs) {
         pb_backupbuddy::status('details', 'Multipart upload check retrieved. Found `' . count($response->body->Upload) . '` multipart uploads in progress / stalled. Details: `' . print_r($response, true) . '`');
     } else {
         pb_backupbuddy::status('details', 'Multipart upload check retrieved. Found `' . count($response->body->Upload) . '` multipart uploads in progress / stalled. Old BackupBuddy parts will be cleaned up (if any found) ...');
     }
     foreach ($response->body->Upload as $upload) {
         if (true !== $lessLogs) {
             pb_backupbuddy::status('details', 'Checking upload: ' . print_r($upload, true));
         }
         // Only touch BackupBuddy's own files (key contains `backup-`).
         if (false === stristr($upload->Key, 'backup-')) {
             continue;
         }
         $initiated = strtotime($upload->Initiated);
         if (true !== $lessLogs) {
             pb_backupbuddy::status('details', 'BackupBuddy Multipart Chunked Upload(s) detected in progress. Age: `' . pb_backupbuddy::$format->time_ago($initiated) . '`.');
         }
         if ($initiated + $max_age < time()) {
             // Stalled beyond the allowed age: abort it server-side.
             $abort_response = $s3->abort_multipart_upload($settings['bucket'], $upload->Key, $upload->UploadId);
             if (!$abort_response->isOK()) {
                 // Abort failed; surface for manual cleanup.
                 pb_backupbuddy::status('error', 'Stalled Amazon S3 Multipart Chunked abort of file `' . $upload->Key . '` with ID `' . $upload->UploadId . '` FAILED. Manually abort it.');
             } else {
                 // Aborted successfully.
                 pb_backupbuddy::status('details', 'Stalled Amazon S3 Multipart Chunked Uploads ABORTED ID `' . $upload->UploadId . '` of age `' . pb_backupbuddy::$format->time_ago($initiated) . '`.');
             }
         } else {
             if (true !== $lessLogs) {
                 pb_backupbuddy::status('details', 'Amazon S3 Multipart Chunked Uploads not aborted as not too old.');
             }
         }
     }
     // end foreach uploads.
     pb_backupbuddy::status('details', 'Amazon S3 Multipart Remote Housekeeping Finished.');
     return true;
 }