Example #1
 /**
  * Gets s3 object
  *
  * @param  boolean   $debug Return the error message instead of stopping the script
  * @return \AmazonS3 s3 object
  */
 public function s3($debug = false)
 {
      // This is a workaround for the Composer autoloader
     if (!class_exists('CFLoader')) {
         throw new ClassNotFoundException('Amazon: autoload failed');
     }
     if (empty($this->_s3)) {
         \CFCredentials::set(array('@default' => array('key' => $this->getOption('key'), 'secret' => $this->getOption('secret'))));
         $this->_s3 = new \AmazonS3();
         $this->_s3->use_ssl = false;
         $this->_buckets = fn_array_combine($this->_s3->get_bucket_list(), true);
     }
     $message = '';
     $bucket = $this->getOption('bucket');
     if (empty($this->_buckets[$bucket])) {
         $res = $this->_s3->create_bucket($bucket, $this->getOption('region'));
         if ($res->isOK()) {
             $res = $this->_s3->create_cors_config($bucket, array('cors_rule' => array(array('allowed_origin' => '*', 'allowed_method' => 'GET'))));
             if ($res->isOK()) {
                 $this->_buckets[$bucket] = true;
             } else {
                 $message = (string) $res->body->Message;
             }
         } else {
             $message = (string) $res->body->Message;
         }
     }
     if (!empty($message)) {
         if ($debug == true) {
             return $message;
         }
         throw new ExternalException('Amazon: ' . $message);
     }
     return $this->_s3;
 }
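
A minimal usage sketch for the accessor above ($storage stands in for whatever object exposes this method; the variable name is an assumption):

 // Hypothetical caller: with $debug = true the method returns the AWS error
 // message as a string instead of throwing ExternalException.
 $result = $storage->s3(true);
 if (is_string($result)) {
     error_log('S3 setup failed: ' . $result);
 } else {
     // $result is the configured \AmazonS3 client; use it normally.
     $objects = $result->get_object_list($storage->getOption('bucket'));
 }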
Example #2

 protected function execute($arguments = array(), $options = array())
 {
     $this->configuration = ProjectConfiguration::getApplicationConfiguration($options['app'], $options['env'], true);
     if (!sfConfig::get('app_sf_amazon_plugin_access_key', false)) {
          throw new sfException('You have not set an Amazon access key');
     }
     if (!sfConfig::get('app_sf_amazon_plugin_secret_key', false)) {
          throw new sfException('You have not set an Amazon secret key');
     }
     $s3 = new AmazonS3(sfConfig::get('app_sf_amazon_plugin_access_key'), sfConfig::get('app_sf_amazon_plugin_secret_key'));
     $this->s3_response = $s3->create_bucket($arguments['bucket'], $options['region'], $options['acl']);
     if ($this->s3_response->isOk()) {
          $this->log('Bucket is being created...');
         /* Since AWS follows an "eventual consistency" model, sleep and poll
            until the bucket is available. */
         $exists = $s3->if_bucket_exists($arguments['bucket']);
         while (!$exists) {
             // Not yet? Sleep for 1 second, then check again
             sleep(1);
             $exists = $s3->if_bucket_exists($arguments['bucket']);
         }
         $this->logSection('Bucket+', sprintf('"%s" created successfully', $arguments['bucket']));
     } else {
         throw new sfException($this->s3_response->body->Message);
     }
 }
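
The polling loop above has no upper bound, so a creation that ultimately fails would spin forever. A bounded variant in the same style (the 30-attempt cap is an arbitrary choice for illustration):

 $exists = $s3->if_bucket_exists($arguments['bucket']);
 $attempts = 0;
 while (!$exists && $attempts < 30) {
     // Wait out eventual consistency, then re-check.
     sleep(1);
     $exists = $s3->if_bucket_exists($arguments['bucket']);
     $attempts++;
 }
 if (!$exists) {
     throw new sfException(sprintf('Bucket "%s" did not become available in time', $arguments['bucket']));
 }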
Example #3
 /**
  * @param $jobid
  * @return string
  */
 public function edit_form_post_save($jobid)
 {
     BackWPup_Option::update($jobid, 's3accesskey', isset($_POST['s3accesskey']) ? $_POST['s3accesskey'] : '');
     BackWPup_Option::update($jobid, 's3secretkey', isset($_POST['s3secretkey']) ? BackWPup_Encryption::encrypt($_POST['s3secretkey']) : '');
     BackWPup_Option::update($jobid, 's3base_url', isset($_POST['s3base_url']) ? esc_url_raw($_POST['s3base_url']) : '');
     BackWPup_Option::update($jobid, 's3region', isset($_POST['s3region']) ? $_POST['s3region'] : '');
     BackWPup_Option::update($jobid, 's3storageclass', isset($_POST['s3storageclass']) ? $_POST['s3storageclass'] : '');
     BackWPup_Option::update($jobid, 's3ssencrypt', isset($_POST['s3ssencrypt']) && $_POST['s3ssencrypt'] == 'AES256' ? 'AES256' : '');
     BackWPup_Option::update($jobid, 's3bucket', isset($_POST['s3bucket']) ? $_POST['s3bucket'] : '');
     $_POST['s3dir'] = trailingslashit(str_replace('//', '/', str_replace('\\', '/', trim(stripslashes($_POST['s3dir'])))));
     if (substr($_POST['s3dir'], 0, 1) == '/') {
         $_POST['s3dir'] = substr($_POST['s3dir'], 1);
     }
     if ($_POST['s3dir'] == '/') {
         $_POST['s3dir'] = '';
     }
     BackWPup_Option::update($jobid, 's3dir', $_POST['s3dir']);
     BackWPup_Option::update($jobid, 's3maxbackups', isset($_POST['s3maxbackups']) ? (int) $_POST['s3maxbackups'] : 0);
     BackWPup_Option::update($jobid, 's3syncnodelete', isset($_POST['s3syncnodelete']) && $_POST['s3syncnodelete'] == 1 ? TRUE : FALSE);
     BackWPup_Option::update($jobid, 's3multipart', isset($_POST['s3multipart']) && $_POST['s3multipart'] == 1 ? TRUE : FALSE);
     //create new bucket
     if (!empty($_POST['s3newbucket'])) {
         try {
             $s3 = new AmazonS3(array('key' => $_POST['s3accesskey'], 'secret' => BackWPup_Encryption::decrypt($_POST['s3secretkey']), 'certificate_authority' => TRUE));
             $base_url = $this->get_s3_base_url($_POST['s3region'], $_POST['s3base_url']);
             if (stristr($base_url, 'amazonaws.com')) {
                 $s3->set_region(str_replace(array('http://', 'https://'), '', $base_url));
             } else {
                 $s3->set_hostname(str_replace(array('http://', 'https://'), '', $base_url));
                 $s3->allow_hostname_override(FALSE);
                 if (substr($base_url, -1) == '/') {
                     $s3->enable_path_style(TRUE);
                 }
             }
             if (stristr($base_url, 'http://')) {
                 $s3->disable_ssl();
             }
             // set bucket creation region
             if ($_POST['s3region'] == 'google-storage' || $_POST['s3region'] == 'hosteurope') {
                 $region = 'EU';
             } else {
                 $region = str_replace(array('http://', 'https://'), '', $base_url);
             }
             $bucket = $s3->create_bucket($_POST['s3newbucket'], $region, 'private');
             if ($bucket->status == 200) {
                 BackWPup_Admin::message(sprintf(__('Bucket %1$s created in %2$s.', 'backwpup'), $_POST['s3newbucket'], $bucket['Location']));
             } else {
                 BackWPup_Admin::message(sprintf(__('Bucket %s could not be created.', 'backwpup'), $_POST['s3newbucket']), TRUE);
             }
         } catch (Exception $e) {
             BackWPup_Admin::message($e->getMessage(), TRUE);
         }
         BackWPup_Option::update($jobid, 's3bucket', $_POST['s3newbucket']);
     }
 }
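
The s3dir handling above (unify separators, collapse double slashes, strip a leading slash, guarantee a trailing slash, map the bare root to an empty prefix) could live in a small helper; a sketch, where the function name is an assumption:

 // Hypothetical helper mirroring the normalization performed above.
 function backwpup_normalize_s3_dir($dir)
 {
     // Unify separators and guarantee exactly one trailing slash.
     $dir = trailingslashit(str_replace('//', '/', str_replace('\\', '/', trim(stripslashes($dir)))));
     if (substr($dir, 0, 1) === '/') {
         $dir = substr($dir, 1); // S3 object keys must not start with '/'.
     }
     return $dir === '/' ? '' : $dir; // A bare root maps to the empty prefix.
 }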
Example #4
<?php

// Initialize the AmazonS3 class
$s3 = new AmazonS3();
// Create a bucket for storing a file
$response = $s3->create_bucket('my-bucket', AmazonS3::REGION_US_E1);
if (!$response->isOK()) {
    die('Error while creating the bucket');
}
$data = file_get_contents('/my/local/dir/picture.jpg');
$response = $s3->create_object('my-bucket', 'picture.jpg', array('body' => $data));
if (!$response->isOK()) {
    die('Error while storing the file');
}
echo "Il file è stato memorizzato con successo";
Example #5
         $jobvalues['awsBucket'] = $_POST['newawsBucket'];
     } catch (Exception $e) {
         $backwpup_message .= __($e->getMessage(), 'backwpup') . '<br />';
     }
 }
 if (!empty($_POST['GStorageAccessKey']) and !empty($_POST['GStorageSecret']) and !empty($_POST['newGStorageBucket'])) {
     //create new google storage bucket if needed
     if (!class_exists('CFRuntime')) {
         require_once dirname(__FILE__) . '/../libs/aws/sdk.class.php';
     }
     try {
         CFCredentials::set(array('backwpup' => array('key' => $_POST['GStorageAccessKey'], 'secret' => $_POST['GStorageSecret'], 'default_cache_config' => '', 'certificate_authority' => true), '@default' => 'backwpup'));
         $gstorage = new AmazonS3();
         $gstorage->set_hostname('storage.googleapis.com');
         $gstorage->allow_hostname_override(false);
         $gstorage->create_bucket($_POST['newGStorageBucket'], '');
         $jobvalues['GStorageBucket'] = $_POST['newGStorageBucket'];
         sleep(1);
          // Bucket creation takes a moment.
     } catch (Exception $e) {
         $backwpup_message .= __($e->getMessage(), 'backwpup') . '<br />';
     }
 }
 if (!empty($_POST['newmsazureContainer']) and !empty($_POST['msazureHost']) and !empty($_POST['msazureAccName']) and !empty($_POST['msazureKey'])) {
      // Create a new Microsoft Azure container if needed
     if (!class_exists('Microsoft_WindowsAzure_Storage_Blob')) {
         require_once dirname(__FILE__) . '/../libs/Microsoft/WindowsAzure/Storage/Blob.php';
     }
     try {
         $storageClient = new Microsoft_WindowsAzure_Storage_Blob($_POST['msazureHost'], $_POST['msazureAccName'], $_POST['msazureKey']);
         $result = $storageClient->createContainer($_POST['newmsazureContainer']);
Example #6
 /**
  * Attempt to create the bucket for the given region
  *
  * @param string $region
  * @return bool
  */
 public function createBucket($region = AmazonS3::REGION_US_W1)
 {
     $response = $this->s3->create_bucket($this->bucket, $region);
      return $response->isOK();
 }
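
A defensive variant might check for existence first so repeated calls stay idempotent; a sketch (the method name is an assumption, not part of the original class):

 public function createBucketIfMissing($region = AmazonS3::REGION_US_W1)
 {
     // create_bucket() returns an error status when the bucket already
     // exists, so short-circuit on an existence check first.
     if ($this->s3->if_bucket_exists($this->bucket)) {
         return true;
     }
     return $this->s3->create_bucket($this->bucket, $region)->isOK();
 }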
Example #7

/*%******************************************************************************************%*/
// SETUP
// Enable full-blown error reporting. http://twitter.com/rasmus/status/7448448829
error_reporting(-1);
// Set plain text headers
header("Content-type: text/plain; charset=utf-8");
// Include the SDK
require_once '../sdk.class.php';
/*%******************************************************************************************%*/
// UPLOAD FILES TO S3
// Instantiate the AmazonS3 class
$s3 = new AmazonS3();
// Determine a completely unique bucket name (all lowercase)
$bucket = 'php-sdk-getting-started-' . strtolower($s3->key) . '-' . time();
// Create our new bucket in the US-West region.
$create_bucket_response = $s3->create_bucket($bucket, AmazonS3::REGION_US_W1);
// Provided that the bucket was created successfully...
if ($create_bucket_response->isOK()) {
    /* Since AWS follows an "eventual consistency" model, sleep and poll
       until the bucket is available. */
    $exists = $s3->if_bucket_exists($bucket);
    while (!$exists) {
        // Not yet? Sleep for 1 second, then check again
        sleep(1);
        $exists = $s3->if_bucket_exists($bucket);
    }
    /*
    	Get a list of files to upload. We'll use some helper functions we've
    	defined below. This assumes that you have a directory called "test_files"
    	that actually contains some files you want to upload.
    */
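
The full sample continues by queueing each file on the SDK's batch queue and sending the uploads in parallel; a sketch of that step, using glob() in place of the sample's helper functions:

// Queue an upload for every file in "test_files/" on the SDK's batch queue.
foreach (glob('test_files/*') as $path) {
    $s3->batch()->create_object($bucket, basename($path), array(
        'fileUpload' => $path, // Stream from disk instead of loading into memory.
        'acl'        => AmazonS3::ACL_PUBLIC,
    ));
}
// Send all queued requests at once and check the aggregate result.
$upload_responses = $s3->batch()->send();
echo $upload_responses->areOK() ? 'All files uploaded.' : 'One or more uploads failed.';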
Example #8
 public function createAmazonBucketName($name)
 {
     $name = strtolower($name);
     ProjectConfiguration::registerAws();
     $s3 = new AmazonS3();
     if (!$s3->if_bucket_exists($name)) {
         $s3->create_bucket($name, AmazonS3::REGION_US_E1, AmazonS3::ACL_AUTH_READ);
         $exists = $s3->if_bucket_exists($name);
         $attempts = 0;
         while (!$exists && $attempts < 10) {
             // Not yet? Sleep for 1 second, then check again
             sleep(1);
             $exists = $s3->if_bucket_exists($name);
             $attempts++;
         }
         if (!$exists) {
             $cdn = new AmazonCloudFront();
             $cdn->create_distribution($name, md5('caller_reference_' . microtime()));
         }
         return $name;
     }
     $response = $s3->get_bucket_policy($name);
     if (in_array($response->status, array(403, 405))) {
         return $this->createAmazonBucketName($name . rand(0, 1000));
     }
 }
Example #9
 public static function send($settings = array(), $files = array(), $clear_uploads = false)
 {
      // TODO: Currently force-prevent aborting, since we don't allow chunking yet; we must take care not to leave dangling multipart uploads, as they incur charges for the user.
     $clear_uploads = false;
     global $pb_backupbuddy_destination_errors;
     if (!is_array($files)) {
         $files = array($files);
     }
     if ($clear_uploads === false) {
         // Uncomment the following line to override and always clear.
         //$clear_uploads = true;
     }
     $db_archive_limit = $settings['db_archive_limit'];
     $full_archive_limit = $settings['full_archive_limit'];
     $max_chunk_size = $settings['max_chunk_size'];
     $remote_path = self::get_remote_path($settings['directory']);
      // Has leading and trailing slashes.
     if ($settings['ssl'] == '0') {
         $disable_ssl = true;
     } else {
         $disable_ssl = false;
     }
     $multipart_id = $settings['_multipart_id'];
     $multipart_counts = $settings['_multipart_counts'];
     pb_backupbuddy::status('details', 'Stash remote path set to `' . $remote_path . '`.');
     require_once dirname(dirname(__FILE__)) . '/_s3lib/aws-sdk/sdk.class.php';
     $manage_data = self::get_manage_data($settings);
     // Wipe all current uploads.
     if ($clear_uploads === true) {
         pb_backupbuddy::status('details', 'Clearing any current uploads via Stash call to `abort-all`.');
         $abort_url = $stash->get_upload_url(null, 'abort-all');
         $request = new RequestCore($abort_url);
         //pb_backupbuddy::status('details', print_r( $request , true ) );
         $response = $request->send_request(true);
     }
     // Process multipart transfer that we already initiated in a previous PHP load.
     if ($multipart_id != '') {
         // Multipart upload initiated and needs parts sent.
         // Create S3 instance.
         pb_backupbuddy::status('details', 'Creating Stash S3 instance.');
         $s3 = new AmazonS3($settings['_multipart_upload_data']['credentials']);
         // the key, secret, token
         if ($disable_ssl === true) {
             @$s3->disable_ssl(true);
         }
         pb_backupbuddy::status('details', 'Stash S3 instance created.');
         $this_part_number = $settings['_multipart_partnumber'] + 1;
         pb_backupbuddy::status('details', 'Stash beginning upload of part `' . $this_part_number . '` of `' . count($settings['_multipart_counts']) . '` parts of file `' . $settings['_multipart_file'] . '` with multipart ID `' . $settings['_multipart_id'] . '`.');
         $response = $s3->upload_part($settings['_multipart_upload_data']['bucket'], $settings['_multipart_upload_data']['object'], $settings['_multipart_id'], array('expect' => '100-continue', 'fileUpload' => $settings['_multipart_file'], 'partNumber' => $this_part_number, 'seekTo' => (int) $settings['_multipart_counts'][$settings['_multipart_partnumber']]['seekTo'], 'length' => (int) $settings['_multipart_counts'][$settings['_multipart_partnumber']]['length']));
         if (!$response->isOK()) {
             $this_error = 'Stash unable to upload file part for multipart upload `' . $settings['_multipart_id'] . '`. Details: `' . print_r($response, true) . '`.';
             $pb_backupbuddy_destination_errors[] = $this_error;
             pb_backupbuddy::status('error', $this_error);
             return false;
         }
         // Update stats.
         foreach (pb_backupbuddy::$options['remote_sends'] as $identifier => $remote_send) {
             if (isset($remote_send['_multipart_id']) && $remote_send['_multipart_id'] == $multipart_id) {
                 // this item.
                 pb_backupbuddy::$options['remote_sends'][$identifier]['_multipart_status'] = 'Sent part ' . $this_part_number . ' of ' . count($settings['_multipart_counts']) . '.';
                 if ($this_part_number == count($settings['_multipart_counts'])) {
                     pb_backupbuddy::$options['remote_sends'][$identifier]['_multipart_status'] .= '<br>Success.';
                     pb_backupbuddy::$options['remote_sends'][$identifier]['finish_time'] = time();
                 }
                 pb_backupbuddy::save();
                 break;
             }
         }
         // Made it here so success sending part. Increment for next part to send.
         $settings['_multipart_partnumber']++;
         if (!isset($settings['_multipart_counts'][$settings['_multipart_partnumber']])) {
             // No more parts exist for this file. Tell S3 the multipart upload is complete and move on.
             pb_backupbuddy::status('details', 'Stash getting parts with etags to notify S3 of completed multipart send.');
             $etag_parts = $s3->list_parts($settings['_multipart_upload_data']['bucket'], $settings['_multipart_upload_data']['object'], $settings['_multipart_id']);
             pb_backupbuddy::status('details', 'Stash got parts list. Notifying S3 of multipart upload completion.');
             $response = $s3->complete_multipart_upload($settings['_multipart_upload_data']['bucket'], $settings['_multipart_upload_data']['object'], $settings['_multipart_id'], $etag_parts);
             if (!$response->isOK()) {
                 $this_error = 'Stash unable to notify S3 of completion of all parts for multipart upload `' . $settings['_multipart_id'] . '`.';
                 $pb_backupbuddy_destination_errors[] = $this_error;
                 pb_backupbuddy::status('error', $this_error);
                 return false;
             } else {
                 pb_backupbuddy::status('details', 'Stash notified S3 of multipart completion.');
             }
              // Notify Stash API that things were successful.
             $done_url = $stash->get_upload_url($settings['_multipart_file'], 'done', $remote_path . $settings['_multipart_backup_type_dir'] . basename($settings['_multipart_file']));
             pb_backupbuddy::status('details', 'Notifying Stash of completed multipart upload with done url `' . $done_url . '`.');
             $request = new RequestCore($done_url);
             $response = $request->send_request(true);
             if (!$response->isOK()) {
                 $this_error = 'Error #753236834682. Could not finalize Stash upload. Response code: `' . $response->get_response_code() . '`; Response body: `' . $response->get_response_body() . '`; Response headers: `' . $response->get_response_header() . '`.';
                 $pb_backupbuddy_destination_errors[] = $this_error;
                 pb_backupbuddy::status('error', $this_error);
                 return false;
             } else {
                 // Good server response.
                 // See if we got an optional json response.
                 $upload_data = @json_decode($response->body, true);
                 if (isset($upload_data['error'])) {
                     $this_error = 'Stash error(s): `' . implode(' - ', $upload_data['error']) . '`.';
                     $pb_backupbuddy_destination_errors[] = $this_error;
                     pb_backupbuddy::status('error', $this_error);
                     return false;
                 }
                 pb_backupbuddy::status('details', 'Stash success sending file `' . basename($settings['_multipart_file']) . '`. File uploaded via multipart across `' . $this_part_number . '` parts and reported to Stash as completed.');
             }
             pb_backupbuddy::status('details', 'Stash has no more parts left for this multipart upload. Clearing multipart instance variables.');
             $settings['_multipart_partnumber'] = 0;
             $settings['_multipart_id'] = '';
             $settings['_multipart_file'] = '';
             $settings['_multipart_counts'] = array();
             $settings['_multipart_upload_data'] = array();
         }
         delete_transient('pb_backupbuddy_stashquota_' . $settings['itxapi_username']);
         // Delete quota transient since it probably has changed now.
         // Schedule to continue if anything is left to upload for this multipart of any individual files.
         if ($settings['_multipart_id'] != '' || count($files) > 0) {
             pb_backupbuddy::status('details', 'Stash multipart upload has more parts left. Scheduling next part send.');
             pb_backupbuddy::$classes['core']->schedule_single_event(time(), pb_backupbuddy::cron_tag('destination_send'), array($settings, $files, 'multipart', false));
             spawn_cron(time() + 150);
             // Adds > 60 seconds to get around once per minute cron running limit.
             update_option('_transient_doing_cron', 0);
             // Prevent cron-blocking for next item.
             pb_backupbuddy::status('details', 'Stash scheduled send of next part(s). Done for this cycle.');
              return array($settings['_multipart_id'], 'Sent ' . $this_part_number . ' of ' . count($settings['_multipart_counts']) . ' parts.');
         }
     }
     // Create S3 instance.
     pb_backupbuddy::status('details', 'Creating S3 instance.');
     $s3 = new AmazonS3(self::get_manage_data($settings));
     // the key, secret, token
     if ($disable_ssl === true) {
         @$s3->disable_ssl(true);
     }
     pb_backupbuddy::status('details', 'Stash S3 instance created.');
     // Verify bucket exists.
     $response = $s3->create_bucket($settings['bucket'], 's3.amazonaws.com');
     require_once pb_backupbuddy::plugin_path() . '/classes/fileoptions.php';
     // Upload each file.
     foreach ($files as $file_id => $file) {
         // Determine backup type directory (if zip).
         $backup_type_dir = '';
         $backup_type = '';
         if (stristr($file, '.zip') !== false) {
             // If a zip try to determine backup type.
             // See if we can get backup type from fileoptions data.
             $backup_options = new pb_backupbuddy_fileoptions(pb_backupbuddy::$options['log_directory'] . 'fileoptions/' . $serial . '.txt', $read_only = true);
             if (true !== ($result = $backup_options->is_ok())) {
                 pb_backupbuddy::status('error', 'Unable to open fileoptions file.');
             } else {
                 if (isset($backup_options->options['integrity']['detected_type'])) {
                     pb_backupbuddy::status('details', 'S3: Detected backup type as `' . $backup_options->options['integrity']['detected_type'] . '` via integrity check data.');
                     $backup_type_dir = $backup_options->options['integrity']['detected_type'] . '/';
                     $backup_type = $backup_options->options['integrity']['detected_type'];
                 }
             }
             if ('' == $backup_type) {
                 if (stristr($file, '-db-') !== false) {
                     pb_backupbuddy::status('details', 'Stash: Detected backup type as `db` via filename.');
                     $backup_type_dir = 'db/';
                     $backup_type = 'db';
                 } elseif (stristr($file, '-full-') !== false) {
                     pb_backupbuddy::status('details', 'Stash: Detected backup type as `full` via filename.');
                     $backup_type_dir = 'full/';
                     $backup_type = 'full';
                 } else {
                     pb_backupbuddy::status('details', 'Stash: Could not detect backup type via integrity details nor filename.');
                 }
             }
         }
         // Handle chunking of file into a multipart upload (if applicable).
         $file_size = filesize($file);
         if ($max_chunk_size >= 5 && $file_size / 1024 / 1024 > $max_chunk_size) {
             // minimum chunk size is 5mb. Anything under 5mb we will not chunk.
             pb_backupbuddy::status('details', 'Stash file size of ' . $file_size / 1024 / 1024 . 'MB exceeds max chunk size of ' . $max_chunk_size . 'MB set in settings for sending file as multipart upload.');
             // Initiate multipart upload with S3.
             pb_backupbuddy::status('details', 'Initiating Stash multipart upload.');
             $response = $s3->initiate_multipart_upload($upload_data['bucket'], $upload_data['object'], array('encryption' => $settings['server_encryption']));
             if (!$response->isOK()) {
                 $this_error = 'Stash was unable to initiate multipart upload.';
                 $pb_backupbuddy_destination_errors[] = $this_error;
                 pb_backupbuddy::status('error', $this_error);
                 return false;
             } else {
                 $upload_id = (string) $response->body->UploadId;
                 pb_backupbuddy::status('details', 'Stash initiated multipart upload with ID `' . $upload_id . '`.');
             }
             // Get chunk parts for multipart transfer.
             pb_backupbuddy::status('details', 'Stash getting multipart counts.');
             $parts = $s3->get_multipart_counts($file_size, $max_chunk_size * 1024 * 1024);
             // Size of chunks expected to be in bytes.
             $multipart_destination_settings = $settings;
             $multipart_destination_settings['_multipart_id'] = $upload_id;
             $multipart_destination_settings['_multipart_partnumber'] = 0;
             $multipart_destination_settings['_multipart_file'] = $file;
             $multipart_destination_settings['_multipart_counts'] = $parts;
             $multipart_destination_settings['_multipart_upload_data'] = $upload_data;
             $multipart_destination_settings['_multipart_backup_type_dir'] = $backup_type_dir;
              pb_backupbuddy::status('details', 'Stash multipart settings to pass: `' . print_r($multipart_destination_settings, true) . '`.');
              pb_backupbuddy::status('details', 'Stash scheduling send of next part(s).');
             pb_backupbuddy::$classes['core']->schedule_single_event(time(), pb_backupbuddy::cron_tag('destination_send'), array($multipart_destination_settings, $files, 'multipart', false));
             spawn_cron(time() + 150);
             // Adds > 60 seconds to get around once per minute cron running limit.
             update_option('_transient_doing_cron', 0);
             // Prevent cron-blocking for next item.
             pb_backupbuddy::status('details', 'Stash scheduled send of next part(s). Done for this cycle.');
             return array($upload_id, 'Starting send of ' . count($multipart_destination_settings['_multipart_counts']) . ' parts.');
         } else {
             if ($max_chunk_size != '0') {
                 pb_backupbuddy::status('details', 'File size of ' . $file_size / 1024 / 1024 . 'MB is less than the max chunk size of ' . $max_chunk_size . 'MB; not chunking into multipart upload.');
             } else {
                 pb_backupbuddy::status('details', 'Max chunk size set to zero so not chunking into multipart upload.');
             }
         }
         // SEND file.
         pb_backupbuddy::status('details', 'About to put (upload) object to S3.');
         $response = $s3->create_object($settings['bucket'], $remote_path . basename($file), array('fileUpload' => $file, 'encryption' => $settings['server_encryption']));
         // Validate response. On failure notify Stash API that things went wrong.
         if (!$response->isOK()) {
             $this_error = 'Error #752323446834682. Could not send to S3. Body: `' . $response->body . '`; Header: `' . print_r($response->header, true) . '`; Status: `' . $response->status . '`;';
             $pb_backupbuddy_destination_errors[] = $this_error;
             pb_backupbuddy::status('error', $this_error);
             return false;
         } else {
             pb_backupbuddy::status('details', 'S3 put success. Details: `' . print_r($response, true) . '`.');
         }
         delete_transient('pb_backupbuddy_stashquota_' . $settings['itxapi_username']);
         // Delete quota transient since it probably has changed now.
          // Notify Stash API that things were successful.
         $done_url = $stash->get_upload_url($file, 'done', $remote_path . $backup_type_dir . basename($file));
         pb_backupbuddy::status('details', 'Notifying Stash of completed upload with done url `' . $done_url . '`.');
         $request = new RequestCore($done_url);
         $response = $request->send_request(true);
         if (!$response->isOK()) {
             $this_error = 'Error #7568849984434682. Could not finalize Stash upload. Response code: `' . $response->get_response_code() . '`; Response body: `' . $response->get_response_body() . '`; Response headers: `' . $response->get_response_header() . '`.';
             $pb_backupbuddy_destination_errors[] = $this_error;
             pb_backupbuddy::status('error', $this_error);
             return false;
         } else {
             // Good server response.
             // See if we got an optional json response.
             $upload_data = @json_decode($response->body, true);
             if (isset($upload_data['error'])) {
                 // Some kind of error.
                 $this_error = 'Stash error(s): `' . implode(' - ', $upload_data['error']) . '`.';
                 $pb_backupbuddy_destination_errors[] = $this_error;
                 pb_backupbuddy::status('error', $this_error);
                 return false;
             }
             unset($files[$file_id]);
             // Remove from list of files we have not sent yet.
             pb_backupbuddy::status('details', 'Stash success sending file `' . basename($file) . '`. File uploaded and reported to Stash as completed.');
         }
         // Enforce archive limits if applicable.
         if ($backup_type == 'full') {
             $limit = $full_archive_limit;
             pb_backupbuddy::status('details', 'Stash full backup archive limit of `' . $limit . '` based on destination settings.');
         } elseif ($backup_type == 'db') {
             $limit = $db_archive_limit;
             pb_backupbuddy::status('details', 'Stash database backup archive limit of `' . $limit . '` based on destination settings.');
         } else {
             $limit = 0;
             pb_backupbuddy::status('error', 'Error #54854895. Stash was unable to determine backup type so archive limits NOT enforced for this backup.');
         }
         if ($limit > 0) {
             pb_backupbuddy::status('details', 'Stash archive limit enforcement beginning.');
             // S3 object for managing files.
             $s3_manage = new AmazonS3($manage_data['credentials']);
             if ($disable_ssl === true) {
                 @$s3_manage->disable_ssl(true);
             }
             // Get file listing.
             $response_manage = $s3_manage->list_objects($manage_data['bucket'], array('prefix' => $manage_data['subkey'] . $remote_path . $backup_type_dir));
             // list all the files in the subscriber account
             // Create array of backups and organize by date
             $prefix = pb_backupbuddy::$classes['core']->backup_prefix();
             // List backups associated with this site by date.
             $backups = array();
             foreach ($response_manage->body->Contents as $object) {
                 $file = str_replace($manage_data['subkey'] . $remote_path . $backup_type_dir, '', $object->Key);
                 // Stash stores files in a directory per site so no need to check prefix here! if ( false !== strpos( $file, 'backup-' . $prefix . '-' ) ) { // if backup has this site prefix...
                 $backups[$file] = strtotime($object->LastModified);
                 //}
             }
             arsort($backups);
             //error_log( 'backups: ' . print_r( $backups, true ) );
             pb_backupbuddy::status('details', 'Stash found `' . count($backups) . '` backups of this type when checking archive limits.');
             if (count($backups) > $limit) {
                 pb_backupbuddy::status('details', 'More archives (' . count($backups) . ') than limit (' . $limit . ') allows. Trimming...');
                 $i = 0;
                 $delete_fail_count = 0;
                 foreach ($backups as $buname => $butime) {
                     $i++;
                     if ($i > $limit) {
                         pb_backupbuddy::status('details', 'Trimming excess file `' . $buname . '`...');
                         $response = $s3_manage->delete_object($manage_data['bucket'], $manage_data['subkey'] . $remote_path . $backup_type_dir . $buname);
                         if (!$response->isOK()) {
                             pb_backupbuddy::status('details', 'Unable to delete excess Stash file `' . $buname . '`. Details: `' . print_r($response, true) . '`.');
                             $delete_fail_count++;
                         }
                     }
                 }
                 pb_backupbuddy::status('details', 'Finished trimming excess backups.');
                 if ($delete_fail_count !== 0) {
                     $error_message = 'Stash remote limit could not delete ' . $delete_fail_count . ' backups.';
                     pb_backupbuddy::status('error', $error_message);
                     pb_backupbuddy::$classes['core']->mail_error($error_message);
                 }
             }
             pb_backupbuddy::status('details', 'Stash completed archive limiting.');
         } else {
             pb_backupbuddy::status('details', 'No Stash archive file limit to enforce.');
         }
         // End remote backup limit
     }
     // end foreach.
     // Success if we made it this far.
     return true;
 }
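
The chunking arithmetic that drives the multipart branch above is handled by the SDK's get_multipart_counts(), which splits a byte length into seekTo/length pairs; a standalone sketch (the file and chunk sizes are made up, and the client assumes credentials are already configured):

 // A 12 MB file with 5 MB chunks yields three parts: 5 MB, 5 MB, 2 MB.
 $s3 = new AmazonS3();
 $parts = $s3->get_multipart_counts(12 * 1024 * 1024, 5 * 1024 * 1024);
 foreach ($parts as $i => $part) {
     // Each entry holds the byte offset and length later handed to upload_part().
     printf("part %d: seekTo=%d length=%d\n", $i + 1, $part['seekTo'], $part['length']);
 }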
Example #10
 * of the License is located at
 *
 *       http://aws.amazon.com/apache2.0/
 *
 * or in the "license.txt" file accompanying this file. This file is
 * distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
 * OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the
 * License.
 *
 * Modified by Jeffrey S. Haemer <*****@*****.**>
 */
error_reporting(E_ALL);
require_once 'AWSSDKforPHP/sdk.class.php';
require_once 'include/book.inc.php';
if ($argc != 2) {
    exit("Usage: " . $argv[0] . " bucket name\n");
}
$bucket = $argv[1] == '-' ? BOOK_BUCKET : $argv[1];
// Create the S3 access object
$s3 = new AmazonS3();
// Create an S3 bucket
$res = $s3->create_bucket($bucket, AmazonS3::REGION_US_E1);
// Report on status
if ($res->isOK()) {
    print "'{$bucket}' bucket created\n";
} else {
    print "Error creating bucket '{$bucket}'\n";
    print_r($res);
}
exit(0);
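
For completeness, the tear-down counterpart follows the same pattern; delete_bucket() with the force flag empties the bucket before removing it (a sketch):

// Remove the bucket; $force = true deletes any contained objects first.
$res = $s3->delete_bucket($bucket, true);
if ($res->isOK()) {
    print "'{$bucket}' bucket deleted\n";
} else {
    print "Error deleting bucket '{$bucket}'\n";
}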