Example #1
 public function getFiles($remotePath)
 {
     $response = $this->_s3->list_objects($this->_bucket, array('prefix' => $remotePath));
     $files = array();
     if ($response->isOK()) {
         foreach ($response->body->Contents as $object) {
             $files[] = array('file' => (string) $object->Key, 'size' => (int) $object->Size, 'date' => (string) $object->LastModified);
         }
     } else {
         Api_Core_Application::log("Get files " . $remotePath, array('upload', array('header' => $response->header, 'status' => $response->status, 'body' => $response->body)), Api_Component_Log_Logger::LEVEL_ERROR);
     }
     return $files;
 }
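A minimal calling sketch for the method above, assuming an instance (here named $uploader, a hypothetical name) whose _s3 client and _bucket are already configured:
 $files = $uploader->getFiles('backups/2013/');
 foreach ($files as $f) {
     // Each entry carries the object key, its size in bytes, and the last-modified date.
     printf("%s\t%d bytes\t%s\n", $f['file'], $f['size'], $f['date']);
 }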
Example #2
 /**
  * lists bucket's objects, applying callback to each of them
  *
  * @param mixed $callback first argument of the callback is CFSimpleXML object
  * @param array $params
  */
 protected function _list($callback, $params = array())
 {
     // prepare data for loop
     $bucket = $this->getBucket();
     $baseDir = $this->getBaseDir();
     $marker = '';
     $itemCount = 0;
     $v = false;
     $firstBatch = true;
     do {
         $list = $this->_s3->list_objects($bucket, array('marker' => $marker, 'prefix' => $baseDir));
         if (!is_object($list->body->Contents)) {
             $this->_out->stop("S3 response problem, no content returned");
         }
         $count = $list->body->Contents->count();
         if ($count === 0) {
             if ($firstBatch) {
                 break;
             } else {
                 $this->_out->stop("S3 response problem, not all files returned");
             }
         }
         $this->_itemCount += $count;
         $jobFiles = $this->_out->jobStart("processing information about {$count} remote files");
         // download meta data
         //            $batch = new CFBatchRequest(3);
         //            foreach ($list->body->Contents as $v) {
         //                /** @noinspection PhpUndefinedMethodInspection */
         //                $this->_s3->batch($batch)->get_object_headers($bucket, $v->Key); // Get content-type
         //        }
         //            /** @var $response CFArray */
         //            $response = $this->_s3->batch($batch)->send();
         //            if (!$response->areOK()) {
         //                $this->_out->stop("S3 response problem, meta data not returned");
         //            }
         //            if (count($response) != $count) {
         //                $this->_out->stop("S3 response problem, meta data not returned for all files");
         //            }
         // process received information
         $metaId = 0;
         foreach ($list->body->Contents as $v) {
             switch (true) {
                 case is_array($callback):
                 case is_string($callback):
                     call_user_func($callback, $v, $params);
                     break;
                 case is_callable($callback):
                     /** @var $callback Closure */
                     $callback($v, $params);
                     break;
             }
         }
         $this->_out->jobEnd($jobFiles, "updated info about one batch of files");
         // move to next batch of files
         $marker = $v->Key;
         $firstBatch = false;
     } while ((string) $list->body->IsTruncated == 'true');
 }
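A hedged usage sketch for _list(): the callback receives each CFSimpleXML object from the listing, so a closure can aggregate over every object under the base dir (shown as it would be called from within the same class):
 // Sum the size of every object under the base dir (sketch).
 $totalBytes = 0;
 $this->_list(function ($object, $params) use (&$totalBytes) {
     // $object is a CFSimpleXML node from list_objects(); Size is its byte count.
     $totalBytes += (int) $object->Size;
 });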
Example #3
 protected function syncToS3($arguments = array(), $options = array())
 {
     list($bucket, $prefix) = explode(':', $arguments['destination']);
     $file_list = sfFinder::type('file')->in($arguments['source']);
     $object_list_response = $this->s3->list_objects($bucket);
     if (!$object_list_response->isOk()) {
         throw new sfException($object_list_response->body->Message);
     }
     $object_list = array(); // Guard: in_array() below requires an array even when the bucket is empty.
     if (isset($object_list_response->body->Contents)) {
         foreach ($object_list_response->body->Contents as $object) {
             // var_dump($object->LastModified);
             $object_list[] = $object->Key;
         }
     }
     $files_queued = 0;
     foreach ($file_list as $file) {
         $filename = explode(DIRECTORY_SEPARATOR, $file);
         $filename = array_pop($filename);
         $offset = strpos($file, $arguments['source']);
         $s3_location = substr(str_replace($arguments['source'], '', substr($file, $offset)), 1);
         if (in_array($s3_location, $object_list)) {
             continue;
         }
         $this->s3->batch()->create_object($bucket, $s3_location, array('fileUpload' => $file));
         $files_queued++;
         $this->logSection('file+', $bucket . ':' . $s3_location);
     }
     if ($files_queued <= 0) {
         $this->log('All files have already been synced, no need to upload any files');
         return;
     }
     $upload_response = $this->s3->batch()->send();
     if (!$upload_response->areOk()) {
         throw new sfException($upload_response->body->Message);
     }
     $this->log('Files synced to bucket');
 }
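The destination argument above is split on ':' into bucket and prefix, so a hypothetical invocation from the task's execute() method might look like this (paths and bucket name illustrative):
 // Queue every file under the web uploads dir that is not already in the bucket.
 $this->syncToS3(
     array('source' => sfConfig::get('sf_web_dir') . '/uploads', 'destination' => 'my-bucket:uploads'),
     array()
 );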
Example #4
 /**
  * Get a list of objects from within a bucket
  * @param string $dir
  * @return array
  */
 public function getS3ObjectList($dir)
 {
     $c = array('delimiter' => '/');
     if (!empty($dir) && $dir != '/') {
         $c['prefix'] = $dir;
     }
     $list = array();
     $cps = $this->driver->list_objects($this->bucket, $c);
     foreach ($cps->body->CommonPrefixes as $prefix) {
         if (!empty($prefix->Prefix) && $prefix->Prefix != $dir && $prefix->Prefix != '/') {
             $list[] = (string) $prefix->Prefix;
         }
     }
     $response = $this->driver->get_object_list($this->bucket, $c);
     foreach ($response as $file) {
         $list[] = $file;
     }
     return $list;
 }
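Because the request sets delimiter to '/', S3 reports the next level of "subdirectories" as CommonPrefixes while get_object_list() returns the keys at that level, so the combined array mimics a single directory listing. A usage sketch (the wrapping class name is hypothetical):
 $browser = new S3Browser(); // hypothetical class containing getS3ObjectList()
 foreach ($browser->getS3ObjectList('images/') as $entry) {
     // Entries ending in '/' came from CommonPrefixes, i.e. pseudo-directories.
     echo $entry, "\n";
 }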
Example #5
 public static function send($settings = array(), $files = array(), $send_id = '', $clear_uploads = false)
 {
     global $pb_backupbuddy_destination_errors;
     if (!is_array($files)) {
         $files = array($files);
     }
     if ($clear_uploads === false) {
         // Uncomment the following line to override and always clear.
         //$clear_uploads = true;
     }
     $itxapi_username = $settings['itxapi_username'];
     $itxapi_password = $settings['itxapi_password'];
     $db_archive_limit = $settings['db_archive_limit'];
     $full_archive_limit = $settings['full_archive_limit'];
     $files_archive_limit = $settings['files_archive_limit'];
     $max_chunk_size = $settings['max_chunk_size'];
     $remote_path = self::get_remote_path($settings['directory']);
     // Has leading and trailing slashes.
     if ($settings['ssl'] == '0') {
         $disable_ssl = true;
     } else {
         $disable_ssl = false;
     }
     $multipart_id = $settings['_multipart_id'];
     $multipart_counts = $settings['_multipart_counts'];
     pb_backupbuddy::status('details', 'Stash remote path set to `' . $remote_path . '`.');
     require_once dirname(__FILE__) . '/lib/class.itx_helper.php';
     require_once dirname(dirname(__FILE__)) . '/_s3lib/aws-sdk/sdk.class.php';
     // Stash API talk.
     $stash = new ITXAPI_Helper(pb_backupbuddy_destination_stash::ITXAPI_KEY, pb_backupbuddy_destination_stash::ITXAPI_URL, $itxapi_username, $itxapi_password);
     $manage_data = pb_backupbuddy_destination_stash::get_manage_data($settings);
     if (!is_array($manage_data['credentials'])) {
         pb_backupbuddy::status('error', 'Error #8484383b: Your authentication credentials for Stash failed. Verify your login and password to Stash. You may need to update the Stash destination settings. Perhaps you recently changed your password?');
         return false;
     }
     // Wipe all current uploads.
     if ($clear_uploads === true) {
         pb_backupbuddy::status('details', 'Clearing any current uploads via Stash call to `abort-all`.');
         $abort_url = $stash->get_upload_url(null, 'abort-all');
         $request = new RequestCore($abort_url);
         $response = $request->send_request(true);
     }
     // Process multipart transfer that we already initiated in a previous PHP load.
     if ($multipart_id != '') {
         // Multipart upload initiated and needs parts sent.
         // Create S3 instance.
         pb_backupbuddy::status('details', 'Creating Stash S3 instance.');
         $s3 = new AmazonS3($settings['_multipart_upload_data']['credentials']);
         // the key, secret, token
         if ($disable_ssl === true) {
             @$s3->disable_ssl(true);
         }
         pb_backupbuddy::status('details', 'Stash S3 instance created.');
         $backup_type = str_replace('/', '', $settings['_multipart_backup_type_dir']);
         // For use later by file limiting.
         $this_part_number = $settings['_multipart_partnumber'] + 1;
         pb_backupbuddy::status('details', 'Stash beginning upload of part `' . $this_part_number . '` of `' . count($settings['_multipart_counts']) . '` parts of file `' . $settings['_multipart_file'] . '` with multipart ID `' . $settings['_multipart_id'] . '`.');
         $response = $s3->upload_part($settings['_multipart_upload_data']['bucket'], $settings['_multipart_upload_data']['object'], $settings['_multipart_id'], array('expect' => '100-continue', 'fileUpload' => $settings['_multipart_file'], 'partNumber' => $this_part_number, 'seekTo' => (int) $settings['_multipart_counts'][$settings['_multipart_partnumber']]['seekTo'], 'length' => (int) $settings['_multipart_counts'][$settings['_multipart_partnumber']]['length']));
         if (!$response->isOK()) {
             $this_error = 'Stash unable to upload file part for multipart upload `' . $settings['_multipart_id'] . '`. Details: `' . print_r($response, true) . '`.';
             $pb_backupbuddy_destination_errors[] = $this_error;
             pb_backupbuddy::status('error', $this_error);
             return false;
         } else {
             $uploaded_size = $response->header['_info']['size_upload'];
             $uploaded_speed = $response->header['_info']['speed_upload'];
             pb_backupbuddy::status('details', 'Uploaded size: ' . pb_backupbuddy::$format->file_size($uploaded_size) . ', Speed: ' . pb_backupbuddy::$format->file_size($uploaded_speed) . '/sec.');
         }
         // Load fileoptions to the send.
         pb_backupbuddy::status('details', 'About to load fileoptions data.');
         require_once pb_backupbuddy::plugin_path() . '/classes/fileoptions.php';
         $fileoptions_obj = new pb_backupbuddy_fileoptions(backupbuddy_core::getLogDirectory() . 'fileoptions/send-' . $send_id . '.txt', $read_only = false, $ignore_lock = false, $create_file = false);
         if (true !== ($result = $fileoptions_obj->is_ok())) {
             pb_backupbuddy::status('error', __('Fatal Error #9034.2344848. Unable to access fileoptions data.', 'it-l10n-backupbuddy') . ' Error: ' . $result);
             return false;
         }
         pb_backupbuddy::status('details', 'Fileoptions data loaded.');
         $fileoptions =& $fileoptions_obj->options;
         $update_status = 'Sent part ' . $this_part_number . ' of ' . count($settings['_multipart_counts']) . '.';
         // Made it here so success sending part. Increment for next part to send.
         $settings['_multipart_partnumber']++;
         if (!isset($settings['_multipart_counts'][$settings['_multipart_partnumber']])) {
             // No more parts exist for this file. Tell S3 the multipart upload is complete and move on.
             pb_backupbuddy::status('details', 'Stash getting parts with etags to notify S3 of completed multipart send.');
             $etag_parts = $s3->list_parts($settings['_multipart_upload_data']['bucket'], $settings['_multipart_upload_data']['object'], $settings['_multipart_id']);
             pb_backupbuddy::status('details', 'Stash got parts list. Notifying S3 of multipart upload completion.');
             $response = $s3->complete_multipart_upload($settings['_multipart_upload_data']['bucket'], $settings['_multipart_upload_data']['object'], $settings['_multipart_id'], $etag_parts);
             if (!$response->isOK()) {
                 $this_error = 'Stash unable to notify S3 of completion of all parts for multipart upload `' . $settings['_multipart_id'] . '`.';
                 $pb_backupbuddy_destination_errors[] = $this_error;
                 pb_backupbuddy::status('error', $this_error);
                 return false;
             } else {
                 pb_backupbuddy::status('details', 'Stash notified S3 of multipart completion.');
             }
             $backup_type_dir = $settings['_multipart_backup_type_dir'];
             // Notify Stash API that things were successful.
             $done_url = $stash->get_upload_url($settings['_multipart_file'], 'done', $remote_path . $backup_type_dir . basename($settings['_multipart_file']));
             pb_backupbuddy::status('details', 'Notifying Stash of completed multipart upload with done url `' . $done_url . '`.');
             $request = new RequestCore($done_url);
             $response = $request->send_request(true);
             if (!$response->isOK()) {
                 $this_error = 'Error #756834682. Could not finalize Stash upload. Response code: `' . $response->get_response_code() . '`; Response body: `' . $response->get_response_body() . '`; Response headers: `' . $response->get_response_header() . '`.';
                 $pb_backupbuddy_destination_errors[] = $this_error;
                 pb_backupbuddy::status('error', $this_error);
                 return false;
             } else {
                 // Good server response.
                 // See if we got an optional json response.
                 $upload_data = @json_decode($response->body, true);
                 if (isset($upload_data['error'])) {
                     $this_error = 'Stash error(s): `' . implode(' - ', $upload_data['error']) . '`.';
                     $pb_backupbuddy_destination_errors[] = $this_error;
                     pb_backupbuddy::status('error', $this_error);
                     return false;
                 }
                 pb_backupbuddy::status('details', 'Stash success sending file `' . basename($settings['_multipart_file']) . '`. File uploaded via multipart across `' . $this_part_number . '` parts and reported to Stash as completed.');
             }
             pb_backupbuddy::status('details', 'Stash has no more parts left for this multipart upload. Clearing multipart instance variables.');
             $settings['_multipart_partnumber'] = 0;
             $settings['_multipart_id'] = '';
             $settings['_multipart_file'] = '';
             $settings['_multipart_upload_data'] = array();
             $settings['_multipart_transferspeeds'][] = $uploaded_speed;
             // Overall upload speed average.
             $uploaded_speed = array_sum($settings['_multipart_transferspeeds']) / count($settings['_multipart_counts']);
             pb_backupbuddy::status('details', 'Upload speed average of all chunks: `' . pb_backupbuddy::$format->file_size($uploaded_speed) . '`.');
             $settings['_multipart_counts'] = array();
             // Update stats.
             $fileoptions['_multipart_status'] = $update_status;
             $fileoptions['finish_time'] = time();
             $fileoptions['status'] = 'success';
             if (isset($uploaded_speed)) {
                 $fileoptions['write_speed'] = $uploaded_speed;
             }
             $fileoptions_obj->save();
             unset($fileoptions);
         }
         delete_transient('pb_backupbuddy_stashquota_' . $settings['itxapi_username']);
         // Delete quota transient since it probably has changed now.
         // Schedule to continue if anything is left to upload for this multipart or any individual files.
         if ($settings['_multipart_id'] != '' || count($files) > 0) {
             pb_backupbuddy::status('details', 'Stash multipart upload has more parts left. Scheduling next part send.');
             $schedule_result = backupbuddy_core::schedule_single_event(time(), pb_backupbuddy::cron_tag('destination_send'), array($settings, $files, $send_id));
             if (true === $schedule_result) {
                 pb_backupbuddy::status('details', 'Next Stash chunk step cron event scheduled.');
             } else {
                 pb_backupbuddy::status('error', 'Next Stash chunk step cron event FAILED to be scheduled.');
             }
             spawn_cron(time() + 150);
             // Adds > 60 seconds to get around once per minute cron running limit.
             update_option('_transient_doing_cron', 0);
             // Prevent cron-blocking for next item.
             return array($settings['_multipart_id'], 'Sent part ' . $this_part_number . ' of ' . count($settings['_multipart_counts']) . ' parts.');
         }
     }
     // end if multipart continuation.
     require_once pb_backupbuddy::plugin_path() . '/classes/fileoptions.php';
     // Upload each file.
     foreach ($files as $file_id => $file) {
         // Determine backup type directory (if zip).
         $backup_type_dir = '';
         $backup_type = '';
         if (stristr($file, '.zip') !== false) {
             // If a zip try to determine backup type.
             pb_backupbuddy::status('details', 'Stash: Zip file. Detecting backup type if possible.');
             $serial = backupbuddy_core::get_serial_from_file($file);
             // See if we can get backup type from fileoptions data.
             $backup_options = new pb_backupbuddy_fileoptions(backupbuddy_core::getLogDirectory() . 'fileoptions/' . $serial . '.txt', $read_only = true, $ignore_lock = true);
             if (true !== ($result = $backup_options->is_ok())) {
                 pb_backupbuddy::status('error', 'Unable to open fileoptions file `' . backupbuddy_core::getLogDirectory() . 'fileoptions/' . $serial . '.txt' . '`.');
             } else {
                 if (isset($backup_options->options['integrity']['detected_type'])) {
                     pb_backupbuddy::status('details', 'Stash: Detected backup type as `' . $backup_options->options['integrity']['detected_type'] . '` via integrity check data.');
                     $backup_type_dir = $backup_options->options['integrity']['detected_type'] . '/';
                     $backup_type = $backup_options->options['integrity']['detected_type'];
                 }
             }
             // If still do not know backup type then attempt to deduce it from filename.
             if ($backup_type == '') {
                 if (stristr($file, '-db-') !== false) {
                     pb_backupbuddy::status('details', 'Stash: Detected backup type as `db` via filename.');
                     $backup_type_dir = 'db/';
                     $backup_type = 'db';
                 } elseif (stristr($file, '-full-') !== false) {
                     pb_backupbuddy::status('details', 'Stash: Detected backup type as `full` via filename.');
                     $backup_type_dir = 'full/';
                     $backup_type = 'full';
                 } elseif (stristr($file, '-files-') !== false) {
                     pb_backupbuddy::status('details', 'Stash: Detected backup type as `files` via filename.');
                     $backup_type_dir = 'files/';
                     $backup_type = 'files';
                 } else {
                     pb_backupbuddy::status('details', 'Stash: Could not detect backup type via integrity details nor filename.');
                 }
             }
         }
         // Interact with Stash API.
         pb_backupbuddy::status('details', 'Determining Stash upload URL for `' . $file . '` with destination remote path `' . $remote_path . $backup_type_dir . basename($file) . '`.');
         $upload_url = $stash->get_upload_url($file, 'request', $remote_path . $backup_type_dir . basename($file));
         pb_backupbuddy::status('details', 'Determined upload url: `' . $upload_url . '`.');
         $request = new RequestCore($upload_url);
         pb_backupbuddy::status('details', 'Sending Stash API request.');
         $response = $request->send_request(true);
         // Validate response.
         if (!$response->isOK()) {
             $this_error = 'Stash request for upload credentials failed.';
             $pb_backupbuddy_destination_errors[] = $this_error;
             pb_backupbuddy::status('error', $this_error);
             return false;
         }
         if (!($upload_data = json_decode($response->body, true))) {
             $this_error = 'Stash API did not give a valid JSON response.';
             $pb_backupbuddy_destination_errors[] = $this_error;
             pb_backupbuddy::status('error', $this_error);
             return false;
         }
         if (isset($upload_data['error'])) {
             $this_error = 'Stash error(s): `' . implode(' - ', $upload_data['error']) . '`.';
             $pb_backupbuddy_destination_errors[] = $this_error;
             pb_backupbuddy::status('error', $this_error);
             return false;
         }
         // Create S3 instance.
         pb_backupbuddy::status('details', 'Creating Stash S3 instance.');
         $s3 = new AmazonS3($upload_data['credentials']);
         // the key, secret, token
         if ($disable_ssl === true) {
             @$s3->disable_ssl(true);
         }
         pb_backupbuddy::status('details', 'Stash S3 instance created.');
         // Handle chunking of file into a multipart upload (if applicable).
         $file_size = filesize($file);
         if ($max_chunk_size >= self::MINIMUM_CHUNK_SIZE && $file_size / 1024 / 1024 > $max_chunk_size) {
             // minimum chunk size is 5mb. Anything under 5mb we will not chunk.
             pb_backupbuddy::status('details', 'Stash file size of ' . pb_backupbuddy::$format->file_size($file_size) . ' exceeds max chunk size of ' . $max_chunk_size . 'MB set in settings for sending file as multipart upload.');
             // Initiate multipart upload with S3.
             pb_backupbuddy::status('details', 'Initiating Stash multipart upload.');
             $response = $s3->initiate_multipart_upload($upload_data['bucket'], $upload_data['object'], array('encryption' => 'AES256'));
             if (!$response->isOK()) {
                 $this_error = 'Stash was unable to initiate multipart upload.';
                 $pb_backupbuddy_destination_errors[] = $this_error;
                 pb_backupbuddy::status('error', $this_error);
                 return false;
             } else {
                 $upload_id = (string) $response->body->UploadId;
                 pb_backupbuddy::status('details', 'Stash initiated multipart upload with ID `' . $upload_id . '`.');
             }
             // Get chunk parts for multipart transfer.
             pb_backupbuddy::status('details', 'Stash getting multipart counts.');
             $parts = $s3->get_multipart_counts($file_size, $max_chunk_size * 1024 * 1024);
             // Size of chunks expected to be in bytes.
             $multipart_destination_settings = $settings;
             $multipart_destination_settings['_multipart_id'] = $upload_id;
             $multipart_destination_settings['_multipart_partnumber'] = 0;
             $multipart_destination_settings['_multipart_file'] = $file;
             $multipart_destination_settings['_multipart_counts'] = $parts;
             $multipart_destination_settings['_multipart_upload_data'] = $upload_data;
             $multipart_destination_settings['_multipart_backup_type_dir'] = $backup_type_dir;
             pb_backupbuddy::status('details', 'Stash multipart settings to pass: `' . print_r($multipart_destination_settings, true) . '`.');
             pb_backupbuddy::status('details', 'Stash scheduling send of next part(s).');
             backupbuddy_core::schedule_single_event(time(), pb_backupbuddy::cron_tag('destination_send'), array($multipart_destination_settings, $files, $send_id));
             spawn_cron(time() + 150);
             // Adds > 60 seconds to get around once per minute cron running limit.
             update_option('_transient_doing_cron', 0);
             // Prevent cron-blocking for next item.
             pb_backupbuddy::status('details', 'Stash scheduled send of next part(s). Done for this cycle.');
             return array($upload_id, 'Starting send of ' . count($multipart_destination_settings['_multipart_counts']) . ' parts.');
         } else {
             // did not meet chunking criteria.
             if ($max_chunk_size != '0') {
                 if ($file_size / 1024 / 1024 > self::MINIMUM_CHUNK_SIZE) {
                     pb_backupbuddy::status('details', 'File size of ' . pb_backupbuddy::$format->file_size($file_size) . ' is less than the max chunk size of ' . $max_chunk_size . 'MB; not chunking into multipart upload.');
                 } else {
                     pb_backupbuddy::status('details', 'File size of ' . pb_backupbuddy::$format->file_size($file_size) . ' is less than the minimum allowed chunk size of ' . self::MINIMUM_CHUNK_SIZE . 'MB; not chunking into multipart upload.');
                 }
             } else {
                 pb_backupbuddy::status('details', 'Max chunk size set to zero so not chunking into multipart upload.');
             }
         }
         // SEND file.
         pb_backupbuddy::status('details', 'About to put (upload) object to Stash.');
         $response = $s3->create_object($upload_data['bucket'], $upload_data['object'], array('fileUpload' => $file, 'encryption' => 'AES256'));
         // Validate response. On failure notify Stash API that things went wrong.
         if (!$response->isOK()) {
             // Send FAILED.
             pb_backupbuddy::status('details', 'Sending upload abort.');
             $request = new RequestCore($abort_url);
             $response = $request->send_request(true);
             $this_error = 'Could not upload to Stash, attempt aborted.';
             $pb_backupbuddy_destination_errors[] = $this_error;
             pb_backupbuddy::status('error', $this_error);
             return false;
         } else {
             // Send SUCCESS.
             pb_backupbuddy::status('details', 'Success uploading file to Stash storage. Notifying Stash API next. Upload details: `' . print_r($response, true) . '`.');
             $uploaded_size = $response->header['_info']['size_upload'];
             $uploaded_speed = $response->header['_info']['speed_upload'];
             pb_backupbuddy::status('details', 'Uploaded size: ' . pb_backupbuddy::$format->file_size($uploaded_size) . ', Speed: ' . pb_backupbuddy::$format->file_size($uploaded_speed) . '/sec.');
         }
         delete_transient('pb_backupbuddy_stashquota_' . $settings['itxapi_username']);
         // Delete quota transient since it probably has changed now.
         // Notify Stash API that things were successful.
         $done_url = $stash->get_upload_url($file, 'done', $remote_path . $backup_type_dir . basename($file));
         pb_backupbuddy::status('details', 'Notifying Stash of completed upload with done url `' . $done_url . '`.');
         $request = new RequestCore($done_url);
         $response = $request->send_request(true);
         if (!$response->isOK()) {
             $this_error = 'Error #247568834682. Could not finalize Stash upload. Response code: `' . $response->get_response_code() . '`; Response body: `' . $response->get_response_body() . '`; Response headers: `' . $response->get_response_header() . '`.';
             $pb_backupbuddy_destination_errors[] = $this_error;
             pb_backupbuddy::status('error', $this_error);
             return false;
         } else {
             // Good server response.
             // See if we got an optional json response.
             $upload_data = @json_decode($response->body, true);
             if (isset($upload_data['error'])) {
                 // Some kind of error.
                 $this_error = 'Stash error(s): `' . implode(' - ', $upload_data['error']) . '`.';
                 $pb_backupbuddy_destination_errors[] = $this_error;
                 pb_backupbuddy::status('error', $this_error);
                 return false;
             }
             unset($files[$file_id]);
             // Remove from list of files we have not sent yet.
             pb_backupbuddy::status('details', 'Stash success sending file `' . basename($file) . '`. File uploaded and reported to Stash as completed.');
             // Load destination fileoptions.
             pb_backupbuddy::status('details', 'About to load fileoptions data.');
             require_once pb_backupbuddy::plugin_path() . '/classes/fileoptions.php';
             $fileoptions_obj = new pb_backupbuddy_fileoptions(backupbuddy_core::getLogDirectory() . 'fileoptions/send-' . $send_id . '.txt', $read_only = false, $ignore_lock = false, $create_file = false);
             if (true !== ($result = $fileoptions_obj->is_ok())) {
                 pb_backupbuddy::status('error', __('Fatal Error #9034.84838. Unable to access fileoptions data.', 'it-l10n-backupbuddy') . ' Error: ' . $result);
                 return false;
             }
             pb_backupbuddy::status('details', 'Fileoptions data loaded.');
             $fileoptions =& $fileoptions_obj->options;
             // Save stats.
             if (isset($uploaded_speed)) {
                 $fileoptions['write_speed'] = $uploaded_speed;
                 $fileoptions_obj->save();
             }
             //$fileoptions['finish_time'] = time();
             //$fileoptions['status'] = 'success';
             unset($fileoptions_obj);
         }
     }
     // end foreach.
     // BEGIN FILE LIMIT PROCESSING. Enforce archive limits if applicable.
     if ($backup_type == 'full') {
         $limit = $full_archive_limit;
         pb_backupbuddy::status('details', 'Stash full backup archive limit of `' . $limit . '` of type `full` based on destination settings.');
     } elseif ($backup_type == 'db') {
         $limit = $db_archive_limit;
         pb_backupbuddy::status('details', 'Stash database backup archive limit of `' . $limit . '` of type `db` based on destination settings.');
     } elseif ($backup_type == 'files') {
         $limit = $files_archive_limit;
         pb_backupbuddy::status('details', 'Stash files backup archive limit of `' . $limit . '` of type `files` based on destination settings.');
     } else {
         $limit = 0;
         pb_backupbuddy::status('warning', 'Warning #54854895. Stash was unable to determine backup type (reported: `' . $backup_type . '`) so archive limits NOT enforced for this backup.');
     }
     if ($limit > 0) {
         pb_backupbuddy::status('details', 'Stash archive limit enforcement beginning.');
         // S3 object for managing files.
         $s3_manage = new AmazonS3($manage_data['credentials']);
         if ($disable_ssl === true) {
             @$s3_manage->disable_ssl(true);
         }
         // Get file listing.
         $response_manage = $s3_manage->list_objects($manage_data['bucket'], array('prefix' => $manage_data['subkey'] . $remote_path . $backup_type_dir));
         // list all the files in the subscriber account
         // Create array of backups and organize by date
         $prefix = backupbuddy_core::backup_prefix();
         // List backups associated with this site by date.
         $backups = array();
         foreach ($response_manage->body->Contents as $object) {
             $file = str_replace($manage_data['subkey'] . $remote_path . $backup_type_dir, '', $object->Key);
             // Stash stores files in a directory per site so no need to check prefix here! if ( false !== strpos( $file, 'backup-' . $prefix . '-' ) ) { // if backup has this site prefix...
             $backups[$file] = strtotime($object->LastModified);
         }
         arsort($backups);
         pb_backupbuddy::status('details', 'Stash found `' . count($backups) . '` backups of this type when checking archive limits.');
         if (count($backups) > $limit) {
             pb_backupbuddy::status('details', 'More archives (' . count($backups) . ') than limit (' . $limit . ') allows. Trimming...');
             $i = 0;
             $delete_fail_count = 0;
             foreach ($backups as $buname => $butime) {
                 $i++;
                 if ($i > $limit) {
                     pb_backupbuddy::status('details', 'Trimming excess file `' . $buname . '`...');
                     $response = $s3_manage->delete_object($manage_data['bucket'], $manage_data['subkey'] . $remote_path . $backup_type_dir . $buname);
                     if (!$response->isOK()) {
                         pb_backupbuddy::status('details', 'Unable to delete excess Stash file `' . $buname . '`. Details: `' . print_r($response, true) . '`.');
                         $delete_fail_count++;
                     }
                 }
             }
             pb_backupbuddy::status('details', 'Finished trimming excess backups.');
             if ($delete_fail_count !== 0) {
                 $error_message = 'Stash remote limit could not delete ' . $delete_fail_count . ' backups.';
                 pb_backupbuddy::status('error', $error_message);
                 backupbuddy_core::mail_error($error_message);
             }
         }
         pb_backupbuddy::status('details', 'Stash completed archive limiting.');
     } else {
         pb_backupbuddy::status('details', 'No Stash archive file limit to enforce.');
     }
     // End remote backup limit
     if (isset($fileoptions_obj)) {
         unset($fileoptions_obj);
     }
     // END FILE LIMIT PROCESSING.
     // Success if we made it this far.
     return true;
 }
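The chunking branch above passes the file size and the chunk size in bytes to get_multipart_counts(), which returns the seekTo/length pairs that later drive each upload_part() call. A standalone sketch of that math against SDK 1.x, with placeholder credentials:
 $s3 = new AmazonS3($credentials); // $credentials: key/secret/token array (assumed)
 // A 12 MB file with 5 MB chunks yields parts of 5 MB, 5 MB, and 2 MB.
 $parts = $s3->get_multipart_counts(12 * 1024 * 1024, 5 * 1024 * 1024);
 foreach ($parts as $i => $part) {
     printf("part %d: seekTo=%d length=%d\n", $i + 1, $part['seekTo'], $part['length']);
 }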
Example #6
 public static function send($settings = array(), $files = array(), $clear_uploads = false)
 {
     global $pb_backupbuddy_destination_errors;
     if (!is_array($files)) {
         $files = array($files);
     }
     if ($clear_uploads === false) {
         // Uncomment the following line to override and always clear.
         //$clear_uploads = true;
     }
     $itxapi_username = $settings['itxapi_username'];
     $itxapi_password = $settings['itxapi_password'];
     $db_archive_limit = $settings['db_archive_limit'];
     $full_archive_limit = $settings['full_archive_limit'];
     $max_chunk_size = $settings['max_chunk_size'];
     $remote_path = self::get_remote_path($settings['directory']);
     // Has leading and trailing slashes.
     if ($settings['ssl'] == '0') {
         $disable_ssl = true;
     } else {
         $disable_ssl = false;
     }
     $multipart_id = $settings['_multipart_id'];
     $multipart_counts = $settings['_multipart_counts'];
     pb_backupbuddy::status('details', 'Stash remote path set to `' . $remote_path . '`.');
     require_once dirname(__FILE__) . '/lib/class.itx_helper.php';
     require_once dirname(__FILE__) . '/lib/aws-sdk/sdk.class.php';
     // Stash API talk.
     $stash = new ITXAPI_Helper(pb_backupbuddy_destination_stash::ITXAPI_KEY, pb_backupbuddy_destination_stash::ITXAPI_URL, $itxapi_username, $itxapi_password);
     $manage_data = pb_backupbuddy_destination_stash::get_manage_data($settings);
     // Wipe all current uploads.
     if ($clear_uploads === true) {
         pb_backupbuddy::status('details', 'Clearing any current uploads via Stash call to `abort-all`.');
         $abort_url = $stash->get_upload_url(null, 'abort-all');
         $request = new RequestCore($abort_url);
         //pb_backupbuddy::status('details', print_r( $request , true ) );
         $response = $request->send_request(true);
     }
     // Process multipart transfer that we already initiated in a previous PHP load.
     if ($multipart_id != '') {
         // Multipart upload initiated and needs parts sent.
         // Create S3 instance.
         pb_backupbuddy::status('details', 'Creating Stash S3 instance.');
         $s3 = new AmazonS3($settings['_multipart_upload_data']['credentials']);
         // the key, secret, token
         if ($disable_ssl === true) {
             @$s3->disable_ssl(true);
         }
         pb_backupbuddy::status('details', 'Stash S3 instance created.');
         $this_part_number = $settings['_multipart_partnumber'] + 1;
         pb_backupbuddy::status('details', 'Stash beginning upload of part `' . $this_part_number . '` of `' . count($settings['_multipart_counts']) . '` parts of file `' . $settings['_multipart_file'] . '` with multipart ID `' . $settings['_multipart_id'] . '`.');
         $response = $s3->upload_part($settings['_multipart_upload_data']['bucket'], $settings['_multipart_upload_data']['object'], $settings['_multipart_id'], array('expect' => '100-continue', 'fileUpload' => $settings['_multipart_file'], 'partNumber' => $this_part_number, 'seekTo' => (int) $settings['_multipart_counts'][$settings['_multipart_partnumber']]['seekTo'], 'length' => (int) $settings['_multipart_counts'][$settings['_multipart_partnumber']]['length']));
         if (!$response->isOK()) {
             $this_error = 'Stash unable to upload file part for multipart upload `' . $settings['_multipart_id'] . '`. Details: `' . print_r($response, true) . '`.';
             $pb_backupbuddy_destination_errors[] = $this_error;
             pb_backupbuddy::status('error', $this_error);
             return false;
         }
         // Update stats.
         foreach (pb_backupbuddy::$options['remote_sends'] as $identifier => $remote_send) {
             if (isset($remote_send['_multipart_id']) && $remote_send['_multipart_id'] == $multipart_id) {
                 // this item.
                 pb_backupbuddy::$options['remote_sends'][$identifier]['_multipart_status'] = 'Sent part ' . $this_part_number . ' of ' . count($settings['_multipart_counts']) . '.';
                 if ($this_part_number == count($settings['_multipart_counts'])) {
                     pb_backupbuddy::$options['remote_sends'][$identifier]['_multipart_status'] .= '<br>Success.';
                     pb_backupbuddy::$options['remote_sends'][$identifier]['finish_time'] = time();
                 }
                 pb_backupbuddy::save();
                 break;
             }
         }
         // Made it here so success sending part. Increment for next part to send.
         $settings['_multipart_partnumber']++;
         if (!isset($settings['_multipart_counts'][$settings['_multipart_partnumber']])) {
             // No more parts exist for this file. Tell S3 the multipart upload is complete and move on.
             pb_backupbuddy::status('details', 'Stash getting parts with etags to notify S3 of completed multipart send.');
             $etag_parts = $s3->list_parts($settings['_multipart_upload_data']['bucket'], $settings['_multipart_upload_data']['object'], $settings['_multipart_id']);
             pb_backupbuddy::status('details', 'Stash got parts list. Notifying S3 of multipart upload completion.');
             $response = $s3->complete_multipart_upload($settings['_multipart_upload_data']['bucket'], $settings['_multipart_upload_data']['object'], $settings['_multipart_id'], $etag_parts);
             if (!$response->isOK()) {
                 $this_error = 'Stash unable to notify S3 of completion of all parts for multipart upload `' . $settings['_multipart_id'] . '`.';
                 $pb_backupbuddy_destination_errors[] = $this_error;
                 pb_backupbuddy::status('error', $this_error);
                 return false;
             } else {
                 pb_backupbuddy::status('details', 'Stash notified S3 of multipart completion.');
             }
             // Notify Stash API that things were successful.
             $done_url = $stash->get_upload_url($settings['_multipart_file'], 'done', $remote_path . $settings['_multipart_backup_type_dir'] . basename($settings['_multipart_file']));
             pb_backupbuddy::status('details', 'Notifying Stash of completed multipart upload with done url `' . $done_url . '`.');
             $request = new RequestCore($done_url);
             $response = $request->send_request(true);
             if (!$response->isOK()) {
                 $this_error = 'Error #756834682. Could not finalize Stash upload. Response code: `' . $response->get_response_code() . '`; Response body: `' . $response->get_response_body() . '`; Response headers: `' . $response->get_response_header() . '`.';
                 $pb_backupbuddy_destination_errors[] = $this_error;
                 pb_backupbuddy::status('error', $this_error);
                 return false;
             } else {
                 // Good server response.
                 // See if we got an optional json response.
                 $upload_data = @json_decode($response->body, true);
                 if (isset($upload_data['error'])) {
                     $this_error = 'Stash error(s): `' . implode(' - ', $upload_data['error']) . '`.';
                     $pb_backupbuddy_destination_errors[] = $this_error;
                     pb_backupbuddy::status('error', $this_error);
                     return false;
                 }
                 pb_backupbuddy::status('details', 'Stash success sending file `' . basename($settings['_multipart_file']) . '`. File uploaded via multipart across `' . $this_part_number . '` parts and reported to Stash as completed.');
             }
             pb_backupbuddy::status('details', 'Stash has no more parts left for this multipart upload. Clearing multipart instance variables.');
             $settings['_multipart_partnumber'] = 0;
             $settings['_multipart_id'] = '';
             $settings['_multipart_file'] = '';
             $settings['_multipart_counts'] = array();
             $settings['_multipart_upload_data'] = array();
         }
         delete_transient('pb_backupbuddy_stashquota_' . $settings['itxapi_username']);
         // Delete quota transient since it probably has changed now.
         // Schedule to continue if anything is left to upload for this multipart or any individual files.
         if ($settings['_multipart_id'] != '' || count($files) > 0) {
             pb_backupbuddy::status('details', 'Stash multipart upload has more parts left. Scheduling next part send.');
             wp_schedule_single_event(time(), pb_backupbuddy::cron_tag('destination_send'), array($settings, $files, 'multipart', false));
             spawn_cron(time() + 150);
             // Adds > 60 seconds to get around once per minute cron running limit.
             update_option('_transient_doing_cron', 0);
             // Prevent cron-blocking for next item.
             pb_backupbuddy::status('details', 'Stash scheduled send of next part(s). Done for this cycle.');
             return array($settings['_multipart_id'], 'Sent part ' . $this_part_number . ' of ' . count($settings['_multipart_counts']) . ' parts.');
         }
     }
     // Upload each file.
     foreach ($files as $file_id => $file) {
         // Determine backup type directory (if zip).
         $backup_type_dir = '';
         $backup_type = '';
         if (stristr($file, '.zip') !== false) {
             // If a zip try to determine backup type.
             pb_backupbuddy::status('details', 'Stash: Zip file. Detecting backup type if possible.');
             $serial = pb_backupbuddy::$classes['core']->get_serial_from_file($file);
             if (isset(pb_backupbuddy::$options['backups'][$serial]['integrity']['detected_type'])) {
                 pb_backupbuddy::status('details', 'Stash: Detected backup type as `' . pb_backupbuddy::$options['backups'][$serial]['integrity']['detected_type'] . '` via integrity check data.');
                 $backup_type_dir = pb_backupbuddy::$options['backups'][$serial]['integrity']['detected_type'] . '/';
                 $backup_type = pb_backupbuddy::$options['backups'][$serial]['integrity']['detected_type'];
             } else {
                 if (stristr($file, '-db-') !== false) {
                     pb_backupbuddy::status('details', 'Stash: Detected backup type as `db` via filename.');
                     $backup_type_dir = 'db/';
                     $backup_type = 'db';
                 } elseif (stristr($file, '-full-') !== false) {
                     pb_backupbuddy::status('details', 'Stash: Detected backup type as `full` via filename.');
                     $backup_type_dir = 'full/';
                     $backup_type = 'full';
                 } else {
                     pb_backupbuddy::status('details', 'Stash: Could not detect backup type via integrity details nor filename.');
                 }
             }
         }
         // Interact with Stash API.
         pb_backupbuddy::status('details', 'Determining Stash upload URL for `' . $file . '` with destination remote path `' . $remote_path . $backup_type_dir . basename($file) . '`.');
         $upload_url = $stash->get_upload_url($file, 'request', $remote_path . $backup_type_dir . basename($file));
         pb_backupbuddy::status('details', 'Determined upload url: `' . $upload_url . '`.');
         $request = new RequestCore($upload_url);
         pb_backupbuddy::status('details', 'Sending Stash API request.');
         $response = $request->send_request(true);
         // Validate response.
         if (!$response->isOK()) {
             $this_error = 'Stash request for upload credentials failed.';
             $pb_backupbuddy_destination_errors[] = $this_error;
             pb_backupbuddy::status('error', $this_error);
             return false;
         }
         if (!($upload_data = json_decode($response->body, true))) {
             $this_error = 'Stash API did not give a valid JSON response.';
             $pb_backupbuddy_destination_errors[] = $this_error;
             pb_backupbuddy::status('error', $this_error);
             return false;
         }
         if (isset($upload_data['error'])) {
             $this_error = 'Stash error(s): `' . implode(' - ', $upload_data['error']) . '`.';
             $pb_backupbuddy_destination_errors[] = $this_error;
             pb_backupbuddy::status('error', $this_error);
             return false;
         }
         // Calculate meta data to send.
         /*
         $meta_array = array();
         if ( stristr( $file, '.zip' ) !== false ) { // If a zip try to determine backup type.
         	pb_backupbuddy::status( 'details', 'Stash: Zip file. Detecting backup type if possible.' );
         	$serial = pb_backupbuddy::$classes['core']->get_serial_from_file( $file );
         	if ( isset( pb_backupbuddy::$options['backups'][$serial]['integrity']['detected_type'] ) ) {
         		pb_backupbuddy::status( 'details', 'Stash: Detected backup type as `' . pb_backupbuddy::$options['backups'][$serial]['integrity']['detected_type'] . '` via integrity check data.' );
         		$meta_array['backup_type'] = pb_backupbuddy::$options['backups'][$serial]['integrity']['detected_type'];
         	} else {
         		if ( stristr( $file, '-db-' ) !== false ) {
         			pb_backupbuddy::status( 'details', 'Stash: Detected backup type as `db` via filename.' );
         			$meta_array['backup_type'] = 'db';
         		} elseif ( stristr( $file, '-full-' ) !== false ) {
         			pb_backupbuddy::status( 'details', 'Stash: Detected backup type as `full` via filename.' );
         			$meta_array['backup_type'] = 'full';
         		} else {
         			pb_backupbuddy::status( 'details', 'Stash: Could not detect backup type via integrity details nor filename.' );
         		}
         	}
         }
         */
         // Create S3 instance.
         pb_backupbuddy::status('details', 'Creating Stash S3 instance.');
         $s3 = new AmazonS3($upload_data['credentials']);
         // the key, secret, token
         if ($disable_ssl === true) {
             @$s3->disable_ssl(true);
         }
         pb_backupbuddy::status('details', 'Stash S3 instance created.');
         // Handle chunking of file into a multipart upload (if applicable).
         $file_size = filesize($file);
         if ($max_chunk_size >= 5 && $file_size / 1024 / 1024 > $max_chunk_size) {
             // minimum chunk size is 5mb. Anything under 5mb we will not chunk.
             pb_backupbuddy::status('details', 'Stash file size of ' . $file_size / 1024 / 1024 . 'MB exceeds max chunk size of ' . $max_chunk_size . 'MB set in settings for sending file as multipart upload.');
             // Initiate multipart upload with S3.
             pb_backupbuddy::status('details', 'Initiating Stash multipart upload.');
             $response = $s3->initiate_multipart_upload($upload_data['bucket'], $upload_data['object'], array('encryption' => 'AES256'));
             if (!$response->isOK()) {
                 $this_error = 'Stash was unable to initiate multipart upload.';
                 $pb_backupbuddy_destination_errors[] = $this_error;
                 pb_backupbuddy::status('error', $this_error);
                 return false;
             } else {
                 $upload_id = (string) $response->body->UploadId;
                 pb_backupbuddy::status('details', 'Stash initiated multipart upload with ID `' . $upload_id . '`.');
             }
             // Get chunk parts for multipart transfer.
             pb_backupbuddy::status('details', 'Stash getting multipart counts.');
             $parts = $s3->get_multipart_counts($file_size, $max_chunk_size * 1024 * 1024);
             // Size of chunks expected to be in bytes.
             $multipart_destination_settings = $settings;
             $multipart_destination_settings['_multipart_id'] = $upload_id;
             $multipart_destination_settings['_multipart_partnumber'] = 0;
             $multipart_destination_settings['_multipart_file'] = $file;
             $multipart_destination_settings['_multipart_counts'] = $parts;
             $multipart_destination_settings['_multipart_upload_data'] = $upload_data;
             $multipart_destination_settings['_multipart_backup_type_dir'] = $backup_type_dir;
             pb_backupbuddy::status('details', 'Stash multipart settings to pass: `' . print_r($multipart_destination_settings, true) . '`.');
             pb_backupbuddy::status('details', 'Stash scheduling send of next part(s).');
             wp_schedule_single_event(time(), pb_backupbuddy::cron_tag('destination_send'), array($multipart_destination_settings, $files, 'multipart', false));
             spawn_cron(time() + 150);
             // Adds > 60 seconds to get around once per minute cron running limit.
             update_option('_transient_doing_cron', 0);
             // Prevent cron-blocking for next item.
             pb_backupbuddy::status('details', 'Stash scheduled send of next part(s). Done for this cycle.');
             return array($upload_id, 'Starting send of ' . count($multipart_destination_settings['_multipart_counts']) . ' parts.');
         } else {
             if ($max_chunk_size != '0') {
                 pb_backupbuddy::status('details', 'File size of ' . $file_size / 1024 / 1024 . 'MB is less than the max chunk size of ' . $max_chunk_size . 'MB; not chunking into multipart upload.');
             } else {
                 pb_backupbuddy::status('details', 'Max chunk size set to zero so not chunking into multipart upload.');
             }
         }
         // SEND file.
         pb_backupbuddy::status('details', 'About to put (upload) object to Stash.');
         $response = $s3->create_object($upload_data['bucket'], $upload_data['object'], array('fileUpload' => $file, 'encryption' => 'AES256'));
         //  we can also utilize the multi-part-upload to create an object
         //  $response = $s3->create_mpu_object($upload_data['bucket'], $upload_data['object'], array('fileUpload'=>$upload_file));
         // Validate response. On failure notify Stash API that things went wrong.
         if (!$response->isOK()) {
             pb_backupbuddy::status('details', 'Sending upload abort.');
             $request = new RequestCore($abort_url);
             $response = $request->send_request(true);
             $this_error = 'Could not upload to Stash, attempt aborted.';
             $pb_backupbuddy_destination_errors[] = $this_error;
             pb_backupbuddy::status('error', $this_error);
             return false;
         } else {
             //	pb_backupbuddy::status( 'details', 'Stash file upload speed: ' . ( $response->header['_info']['speed_upload'] / 1024 / 1024 ) . 'MB/sec. This number may be invalid for small file transfers.' );
             pb_backupbuddy::status('details', 'Stash put success. Need to notify Stash of upload completion. Details: `' . print_r($response, true) . '`.');
         }
         delete_transient('pb_backupbuddy_stashquota_' . $settings['itxapi_username']);
         // Delete quota transient since it probably has changed now.
         // Notify Stash API that things were successful.
         $done_url = $stash->get_upload_url($file, 'done', $remote_path . $backup_type_dir . basename($file));
         pb_backupbuddy::status('details', 'Notifying Stash of completed upload with done url `' . $done_url . '`.');
         $request = new RequestCore($done_url);
         $response = $request->send_request(true);
         if (!$response->isOK()) {
             $this_error = 'Error #756834682. Could not finalize Stash upload. Response code: `' . $response->get_response_code() . '`; Response body: `' . $response->get_response_body() . '`; Response headers: `' . $response->get_response_header() . '`.';
             $pb_backupbuddy_destination_errors[] = $this_error;
             pb_backupbuddy::status('error', $this_error);
             return false;
         } else {
             // Good server response.
             // See if we got an optional json response.
             $upload_data = @json_decode($response->body, true);
             if (isset($upload_data['error'])) {
                 // Some kind of error.
                 $this_error = 'Stash error(s): `' . implode(' - ', $upload_data['error']) . '`.';
                 $pb_backupbuddy_destination_errors[] = $this_error;
                 pb_backupbuddy::status('error', $this_error);
                 return false;
             }
             unset($files[$file_id]);
             // Remove from list of files we have not sent yet.
             pb_backupbuddy::status('details', 'Stash success sending file `' . basename($file) . '`. File uploaded and reported to Stash as completed.');
         }
         // Enforce archive limits if applicable.
         if ($backup_type == 'full') {
             $limit = $full_archive_limit;
             pb_backupbuddy::status('details', 'Stash full backup archive limit of `' . $limit . '` based on destination settings.');
         } elseif ($backup_type == 'db') {
             $limit = $db_archive_limit;
             pb_backupbuddy::status('details', 'Stash database backup archive limit of `' . $limit . '` based on destination settings.');
         } else {
             $limit = 0;
             pb_backupbuddy::status('error', 'Error #54854895. Stash was unable to determine backup type so archive limits NOT enforced for this backup.');
         }
         if ($limit > 0) {
             pb_backupbuddy::status('details', 'Stash archive limit enforcement beginning.');
             // S3 object for managing files.
             $s3_manage = new AmazonS3($manage_data['credentials']);
             if ($disable_ssl === true) {
                 @$s3_manage->disable_ssl(true);
             }
             // Get file listing.
             $response_manage = $s3_manage->list_objects($manage_data['bucket'], array('prefix' => $manage_data['subkey'] . $remote_path . $backup_type_dir));
             // list all the files in the subscriber account
             // Create array of backups and organize by date
             $prefix = pb_backupbuddy::$classes['core']->backup_prefix();
             // List backups associated with this site by date.
             $backups = array();
             foreach ($response_manage->body->Contents as $object) {
                 $file = str_replace($manage_data['subkey'] . $remote_path . $backup_type_dir, '', $object->Key);
                 // Stash stores files in a directory per site so no need to check prefix here! if ( false !== strpos( $file, 'backup-' . $prefix . '-' ) ) { // if backup has this site prefix...
                 $backups[$file] = strtotime($object->LastModified);
                 //}
             }
             arsort($backups);
             //error_log( 'backups: ' . print_r( $backups, true ) );
             pb_backupbuddy::status('details', 'Stash found `' . count($backups) . '` backups of this type when checking archive limits.');
             if (count($backups) > $limit) {
                 pb_backupbuddy::status('details', 'More archives (' . count($backups) . ') than limit (' . $limit . ') allows. Trimming...');
                 $i = 0;
                 $delete_fail_count = 0;
                 foreach ($backups as $buname => $butime) {
                     $i++;
                     if ($i > $limit) {
                         pb_backupbuddy::status('details', 'Trimming excess file `' . $buname . '`...');
                         $response = $s3_manage->delete_object($manage_data['bucket'], $manage_data['subkey'] . $remote_path . $backup_type_dir . $buname);
                         if (!$response->isOK()) {
                             pb_backupbuddy::status('details', 'Unable to delete excess Stash file `' . $buname . '`. Details: `' . print_r($response, true) . '`.');
                             $delete_fail_count++;
                         }
                     }
                 }
                 pb_backupbuddy::status('details', 'Finished trimming excess backups.');
                 if ($delete_fail_count !== 0) {
                     $error_message = 'Stash remote limit could not delete ' . $delete_fail_count . ' backups.';
                     pb_backupbuddy::status('error', $error_message);
                     pb_backupbuddy::$classes['core']->mail_error($error_message);
                 }
             }
             pb_backupbuddy::status('details', 'Stash completed archive limiting.');
         } else {
             pb_backupbuddy::status('details', 'No Stash archive file limit to enforce.');
         }
         // End remote backup limit
     }
     // end foreach.
     // Success if we made it this far.
     return true;
 }
Example #7
0
 /**
  * @param $job_object
  * @return bool
  */
 public function job_run_archive(&$job_object)
 {
     $job_object->substeps_todo = 2 + $job_object->backup_filesize;
     $job_object->log(sprintf(__('%d. Trying to send backup file to S3 Service&#160;&hellip;', 'backwpup'), $job_object->steps_data[$job_object->step_working]['STEP_TRY']), E_USER_NOTICE);
     try {
         $s3 = new AmazonS3(array('key' => $job_object->job['s3accesskey'], 'secret' => BackWPup_Encryption::decrypt($job_object->job['s3secretkey']), 'certificate_authority' => TRUE));
         $base_url = $this->get_s3_base_url($job_object->job['s3region'], $job_object->job['s3base_url']);
         if (stristr($base_url, 'amazonaws.com')) {
             $s3->set_region(str_replace(array('http://', 'https://'), '', $base_url));
         } else {
             $s3->set_hostname(str_replace(array('http://', 'https://'), '', $base_url));
             $s3->allow_hostname_override(FALSE);
             if (substr($base_url, -1) == '/') {
                 $s3->enable_path_style(TRUE);
             }
         }
         if (stristr($base_url, 'http://')) {
             $s3->disable_ssl();
         }
         if ($s3->if_bucket_exists($job_object->job['s3bucket'])) {
             $job_object->log(sprintf(__('Connected to S3 Bucket "%1$s" in %2$s', 'backwpup'), $job_object->job['s3bucket'], $base_url), E_USER_NOTICE);
         } else {
             $job_object->log(sprintf(__('S3 Bucket "%s" does not exist!', 'backwpup'), $job_object->job['s3bucket']), E_USER_ERROR);
             return TRUE;
         }
         //transfer file to S3
         $job_object->log(__('Starting upload to S3 Service&#160;&hellip;', 'backwpup'), E_USER_NOTICE);
         //Transfer Backup to S3
         if ($job_object->job['s3storageclass'] == 'REDUCED_REDUNDANCY') {
             //set reduced redundancy or not
             $storage = AmazonS3::STORAGE_REDUCED;
         } else {
             $storage = AmazonS3::STORAGE_STANDARD;
         }
         if (empty($job_object->job['s3ssencrypt'])) {
             $job_object->job['s3ssencrypt'] = NULL;
         }
         //set progress bar
         $s3->register_streaming_read_callback(array($job_object, 'curl_read_callback'));
         $result = $s3->create_object($job_object->job['s3bucket'], $job_object->job['s3dir'] . $job_object->backup_file, array('fileUpload' => $job_object->backup_folder . $job_object->backup_file, 'acl' => AmazonS3::ACL_PRIVATE, 'storage' => $storage, 'encryption' => $job_object->job['s3ssencrypt']));
         if ($result->status >= 200 and $result->status < 300) {
             $job_object->substeps_done = 1 + $job_object->backup_filesize;
             $job_object->log(sprintf(__('Backup transferred to %s.', 'backwpup'), $this->get_s3_base_url($job_object->job['s3region'], $job_object->job['s3base_url']) . '/' . $job_object->job['s3bucket'] . '/' . $job_object->job['s3dir'] . $job_object->backup_file), E_USER_NOTICE);
             if (!empty($job_object->job['jobid'])) {
                 BackWPup_Option::update($job_object->job['jobid'], 'lastbackupdownloadurl', network_admin_url('admin.php') . '?page=backwpupbackups&action=downloads3&file=' . $job_object->job['s3dir'] . $job_object->backup_file . '&jobid=' . $job_object->job['jobid']);
             }
         } else {
             $job_object->log(sprintf(__('Cannot transfer backup to S3! (%1$d) %2$s', 'backwpup'), $result->status, $result->body), E_USER_ERROR);
         }
     } catch (Exception $e) {
         $job_object->log(E_USER_ERROR, sprintf(__('S3 Service API: %s', 'backwpup'), htmlentities($e->getMessage())), $e->getFile(), $e->getLine());
         return FALSE;
     }
     try {
         $backupfilelist = array();
         $filecounter = 0;
         $files = array();
         $objects = $s3->list_objects($job_object->job['s3bucket'], array('prefix' => $job_object->job['s3dir']));
         if (is_object($objects)) {
             foreach ($objects->body->Contents as $object) {
                 $file = basename((string) $object->Key);
                 $changetime = strtotime((string) $object->LastModified) + get_option('gmt_offset') * 3600;
                 if ($job_object->is_backup_archive($file)) {
                     $backupfilelist[$changetime] = $file;
                 }
                 $files[$filecounter]['folder'] = $this->get_s3_base_url($job_object->job['s3region'], $job_object->job['s3base_url']) . '/' . $job_object->job['s3bucket'] . '/' . dirname((string) $object->Key);
                 $files[$filecounter]['file'] = (string) $object->Key;
                 $files[$filecounter]['filename'] = basename($object->Key);
                 $files[$filecounter]['downloadurl'] = network_admin_url('admin.php') . '?page=backwpupbackups&action=downloads3&file=' . (string) $object->Key . '&jobid=' . $job_object->job['jobid'];
                 $files[$filecounter]['filesize'] = (int) $object->Size;
                 $files[$filecounter]['time'] = $changetime;
                 $filecounter++;
             }
         }
         if ($job_object->job['s3maxbackups'] > 0 && is_object($s3)) {
             //Delete old backups
             if (count($backupfilelist) > $job_object->job['s3maxbackups']) {
                 ksort($backupfilelist);
                 $numdeltefiles = 0;
                 while ($file = array_shift($backupfilelist)) {
                     if (count($backupfilelist) < $job_object->job['s3maxbackups']) {
                         break;
                     }
                     //delete files on S3
                     $delete_s3 = $s3->delete_object($job_object->job['s3bucket'], $job_object->job['s3dir'] . $file);
                     if ($delete_s3) {
                         foreach ($files as $key => $filedata) {
                             if ($filedata['file'] == $job_object->job['s3dir'] . $file) {
                                 unset($files[$key]);
                             }
                         }
                         $numdeltefiles++;
                     } else {
                         $job_object->log(sprintf(__('Cannot delete backup from %s.', 'backwpup'), $this->get_s3_base_url($job_object->job['s3region'], $job_object->job['s3base_url']) . '/' . $job_object->job['s3bucket'] . '/' . $job_object->job['s3dir'] . $file), E_USER_ERROR);
                     }
                 }
                 if ($numdeltefiles > 0) {
                     $job_object->log(sprintf(_n('One file deleted on S3 Bucket.', '%d files deleted on S3 Bucket', $numdeltefiles, 'backwpup'), $numdeltefiles), E_USER_NOTICE);
                 }
             }
         }
         set_site_transient('backwpup_' . $job_object->job['jobid'] . '_s3', $files, 60 * 60 * 24 * 7);
     } catch (Exception $e) {
         $job_object->log(E_USER_ERROR, sprintf(__('S3 Service API: %s', 'backwpup'), htmlentities($e->getMessage())), $e->getFile(), $e->getLine());
         return FALSE;
     }
     $job_object->substeps_done = 2 + $job_object->backup_filesize;
     return TRUE;
 }
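Example #7's upload step in isolation: create_object() streams the local file while a registered read callback reports progress. A hedged sketch with illustrative credentials and paths; the callback signature (cURL handle, file handle, bytes read) is an assumption based on how curl_read_callback is wired up above.

<?php
// Sketch: v1-SDK upload with a streaming-read progress callback.
require_once 'sdk.class.php';

$s3 = new AmazonS3(array('key' => 'ACCESS_KEY', 'secret' => 'SECRET_KEY'));
// Assumed signature: invoked repeatedly as the file is read for upload.
$s3->register_streaming_read_callback(function ($curl_handle, $file_handle, $length) {
    echo "sent {$length} bytes\n";
});
$result = $s3->create_object('my-bucket', 'backups/site.tar.gz', array(
    'fileUpload' => '/tmp/site.tar.gz',
    'acl'        => AmazonS3::ACL_PRIVATE,
    'storage'    => AmazonS3::STORAGE_STANDARD,
));
echo $result->isOK() ? "uploaded\n" : "upload failed: HTTP {$result->status}\n";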
Example #8
0
';
echo '<br><br></div>';
// Welcome text.
$up_path = '/';
if ($settings['manage_all_files'] == '1') {
    $manage_all_link = ' <a href="' . pb_backupbuddy::ajax_url('remoteClient') . '&destination_id=' . htmlentities(pb_backupbuddy::_GET('destination_id')) . '&remote_path=' . $up_path . '" style="text-decoration: none; margin-left: 15px;" title="By default, Stash will display files in the Stash directory for this particular site. Clicking this will display files for all your sites in Stash.">List files for all sites</a>';
} else {
    $manage_all_link = '<!-- manage all disabled based on settings -->';
    if ($remote_path == '/') {
        die('Access denied. Possible hacking attempt has been logged. Error #5549450.');
    }
}
$reauth_link = ' <a href="' . pb_backupbuddy::ajax_url('remoteClient') . '&destination_id=' . htmlentities(pb_backupbuddy::_GET('destination_id')) . '&force_stash_reauth=1" style="text-decoration: none; margin-left: 15px;" title="Re-authenticate to Stash or change the Stash account this Stash destination uses.">Re-authenticate</a>';
echo '<div style="font-size: 12px; text-align: center;"><b>Current Remote Directory</b>: ' . $remote_path . $manage_all_link . $reauth_link . '</div>';
// Get file listing.
$response = $s3->list_objects($manage_data['bucket'], array('prefix' => $manage_data['subkey'] . $remote_path));
// list all the files in the subscriber account
/*
echo '<pre>';
print_r( $response );
echo '</pre>';
*/
// Display prefix somewhere to aid in troubleshooting/support.
$subscriber_prefix = substr($response->body->Prefix, 0, strpos($response->body->Prefix, '/'));
// Get list of files.
$backup_list_temp = array();
foreach ($response->body->Contents as $object) {
    $file = str_ireplace($manage_data['subkey'] . $remote_path, '', $object->Key);
    $last_modified = strtotime($object->LastModified);
    $size = (double) $object->Size;
    if (substr($file, 0, 3) == 'db/') {
Example #9
0
#!/usr/bin/php
<?php 
/*
 * list_bucket_objects_raw.php
 *
 * Display the raw bucket data returned by list_objects
 *
 * Copyright 2009-2010 Amazon.com, Inc. or its affiliates. All Rights
 * Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"). You
 * may not use this file except in compliance with the License. A copy
 * of the License is located at
 *
 *       http://aws.amazon.com/apache2.0/
 *
 * or in the "license.txt" file accompanying this file. This file is
 * distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
 * OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the
 * License.
 */
error_reporting(E_ALL);
require_once 'sdk.class.php';
require_once 'include/book.inc.php';
// Create the S3 access object
$s3 = new AmazonS3();
// List the bucket
$res = $s3->list_objects(BOOK_BUCKET);
// Display the resulting object tree
print_r($res);
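Instead of dumping the whole response tree, the listing can be walked object by object. A sketch using only fields the other examples already rely on (Key, Size, LastModified):

<?php
// Sketch: formatted listing instead of print_r(), same SDK calls.
error_reporting(E_ALL);
require_once 'sdk.class.php';
require_once 'include/book.inc.php';

$s3  = new AmazonS3();
$res = $s3->list_objects(BOOK_BUCKET);
if ($res->isOK()) {
    foreach ($res->body->Contents as $object) {
        // One line per object: key, size in bytes, last-modified stamp.
        printf("%-60s %12d  %s\n",
            (string) $object->Key,
            (int) $object->Size,
            (string) $object->LastModified);
    }
}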
Example #10
0
function dest_s3()
{
    global $WORKING, $STATIC;
    trigger_error(sprintf(__('%d. try sending backup file to Amazon S3...', 'backwpup'), $WORKING['DEST_S3']['STEP_TRY']), E_USER_NOTICE);
    $WORKING['STEPTODO'] = 2 + filesize($STATIC['JOB']['backupdir'] . $STATIC['backupfile']);
    $WORKING['STEPDONE'] = 0;
    require_once dirname(__FILE__) . '/../libs/aws/sdk.class.php';
    need_free_memory(26214400 * 1.1);
    try {
        $s3 = new AmazonS3(array('key' => $STATIC['JOB']['awsAccessKey'], 'secret' => $STATIC['JOB']['awsSecretKey'], 'certificate_authority' => true));
        if ($s3->if_bucket_exists($STATIC['JOB']['awsBucket'])) {
            trigger_error(sprintf(__('Connected to S3 Bucket: %s', 'backwpup'), $STATIC['JOB']['awsBucket']), E_USER_NOTICE);
            //Transfer Backup to S3
            if ($STATIC['JOB']['awsrrs']) {
                //set reduced redundancy or not
                $storage = AmazonS3::STORAGE_REDUCED;
            } else {
                $storage = AmazonS3::STORAGE_STANDARD;
            }
            //set curl Progress bar
            $curlops = array();
            if (defined('CURLOPT_PROGRESSFUNCTION')) {
                $curlops = array(CURLOPT_NOPROGRESS => false, CURLOPT_PROGRESSFUNCTION => 'curl_progresscallback', CURLOPT_BUFFERSIZE => 1048576);
            }
            trigger_error(__('Upload to Amazon S3 now started... ', 'backwpup'), E_USER_NOTICE);
            //transfer file to S3
            $result = $s3->create_object($STATIC['JOB']['awsBucket'], $STATIC['JOB']['awsdir'] . $STATIC['backupfile'], array('fileUpload' => $STATIC['JOB']['backupdir'] . $STATIC['backupfile'], 'acl' => AmazonS3::ACL_PRIVATE, 'storage' => $storage, 'curlopts' => $curlops));
            $result = (array) $result;
            if ($result["status"] >= 200 and $result["status"] < 300) {
                $WORKING['STEPTODO'] = 1 + filesize($STATIC['JOB']['backupdir'] . $STATIC['backupfile']);
                trigger_error(sprintf(__('Backup transferred to %s', 'backwpup'), $result["header"]["_info"]["url"]), E_USER_NOTICE);
                $STATIC['JOB']['lastbackupdownloadurl'] = $STATIC['WP']['ADMINURL'] . '?page=backwpupbackups&action=downloads3&file=' . $STATIC['JOB']['awsdir'] . $STATIC['backupfile'] . '&jobid=' . $STATIC['JOB']['jobid'];
                $WORKING['STEPSDONE'][] = 'DEST_S3';
                //set done
            } else {
                trigger_error(sprintf(__('Cannot transfer backup to S3! (%1$d) %2$s', 'backwpup'), $result["status"], $result["Message"]), E_USER_ERROR);
            }
        } else {
            trigger_error(sprintf(__('S3 Bucket "%s" does not exist!', 'backwpup'), $STATIC['JOB']['awsBucket']), E_USER_ERROR);
        }
    } catch (Exception $e) {
        trigger_error(sprintf(__('Amazon API: %s', 'backwpup'), $e->getMessage()), E_USER_ERROR);
        return;
    }
    try {
        if ($s3->if_bucket_exists($STATIC['JOB']['awsBucket'])) {
            if ($STATIC['JOB']['awsmaxbackups'] > 0) {
                //Delete old backups
                $backupfilelist = array();
                if (($contents = $s3->list_objects($STATIC['JOB']['awsBucket'], array('prefix' => $STATIC['JOB']['awsdir']))) !== false) {
                    foreach ($contents->body->Contents as $object) {
                        $file = basename($object->Key);
                        if ($STATIC['JOB']['fileprefix'] == substr($file, 0, strlen($STATIC['JOB']['fileprefix'])) and $STATIC['JOB']['fileformart'] == substr($file, -strlen($STATIC['JOB']['fileformart']))) {
                            $backupfilelist[] = $file;
                        }
                    }
                }
                if (sizeof($backupfilelist) > 0) {
                    rsort($backupfilelist);
                    $numdeltefiles = 0;
                    for ($i = $STATIC['JOB']['awsmaxbackups']; $i < sizeof($backupfilelist); $i++) {
                        if ($s3->delete_object($STATIC['JOB']['awsBucket'], $STATIC['JOB']['awsdir'] . $backupfilelist[$i])) {
                            //delete files on S3
                            $numdeltefiles++;
                        } else {
                            trigger_error(sprintf(__('Cannot delete backup on S3://%s', 'backwpup'), $STATIC['JOB']['awsBucket'] . '/' . $STATIC['JOB']['awsdir'] . $backupfilelist[$i]), E_USER_ERROR);
                        }
                    }
                    if ($numdeltefiles > 0) {
                        trigger_error(sprintf(_n('One file deleted on S3 Bucket', '%d files deleted on S3 Bucket', $numdeltefiles, 'backwpup'), $numdeltefiles), E_USER_NOTICE);
                    }
                }
            }
        }
    } catch (Exception $e) {
        trigger_error(sprintf(__('Amazon API: %s', 'backwpup'), $e->getMessage()), E_USER_ERROR);
        return;
    }
    $WORKING['STEPDONE']++;
}
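dest_s3() keeps the newest awsmaxbackups names of the reverse-sorted list by starting its delete loop at that index. The same arithmetic expressed with array_slice, on illustrative date-stamped names:

<?php
// Sketch: the retention arithmetic from dest_s3(), via array_slice.
$backupfilelist = array(
    'backup_2011-03-01.zip',
    'backup_2011-03-02.zip',
    'backup_2011-03-03.zip',
);
$max = 2;
rsort($backupfilelist);                          // newest first (dated names sort lexically)
$to_delete = array_slice($backupfilelist, $max); // everything past the limit
print_r($to_delete);                             // contains backup_2011-03-01.zip only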
Example #11
0
function dest_gstorage()
{
    global $WORKING, $STATIC;
    trigger_error(sprintf(__('%d. try sending backup to Google Storage...', 'backwpup'), $WORKING['DEST_GSTORAGE']['STEP_TRY']), E_USER_NOTICE);
    $WORKING['STEPTODO'] = 2 + filesize($STATIC['JOB']['backupdir'] . $STATIC['backupfile']);
    $WORKING['STEPDONE'] = 0;
    require_once dirname(__FILE__) . '/../libs/aws/sdk.class.php';
    need_free_memory(26214400 * 1.1);
    try {
        $gstorage = new AmazonS3(array('key' => $STATIC['JOB']['GStorageAccessKey'], 'secret' => $STATIC['JOB']['GStorageSecret'], 'certificate_authority' => true));
        //set up s3 for google
        $gstorage->set_hostname('storage.googleapis.com');
        $gstorage->allow_hostname_override(false);
        if ($gstorage->if_bucket_exists($STATIC['JOB']['GStorageBucket'])) {
            trigger_error(sprintf(__('Connected to GStorage Bucket: %s', 'backwpup'), $STATIC['JOB']['GStorageBucket']), E_USER_NOTICE);
            //set curl progress bar
            $curlops = array();
            if (defined('CURLOPT_PROGRESSFUNCTION')) {
                $curlops = array(CURLOPT_NOPROGRESS => false, CURLOPT_PROGRESSFUNCTION => 'curl_progresscallback', CURLOPT_BUFFERSIZE => 1048576);
            }
            trigger_error(__('Upload to GStorage now started... ', 'backwpup'), E_USER_NOTICE);
            //transfer file to GStorage
            $result = $gstorage->create_object($STATIC['JOB']['GStorageBucket'], $STATIC['JOB']['GStoragedir'] . $STATIC['backupfile'], array('fileUpload' => $STATIC['JOB']['backupdir'] . $STATIC['backupfile'], 'acl' => 'private', 'curlopts' => $curlops));
            $result = (array) $result;
            if ($result["status"] >= 200 and $result["status"] < 300) {
                $WORKING['STEPTODO'] = 1 + filesize($STATIC['JOB']['backupdir'] . $STATIC['backupfile']);
                trigger_error(sprintf(__('Backup transferred to %s', 'backwpup'), "https://storage.cloud.google.com/" . $STATIC['JOB']['GStorageBucket'] . "/" . $STATIC['JOB']['GStoragedir'] . $STATIC['backupfile']), E_USER_NOTICE);
                $STATIC['JOB']['lastbackupdownloadurl'] = "https://storage.cloud.google.com/" . $STATIC['JOB']['GStorageBucket'] . "/" . $STATIC['JOB']['GStoragedir'] . $STATIC['backupfile'];
                $WORKING['STEPSDONE'][] = 'DEST_GSTORAGE';
                //set done
            } else {
                trigger_error(sprintf(__('Cannot transfer backup to GStorage! (%1$d) %2$s', 'backwpup'), $result["status"], $result["Message"]), E_USER_ERROR);
            }
        } else {
            trigger_error(sprintf(__('GStorage Bucket "%s" does not exist!', 'backwpup'), $STATIC['JOB']['GStorageBucket']), E_USER_ERROR);
        }
    } catch (Exception $e) {
        trigger_error(sprintf(__('GStorage API: %s', 'backwpup'), $e->getMessage()), E_USER_ERROR);
        return;
    }
    try {
        if ($gstorage->if_bucket_exists($STATIC['JOB']['GStorageBucket'])) {
            if ($STATIC['JOB']['GStoragemaxbackups'] > 0) {
                //Delete old backups
                $backupfilelist = array();
                if (($contents = $gstorage->list_objects($STATIC['JOB']['GStorageBucket'], array('prefix' => $STATIC['JOB']['GStoragedir']))) !== false) {
                    foreach ($contents->body->Contents as $object) {
                        $file = basename($object->Key);
                        if ($STATIC['JOB']['fileprefix'] == substr($file, 0, strlen($STATIC['JOB']['fileprefix'])) and $STATIC['JOB']['fileformart'] == substr($file, -strlen($STATIC['JOB']['fileformart']))) {
                            $backupfilelist[] = $file;
                        }
                    }
                }
                if (sizeof($backupfilelist) > 0) {
                    rsort($backupfilelist);
                    $numdeltefiles = 0;
                    for ($i = $STATIC['JOB']['GStoragemaxbackups']; $i < sizeof($backupfilelist); $i++) {
                        if ($gstorage->delete_object($STATIC['JOB']['GStorageBucket'], $STATIC['JOB']['GStoragedir'] . $backupfilelist[$i])) {
                            //delete files on GStorage
                            $numdeltefiles++;
                        } else {
                            trigger_error(sprintf(__('Cannot delete backup on GStorage://%s', 'backwpup'), $STATIC['JOB']['GStorageBucket'] . '/' . $STATIC['JOB']['GStoragedir'] . $backupfilelist[$i]), E_USER_ERROR);
                        }
                    }
                    if ($numdeltefiles > 0) {
                        trigger_error(sprintf(_n('One file deleted on GStorage Bucket', '%d files deleted on GStorage Bucket', $numdeltefiles, 'backwpup'), $numdeltefiles), E_USER_NOTICE);
                    }
                }
            }
        }
    } catch (Exception $e) {
        trigger_error(sprintf(__('GStorage API: %s', 'backwpup'), $e->getMessage()), E_USER_ERROR);
        return;
    }
    $WORKING['STEPDONE']++;
}
}
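The only S3-specific change dest_gstorage() needs is the endpoint: the v1 AmazonS3 client is pointed at Google Storage's S3-compatible XML API. A minimal sketch with illustrative interoperability credentials and bucket name:

<?php
// Sketch: reusing the v1 AmazonS3 client against Google Storage.
require_once 'sdk.class.php';

$gstorage = new AmazonS3(array(
    'key'                   => 'GOOG_INTEROP_KEY',    // illustrative
    'secret'                => 'GOOG_INTEROP_SECRET', // illustrative
    'certificate_authority' => true,
));
$gstorage->set_hostname('storage.googleapis.com'); // talk to Google Storage, not AWS
$gstorage->allow_hostname_override(false);         // keep that hostname for all requests
$list = $gstorage->list_objects('my-gstorage-bucket', array('prefix' => 'backups/'));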
Example #12
0
if (false !== $stashDestination) {
    require_once pb_backupbuddy::plugin_path() . '/destinations/stash/lib/class.itx_helper.php';
    require_once pb_backupbuddy::plugin_path() . '/destinations/stash/init.php';
    //$stash = new ITXAPI_Helper( pb_backupbuddy_destination_stash::ITXAPI_KEY, pb_backupbuddy_destination_stash::ITXAPI_URL, pb_backupbuddy::$options['remote_destinations'][ $stashDestination ]['itxapi_username'], pb_backupbuddy::$options['remote_destinations'][ $stashDestination ]['itxapi_password'] );
    $manage_data = pb_backupbuddy_destination_stash::get_manage_data(pb_backupbuddy::$options['remote_destinations'][$stashDestination]);
    // Connect to S3.
    if (!is_array($manage_data['credentials'])) {
        die('Error #8484383c: Your authentication credentials for Stash failed. Verify your login and password to Stash. You may need to update the Stash destination settings. Perhaps you recently changed your password?');
    }
    $s3 = new AmazonS3($manage_data['credentials']);
    // the key, secret, token
    if (pb_backupbuddy::$options['remote_destinations'][$stashDestination]['ssl'] == '0') {
        @$s3->disable_ssl(true);
    }
    $response = $s3->list_objects($manage_data['bucket'], array('prefix' => $manage_data['subkey'] . '/deploy'));
    // list all the files in the subscriber account
    echo '<pre>';
    print_r($response);
    echo '</pre>';
    foreach ($response->body->Contents as $object) {
        print_r($object);
        echo '<br><br>';
        echo 'Bucket: ' . $manage_data['bucket'] . '<br>';
        echo 'Key: ' . $object->Key . '<br>';
        $metadata = $s3->get_object_metadata($manage_data['bucket'], $object->Key);
        //$metadata = $s3->get_object_metadata( "storage-api-ithemes", "y3xw057s35zp6s4i/deploy-backupbuddy.dat" );
        if (false === $metadata) {
            echo 'Meta result was FALSE.';
            print_r($metadata);
        } else {
Example #13
0
function backwpup_get_backup_files($jobid, $dest)
{
    global $backwpup_message;
    if (empty($jobid) or !in_array(strtoupper($dest), explode(',', strtoupper(BACKWPUP_DESTS))) and $dest != 'FOLDER') {
        return false;
    }
    $jobs = get_option('backwpup_jobs');
    //Load jobs
    $jobvalue = $jobs[$jobid];
    $filecounter = 0;
    $files = array();
    //Get files/file info in backup folder
    if ($dest == 'FOLDER' and !empty($jobvalue['backupdir']) and is_dir($jobvalue['backupdir'])) {
        if ($dir = opendir($jobvalue['backupdir'])) {
            while (($file = readdir($dir)) !== false) {
                if (substr($file, 0, 1) == '.') {
                    continue;
                }
                if (is_file($jobvalue['backupdir'] . $file)) {
                    $files[$filecounter]['JOBID'] = $jobid;
                    $files[$filecounter]['DEST'] = $dest;
                    $files[$filecounter]['folder'] = $jobvalue['backupdir'];
                    $files[$filecounter]['file'] = $jobvalue['backupdir'] . $file;
                    $files[$filecounter]['filename'] = $file;
                    $files[$filecounter]['downloadurl'] = backwpup_admin_url('admin.php') . '?page=backwpupbackups&action=download&file=' . $jobvalue['backupdir'] . $file;
                    $files[$filecounter]['filesize'] = filesize($jobvalue['backupdir'] . $file);
                    $files[$filecounter]['time'] = filemtime($jobvalue['backupdir'] . $file);
                    $filecounter++;
                }
            }
            closedir($dir);
        }
    }
    //Get files/file info from Dropbox
    if ($dest == 'DROPBOX' and !empty($jobvalue['dropetoken']) and !empty($jobvalue['dropesecret'])) {
        require_once realpath(dirname(__FILE__) . '/../libs/dropbox.php');
        try {
            $dropbox = new backwpup_Dropbox('dropbox');
            $dropbox->setOAuthTokens($jobvalue['dropetoken'], $jobvalue['dropesecret']);
            $contents = $dropbox->metadata($jobvalue['dropedir']);
            if (is_array($contents)) {
                foreach ($contents['contents'] as $object) {
                    if ($object['is_dir'] != true) {
                        $files[$filecounter]['JOBID'] = $jobid;
                        $files[$filecounter]['DEST'] = $dest;
                        $files[$filecounter]['folder'] = "https://api-content.dropbox.com/1/files/" . $jobvalue['droperoot'] . "/" . dirname($object['path']) . "/";
                        $files[$filecounter]['file'] = $object['path'];
                        $files[$filecounter]['filename'] = basename($object['path']);
                        $files[$filecounter]['downloadurl'] = backwpup_admin_url('admin.php') . '?page=backwpupbackups&action=downloaddropbox&file=' . $object['path'] . '&jobid=' . $jobid;
                        $files[$filecounter]['filesize'] = $object['bytes'];
                        $files[$filecounter]['time'] = strtotime($object['modified']);
                        $filecounter++;
                    }
                }
            }
        } catch (Exception $e) {
            $backwpup_message .= 'DROPBOX: ' . $e->getMessage() . '<br />';
        }
    }
    //Get files/file info from SugarSync
    if ($dest == 'SUGARSYNC' and !empty($jobvalue['sugarrefreshtoken'])) {
        if (!class_exists('SugarSync')) {
            require_once dirname(__FILE__) . '/../libs/sugarsync.php';
        }
        if (class_exists('SugarSync')) {
            try {
                $sugarsync = new SugarSync($jobvalue['sugarrefreshtoken']);
                $dirid = $sugarsync->chdir($jobvalue['sugardir'], $jobvalue['sugarroot']);
                $user = $sugarsync->user();
                $dir = $sugarsync->showdir($dirid);
                $getfiles = $sugarsync->getcontents('file');
                if (is_object($getfiles)) {
                    foreach ($getfiles->file as $getfile) {
                        $files[$filecounter]['JOBID'] = $jobid;
                        $files[$filecounter]['DEST'] = $dest;
                        $files[$filecounter]['folder'] = 'https://' . $user->nickname . '.sugarsync.com/' . $dir;
                        $files[$filecounter]['file'] = (string) $getfile->ref;
                        $files[$filecounter]['filename'] = utf8_decode((string) $getfile->displayName);
                        $files[$filecounter]['downloadurl'] = backwpup_admin_url('admin.php') . '?page=backwpupbackups&action=downloadsugarsync&file=' . (string) $getfile->ref . '&jobid=' . $jobid;
                        $files[$filecounter]['filesize'] = (int) $getfile->size;
                        $files[$filecounter]['time'] = strtotime((string) $getfile->lastModified);
                        $filecounter++;
                    }
                }
            } catch (Exception $e) {
                $backwpup_message .= 'SUGARSYNC: ' . $e->getMessage() . '<br />';
            }
        }
    }
    //Get files/file info from S3
    if ($dest == 'S3' and !empty($jobvalue['awsAccessKey']) and !empty($jobvalue['awsSecretKey']) and !empty($jobvalue['awsBucket'])) {
        if (!class_exists('AmazonS3')) {
            require_once dirname(__FILE__) . '/../libs/aws/sdk.class.php';
        }
        if (class_exists('AmazonS3')) {
            try {
                $s3 = new AmazonS3(array('key' => $jobvalue['awsAccessKey'], 'secret' => $jobvalue['awsSecretKey'], 'certificate_authority' => true));
                if (($contents = $s3->list_objects($jobvalue['awsBucket'], array('prefix' => $jobvalue['awsdir']))) !== false) {
                    foreach ($contents->body->Contents as $object) {
                        $files[$filecounter]['JOBID'] = $jobid;
                        $files[$filecounter]['DEST'] = $dest;
                        $files[$filecounter]['folder'] = "https://" . $jobvalue['awsBucket'] . ".s3.amazonaws.com/" . dirname((string) $object->Key) . '/';
                        $files[$filecounter]['file'] = (string) $object->Key;
                        $files[$filecounter]['filename'] = basename($object->Key);
                        $files[$filecounter]['downloadurl'] = backwpup_admin_url('admin.php') . '?page=backwpupbackups&action=downloads3&file=' . $object->Key . '&jobid=' . $jobid;
                        $files[$filecounter]['filesize'] = (string) $object->Size;
                        $files[$filecounter]['time'] = strtotime($object->LastModified);
                        $filecounter++;
                    }
                }
            } catch (Exception $e) {
                $backwpup_message .= 'Amazon S3: ' . $e->getMessage() . '<br />';
            }
        }
    }
    //Get files/file info from Google Storage
    if ($dest == 'GSTORAGE' and !empty($jobvalue['GStorageAccessKey']) and !empty($jobvalue['GStorageSecret']) and !empty($jobvalue['GStorageBucket'])) {
        if (!class_exists('AmazonS3')) {
            require_once dirname(__FILE__) . '/../libs/aws/sdk.class.php';
        }
        if (class_exists('AmazonS3')) {
            try {
                $gstorage = new AmazonS3(array('key' => $jobvalue['GStorageAccessKey'], 'secret' => $jobvalue['GStorageSecret'], 'certificate_authority' => true));
                $gstorage->set_hostname('storage.googleapis.com');
                $gstorage->allow_hostname_override(false);
                if (($contents = $gstorage->list_objects($jobvalue['GStorageBucket'], array('prefix' => $jobvalue['GStoragedir']))) !== false) {
                    foreach ($contents->body->Contents as $object) {
                        $files[$filecounter]['JOBID'] = $jobid;
                        $files[$filecounter]['DEST'] = $dest;
                        $files[$filecounter]['folder'] = "https://storage.cloud.google.com/" . $jobvalue['GStorageBucket'] . "/" . dirname((string) $object->Key) . '/';
                        $files[$filecounter]['file'] = (string) $object->Key;
                        $files[$filecounter]['filename'] = basename($object->Key);
                        $files[$filecounter]['downloadurl'] = "https://storage.cloud.google.com/" . $jobvalue['GStorageBucket'] . "/" . (string) $object->Key;
                        $files[$filecounter]['filesize'] = (string) $object->Size;
                        $files[$filecounter]['time'] = strtotime($object->LastModified);
                        $filecounter++;
                    }
                }
            } catch (Exception $e) {
                $backwpup_message .= sprintf(__('GStorage API: %s', 'backwpup'), $e->getMessage()) . '<br />';
            }
        }
    }
    //Get files/file info from Microsoft Azure
    if ($dest == 'MSAZURE' and !empty($jobvalue['msazureHost']) and !empty($jobvalue['msazureAccName']) and !empty($jobvalue['msazureKey']) and !empty($jobvalue['msazureContainer'])) {
        if (!class_exists('Microsoft_WindowsAzure_Storage_Blob')) {
            require_once dirname(__FILE__) . '/../libs/Microsoft/WindowsAzure/Storage/Blob.php';
        }
        if (class_exists('Microsoft_WindowsAzure_Storage_Blob')) {
            try {
                $storageClient = new Microsoft_WindowsAzure_Storage_Blob($jobvalue['msazureHost'], $jobvalue['msazureAccName'], $jobvalue['msazureKey']);
                $blobs = $storageClient->listBlobs($jobvalue['msazureContainer'], $jobvalue['msazuredir']);
                if (is_array($blobs)) {
                    foreach ($blobs as $blob) {
                        $files[$filecounter]['JOBID'] = $jobid;
                        $files[$filecounter]['DEST'] = $dest;
                        $files[$filecounter]['folder'] = "https://" . $jobvalue['msazureAccName'] . '.' . $jobvalue['msazureHost'] . "/" . $jobvalue['msazureContainer'] . "/" . dirname($blob->Name) . "/";
                        $files[$filecounter]['file'] = $blob->Name;
                        $files[$filecounter]['filename'] = basename($blob->Name);
                        $files[$filecounter]['downloadurl'] = backwpup_admin_url('admin.php') . '?page=backwpupbackups&action=downloadmsazure&file=' . $blob->Name . '&jobid=' . $jobid;
                        $files[$filecounter]['filesize'] = $blob->size;
                        $files[$filecounter]['time'] = strtotime($blob->lastmodified);
                        $filecounter++;
                    }
                }
            } catch (Exception $e) {
                $backwpup_message .= 'MSAZURE: ' . $e->getMessage() . '<br />';
            }
        }
    }
    //Get files/file info from RSC
    if ($dest == 'RSC' and !empty($jobvalue['rscUsername']) and !empty($jobvalue['rscAPIKey']) and !empty($jobvalue['rscContainer'])) {
        if (!class_exists('CF_Authentication')) {
            require_once dirname(__FILE__) . '/../libs/rackspace/cloudfiles.php';
        }
        if (class_exists('CF_Authentication')) {
            try {
                $auth = new CF_Authentication($jobvalue['rscUsername'], $jobvalue['rscAPIKey']);
                $auth->ssl_use_cabundle();
                if ($auth->authenticate()) {
                    $conn = new CF_Connection($auth);
                    $conn->ssl_use_cabundle();
                    $backwpupcontainer = $conn->get_container($jobvalue['rscContainer']);
                    $contents = $backwpupcontainer->get_objects(0, NULL, NULL, $jobvalue['rscdir']);
                    foreach ($contents as $object) {
                        $files[$filecounter]['JOBID'] = $jobid;
                        $files[$filecounter]['DEST'] = $dest;
                        $files[$filecounter]['folder'] = "RSC://" . $jobvalue['rscContainer'] . "/" . dirname($object->name) . "/";
                        $files[$filecounter]['file'] = $object->name;
                        $files[$filecounter]['filename'] = basename($object->name);
                        $files[$filecounter]['downloadurl'] = backwpup_admin_url('admin.php') . '?page=backwpupbackups&action=downloadrsc&file=' . $object->name . '&jobid=' . $jobid;
                        $files[$filecounter]['filesize'] = $object->content_length;
                        $files[$filecounter]['time'] = strtotime($object->last_modified);
                        $filecounter++;
                    }
                }
            } catch (Exception $e) {
                $backwpup_message .= 'RSC: ' . $e->getMessage() . '<br />';
            }
        }
    }
    //Get files/file info from FTP
    if ($dest == 'FTP' and !empty($jobvalue['ftphost']) and function_exists('ftp_connect') and !empty($jobvalue['ftpuser']) and !empty($jobvalue['ftppass'])) {
        if (function_exists('ftp_ssl_connect') and $jobvalue['ftpssl']) {
            //make SSL FTP connection
            $ftp_conn_id = ftp_ssl_connect($jobvalue['ftphost'], $jobvalue['ftphostport'], 10);
        } elseif (!$jobvalue['ftpssl']) {
            //make a normal FTP connection if SSL does not work
            $ftp_conn_id = ftp_connect($jobvalue['ftphost'], $jobvalue['ftphostport'], 10);
        }
        $loginok = false;
        if ($ftp_conn_id) {
            //FTP Login
            if (@ftp_login($ftp_conn_id, $jobvalue['ftpuser'], backwpup_base64($jobvalue['ftppass']))) {
                $loginok = true;
            } else {
                //if the PHP FTP login doesn't work, use a raw login
                ftp_raw($ftp_conn_id, 'USER ' . $jobvalue['ftpuser']);
                $return = ftp_raw($ftp_conn_id, 'PASS ' . backwpup_base64($jobvalue['ftppass']));
                if (substr(trim($return[0]), 0, 3) <= 400) {
                    $loginok = true;
                }
            }
        }
        if ($loginok) {
            ftp_chdir($ftp_conn_id, $jobvalue['ftpdir']);
            $currentftpdir = rtrim(ftp_pwd($ftp_conn_id), '/') . '/';
            ftp_pasv($ftp_conn_id, $jobvalue['ftppasv']);
            if ($ftpfilelist = ftp_nlist($ftp_conn_id, $currentftpdir)) {
                foreach ($ftpfilelist as $ftpfiles) {
                    if (substr(basename($ftpfiles), 0, 1) == '.') {
                        continue;
                    }
                    $files[$filecounter]['JOBID'] = $jobid;
                    $files[$filecounter]['DEST'] = $dest;
                    $files[$filecounter]['folder'] = "ftp://" . $jobvalue['ftphost'] . ':' . $jobvalue['ftphostport'] . dirname($ftpfiles) . "/";
                    $files[$filecounter]['file'] = $ftpfiles;
                    $files[$filecounter]['filename'] = basename($ftpfiles);
                    $files[$filecounter]['downloadurl'] = "ftp://" . rawurlencode($jobvalue['ftpuser']) . ":" . rawurlencode(backwpup_base64($jobvalue['ftppass'])) . "@" . $jobvalue['ftphost'] . ':' . $jobvalue['ftphostport'] . $ftpfiles;
                    $files[$filecounter]['filesize'] = ftp_size($ftp_conn_id, $ftpfiles);
                    $files[$filecounter]['time'] = ftp_mdtm($ftp_conn_id, $ftpfiles);
                    $filecounter++;
                }
            }
        } else {
            $backwpup_message .= 'FTP: ' . __('Login failure!', 'backwpup') . '<br />';
        }
        $donefolders[] = $jobvalue['ftphost'] . '|' . $jobvalue['ftpuser'] . '|' . $jobvalue['ftpdir'];
    }
    return $files;
}
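Every destination branch in backwpup_get_backup_files() fills the same per-file record shape. A hypothetical helper (not part of BackWPup) that makes that shape explicit:

<?php
// Sketch: the record shape built by backwpup_get_backup_files().
// backwpup_make_file_entry() is a hypothetical name for illustration.
function backwpup_make_file_entry($jobid, $dest, $folder, $file, $downloadurl, $filesize, $time)
{
    return array(
        'JOBID'       => $jobid,
        'DEST'        => $dest,        // FOLDER, DROPBOX, S3, GSTORAGE, ...
        'folder'      => $folder,      // containing directory or base URL
        'file'        => $file,        // full path/key at the destination
        'filename'    => basename($file),
        'downloadurl' => $downloadurl,
        'filesize'    => $filesize,    // bytes
        'time'        => $time,        // unix timestamp
    );
}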
Example #14
0
 *
 * Copyright 2009-2010 Amazon.com, Inc. or its affiliates. All Rights
 * Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"). You
 * may not use this file except in compliance with the License. A copy
 * of the License is located at
 *
 *       http://aws.amazon.com/apache2.0/
 *
 * or in the "license.txt" file accompanying this file. This file is
 * distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
 * OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the
 * License.
 *
 * Modified by Jeffrey S. Haemer <*****@*****.**>
 */
error_reporting(E_ALL);
require_once 'AWSSDKforPHP/sdk.class.php';
require_once 'include/book.inc.php';
if ($argc != 2) {
    exit("Usage: " . $argv[0] . " bucket_name\n");
}
$bucket = $argv[1] == '-' ? BOOK_BUCKET : $argv[1];
// Create the S3 access object
$s3 = new AmazonS3();
// List the bucket
$res = $s3->list_objects($bucket);
// Display the resulting object tree
print_r($res);
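Like the earlier raw-listing script, this one can check the response before dumping it; a hedged variant of the last two lines:

// Bail out on HTTP errors before displaying the tree.
$res = $s3->list_objects($bucket);
if (!$res->isOK()) {
    exit("list_objects failed with HTTP status {$res->status}\n");
}
print_r($res);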