/**
 * Load the AWS SDK, normalize settings, and (re)connect the S3 client when needed.
 *
 * The client is cached across calls: a new S3Client is only built when the md5
 * signature of the incoming (formatted) settings differs from the signature of
 * the settings used to build the current client. On (re)connect the bucket is
 * verified/created and the region pinned via self::_prepareBucketAndRegion().
 *
 * @param array $settings  Raw destination settings.
 *
 * @return array|mixed  Formatted & updated settings array on success, else the
 *                      return value of self::_error() on bucket preparation failure.
 */
private static function _init( $settings ) {
	pb_backupbuddy::status( 'details', 'Loading AWS SDK...' );
	require_once dirname( dirname( __FILE__ ) ) . '/_s3lib2/aws-autoloader.php';
	pb_backupbuddy::status( 'details', 'SDK loaded.' );

	// Format all settings.
	$settings = self::_formatSettings( $settings );

	// If not connected with these exact settings (compared by md5 signature of
	// the serialized settings) then connect & prepare the bucket.
	$newSignature = md5( serialize( $settings ) );
	if ( $newSignature != self::$_client_signature ) {
		// Reuse the signature computed above instead of re-hashing the same data.
		self::$_client_signature = $newSignature;

		$s3Config = self::getCredentials( $settings );
		if ( '0' == $settings['ssl'] ) {
			$s3Config['scheme'] = 'http';
			pb_backupbuddy::status( 'details', 'SSL disabled.' );
		}
		self::$_client = S3Client::factory( $s3Config );

		// Verify bucket exists; create if not. Also set region to the region bucket exists in.
		if ( false === self::_prepareBucketAndRegion( $settings ) ) {
			// Fixed typo: the message previously read from the non-existent key 'regision'.
			return self::_error( 'Error #983483437: Could not prepare bucket `' . $settings['bucket'] . '` in region `' . $settings['region'] . '`.' );
		}
	}

	return $settings; // Formatted & updated settings.
}
/**
 * Delete one or more remote files from the Stash-managed bucket.
 *
 * Fetches bucket name and temporary credentials via the Stash 'manage' API,
 * prefixes each file with the customer subkey + remote path, then delegates
 * the actual deletion to the s32 destination.
 *
 * @param array        $settings  Destination settings.
 * @param array|string $files     File name(s) to delete, relative to the remote path.
 *
 * @return bool|string  Result of pb_backupbuddy_destination_s32::deleteFiles() on
 *                      success, else an error message string if the Stash API failed.
 */
public static function deleteFiles( $settings, $files = array() ) {
	$settings = self::_init( $settings );

	// Fixed: previously assigned to $file, leaving a string $files un-normalized.
	if ( ! is_array( $files ) ) {
		$files = array( $files );
	}

	$remote_path = self::get_remote_path(); // Has leading and trailing slashes.

	$additionalParams = array();
	$manage_data      = self::stashAPI( $settings, 'manage', $additionalParams );
	if ( ! is_array( $manage_data ) ) {
		// API returned an error string instead of the expected data array.
		$error = 'Error #47349723: Unable to initiate file deletion for file(s) `' . implode( ', ', $files ) . '`. Details: `' . $manage_data . '`.';
		self::_error( $error );
		return $error;
	}

	$settings['bucket']      = $manage_data['bucket'];
	$settings['credentials'] = $manage_data['credentials'];

	// Prefix each file with the customer subkey and remote path so s32 sees full keys.
	foreach ( $files as &$file ) {
		$file = $manage_data['subkey'] . $remote_path . $file;
	}
	unset( $file ); // Break the by-reference binding left over from the foreach.

	// Removed leftover debug output (print_r of $files) that leaked into responses.
	return pb_backupbuddy_destination_s32::deleteFiles( $settings, $files );
}
/**
 * Send a file to the S3/Stash destination as a (possibly resumed) multipart upload.
 *
 * All transfers are treated as multipart for simplicity. Each PHP process sends as
 * many chunk "bursts" as fit in the allowed runtime; when time runs low a cron event
 * is scheduled to continue in a fresh process. In stash_mode the Stash API is
 * additionally notified on completion (with a delayed retry if confirmation times out).
 *
 * @param array  $settings      Destination settings (includes _multipart_* resume state).
 * @param string $file          Local path of the file to send.
 * @param string $send_id       Fileoptions send ID used for status tracking.
 * @param bool   $delete_after  Passed through to the rescheduled cron job.
 *
 * @return bool|array|mixed  true on full success; false on fatal error; an
 *                           array( multipart_id, status_message ) when more work was
 *                           scheduled for a later process; self::_error() result otherwise.
 */
public static function send( $settings = array(), $file, $send_id = '', $delete_after = false ) {
	global $pb_backupbuddy_destination_errors;

	if ( '1' == $settings['disabled'] ) {
		$pb_backupbuddy_destination_errors[] = __( 'Error #48933: This destination is currently disabled. Enable it under this destination\'s Advanced Settings.', 'it-l10n-backupbuddy' );
		return false;
	}

	$settings = self::_init( $settings ); // Handles formatting & sanitizing settings.
	$chunkSizeBytes = $settings['max_burst'] * 1024 * 1024; // Send X mb at a time to limit memory usage.
	self::$_timeStart = microtime( true );

	if ( pb_backupbuddy::$options['log_level'] == '3' ) { // Full logging enabled.
		pb_backupbuddy::status( 'details', 'Settings due to log level: `' . print_r( $settings, true ) . '`.' );
	}

	// Initiate multipart upload.
	if ( '' == $settings['_multipart_id'] ) { // New transfer. Note: All transfers are handled as presumed multiparts for ease.

		// Handle chunking of file into a multipart upload (if applicable).
		$file_size = filesize( $file );
		pb_backupbuddy::status( 'details', 'File size of `' . pb_backupbuddy::$format->file_size( $file_size ) . '`.' );

		if ( '1' != $settings['stash_mode'] ) {
			// About to chunk so cleanup any previous hanging multipart transfers.
			self::multipart_cleanup( $settings );
		}

		// Initiate multipart upload with S3.
		pb_backupbuddy::status( 'details', 'Initiating multipart transfer.' );
		$thisCall = array(
			'Bucket'               => $settings['bucket'],
			'Key'                  => $settings['directory'] . basename( $file ),
			'StorageClass'         => $settings['storage'],
			'ServerSideEncryption' => 'AES256',
		);
		if ( '1' == $settings['stash_mode'] ) {
			// Stash dictates the object key; storage class is not selectable in stash mode.
			$thisCall['Key'] = $settings['_stash_object'];
			unset( $thisCall['StorageClass'] );
		}
		try {
			$response = self::$_client->createMultipartUpload( $thisCall );
		} catch ( Exception $e ) {
			if ( pb_backupbuddy::$options['log_level'] == '3' ) { // Full logging enabled.
				pb_backupbuddy::status( 'details', 'Call details due to logging level: `' . print_r( $thisCall, true ) . '`.' );
			}
			return self::_error( 'Error #389383: Unable to initiate multipart upload. Details: `' . $e->getMessage() . '`.' );
		}

		// Made it here so SUCCESS initiating multipart!
		$upload_id = (string) $response['UploadId'];
		pb_backupbuddy::status( 'details', 'Initiated multipart upload with ID `' . $upload_id . '`.' );

		$backup_type = backupbuddy_core::getBackupTypeFromFile( $file );

		// Calculate multipart settings (resume state carried between PHP processes).
		$multipart_destination_settings = $settings;
		$multipart_destination_settings['_multipart_id'] = $upload_id;
		$multipart_destination_settings['_multipart_partnumber'] = 0;
		$multipart_destination_settings['_multipart_file'] = $file;
		$multipart_destination_settings['_multipart_remotefile'] = $settings['directory'] . basename( $file );
		if ( '1' == $settings['stash_mode'] ) {
			$multipart_destination_settings['_multipart_remotefile'] = $settings['_stash_object'];
		}
		$multipart_destination_settings['_multipart_counts'] = self::_get_multipart_counts( $file_size, $settings['max_burst'] * 1024 * 1024 ); // Size of chunks expected to be in bytes.
		$multipart_destination_settings['_multipart_backup_type'] = $backup_type;
		$multipart_destination_settings['_multipart_backup_size'] = $file_size;
		$multipart_destination_settings['_multipart_etag_parts'] = array();
		// NOTE(review): the next assignment was reconstructed from a garbled/censored
		// span in the original source; verify against upstream BackupBuddy if possible.
		$multipart_destination_settings['_multipart_status'] = 'Starting send of ' . count( $multipart_destination_settings['_multipart_counts'] ) . ' parts.';
		pb_backupbuddy::status( 'details', 'Multipart initiated; passing over to send first chunk this run.' );
		$settings = $multipart_destination_settings; // Copy over settings.
		unset( $multipart_destination_settings );
	} // end initiating multipart.

	// Send parts.
	$backup_type = str_replace( '/', '', $settings['_multipart_backup_type'] ); // For use later by file limiting.

	$maxTime = $settings['max_time'];
	if ( '' == $maxTime || ! is_numeric( $maxTime ) ) {
		pb_backupbuddy::status( 'details', 'Max time not set in settings so detecting server max PHP runtime.' );
		$maxTime = backupbuddy_core::detectMaxExecutionTime();
	}
	pb_backupbuddy::status( 'details', 'Using max runtime: `' . $maxTime . '`.' );

	// Open file for streaming.
	$f = @fopen( $settings['_multipart_file'], 'r' );
	if ( false === $f ) {
		return self::_error( 'Error #437734. Unable to open file `' . $settings['_multipart_file'] . '` to send. Did it get deleted?' );
	}

	$fileDone = false;
	while ( ! $fileDone && ! feof( $f ) ) {
		$sendStart = microtime( true );

		if ( ! isset( $settings['_retry_stash_confirm'] ) || true !== $settings['_retry_stash_confirm'] ) { // Skip send if only needing to confirm.

			// Increment for next part to send (partnumber is 1-based; counts array is 0-based).
			$settings['_multipart_partnumber']++;
			if ( ! isset( $settings['_multipart_counts'][ $settings['_multipart_partnumber'] - 1 ]['seekTo'] ) ) {
				pb_backupbuddy::status( 'error', 'Error #8239933: Missing multipart partnumber to seek to. Settings array: `' . print_r( $settings, true ) . '`.' );
			}
			if ( -1 == fseek( $f, (int) $settings['_multipart_counts'][ $settings['_multipart_partnumber'] - 1 ]['seekTo'] ) ) {
				return self::_error( 'Error #833838: Unable to fseek file.' );
			}

			pb_backupbuddy::status( 'details', 'Beginning upload of part `' . $settings['_multipart_partnumber'] . '` of `' . count( $settings['_multipart_counts'] ) . '` parts of file `' . $settings['_multipart_file'] . '` to remote location `' . $settings['_multipart_remotefile'] . '` with multipart ID `' . $settings['_multipart_id'] . '`.' );

			$contentLength = (int) $settings['_multipart_counts'][ $settings['_multipart_partnumber'] - 1 ]['length'];
			$uploadArr = array(
				'Bucket'        => $settings['bucket'],
				'Key'           => $settings['_multipart_remotefile'],
				'UploadId'      => $settings['_multipart_id'],
				'PartNumber'    => $settings['_multipart_partnumber'],
				'ContentLength' => $contentLength,
				'Body'          => fread( $f, $contentLength ),
			);
			try {
				$response = self::$_client->uploadPart( $uploadArr );
			} catch ( Exception $e ) {
				@fclose( $f );
				return self::_error( 'Error #3897923: Unable to upload file part for multipart upload of ID `' . $settings['_multipart_id'] . '`. Details: `' . $e->getMessage() . '`.' );
			}

			self::$_chunksSentThisRound++;
			// ETag of each part is required later by completeMultipartUpload().
			$settings['_multipart_etag_parts'][] = array( 'PartNumber' => $settings['_multipart_partnumber'], 'ETag' => $response['ETag'] );

			if ( pb_backupbuddy::$options['log_level'] == '3' ) { // Full logging enabled.
				pb_backupbuddy::status( 'details', 'Success sending chunk. Upload details due to log level: `' . print_r( $response, true ) . '`.' );
			} else {
				pb_backupbuddy::status( 'details', 'Success sending chunk. Enable full logging for upload result details.' );
			}

			$uploaded_size = $contentLength;
			$elapseTime = microtime( true ) - $sendStart;
			if ( 0 == $elapseTime ) {
				$elapseTime = 1; // Avoid divide-by-zero on sub-resolution timings.
			}
			$uploaded_speed = $uploaded_size / $elapseTime;
			pb_backupbuddy::status( 'details', 'Uploaded size this burst: `' . pb_backupbuddy::$format->file_size( $uploaded_size ) . '`, Start time: `' . $sendStart . '`. Finish time: `' . microtime( true ) . '`. Elapsed: `' . ( microtime( true ) - $sendStart ) . '`. Speed: `' . pb_backupbuddy::$format->file_size( $uploaded_speed ) . '`/sec.' );
		}

		// Load fileoptions for this send (once per process).
		if ( isset( $fileoptions_obj ) ) {
			pb_backupbuddy::status( 'details', 'fileoptions already loaded from prior pass.' );
		} else { // load fileoptions
			pb_backupbuddy::status( 'details', 'About to load fileoptions data.' );
			require_once pb_backupbuddy::plugin_path() . '/classes/fileoptions.php';
			pb_backupbuddy::status( 'details', 'Fileoptions instance #10.' );
			$fileoptions_obj = new pb_backupbuddy_fileoptions( backupbuddy_core::getLogDirectory() . 'fileoptions/send-' . $send_id . '.txt', $read_only = false, $ignore_lock = false, $create_file = false );
			if ( true !== ( $result = $fileoptions_obj->is_ok() ) ) {
				return self::_error( __( 'Fatal Error #9034.23788723. Unable to access fileoptions data.', 'it-l10n-backupbuddy' ) . ' Error: ' . $result );
			}
			pb_backupbuddy::status( 'details', 'Fileoptions data loaded.' );
			$fileoptions =& $fileoptions_obj->options;
		}

		if ( ! isset( $settings['_multipart_counts'][ $settings['_multipart_partnumber'] ] ) ) {
			// No more parts exist for this file. Tell S3 the multipart upload is complete and move on.

			if ( isset( $settings['_retry_stash_confirm'] ) && true === $settings['_retry_stash_confirm'] ) { // Need to retry checking that the file confirm was a success.

				// Grab array of files from customer's stash directory.
				$files = pb_backupbuddy_destination_stash2::listFiles( $settings, $settings['_multipart_file'] );
				if ( count( $files ) > 0 ) {
					pb_backupbuddy::status( 'details', 'Stash confirmed upload completition was successful.' );
				} else {
					pb_backupbuddy::status( 'error', 'Error #23972793: Error notifying Stash of upload success even after wait. Details: `' . print_r( $response, true ) . '`.' );
					return false;
				}

			} else { // Normal Stash part send.

				$update_status = 'Sent part ' . $settings['_multipart_partnumber'] . ' of ' . count( $settings['_multipart_counts'] ) . ' parts.';
				pb_backupbuddy::status( 'details', 'Getting etags and notifying of multipart upload completion.' );
				try {
					$response = self::$_client->completeMultipartUpload( array(
						'Bucket'   => $settings['bucket'],
						'UploadId' => $settings['_multipart_id'],
						'Key'      => $settings['_multipart_remotefile'],
						'Parts'    => $settings['_multipart_etag_parts'],
					) );
				} catch ( Exception $e ) {
					return self::_error( 'Unable to notify server of completion of all parts for multipart upload `' . $settings['_multipart_id'] . '`. Details: `' . $e->getMessage() . '`.' );
				}
				pb_backupbuddy::status( 'details', 'Server notified of multipart completion.' );

				if ( '1' == $settings['stash_mode'] ) { // Stash send confirm.
					pb_backupbuddy::status( 'details', 'Notifying Stash of upload completion.' );
					$additionalParams = array(
						'upload_id' => $settings['_stash_upload_id'],
					);
					$response = pb_backupbuddy_destination_stash2::stashAPI( $settings, 'upload-complete', $additionalParams );

					if ( ! is_array( $response ) || ! isset( $response['success'] ) ) { // If not array OR success key missing. May be a timeout or waiting on AWS system to combine multipart still. Check for file later.

						$settings['_retry_stash_confirm'] = true;
						$settings['_multipart_counts'] = array(); // No more parts remain.
						$cronTime = time() + self::STASH_CONFIRM_RETRY_DELAY;
						$cronArgs = array( $settings, $file, $send_id, $delete_after );
						$cronHashID = md5( $cronTime . serialize( $cronArgs ) );
						$cronArgs[] = $cronHashID;
						$schedule_result = backupbuddy_core::schedule_single_event( $cronTime, 'destination_send', $cronArgs );
						if ( true === $schedule_result ) {
							pb_backupbuddy::status( 'details', 'Scheduled retry attempt to confirm send in `' . self::STASH_CONFIRM_RETRY_DELAY . '` seconds.' );
						} else {
							pb_backupbuddy::status( 'error', 'Scheduled retry attempt FAILED to be scheduled.' );
						}

						/* TODO: Once PING API is available, request a ping in the future so we make sure this actually runs reasonably soon.
						 * Because we need a delay we are not firing off the cron here immediately so there will be no chaining of PHP
						 * which may result in large delays before the next process if there's little site traffic.
						 */

						return array( $settings['_multipart_id'], 'Pending multipart send confirmation.' );

					} else { // Array.
						if ( isset( $response['success'] ) && true !== $response['success'] ) { // Success key set AND not true.
							pb_backupbuddy::status( 'error', 'Error #83298932: Error notifying Stash of upload success. Details: `' . print_r( $response, true ) . '`.' );
							return false;
						} else { // Success.
							pb_backupbuddy::status( 'details', 'Stash notified of upload completition.' );
						}
					}
				}
			} // end not a Stash confirm retry.

			pb_backupbuddy::status( 'details', 'No more parts left for this multipart upload. Clearing multipart instance variables.' );
			$settings['_multipart_partnumber'] = 0;
			$settings['_multipart_id'] = '';
			$settings['_multipart_file'] = '';
			$settings['_multipart_remotefile'] = ''; // Multipart completed so safe to prevent housekeeping of incomplete multipart uploads.

			// Guarded: in the confirm-retry path no chunk was sent this process, so
			// $uploaded_speed is unset and _multipart_counts is empty (previously
			// caused an undefined variable + division by zero).
			if ( isset( $uploaded_speed ) ) {
				$settings['_multipart_transferspeeds'][] = $uploaded_speed; // Overall upload speed average.
			}
			if ( count( $settings['_multipart_counts'] ) > 0 ) {
				$uploaded_speed = array_sum( $settings['_multipart_transferspeeds'] ) / count( $settings['_multipart_counts'] );
				pb_backupbuddy::status( 'details', 'Upload speed average of all chunks: `' . pb_backupbuddy::$format->file_size( $uploaded_speed ) . '`.' );
			}
			$settings['_multipart_counts'] = array();

			// Update stats.
			if ( isset( $update_status ) ) {
				$fileoptions['_multipart_status'] = $update_status;
			}
			$fileoptions['finish_time'] = microtime( true );
			$fileoptions['status'] = 'success';
			if ( isset( $uploaded_speed ) ) {
				$fileoptions['write_speed'] = $uploaded_speed;
			}
			$fileoptions_obj->save();
			unset( $fileoptions );

			$fileDone = true;
			@fclose( $f );

		} else { // Parts remain. Schedule to continue if anything is left to upload for this multipart of any individual files.

			pb_backupbuddy::status( 'details', 'S3 multipart upload has more parts left.' );

			$update_status = '<br>';
			$totalSent = 0;
			for ( $i = 0; $i < $settings['_multipart_partnumber']; $i++ ) {
				$totalSent += $settings['_multipart_counts'][ $i ]['length'];
			}
			$percentSent = ceil( $totalSent / $settings['_multipart_backup_size'] * 100 );
			$update_status .= '<div class="backupbuddy-progressbar" data-percent="' . $percentSent . '"><div class="backupbuddy-progressbar-label"></div></div>';

			if ( '0' != $maxTime ) { // Not unlimited time so see if we can send more bursts this time or if we need to chunk.
				// If we are within X second of reaching maximum PHP runtime then stop here so that it can be picked up in another PHP process.
				$totalSizeSent = self::$_chunksSentThisRound * $chunkSizeBytes; // Total bytes sent this PHP load.
				$bytesPerSec = $totalSizeSent / ( microtime( true ) - $sendStart );
				$timeRemaining = $maxTime - ( microtime( true ) - self::$_timeStart + self::TIME_WIGGLE_ROOM );
				if ( $timeRemaining < 0 ) {
					$timeRemaining = 0;
				}
				$bytesWeCouldSendWithTimeLeft = $bytesPerSec * $timeRemaining;

				if ( $bytesWeCouldSendWithTimeLeft < $chunkSizeBytes ) { // NOT enough time (incl. wiggle room) for a whole chunk; hand off to a new process. (Original comment had this inverted.)
					pb_backupbuddy::status( 'message', 'Not enough time left (~`' . $timeRemaining . '`) with max time of `' . $maxTime . '` sec to send another chunk at `' . pb_backupbuddy::$format->file_size( $bytesPerSec ) . '` / sec. Ran for ' . round( microtime( true ) - self::$_timeStart, 3 ) . ' sec. Proceeding to use chunking.' );
					// Removed `@fclose( $fs );` — $fs was never defined; the real handle $f is closed below.

					$cronTime = time();
					$cronArgs = array( $settings, $file, $send_id, $delete_after );
					$cronHashID = md5( $cronTime . serialize( $cronArgs ) );
					$cronArgs[] = $cronHashID;
					$schedule_result = backupbuddy_core::schedule_single_event( $cronTime, 'destination_send', $cronArgs );
					if ( true === $schedule_result ) {
						pb_backupbuddy::status( 'details', 'Next S3 chunk step cron event scheduled.' );
					} else {
						pb_backupbuddy::status( 'error', 'Next S3 chunk step cron even FAILED to be scheduled.' );
					}
					spawn_cron( time() + 150 ); // Adds > 60 seconds to get around once per minute cron running limit.
					update_option( '_transient_doing_cron', 0 ); // Prevent cron-blocking for next item.

					@fclose( $f );
					unset( $fileoptions );

					return array( $settings['_multipart_id'], 'Sent part ' . $settings['_multipart_partnumber'] . ' of ' . count( $settings['_multipart_counts'] ) . ' parts.' . $update_status );
				} else { // End if.
					pb_backupbuddy::status( 'details', 'Not approaching limits. Proceeding to next burst this run.' );
				}
			} else {
				pb_backupbuddy::status( 'details', 'Max time of zero (0) so assuming unlimited time.' );
			}

			$fileoptions['_multipart_status'] = 'Sent part ' . $settings['_multipart_partnumber'] . ' of ' . count( $settings['_multipart_counts'] ) . ' parts.' . $update_status;
			$fileoptions_obj->save();
		} // end no more parts remain.
	} // End while not feof.

	/***** BEGIN FILE ARCHIVE LIMITS *****/
	if ( '1' == $settings['stash_mode'] ) {
		// This is being wrapped by the Stash destination. Stash uses a different method of handling archive limiting due to using Stash API.
		pb_backupbuddy_destination_stash2::archiveLimit( $settings, $backup_type );
	} else {
		// Normal. This is just a s32 destination.
		self::archiveLimit( $settings, $backup_type );
	}
	/***** END FILE ARCHIVE LIMITS *****/

	if ( isset( $fileoptions_obj ) ) {
		unset( $fileoptions_obj );
	}

	// Success if we made it this far.
	return true;
}
} // end copying to local.
// NOTE(review): this is a procedural fragment of the destination's admin "manage"
// page; the construct closed above and the foreach opened below continue outside
// this view.

// Handle download link: emit a one-hour pre-signed URL for the requested file.
if (pb_backupbuddy::_GET('downloadlink_file') != '') {
	$link = pb_backupbuddy_destination_s32::getFileURL($settings, pb_backupbuddy::_GET('downloadlink_file'));
	pb_backupbuddy::alert('You may download this backup (' . pb_backupbuddy::_GET('downloadlink_file') . ') with <a href="' . $link . '">this link</a>. The link is valid for one hour.');
	echo '<br>';
} // end download link.

// Get list of files for this site. Unless listAll is requested, restrict the
// remote prefix to this site's own backup files.
if ('true' != pb_backupbuddy::_GET('listAll')) {
	$remotePath = $settings['directory'] . 'backup-' . backupbuddy_core::backup_prefix();
} else {
	$remotePath = $settings['directory'];
}
$files = pb_backupbuddy_destination_s32::listFiles($settings, $remotePath);
if (!is_array($files)) {
	// listFiles() returned an error string rather than an object array.
	die('Error listing files: `' . $files . '`.');
}
$backup_list_temp = array();
foreach ($files as $object) {
	// Strip the destination directory prefix to get the bare file name.
	$file = str_ireplace($settings['directory'], '', $object['Key']);
	if (FALSE !== stristr($file, '/')) {
		// Do NOT display any files within a deeper subdirectory.
		continue;
	}
	if (!preg_match(pb_backupbuddy_destination_s32::BACKUP_FILENAME_PATTERN, $file) && 'importbuddy.php' !== $file) {
		// Do not display any files that do not appear to be a BackupBuddy backup file (except importbuddy.php).
		continue;
	}
	$last_modified = strtotime($object['LastModified']);
/**
 * Delete one or more remote files from the Stash-managed bucket (manage-page variant).
 *
 * Fetches bucket name and temporary credentials via the Stash 'manage' API,
 * prefixes each file with the customer subkey + remote path, then delegates
 * the actual deletion to the s32 destination.
 *
 * @param array $settings  Destination settings.
 * @param array $files     File names to delete, relative to the remote path.
 *
 * @return bool|string  Result of pb_backupbuddy_destination_s32::deleteFiles() on
 *                      success, else an error message string if the Stash API failed.
 */
public static function deleteFiles( $settings, $files ) {
	$settings = self::_formatSettings( $settings );

	$remote_path = self::get_remote_path(); // Has leading and trailing slashes.

	$additionalParams = array();
	$manage_data      = self::stashAPI( $settings, 'manage', $additionalParams );
	if ( ! is_array( $manage_data ) ) {
		// Guard added for consistency with the other deleteFiles() implementation:
		// previously a string error response caused undefined-index access below.
		return 'Error #47349723: Unable to initiate file deletion for file(s) `' . implode( ', ', (array) $files ) . '`. Details: `' . $manage_data . '`.';
	}

	$settings['bucket']      = $manage_data['bucket'];
	$settings['credentials'] = $manage_data['credentials'];

	// Prefix each file with the customer subkey and remote path so s32 sees full keys.
	foreach ( $files as &$file ) {
		$file = $manage_data['subkey'] . $remote_path . $file;
	}
	unset( $file ); // Break the by-reference binding left over from the foreach.

	// Removed leftover debug output (echo of dangling $file and print_r of $files)
	// that leaked into the admin page output.
	return pb_backupbuddy_destination_s32::deleteFiles( $settings, $files );
}