Example #1
 public static function send($settings = array(), $files = array(), $clear_uploads = false)
 {
     global $pb_backupbuddy_destination_errors;
     if (!is_array($files)) {
         $files = array($files);
     }
     if ($clear_uploads === false) {
         // Uncomment the following line to override and always clear.
         //$clear_uploads = true;
     }
     $itxapi_username = $settings['itxapi_username'];
     $itxapi_password = $settings['itxapi_password'];
     $db_archive_limit = $settings['db_archive_limit'];
     $full_archive_limit = $settings['full_archive_limit'];
     $max_chunk_size = $settings['max_chunk_size'];
     $remote_path = self::get_remote_path($settings['directory']);
      // Has leading and trailing slashes.
     if ($settings['ssl'] == '0') {
         $disable_ssl = true;
     } else {
         $disable_ssl = false;
     }
     $multipart_id = $settings['_multipart_id'];
     $multipart_counts = $settings['_multipart_counts'];
     pb_backupbuddy::status('details', 'Stash remote path set to `' . $remote_path . '`.');
     require_once dirname(__FILE__) . '/lib/class.itx_helper.php';
     require_once dirname(__FILE__) . '/lib/aws-sdk/sdk.class.php';
     // Stash API talk.
     $stash = new ITXAPI_Helper(pb_backupbuddy_destination_stash::ITXAPI_KEY, pb_backupbuddy_destination_stash::ITXAPI_URL, $itxapi_username, $itxapi_password);
     $manage_data = pb_backupbuddy_destination_stash::get_manage_data($settings);
     // Wipe all current uploads.
     if ($clear_uploads === true) {
         pb_backupbuddy::status('details', 'Clearing any current uploads via Stash call to `abort-all`.');
         $abort_url = $stash->get_upload_url(null, 'abort-all');
         $request = new RequestCore($abort_url);
         //pb_backupbuddy::status('details', print_r( $request , true ) );
         $response = $request->send_request(true);
     }
     // Process multipart transfer that we already initiated in a previous PHP load.
     if ($multipart_id != '') {
         // Multipart upload initiated and needs parts sent.
         // Create S3 instance.
         pb_backupbuddy::status('details', 'Creating Stash S3 instance.');
         $s3 = new AmazonS3($settings['_multipart_upload_data']['credentials']);
         // the key, secret, token
         if ($disable_ssl === true) {
             @$s3->disable_ssl(true);
         }
         pb_backupbuddy::status('details', 'Stash S3 instance created.');
         $this_part_number = $settings['_multipart_partnumber'] + 1;
         pb_backupbuddy::status('details', 'Stash beginning upload of part `' . $this_part_number . '` of `' . count($settings['_multipart_counts']) . '` parts of file `' . $settings['_multipart_file'] . '` with multipart ID `' . $settings['_multipart_id'] . '`.');
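          // S3 part numbers are 1-based while the _multipart_counts array is indexed
          // from 0, so part (partnumber + 1) is sent using the seekTo/length window
          // stored at index partnumber.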
         $response = $s3->upload_part($settings['_multipart_upload_data']['bucket'], $settings['_multipart_upload_data']['object'], $settings['_multipart_id'], array('expect' => '100-continue', 'fileUpload' => $settings['_multipart_file'], 'partNumber' => $this_part_number, 'seekTo' => (int) $settings['_multipart_counts'][$settings['_multipart_partnumber']]['seekTo'], 'length' => (int) $settings['_multipart_counts'][$settings['_multipart_partnumber']]['length']));
         if (!$response->isOK()) {
             $this_error = 'Stash unable to upload file part for multipart upload `' . $settings['_multipart_id'] . '`. Details: `' . print_r($response, true) . '`.';
             $pb_backupbuddy_destination_errors[] = $this_error;
             pb_backupbuddy::status('error', $this_error);
             return false;
         }
         // Update stats.
         foreach (pb_backupbuddy::$options['remote_sends'] as $identifier => $remote_send) {
             if (isset($remote_send['_multipart_id']) && $remote_send['_multipart_id'] == $multipart_id) {
                 // this item.
                 pb_backupbuddy::$options['remote_sends'][$identifier]['_multipart_status'] = 'Sent part ' . $this_part_number . ' of ' . count($settings['_multipart_counts']) . '.';
                 if ($this_part_number == count($settings['_multipart_counts'])) {
                     pb_backupbuddy::$options['remote_sends'][$identifier]['_multipart_status'] .= '<br>Success.';
                     pb_backupbuddy::$options['remote_sends'][$identifier]['finish_time'] = time();
                 }
                 pb_backupbuddy::save();
                 break;
             }
         }
         // Made it here so success sending part. Increment for next part to send.
         $settings['_multipart_partnumber']++;
         if (!isset($settings['_multipart_counts'][$settings['_multipart_partnumber']])) {
             // No more parts exist for this file. Tell S3 the multipart upload is complete and move on.
             pb_backupbuddy::status('details', 'Stash getting parts with etags to notify S3 of completed multipart send.');
             $etag_parts = $s3->list_parts($settings['_multipart_upload_data']['bucket'], $settings['_multipart_upload_data']['object'], $settings['_multipart_id']);
             pb_backupbuddy::status('details', 'Stash got parts list. Notifying S3 of multipart upload completion.');
             $response = $s3->complete_multipart_upload($settings['_multipart_upload_data']['bucket'], $settings['_multipart_upload_data']['object'], $settings['_multipart_id'], $etag_parts);
             if (!$response->isOK()) {
                 $this_error = 'Stash unable to notify S3 of completion of all parts for multipart upload `' . $settings['_multipart_id'] . '`.';
                 $pb_backupbuddy_destination_errors[] = $this_error;
                 pb_backupbuddy::status('error', $this_error);
                 return false;
             } else {
                 pb_backupbuddy::status('details', 'Stash notified S3 of multipart completion.');
             }
              // Notify Stash API that things were successful.
             $done_url = $stash->get_upload_url($settings['_multipart_file'], 'done', $remote_path . $settings['_multipart_backup_type_dir'] . basename($settings['_multipart_file']));
             pb_backupbuddy::status('details', 'Notifying Stash of completed multipart upload with done url `' . $done_url . '`.');
             $request = new RequestCore($done_url);
             $response = $request->send_request(true);
             if (!$response->isOK()) {
                 $this_error = 'Error #756834682. Could not finalize Stash upload. Response code: `' . $response->get_response_code() . '`; Response body: `' . $response->get_response_body() . '`; Response headers: `' . $response->get_response_header() . '`.';
                 $pb_backupbuddy_destination_errors[] = $this_error;
                 pb_backupbuddy::status('error', $this_error);
                 return false;
             } else {
                 // Good server response.
                 // See if we got an optional json response.
                 $upload_data = @json_decode($response->body, true);
                 if (isset($upload_data['error'])) {
                     $this_error = 'Stash error(s): `' . implode(' - ', $upload_data['error']) . '`.';
                     $pb_backupbuddy_destination_errors[] = $this_error;
                     pb_backupbuddy::status('error', $this_error);
                     return false;
                 }
                 pb_backupbuddy::status('details', 'Stash success sending file `' . basename($settings['_multipart_file']) . '`. File uploaded via multipart across `' . $this_part_number . '` parts and reported to Stash as completed.');
             }
             pb_backupbuddy::status('details', 'Stash has no more parts left for this multipart upload. Clearing multipart instance variables.');
             $settings['_multipart_partnumber'] = 0;
             $settings['_multipart_id'] = '';
             $settings['_multipart_file'] = '';
             $settings['_multipart_counts'] = array();
             $settings['_multipart_upload_data'] = array();
         }
         delete_transient('pb_backupbuddy_stashquota_' . $settings['itxapi_username']);
         // Delete quota transient since it probably has changed now.
         // Schedule to continue if anything is left to upload for this multipart of any individual files.
         if ($settings['_multipart_id'] != '' || count($files) > 0) {
             pb_backupbuddy::status('details', 'Stash multipart upload has more parts left. Scheduling next part send.');
             wp_schedule_single_event(time(), pb_backupbuddy::cron_tag('destination_send'), array($settings, $files, 'multipart', false));
             spawn_cron(time() + 150);
             // Adds > 60 seconds to get around once per minute cron running limit.
             update_option('_transient_doing_cron', 0);
             // Prevent cron-blocking for next item.
             pb_backupbuddy::status('details', 'Stash scheduled send of next part(s). Done for this cycle.');
              return array($settings['_multipart_id'], 'Sent ' . $this_part_number . ' of ' . count($settings['_multipart_counts']) . ' parts.');
         }
     }
     // Upload each file.
     foreach ($files as $file_id => $file) {
         // Determine backup type directory (if zip).
         $backup_type_dir = '';
         $backup_type = '';
         if (stristr($file, '.zip') !== false) {
             // If a zip try to determine backup type.
             pb_backupbuddy::status('details', 'Stash: Zip file. Detecting backup type if possible.');
             $serial = pb_backupbuddy::$classes['core']->get_serial_from_file($file);
             if (isset(pb_backupbuddy::$options['backups'][$serial]['integrity']['detected_type'])) {
                 pb_backupbuddy::status('details', 'Stash: Detected backup type as `' . pb_backupbuddy::$options['backups'][$serial]['integrity']['detected_type'] . '` via integrity check data.');
                 $backup_type_dir = pb_backupbuddy::$options['backups'][$serial]['integrity']['detected_type'] . '/';
                 $backup_type = pb_backupbuddy::$options['backups'][$serial]['integrity']['detected_type'];
             } else {
                 if (stristr($file, '-db-') !== false) {
                     pb_backupbuddy::status('details', 'Stash: Detected backup type as `db` via filename.');
                     $backup_type_dir = 'db/';
                     $backup_type = 'db';
                 } elseif (stristr($file, '-full-') !== false) {
                     pb_backupbuddy::status('details', 'Stash: Detected backup type as `full` via filename.');
                     $backup_type_dir = 'full/';
                     $backup_type = 'full';
                 } else {
                     pb_backupbuddy::status('details', 'Stash: Could not detect backup type via integrity details nor filename.');
                 }
             }
         }
         // Interact with Stash API.
          pb_backupbuddy::status('details', 'Determining Stash upload URL for `' . $file . '` with destination remote path `' . $remote_path . $backup_type_dir . basename($file) . '`.');
         $upload_url = $stash->get_upload_url($file, 'request', $remote_path . $backup_type_dir . basename($file));
         pb_backupbuddy::status('details', 'Determined upload url: `' . $upload_url . '`.');
         $request = new RequestCore($upload_url);
         pb_backupbuddy::status('details', 'Sending Stash API request.');
         $response = $request->send_request(true);
         // Validate response.
         if (!$response->isOK()) {
             $this_error = 'Stash request for upload credentials failed.';
             $pb_backupbuddy_destination_errors[] = $this_error;
             pb_backupbuddy::status('error', $this_error);
             return false;
         }
         if (!($upload_data = json_decode($response->body, true))) {
             $this_error = 'Stash API did not give a valid JSON response.';
             $pb_backupbuddy_destination_errors[] = $this_error;
             pb_backupbuddy::status('error', $this_error);
             return false;
         }
         if (isset($upload_data['error'])) {
             $this_error = 'Stash error(s): `' . implode(' - ', $upload_data['error']) . '`.';
             $pb_backupbuddy_destination_errors[] = $this_error;
             pb_backupbuddy::status('error', $this_error);
             return false;
         }
         // Calculate meta data to send.
         /*
         $meta_array = array();
         if ( stristr( $file, '.zip' ) !== false ) { // If a zip try to determine backup type.
         	pb_backupbuddy::status( 'details', 'Stash: Zip file. Detecting backup type if possible.' );
         	$serial = pb_backupbuddy::$classes['core']->get_serial_from_file( $file );
         	if ( isset( pb_backupbuddy::$options['backups'][$serial]['integrity']['detected_type'] ) ) {
         		pb_backupbuddy::status( 'details', 'Stash: Detected backup type as `' . pb_backupbuddy::$options['backups'][$serial]['integrity']['detected_type'] . '` via integrity check data.' );
         		$meta_array['backup_type'] = pb_backupbuddy::$options['backups'][$serial]['integrity']['detected_type'];
         	} else {
         		if ( stristr( $file, '-db-' ) !== false ) {
         			pb_backupbuddy::status( 'details', 'Stash: Detected backup type as `db` via filename.' );
         			$meta_array['backup_type'] = 'db';
         		} elseif ( stristr( $file, '-full-' ) !== false ) {
         			pb_backupbuddy::status( 'details', 'Stash: Detected backup type as `full` via filename.' );
         			$meta_array['backup_type'] = 'full';
         		} else {
         			pb_backupbuddy::status( 'details', 'Stash: Could not detect backup type via integrity details nor filename.' );
         		}
         	}
         }
         */
         // Create S3 instance.
         pb_backupbuddy::status('details', 'Creating Stash S3 instance.');
         $s3 = new AmazonS3($upload_data['credentials']);
         // the key, secret, token
         if ($disable_ssl === true) {
             @$s3->disable_ssl(true);
         }
         pb_backupbuddy::status('details', 'Stash S3 instance created.');
         // Handle chunking of file into a multipart upload (if applicable).
         $file_size = filesize($file);
         if ($max_chunk_size >= 5 && $file_size / 1024 / 1024 > $max_chunk_size) {
             // minimum chunk size is 5mb. Anything under 5mb we will not chunk.
             pb_backupbuddy::status('details', 'Stash file size of ' . $file_size / 1024 / 1024 . 'MB exceeds max chunk size of ' . $max_chunk_size . 'MB set in settings for sending file as multipart upload.');
             // Initiate multipart upload with S3.
             pb_backupbuddy::status('details', 'Initiating Stash multipart upload.');
             $response = $s3->initiate_multipart_upload($upload_data['bucket'], $upload_data['object'], array('encryption' => 'AES256'));
             if (!$response->isOK()) {
                 $this_error = 'Stash was unable to initiate multipart upload.';
                 $pb_backupbuddy_destination_errors[] = $this_error;
                 pb_backupbuddy::status('error', $this_error);
                 return false;
             } else {
                 $upload_id = (string) $response->body->UploadId;
                 pb_backupbuddy::status('details', 'Stash initiated multipart upload with ID `' . $upload_id . '`.');
             }
             // Get chunk parts for multipart transfer.
             pb_backupbuddy::status('details', 'Stash getting multipart counts.');
             $parts = $s3->get_multipart_counts($file_size, $max_chunk_size * 1024 * 1024);
             // Size of chunks expected to be in bytes.
             $multipart_destination_settings = $settings;
             $multipart_destination_settings['_multipart_id'] = $upload_id;
             $multipart_destination_settings['_multipart_partnumber'] = 0;
             $multipart_destination_settings['_multipart_file'] = $file;
             $multipart_destination_settings['_multipart_counts'] = $parts;
             $multipart_destination_settings['_multipart_upload_data'] = $upload_data;
             $multipart_destination_settings['_multipart_backup_type_dir'] = $backup_type_dir;
              pb_backupbuddy::status('details', 'Stash multipart settings to pass: ' . print_r($multipart_destination_settings, true));
              pb_backupbuddy::status('details', 'Stash scheduling send of next part(s).');
             wp_schedule_single_event(time(), pb_backupbuddy::cron_tag('destination_send'), array($multipart_destination_settings, $files, 'multipart', false));
             spawn_cron(time() + 150);
             // Adds > 60 seconds to get around once per minute cron running limit.
             update_option('_transient_doing_cron', 0);
             // Prevent cron-blocking for next item.
             pb_backupbuddy::status('details', 'Stash scheduled send of next part(s). Done for this cycle.');
             return array($upload_id, 'Starting send of ' . count($multipart_destination_settings['_multipart_counts']) . ' parts.');
         } else {
             if ($max_chunk_size != '0') {
                 pb_backupbuddy::status('details', 'File size of ' . $file_size / 1024 / 1024 . 'MB is less than the max chunk size of ' . $max_chunk_size . 'MB; not chunking into multipart upload.');
             } else {
                 pb_backupbuddy::status('details', 'Max chunk size set to zero so not chunking into multipart upload.');
             }
         }
         // SEND file.
         pb_backupbuddy::status('details', 'About to put (upload) object to Stash.');
         $response = $s3->create_object($upload_data['bucket'], $upload_data['object'], array('fileUpload' => $file, 'encryption' => 'AES256'));
         //  we can also utilize the multi-part-upload to create an object
         //  $response = $s3->create_mpu_object($upload_data['bucket'], $upload_data['object'], array('fileUpload'=>$upload_file));
         // Validate response. On failure notify Stash API that things went wrong.
         if (!$response->isOK()) {
             pb_backupbuddy::status('details', 'Sending upload abort.');
              if (isset($abort_url)) {
                  // $abort_url is only set when $clear_uploads was true; guard so a failed
                  // send does not reference an undefined variable here.
                  $request = new RequestCore($abort_url);
                  $response = $request->send_request(true);
              }
             $this_error = 'Could not upload to Stash, attempt aborted.';
             $pb_backupbuddy_destination_errors[] = $this_error;
             pb_backupbuddy::status('error', $this_error);
             return false;
         } else {
             //	pb_backupbuddy::status( 'details', 'Stash file upload speed: ' . ( $response->header['_info']['speed_upload'] / 1024 / 1024 ) . 'MB/sec. This number may be invalid for small file transfers.' );
              pb_backupbuddy::status('details', 'Stash put success. Need to notify Stash of upload completion. Details: `' . print_r($response, true) . '`.');
         }
         delete_transient('pb_backupbuddy_stashquota_' . $settings['itxapi_username']);
         // Delete quota transient since it probably has changed now.
          // Notify Stash API that things were successful.
         $done_url = $stash->get_upload_url($file, 'done', $remote_path . $backup_type_dir . basename($file));
         pb_backupbuddy::status('details', 'Notifying Stash of completed upload with done url `' . $done_url . '`.');
         $request = new RequestCore($done_url);
         $response = $request->send_request(true);
         if (!$response->isOK()) {
             $this_error = 'Error #756834682. Could not finalize Stash upload. Response code: `' . $response->get_response_code() . '`; Response body: `' . $response->get_response_body() . '`; Response headers: `' . $response->get_response_header() . '`.';
             $pb_backupbuddy_destination_errors[] = $this_error;
             pb_backupbuddy::status('error', $this_error);
             return false;
         } else {
             // Good server response.
             // See if we got an optional json response.
             $upload_data = @json_decode($response->body, true);
             if (isset($upload_data['error'])) {
                 // Some kind of error.
                 $this_error = 'Stash error(s): `' . implode(' - ', $upload_data['error']) . '`.';
                 $pb_backupbuddy_destination_errors[] = $this_error;
                 pb_backupbuddy::status('error', $this_error);
                 return false;
             }
             unset($files[$file_id]);
             // Remove from list of files we have not sent yet.
             pb_backupbuddy::status('details', 'Stash success sending file `' . basename($file) . '`. File uploaded and reported to Stash as completed.');
         }
         // Enforce archive limits if applicable.
         if ($backup_type == 'full') {
             $limit = $full_archive_limit;
             pb_backupbuddy::status('details', 'Stash full backup archive limit of `' . $limit . '` based on destination settings.');
         } elseif ($backup_type == 'db') {
             $limit = $db_archive_limit;
             pb_backupbuddy::status('details', 'Stash database backup archive limit of `' . $limit . '` based on destination settings.');
         } else {
             $limit = 0;
             pb_backupbuddy::status('error', 'Error #54854895. Stash was unable to determine backup type so archive limits NOT enforced for this backup.');
         }
         if ($limit > 0) {
             pb_backupbuddy::status('details', 'Stash archive limit enforcement beginning.');
             // S3 object for managing files.
             $s3_manage = new AmazonS3($manage_data['credentials']);
             if ($disable_ssl === true) {
                 @$s3_manage->disable_ssl(true);
             }
             // Get file listing.
             $response_manage = $s3_manage->list_objects($manage_data['bucket'], array('prefix' => $manage_data['subkey'] . $remote_path . $backup_type_dir));
             // list all the files in the subscriber account
             // Create array of backups and organize by date
             $prefix = pb_backupbuddy::$classes['core']->backup_prefix();
             // List backups associated with this site by date.
             $backups = array();
             foreach ($response_manage->body->Contents as $object) {
                 $file = str_replace($manage_data['subkey'] . $remote_path . $backup_type_dir, '', $object->Key);
                 // Stash stores files in a directory per site so no need to check prefix here! if ( false !== strpos( $file, 'backup-' . $prefix . '-' ) ) { // if backup has this site prefix...
                 $backups[$file] = strtotime($object->LastModified);
                 //}
             }
             arsort($backups);
             //error_log( 'backups: ' . print_r( $backups, true ) );
             pb_backupbuddy::status('details', 'Stash found `' . count($backups) . '` backups of this type when checking archive limits.');
             if (count($backups) > $limit) {
                 pb_backupbuddy::status('details', 'More archives (' . count($backups) . ') than limit (' . $limit . ') allows. Trimming...');
                 $i = 0;
                 $delete_fail_count = 0;
                 foreach ($backups as $buname => $butime) {
                     $i++;
                     if ($i > $limit) {
                         pb_backupbuddy::status('details', 'Trimming excess file `' . $buname . '`...');
                         $response = $s3_manage->delete_object($manage_data['bucket'], $manage_data['subkey'] . $remote_path . $backup_type_dir . $buname);
                         if (!$response->isOK()) {
                             pb_backupbuddy::status('details', 'Unable to delete excess Stash file `' . $buname . '`. Details: `' . print_r($response, true) . '`.');
                             $delete_fail_count++;
                         }
                     }
                 }
                 pb_backupbuddy::status('details', 'Finished trimming excess backups.');
                 if ($delete_fail_count !== 0) {
                     $error_message = 'Stash remote limit could not delete ' . $delete_fail_count . ' backups.';
                     pb_backupbuddy::status('error', $error_message);
                     pb_backupbuddy::$classes['core']->mail_error($error_message);
                 }
             }
             pb_backupbuddy::status('details', 'Stash completed archive limiting.');
         } else {
             pb_backupbuddy::status('details', 'No Stash archive file limit to enforce.');
         }
         // End remote backup limit
     }
     // end foreach.
     // Success if we made it this far.
     return true;
 }
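A minimal usage sketch for the method above. It is hedged: the class name pb_backupbuddy_destination_stash and the settings keys come from the code itself, but every value below is a hypothetical placeholder.

 // Hypothetical settings; keys mirror those read at the top of send().
 $settings = array(
     'itxapi_username'    => 'example-user',      // placeholder credentials
     'itxapi_password'    => 'example-password',
     'db_archive_limit'   => 5,
     'full_archive_limit' => 3,
     'max_chunk_size'     => 80,                  // in MB; 0 disables chunking
     'directory'          => 'example-dir',
     'ssl'                => '1',
     '_multipart_id'      => '',                  // empty string = not resuming a chunked send
     '_multipart_counts'  => array(),
 );
 $result = pb_backupbuddy_destination_stash::send($settings, '/tmp/backup-example-full-20130101.zip');
 if ($result === true) {
     // Single-pass upload completed and reported to Stash.
 } elseif (is_array($result)) {
     // Chunked: array( multipart ID, status message ); the next part was scheduled via WP cron.
 } else {
     // false: failure; details were appended to $pb_backupbuddy_destination_errors.
 }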
Example #2
 public static function send($settings = array(), $files = array(), $send_id = '', $clear_uploads = false)
 {
     global $pb_backupbuddy_destination_errors;
     if (!is_array($files)) {
         $files = array($files);
     }
     if ($clear_uploads === false) {
         // Uncomment the following line to override and always clear.
         //$clear_uploads = true;
     }
     $itxapi_username = $settings['itxapi_username'];
     $itxapi_password = $settings['itxapi_password'];
     $db_archive_limit = $settings['db_archive_limit'];
     $full_archive_limit = $settings['full_archive_limit'];
     $files_archive_limit = $settings['files_archive_limit'];
     $max_chunk_size = $settings['max_chunk_size'];
     $remote_path = self::get_remote_path($settings['directory']);
      // Has leading and trailing slashes.
     if ($settings['ssl'] == '0') {
         $disable_ssl = true;
     } else {
         $disable_ssl = false;
     }
     $multipart_id = $settings['_multipart_id'];
     $multipart_counts = $settings['_multipart_counts'];
     pb_backupbuddy::status('details', 'Stash remote path set to `' . $remote_path . '`.');
     require_once dirname(__FILE__) . '/lib/class.itx_helper.php';
     require_once dirname(dirname(__FILE__)) . '/_s3lib/aws-sdk/sdk.class.php';
     // Stash API talk.
     $stash = new ITXAPI_Helper(pb_backupbuddy_destination_stash::ITXAPI_KEY, pb_backupbuddy_destination_stash::ITXAPI_URL, $itxapi_username, $itxapi_password);
     $manage_data = pb_backupbuddy_destination_stash::get_manage_data($settings);
     if (!is_array($manage_data['credentials'])) {
         pb_backupbuddy::status('error', 'Error #8484383b: Your authentication credentials for Stash failed. Verify your login and password to Stash. You may need to update the Stash destination settings. Perhaps you recently changed your password?');
         return false;
     }
     // Wipe all current uploads.
     if ($clear_uploads === true) {
         pb_backupbuddy::status('details', 'Clearing any current uploads via Stash call to `abort-all`.');
         $abort_url = $stash->get_upload_url(null, 'abort-all');
         $request = new RequestCore($abort_url);
         $response = $request->send_request(true);
     }
     // Process multipart transfer that we already initiated in a previous PHP load.
     if ($multipart_id != '') {
         // Multipart upload initiated and needs parts sent.
         // Create S3 instance.
         pb_backupbuddy::status('details', 'Creating Stash S3 instance.');
         $s3 = new AmazonS3($settings['_multipart_upload_data']['credentials']);
         // the key, secret, token
         if ($disable_ssl === true) {
             @$s3->disable_ssl(true);
         }
         pb_backupbuddy::status('details', 'Stash S3 instance created.');
         $backup_type = str_replace('/', '', $settings['_multipart_backup_type_dir']);
         // For use later by file limiting.
         $this_part_number = $settings['_multipart_partnumber'] + 1;
         pb_backupbuddy::status('details', 'Stash beginning upload of part `' . $this_part_number . '` of `' . count($settings['_multipart_counts']) . '` parts of file `' . $settings['_multipart_file'] . '` with multipart ID `' . $settings['_multipart_id'] . '`.');
         $response = $s3->upload_part($settings['_multipart_upload_data']['bucket'], $settings['_multipart_upload_data']['object'], $settings['_multipart_id'], array('expect' => '100-continue', 'fileUpload' => $settings['_multipart_file'], 'partNumber' => $this_part_number, 'seekTo' => (int) $settings['_multipart_counts'][$settings['_multipart_partnumber']]['seekTo'], 'length' => (int) $settings['_multipart_counts'][$settings['_multipart_partnumber']]['length']));
         if (!$response->isOK()) {
             $this_error = 'Stash unable to upload file part for multipart upload `' . $settings['_multipart_id'] . '`. Details: `' . print_r($response, true) . '`.';
             $pb_backupbuddy_destination_errors[] = $this_error;
             pb_backupbuddy::status('error', $this_error);
             return false;
         } else {
             $uploaded_size = $response->header['_info']['size_upload'];
             $uploaded_speed = $response->header['_info']['speed_upload'];
             pb_backupbuddy::status('details', 'Uploaded size: ' . pb_backupbuddy::$format->file_size($uploaded_size) . ', Speed: ' . pb_backupbuddy::$format->file_size($uploaded_speed) . '/sec.');
         }
         // Load fileoptions to the send.
         pb_backupbuddy::status('details', 'About to load fileoptions data.');
         require_once pb_backupbuddy::plugin_path() . '/classes/fileoptions.php';
         $fileoptions_obj = new pb_backupbuddy_fileoptions(backupbuddy_core::getLogDirectory() . 'fileoptions/send-' . $send_id . '.txt', $read_only = false, $ignore_lock = false, $create_file = false);
         if (true !== ($result = $fileoptions_obj->is_ok())) {
             pb_backupbuddy::status('error', __('Fatal Error #9034.2344848. Unable to access fileoptions data.', 'it-l10n-backupbuddy') . ' Error: ' . $result);
             return false;
         }
         pb_backupbuddy::status('details', 'Fileoptions data loaded.');
         $fileoptions =& $fileoptions_obj->options;
         $update_status = 'Sent part ' . $this_part_number . ' of ' . count($settings['_multipart_counts']) . '.';
         // Made it here so success sending part. Increment for next part to send.
         $settings['_multipart_partnumber']++;
         if (!isset($settings['_multipart_counts'][$settings['_multipart_partnumber']])) {
             // No more parts exist for this file. Tell S3 the multipart upload is complete and move on.
             pb_backupbuddy::status('details', 'Stash getting parts with etags to notify S3 of completed multipart send.');
             $etag_parts = $s3->list_parts($settings['_multipart_upload_data']['bucket'], $settings['_multipart_upload_data']['object'], $settings['_multipart_id']);
             pb_backupbuddy::status('details', 'Stash got parts list. Notifying S3 of multipart upload completion.');
             $response = $s3->complete_multipart_upload($settings['_multipart_upload_data']['bucket'], $settings['_multipart_upload_data']['object'], $settings['_multipart_id'], $etag_parts);
             if (!$response->isOK()) {
                 $this_error = 'Stash unable to notify S3 of completion of all parts for multipart upload `' . $settings['_multipart_id'] . '`.';
                 $pb_backupbuddy_destination_errors[] = $this_error;
                 pb_backupbuddy::status('error', $this_error);
                 return false;
             } else {
                 pb_backupbuddy::status('details', 'Stash notified S3 of multipart completion.');
             }
             $backup_type_dir = $settings['_multipart_backup_type_dir'];
              // Notify Stash API that things were successful.
             $done_url = $stash->get_upload_url($settings['_multipart_file'], 'done', $remote_path . $backup_type_dir . basename($settings['_multipart_file']));
             pb_backupbuddy::status('details', 'Notifying Stash of completed multipart upload with done url `' . $done_url . '`.');
             $request = new RequestCore($done_url);
             $response = $request->send_request(true);
             if (!$response->isOK()) {
                 $this_error = 'Error #756834682. Could not finalize Stash upload. Response code: `' . $response->get_response_code() . '`; Response body: `' . $response->get_response_body() . '`; Response headers: `' . $response->get_response_header() . '`.';
                 $pb_backupbuddy_destination_errors[] = $this_error;
                 pb_backupbuddy::status('error', $this_error);
                 return false;
             } else {
                 // Good server response.
                 // See if we got an optional json response.
                 $upload_data = @json_decode($response->body, true);
                 if (isset($upload_data['error'])) {
                     $this_error = 'Stash error(s): `' . implode(' - ', $upload_data['error']) . '`.';
                     $pb_backupbuddy_destination_errors[] = $this_error;
                     pb_backupbuddy::status('error', $this_error);
                     return false;
                 }
                 pb_backupbuddy::status('details', 'Stash success sending file `' . basename($settings['_multipart_file']) . '`. File uploaded via multipart across `' . $this_part_number . '` parts and reported to Stash as completed.');
             }
             pb_backupbuddy::status('details', 'Stash has no more parts left for this multipart upload. Clearing multipart instance variables.');
             $settings['_multipart_partnumber'] = 0;
             $settings['_multipart_id'] = '';
             $settings['_multipart_file'] = '';
             $settings['_multipart_upload_data'] = array();
             $settings['_multipart_transferspeeds'][] = $uploaded_speed;
             // Overall upload speed average.
             $uploaded_speed = array_sum($settings['_multipart_transferspeeds']) / count($settings['_multipart_counts']);
             pb_backupbuddy::status('details', 'Upload speed average of all chunks: `' . pb_backupbuddy::$format->file_size($uploaded_speed) . '`.');
             $settings['_multipart_counts'] = array();
             // Update stats.
             $fileoptions['_multipart_status'] = $update_status;
             $fileoptions['finish_time'] = time();
             $fileoptions['status'] = 'success';
             if (isset($uploaded_speed)) {
                 $fileoptions['write_speed'] = $uploaded_speed;
             }
             $fileoptions_obj->save();
             unset($fileoptions);
         }
         delete_transient('pb_backupbuddy_stashquota_' . $settings['itxapi_username']);
         // Delete quota transient since it probably has changed now.
         // Schedule to continue if anything is left to upload for this multipart of any individual files.
         if ($settings['_multipart_id'] != '' || count($files) > 0) {
             pb_backupbuddy::status('details', 'Stash multipart upload has more parts left. Scheduling next part send.');
             $schedule_result = backupbuddy_core::schedule_single_event(time(), pb_backupbuddy::cron_tag('destination_send'), array($settings, $files, $send_id));
             if (true === $schedule_result) {
                 pb_backupbuddy::status('details', 'Next Stash chunk step cron event scheduled.');
             } else {
                  pb_backupbuddy::status('error', 'Next Stash chunk step cron event FAILED to be scheduled.');
             }
             spawn_cron(time() + 150);
             // Adds > 60 seconds to get around once per minute cron running limit.
             update_option('_transient_doing_cron', 0);
             // Prevent cron-blocking for next item.
             return array($settings['_multipart_id'], 'Sent part ' . $this_part_number . ' of ' . count($settings['_multipart_counts']) . ' parts.');
         }
     }
     // end if multipart continuation.
     require_once pb_backupbuddy::plugin_path() . '/classes/fileoptions.php';
     // Upload each file.
     foreach ($files as $file_id => $file) {
         // Determine backup type directory (if zip).
         $backup_type_dir = '';
         $backup_type = '';
         if (stristr($file, '.zip') !== false) {
             // If a zip try to determine backup type.
             pb_backupbuddy::status('details', 'Stash: Zip file. Detecting backup type if possible.');
             $serial = backupbuddy_core::get_serial_from_file($file);
             // See if we can get backup type from fileoptions data.
             $backup_options = new pb_backupbuddy_fileoptions(backupbuddy_core::getLogDirectory() . 'fileoptions/' . $serial . '.txt', $read_only = true, $ignore_lock = true);
             if (true !== ($result = $backup_options->is_ok())) {
                 pb_backupbuddy::status('error', 'Unable to open fileoptions file `' . backupbuddy_core::getLogDirectory() . 'fileoptions/' . $serial . '.txt' . '`.');
             } else {
                 if (isset($backup_options->options['integrity']['detected_type'])) {
                     pb_backupbuddy::status('details', 'Stash: Detected backup type as `' . $backup_options->options['integrity']['detected_type'] . '` via integrity check data.');
                     $backup_type_dir = $backup_options->options['integrity']['detected_type'] . '/';
                     $backup_type = $backup_options->options['integrity']['detected_type'];
                 }
             }
             // If still do not know backup type then attempt to deduce it from filename.
             if ($backup_type == '') {
                 if (stristr($file, '-db-') !== false) {
                     pb_backupbuddy::status('details', 'Stash: Detected backup type as `db` via filename.');
                     $backup_type_dir = 'db/';
                     $backup_type = 'db';
                 } elseif (stristr($file, '-full-') !== false) {
                     pb_backupbuddy::status('details', 'Stash: Detected backup type as `full` via filename.');
                     $backup_type_dir = 'full/';
                     $backup_type = 'full';
                 } elseif (stristr($file, '-files-') !== false) {
                     pb_backupbuddy::status('details', 'Stash: Detected backup type as `files` via filename.');
                     $backup_type_dir = 'files/';
                     $backup_type = 'files';
                 } else {
                     pb_backupbuddy::status('details', 'Stash: Could not detect backup type via integrity details nor filename.');
                 }
             }
         }
         // Interact with Stash API.
          pb_backupbuddy::status('details', 'Determining Stash upload URL for `' . $file . '` with destination remote path `' . $remote_path . $backup_type_dir . basename($file) . '`.');
         $upload_url = $stash->get_upload_url($file, 'request', $remote_path . $backup_type_dir . basename($file));
         pb_backupbuddy::status('details', 'Determined upload url: `' . $upload_url . '`.');
         $request = new RequestCore($upload_url);
         pb_backupbuddy::status('details', 'Sending Stash API request.');
         $response = $request->send_request(true);
         // Validate response.
         if (!$response->isOK()) {
             $this_error = 'Stash request for upload credentials failed.';
             $pb_backupbuddy_destination_errors[] = $this_error;
             pb_backupbuddy::status('error', $this_error);
             return false;
         }
         if (!($upload_data = json_decode($response->body, true))) {
             $this_error = 'Stash API did not give a valid JSON response.';
             $pb_backupbuddy_destination_errors[] = $this_error;
             pb_backupbuddy::status('error', $this_error);
             return false;
         }
         if (isset($upload_data['error'])) {
             $this_error = 'Stash error(s): `' . implode(' - ', $upload_data['error']) . '`.';
             $pb_backupbuddy_destination_errors[] = $this_error;
             pb_backupbuddy::status('error', $this_error);
             return false;
         }
         // Create S3 instance.
         pb_backupbuddy::status('details', 'Creating Stash S3 instance.');
         $s3 = new AmazonS3($upload_data['credentials']);
         // the key, secret, token
         if ($disable_ssl === true) {
             @$s3->disable_ssl(true);
         }
         pb_backupbuddy::status('details', 'Stash S3 instance created.');
         // Handle chunking of file into a multipart upload (if applicable).
         $file_size = filesize($file);
         if ($max_chunk_size >= self::MINIMUM_CHUNK_SIZE && $file_size / 1024 / 1024 > $max_chunk_size) {
             // minimum chunk size is 5mb. Anything under 5mb we will not chunk.
             pb_backupbuddy::status('details', 'Stash file size of ' . pb_backupbuddy::$format->file_size($file_size) . ' exceeds max chunk size of ' . $max_chunk_size . 'MB set in settings for sending file as multipart upload.');
             // Initiate multipart upload with S3.
             pb_backupbuddy::status('details', 'Initiating Stash multipart upload.');
             $response = $s3->initiate_multipart_upload($upload_data['bucket'], $upload_data['object'], array('encryption' => 'AES256'));
             if (!$response->isOK()) {
                 $this_error = 'Stash was unable to initiate multipart upload.';
                 $pb_backupbuddy_destination_errors[] = $this_error;
                 pb_backupbuddy::status('error', $this_error);
                 return false;
             } else {
                 $upload_id = (string) $response->body->UploadId;
                 pb_backupbuddy::status('details', 'Stash initiated multipart upload with ID `' . $upload_id . '`.');
             }
             // Get chunk parts for multipart transfer.
             pb_backupbuddy::status('details', 'Stash getting multipart counts.');
             $parts = $s3->get_multipart_counts($file_size, $max_chunk_size * 1024 * 1024);
             // Size of chunks expected to be in bytes.
             $multipart_destination_settings = $settings;
             $multipart_destination_settings['_multipart_id'] = $upload_id;
             $multipart_destination_settings['_multipart_partnumber'] = 0;
             $multipart_destination_settings['_multipart_file'] = $file;
             $multipart_destination_settings['_multipart_counts'] = $parts;
             $multipart_destination_settings['_multipart_upload_data'] = $upload_data;
             $multipart_destination_settings['_multipart_backup_type_dir'] = $backup_type_dir;
              pb_backupbuddy::status('details', 'Stash multipart settings to pass: ' . print_r($multipart_destination_settings, true));
              pb_backupbuddy::status('details', 'Stash scheduling send of next part(s).');
             backupbuddy_core::schedule_single_event(time(), pb_backupbuddy::cron_tag('destination_send'), array($multipart_destination_settings, $files, $send_id));
             spawn_cron(time() + 150);
             // Adds > 60 seconds to get around once per minute cron running limit.
             update_option('_transient_doing_cron', 0);
             // Prevent cron-blocking for next item.
             pb_backupbuddy::status('details', 'Stash scheduled send of next part(s). Done for this cycle.');
             return array($upload_id, 'Starting send of ' . count($multipart_destination_settings['_multipart_counts']) . ' parts.');
         } else {
             // did not meet chunking criteria.
             if ($max_chunk_size != '0') {
                 if ($file_size / 1024 / 1024 > self::MINIMUM_CHUNK_SIZE) {
                     pb_backupbuddy::status('details', 'File size of ' . pb_backupbuddy::$format->file_size($file_size) . ' is less than the max chunk size of ' . $max_chunk_size . 'MB; not chunking into multipart upload.');
                 } else {
                     pb_backupbuddy::status('details', 'File size of ' . pb_backupbuddy::$format->file_size($file_size) . ' is less than the minimum allowed chunk size of ' . self::MINIMUM_CHUNK_SIZE . 'MB; not chunking into multipart upload.');
                 }
             } else {
                 pb_backupbuddy::status('details', 'Max chunk size set to zero so not chunking into multipart upload.');
             }
         }
         // SEND file.
         pb_backupbuddy::status('details', 'About to put (upload) object to Stash.');
         $response = $s3->create_object($upload_data['bucket'], $upload_data['object'], array('fileUpload' => $file, 'encryption' => 'AES256'));
         // Validate response. On failure notify Stash API that things went wrong.
         if (!$response->isOK()) {
             // Send FAILED.
             pb_backupbuddy::status('details', 'Sending upload abort.');
              if (isset($abort_url)) {
                  // $abort_url is only set when $clear_uploads was true; guard so a failed
                  // send does not reference an undefined variable here.
                  $request = new RequestCore($abort_url);
                  $response = $request->send_request(true);
              }
             $this_error = 'Could not upload to Stash, attempt aborted.';
             $pb_backupbuddy_destination_errors[] = $this_error;
             pb_backupbuddy::status('error', $this_error);
             return false;
         } else {
             // Send SUCCESS.
             pb_backupbuddy::status('details', 'Success uploading file to Stash storage. Notifying Stash API next. Upload details: `' . print_r($response, true) . '`.');
             $uploaded_size = $response->header['_info']['size_upload'];
             $uploaded_speed = $response->header['_info']['speed_upload'];
             pb_backupbuddy::status('details', 'Uploaded size: ' . pb_backupbuddy::$format->file_size($uploaded_size) . ', Speed: ' . pb_backupbuddy::$format->file_size($uploaded_speed) . '/sec.');
         }
         delete_transient('pb_backupbuddy_stashquota_' . $settings['itxapi_username']);
         // Delete quota transient since it probably has changed now.
          // Notify Stash API that things were successful.
         $done_url = $stash->get_upload_url($file, 'done', $remote_path . $backup_type_dir . basename($file));
         pb_backupbuddy::status('details', 'Notifying Stash of completed upload with done url `' . $done_url . '`.');
         $request = new RequestCore($done_url);
         $response = $request->send_request(true);
         if (!$response->isOK()) {
             $this_error = 'Error #247568834682. Could not finalize Stash upload. Response code: `' . $response->get_response_code() . '`; Response body: `' . $response->get_response_body() . '`; Response headers: `' . $response->get_response_header() . '`.';
             $pb_backupbuddy_destination_errors[] = $this_error;
             pb_backupbuddy::status('error', $this_error);
             return false;
         } else {
             // Good server response.
             // See if we got an optional json response.
             $upload_data = @json_decode($response->body, true);
             if (isset($upload_data['error'])) {
                 // Some kind of error.
                 $this_error = 'Stash error(s): `' . implode(' - ', $upload_data['error']) . '`.';
                 $pb_backupbuddy_destination_errors[] = $this_error;
                 pb_backupbuddy::status('error', $this_error);
                 return false;
             }
             unset($files[$file_id]);
             // Remove from list of files we have not sent yet.
             pb_backupbuddy::status('details', 'Stash success sending file `' . basename($file) . '`. File uploaded and reported to Stash as completed.');
             // Load destination fileoptions.
             pb_backupbuddy::status('details', 'About to load fileoptions data.');
             require_once pb_backupbuddy::plugin_path() . '/classes/fileoptions.php';
             $fileoptions_obj = new pb_backupbuddy_fileoptions(backupbuddy_core::getLogDirectory() . 'fileoptions/send-' . $send_id . '.txt', $read_only = false, $ignore_lock = false, $create_file = false);
             if (true !== ($result = $fileoptions_obj->is_ok())) {
                 pb_backupbuddy::status('error', __('Fatal Error #9034.84838. Unable to access fileoptions data.', 'it-l10n-backupbuddy') . ' Error: ' . $result);
                 return false;
             }
             pb_backupbuddy::status('details', 'Fileoptions data loaded.');
             $fileoptions =& $fileoptions_obj->options;
             // Save stats.
             if (isset($uploaded_speed)) {
                 $fileoptions['write_speed'] = $uploaded_speed;
                 $fileoptions_obj->save();
             }
             //$fileoptions['finish_time'] = time();
             //$fileoptions['status'] = 'success';
             unset($fileoptions_obj);
         }
     }
     // end foreach.
     // BEGIN FILE LIMIT PROCESSING. Enforce archive limits if applicable.
     if ($backup_type == 'full') {
         $limit = $full_archive_limit;
         pb_backupbuddy::status('details', 'Stash full backup archive limit of `' . $limit . '` of type `full` based on destination settings.');
     } elseif ($backup_type == 'db') {
         $limit = $db_archive_limit;
         pb_backupbuddy::status('details', 'Stash database backup archive limit of `' . $limit . '` of type `db` based on destination settings.');
      } elseif ($backup_type == 'files') {
          $limit = $files_archive_limit;
          pb_backupbuddy::status('details', 'Stash files backup archive limit of `' . $limit . '` of type `files` based on destination settings.');
     } else {
         $limit = 0;
         pb_backupbuddy::status('warning', 'Warning #54854895. Stash was unable to determine backup type (reported: `' . $backup_type . '`) so archive limits NOT enforced for this backup.');
     }
     if ($limit > 0) {
         pb_backupbuddy::status('details', 'Stash archive limit enforcement beginning.');
         // S3 object for managing files.
         $s3_manage = new AmazonS3($manage_data['credentials']);
         if ($disable_ssl === true) {
             @$s3_manage->disable_ssl(true);
         }
         // Get file listing.
         $response_manage = $s3_manage->list_objects($manage_data['bucket'], array('prefix' => $manage_data['subkey'] . $remote_path . $backup_type_dir));
         // list all the files in the subscriber account
         // Create array of backups and organize by date
         $prefix = backupbuddy_core::backup_prefix();
         // List backups associated with this site by date.
         $backups = array();
         foreach ($response_manage->body->Contents as $object) {
             $file = str_replace($manage_data['subkey'] . $remote_path . $backup_type_dir, '', $object->Key);
             // Stash stores files in a directory per site so no need to check prefix here! if ( false !== strpos( $file, 'backup-' . $prefix . '-' ) ) { // if backup has this site prefix...
             $backups[$file] = strtotime($object->LastModified);
         }
         arsort($backups);
         pb_backupbuddy::status('details', 'Stash found `' . count($backups) . '` backups of this type when checking archive limits.');
         if (count($backups) > $limit) {
             pb_backupbuddy::status('details', 'More archives (' . count($backups) . ') than limit (' . $limit . ') allows. Trimming...');
             $i = 0;
             $delete_fail_count = 0;
             foreach ($backups as $buname => $butime) {
                 $i++;
                 if ($i > $limit) {
                     pb_backupbuddy::status('details', 'Trimming excess file `' . $buname . '`...');
                     $response = $s3_manage->delete_object($manage_data['bucket'], $manage_data['subkey'] . $remote_path . $backup_type_dir . $buname);
                     if (!$response->isOK()) {
                         pb_backupbuddy::status('details', 'Unable to delete excess Stash file `' . $buname . '`. Details: `' . print_r($response, true) . '`.');
                         $delete_fail_count++;
                     }
                 }
             }
             pb_backupbuddy::status('details', 'Finished trimming excess backups.');
             if ($delete_fail_count !== 0) {
                 $error_message = 'Stash remote limit could not delete ' . $delete_fail_count . ' backups.';
                 pb_backupbuddy::status('error', $error_message);
                 backupbuddy_core::mail_error($error_message);
             }
         }
         pb_backupbuddy::status('details', 'Stash completed archive limiting.');
     } else {
         pb_backupbuddy::status('details', 'No Stash archive file limit to enforce.');
     }
     // End remote backup limit
     if (isset($fileoptions_obj)) {
         unset($fileoptions_obj);
     }
     // END FILE LIMIT PROCESSING.
     // Success if we made it this far.
     return true;
 }
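Example #2 threads a $send_id through to a fileoptions file that records transfer status and write speed. Below is a hedged sketch of the calling convention and of how a chunked transfer re-enters the method; identifiers not visible above ($zip_file, the literal send ID) are placeholders.

 // Hypothetical call; $send_id keys the file .../fileoptions/send-<send_id>.txt.
 $send_id = 'example-send-id';
 $result = pb_backupbuddy_destination_stash::send($settings, array($zip_file), $send_id);
 if (is_array($result)) {
     // Multipart was initiated. The method has already scheduled the cron hook
     // pb_backupbuddy::cron_tag('destination_send') with the mutated settings
     // (_multipart_id, _multipart_partnumber, _multipart_counts, ...), so each
     // later PHP load uploads exactly one chunk and reschedules itself until done.
     list($multipart_id, $status_message) = $result;
 }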
Example #3
 function amazons3_backup_bwd_comp($historyID, $args = '')
 {
     $s3StartTime = $this->iwpScriptStartTime;
     $this->backup_settings_vals = get_option('iwp_client_multi_backup_temp_values');
     $backup_settings_values = $this->backup_settings_vals;
     if (isset($backup_settings_values['s3_retrace_count']) && !empty($backup_settings_values['s3_retrace_count'])) {
         $s3_retrace_count = $backup_settings_values['s3_retrace_count'][$historyID];
     } else {
         $s3_retrace_count = 0;
     }
      // Get the settings by another method (from the stored request parameters).
     $requestParams = $this->getRequiredData($historyID, "requestParams");
     $upload_loop_break_time = $requestParams['account_info']['upload_loop_break_time'];
     //darkcode changed
     $upload_file_block_size = $requestParams['account_info']['upload_file_block_size'];
     if ($upload_file_block_size < 5 * 1024 * 1024) {
         $upload_file_block_size = 5 * 1024 * 1024 + 1;
     }
     $del_host_file = $requestParams['args']['del_host_file'];
     $task_result = $this->getRequiredData($historyID, "taskResults");
     @set_time_limit(0);
     $this->hisID = $historyID;
     $uploadLoopCount = 0;
     $upload_id = 'start';
     $partsArray = array();
     $nextPart = 1;
     $retrace = 'notSet';
     $doComplete = false;
     if ($args == '') {
         //on the next call $args would be ''
         //set $args, $uploadid, $offset  from the DB
         $responseParams = $this->getRequiredData($historyID, "responseParams");
         if (!$responseParams) {
             return $this->statusLog($this->hisID, array('stage' => 's3Upload', 'status' => 'error', 'statusMsg' => 'S3 Upload failed: Error while fetching table data.', 'statusCode' => 's3_upload_failed_error_while_fetching_table_data'));
         }
         $args = $responseParams['s3Args'];
         $prevChunkResults = $responseParams['response_data'];
         $upload_id = $prevChunkResults['upload_id'];
         $nextPart = $prevChunkResults['nextPart'];
         $partsArray = $prevChunkResults['partsArray'];
         $current_file_num = $responseParams['current_file_num'];
         $dont_retrace = $responseParams['dont_retrace'];
         $start_new_backup = $responseParams['start_new_backup'];
     }
     if (empty($current_file_num)) {
         $current_file_num = 0;
     }
      // Retrace options and setting values.
     if (!$upload_id && empty($dont_retrace)) {
         if ($s3_retrace_count <= 3) {
             $args = $requestParams['secure']['account_info']['iwp_amazon_s3'];
             if ($backup_settings_values['s3_upload_id']) {
                 $upload_id = $backup_settings_values['s3_upload_id'][$historyID];
             } else {
                 return $this->statusLog($this->hisID, array('stage' => 's3Upload Retrace', 'status' => 'error', 'statusMsg' => 'S3 Upload failed: Error while fetching table data during retrace', 'statusCode' => 's3_upload_failed_error_while_fetching_table_data_during_retrace'));
             }
             $backup_file = $backup_settings_values['backup_file'];
             $retrace = 'set';
             $s3_retrace_count++;
             $backup_settings_values['s3_retrace_count'][$historyID] = $s3_retrace_count;
             update_option('iwp_client_multi_backup_temp_values', $backup_settings_values);
         } else {
             return $this->statusLog($this->hisID, array('stage' => 's3Upload', 'status' => 'error', 'statusMsg' => 'S3 upload failed: Retrace limit reached.', 'statusCode' => 's3_upload_failed_retrace_limit_reached'));
         }
     }
     if (!$this->iwp_mmb_function_exists('curl_init')) {
         return array('error' => 'You cannot use Amazon S3 on your server. Please enable curl first.', 'partial' => 1, 'error_code' => 'cannot_use_s3_enable_curl_first');
     }
     require_once $GLOBALS['iwp_mmb_plugin_dir'] . '/lib/amazon_s3_bwd_comp/sdk.class.php';
     $tempArgs = $args;
     extract($args);
     // Normalise $backup_file: it may arrive as an array of split-zip parts,
     // but this call only uploads the file at $current_file_num.
     if (!is_array($backup_file)) {
         $backup_file = array($backup_file);
     }
     $backup_files_count = count($backup_file);
     $backup_file = $backup_file[$current_file_num];
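     // Optionally nest the upload under a per-site folder inside the
     // configured S3 directory.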
     if ($as3_site_folder == true) {
         if (!empty($as3_directory)) {
             $as3_directory .= '/' . $this->site_name;
         } else {
             $as3_directory = $this->site_name;
         }
     }
     try {
         CFCredentials::set(array('development' => array('key' => trim($as3_access_key), 'secret' => trim(str_replace(' ', '+', $as3_secure_key)), 'default_cache_config' => '', 'certificate_authority' => true, 'use_ssl' => false, 'ssl_verification' => false), '@default' => 'development'));
         $s3 = new AmazonS3();
         $cfu_obj = new CFUtilities();
         //the multipart (multiCall) upload starts here
         //$this->statusLog($this -> hisID, array('stage' => 'uploadingFiles', 'status' => 'partiallyCompleted', 'statusMsg' => 's3MultiCallStartsHere'));
         if (!empty($as3_directory)) {
             $as3_file = $as3_directory . '/' . basename($backup_file);
         } else {
             $as3_file = basename($backup_file);
         }
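         // Files of 5 MB or less skip the multipart machinery entirely and go
         // up in a single create_object() PUT request.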
         if (iwp_mmb_get_file_size($backup_file) <= 5 * 1024 * 1024) {
             echo "<br>small backup so single upload<br>";
             $response = $s3->create_object($as3_bucket, $as3_file, array('fileUpload' => $backup_file));
             if ($response->isOK()) {
                 $current_file_num += 1;
                 $resArray = array('status' => "completed", 'backupParentHID' => $historyID);
                 $result_arr = array();
                 $result_arr['status'] = 'completed';
                 $result_arr['nextFunc'] = 'amazons3_backup_over';
                 $result_arr['s3Args'] = $tempArgs;
                 $result_arr['current_file_num'] = $current_file_num;
                 $result_arr['dont_retrace'] = true;
                 $task_result['task_results'][$historyID]['amazons3'][$current_file_num - 1] = basename($backup_file);
                 $task_result['amazons3'][$current_file_num - 1] = basename($backup_file);
                 if ($current_file_num >= $backup_files_count) {
                     unset($task_result['task_results'][$historyID]['server']);
                     @unlink($backup_file);
                 } else {
                     //more split-zip parts remain: reset the chunk state and queue the next file
                     $resArray['status'] = 'partiallyCompleted';
                     $chunkResult = array();
                     $chunkResult['partsArray'] = array();
                     $chunkResult['nextPart'] = 1;
                     $chunkResult['upload_id'] = 'start';
                     $result_arr['response_data'] = $chunkResult;
                     $result_arr['nextFunc'] = 'amazons3_backup';
                     $result_arr['status'] = 'partiallyCompleted';
                     $result_arr['start_new_backup'] = true;
                     @unlink($backup_file);
                 }
                 $this->statusLog($this->hisID, array('stage' => 's3MultiCall', 'status' => 'completed', 'statusMsg' => 'nextCall', 'nextFunc' => 'amazons3_backup', 'task_result' => $task_result, 'responseParams' => $result_arr));
                 return $resArray;
             } else {
                 return array('error' => 'Failed to upload to Amazon S3.');
             }
         }
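         // 'start' is the sentinel for a brand-new upload: initiate the
         // multipart upload, then persist the returned UploadId so an
         // interrupted run can retrace it later.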
         if ($upload_id == 'start') {
             echo "initiating multiCall upload";
             //initiate the multiPartUpload to get the uploadID from its response
             $response = $s3->initiate_multipart_upload($as3_bucket, $as3_file);
             //createMultipartUpload
             //convert the response into an array
             $response_array = $cfu_obj->convert_response_to_array($response);
             //get the uploadID
             $upload_id = $response_array['body']['UploadId'];
             //storing the uploadID in DB
             $backup_settings_values['s3_upload_id'][$historyID] = $upload_id;
             $backup_settings_values['backup_file'] = $backup_file;
             update_option('iwp_client_multi_backup_temp_values', $backup_settings_values);
         }
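         // get_multipart_counts() splits the file size into an array of
         // ('seekTo', 'length') windows, one per part, each
         // $upload_file_block_size bytes long.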
         $parts = $s3->get_multipart_counts(iwp_mmb_get_file_size($backup_file), $upload_file_block_size);
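         // Retrace: ask S3 which parts it already received (list_parts) and
         // resume from the next part number instead of starting over.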
         if ($retrace == 'set') {
             $list_parts_response = $s3->list_parts($as3_bucket, $as3_file, $upload_id);
             $partsArray = CFUtilities::convert_response_to_array($list_parts_response);
             $nextPart = count($partsArray) + 1;
             $this->statusLog($this->hisID, array('stage' => 's3MultiCall', 'status' => 'partiallyCompleted', 'statusMsg' => 'retracingValues', 'nextFunc' => 'amazons3_backup', 'task_result' => $task_result, 'responseParams' => $result_arr));
             $retrace = 'unset';
         }
         //main chunk-upload loop: break out when the time limit is reached
         $partsArraySize = count($parts);
         $s3ChunkTimeTaken = 0;
         $s3ChunkCount = 0;
         $reloop = false;
         $reloopCount = 0;
         $status = '';
         $singleUploadResult = true; // flips to false if a part upload fails (checked in the loop-exit test)
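         // Progress (collected ETags, next part number, UploadId) is persisted
         // after every part, so a timeout between parts never loses work.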
         do {
             $uploadLoopCount = 0;
             if ($reloopCount == 0) {
                 $s3ChunkStartTime = $s3StartTime;
             } else {
                 $s3ChunkStartTime = microtime(true);
             }
             foreach ($parts as $i => $part) {
                 $uploadLoopCount += 1;
                 if ($uploadLoopCount == $nextPart) {
                     $singleUploadResponse = $s3->upload_part($as3_bucket, $as3_file, $upload_id, array('fileUpload' => $backup_file, 'partNumber' => $i + 1, 'seekTo' => $part['seekTo'], 'length' => $part['length']));
                     $singleUploadResult = $singleUploadResponse->isOk();
                     echo "singleUploadResult - " . $singleUploadResult;
                     $singleUploadResponseArray = $cfu_obj->convert_response_to_array($singleUploadResponse);
                     /* $response = $s3->complete_multipart_upload($bucket, $filename, $upload_id, array(
                     				array('PartNumber' => 1, 'ETag' => '"25e317773f308e446cc84c503a6d1f85"'),
                     				array('PartNumber' => 2, 'ETag' => '"a6d1f85f58498973f308e446cc84c503"'),
                     				array('PartNumber' => 3, 'ETag' => '"bed3c0a4a1407f584989b4009e9ce33f"'),
                     			)); */
                     $nextPart = $uploadLoopCount;
                     $partsArray[$i + 1]['PartNumber'] = $i + 1;
                     $partsArray[$i + 1]['ETag'] = $singleUploadResponseArray['header']['etag'];
                     $chunkResult = array();
                     $chunkResult['partsArray'] = $partsArray;
                     $chunkResult['nextPart'] = $nextPart + 1;
                     $chunkResult['upload_id'] = $upload_id;
                     $nextPart = $nextPart + 1;
                     $backup_settings_values['s3_retrace_count'][$historyID] = 0;
                     update_option('iwp_client_multi_backup_temp_values', $backup_settings_values);
                     $status = 'partiallyCompleted';
                     if ($nextPart == $partsArraySize + 1) {
                         $doComplete = true;
                         $status = 'completed';
                     }
                     $result_arr = array();
                     $result_arr['response_data'] = $chunkResult;
                     $result_arr['status'] = $status;
                     $result_arr['nextFunc'] = 'amazons3_backup';
                     $result_arr['s3Args'] = $tempArgs;
                     $result_arr['current_file_num'] = $current_file_num;
                     $task_result['task_results'][$historyID]['amazons3'][$current_file_num] = basename($backup_file);
                     $task_result['amazons3'][$current_file_num] = basename($backup_file);
                     $this->statusLog($this->hisID, array('stage' => 's3MultiCall', 'status' => 'completed', 'statusMsg' => 'nextCall', 'nextFunc' => 'amazons3_backup', 'task_result' => $task_result, 'responseParams' => $result_arr));
                     $resArray = array('status' => $status, 'backupParentHID' => $historyID);
                     /* $resArray = array (
                     			  'status' => 'completed',
                     			  'backupParentHID' => $historyID,
                     			); */
                     break;
                     //return $resArray;
                     //exit;
                 } else {
                     if ($nextPart == $partsArraySize + 1) {
                         $doComplete = true;
                         break;
                     }
                 }
             }
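             // complete_multipart_upload() needs the full PartNumber/ETag list
             // collected above; if S3 rejects it, abort the upload so the
             // orphaned parts are discarded rather than left billable.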
             if ($doComplete) {
                 // complete the multipart upload
                 $response = $s3->complete_multipart_upload($as3_bucket, $as3_file, $upload_id, $partsArray);
                 if ($response->isOK() != true) {
                     $response = $s3->abort_multipart_upload($as3_bucket, $as3_file, $upload_id);
                 }
                 $response_array = $cfu_obj->convert_response_to_array($response);
                 $current_file_num += 1;
                 $result_arr = array();
                 $result_arr['response_data'] = $chunkResult;
                 $result_arr['status'] = 'completed';
                 $result_arr['nextFunc'] = 'amazons3_backup_over';
                 $result_arr['s3Args'] = $tempArgs;
                 $result_arr['dont_retrace'] = true;
                 $result_arr['current_file_num'] = $current_file_num;
                 $resArray = array('status' => 'completed', 'backupParentHID' => $historyID);
                 if ($current_file_num >= $backup_files_count) {
                     $task_result['task_results'][$historyID]['amazons3'][$current_file_num - 1] = basename($backup_file);
                     $task_result['amazons3'][$current_file_num - 1] = basename($backup_file);
                     unset($task_result['task_results'][$historyID]['server']);
                 } else {
                     //more split-zip parts remain: reset the chunk state and queue the next file
                     $status = 'partiallyCompleted';
                     $chunkResult = array();
                     $chunkResult['partsArray'] = array();
                     $chunkResult['nextPart'] = 1;
                     $chunkResult['upload_id'] = 'start';
                     $result_arr['response_data'] = $chunkResult;
                     $result_arr['status'] = 'partiallyCompleted';
                     $result_arr['nextFunc'] = 'amazons3_backup';
                     $result_arr['start_new_backup'] = true;
                     $resArray['status'] = 'partiallyCompleted';
                 }
                 $this->statusLog($this->hisID, array('stage' => 's3MultiCall', 'status' => 'completed', 'statusMsg' => 'finalCall', 'nextFunc' => 'amazons3_backup', 'task_result' => $task_result, 'responseParams' => $result_arr));
                 $upload = $response->isOk();
             }
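             // Time budgeting: compare the running per-chunk average against
             // what remains of $upload_loop_break_time (less a 5-second safety
             // margin) to decide whether another chunk fits in this request.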
             //check time
             $s3ChunkEndTime = microtime(true);
             $s3ChunkTimeTaken = ($s3ChunkEndTime - $s3ChunkStartTime + $s3ChunkTimeTaken) / ($reloopCount + 1); // running average of per-chunk upload time
             $s3EndTime = microtime(true);
             $s3TimeTaken = $s3EndTime - $s3StartTime;
             $s3TimeLeft = $upload_loop_break_time - $s3TimeTaken;
             $s3TimeLeft = $s3TimeLeft - 5;
             //for safe timeLimit
             if (!empty($chunkResult['nextPart'])) {
                 echo 'parts' . $chunkResult['nextPart'];
             }
             echo " s3TimeTaken " . $s3TimeTaken;
             $s3UploadedSize = $uploadLoopCount * 5; // rough progress in MB, assuming ~5 MB parts
             echo " s3 approx file size written " . $s3UploadedSize;
             iwp_mmb_print_flush("s3loop");
             echo " s3TimeLeft " . $s3TimeLeft;
             echo " s3ChunkTimeTaken " . $s3ChunkTimeTaken;
             if ($s3TimeLeft <= $s3ChunkTimeTaken || !$singleUploadResult || $doComplete) {
                 $reloop = false;
                 echo "reloop stopped";
             } else {
                 $reloop = true;
                 $reloopCount++;
             }
         } while ($reloop);
         if (!$doComplete) {
             return $resArray;
         }
         if ($doComplete && $upload) {
             $status = 'completed';
             iwp_mmb_print_flush('Amazon S3 upload: End');
             if ($status == 'completed') {
                 //post-upload verification: compare the size and hash of the
                 //local archive against the object now stored on S3
                 $partArrayLength = count($partsArray);
                 $verificationResult = $this->postUploadVerification($s3, $backup_file, $as3_file, "amazons3", $as3_bucket);
                 if (!$verificationResult) {
                     return $this->statusLog($historyID, array('stage' => 'uploadAmazons3', 'status' => 'error', 'statusMsg' => 'S3 verification failed: File may be corrupted.', 'statusCode' => 'docomplete_S3_verification_failed_file_may_be_corrupted'));
                 }
                 if ($del_host_file) {
                     @unlink($backup_file);
                 }
             }
             return $resArray;
         } else {
             return array('error' => 'Failed to upload to Amazon S3. Please check your details and set upload/delete permissions on your bucket.', 'partial' => 1, 'error_code' => 'failed_to_upload_to_s3_check_your_details_and_set_upload_delete_permissions_on_your_bucket');
         }
     } catch (Exception $e) {
         $err = $e->getMessage();
         if ($err) {
             return array('error' => 'Failed to upload to Amazon S3 (' . $err . ').', 'error_code' => 'failed_to_upload_s3_err');
         } else {
             return array('error' => 'Failed to upload to Amazon S3.', 'error_code' => 'failed_to_upload_s3');
         }
     }
 }