Example #1
 /**
  * {@inheritdoc}
  */
 public function store(BinaryInterface $binary, $path, $filter)
 {
     $objectPath = $this->getObjectPath($path, $filter);
      $storageResponse = $this->storage->create_object($this->bucket, $objectPath, array(
          'body'        => $binary->getContent(),
          'contentType' => $binary->getMimeType(),
          'length'      => strlen($binary->getContent()),
          'acl'         => $this->acl,
      ));
     if (!$storageResponse->isOK()) {
          $this->logError('The object could not be created on Amazon S3.', array(
              'objectPath'  => $objectPath,
              'filter'      => $filter,
              's3_response' => $storageResponse,
          ));
         throw new NotStorableException('The object could not be created on Amazon S3.');
     }
 }
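Example #1 relies on a getObjectPath() helper that is not shown. A minimal sketch of what it plausibly does, assuming the key layout is simply the filter name prefixed to the source path (the body below is an assumption, not part of the original):

 protected function getObjectPath($path, $filter)
 {
     // e.g. filter "thumb" + path "media/cat.jpg" => "thumb/media/cat.jpg" (assumed layout)
     return $filter . '/' . ltrim($path, '/');
 }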
Example #2
 public function upload($remotePath, $localPath, $cnt = 0)
 {
     $response = $this->_s3->create_object($this->_bucket, $remotePath, array('fileUpload' => $localPath));
     $isOK = $response->isOK();
     if (!$isOK) {
         Api_Core_Application::log("Проблема с загрузкой файла " . $localPath, array('upload', array('header' => $response->header, 'status' => $response->status, 'body' => $response->body)), Api_Component_Log_Logger::LEVEL_ERROR);
         if ($cnt > self::TRY_UPLOAD_CNT) {
             return false;
         }
         sleep(5);
         $cnt++;
         return $this->upload($remotePath, $localPath, $cnt);
     }
     Api_Core_Application::log("Архив {$localPath} отправлен в хранилище");
     return $isOK;
 }
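Example #2 retries the upload recursively with a fixed 5-second pause. The same behavior can be written iteratively with exponential back-off; a sketch using the same class wiring ($this->_s3, $this->_bucket, and TRY_UPLOAD_CNT all come from the example above):

 public function upload($remotePath, $localPath)
 {
     $delay = 5;
     for ($attempt = 0; $attempt <= self::TRY_UPLOAD_CNT; $attempt++) {
         // create_object() returns a CFResponse; isOK() covers the 2xx range
         $response = $this->_s3->create_object($this->_bucket, $remotePath, array('fileUpload' => $localPath));
         if ($response->isOK()) {
             return true;
         }
         sleep($delay);
         $delay *= 2; // back off before the next attempt
     }
     return false;
 }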
Example #3
 /**
  * Store an uploaded file to S3.
  * @param CUploadedFile $uploadedFile
  * @param string $bucket The bucket in which to create the object
  * @return string URL of the stored file.
  */
 public function store($uploadedFile, $bucket = NULL)
 {
     if ($this->config['randomPath']) {
         $filePath = $this->config['pathPrefix'] . md5(date('His')) . '/' . $uploadedFile->getName();
     } else {
         $filePath = $this->config['pathPrefix'] . $uploadedFile->getName();
     }
     if ($bucket === NULL) {
         $bucket = $this->config['defaultBucket'];
     }
     /** @var CFResponse $result */
      $result = $this->s3->create_object($bucket, $filePath, array(
          'fileUpload' => $uploadedFile->getTempName(),
          'acl'        => $this->config['defaultACL'],
      ));
     if ($result->isOk()) {
         return urldecode($this->s3->get_object_url($bucket, $filePath));
     } else {
         Yii::log("STATUS:" . $result->status . "\nHEDAER:" . $result->header . "\nBODY:" . $result->body, CLogger::LEVEL_ERROR, "application");
         throw new CException($result->status);
     }
 }
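Example #4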
 /**
  * create a directory
  *
  * @param   string  $filePath
  *
  * @return  CFResponse
  */
 protected function createDirectory($filePath)
 {
      // bail out if the directory already exists
     if ($this->isDir($this->metadata($filePath))) {
         $this->debug(sprintf(self::MSG_DIR_NOTCREATED_EXISTS, $filePath));
         return;
     }
     // properties that make up a directory
     $acl = AmazonS3::ACL_PUBLIC;
     $body = null;
     $contentType = 'binary/octet-stream';
     // create directory
     $response = $this->s3->create_object($this->bucket, sprintf('%s/', $filePath), compact('acl', 'body', 'contentType'));
     return $response;
 }
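The isDir()/metadata() helpers used by createDirectory() are not shown. Since S3 has no real directories (a "directory" is just a zero-byte object whose key ends in a slash, as the code above creates), a check along these lines would do; if_object_exists() is an SDK 1.x call, but this particular helper is a sketch, not the original implementation:

 protected function isDirectoryPlaceholder($filePath)
 {
     // a "directory" is just an empty object whose key ends in a slash
     return $this->s3->if_object_exists($this->bucket, rtrim($filePath, '/') . '/');
 }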
Example #5
 /**
  * Upload a single item to S3
  * 
  * @param array $file The PHP FILE array for the file
  * @param string $target The relative path in the bucket in which to place the file
  * @param array $options An array of options for uploading to S3
  * @return bool|string
  */
 public function uploadSingle($file, $target, array $options = array())
 {
     $options = array_merge(array('acl' => AmazonS3::ACL_PUBLIC), $options);
     if (is_array($file)) {
         $filename = basename($file['name']);
         $file = $file['tmp_name'];
     } else {
         $filename = basename($file);
     }
     $options['fileUpload'] = $file;
     $response = $this->s3->create_object($this->bucket, $target . $filename, $options);
     if ($response->status != 200) {
         return false;
     }
     return $this->s3->get_object_url($this->bucket, $target . $filename);
 }
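Example #6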
 /**
  * Upload files to S3
  * 
  * @param string $container
  * @param array $objects
  * @return bool
  */
 public function uploadObjectsToContainer($container, array $objects = array())
 {
     if ($container == '/' || $container == '.') {
         $container = '';
     }
     $allowedFileTypes = explode(',', $this->xpdo->getOption('upload_files', null, ''));
      $allowedFileTypes = array_merge(
          explode(',', $this->xpdo->getOption('upload_images')),
          explode(',', $this->xpdo->getOption('upload_media')),
          explode(',', $this->xpdo->getOption('upload_flash')),
          $allowedFileTypes
      );
     $allowedFileTypes = array_unique($allowedFileTypes);
     $maxFileSize = $this->xpdo->getOption('upload_maxsize', null, 1048576);
     /* loop through each file and upload */
     foreach ($objects as $file) {
         if ($file['error'] != 0) {
             continue;
         }
         if (empty($file['name'])) {
             continue;
         }
         $ext = @pathinfo($file['name'], PATHINFO_EXTENSION);
         $ext = strtolower($ext);
         if (empty($ext) || !in_array($ext, $allowedFileTypes)) {
             $this->addError('path', $this->xpdo->lexicon('file_err_ext_not_allowed', array('ext' => $ext)));
             continue;
         }
         $size = @filesize($file['tmp_name']);
         if ($size > $maxFileSize) {
             $this->addError('path', $this->xpdo->lexicon('file_err_too_large', array('size' => $size, 'allowed' => $maxFileSize)));
             continue;
         }
         $newPath = $container . $file['name'];
         $contentType = $this->getContentType($ext);
          $uploaded = $this->driver->create_object($this->bucket, $newPath, array(
              'fileUpload'  => $file['tmp_name'],
              'acl'         => AmazonS3::ACL_PUBLIC,
              'length'      => $size,
              'contentType' => $contentType,
          ));
          // create_object() returns a CFResponse object, which is always truthy; check isOK()
          if (!$uploaded->isOK()) {
              $this->addError('path', $this->xpdo->lexicon('file_err_upload'));
          }
     }
     /* invoke event */
     $this->xpdo->invokeEvent('OnFileManagerUpload', array('files' => &$objects, 'directory' => $container, 'source' => &$this));
     $this->xpdo->logManagerAction('file_upload', '', $container);
     return !$this->hasErrors();
 }
Example #7
 public function transer_dir($image_id)
 {
     $this->EE->load->helper('file');
     // Grab image info
     $query = $this->EE->db->select('field_id, entry_id, filename, extension')->from('exp_channel_images')->where('image_id', $image_id)->limit(1)->get();
     $field_id = $query->row('field_id');
     // Grab settings
     $settings = $this->EE->image_helper->grabFieldSettings($field_id);
     $filename = $query->row('filename');
     $extension = '.' . substr(strrchr($filename, '.'), 1);
     $entry_id = $query->row('entry_id');
     // -----------------------------------------
     // Load Location
     // -----------------------------------------
     $location_type = $settings['upload_location'];
     $location_class = 'CI_Location_' . $location_type;
     // Load Settings
     if (isset($settings['locations'][$location_type]) == FALSE) {
         $o['body'] = $this->EE->lang->line('ci:location_settings_failure');
         exit($this->EE->image_helper->generate_json($o));
     }
     $location_settings = $settings['locations'][$location_type];
     // Load Main Class
     if (class_exists('Image_Location') == FALSE) {
         require PATH_THIRD . 'channel_images/locations/image_location.php';
     }
     // Try to load Location Class
     if (class_exists($location_class) == FALSE) {
         $location_file = PATH_THIRD . 'channel_images/locations/' . $location_type . '/' . $location_type . '.php';
         if (file_exists($location_file) == FALSE) {
             $o['body'] = $this->EE->lang->line('ci:location_load_failure');
             exit($this->EE->image_helper->generate_json($o));
         }
         require $location_file;
     }
     // Init
     $LOC = new $location_class($location_settings);
     // Temp Dir
     @mkdir($this->EE->channel_images->cache_path . 'channel_images/', 0777);
     @mkdir($this->EE->channel_images->cache_path . 'channel_images/transfer/', 0777);
     @mkdir($this->EE->channel_images->cache_path . 'channel_images/transfer/' . $image_id . '/', 0777);
     $temp_dir = $this->EE->channel_images->cache_path . 'channel_images/transfer/' . $image_id . '/';
     // -----------------------------------------
     // Copy Image to temp location
     // -----------------------------------------
     $LOC->download_file($entry_id, $filename, $temp_dir);
     //if ($response !== TRUE) exit($response);
     foreach ($settings['action_groups'] as $group) {
         $size_name = $group['group_name'];
         $size_filename = str_replace($extension, "__{$size_name}{$extension}", $filename);
         $LOC->download_file($entry_id, $size_filename, $temp_dir);
     }
     // -----------------------------------------
     // Init Amazon
     // -----------------------------------------
     if ($_POST['transfer']['to'] == 's3') {
         if (class_exists('CFRuntime') == FALSE) {
             // Include the SDK
             require_once PATH_THIRD . 'channel_images/locations/s3/sdk/sdk.class.php';
         }
         // Just to be sure
         if (class_exists('AmazonS3') == FALSE) {
             include PATH_THIRD . 'channel_images/locations/s3/sdk/services/s3.class.php';
         }
         // Instantiate the AmazonS3 class
         $S3 = new AmazonS3(array('key' => trim($_POST['s3']['key']), 'secret' => trim($_POST['s3']['secret_key'])));
         $S3->ssl_verification = FALSE;
         // Init Configs
         $temp = $this->EE->config->item('ci_s3_storage');
         $s3_storage = constant('AmazonS3::' . $temp[$_POST['s3']['storage']]);
         $temp = $this->EE->config->item('ci_s3_acl');
         $s3_acl = constant('AmazonS3::' . $temp[$_POST['s3']['acl']]);
         $s3_directory = trim($_POST['s3']['directory']);
         $s3_bucket = $_POST['s3']['bucket'];
         $s3_subdir = '';
         if ($s3_directory) {
             $s3_subdir = $s3_directory . '/';
         }
         $s3_headers = $this->EE->config->item('ci_s3_headers');
         // Test it
         $resp = $S3->get_bucket_headers($s3_bucket);
         if (!$resp->isOK()) {
             if (isset($resp->body->Message)) {
                 exit('ERROR_S3: ' . $resp->body->Message);
             } else {
                 exit('ERROR_S3: Bucket error');
             }
         }
     } else {
         // Include the SDK
         if (class_exists('CF_Authentication') == FALSE) {
             require_once PATH_THIRD . 'channel_images/locations/cloudfiles/sdk/cloudfiles.php';
         }
         // Which Region?
         if ($_POST['cloudfiles']['region'] == 'uk') {
             $_POST['cloudfiles']['region'] = constant('UK_AUTHURL');
         } else {
             $_POST['cloudfiles']['region'] = constant('US_AUTHURL');
         }
         // Instantiate the Cloudfiles class
         $CF_AUTH = new CF_Authentication($_POST['cloudfiles']['username'], $_POST['cloudfiles']['api'], NULL, $_POST['cloudfiles']['region']);
         try {
             $CF_AUTH->ssl_use_cabundle();
             $CF_AUTH->authenticate();
         } catch (AuthenticationException $e) {
             exit('ERROR_CLOUDFILES:' . $e->getMessage());
         }
         $CF_CONN = new CF_Connection($CF_AUTH);
         $CF_CONN->ssl_use_cabundle();
         $CF_CONT = $CF_CONN->get_container($_POST['cloudfiles']['container']);
     }
     // -----------------------------------------
     // Loop over all dirs
     // -----------------------------------------
     $files = scandir($temp_dir);
     foreach ($files as $file) {
         $full_path = $temp_dir . $file;
         if (is_file($full_path) == false) {
             continue;
         }
         $extension = substr(strrchr($file, '.'), 1);
         // Mime type
         if ($extension == 'jpg') {
             $filemime = 'image/jpeg';
         } elseif ($extension == 'jpeg') {
             $filemime = 'image/jpeg';
         } elseif ($extension == 'png') {
             $filemime = 'image/png';
         } elseif ($extension == 'gif') {
             $filemime = 'image/gif';
         } else {
             continue;
         }
         if (isset($S3) == true) {
             $upload_arr = array();
             $upload_arr['fileUpload'] = $full_path;
             $upload_arr['contentType'] = $filemime;
             $upload_arr['acl'] = $s3_acl;
             $upload_arr['storage'] = $s3_storage;
             $upload_arr['headers'] = array();
             if ($s3_headers != FALSE && is_array($s3_headers) === TRUE) {
                 $upload_arr['headers'] = $s3_headers;
             }
             $response = $S3->create_object($s3_bucket, $s3_subdir . $entry_id . '/' . $file, $upload_arr);
             // Success?
             if (!$response->isOK()) {
                 exit((string) $response->body->Message);
             }
         } else {
             $OBJECT = $CF_CONT->create_object($entry_id . '/' . $file);
             $OBJECT->content_type = $filemime;
             try {
                 $OBJECT->load_from_filename($full_path);
             } catch (Exception $e) {
                 exit($e->getMessage());
             }
         }
         //@unlink($temp_dir.$file);
     }
     @delete_files($temp_dir, true);
     @rmdir($temp_dir);
     $o = array('success' => 'yes');
     exit($this->EE->image_helper->generate_json($o));
 }
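The extension-to-MIME chain in Example #7 (and the getContentType() lookup in Example #6) can be collapsed into a lookup table; a small sketch:

 function image_mime_for_extension($extension)
 {
     static $map = array(
         'jpg'  => 'image/jpeg',
         'jpeg' => 'image/jpeg',
         'png'  => 'image/png',
         'gif'  => 'image/gif',
     );
     $extension = strtolower($extension);
     // null means "not an image we handle", mirroring the continue in the loop above
     return isset($map[$extension]) ? $map[$extension] : null;
 }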
Example #8
        <?php
        $tempData = $db->Raw("SELECT `md5`,`filesize`,`fileformat`,`playtime`,`sample_rate`,`location` FROM `userdb_temporary` WHERE `user`='{$user}'");
        $filesize = $tempData[0]['filesize'];
        $sample_rate = $tempData[0]['sample_rate'];
        $fileformat = $tempData[0]['fileformat'];
        $md5 = $tempData[0]['md5'];
        $playtime = $tempData[0]['playtime'];

        include 'include/aws/sdk.class.php';
        $s3 = new AmazonS3();
        $s3->create_object('fb-music', basename($tempData[0]['location']), array(
            'fileUpload' => $tempData[0]['location'],
            'acl'        => AmazonS3::ACL_AUTH_READ,
            'storage'    => AmazonS3::STORAGE_REDUCED,
        ));
        /*
              $selDrive = $db->Raw("SELECT `data` FROM `system` WHERE `var`='drive'");
        $userFolder = array_sum(str_split($user));
        
        if(!file_exists('users/' . $selDrive[0]['data'] . '/' . $userFolder . '/'))
        	mkdir('users/' . $selDrive[0]['data'] . '/' . $userFolder . '/');
        rename($tempData[0]['location'], 'users/' . $selDrive[0]['data'] . '/' . $userFolder . '/' . basename($tempData[0]['location']) . '');
        */
        $db->Raw("DELETE FROM `userdb_temporary` WHERE `user`='{$user}' LIMIT 1");
        unlink($tempData[0]['location']);
        /*
        $link = '' . $config['server']['streaming'] . '/stream/' . $selDrive[0]['data'] . '/' . $userFolder . '/' . basename($tempData[0]['location']) . '';
        $drive = $selDrive[0]['data'];
        */
        $link = basename($tempData[0]['location']);
Example #9
function dest_gstorage()
{
    global $WORKING, $STATIC;
    trigger_error(sprintf(__('%d. try sending backup to Google Storage...', 'backwpup'), $WORKING['DEST_GSTORAGE']['STEP_TRY']), E_USER_NOTICE);
    $WORKING['STEPTODO'] = 2 + filesize($STATIC['JOB']['backupdir'] . $STATIC['backupfile']);
    $WORKING['STEPDONE'] = 0;
    require_once dirname(__FILE__) . '/../libs/aws/sdk.class.php';
    need_free_memory(26214400 * 1.1);
    try {
        $gstorage = new AmazonS3(array('key' => $STATIC['JOB']['GStorageAccessKey'], 'secret' => $STATIC['JOB']['GStorageSecret'], 'certificate_authority' => true));
        //set up s3 for google
        $gstorage->set_hostname('storage.googleapis.com');
        $gstorage->allow_hostname_override(false);
        if ($gstorage->if_bucket_exists($STATIC['JOB']['GStorageBucket'])) {
            trigger_error(sprintf(__('Connected to GStorage Bucket: %s', 'backwpup'), $STATIC['JOB']['GStorageBucket']), E_USER_NOTICE);
            // set cURL progress callback
            $curlops = array();
            if (defined('CURLOPT_PROGRESSFUNCTION')) {
                $curlops = array(CURLOPT_NOPROGRESS => false, CURLOPT_PROGRESSFUNCTION => 'curl_progresscallback', CURLOPT_BUFFERSIZE => 1048576);
            }
            trigger_error(__('Upload to GStorage now started... ', 'backwpup'), E_USER_NOTICE);
            //transfer file to GStorage
            $result = $gstorage->create_object($STATIC['JOB']['GStorageBucket'], $STATIC['JOB']['GStoragedir'] . $STATIC['backupfile'], array(
                'fileUpload' => $STATIC['JOB']['backupdir'] . $STATIC['backupfile'],
                'acl'        => 'private',
                'curlopts'   => $curlops,
            ));
            $result = (array) $result;
            if ($result["status"] >= 200 and $result["status"] < 300) {
                $WORKING['STEPTODO'] = 1 + filesize($STATIC['JOB']['backupdir'] . $STATIC['backupfile']);
                trigger_error(sprintf(__('Backup transferred to %s', 'backwpup'), "https://storage.cloud.google.com/" . $STATIC['JOB']['GStorageBucket'] . "/" . $STATIC['JOB']['GStoragedir'] . $STATIC['backupfile']), E_USER_NOTICE);
                $STATIC['JOB']['lastbackupdownloadurl'] = "https://storage.cloud.google.com/" . $STATIC['JOB']['GStorageBucket'] . "/" . $STATIC['JOB']['GStoragedir'] . $STATIC['backupfile'];
                $WORKING['STEPSDONE'][] = 'DEST_GSTORAGE';
                //set done
            } else {
                trigger_error(sprintf(__('Can not transfer backup to GStorage! (%1$d) %2$s', 'backwpup'), $result["status"], $result["Message"]), E_USER_ERROR);
            }
        } else {
            trigger_error(sprintf(__('GStorage Bucket "%s" does not exist!', 'backwpup'), $STATIC['JOB']['GStorageBucket']), E_USER_ERROR);
        }
    } catch (Exception $e) {
        trigger_error(sprintf(__('GStorage API: %s', 'backwpup'), $e->getMessage()), E_USER_ERROR);
        return;
    }
    try {
        if ($gstorage->if_bucket_exists($STATIC['JOB']['GStorageBucket'])) {
            if ($STATIC['JOB']['GStoragemaxbackups'] > 0) {
                //Delete old backups
                $backupfilelist = array();
                if (($contents = $gstorage->list_objects($STATIC['JOB']['GStorageBucket'], array('prefix' => $STATIC['JOB']['GStoragedir']))) !== false) {
                    foreach ($contents->body->Contents as $object) {
                        $file = basename($object->Key);
                        if ($STATIC['JOB']['fileprefix'] == substr($file, 0, strlen($STATIC['JOB']['fileprefix'])) and $STATIC['JOB']['fileformart'] == substr($file, -strlen($STATIC['JOB']['fileformart']))) {
                            $backupfilelist[] = $file;
                        }
                    }
                }
                if (sizeof($backupfilelist) > 0) {
                    rsort($backupfilelist);
                    $numdeltefiles = 0;
                    for ($i = $STATIC['JOB']['GStoragemaxbackups']; $i < sizeof($backupfilelist); $i++) {
                        if ($gstorage->delete_object($STATIC['JOB']['GStorageBucket'], $STATIC['JOB']['GStoragedir'] . $backupfilelist[$i])) {
                            // delete files on GStorage
                            $numdeltefiles++;
                        } else {
                            trigger_error(sprintf(__('Can not delete backup on GStorage://%s', 'backwpup'), $STATIC['JOB']['GStorageBucket'] . '/' . $STATIC['JOB']['GStoragedir'] . $backupfilelist[$i]), E_USER_ERROR);
                        }
                    }
                    if ($numdeltefiles > 0) {
                        trigger_error(sprintf(_n('One file deleted on GStorage Bucket', '%d files deleted on GStorage Bucket', $numdeltefiles, 'backwpup'), $numdeltefiles), E_USER_NOTICE);
                    }
                }
            }
        }
    } catch (Exception $e) {
        trigger_error(sprintf(__('GStorage API: %s', 'backwpup'), $e->getMessage()), E_USER_ERROR);
        return;
    }
    $WORKING['STEPDONE']++;
}
Example #10
 public function saveFileToApplicationBucket($file_location, $filename, $prefix, $permissions = null)
 {
     $permissions = is_null($permissions) ? AmazonS3::ACL_PRIVATE : $permissions;
     $location = $file_location . $filename;
     if (!file_exists($location)) {
         throw new Exception("No local file to upload!");
     }
     ProjectConfiguration::registerAws();
     $s3 = new AmazonS3();
     $bucket = ProjectConfiguration::getApplicationAmazonBucketName();
     if ($s3->if_bucket_exists($bucket)) {
         $s3->delete_object($bucket, $prefix . '/' . $filename);
         $response = $s3->create_object($bucket, $prefix . '/' . $filename, array('fileUpload' => $location, 'acl' => $permissions));
         if (!$response->isOK()) {
             throw new Exception("Error uploading file!");
         }
     } else {
         throw new Exception("Amazon bucket '{$bucket}' does not exist!");
     }
     return $response;
 }
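A note on Example #10: an S3 PUT overwrites any existing object at the same key, so the delete_object() call is only needed if you want the old object gone even when the subsequent upload fails. A trimmed sketch of the same upload without it:

 if (!$s3->if_bucket_exists($bucket)) {
     throw new Exception("Amazon bucket '{$bucket}' does not exist!");
 }
 // create_object() replaces any object already stored under this key
 $response = $s3->create_object($bucket, $prefix . '/' . $filename, array(
     'fileUpload' => $location,
     'acl'        => $permissions,
 ));
 if (!$response->isOK()) {
     throw new Exception("Error uploading file!");
 }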
Example #11
function createImgThumb($link, $conf)
{
    // get the file
    $hash = md5($link);
    $res = '';
    $filePathDestOriginal = $conf->originalpath() . $hash . '.jpg';
    $filePathDestThumb = $conf->thumbpath() . $hash . '.jpg';
    $filePathDestMedium = $conf->mediumpath() . $hash . '.jpg';
    $filePathDestBig = $conf->bigpath() . $hash . '.jpg';
    $ch = curl_init($link);
    curl_setopt($ch, CURLOPT_HEADER, 0);
    curl_setopt($ch, CURLOPT_RETURNTRANSFER, 1);
    curl_setopt($ch, CURLOPT_BINARYTRANSFER, 1);
    $rawdata = curl_exec($ch);
    curl_close($ch);
    if (file_exists($filePathDestOriginal)) {
        @unlink($filePathDestOriginal);
    }
    $fp = fopen($filePathDestOriginal, 'x');
    fwrite($fp, $rawdata);
    fclose($fp);
    // create thumb and full size
    if ($rawdata) {
        $res1 = redimg(array(0 => array('W' => 120, 'H' => 90)), $filePathDestThumb, $filePathDestOriginal, 0);
        $res2 = redimg(array(0 => array('W' => 256, 'H' => 0)), $filePathDestMedium, $filePathDestOriginal, 0);
        $res3 = redimg(array(0 => array('W' => 512, 'H' => 0)), $filePathDestBig, $filePathDestOriginal, 0);
        require_once "aws-sdk/sdk.class.php";
        $s3 = new AmazonS3();
        if (file_exists($filePathDestThumb)) {
            $response1 = $s3->create_object($conf->bucket(), '120_90/' . $hash . '.jpg', array('fileUpload' => $filePathDestThumb, 'contentType' => 'image/jpeg', 'acl' => AmazonS3::ACL_PUBLIC));
        }
        if (file_exists($filePathDestMedium)) {
            $response2 = $s3->create_object($conf->bucket(), '256_0/' . $hash . '.jpg', array('fileUpload' => $filePathDestMedium, 'contentType' => 'image/jpeg', 'acl' => AmazonS3::ACL_PUBLIC));
        }
        if (file_exists($filePathDestBig)) {
            $response3 = $s3->create_object($conf->bucket(), '512_0/' . $hash . '.jpg', array('fileUpload' => $filePathDestBig, 'contentType' => 'image/jpeg', 'acl' => AmazonS3::ACL_PUBLIC));
        }
        if ($res1 && $res2 && $res3) {
            $res = $hash . '.jpg';
        }
        // NOTE: S3 request signing is time-sensitive, so the local server's clock must be accurate or uploads will be rejected
        //var_dump($response1);
        //var_dump($response2);
        //var_dump($response3);
        // The $responseN variables are only set when the corresponding resized file existed, so guard with isset()
        if (isset($response1, $response2, $response3) && $response1->status == 200 && $response2->status == 200 && $response3->status == 200) {
            unlink($filePathDestOriginal);
        }
        if (isset($response1) && $response1->status == 200) {
            unlink($filePathDestThumb);
        }
        if (isset($response2) && $response2->status == 200) {
            unlink($filePathDestMedium);
        }
        if (isset($response3) && $response3->status == 200) {
            unlink($filePathDestBig);
        }
    } else {
        $res = '';
    }
    return $res;
}
Example #12
    public function cache_thumb($job, $user_id)
    {
        $keepsizes = array('thumb_medium.', 'thumb_large.');
        $apikey = DB::get()->val("SELECT value FROM options WHERE grouping = 'Thumbnails' AND name = 'Bluga API Key'");
        $username = DB::get()->val('SELECT username FROM users where id = ?', array($user_id));
        $statusrequest = <<<STATUSREQ
<webthumb>
\t<apikey>{$apikey}</apikey>
\t<status>
\t\t<job>{$job}</job>
\t</status>
</webthumb>
STATUSREQ;
        //header('Content-type: text/plain');
        //echo "{$statusrequest}\n";
        $xml = new SimpleXMLElement(self::execute('http://webthumb.bluga.net/api.php', 'POST', $statusrequest));
        //echo "$jobs\n";
        //echo $xml->asXML();
        $href = false;
        foreach ($xml->jobStatus->status as $status) {
            if ((string) $status == 'Complete') {
                $zipurl = $status['pickup'];
                $zipfiledata = self::execute($zipurl);
                $zipfile = tempnam(sys_get_temp_dir(), 'thm');
                file_put_contents($zipfile, $zipfiledata);
                if (file_exists($zipfile)) {
                    $zip = zip_open($zipfile);
                    $names = array();
                    while ($zip_entry = zip_read($zip)) {
                        $size = zip_entry_filesize($zip_entry);
                        $zdata = zip_entry_read($zip_entry, $size);
                        $zfile = zip_entry_name($zip_entry);
                        $keep = false;
                        // use a distinct loop variable so the zip entry $size is not clobbered
                        foreach ($keepsizes as $keepsize) {
                            if (strpos($zfile, $keepsize) !== false) {
                                $keep = true;
                                break;
                            }
                        }
                        if (strpos($zfile, '-') === false) {
                            $keep = true;
                        }
                        if ($keep) {
                            $access = DB::get()->assoc("SELECT name, value FROM options WHERE grouping = 'Amazon Web Services'");
                            $bucketname = $access['S3 Bucket Name'];
                            $s3 = new AmazonS3($access['AWS Access Key ID'], $access['AWS Secret Access Key']);
                            $s3filename = strtolower(preg_replace('%\\W+%', '', $username)) . '/' . date('Ym') . '/webthumb_';
                            $s3filename .= basename($zfile);
                            $s3filename = trim($s3filename, '/');
                            $opt = array('filename' => $s3filename, 'body' => $zdata, 'contentType' => 'image/png', 'acl' => S3_ACL_OPEN);
                            $s3->create_object($bucketname, $opt);
                            $href = "http://{$bucketname}.s3.amazonaws.com/{$s3filename}#{$username}:{$user_id}";
                        }
                    }
                    zip_close($zip);
                    unlink($zipfile);
                }
            }
        }
        return $href;
    }
Example #13
 public static function send($settings = array(), $files = array(), $clear_uploads = false)
 {
     global $pb_backupbuddy_destination_errors;
     if (!is_array($files)) {
         $files = array($files);
     }
     if ($clear_uploads === false) {
         // Uncomment the following line to override and always clear.
         //$clear_uploads = true;
     }
     $itxapi_username = $settings['itxapi_username'];
     $itxapi_password = $settings['itxapi_password'];
     $db_archive_limit = $settings['db_archive_limit'];
     $full_archive_limit = $settings['full_archive_limit'];
     $max_chunk_size = $settings['max_chunk_size'];
     $remote_path = self::get_remote_path($settings['directory']);
      // Has leading and trailing slashes.
     if ($settings['ssl'] == '0') {
         $disable_ssl = true;
     } else {
         $disable_ssl = false;
     }
     $multipart_id = $settings['_multipart_id'];
     $multipart_counts = $settings['_multipart_counts'];
     pb_backupbuddy::status('details', 'Stash remote path set to `' . $remote_path . '`.');
     require_once dirname(__FILE__) . '/lib/class.itx_helper.php';
     require_once dirname(__FILE__) . '/lib/aws-sdk/sdk.class.php';
     // Stash API talk.
     $stash = new ITXAPI_Helper(pb_backupbuddy_destination_stash::ITXAPI_KEY, pb_backupbuddy_destination_stash::ITXAPI_URL, $itxapi_username, $itxapi_password);
     $manage_data = pb_backupbuddy_destination_stash::get_manage_data($settings);
     // Wipe all current uploads.
     if ($clear_uploads === true) {
         pb_backupbuddy::status('details', 'Clearing any current uploads via Stash call to `abort-all`.');
         $abort_url = $stash->get_upload_url(null, 'abort-all');
         $request = new RequestCore($abort_url);
         //pb_backupbuddy::status('details', print_r( $request , true ) );
         $response = $request->send_request(true);
     }
     // Process multipart transfer that we already initiated in a previous PHP load.
     if ($multipart_id != '') {
         // Multipart upload initiated and needs parts sent.
         // Create S3 instance.
         pb_backupbuddy::status('details', 'Creating Stash S3 instance.');
         $s3 = new AmazonS3($settings['_multipart_upload_data']['credentials']);
         // the key, secret, token
         if ($disable_ssl === true) {
             @$s3->disable_ssl(true);
         }
         pb_backupbuddy::status('details', 'Stash S3 instance created.');
         $this_part_number = $settings['_multipart_partnumber'] + 1;
         pb_backupbuddy::status('details', 'Stash beginning upload of part `' . $this_part_number . '` of `' . count($settings['_multipart_counts']) . '` parts of file `' . $settings['_multipart_file'] . '` with multipart ID `' . $settings['_multipart_id'] . '`.');
          $response = $s3->upload_part($settings['_multipart_upload_data']['bucket'], $settings['_multipart_upload_data']['object'], $settings['_multipart_id'], array(
              'expect'     => '100-continue',
              'fileUpload' => $settings['_multipart_file'],
              'partNumber' => $this_part_number,
              'seekTo'     => (int) $settings['_multipart_counts'][$settings['_multipart_partnumber']]['seekTo'],
              'length'     => (int) $settings['_multipart_counts'][$settings['_multipart_partnumber']]['length'],
          ));
         if (!$response->isOK()) {
             $this_error = 'Stash unable to upload file part for multipart upload `' . $settings['_multipart_id'] . '`. Details: `' . print_r($response, true) . '`.';
             $pb_backupbuddy_destination_errors[] = $this_error;
             pb_backupbuddy::status('error', $this_error);
             return false;
         }
         // Update stats.
         foreach (pb_backupbuddy::$options['remote_sends'] as $identifier => $remote_send) {
             if (isset($remote_send['_multipart_id']) && $remote_send['_multipart_id'] == $multipart_id) {
                 // this item.
                 pb_backupbuddy::$options['remote_sends'][$identifier]['_multipart_status'] = 'Sent part ' . $this_part_number . ' of ' . count($settings['_multipart_counts']) . '.';
                 if ($this_part_number == count($settings['_multipart_counts'])) {
                     pb_backupbuddy::$options['remote_sends'][$identifier]['_multipart_status'] .= '<br>Success.';
                     pb_backupbuddy::$options['remote_sends'][$identifier]['finish_time'] = time();
                 }
                 pb_backupbuddy::save();
                 break;
             }
         }
         // Made it here so success sending part. Increment for next part to send.
         $settings['_multipart_partnumber']++;
         if (!isset($settings['_multipart_counts'][$settings['_multipart_partnumber']])) {
             // No more parts exist for this file. Tell S3 the multipart upload is complete and move on.
             pb_backupbuddy::status('details', 'Stash getting parts with etags to notify S3 of completed multipart send.');
             $etag_parts = $s3->list_parts($settings['_multipart_upload_data']['bucket'], $settings['_multipart_upload_data']['object'], $settings['_multipart_id']);
             pb_backupbuddy::status('details', 'Stash got parts list. Notifying S3 of multipart upload completion.');
             $response = $s3->complete_multipart_upload($settings['_multipart_upload_data']['bucket'], $settings['_multipart_upload_data']['object'], $settings['_multipart_id'], $etag_parts);
             if (!$response->isOK()) {
                 $this_error = 'Stash unable to notify S3 of completion of all parts for multipart upload `' . $settings['_multipart_id'] . '`.';
                 $pb_backupbuddy_destination_errors[] = $this_error;
                 pb_backupbuddy::status('error', $this_error);
                 return false;
             } else {
                 pb_backupbuddy::status('details', 'Stash notified S3 of multipart completion.');
             }
              // Notify Stash API that things were successful.
             $done_url = $stash->get_upload_url($settings['_multipart_file'], 'done', $remote_path . $settings['_multipart_backup_type_dir'] . basename($settings['_multipart_file']));
             pb_backupbuddy::status('details', 'Notifying Stash of completed multipart upload with done url `' . $done_url . '`.');
             $request = new RequestCore($done_url);
             $response = $request->send_request(true);
             if (!$response->isOK()) {
                 $this_error = 'Error #756834682. Could not finalize Stash upload. Response code: `' . $response->get_response_code() . '`; Response body: `' . $response->get_response_body() . '`; Response headers: `' . $response->get_response_header() . '`.';
                 $pb_backupbuddy_destination_errors[] = $this_error;
                 pb_backupbuddy::status('error', $this_error);
                 return false;
             } else {
                 // Good server response.
                 // See if we got an optional json response.
                 $upload_data = @json_decode($response->body, true);
                 if (isset($upload_data['error'])) {
                     $this_error = 'Stash error(s): `' . implode(' - ', $upload_data['error']) . '`.';
                     $pb_backupbuddy_destination_errors[] = $this_error;
                     pb_backupbuddy::status('error', $this_error);
                     return false;
                 }
                 pb_backupbuddy::status('details', 'Stash success sending file `' . basename($settings['_multipart_file']) . '`. File uploaded via multipart across `' . $this_part_number . '` parts and reported to Stash as completed.');
             }
             pb_backupbuddy::status('details', 'Stash has no more parts left for this multipart upload. Clearing multipart instance variables.');
             $settings['_multipart_partnumber'] = 0;
             $settings['_multipart_id'] = '';
             $settings['_multipart_file'] = '';
             $settings['_multipart_counts'] = array();
             $settings['_multipart_upload_data'] = array();
         }
         delete_transient('pb_backupbuddy_stashquota_' . $settings['itxapi_username']);
         // Delete quota transient since it probably has changed now.
         // Schedule to continue if anything is left to upload for this multipart of any individual files.
         if ($settings['_multipart_id'] != '' || count($files) > 0) {
             pb_backupbuddy::status('details', 'Stash multipart upload has more parts left. Scheduling next part send.');
             wp_schedule_single_event(time(), pb_backupbuddy::cron_tag('destination_send'), array($settings, $files, 'multipart', false));
             spawn_cron(time() + 150);
             // Adds > 60 seconds to get around once per minute cron running limit.
             update_option('_transient_doing_cron', 0);
             // Prevent cron-blocking for next item.
             pb_backupbuddy::status('details', 'Stash scheduled send of next part(s). Done for this cycle.');
              return array($settings['_multipart_id'], 'Sent ' . $this_part_number . ' of ' . count($settings['_multipart_counts']) . ' parts.');
         }
     }
     // Upload each file.
     foreach ($files as $file_id => $file) {
         // Determine backup type directory (if zip).
         $backup_type_dir = '';
         $backup_type = '';
         if (stristr($file, '.zip') !== false) {
             // If a zip try to determine backup type.
             pb_backupbuddy::status('details', 'Stash: Zip file. Detecting backup type if possible.');
             $serial = pb_backupbuddy::$classes['core']->get_serial_from_file($file);
             if (isset(pb_backupbuddy::$options['backups'][$serial]['integrity']['detected_type'])) {
                 pb_backupbuddy::status('details', 'Stash: Detected backup type as `' . pb_backupbuddy::$options['backups'][$serial]['integrity']['detected_type'] . '` via integrity check data.');
                 $backup_type_dir = pb_backupbuddy::$options['backups'][$serial]['integrity']['detected_type'] . '/';
                 $backup_type = pb_backupbuddy::$options['backups'][$serial]['integrity']['detected_type'];
             } else {
                 if (stristr($file, '-db-') !== false) {
                     pb_backupbuddy::status('details', 'Stash: Detected backup type as `db` via filename.');
                     $backup_type_dir = 'db/';
                     $backup_type = 'db';
                 } elseif (stristr($file, '-full-') !== false) {
                     pb_backupbuddy::status('details', 'Stash: Detected backup type as `full` via filename.');
                     $backup_type_dir = 'full/';
                     $backup_type = 'full';
                 } else {
                     pb_backupbuddy::status('details', 'Stash: Could not detect backup type via integrity details nor filename.');
                 }
             }
         }
         // Interact with Stash API.
         pb_backupbuddy::status('details', 'Determining Stash upload URL for `' . $file . '`.` with destination remote path `' . $remote_path . $backup_type_dir . basename($file) . '`.');
         $upload_url = $stash->get_upload_url($file, 'request', $remote_path . $backup_type_dir . basename($file));
         pb_backupbuddy::status('details', 'Determined upload url: `' . $upload_url . '`.');
         $request = new RequestCore($upload_url);
         pb_backupbuddy::status('details', 'Sending Stash API request.');
         $response = $request->send_request(true);
         // Validate response.
         if (!$response->isOK()) {
             $this_error = 'Stash request for upload credentials failed.';
             $pb_backupbuddy_destination_errors[] = $this_error;
             pb_backupbuddy::status('error', $this_error);
             return false;
         }
         if (!($upload_data = json_decode($response->body, true))) {
             $this_error = 'Stash API did not give a valid JSON response.';
             $pb_backupbuddy_destination_errors[] = $this_error;
             pb_backupbuddy::status('error', $this_error);
             return false;
         }
         if (isset($upload_data['error'])) {
             $this_error = 'Stash error(s): `' . implode(' - ', $upload_data['error']) . '`.';
             $pb_backupbuddy_destination_errors[] = $this_error;
             pb_backupbuddy::status('error', $this_error);
             return false;
         }
         // Calculate meta data to send.
         /*
         $meta_array = array();
         if ( stristr( $file, '.zip' ) !== false ) { // If a zip try to determine backup type.
         	pb_backupbuddy::status( 'details', 'Stash: Zip file. Detecting backup type if possible.' );
         	$serial = pb_backupbuddy::$classes['core']->get_serial_from_file( $file );
         	if ( isset( pb_backupbuddy::$options['backups'][$serial]['integrity']['detected_type'] ) ) {
         		pb_backupbuddy::status( 'details', 'Stash: Detected backup type as `' . pb_backupbuddy::$options['backups'][$serial]['integrity']['detected_type'] . '` via integrity check data.' );
         		$meta_array['backup_type'] = pb_backupbuddy::$options['backups'][$serial]['integrity']['detected_type'];
         	} else {
         		if ( stristr( $file, '-db-' ) !== false ) {
         			pb_backupbuddy::status( 'details', 'Stash: Detected backup type as `db` via filename.' );
         			$meta_array['backup_type'] = 'db';
         		} elseif ( stristr( $file, '-full-' ) !== false ) {
         			pb_backupbuddy::status( 'details', 'Stash: Detected backup type as `full` via filename.' );
         			$meta_array['backup_type'] = 'full';
         		} else {
         			pb_backupbuddy::status( 'details', 'Stash: Could not detect backup type via integrity details nor filename.' );
         		}
         	}
         }
         */
         // Create S3 instance.
         pb_backupbuddy::status('details', 'Creating Stash S3 instance.');
         $s3 = new AmazonS3($upload_data['credentials']);
         // the key, secret, token
         if ($disable_ssl === true) {
             @$s3->disable_ssl(true);
         }
         pb_backupbuddy::status('details', 'Stash S3 instance created.');
         // Handle chunking of file into a multipart upload (if applicable).
         $file_size = filesize($file);
         if ($max_chunk_size >= 5 && $file_size / 1024 / 1024 > $max_chunk_size) {
             // minimum chunk size is 5mb. Anything under 5mb we will not chunk.
             pb_backupbuddy::status('details', 'Stash file size of ' . $file_size / 1024 / 1024 . 'MB exceeds max chunk size of ' . $max_chunk_size . 'MB set in settings for sending file as multipart upload.');
             // Initiate multipart upload with S3.
             pb_backupbuddy::status('details', 'Initiating Stash multipart upload.');
             $response = $s3->initiate_multipart_upload($upload_data['bucket'], $upload_data['object'], array('encryption' => 'AES256'));
             if (!$response->isOK()) {
                 $this_error = 'Stash was unable to initiate multipart upload.';
                 $pb_backupbuddy_destination_errors[] = $this_error;
                 pb_backupbuddy::status('error', $this_error);
                 return false;
             } else {
                 $upload_id = (string) $response->body->UploadId;
                 pb_backupbuddy::status('details', 'Stash initiated multipart upload with ID `' . $upload_id . '`.');
             }
             // Get chunk parts for multipart transfer.
             pb_backupbuddy::status('details', 'Stash getting multipart counts.');
             $parts = $s3->get_multipart_counts($file_size, $max_chunk_size * 1024 * 1024);
             // Size of chunks expected to be in bytes.
             $multipart_destination_settings = $settings;
             $multipart_destination_settings['_multipart_id'] = $upload_id;
             $multipart_destination_settings['_multipart_partnumber'] = 0;
             $multipart_destination_settings['_multipart_file'] = $file;
             $multipart_destination_settings['_multipart_counts'] = $parts;
             $multipart_destination_settings['_multipart_upload_data'] = $upload_data;
             $multipart_destination_settings['_multipart_backup_type_dir'] = $backup_type_dir;
              pb_backupbuddy::status('details', 'Stash multipart settings to pass: `' . print_r($multipart_destination_settings, true) . '`.');
              pb_backupbuddy::status('details', 'Stash scheduling send of next part(s).');
             wp_schedule_single_event(time(), pb_backupbuddy::cron_tag('destination_send'), array($multipart_destination_settings, $files, 'multipart', false));
             spawn_cron(time() + 150);
             // Adds > 60 seconds to get around once per minute cron running limit.
             update_option('_transient_doing_cron', 0);
             // Prevent cron-blocking for next item.
             pb_backupbuddy::status('details', 'Stash scheduled send of next part(s). Done for this cycle.');
             return array($upload_id, 'Starting send of ' . count($multipart_destination_settings['_multipart_counts']) . ' parts.');
         } else {
             if ($max_chunk_size != '0') {
                 pb_backupbuddy::status('details', 'File size of ' . $file_size / 1024 / 1024 . 'MB is less than the max chunk size of ' . $max_chunk_size . 'MB; not chunking into multipart upload.');
             } else {
                 pb_backupbuddy::status('details', 'Max chunk size set to zero so not chunking into multipart upload.');
             }
         }
         // SEND file.
         pb_backupbuddy::status('details', 'About to put (upload) object to Stash.');
         $response = $s3->create_object($upload_data['bucket'], $upload_data['object'], array('fileUpload' => $file, 'encryption' => 'AES256'));
         //  we can also utilize the multi-part-upload to create an object
         //  $response = $s3->create_mpu_object($upload_data['bucket'], $upload_data['object'], array('fileUpload'=>$upload_file));
         // Validate response. On failure notify Stash API that things went wrong.
         if (!$response->isOK()) {
             pb_backupbuddy::status('details', 'Sending upload abort.');
             $request = new RequestCore($abort_url);
             $response = $request->send_request(true);
             $this_error = 'Could not upload to Stash, attempt aborted.';
             $pb_backupbuddy_destination_errors[] = $this_error;
             pb_backupbuddy::status('error', $this_error);
             return false;
         } else {
             //	pb_backupbuddy::status( 'details', 'Stash file upload speed: ' . ( $response->header['_info']['speed_upload'] / 1024 / 1024 ) . 'MB/sec. This number may be invalid for small file transfers.' );
              pb_backupbuddy::status('details', 'Stash put success. Need to notify Stash of upload completion. Details: `' . print_r($response, true) . '`.');
         }
         delete_transient('pb_backupbuddy_stashquota_' . $settings['itxapi_username']);
         // Delete quota transient since it probably has changed now.
      // Notify Stash API that things were successful.
         $done_url = $stash->get_upload_url($file, 'done', $remote_path . $backup_type_dir . basename($file));
         pb_backupbuddy::status('details', 'Notifying Stash of completed upload with done url `' . $done_url . '`.');
         $request = new RequestCore($done_url);
         $response = $request->send_request(true);
         if (!$response->isOK()) {
             $this_error = 'Error #756834682. Could not finalize Stash upload. Response code: `' . $response->get_response_code() . '`; Response body: `' . $response->get_response_body() . '`; Response headers: `' . $response->get_response_header() . '`.';
             $pb_backupbuddy_destination_errors[] = $this_error;
             pb_backupbuddy::status('error', $this_error);
             return false;
         } else {
             // Good server response.
             // See if we got an optional json response.
             $upload_data = @json_decode($response->body, true);
             if (isset($upload_data['error'])) {
                 // Some kind of error.
                 $this_error = 'Stash error(s): `' . implode(' - ', $upload_data['error']) . '`.';
                 $pb_backupbuddy_destination_errors[] = $this_error;
                 pb_backupbuddy::status('error', $this_error);
                 return false;
             }
             unset($files[$file_id]);
             // Remove from list of files we have not sent yet.
             pb_backupbuddy::status('details', 'Stash success sending file `' . basename($file) . '`. File uploaded and reported to Stash as completed.');
         }
         // Enforce archive limits if applicable.
         if ($backup_type == 'full') {
             $limit = $full_archive_limit;
             pb_backupbuddy::status('details', 'Stash full backup archive limit of `' . $limit . '` based on destination settings.');
         } elseif ($backup_type == 'db') {
             $limit = $db_archive_limit;
             pb_backupbuddy::status('details', 'Stash database backup archive limit of `' . $limit . '` based on destination settings.');
         } else {
             $limit = 0;
             pb_backupbuddy::status('error', 'Error #54854895. Stash was unable to determine backup type so archive limits NOT enforced for this backup.');
         }
         if ($limit > 0) {
             pb_backupbuddy::status('details', 'Stash archive limit enforcement beginning.');
             // S3 object for managing files.
             $s3_manage = new AmazonS3($manage_data['credentials']);
             if ($disable_ssl === true) {
                 @$s3_manage->disable_ssl(true);
             }
             // Get file listing.
             $response_manage = $s3_manage->list_objects($manage_data['bucket'], array('prefix' => $manage_data['subkey'] . $remote_path . $backup_type_dir));
             // list all the files in the subscriber account
             // Create array of backups and organize by date
             $prefix = pb_backupbuddy::$classes['core']->backup_prefix();
             // List backups associated with this site by date.
             $backups = array();
             foreach ($response_manage->body->Contents as $object) {
                 $file = str_replace($manage_data['subkey'] . $remote_path . $backup_type_dir, '', $object->Key);
                 // Stash stores files in a directory per site so no need to check prefix here! if ( false !== strpos( $file, 'backup-' . $prefix . '-' ) ) { // if backup has this site prefix...
                 $backups[$file] = strtotime($object->LastModified);
                 //}
             }
             arsort($backups);
             //error_log( 'backups: ' . print_r( $backups, true ) );
             pb_backupbuddy::status('details', 'Stash found `' . count($backups) . '` backups of this type when checking archive limits.');
             if (count($backups) > $limit) {
                 pb_backupbuddy::status('details', 'More archives (' . count($backups) . ') than limit (' . $limit . ') allows. Trimming...');
                 $i = 0;
                 $delete_fail_count = 0;
                 foreach ($backups as $buname => $butime) {
                     $i++;
                     if ($i > $limit) {
                         pb_backupbuddy::status('details', 'Trimming excess file `' . $buname . '`...');
                         $response = $s3_manage->delete_object($manage_data['bucket'], $manage_data['subkey'] . $remote_path . $backup_type_dir . $buname);
                         if (!$response->isOK()) {
                             pb_backupbuddy::status('details', 'Unable to delete excess Stash file `' . $buname . '`. Details: `' . print_r($response, true) . '`.');
                             $delete_fail_count++;
                         }
                     }
                 }
                 pb_backupbuddy::status('details', 'Finished trimming excess backups.');
                 if ($delete_fail_count !== 0) {
                     $error_message = 'Stash remote limit could not delete ' . $delete_fail_count . ' backups.';
                     pb_backupbuddy::status('error', $error_message);
                     pb_backupbuddy::$classes['core']->mail_error($error_message);
                 }
             }
             pb_backupbuddy::status('details', 'Stash completed archive limiting.');
         } else {
             pb_backupbuddy::status('details', 'No Stash archive file limit to enforce.');
         }
         // End remote backup limit
     }
     // end foreach.
     // Success if we made it this far.
     return true;
 }
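Example #13 interleaves the Stash API with S3's multipart machinery. Stripped of the WordPress scheduling and bookkeeping, the SDK 1.x multipart sequence it relies on reduces to the following sketch ($credentials, $bucket, $object, $file, and $chunk_bytes are placeholders):

 $s3 = new AmazonS3($credentials); // key, secret, token

 // 1. Start the multipart upload and keep its ID.
 $response = $s3->initiate_multipart_upload($bucket, $object, array('encryption' => 'AES256'));
 $upload_id = (string) $response->body->UploadId;

 // 2. Compute the byte ranges and send each part.
 $parts = $s3->get_multipart_counts(filesize($file), $chunk_bytes);
 foreach ($parts as $i => $part) {
     $s3->upload_part($bucket, $object, $upload_id, array(
         'fileUpload' => $file,
         'partNumber' => $i + 1,
         'seekTo'     => (int) $part['seekTo'],
         'length'     => (int) $part['length'],
     ));
 }

 // 3. Hand S3 the part list (with ETags) to finalize the object.
 $etag_parts = $s3->list_parts($bucket, $object, $upload_id);
 $s3->complete_multipart_upload($bucket, $object, $upload_id, $etag_parts);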
Example #14
<?php

/*
 * To change this template, choose Tools | Templates
 * and open the template in the editor.
 */
header("Content-type:text/plain; charset:utf-8");
include_once 'sdk.class.php';
$s3 = new AmazonS3();
$bucketname = "empresasctmbucketpruebas";
// CREATE A BUCKET
//$response = $s3->create_bucket($bucketname, $s3::REGION_US_E1);
//print_r($response);
// LIST BUCKETS
//$response = $s3->get_bucket_list();
//print_r($response);
//UPLOAD
//$response = $s3->create_object($bucketname, "PRUEBA-".date('ljS-FYh:i:sA'),
//        array(
//          'body' => "EMPTY",
//            'contentType' => 'text/plain',
//            'acl' => $s3::ACL_PUBLIC
//        ));
//print_r($response);
$response = $s3->create_object($bucketname, "PRUEBA-" . date('ljS-FYh:i:sA') . ".png", array(
    'fileUpload' => "/home/naito/Escritorio/Firsttets.png",
    'acl'        => $s3::ACL_PUBLIC,
));
print_r($response);
Example #15
function dest_s3()
{
    global $WORKING, $STATIC;
    trigger_error(sprintf(__('%d. try sending backup file to Amazon S3...', 'backwpup'), $WORKING['DEST_S3']['STEP_TRY']), E_USER_NOTICE);
    $WORKING['STEPTODO'] = 2 + filesize($STATIC['JOB']['backupdir'] . $STATIC['backupfile']);
    $WORKING['STEPDONE'] = 0;
    require_once dirname(__FILE__) . '/../libs/aws/sdk.class.php';
    need_free_memory(26214400 * 1.1);
    try {
        $s3 = new AmazonS3(array('key' => $STATIC['JOB']['awsAccessKey'], 'secret' => $STATIC['JOB']['awsSecretKey'], 'certificate_authority' => true));
        if ($s3->if_bucket_exists($STATIC['JOB']['awsBucket'])) {
            trigger_error(sprintf(__('Connected to S3 Bucket: %s', 'backwpup'), $STATIC['JOB']['awsBucket']), E_USER_NOTICE);
            //Transfer Backup to S3
            if ($STATIC['JOB']['awsrrs']) {
                //set reduced redundancy or not
                $storage = AmazonS3::STORAGE_REDUCED;
            } else {
                $storage = AmazonS3::STORAGE_STANDARD;
            }
            // set cURL progress callback
            $curlops = array();
            if (defined('CURLOPT_PROGRESSFUNCTION')) {
                $curlops = array(CURLOPT_NOPROGRESS => false, CURLOPT_PROGRESSFUNCTION => 'curl_progresscallback', CURLOPT_BUFFERSIZE => 1048576);
            }
            trigger_error(__('Upload to Amazon S3 now started... ', 'backwpup'), E_USER_NOTICE);
            //transfer file to S3
            $result = $s3->create_object($STATIC['JOB']['awsBucket'], $STATIC['JOB']['awsdir'] . $STATIC['backupfile'], array(
                'fileUpload' => $STATIC['JOB']['backupdir'] . $STATIC['backupfile'],
                'acl'        => AmazonS3::ACL_PRIVATE,
                'storage'    => $storage,
                'curlopts'   => $curlops,
            ));
            $result = (array) $result;
            if ($result["status"] >= 200 and $result["status"] < 300) {
                $WORKING['STEPTODO'] = 1 + filesize($STATIC['JOB']['backupdir'] . $STATIC['backupfile']);
                trigger_error(sprintf(__('Backup transferred to %s', 'backwpup'), $result["header"]["_info"]["url"]), E_USER_NOTICE);
                $STATIC['JOB']['lastbackupdownloadurl'] = $STATIC['WP']['ADMINURL'] . '?page=backwpupbackups&action=downloads3&file=' . $STATIC['JOB']['awsdir'] . $STATIC['backupfile'] . '&jobid=' . $STATIC['JOB']['jobid'];
                $WORKING['STEPSDONE'][] = 'DEST_S3';
                //set done
            } else {
                trigger_error(sprintf(__('Can not transfer backup to S3! (%1$d) %2$s', 'backwpup'), $result["status"], $result["Message"]), E_USER_ERROR);
            }
        } else {
            trigger_error(sprintf(__('S3 Bucket "%s" does not exist!', 'backwpup'), $STATIC['JOB']['awsBucket']), E_USER_ERROR);
        }
    } catch (Exception $e) {
        trigger_error(sprintf(__('Amazon API: %s', 'backwpup'), $e->getMessage()), E_USER_ERROR);
        return;
    }
    try {
        if ($s3->if_bucket_exists($STATIC['JOB']['awsBucket'])) {
            if ($STATIC['JOB']['awsmaxbackups'] > 0) {
                //Delete old backups
                $backupfilelist = array();
                if (($contents = $s3->list_objects($STATIC['JOB']['awsBucket'], array('prefix' => $STATIC['JOB']['awsdir']))) !== false) {
                    foreach ($contents->body->Contents as $object) {
                        $file = basename($object->Key);
                        if ($STATIC['JOB']['fileprefix'] == substr($file, 0, strlen($STATIC['JOB']['fileprefix'])) and $STATIC['JOB']['fileformart'] == substr($file, -strlen($STATIC['JOB']['fileformart']))) {
                            $backupfilelist[] = $file;
                        }
                    }
                }
                if (sizeof($backupfilelist) > 0) {
                    rsort($backupfilelist);
                    $numdeltefiles = 0;
                    for ($i = $STATIC['JOB']['awsmaxbackups']; $i < sizeof($backupfilelist); $i++) {
                        if ($s3->delete_object($STATIC['JOB']['awsBucket'], $STATIC['JOB']['awsdir'] . $backupfilelist[$i])) {
                            // delete files on S3
                            $numdeltefiles++;
                        } else {
                            trigger_error(sprintf(__('Can not delete backup on S3://%s', 'backwpup'), $STATIC['JOB']['awsBucket'] . '/' . $STATIC['JOB']['awsdir'] . $backupfilelist[$i]), E_USER_ERROR);
                        }
                    }
                    if ($numdeltefiles > 0) {
                        trigger_error(sprintf(_n('One file deleted on S3 Bucket', '%d files deleted on S3 Bucket', $numdeltefiles, 'backwpup'), $numdeltefiles), E_USER_NOTICE);
                    }
                }
            }
        }
    } catch (Exception $e) {
        trigger_error(sprintf(__('Amazon API: %s', 'backwpup'), $e->getMessage()), E_USER_ERROR);
        return;
    }
    $WORKING['STEPDONE']++;
}
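The cURL options above register a progress callback by name (curl_progresscallback) that this excerpt never defines. A minimal sketch of what such a callback could look like follows; the $WORKING bookkeeping is an assumption for illustration, not the plugin's actual implementation.

// Hypothetical sketch of the progress callback registered via
// CURLOPT_PROGRESSFUNCTION above. Before PHP 5.5 the callback receives
// four arguments: total bytes to download, bytes downloaded so far,
// total bytes to upload, and bytes uploaded so far.
function curl_progresscallback($dltotal, $dlnow, $ultotal, $ulnow)
{
    global $WORKING;
    // Track upload progress in the job's working data (assumed field).
    $WORKING['STEPDONE'] = $ulnow;
    return 0; // a non-zero return would abort the transfer
}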
Ejemplo n.º 16
0
<?php

// Initialize the AmazonS3 class
$s3 = new AmazonS3();
// Create a bucket to store a file
$response = $s3->create_bucket('my-bucket', AmazonS3::REGION_US_E1);
if (!$response->isOK()) {
    die('Error while creating the bucket');
}
$data = file_get_contents('/my/local/dir/picture.jpg');
$response = $s3->create_object('my-bucket', 'picture.jpg', array('body' => $data));
if (!$response->isOK()) {
    die('Error while storing the file');
}
echo "Il file è stato memorizzato con successo";
Ejemplo n.º 17
0
<?php

ini_set('memory_limit', '1G');
$pre = 'skip_fbapi';
include '../include/config.php';
include '../include/aws/sdk.class.php';
$s3 = new AmazonS3();
$query = $db->Raw("SELECT `id`,`link` FROM `userdb_uploads` WHERE `type`='upload' AND `server`!='s3'");
//echo count($query);
//print_r($query);
foreach ($query as $upload) {
    $id = $upload['id'];
    $split = explode("/", $upload['link']); // split() is deprecated/removed in PHP 7; explode() does the same job
    $link = "/var/www/music/users/" . $split[4] . "/" . $split[5] . "/" . basename($upload['link']);
    $file = basename($link);
    //   echo $link;
    if (file_exists($link)) {
        $response = $s3->create_object('fb-music', $file, array('fileUpload' => $link, 'acl' => AmazonS3::ACL_AUTH_READ, 'storage' => AmazonS3::STORAGE_REDUCED));
        // Only delete the local file and flip the DB record once the upload succeeded.
        if ($response->isOK()) {
            unlink($link);
            $db->Raw("UPDATE `userdb_uploads` SET `server`='s3', `link`='{$file}' WHERE `id`='{$id}'");
        }
    }
}
Ejemplo n.º 18
0
<?php

include "../sdk-1.5.6.2/sdk.class.php";
include "../s3functions.php";
$s3 = new AmazonS3();
$try = 1;
$res = "";
$sleep = 1;
do {
    $res = $s3->create_object('com.sanchitkarve.tb.usor', 'test.txt', array('body' => 'howdy', 'contentType' => 'text/plain'));
    if ($res->isOK()) {
        echo "DONE";
        break;
    }
    sleep($sleep);
    $sleep *= 2; // back off a little longer between retries
} while (++$try < 6);
print_r($res);
if (!$res->isOK()) {
    echo "NOT DONE";
}
// We won't know the total number of bytes until the upload starts, so we'll handle that in our callback.
$progress_bar = new Console_ProgressBar('* %fraction% KB [%bar%] %percent%', '=>', ' ', 100, 1);
$progress_bar->UPDATED = false;
// Register a callback function to execute when a stream is written locally.
$s3->register_streaming_read_callback('read_callback');
function read_callback($curl_handle, $file_handle, $length)
{
    // Import from global scope
    $progress_bar = $GLOBALS['progress_bar'];
    // Have we updated the format with updated information yet?
    if (!$progress_bar->UPDATED) {
        // Store the total size in local & global scope
        $_100_percent = $GLOBALS['_100_percent'] = curl_getinfo($curl_handle, CURLINFO_CONTENT_LENGTH_UPLOAD);
        // Add the Content-Length of the file as the max number of bytes.
        $progress_bar->reset('* %fraction% KB [%bar%] %percent%', '=>', ' ', 100, $_100_percent);
        $progress_bar->UPDATED = true;
    }
    // Update the progress bar with the cumulative number of bytes uploaded.
    $progress_bar->update(curl_getinfo($curl_handle, CURLINFO_SIZE_UPLOAD));
}
// Add some spacing above the progress bar.
echo PHP_EOL;
echo 'Uploading to http://' . $bucket . '.s3.amazonaws.com/big-buck-bunny.mp4' . PHP_EOL;
echo 'Reading from ' . realpath('./downloads') . '/big-buck-bunny.mp4' . PHP_EOL;
// Upload an object.
$response = $s3->create_object($bucket, 'big-buck-bunny.mp4', array('fileUpload' => './downloads/big-buck-bunny.mp4'));
// The "read" callback doesn't fire after the last bits are uploaded (it could end at 99.x%), so
// manually set the upload to 100%.
$progress_bar->update($_100_percent);
// Add some spacing below the progress bar.
echo PHP_EOL . PHP_EOL;
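The read-side hook above covers uploads; for downloads the SDK exposes a matching register_streaming_write_callback(). A minimal sketch (bucket, key, and local path are placeholders):

// Fires each time a chunk of the download is written to the local file.
$s3->register_streaming_write_callback('write_callback');
function write_callback($curl_handle, $file_handle, $length)
{
    // $length is the number of bytes just written; a real implementation
    // would feed this into a progress bar, as in the upload example above.
    echo '.';
}
$response = $s3->get_object($bucket, 'big-buck-bunny.mp4', array(
    'fileDownload' => './downloads/big-buck-bunny.mp4',
));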
Ejemplo n.º 20
0
 public function setLatinaPresenterDataFile($var_name = "presente_data", $market_id = 1)
 {
     $results = array(); // initialize so json_encode() below gets an array even if the query fails
     try {
         $db = $this->Presenter->getDataSource();
         $query = $db->fetchAll("SELECT\n\t\t\t\t\t\tp.id as presenter_id\n\t\t\t\t\t\t, p.presenter_sequence_id as presenter_sequence_id\n\t\t\t\t\t\t, geo.lat as lat\n\t\t\t\t\t\t, geo.lng as lng\n\t\t\t\t\t\t, MAX(pt.presentertypes_id) as presntertype_id\n\t\t\t\t\t\t, s.abbrev as state\n\t\t\t\t\t\t, a.city as city\n\t\t\t\t\t\tFROM presenters p\n\t\t\t\t\t\tLEFT JOIN users u on u.id=p.user_id\n\t\t\t\t\t\tLEFT JOIN addresses a on a.user_id=u.id AND a.address_type_id = 1\n\t\t\t\t\t\tLEFT JOIN address_geocodes geo on geo.address_id=a.id\n\t\t\t\t\t\tLEFT JOIN states s on s.id = a.state_id\n\t\t\t\t\t\tLEFT JOIN presenter_types as pt on pt.presenter_id = p.id\n\t\t\t\t\t\tWHERE p.presenter_status_id = " . PresenterStatus::COMPLETE . "\n\t\t\t\t\t\t\tAND p.market_id = {$market_id}\n\t\t\t\t\t\t\t AND p.default_locale = 'es_US'\n\t\t\t\t\t\tGROUP BY p.id\n\t\t\t\t\t\tORDER BY a.id DESC, geo.id DESC");
         foreach ($query as $value) {
             $results[] = array('presenter_id' => $value['p']['presenter_id'], 'lat' => $value['geo']['lat'], 'lng' => $value['geo']['lng'], 'presentertype_id' => $value['0']['presntertype_id'], 'city' => $value['a']['city'], 'state' => $value['s']['state']);
         }
     } catch (Exception $e) {
         $this->out("Error getting list of Presenters.");
     }
     //create the file
     $string = 'var ' . $var_name . ' = {"code":200,"result":';
     $string .= json_encode($results);
     $string .= "};";
     if (YOUNIQUE_TESTSERVER === true) {
         $filename = "test_latina_presenter_data_{$market_id}";
     } else {
         $filename = "latina_presenter_data_{$market_id}";
     }
     try {
         // Placeholder credentials; never hard-code real keys in source.
         $s3 = new AmazonS3(array("key" => "YOUR_AWS_ACCESS_KEY", "secret" => "YOUR_AWS_SECRET_KEY"));
         $s3->disable_ssl_verification();
         $bucket = "younique-map-data";
         if ($s3->if_bucket_exists($bucket)) {
             $result = $s3->create_object($bucket, $filename, array('body' => $string, 'contentType' => 'text/plain', 'length' => strlen($string), 'acl' => AmazonS3::ACL_PUBLIC));
             if ($result->isOK()) {
                 $this->out("Presenter map updated for market " . $market_id);
             } else {
                 $this->out("AS3 error:" . var_export($result->body->Message, true));
             }
         } else {
             $this->out("AS3 error:" . "No bucket");
         }
     } catch (Exception $e) {
         $this->out("AS3 error:" . var_export($e->getMessage(), true));
     }
 }
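Rather than passing an access key and secret inline as above, the same SDK lets credentials be configured once via CFCredentials::set() (the pattern used in example n.º 22 below); a sketch with placeholder values:

// Configure placeholder credentials once, then construct clients
// without repeating secrets at every call site.
CFCredentials::set(array(
    'development' => array(
        'key' => 'YOUR_AWS_ACCESS_KEY',
        'secret' => 'YOUR_AWS_SECRET_KEY',
        'default_cache_config' => '',
        'certificate_authority' => true,
    ),
    '@default' => 'development',
));
$s3 = new AmazonS3();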
Ejemplo n.º 21
0
<?php

require_once './sdk.class.php';
$s3 = new AmazonS3();
$bucket = 'book-bucket-' . strtolower($s3->key);
$response1 = $s3->create_object($bucket, $_FILES['uploadedfile']['name'], array('fileUpload' => $_FILES['uploadedfile']['tmp_name'], 'acl' => AmazonS3::ACL_PUBLIC, 'meta' => array('title' => $_POST['title'])));
session_start();
$author = $_SESSION['email'];
$book = $_POST['title'];
$key = $author . '/' . $book;
$domain = 'books-aalr';
$sdb = new AmazonSDB();
$response2 = $sdb->put_attributes($domain, $key, array('author' => $author, 'title' => $book));
//var_dump($_FILES);
//var_dump($response);
//var_dump($response->body)
if ($response1->isOK() && $response2->isOK()) {
    $_SESSION['upmsg'] = "Upload successful";
    header('Location: author.php');
} else {
    $_SESSION['upemsg'] = "Upload failed";
    header('Location: author.php');
}
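The handler above passes $_FILES data straight to create_object(); a hedged pre-check (plain PHP, nothing SDK-specific) would reject failed or forged uploads first:

// Bail out early if the browser upload itself failed or the temp file
// was not actually produced by this request's upload.
if (!isset($_FILES['uploadedfile'])
    || $_FILES['uploadedfile']['error'] !== UPLOAD_ERR_OK
    || !is_uploaded_file($_FILES['uploadedfile']['tmp_name'])) {
    die('File upload failed before reaching S3');
}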
Ejemplo n.º 22
0
 function amazons3_backup_bwd_comp($historyID, $args = '')
 {
     $s3StartTime = $this->iwpScriptStartTime;
     $this->backup_settings_vals = get_option('iwp_client_multi_backup_temp_values');
     $backup_settings_values = $this->backup_settings_vals;
     if (isset($backup_settings_values['s3_retrace_count']) && !empty($backup_settings_values['s3_retrace_count'])) {
         $s3_retrace_count = $backup_settings_values['s3_retrace_count'][$historyID];
     } else {
         $s3_retrace_count = 0;
     }
     //get the settings by other method
     $requestParams = $this->getRequiredData($historyID, "requestParams");
     $upload_loop_break_time = $requestParams['account_info']['upload_loop_break_time'];
     //darkcode changed
     $upload_file_block_size = $requestParams['account_info']['upload_file_block_size'];
     if ($upload_file_block_size < 5 * 1024 * 1024) {
         $upload_file_block_size = 5 * 1024 * 1024 + 1;
     }
     $del_host_file = $requestParams['args']['del_host_file'];
     $task_result = $this->getRequiredData($historyID, "taskResults");
     @set_time_limit(0);
     $this->hisID = $historyID;
     $uploadLoopCount = 0;
     $upload_id = 'start';
     $partsArray = array();
     $nextPart = 1;
     $retrace = 'notSet';
     $doComplete = false;
     if ($args == '') {
         //on the next call $args would be ''
         //set $args, $uploadid, $offset  from the DB
         $responseParams = $this->getRequiredData($historyID, "responseParams");
         if (!$responseParams) {
             return $this->statusLog($this->hisID, array('stage' => 's3Upload', 'status' => 'error', 'statusMsg' => 'S3 Upload failed: Error while fetching table data.', 'statusCode' => 's3_upload_failed_error_while_fetching_table_data'));
         }
         $args = $responseParams['s3Args'];
         $prevChunkResults = $responseParams['response_data'];
         $upload_id = $prevChunkResults['upload_id'];
         $nextPart = $prevChunkResults['nextPart'];
         $partsArray = $prevChunkResults['partsArray'];
         $current_file_num = $responseParams['current_file_num'];
         $dont_retrace = $responseParams['dont_retrace'];
         $start_new_backup = $responseParams['start_new_backup'];
     }
     if (empty($current_file_num)) {
         $current_file_num = 0;
     }
     //traceback options and setting values
     if (!$upload_id && empty($dont_retrace)) {
         if ($s3_retrace_count <= 3) {
             $args = $requestParams['secure']['account_info']['iwp_amazon_s3'];
             if ($backup_settings_values['s3_upload_id']) {
                 $upload_id = $backup_settings_values['s3_upload_id'][$historyID];
             } else {
                 return $this->statusLog($this->hisID, array('stage' => 's3Upload Retrace', 'status' => 'error', 'statusMsg' => 'S3 Upload failed: Error while fetching table data during retrace', 'statusCode' => 's3_upload_failed_error_while_fetching_table_data_during_retrace'));
             }
             $backup_file = $backup_settings_values['backup_file'];
             $retrace = 'set';
             $s3_retrace_count++;
             $backup_settings_values['s3_retrace_count'][$historyID] = $s3_retrace_count;
             update_option('iwp_client_multi_backup_temp_values', $backup_settings_values);
         } else {
             return $this->statusLog($this->hisID, array('stage' => 's3Upload', 'status' => 'error', 'statusMsg' => 'S3 upload failed: Retrace limit reached.', 'statusCode' => 's3_upload_failed_retrace_limit_reached'));
         }
     }
     if (!$this->iwp_mmb_function_exists('curl_init')) {
         return array('error' => 'You cannot use Amazon S3 on your server. Please enable curl first.', 'partial' => 1, 'error_code' => 'cannot_use_s3_enable_curl_first');
     }
     require_once $GLOBALS['iwp_mmb_plugin_dir'] . '/lib/amazon_s3_bwd_comp/sdk.class.php';
     $tempArgs = $args;
     extract($args);
     if (!is_array($backup_file)) {
         $temp_backup_file = $backup_file;
         $backup_file = array();
         $backup_file[] = $temp_backup_file;
     }
     if (is_array($backup_file)) {
         $backup_files_count = count($backup_file);
         $temp_single_file = $backup_file[$current_file_num];
         unset($backup_file);
         $backup_file = $temp_single_file;
     }
     if ($as3_site_folder == true) {
         if (!empty($as3_directory)) {
             $as3_directory .= '/' . $this->site_name;
         } else {
             $as3_directory = $this->site_name;
         }
     }
     try {
         CFCredentials::set(array('development' => array('key' => trim($as3_access_key), 'secret' => trim(str_replace(' ', '+', $as3_secure_key)), 'default_cache_config' => '', 'certificate_authority' => true, 'use_ssl' => false, 'ssl_verification' => false), '@default' => 'development'));
         $s3 = new AmazonS3();
         $cfu_obj = new CFUtilities();
          //the multiCall upload starts (darkCode starts)
         //$this->statusLog($this -> hisID, array('stage' => 'uploadingFiles', 'status' => 'partiallyCompleted', 'statusMsg' => 's3MultiCallStartsHere'));
         if (!empty($as3_directory)) {
             $as3_file = $as3_directory . '/' . basename($backup_file);
         } else {
             $as3_file = basename($backup_file);
         }
         if (iwp_mmb_get_file_size($backup_file) <= 5 * 1024 * 1024) {
             echo "<br>small backup, so using a single upload<br>";
             $response = $s3->create_object($as3_bucket, $as3_file, array('fileUpload' => $backup_file));
             if ($response->isOK()) {
                 $current_file_num += 1;
                 $resArray = array('status' => "completed", 'backupParentHID' => $historyID);
                 $result_arr = array();
                 $result_arr['status'] = 'completed';
                 $result_arr['nextFunc'] = 'amazons3_backup_over';
                 $result_arr['s3Args'] = $tempArgs;
                 $result_arr['current_file_num'] = $current_file_num;
                 $result_arr['dont_retrace'] = true;
                 $task_result['task_results'][$historyID]['amazons3'][$current_file_num - 1] = basename($backup_file);
                 $task_result['amazons3'][$current_file_num - 1] = basename($backup_file);
                 if ($current_file_num >= $backup_files_count) {
                     unset($task_result['task_results'][$historyID]['server']);
                     @unlink($backup_file);
                 } else {
                     //to continue zip split parts
                     $resArray['status'] = 'partiallyCompleted';
                     $chunkResult = array();
                     $chunkResult['partsArray'] = array();
                     $chunkResult['nextPart'] = 1;
                     $chunkResult['upload_id'] = 'start';
                     $result_arr['response_data'] = $chunkResult;
                     $result_arr['nextFunc'] = 'amazons3_backup';
                     $result_arr['status'] = 'partiallyCompleted';
                     $result_arr['start_new_backup'] = true;
                     @unlink($backup_file);
                 }
                 $this->statusLog($this->hisID, array('stage' => 's3MultiCall', 'status' => 'completed', 'statusMsg' => 'nextCall', 'nextFunc' => 'amazons3_backup', 'task_result' => $task_result, 'responseParams' => $result_arr));
                 return $resArray;
             } else {
                 return array('error' => 'Failed to upload to Amazon S3.');
             }
         }
         if ($upload_id == 'start') {
             echo "initiating multiCall upload";
             //initiate the multiPartUpload to get the uploadID from its response
             $response = $s3->initiate_multipart_upload($as3_bucket, $as3_file);
             //createMultipartUpload
             //convert the response into an array
             $response_array = $cfu_obj->convert_response_to_array($response);
             //get the uploadID
             $upload_id = $response_array['body']['UploadId'];
             //storing the uploadID in DB
             $backup_settings_values['s3_upload_id'][$historyID] = $upload_id;
             $backup_settings_values['backup_file'] = $backup_file;
             update_option('iwp_client_multi_backup_temp_values', $backup_settings_values);
         }
         //get the parts of the big file
         $parts = $s3->get_multipart_counts(iwp_mmb_get_file_size($backup_file), $upload_file_block_size);
          //parts of at least 5 MB each (see $upload_file_block_size above)
         if ($retrace == 'set') {
             $list_parts_response = $s3->list_parts($as3_bucket, $as3_file, $upload_id);
             $partsArray = CFUtilities::convert_response_to_array($list_parts_response);
              $nextPart = count($partsArray) + 1;
              $result_arr = array(); // not populated yet on the retrace path; avoid an undefined-variable notice below
              $this->statusLog($this->hisID, array('stage' => 's3MultiCall', 'status' => 'partiallyCompleted', 'statusMsg' => 'retracingValues', 'nextFunc' => 'amazons3_backup', 'task_result' => $task_result, 'responseParams' => $result_arr));
             $retrace = 'unset';
         }
         //this is the main upload loop break it on when the timeLimit is reached
         //chunk upload loop
         $partsArraySize = count($parts);
         $s3ChunkTimeTaken = 0;
         $s3ChunkCount = 0;
         $reloop = false;
         $reloopCount = 0;
         $status = '';
         do {
             $uploadLoopCount = 0;
             if ($reloopCount == 0) {
                 $s3ChunkStartTime = $s3StartTime;
             } else {
                 $s3ChunkStartTime = microtime(true);
             }
             foreach ($parts as $i => $part) {
                 $uploadLoopCount += 1;
                 if ($uploadLoopCount == $nextPart) {
                     $singleUploadResponse = $s3->upload_part($as3_bucket, $as3_file, $upload_id, array('fileUpload' => $backup_file, 'partNumber' => $i + 1, 'seekTo' => $part['seekTo'], 'length' => $part['length']));
                     $singleUploadResult = $singleUploadResponse->isOk();
                     echo "singleUploadResult - " . $singleUploadResult;
                     $singleUploadResponseArray = $cfu_obj->convert_response_to_array($singleUploadResponse);
                     /* $response = $s3->complete_multipart_upload($bucket, $filename, $upload_id, array(
                     				array('PartNumber' => 1, 'ETag' => '"25e317773f308e446cc84c503a6d1f85"'),
                     				array('PartNumber' => 2, 'ETag' => '"a6d1f85f58498973f308e446cc84c503"'),
                     				array('PartNumber' => 3, 'ETag' => '"bed3c0a4a1407f584989b4009e9ce33f"'),
                     			)); */
                     $nextPart = $uploadLoopCount;
                     $partsArray[$i + 1]['PartNumber'] = $i + 1;
                     $partsArray[$i + 1]['ETag'] = $singleUploadResponseArray['header']['etag'];
                     $chunkResult = array();
                     $chunkResult['partsArray'] = $partsArray;
                     $chunkResult['nextPart'] = $nextPart + 1;
                     $chunkResult['upload_id'] = $upload_id;
                     $nextPart = $nextPart + 1;
                     $backup_settings_values['s3_retrace_count'][$historyID] = 0;
                     update_option('iwp_client_multi_backup_temp_values', $backup_settings_values);
                     $status = 'partiallyCompleted';
                     if ($nextPart == $partsArraySize + 1) {
                         $doComplete = true;
                         $status = 'completed';
                     }
                     $result_arr = array();
                     $result_arr['response_data'] = $chunkResult;
                     $result_arr['status'] = $status;
                     $result_arr['nextFunc'] = 'amazons3_backup';
                     $result_arr['s3Args'] = $tempArgs;
                     $result_arr['current_file_num'] = $current_file_num;
                     $task_result['task_results'][$historyID]['amazons3'][$current_file_num] = basename($backup_file);
                     $task_result['amazons3'][$current_file_num] = basename($backup_file);
                     $this->statusLog($this->hisID, array('stage' => 's3MultiCall', 'status' => 'completed', 'statusMsg' => 'nextCall', 'nextFunc' => 'amazons3_backup', 'task_result' => $task_result, 'responseParams' => $result_arr));
                     $resArray = array('status' => $status, 'backupParentHID' => $historyID);
                     /* $resArray = array (
                     			  'status' => 'completed',
                     			  'backupParentHID' => $historyID,
                     			); */
                     break;
                     //return $resArray;
                     //exit;
                 } else {
                     if ($nextPart == $partsArraySize + 1) {
                         $doComplete = true;
                         break;
                     }
                 }
             }
             if ($doComplete) {
                 // complete the multipart upload
                 $response = $s3->complete_multipart_upload($as3_bucket, $as3_file, $upload_id, $partsArray);
                 if ($response->isOK() != true) {
                     $response = $s3->abort_multipart_upload($as3_bucket, $as3_file, $upload_id);
                 }
                 $response_array = $cfu_obj->convert_response_to_array($response);
                 $current_file_num += 1;
                 $result_arr = array();
                 $result_arr['response_data'] = $chunkResult;
                 $result_arr['status'] = 'completed';
                 $result_arr['nextFunc'] = 'amazons3_backup_over';
                 $result_arr['s3Args'] = $tempArgs;
                 $result_arr['dont_retrace'] = true;
                 $result_arr['current_file_num'] = $current_file_num;
                 $resArray = array('status' => 'completed', 'backupParentHID' => $historyID);
                 if ($current_file_num >= $backup_files_count) {
                     $task_result['task_results'][$historyID]['amazons3'][$current_file_num - 1] = basename($backup_file);
                     $task_result['amazons3'][$current_file_num - 1] = basename($backup_file);
                     unset($task_result['task_results'][$historyID]['server']);
                 } else {
                     //to continue zip split parts
                     $status = 'partiallyCompleted';
                     $chunkResult = array();
                     $chunkResult['partsArray'] = array();
                     $chunkResult['nextPart'] = 1;
                     $chunkResult['upload_id'] = 'start';
                     $result_arr['response_data'] = $chunkResult;
                     $result_arr['status'] = 'partiallyCompleted';
                     $result_arr['nextFunc'] = 'amazons3_backup';
                     $result_arr['start_new_backup'] = true;
                     $resArray['status'] = 'partiallyCompleted';
                 }
                 $this->statusLog($this->hisID, array('stage' => 's3MultiCall', 'status' => 'completed', 'statusMsg' => 'finalCall', 'nextFunc' => 'amazons3_backup', 'task_result' => $task_result, 'responseParams' => $result_arr));
                 $upload = $response->isOk();
             }
             //check time
             $s3ChunkEndTime = microtime(true);
             $s3ChunkTimeTaken = $s3ChunkEndTime - $s3ChunkStartTime + $s3ChunkTimeTaken / ($reloopCount + 1);
             $s3EndTime = microtime(true);
             $s3TimeTaken = $s3EndTime - $s3StartTime;
             $s3TimeLeft = $upload_loop_break_time - $s3TimeTaken;
             $s3TimeLeft = $s3TimeLeft - 5;
             //for safe timeLimit
             if (!empty($chunkResult['nextPart'])) {
                 echo 'parts' . $chunkResult['nextPart'];
             }
             echo " s3TimeTaken " . $s3TimeTaken;
             $s3UploadedSize = $uploadLoopCount * 5;
             echo " s3 approx file size written " . $s3UploadedSize;
             iwp_mmb_print_flush("s3loop");
             echo " s3TimeLeft " . $s3TimeLeft;
             echo " s3ChunkTimeTaken " . $s3ChunkTimeTaken;
             if ($s3TimeLeft <= $s3ChunkTimeTaken || !$singleUploadResult || $doComplete) {
                 $reloop = false;
                 echo "reloop stopped";
             } else {
                 $reloop = true;
                 $reloopCount++;
             }
         } while ($reloop);
         if (!$doComplete) {
             return $resArray;
         }
         if ($doComplete && $upload) {
             $status = 'completed';
             iwp_mmb_print_flush('Amazon S3 upload: End');
             if ($status == 'completed') {
                 //file verification
                 //checking file size and comparing
                 //getting the hash value
                 $partArrayLength = count($partsArray);
                 $verificationResult = $this->postUploadVerification($s3, $backup_file, $as3_file, $type = "amazons3", $as3_bucket);
                 if (!$verificationResult) {
                     return $this->statusLog($historyID, array('stage' => 'uploadAmazons3', 'status' => 'error', 'statusMsg' => 'S3 verification failed: File may be corrupted.', 'statusCode' => 'docomplete_S3_verification_failed_file_may_be_corrupted'));
                 }
                 if ($del_host_file) {
                     @unlink($backup_file);
                 }
             }
             return $resArray;
         } else {
             return array('error' => 'Failed to upload to Amazon S3. Please check your details and set upload/delete permissions on your bucket.', 'partial' => 1, 'error_code' => 'failed_to_upload_to_s3_check_your_details_and_set_upload_delete_permissions_on_your_bucket');
         }
     } catch (Exception $e) {
         $err = $e->getMessage();
         if ($err) {
             return array('error' => 'Failed to upload to AmazonS3 (' . $err . ').', 'error_code' => 'failed_to_upload_s3_err');
         } else {
             return array('error' => 'Failed to upload to Amazon S3.', 'error_code' => 'failed_to_upload_s3');
         }
     }
 }
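Stripped of the plugin's retrace and scheduling bookkeeping, the multipart flow this function drives reduces to four SDK calls; a condensed sketch (bucket, key, and file are placeholders, and the error handling is minimal):

// 1. Start the multipart upload and keep the upload ID.
$response  = $s3->initiate_multipart_upload($bucket, $key);
$upload_id = (string) $response->body->UploadId;

// 2. Compute parts of at least 5 MB and upload each one in turn.
$parts = $s3->get_multipart_counts(filesize($file), 5 * 1024 * 1024 + 1);
foreach ($parts as $i => $part) {
    $s3->upload_part($bucket, $key, $upload_id, array(
        'fileUpload' => $file,
        'partNumber' => $i + 1,
        'seekTo'     => $part['seekTo'],
        'length'     => $part['length'],
    ));
}

// 3. Hand S3 the part list to stitch the object together; abort on
//    failure so incomplete parts do not keep accruing storage charges.
$done = $s3->complete_multipart_upload($bucket, $key, $upload_id, $s3->list_parts($bucket, $key, $upload_id));
if (!$done->isOK()) {
    $s3->abort_multipart_upload($bucket, $key, $upload_id);
}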
Ejemplo n.º 23
0
 public function moveEpisodeFileToAmazon()
 {
     if (!$this->getAudioFile()) {
         throw new Exception("No local file to upload!");
     }
     $file_location = ProjectConfiguration::getEpisodeAudioFileLocalDirectory();
     $filename = $file_location . $this->getAudioFile();
     if (!file_exists($filename)) {
         throw new Exception("No local file to upload!");
     }
     ProjectConfiguration::registerAws();
     $s3 = new AmazonS3();
     $bucket = $this->getSubreddit()->getBucketName();
     if ($s3->if_bucket_exists($bucket)) {
         $nice_filename = $this->getNiceFilename();
         while ($s3->if_object_exists($bucket, $nice_filename)) {
             $nice_filename = $nice_filename . rand(0, 1000);
             $this->setNiceFilename($nice_filename);
         }
         $response = $s3->create_object($bucket, $this->getNiceFilename(), array('fileUpload' => $file_location . $this->getAudioFile(), 'acl' => AmazonS3::ACL_PUBLIC));
         if ($response->isOK()) {
             //$this->setRemoteUrl($s3->get_object_url($bucket,
             //                                        $this->getNiceFilename()));
             $this->setRemoteUrl('http://' . $this->getSubreddit()->getCfDomainName() . '/' . urlencode($this->getNiceFilename()));
             $this->deleteLocalFile($this->getAudioFile());
         }
     } else {
         throw new Exception("Amazon bucket '{$bucket}' does not exist!");
     }
     $this->setFileIsRemote(true);
 }
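The collision loop above appends a random number after the file extension ("episode.mp3" becomes "episode.mp3417"); a hedged alternative that keeps the extension intact:

// Insert the random suffix before the extension instead of after it.
$info = pathinfo($nice_filename);
$base = $info['filename'];
$ext  = isset($info['extension']) ? '.' . $info['extension'] : '';
while ($s3->if_object_exists($bucket, $nice_filename)) {
    $nice_filename = $base . '-' . rand(0, 1000) . $ext;
}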
Ejemplo n.º 24
0
 /**
  * @param $job_object
  * @return bool
  */
 public function job_run_archive(&$job_object)
 {
     $job_object->substeps_todo = 2 + $job_object->backup_filesize;
     $job_object->log(sprintf(__('%d. Trying to send backup file to S3 Service&#160;&hellip;', 'backwpup'), $job_object->steps_data[$job_object->step_working]['STEP_TRY']), E_USER_NOTICE);
     try {
         $s3 = new AmazonS3(array('key' => $job_object->job['s3accesskey'], 'secret' => BackWPup_Encryption::decrypt($job_object->job['s3secretkey']), 'certificate_authority' => TRUE));
         $base_url = $this->get_s3_base_url($job_object->job['s3region'], $job_object->job['s3base_url']);
         if (stristr($base_url, 'amazonaws.com')) {
             $s3->set_region(str_replace(array('http://', 'https://'), '', $base_url));
         } else {
             $s3->set_hostname(str_replace(array('http://', 'https://'), '', $base_url));
             $s3->allow_hostname_override(FALSE);
             if (substr($base_url, -1) == '/') {
                 $s3->enable_path_style(TRUE);
             }
         }
         if (stristr($base_url, 'http://')) {
             $s3->disable_ssl();
         }
         if ($s3->if_bucket_exists($job_object->job['s3bucket'])) {
             $job_object->log(sprintf(__('Connected to S3 Bucket "%1$s" in %2$s', 'backwpup'), $job_object->job['s3bucket'], $base_url), E_USER_NOTICE);
         } else {
             $job_object->log(sprintf(__('S3 Bucket "%s" does not exist!', 'backwpup'), $job_object->job['s3bucket']), E_USER_ERROR);
             return TRUE;
         }
         //transfer file to S3
         $job_object->log(__('Starting upload to S3 Service&#160;&hellip;', 'backwpup'), E_USER_NOTICE);
         //Transfer Backup to S3
         if ($job_object->job['s3storageclass'] == 'REDUCED_REDUNDANCY') {
             //set reduced redundancy or not
             $storage = AmazonS3::STORAGE_REDUCED;
         } else {
             $storage = AmazonS3::STORAGE_STANDARD;
         }
         if (empty($job_object->job['s3ssencrypt'])) {
             $job_object->job['s3ssencrypt'] = NULL;
         }
         //set progress bar
         $s3->register_streaming_read_callback(array($job_object, 'curl_read_callback'));
         $result = $s3->create_object($job_object->job['s3bucket'], $job_object->job['s3dir'] . $job_object->backup_file, array('fileUpload' => $job_object->backup_folder . $job_object->backup_file, 'acl' => AmazonS3::ACL_PRIVATE, 'storage' => $storage, 'encryption' => $job_object->job['s3ssencrypt']));
         if ($result->status >= 200 and $result->status < 300) {
             $job_object->substeps_done = 1 + $job_object->backup_filesize;
             $job_object->log(sprintf(__('Backup transferred to %s.', 'backwpup'), $this->get_s3_base_url($job_object->job['s3region'], $job_object->job['s3base_url']) . '/' . $job_object->job['s3bucket'] . '/' . $job_object->job['s3dir'] . $job_object->backup_file), E_USER_NOTICE);
             if (!empty($job_object->job['jobid'])) {
                 BackWPup_Option::update($job_object->job['jobid'], 'lastbackupdownloadurl', network_admin_url('admin.php') . '?page=backwpupbackups&action=downloads3&file=' . $job_object->job['s3dir'] . $job_object->backup_file . '&jobid=' . $job_object->job['jobid']);
             }
         } else {
             $job_object->log(sprintf(__('Cannot transfer backup to S3! (%1$d) %2$s', 'backwpup'), $result->status, $result->body), E_USER_ERROR);
         }
     } catch (Exception $e) {
         $job_object->log(E_USER_ERROR, sprintf(__('S3 Service API: %s', 'backwpup'), htmlentities($e->getMessage())), $e->getFile(), $e->getLine());
         return FALSE;
     }
     try {
         $backupfilelist = array();
         $filecounter = 0;
         $files = array();
         $objects = $s3->list_objects($job_object->job['s3bucket'], array('prefix' => $job_object->job['s3dir']));
         if (is_object($objects)) {
             foreach ($objects->body->Contents as $object) {
                 $file = basename((string) $object->Key);
                 $changetime = strtotime((string) $object->LastModified) + get_option('gmt_offset') * 3600;
                 if ($job_object->is_backup_archive($file)) {
                     $backupfilelist[$changetime] = $file;
                 }
                 $files[$filecounter]['folder'] = $this->get_s3_base_url($job_object->job['s3region'], $job_object->job['s3base_url']) . '/' . $job_object->job['s3bucket'] . '/' . dirname((string) $object->Key);
                 $files[$filecounter]['file'] = (string) $object->Key;
                 $files[$filecounter]['filename'] = basename($object->Key);
                 $files[$filecounter]['downloadurl'] = network_admin_url('admin.php') . '?page=backwpupbackups&action=downloads3&file=' . (string) $object->Key . '&jobid=' . $job_object->job['jobid'];
                 $files[$filecounter]['filesize'] = (int) $object->Size;
                 $files[$filecounter]['time'] = $changetime;
                 $filecounter++;
             }
         }
         if ($job_object->job['s3maxbackups'] > 0 && is_object($s3)) {
             //Delete old backups
             if (count($backupfilelist) > $job_object->job['s3maxbackups']) {
                 ksort($backupfilelist);
                 $numdeltefiles = 0;
                 while ($file = array_shift($backupfilelist)) {
                     if (count($backupfilelist) < $job_object->job['s3maxbackups']) {
                         break;
                     }
                     //delete files on S3
                     $delete_s3 = $s3->delete_object($job_object->job['s3bucket'], $job_object->job['s3dir'] . $file);
                     if ($delete_s3) {
                         foreach ($files as $key => $filedata) {
                             if ($filedata['file'] == $job_object->job['s3dir'] . $file) {
                                 unset($files[$key]);
                             }
                         }
                         $numdeltefiles++;
                     } else {
                         $job_object->log(sprintf(__('Cannot delete backup from %s.', 'backwpup'), $this->get_s3_base_url($job_object->job['s3region'], $job_object->job['s3base_url']) . '/' . $job_object->job['s3bucket'] . '/' . $job_object->job['s3dir'] . $file), E_USER_ERROR);
                     }
                 }
                 if ($numdeltefiles > 0) {
                     $job_object->log(sprintf(_n('One file deleted on S3 Bucket.', '%d files deleted on S3 Bucket', $numdeltefiles, 'backwpup'), $numdeltefiles), E_USER_NOTICE);
                 }
             }
         }
         set_site_transient('backwpup_' . $job_object->job['jobid'] . '_s3', $files, 60 * 60 * 24 * 7);
     } catch (Exception $e) {
         $job_object->log(E_USER_ERROR, sprintf(__('S3 Service API: %s', 'backwpup'), htmlentities($e->getMessage())), $e->getFile(), $e->getLine());
         return FALSE;
     }
     $job_object->substeps_done = 2 + $job_object->backup_filesize;
     return TRUE;
 }
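job_run_archive() registers array($job_object, 'curl_read_callback') as the SDK's streaming read callback, which (as example n.º 18 shows) is invoked with the cURL handle, the file handle, and the number of bytes just read. A hypothetical sketch of such a method, assuming it only advances the job's substeps counter:

// Hypothetical sketch; the real plugin method may do more than this.
public function curl_read_callback($curl_handle, $file_handle, $length)
{
    // Count the bytes of the backup file read for upload so far, so the
    // substeps_done/substeps_todo pair reflects transfer progress.
    $this->substeps_done += (int) $length;
}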
Ejemplo n.º 25
0
 function amazons3_backup_bwd_comp($args)
 {
     if ($this->iwp_mmb_function_exists('curl_init')) {
         require_once $GLOBALS['iwp_mmb_plugin_dir'] . '/lib/amazon_s3_bwd_comp/sdk.class.php';
         extract($args);
         if ($as3_site_folder == true) {
             if (!empty($as3_directory)) {
                 $as3_directory .= '/' . $this->site_name;
             } else {
                 $as3_directory = $this->site_name;
             }
         }
         if (empty($as3_directory)) {
             $as3_file = basename($backup_file);
         } else {
             $as3_file = $as3_directory . '/' . basename($backup_file);
         }
         try {
             CFCredentials::set(array('development' => array('key' => trim($as3_access_key), 'secret' => trim(str_replace(' ', '+', $as3_secure_key)), 'default_cache_config' => '', 'certificate_authority' => true, 'use_ssl' => false, 'ssl_verification' => false), '@default' => 'development'));
             $s3 = new AmazonS3();
             $response = $s3->create_object($as3_bucket, $as3_file, array('fileUpload' => $backup_file));
             $upload = $response->isOk();
             if ($upload) {
                 return true;
             } else {
                 return array('error' => 'Failed to upload to Amazon S3. Please check your details and set upload/delete permissions on your bucket.', 'error_code' => 'upload_failed_to_S3_check_your_details_and_set_upload_delete_permissions_on_your_bucket', 'partial' => 1);
             }
         } catch (Exception $e) {
             $err = $e->getMessage();
             if ($err) {
                 return array('error' => 'Failed to upload to AmazonS3 (' . $err . ').', 'error_code' => 'failed_upload_s3_with_error');
             } else {
                 return array('error' => 'Failed to upload to Amazon S3.', 'error_code' => 'failed_upload_s3');
             }
         }
     } else {
         return array('error' => 'You cannot use Amazon S3 on your server. Please enable curl first.', 'error_code' => 'you_cannot_use_S3_on_your_server_enable_curl', 'partial' => 1);
     }
 }
Ejemplo n.º 26
0
 public static function send($settings = array(), $files = array(), $send_id = '', $clear_uploads = false)
 {
     global $pb_backupbuddy_destination_errors;
     if (!is_array($files)) {
         $files = array($files);
     }
     if ($clear_uploads === false) {
         // Uncomment the following line to override and always clear.
         //$clear_uploads = true;
     }
     $itxapi_username = $settings['itxapi_username'];
     $itxapi_password = $settings['itxapi_password'];
     $db_archive_limit = $settings['db_archive_limit'];
     $full_archive_limit = $settings['full_archive_limit'];
     $files_archive_limit = $settings['files_archive_limit'];
     $max_chunk_size = $settings['max_chunk_size'];
     $remote_path = self::get_remote_path($settings['directory']);
      // Has leading and trailing slashes.
     if ($settings['ssl'] == '0') {
         $disable_ssl = true;
     } else {
         $disable_ssl = false;
     }
     $multipart_id = $settings['_multipart_id'];
     $multipart_counts = $settings['_multipart_counts'];
     pb_backupbuddy::status('details', 'Stash remote path set to `' . $remote_path . '`.');
     require_once dirname(__FILE__) . '/lib/class.itx_helper.php';
     require_once dirname(dirname(__FILE__)) . '/_s3lib/aws-sdk/sdk.class.php';
     // Stash API talk.
     $stash = new ITXAPI_Helper(pb_backupbuddy_destination_stash::ITXAPI_KEY, pb_backupbuddy_destination_stash::ITXAPI_URL, $itxapi_username, $itxapi_password);
     $manage_data = pb_backupbuddy_destination_stash::get_manage_data($settings);
     if (!is_array($manage_data['credentials'])) {
         pb_backupbuddy::status('error', 'Error #8484383b: Your authentication credentials for Stash failed. Verify your login and password to Stash. You may need to update the Stash destination settings. Perhaps you recently changed your password?');
         return false;
     }
     // Wipe all current uploads.
     if ($clear_uploads === true) {
         pb_backupbuddy::status('details', 'Clearing any current uploads via Stash call to `abort-all`.');
         $abort_url = $stash->get_upload_url(null, 'abort-all');
         $request = new RequestCore($abort_url);
         $response = $request->send_request(true);
     }
     // Process multipart transfer that we already initiated in a previous PHP load.
     if ($multipart_id != '') {
         // Multipart upload initiated and needs parts sent.
         // Create S3 instance.
         pb_backupbuddy::status('details', 'Creating Stash S3 instance.');
         $s3 = new AmazonS3($settings['_multipart_upload_data']['credentials']);
         // the key, secret, token
         if ($disable_ssl === true) {
             @$s3->disable_ssl(true);
         }
         pb_backupbuddy::status('details', 'Stash S3 instance created.');
         $backup_type = str_replace('/', '', $settings['_multipart_backup_type_dir']);
         // For use later by file limiting.
         $this_part_number = $settings['_multipart_partnumber'] + 1;
         pb_backupbuddy::status('details', 'Stash beginning upload of part `' . $this_part_number . '` of `' . count($settings['_multipart_counts']) . '` parts of file `' . $settings['_multipart_file'] . '` with multipart ID `' . $settings['_multipart_id'] . '`.');
         $response = $s3->upload_part($settings['_multipart_upload_data']['bucket'], $settings['_multipart_upload_data']['object'], $settings['_multipart_id'], array('expect' => '100-continue', 'fileUpload' => $settings['_multipart_file'], 'partNumber' => $this_part_number, 'seekTo' => (int) $settings['_multipart_counts'][$settings['_multipart_partnumber']]['seekTo'], 'length' => (int) $settings['_multipart_counts'][$settings['_multipart_partnumber']]['length']));
         if (!$response->isOK()) {
             $this_error = 'Stash unable to upload file part for multipart upload `' . $settings['_multipart_id'] . '`. Details: `' . print_r($response, true) . '`.';
             $pb_backupbuddy_destination_errors[] = $this_error;
             pb_backupbuddy::status('error', $this_error);
             return false;
         } else {
             $uploaded_size = $response->header['_info']['size_upload'];
             $uploaded_speed = $response->header['_info']['speed_upload'];
             pb_backupbuddy::status('details', 'Uploaded size: ' . pb_backupbuddy::$format->file_size($uploaded_size) . ', Speed: ' . pb_backupbuddy::$format->file_size($uploaded_speed) . '/sec.');
         }
         // Load fileoptions to the send.
         pb_backupbuddy::status('details', 'About to load fileoptions data.');
         require_once pb_backupbuddy::plugin_path() . '/classes/fileoptions.php';
         $fileoptions_obj = new pb_backupbuddy_fileoptions(backupbuddy_core::getLogDirectory() . 'fileoptions/send-' . $send_id . '.txt', $read_only = false, $ignore_lock = false, $create_file = false);
         if (true !== ($result = $fileoptions_obj->is_ok())) {
             pb_backupbuddy::status('error', __('Fatal Error #9034.2344848. Unable to access fileoptions data.', 'it-l10n-backupbuddy') . ' Error: ' . $result);
             return false;
         }
         pb_backupbuddy::status('details', 'Fileoptions data loaded.');
         $fileoptions =& $fileoptions_obj->options;
         $update_status = 'Sent part ' . $this_part_number . ' of ' . count($settings['_multipart_counts']) . '.';
         // Made it here so success sending part. Increment for next part to send.
         $settings['_multipart_partnumber']++;
         if (!isset($settings['_multipart_counts'][$settings['_multipart_partnumber']])) {
             // No more parts exist for this file. Tell S3 the multipart upload is complete and move on.
             pb_backupbuddy::status('details', 'Stash getting parts with etags to notify S3 of completed multipart send.');
             $etag_parts = $s3->list_parts($settings['_multipart_upload_data']['bucket'], $settings['_multipart_upload_data']['object'], $settings['_multipart_id']);
             pb_backupbuddy::status('details', 'Stash got parts list. Notifying S3 of multipart upload completion.');
             $response = $s3->complete_multipart_upload($settings['_multipart_upload_data']['bucket'], $settings['_multipart_upload_data']['object'], $settings['_multipart_id'], $etag_parts);
             if (!$response->isOK()) {
                 $this_error = 'Stash unable to notify S3 of completion of all parts for multipart upload `' . $settings['_multipart_id'] . '`.';
                 $pb_backupbuddy_destination_errors[] = $this_error;
                 pb_backupbuddy::status('error', $this_error);
                 return false;
             } else {
                 pb_backupbuddy::status('details', 'Stash notified S3 of multipart completion.');
             }
             $backup_type_dir = $settings['_multipart_backup_type_dir'];
              // Notify Stash API that things were successful.
             $done_url = $stash->get_upload_url($settings['_multipart_file'], 'done', $remote_path . $backup_type_dir . basename($settings['_multipart_file']));
             pb_backupbuddy::status('details', 'Notifying Stash of completed multipart upload with done url `' . $done_url . '`.');
             $request = new RequestCore($done_url);
             $response = $request->send_request(true);
             if (!$response->isOK()) {
                 $this_error = 'Error #756834682. Could not finalize Stash upload. Response code: `' . $response->get_response_code() . '`; Response body: `' . $response->get_response_body() . '`; Response headers: `' . $response->get_response_header() . '`.';
                 $pb_backupbuddy_destination_errors[] = $this_error;
                 pb_backupbuddy::status('error', $this_error);
                 return false;
             } else {
                 // Good server response.
                 // See if we got an optional json response.
                 $upload_data = @json_decode($response->body, true);
                 if (isset($upload_data['error'])) {
                     $this_error = 'Stash error(s): `' . implode(' - ', $upload_data['error']) . '`.';
                     $pb_backupbuddy_destination_errors[] = $this_error;
                     pb_backupbuddy::status('error', $this_error);
                     return false;
                 }
                 pb_backupbuddy::status('details', 'Stash success sending file `' . basename($settings['_multipart_file']) . '`. File uploaded via multipart across `' . $this_part_number . '` parts and reported to Stash as completed.');
             }
             pb_backupbuddy::status('details', 'Stash has no more parts left for this multipart upload. Clearing multipart instance variables.');
             $settings['_multipart_partnumber'] = 0;
             $settings['_multipart_id'] = '';
             $settings['_multipart_file'] = '';
             $settings['_multipart_upload_data'] = array();
             $settings['_multipart_transferspeeds'][] = $uploaded_speed;
             // Overall upload speed average.
             $uploaded_speed = array_sum($settings['_multipart_transferspeeds']) / count($settings['_multipart_counts']);
             pb_backupbuddy::status('details', 'Upload speed average of all chunks: `' . pb_backupbuddy::$format->file_size($uploaded_speed) . '`.');
             $settings['_multipart_counts'] = array();
             // Update stats.
             $fileoptions['_multipart_status'] = $update_status;
             $fileoptions['finish_time'] = time();
             $fileoptions['status'] = 'success';
             if (isset($uploaded_speed)) {
                 $fileoptions['write_speed'] = $uploaded_speed;
             }
             $fileoptions_obj->save();
             unset($fileoptions);
         }
         delete_transient('pb_backupbuddy_stashquota_' . $settings['itxapi_username']);
         // Delete quota transient since it probably has changed now.
         // Schedule to continue if anything is left to upload for this multipart of any individual files.
         if ($settings['_multipart_id'] != '' || count($files) > 0) {
             pb_backupbuddy::status('details', 'Stash multipart upload has more parts left. Scheduling next part send.');
             $schedule_result = backupbuddy_core::schedule_single_event(time(), pb_backupbuddy::cron_tag('destination_send'), array($settings, $files, $send_id));
             if (true === $schedule_result) {
                 pb_backupbuddy::status('details', 'Next Stash chunk step cron event scheduled.');
             } else {
                  pb_backupbuddy::status('error', 'Next Stash chunk step cron event FAILED to be scheduled.');
             }
             spawn_cron(time() + 150);
             // Adds > 60 seconds to get around once per minute cron running limit.
             update_option('_transient_doing_cron', 0);
             // Prevent cron-blocking for next item.
             return array($settings['_multipart_id'], 'Sent part ' . $this_part_number . ' of ' . count($settings['_multipart_counts']) . ' parts.');
         }
     }
     // end if multipart continuation.
     require_once pb_backupbuddy::plugin_path() . '/classes/fileoptions.php';
     // Upload each file.
     foreach ($files as $file_id => $file) {
         // Determine backup type directory (if zip).
         $backup_type_dir = '';
         $backup_type = '';
         if (stristr($file, '.zip') !== false) {
             // If a zip try to determine backup type.
             pb_backupbuddy::status('details', 'Stash: Zip file. Detecting backup type if possible.');
             $serial = backupbuddy_core::get_serial_from_file($file);
             // See if we can get backup type from fileoptions data.
             $backup_options = new pb_backupbuddy_fileoptions(backupbuddy_core::getLogDirectory() . 'fileoptions/' . $serial . '.txt', $read_only = true, $ignore_lock = true);
             if (true !== ($result = $backup_options->is_ok())) {
                 pb_backupbuddy::status('error', 'Unable to open fileoptions file `' . backupbuddy_core::getLogDirectory() . 'fileoptions/' . $serial . '.txt' . '`.');
             } else {
                 if (isset($backup_options->options['integrity']['detected_type'])) {
                     pb_backupbuddy::status('details', 'Stash: Detected backup type as `' . $backup_options->options['integrity']['detected_type'] . '` via integrity check data.');
                     $backup_type_dir = $backup_options->options['integrity']['detected_type'] . '/';
                     $backup_type = $backup_options->options['integrity']['detected_type'];
                 }
             }
             // If still do not know backup type then attempt to deduce it from filename.
             if ($backup_type == '') {
                 if (stristr($file, '-db-') !== false) {
                     pb_backupbuddy::status('details', 'Stash: Detected backup type as `db` via filename.');
                     $backup_type_dir = 'db/';
                     $backup_type = 'db';
                 } elseif (stristr($file, '-full-') !== false) {
                     pb_backupbuddy::status('details', 'Stash: Detected backup type as `full` via filename.');
                     $backup_type_dir = 'full/';
                     $backup_type = 'full';
                 } elseif (stristr($file, '-files-') !== false) {
                     pb_backupbuddy::status('details', 'Stash: Detected backup type as `files` via filename.');
                     $backup_type_dir = 'files/';
                     $backup_type = 'files';
                 } else {
                     pb_backupbuddy::status('details', 'Stash: Could not detect backup type via integrity details nor filename.');
                 }
             }
         }
         // Interact with Stash API.
          pb_backupbuddy::status('details', 'Determining Stash upload URL for `' . $file . '` with destination remote path `' . $remote_path . $backup_type_dir . basename($file) . '`.');
         $upload_url = $stash->get_upload_url($file, 'request', $remote_path . $backup_type_dir . basename($file));
         pb_backupbuddy::status('details', 'Determined upload url: `' . $upload_url . '`.');
         $request = new RequestCore($upload_url);
         pb_backupbuddy::status('details', 'Sending Stash API request.');
         $response = $request->send_request(true);
         // Validate response.
         if (!$response->isOK()) {
             $this_error = 'Stash request for upload credentials failed.';
             $pb_backupbuddy_destination_errors[] = $this_error;
             pb_backupbuddy::status('error', $this_error);
             return false;
         }
         if (!($upload_data = json_decode($response->body, true))) {
             $this_error = 'Stash API did not give a valid JSON response.';
             $pb_backupbuddy_destination_errors[] = $this_error;
             pb_backupbuddy::status('error', $this_error);
             return false;
         }
         if (isset($upload_data['error'])) {
             $this_error = 'Stash error(s): `' . implode(' - ', $upload_data['error']) . '`.';
             $pb_backupbuddy_destination_errors[] = $this_error;
             pb_backupbuddy::status('error', $this_error);
             return false;
         }
         // Create S3 instance.
         pb_backupbuddy::status('details', 'Creating Stash S3 instance.');
         $s3 = new AmazonS3($upload_data['credentials']);
         // the key, secret, token
         if ($disable_ssl === true) {
             @$s3->disable_ssl(true);
         }
         pb_backupbuddy::status('details', 'Stash S3 instance created.');
         // Handle chunking of file into a multipart upload (if applicable).
         $file_size = filesize($file);
         if ($max_chunk_size >= self::MINIMUM_CHUNK_SIZE && $file_size / 1024 / 1024 > $max_chunk_size) {
             // minimum chunk size is 5mb. Anything under 5mb we will not chunk.
             pb_backupbuddy::status('details', 'Stash file size of ' . pb_backupbuddy::$format->file_size($file_size) . ' exceeds max chunk size of ' . $max_chunk_size . 'MB set in settings for sending file as multipart upload.');
             // Initiate multipart upload with S3.
             pb_backupbuddy::status('details', 'Initiating Stash multipart upload.');
             $response = $s3->initiate_multipart_upload($upload_data['bucket'], $upload_data['object'], array('encryption' => 'AES256'));
             if (!$response->isOK()) {
                 $this_error = 'Stash was unable to initiate multipart upload.';
                 $pb_backupbuddy_destination_errors[] = $this_error;
                 pb_backupbuddy::status('error', $this_error);
                 return false;
             } else {
                 $upload_id = (string) $response->body->UploadId;
                 pb_backupbuddy::status('details', 'Stash initiated multipart upload with ID `' . $upload_id . '`.');
             }
             // Get chunk parts for multipart transfer.
             pb_backupbuddy::status('details', 'Stash getting multipart counts.');
             $parts = $s3->get_multipart_counts($file_size, $max_chunk_size * 1024 * 1024);
             // Size of chunks expected to be in bytes.
             $multipart_destination_settings = $settings;
             $multipart_destination_settings['_multipart_id'] = $upload_id;
             $multipart_destination_settings['_multipart_partnumber'] = 0;
             $multipart_destination_settings['_multipart_file'] = $file;
             $multipart_destination_settings['_multipart_counts'] = $parts;
             $multipart_destination_settings['_multipart_upload_data'] = $upload_data;
             $multipart_destination_settings['_multipart_backup_type_dir'] = $backup_type_dir;
              pb_backupbuddy::status('details', 'Stash multipart settings to pass: `' . print_r($multipart_destination_settings, true) . '`.');
              pb_backupbuddy::status('details', 'Stash scheduling send of next part(s).');
             backupbuddy_core::schedule_single_event(time(), pb_backupbuddy::cron_tag('destination_send'), array($multipart_destination_settings, $files, $send_id));
             spawn_cron(time() + 150);
             // Adds > 60 seconds to get around once per minute cron running limit.
             update_option('_transient_doing_cron', 0);
             // Prevent cron-blocking for next item.
             pb_backupbuddy::status('details', 'Stash scheduled send of next part(s). Done for this cycle.');
             return array($upload_id, 'Starting send of ' . count($multipart_destination_settings['_multipart_counts']) . ' parts.');
         } else {
             // did not meet chunking criteria.
             if ($max_chunk_size != '0') {
                 if ($file_size / 1024 / 1024 > self::MINIMUM_CHUNK_SIZE) {
                     pb_backupbuddy::status('details', 'File size of ' . pb_backupbuddy::$format->file_size($file_size) . ' is less than the max chunk size of ' . $max_chunk_size . 'MB; not chunking into multipart upload.');
                 } else {
                     pb_backupbuddy::status('details', 'File size of ' . pb_backupbuddy::$format->file_size($file_size) . ' is less than the minimum allowed chunk size of ' . self::MINIMUM_CHUNK_SIZE . 'MB; not chunking into multipart upload.');
                 }
             } else {
                 pb_backupbuddy::status('details', 'Max chunk size set to zero so not chunking into multipart upload.');
             }
         }
         // SEND file.
         pb_backupbuddy::status('details', 'About to put (upload) object to Stash.');
         $response = $s3->create_object($upload_data['bucket'], $upload_data['object'], array('fileUpload' => $file, 'encryption' => 'AES256'));
         // Validate response. On failure notify Stash API that things went wrong.
         if (!$response->isOK()) {
             // Send FAILED.
             pb_backupbuddy::status('details', 'Sending upload abort.');
             $request = new RequestCore($abort_url);
             $response = $request->send_request(true);
             $this_error = 'Could not upload to Stash, attempt aborted.';
             $pb_backupbuddy_destination_errors[] = $this_error;
             pb_backupbuddy::status('error', $this_error);
             return false;
         } else {
             // Send SUCCESS.
             pb_backupbuddy::status('details', 'Success uploading file to Stash storage. Notifying Stash API next. Upload details: `' . print_r($response, true) . '`.');
             $uploaded_size = $response->header['_info']['size_upload'];
             $uploaded_speed = $response->header['_info']['speed_upload'];
             pb_backupbuddy::status('details', 'Uploaded size: ' . pb_backupbuddy::$format->file_size($uploaded_size) . ', Speed: ' . pb_backupbuddy::$format->file_size($uploaded_speed) . '/sec.');
         }
      delete_transient('pb_backupbuddy_stashquota_' . $settings['itxapi_username']); // Delete quota transient since it probably has changed now.
      // Notify Stash API that things were successful.
         $done_url = $stash->get_upload_url($file, 'done', $remote_path . $backup_type_dir . basename($file));
         pb_backupbuddy::status('details', 'Notifying Stash of completed upload with done url `' . $done_url . '`.');
         $request = new RequestCore($done_url);
         $response = $request->send_request(true);
         if (!$response->isOK()) {
          $this_error = 'Error #247568834682. Could not finalize Stash upload. Response code: `' . $response->status . '`; Response body: `' . print_r($response->body, true) . '`; Response headers: `' . print_r($response->header, true) . '`.';
             $pb_backupbuddy_destination_errors[] = $this_error;
             pb_backupbuddy::status('error', $this_error);
             return false;
         } else {
             // Good server response.
             // See if we got an optional json response.
             $upload_data = @json_decode($response->body, true);
             if (isset($upload_data['error'])) {
                 // Some kind of error.
                 $this_error = 'Stash error(s): `' . implode(' - ', $upload_data['error']) . '`.';
                 $pb_backupbuddy_destination_errors[] = $this_error;
                 pb_backupbuddy::status('error', $this_error);
                 return false;
             }
              unset($files[$file_id]); // Remove from the list of files we have not sent yet.
             pb_backupbuddy::status('details', 'Stash success sending file `' . basename($file) . '`. File uploaded and reported to Stash as completed.');
             // Load destination fileoptions.
             pb_backupbuddy::status('details', 'About to load fileoptions data.');
             require_once pb_backupbuddy::plugin_path() . '/classes/fileoptions.php';
             $fileoptions_obj = new pb_backupbuddy_fileoptions(backupbuddy_core::getLogDirectory() . 'fileoptions/send-' . $send_id . '.txt', $read_only = false, $ignore_lock = false, $create_file = false);
             if (true !== ($result = $fileoptions_obj->is_ok())) {
                 pb_backupbuddy::status('error', __('Fatal Error #9034.84838. Unable to access fileoptions data.', 'it-l10n-backupbuddy') . ' Error: ' . $result);
                 return false;
             }
             pb_backupbuddy::status('details', 'Fileoptions data loaded.');
             $fileoptions =& $fileoptions_obj->options;
             // Save stats.
             if (isset($uploaded_speed)) {
                 $fileoptions['write_speed'] = $uploaded_speed;
                 $fileoptions_obj->save();
             }
             //$fileoptions['finish_time'] = time();
             //$fileoptions['status'] = 'success';
             unset($fileoptions_obj);
         }
     }
     // end foreach.
     // BEGIN FILE LIMIT PROCESSING. Enforce archive limits if applicable.
     if ($backup_type == 'full') {
         $limit = $full_archive_limit;
         pb_backupbuddy::status('details', 'Stash full backup archive limit of `' . $limit . '` of type `full` based on destination settings.');
     } elseif ($backup_type == 'db') {
         $limit = $db_archive_limit;
         pb_backupbuddy::status('details', 'Stash database backup archive limit of `' . $limit . '` of type `db` based on destination settings.');
      } elseif ($backup_type == 'files') {
          $limit = $db_archive_limit; // Files archives share the db archive limit setting here.
          pb_backupbuddy::status('details', 'Stash files backup archive limit of `' . $limit . '` of type `files` based on destination settings.');
     } else {
         $limit = 0;
         pb_backupbuddy::status('warning', 'Warning #54854895. Stash was unable to determine backup type (reported: `' . $backup_type . '`) so archive limits NOT enforced for this backup.');
     }
     if ($limit > 0) {
         pb_backupbuddy::status('details', 'Stash archive limit enforcement beginning.');
         // S3 object for managing files.
         $s3_manage = new AmazonS3($manage_data['credentials']);
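          // Note: uses the management credentials from $manage_data, which may differ from the upload credentials used above.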
         if ($disable_ssl === true) {
             @$s3_manage->disable_ssl(true);
         }
          // Get file listing of everything stored under this site's prefix in the subscriber account.
          $response_manage = $s3_manage->list_objects($manage_data['bucket'], array('prefix' => $manage_data['subkey'] . $remote_path . $backup_type_dir));
          // Build an array of this site's backups, keyed by filename and organized by date.
          $prefix = backupbuddy_core::backup_prefix();
          $backups = array();
         foreach ($response_manage->body->Contents as $object) {
             $file = str_replace($manage_data['subkey'] . $remote_path . $backup_type_dir, '', $object->Key);
              // Stash stores files in a directory per site, so there is no need to check the prefix here
              // (i.e. no need for: if ( false !== strpos( $file, 'backup-' . $prefix . '-' ) ) { ... }).
             $backups[$file] = strtotime($object->LastModified);
         }
         arsort($backups);
         pb_backupbuddy::status('details', 'Stash found `' . count($backups) . '` backups of this type when checking archive limits.');
         if (count($backups) > $limit) {
             pb_backupbuddy::status('details', 'More archives (' . count($backups) . ') than limit (' . $limit . ') allows. Trimming...');
             $i = 0;
             $delete_fail_count = 0;
             foreach ($backups as $buname => $butime) {
                 $i++;
                 if ($i > $limit) {
                     pb_backupbuddy::status('details', 'Trimming excess file `' . $buname . '`...');
                     $response = $s3_manage->delete_object($manage_data['bucket'], $manage_data['subkey'] . $remote_path . $backup_type_dir . $buname);
                     if (!$response->isOK()) {
                         pb_backupbuddy::status('details', 'Unable to delete excess Stash file `' . $buname . '`. Details: `' . print_r($response, true) . '`.');
                         $delete_fail_count++;
                     }
                 }
             }
             pb_backupbuddy::status('details', 'Finished trimming excess backups.');
             if ($delete_fail_count !== 0) {
                 $error_message = 'Stash remote limit could not delete ' . $delete_fail_count . ' backups.';
                 pb_backupbuddy::status('error', $error_message);
                 backupbuddy_core::mail_error($error_message);
             }
         }
         pb_backupbuddy::status('details', 'Stash completed archive limiting.');
     } else {
         pb_backupbuddy::status('details', 'No Stash archive file limit to enforce.');
     }
     // End remote backup limit
     if (isset($fileoptions_obj)) {
         unset($fileoptions_obj);
     }
     // END FILE LIMIT PROCESSING.
     // Success if we made it this far.
     return true;
 }
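
A stripped-down view of the SDK 1.x multipart calls the Stash example above builds on (initiate, per-part upload, completion). The bucket, key, file path, and $credentials array are placeholders, and error handling is reduced to bare isOK() checks; treat this as a sketch of the API, not the plugin's actual scheduling-driven flow:

 $s3 = new AmazonS3($credentials); // credentials array, as in the examples above
 $bucket = 'my-bucket';            // placeholder bucket name
 $key = 'backups/backup.zip';      // placeholder object key
 $file = '/tmp/backup.zip';        // placeholder local file
 // 1. Initiate the multipart upload and remember its ID.
 $response = $s3->initiate_multipart_upload($bucket, $key);
 $upload_id = (string) $response->body->UploadId;
 // 2. Split the file into parts (part size in bytes) and send each one.
 $parts = $s3->get_multipart_counts(filesize($file), 100 * 1024 * 1024);
 foreach ($parts as $i => $part) {
     $response = $s3->upload_part($bucket, $key, $upload_id, array(
         'fileUpload' => $file,
         'partNumber' => $i + 1, // part numbers are 1-based
         'seekTo'     => $part['seekTo'],
         'length'     => $part['length'],
     ));
     if (!$response->isOK()) {
         $s3->abort_multipart_upload($bucket, $key, $upload_id); // Do not leave orphaned parts behind.
         throw new Exception('part upload failed');
     }
 }
 // 3. Stitch the uploaded parts together on the S3 side.
 $s3->complete_multipart_upload($bucket, $key, $upload_id, $s3->list_parts($bucket, $key, $upload_id));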
Ejemplo n.º 27
0
 public function updateRemote($myrole, $drivers)
 {
     if ($this->_options['update'] == 'simulate') {
         $simulate = true;
         $this->_out->logWarning("only SIMULATION mode");
     } else {
         if ($this->_options['update'] === false || (int) $this->_options['update'] === 0) {
             $this->_out->logNotice("skipped, not requested and not needed");
             return;
         }
         $simulate = false;
     }
     /** @var $compare Compare_Interface */
     $compare = $drivers['compare'];
     /** @var $local Storage_Interface */
     $local = $drivers['local'];
     if (!$compare->initChangesOn("remote")) {
          // TODO: not sure yet, but we may need to handle a failed init here.
     }
     $job = $this->_out->jobStart("updating remote storage");
     $this->_out->jobSetProgressStep($job, 1000);
     foreach ($compare as $task) {
         $repeat = 3;
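          // Try each task up to 3 times before giving up.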
         do {
             $msg = "";
             try {
                 $path = $this->_getPathWithBasedir($task->path, self::ADD_BASE_DIR);
                 switch ($task->action) {
                     case Compare_Interface::CMD_MKDIR:
                         $msg = "mkdir " . $path . " into s3 bucket";
                         $this->_out->logDebug($msg);
                         if (!$simulate) {
                              // "Create" the folder by putting a zero-byte object at its path (S3 has no real directories).
                             $this->_s3->create_object($this->getBucket(), $path, array('body' => '', 'storage' => $this->_defaultRedundancyStorage));
                         }
                         break;
                     case Compare_Interface::CMD_PUT:
                         $msg = "put " . $path . " into s3 bucket";
                         $this->_out->logDebug($msg);
                         $uploadPath = $local->getBaseDir() . $task->path;
                          // Fix for Windows path-encoding issues.
                          $uploadPath = $local->convertEncodingPath($uploadPath);
                          if (!file_exists($uploadPath)) {
                              $this->_out->logError("file {$uploadPath} no longer exists locally");
                              continue 3; // Skip to the next task; a plain continue inside switch would only act like break.
                          }
                         if (!$simulate) {
                              // Keys ending in '/' (47 === ord('/')) represent empty directories.
                              if (ord(substr($path, -1)) === 47) {
                                  // Empty folders need slightly different options.
                                  $this->_out->logWarning("TODO putting empty folder {$path} ... is it possible?");
                                 $this->_s3->create_object($this->getBucket(), $path, array('body' => '', 'storage' => $this->_defaultRedundancyStorage));
                             } else {
                                 $options = array('fileUpload' => $uploadPath, 'storage' => $this->_defaultRedundancyStorage);
                                  // TODO: it should be possible to speed up small uploads by using S3 batch requests.
                                 if ($this->_options['multipart']['big-files']) {
                                     // multipart upload for big files
                                     if ($this->_options['multipart']['part-size']) {
                                         $options['partSize'] = $this->_options['multipart']['part-size'];
                                     }
                                     $this->_s3->create_mpu_object($this->getBucket(), $path, $options);
                                 } else {
                                     // normal upload
                                     $this->_s3->create_object($this->getBucket(), $path, $options);
                                 }
                             }
                         }
                         break;
                     case Compare_Interface::CMD_DELETE:
                         $msg = "deleting " . $path . " from s3 bucket";
                         $this->_out->logDebug($msg);
                         if (!$simulate) {
                             $this->_s3->delete_object($this->getBucket(), $path);
                         }
                         break;
                     case Compare_Interface::CMD_TS:
                          // Storing this information as metadata is too slow to be used:
                          //     $this->_out->logDebug("remember local timestamp for " . $path . " into s3 bucket");
                          //     if (!$simulate) {
                          //         $this->_s3->update_object(
                          //             $this->getBucket(), $path,
                          //             array('meta' => array('localts' => $task->ltime))
                          //         );
                          //     }
                         break;
                     default:
                         $this->_out->logError("ignored command {$task->action}");
                 }
                 $repeat = 0;
             } catch (Exception $e) {
                 $repeat--;
                 if ($repeat) {
                     $this->_out->logError("need to repeat: {$msg}");
                 } else {
                     if ($msg) {
                         $this->_out->logError($msg);
                     }
                      throw new Exception($e->getMessage(), $e->getCode(), $e); // Preserve the original exception as $previous.
                 }
             }
         } while ($repeat);
         if (!$simulate) {
             $compare->remoteHasDone($task);
         }
         $this->_out->jobStep($job);
     }
     $this->_out->jobEnd($job, "remote storage updated");
 }
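
For comparison with the hand-rolled flow shown after example 26, this example delegates big files to the SDK's create_mpu_object() wrapper, which runs the initiate/upload-part/complete cycle internally. A minimal sketch of that decision, with an assumed 100 MB threshold and 50 MB part size standing in for the values example 27 reads from $this->_options ($s3, $bucket, $path, and $localPath are placeholders):

 $options = array('fileUpload' => $localPath);
 if (filesize($localPath) > 100 * 1024 * 1024) {
     // Large file: let the SDK drive the multipart upload.
     $options['partSize'] = 50 * 1024 * 1024;
     $response = $s3->create_mpu_object($bucket, $path, $options);
 } else {
     // Small file: a single PUT avoids the multipart overhead.
     $response = $s3->create_object($bucket, $path, $options);
 }
 if (!$response->isOK()) {
     throw new Exception("upload of {$path} failed");
 }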