/**
 * Post-processes (uploads / moves) the next finished archive part and decides
 * whether the current engine step must be broken afterwards.
 *
 * @param \Akeeba\Engine\Archiver\Base $archiver      Archiver holding the queue of finished part files
 * @param \Akeeba\Engine\Configuration $configuration Engine configuration registry
 *
 * @return bool True when the caller must return immediately, false to carry on with packing
 */
protected function postProcessDonePartFile(\Akeeba\Engine\Archiver\Base $archiver, \Akeeba\Engine\Configuration $configuration)
{
	// Take the next finished part off the archiver's queue
	$partFile = array_shift($archiver->finishedPart);
	Factory::getLog()->log(LogLevel::INFO, 'Preparing to post process ' . basename($partFile));

	$timer     = Factory::getTimer();
	$tickStart = $timer->getRunningTime();

	// Run one post-processing step on this part and propagate its messages
	$engine = Factory::getPostprocEngine();
	$status = $engine->processPart($partFile);
	$this->propagateFromObject($engine);

	if ($status === false)
	{
		// Hard failure: log everything the engine reported and raise a warning
		Factory::getLog()->log(LogLevel::WARNING, 'Failed to process file ' . $partFile);
		Factory::getLog()->log(LogLevel::WARNING, 'Error received from the post-processing engine:');
		Factory::getLog()->log(LogLevel::WARNING, implode("\n", array_merge($this->getWarnings(), $this->getErrors())));
		$this->setWarning('Failed to process file ' . basename($partFile));
	}
	elseif ($status === true)
	{
		// Done with this part: add its size to the volatile running total
		$processedBytes = $configuration->get('volatile.engine.archiver.totalsize', 0) + (int) @filesize($partFile);
		$configuration->set('volatile.engine.archiver.totalsize', $processedBytes);

		Factory::getLog()->log(LogLevel::INFO, 'Successfully processed file ' . basename($partFile));
	}
	else
	{
		// Any other return value: the engine needs more steps for this part
		Factory::getLog()->log(LogLevel::INFO, 'More post-processing steps required for file ' . $partFile);
		$configuration->set('volatile.postproc.filename', $partFile);

		// Push the part back to the head of the archiver's queue
		array_unshift($archiver->finishedPart, $partFile);

		// Break the step when another step of the same duration would likely exceed the time budget
		$stepDuration = $timer->getRunningTime() - $tickStart;
		$configuration->set('volatile.breakflag', $timer->getTimeLeft() < $stepDuration);
	}

	// Should we delete the file afterwards? Only on full success, only when the
	// engine allows deletions and the user asked for them.
	$mayDelete = $configuration->get('engine.postproc.common.delete_after', false)
		&& $engine->allow_deletes
		&& $status === true;

	if ($mayDelete)
	{
		Factory::getLog()->log(LogLevel::DEBUG, 'Deleting already processed file ' . basename($partFile));
		Platform::getInstance()->unlink($partFile);
	}
	else
	{
		Factory::getLog()->log(LogLevel::DEBUG, 'Not removing processed file ' . $partFile);
	}

	// Some engines (e.g. upload-based ones) require a step break after each processed part
	if ($engine->break_after && $status === true)
	{
		$configuration->set('volatile.breakflag', true);

		return true;
	}

	// This is required to let the backup continue even after a post-proc failure
	$this->resetErrors();
	$this->setState('running');

	return false;
}
/**
 * Try to add some files from the $file_list into the archive
 *
 * @return boolean True if there were files packed, false otherwise
 *                 (empty filelist or fatal error)
 */
protected function pack_files()
{
	// Get a reference to the archiver and the timer classes
	$archiver      = Factory::getArchiverEngine();
	$timer         = Factory::getTimer();
	$configuration = Factory::getConfiguration();

	// If post-processing after part creation is enabled, make sure we do post-process each part before moving on
	if ($configuration->get('engine.postproc.common.after_part', 0) && !empty($archiver->finishedPart))
	{
		// A true return value means "break the step now"; bubble it up.
		if ($this->postProcessDonePartFile($archiver, $configuration))
		{
			return true;
		}
	}

	// If the archiver has work to do, make sure it finished up before continuing.
	// Calling addFile with empty arguments resumes packing of the pending file.
	if ($configuration->get('volatile.engine.archiver.processingfile', false))
	{
		Factory::getLog()->log(LogLevel::DEBUG, "Continuing file packing from previous step");
		$result = $archiver->addFile('', '', '');
		$this->propagateFromObject($archiver);
		if ($this->getError())
		{
			return false;
		}
		// If that was the last step for packing this file, mark a file done
		if (!$configuration->get('volatile.engine.archiver.processingfile', false))
		{
			$this->progressMarkFileDone();
		}
	}

	// Did it finish, or does it have more work to do?
	if ($configuration->get('volatile.engine.archiver.processingfile', false))
	{
		// More work to do. Let's just tell our parent that we finished up successfully.
		return true;
	}

	// Normal file backup loop; we keep on processing the file list, packing files as we go.
	if (count($this->file_list) == 0)
	{
		// No files left to pack. Return true and let the engine loop
		$this->progressMarkFolderDone();

		return true;
	}
	else
	{
		Factory::getLog()->log(LogLevel::DEBUG, "Packing files");
		$packedSize    = 0;
		$numberOfFiles = 0;
		// Wall-clock start of this packing operation (seconds, microsecond resolution)
		list($usec, $sec) = explode(" ", microtime());
		$opStartTime        = (double) $usec + (double) $sec;
		$largeFileThreshold = Factory::getConfiguration()->get('engine.scan.common.largefile', 10485760);

		while (count($this->file_list) > 0)
		{
			$file = @array_shift($this->file_list);
			$size = 0;
			if (file_exists($file))
			{
				$size = @filesize($file);
			}

			// Anticipatory file size algorithm: never applied to the very first
			// file of this operation ($numberOfFiles > 0), so a single huge file
			// still gets a full step to itself.
			if ($numberOfFiles > 0 && $size > $largeFileThreshold)
			{
				if (!Factory::getConfiguration()->get('akeeba.tuning.nobreak.beforelargefile', 0))
				{
					// If the file is bigger than the big file threshold, break the step
					// to avoid potential timeouts
					$this->setBreakFlag();
					Factory::getLog()->log(LogLevel::INFO, "Breaking step _before_ large file: " . $file . " - size: " . $size);
					// Push the file back to the list.
					array_unshift($this->file_list, $file);

					// Return true and let the engine loop
					return true;
				}
			}

			// Proactive potential timeout detection
			// Rough estimation of packing speed in bytes per second
			list($usec, $sec) = explode(" ", microtime());
			$opEndTime = (double) $usec + (double) $sec;
			if ($opEndTime - $opStartTime == 0)
			{
				$_packSpeed = 0;
			}
			else
			{
				$_packSpeed = $packedSize / ($opEndTime - $opStartTime);
			}
			// Estimate required time to pack next file. If it's the first file of this operation,
			// do not impose any limitations.
			$_reqTime = $_packSpeed - 0.01 <= 0 ? 0 : $size / $_packSpeed;
			// Do we have enough time?
			if ($timer->getTimeLeft() < $_reqTime)
			{
				if (!Factory::getConfiguration()->get('akeeba.tuning.nobreak.proactive', 0))
				{
					// Not enough time: requeue the file and break the step
					array_unshift($this->file_list, $file);
					Factory::getLog()->log(LogLevel::INFO, "Proactive step break - file: " . $file . " - size: " . $size . " - req. time " . sprintf('%2.2f', $_reqTime));
					$this->setBreakFlag();

					return true;
				}
			}

			$packedSize += $size;
			$numberOfFiles++;

			// NOTE(review): $ret is unused; errors are propagated below via getError()
			$ret = $archiver->addFile($file, $this->remove_path_prefix, $this->path_prefix);

			// If no more processing steps are required, mark a done file
			if (!$configuration->get('volatile.engine.archiver.processingfile', false))
			{
				$this->progressMarkFileDone();
			}

			// Error propagation
			$this->propagateFromObject($archiver);
			if ($this->getError())
			{
				return false;
			}

			// If this was the first file packed and we've already gone past
			// the large file size threshold break the step. Continuing with
			// more operations after packing such a big file is increasing
			// the risk to hit a timeout.
			if ($packedSize > $largeFileThreshold && $numberOfFiles == 1)
			{
				if (!Factory::getConfiguration()->get('akeeba.tuning.nobreak.afterlargefile', 0))
				{
					Factory::getLog()->log(LogLevel::INFO, "Breaking step *after* large file: " . $file . " - size: " . $size);
					$this->setBreakFlag();

					return true;
				}
			}

			// If we have to continue processing the file, break the file packing loop forcibly
			if ($configuration->get('volatile.engine.archiver.processingfile', false))
			{
				return true;
			}
		}

		// True if we have more files, false if we're done packing
		return count($this->file_list) > 0;
	}
}
/**
 * Kettenrad's main stepping loop: runs as many domain ticks as fit in the
 * timer's budget, switching to the next backup domain when the current one
 * finishes, and honouring the volatile BREAKFLAG set by domains or tuning
 * options to force a step break.
 */
protected function _run()
{
	// (Re)open the backup log for this backup's tag
	$logTag = $this->getLogTag();
	Factory::getLog()->open($logTag);

	// Register the engine's error handler once per process
	if (!static::$registeredErrorHandler)
	{
		static::$registeredErrorHandler = true;
		set_error_handler('\\Akeeba\\Engine\\Core\\akeebaBackupErrorHandler');
	}

	// Maybe we're already done or in an error state?
	if ($this->getError() || $this->getState() == 'postrun')
	{
		return;
	}

	// Set running state
	$this->setState('running');

	// Initialize operation counter
	$registry = Factory::getConfiguration();
	$registry->set('volatile.operation_counter', 0);

	// Advance step counter
	$stepCounter = $registry->get('volatile.step_counter', 0);
	$registry->set('volatile.step_counter', ++$stepCounter);

	// Log step start number
	Factory::getLog()->log(LogLevel::DEBUG, '====== Starting Step number ' . $stepCounter . ' ======');

	if (defined('AKEEBADEBUG'))
	{
		$root = Platform::getInstance()->get_site_root();
		Factory::getLog()->log(LogLevel::DEBUG, 'Site root: ' . $root);
	}

	$timer     = Factory::getTimer();
	$finished  = false;
	$error     = false;
	$breakFlag = false; // BREAKFLAG is optionally passed by domains to force-break current operation

	// Apply an infinite time limit if required
	if ($registry->get('akeeba.tuning.settimelimit', 0))
	{
		if (function_exists('set_time_limit'))
		{
			set_time_limit(0);
		}
	}

	// Loop until time's up, we're done or an error occurred, or BREAKFLAG is set
	$this->array_cache = null;

	while ($timer->getTimeLeft() > 0 && !$finished && !$error && !$breakFlag)
	{
		// Reset the break flag
		$registry->set('volatile.breakflag', false);

		// Do we have to switch domains? This only happens if there is no active
		// domain, or the current domain has finished
		$have_to_switch = false;
		$object         = null;

		if ($this->class == '')
		{
			$have_to_switch = true;
		}
		else
		{
			$object = Factory::getDomainObject($this->class);

			if (!is_object($object))
			{
				$have_to_switch = true;
			}
			else
			{
				// An object without getState() cannot report progress; treat as finished
				if (!in_array('getState', get_class_methods($object)))
				{
					$have_to_switch = true;
				}
				elseif ($object->getState() == 'finished')
				{
					$have_to_switch = true;
				}
			}
		}

		// Switch domain if necessary
		if ($have_to_switch)
		{
			// Unless tuning says otherwise, force a step break before entering a new domain
			if (!Factory::getConfiguration()->get('akeeba.tuning.nobreak.domains', 0))
			{
				Factory::getLog()->log(LogLevel::DEBUG, "Kettenrad :: BREAKING STEP BEFORE SWITCHING DOMAIN");
				$registry->set('volatile.breakflag', true);
			}

			// Free last domain
			$object = null;

			if (empty($this->domain_chain))
			{
				// Aw, we're done! No more domains to run.
				$this->setState('postrun');
				Factory::getLog()->log(LogLevel::DEBUG, "Kettenrad :: No more domains to process");
				Factory::getLog()->log(LogLevel::DEBUG, '====== Finished Step number ' . $stepCounter . ' ======');
				$this->array_cache = null;

				//restore_error_handler();
				return;
			}

			// Shift the next definition off the stack
			$this->array_cache = null;
			$new_definition    = array_shift($this->domain_chain);

			if (array_key_exists('class', $new_definition))
			{
				$this->domain = $new_definition['domain'];
				$this->class  = $new_definition['class'];
				// Get a working object
				$object = Factory::getDomainObject($this->class);
				$object->setup($this->_parametersArray);
			}
			else
			{
				Factory::getLog()->log(LogLevel::WARNING, "Kettenrad :: No class defined trying to switch domains. The backup will crash.");
				$this->domain = null;
				$this->class  = null;
			}
		}
		else
		{
			// Make sure we do have a domain object to tick
			if (!is_object($object))
			{
				$object = Factory::getDomainObject($this->class);
			}
		}

		// Tick the object
		$result = $object->tick();

		// Propagate errors
		$this->propagateFromObject($object);

		// Advance operation counter
		$currentOperationNumber = $registry->get('volatile.operation_counter', 0);
		$currentOperationNumber++;
		$registry->set('volatile.operation_counter', $currentOperationNumber);

		// Process return array
		$this->setDomain($this->domain);
		$this->setStep($result['Step']);
		$this->setSubstep($result['Substep']);

		// Check for BREAKFLAG
		$breakFlag = $registry->get('volatile.breakflag', false);

		// Process errors
		$error = false;

		if ($this->getError())
		{
			$error = true;
		}

		// Check if the backup procedure should finish now
		$finished = $error ? true : !$result['HasRun'];

		// Log operation end
		Factory::getLog()->log(LogLevel::DEBUG, '----- Finished operation ' . $currentOperationNumber . ' ------');
	}

	// Log the result
	if (!$error)
	{
		Factory::getLog()->log(LogLevel::DEBUG, "Successful Smart algorithm on " . get_class($object));
	}
	else
	{
		Factory::getLog()->log(LogLevel::ERROR, "Failed Smart algorithm on " . get_class($object));
	}

	// Log if we have to do more work or not
	if (!is_object($object))
	{
		Factory::getLog()->log(LogLevel::WARNING, "Kettenrad :: Empty object found when processing domain '" . $this->domain . "'. This should never happen.");
	}
	else
	{
		if ($object->getState() == 'running')
		{
			Factory::getLog()->log(LogLevel::DEBUG, "Kettenrad :: More work required in domain '" . $this->domain . "'");
			// We need to set the break flag for the part processing to not batch successive steps
			$registry->set('volatile.breakflag', true);
		}
		elseif ($object->getState() == 'finished')
		{
			Factory::getLog()->log(LogLevel::DEBUG, "Kettenrad :: Domain '" . $this->domain . "' has finished.");
			$registry->set('volatile.breakflag', false);
		}
	}

	// Log step end
	Factory::getLog()->log(LogLevel::DEBUG, '====== Finished Step number ' . $stepCounter . ' ======');

	if (!$registry->get('akeeba.tuning.nobreak.domains', 0))
	{
		// Force break between steps
		$registry->set('volatile.breakflag', true);
	}

	//restore_error_handler();
}
/**
 * Apply quotas for remotely stored files
 *
 * Deletes remotely stored backup archives which exceed the configured
 * quotas. Works in time-limited chunks: when the step's time budget runs
 * out it returns false so the engine calls it again in the next step.
 *
 * @return bool True on success / nothing left to do, false when more work
 *              remains for the next step
 */
protected function apply_remote_quotas()
{
	$this->setStep('Applying remote storage quotas');
	$this->setSubstep('');

	// Make sure we are enabled
	$config       = Factory::getConfiguration();
	$enableRemote = $config->get('akeeba.quota.remote', 0);

	if (!$enableRemote)
	{
		return true;
	}

	// Get the list of files to kill
	if (empty($this->remote_files_killlist))
	{
		Factory::getLog()->log(LogLevel::DEBUG, 'Applying remote file quotas');
		$this->remote_files_killlist = $this->get_remote_quotas();

		if (empty($this->remote_files_killlist))
		{
			Factory::getLog()->log(LogLevel::DEBUG, 'No remote files to apply quotas to were found');

			return true;
		}
	}

	// Remove the files.
	// BUG FIX: the loop must run while there is time LEFT, not while the
	// elapsed ("running") time is non-zero. getRunningTime() is practically
	// always truthy once the timer starts, which disabled the time-based
	// step break and made the "continue in the next step" path unreachable.
	$timer = Factory::getTimer();

	while ($timer->getTimeLeft() > 0 && count($this->remote_files_killlist))
	{
		$filename = array_shift($this->remote_files_killlist);
		// Kill-list entries are "engine://path" URIs; split them apart
		list($engineName, $path) = explode('://', $filename);
		$engine = Factory::getPostprocEngine($engineName);

		// Skip engines which do not support remote deletion
		if (!$engine->can_delete)
		{
			continue;
		}

		Factory::getLog()->log(LogLevel::DEBUG, "Removing {$filename}");
		$result = $engine->delete($path);

		if (!$result)
		{
			// Deletion failures are logged but do not abort quota processing
			Factory::getLog()->log(LogLevel::DEBUG, "Removal failed: " . $engine->getWarning());
		}
	}

	// Return false if we have more work to do or true if we're done
	if (count($this->remote_files_killlist))
	{
		Factory::getLog()->log(LogLevel::DEBUG, "Remote file removal will continue in the next step");

		return false;
	}
	else
	{
		Factory::getLog()->log(LogLevel::DEBUG, "Remote file quotas applied successfully");

		return true;
	}
}
/**
 * The most basic file transaction: add a single entry (file or directory) to
 * the archive.
 *
 * Writes a JPA Entity Description Block followed by the (optionally
 * compressed) file data. Uncompressed, non-virtual files are the only case
 * which may span multiple engine steps; their progress is cached in the
 * volatile.engine.archiver.* keys and resumed on the next call.
 *
 * @param bool   $isVirtual        If true, the next parameter contains file data instead of a file name
 * @param string $sourceNameOrData Absolute file name to read data from or the file data itself is $isVirtual is
 *                                 true
 * @param string $targetName       The (relative) file name under which to store the file in the archive
 *
 * @return boolean True on success, false otherwise
 *
 * @since 1.2.1
 */
protected function _addFile($isVirtual, &$sourceNameOrData, $targetName)
{
	static $configuration;
	static $memLimit = null;

	// Determine PHP's memory limit (in bytes) only once per process
	if (is_null($memLimit))
	{
		$memLimit = ini_get("memory_limit");
		if (is_numeric($memLimit) && $memLimit < 0 || !is_numeric($memLimit))
		{
			$memLimit = 0; // 1.2a3 -- Rare case with memory_limit < 0, e.g. -1Mb!
		}
		$memLimit = $this->_return_bytes($memLimit);
	}

	$isDir     = false;
	$isSymlink = false;
	if (is_null($isVirtual))
	{
		$isVirtual = false;
	}
	$compressionMethod = 0;

	if ($isVirtual)
	{
		Factory::getLog()->log(LogLevel::DEBUG, "-- Adding {$targetName} to archive (virtual data)");
	}
	else
	{
		Factory::getLog()->log(LogLevel::DEBUG, "-- Adding {$targetName} to archive (source: {$sourceNameOrData})");
	}

	if (!$configuration)
	{
		$configuration = Factory::getConfiguration();
	}
	$timer = Factory::getTimer();

	// Initialize inode change timestamp
	$filectime = 0;

	// Open data file for output
	if (is_null($this->fp))
	{
		$this->fp = $this->_fopen($this->_dataFileName, "ab");
	}
	if ($this->fp === false)
	{
		$this->fp = null;
		$this->setError("Could not open archive file '{$this->_dataFileName}' for append!");

		return false;
	}

	if (!$configuration->get('volatile.engine.archiver.processingfile', false))
	{
		// Fresh file (not resuming): analyse it and write its header.

		// See if it's a directory
		$isDir = $isVirtual ? false : is_dir($sourceNameOrData);
		// See if it's a symlink (w/out dereference)
		$isSymlink = false;
		if ($this->_symlink_store_target && !$isVirtual)
		{
			$isSymlink = is_link($sourceNameOrData);
		}
		// Get real size before compression
		if ($isVirtual)
		{
			$fileSize  = akstringlen($sourceNameOrData);
			$filectime = time();
		}
		else
		{
			if ($isSymlink)
			{
				// For symlinks we store the link target, so that's the "size"
				$fileSize = akstringlen(@readlink($sourceNameOrData));
			}
			else
			{
				// Is the file readable?
				if (!is_readable($sourceNameOrData) && !$isDir)
				{
					// Unreadable files won't be recorded in the archive file
					$this->setWarning('Unreadable file ' . $sourceNameOrData . '. Check permissions');

					return false;
				}
				// Get the filesize (note: despite the name, $filectime holds the *modification* time)
				$fileSize  = $isDir ? 0 : @filesize($sourceNameOrData);
				$filectime = $isDir ? 0 : @filemtime($sourceNameOrData);
			}
		}

		// Decide if we will compress
		if ($isDir || $isSymlink)
		{
			$compressionMethod = 0; // don't compress directories...
		}
		else
		{
			if (!$memLimit || $fileSize >= _AKEEBA_COMPRESSION_THRESHOLD)
			{
				// No memory limit, or over 1Mb files => always compress up to 1Mb files (otherwise it times out)
				$compressionMethod = $fileSize <= _AKEEBA_COMPRESSION_THRESHOLD ? 1 : 0;
			}
			elseif (function_exists("memory_get_usage"))
			{
				// PHP can report memory usage, see if there's enough available memory; the containing
				// application / CMS alone eats about 5-6Mb. This code is called on files <= 1Mb
				$availableRAM      = $memLimit - memory_get_usage();
				$compressionMethod = $availableRAM / 2.5 >= $fileSize ? 1 : 0;
			}
			else
			{
				// PHP can't report memory usage, compress only files up to 512Kb (conservative approach) and hope it doesn't break
				$compressionMethod = $fileSize <= 524288 ? 1 : 0;
			}
		}
		// No zlib, no compression at all
		$compressionMethod = function_exists("gzcompress") ? $compressionMethod : 0;

		$storedName = $targetName;

		/* "Entity Description BLock" segment. */
		$unc_len = & $fileSize; // File size
		$storedName .= $isDir ? "/" : "";

		if ($compressionMethod == 1)
		{
			if ($isVirtual)
			{
				$udata = & $sourceNameOrData;
			}
			else
			{
				// Get uncompressed data
				$udata = @file_get_contents($sourceNameOrData); // PHP > 4.3.0 saves us the trouble
			}
			if ($udata === false)
			{
				// Unreadable file, skip it.
				$this->setWarning('Unreadable file ' . $sourceNameOrData . '. Check permissions');

				return false;
			}
			else
			{
				// Proceed with compression
				$zdata = @gzcompress($udata);
				if ($zdata === false)
				{
					// If compression fails, let it behave like no compression was available
					$c_len             = & $unc_len;
					$compressionMethod = 0;
				}
				else
				{
					unset($udata);
					// Strip the 2-byte zlib header and the 4-byte Adler-32 checksum (raw deflate stream)
					$zdata = substr(substr($zdata, 0, -4), 2);
					$c_len = akstringlen($zdata);
				}
			}
		}
		else
		{
			$c_len = $unc_len;

			// Test for unreadable files
			if (!$isVirtual && !$isSymlink && !$isDir)
			{
				$myfp = @fopen($sourceNameOrData, 'rb');
				if ($myfp === false)
				{
					// Unreadable file, skip it.
					$this->setWarning('Unreadable file ' . $sourceNameOrData . '. Check permissions');

					return false;
				}
				@fclose($myfp);
			}
		}

		$this->_compressedSize += $c_len; // Update global data
		$this->_uncompressedSize += $fileSize; // Update global data
		$this->_fileCount++;

		// Get file permissions
		$perms = 0755;
		if (!$isVirtual)
		{
			if (@file_exists($sourceNameOrData))
			{
				if (@is_file($sourceNameOrData) || @is_link($sourceNameOrData))
				{
					if (@is_readable($sourceNameOrData))
					{
						$perms = @fileperms($sourceNameOrData);
					}
				}
			}
		}

		// Calculate Entity Description Block length
		$blockLength = 21 + akstringlen($storedName);
		// If we need to store the file mod date: 2 (identifier) + 2 (length) + 4 (timestamp) extra bytes
		if ($filectime > 0)
		{
			$blockLength += 8;
		}

		// Get file type (1 = regular file, 2 = symlink, 0 = directory)
		if (!$isDir && !$isSymlink)
		{
			$fileType = 1;
		}
		elseif ($isSymlink)
		{
			$fileType = 2;
		}
		elseif ($isDir)
		{
			$fileType = 0;
		}

		// If it's a split JPA file, we've got to make sure that the header can fit in the part
		if ($this->_useSplitZIP)
		{
			// Compare to free part space
			clearstatcache();
			$current_part_size = @filesize($this->_dataFileName);
			$free_space        = $this->_fragmentSize - ($current_part_size === false ? 0 : $current_part_size);
			if ($free_space <= $blockLength)
			{
				// Not enough space on current part, create new part
				if (!$this->_createNewPart())
				{
					$this->setError('Could not create new JPA part file ' . basename($this->_dataFileName));

					return false;
				}
				// Open data file for output
				$this->fp = $this->_fopen($this->_dataFileName, "ab");
				if ($this->fp === false)
				{
					$this->fp = null;
					$this->setError("Could not open archive file {$this->_dataFileName} for append!");

					return false;
				}
			}
		}

		$this->_fwrite($this->fp, $this->_fileHeader); // Entity Description Block header
		if ($this->getError())
		{
			return false;
		}
		$this->_fwrite($this->fp, pack('v', $blockLength)); // Entity Description Block header length
		$this->_fwrite($this->fp, pack('v', akstringlen($storedName))); // Length of entity path
		$this->_fwrite($this->fp, $storedName); // Entity path
		$this->_fwrite($this->fp, pack('C', $fileType)); // Entity type
		$this->_fwrite($this->fp, pack('C', $compressionMethod)); // Compression method
		$this->_fwrite($this->fp, pack('V', $c_len)); // Compressed size
		$this->_fwrite($this->fp, pack('V', $unc_len)); // Uncompressed size
		$this->_fwrite($this->fp, pack('V', $perms)); // Entity permissions

		// Timestamp Extra Field, only for files
		if ($filectime > 0)
		{
			// BUG FIX: the identifier must be the 2 bytes \x00\x01 — writing an
			// empty string made the block 2 bytes shorter than the $blockLength
			// declared above, corrupting the archive.
			$this->_fwrite($this->fp, "\x00\x01"); // Extra Field Identifier
			$this->_fwrite($this->fp, pack('v', 8)); // Extra Field Length
			$this->_fwrite($this->fp, pack('V', $filectime)); // Timestamp
		}

		// Cache useful information about the file so a later step can resume packing it
		if (!$isDir && !$isSymlink && !$isVirtual)
		{
			$configuration->set('volatile.engine.archiver.unc_len', $unc_len);
			$configuration->set('volatile.engine.archiver.sourceNameOrData', $sourceNameOrData);
		}
	}
	else
	{
		// If we are continuing file packing we have an uncompressed, non-virtual file.
		// We need to set up these variables so as not to throw any PHP notices.
		$isDir             = false;
		$isSymlink         = false;
		$isVirtual         = false;
		$compressionMethod = 0;
	}

	/* "File data" segment. */
	if ($compressionMethod == 1)
	{
		if (!$this->_useSplitZIP)
		{
			// Just dump the compressed data
			$this->_fwrite($this->fp, $zdata);
			if ($this->getError())
			{
				return false;
			}
		}
		else
		{
			// Split ZIP. Check if we need to split the part in the middle of the data.
			clearstatcache();
			$current_part_size = @filesize($this->_dataFileName);
			$free_space        = $this->_fragmentSize - ($current_part_size === false ? 0 : $current_part_size);
			if ($free_space >= akstringlen($zdata))
			{
				// Write in one part
				$this->_fwrite($this->fp, $zdata);
				if ($this->getError())
				{
					return false;
				}
			}
			else
			{
				$bytes_left = akstringlen($zdata);
				while ($bytes_left > 0)
				{
					clearstatcache();
					$current_part_size = @filesize($this->_dataFileName);
					$free_space        = $this->_fragmentSize - ($current_part_size === false ? 0 : $current_part_size);
					// Split between parts - Write first part
					$this->_fwrite($this->fp, $zdata, min(akstringlen($zdata), $free_space));
					if ($this->getError())
					{
						return false;
					}
					// Get the rest of the data
					$bytes_left = akstringlen($zdata) - $free_space;
					if ($bytes_left > 0)
					{
						// Create new part
						$this->_fclose($this->fp);
						$this->fp = null;
						if (!$this->_createNewPart())
						{
							// Die if we couldn't create the new part
							$this->setError('Could not create new JPA part file ' . basename($this->_dataFileName));

							return false;
						}
						// Open data file for output
						$this->fp = $this->_fopen($this->_dataFileName, "ab");
						if ($this->fp === false)
						{
							$this->fp = null;
							$this->setError("Could not open archive file {$this->_dataFileName} for append!");

							return false;
						}
						// Keep only the yet-unwritten tail for the next iteration
						$zdata = substr($zdata, -$bytes_left);
					}
				}
			}
		}
		unset($zdata);
	}
	elseif (!$isDir && !$isSymlink)
	{
		if ($isVirtual)
		{
			if (!$this->_useSplitZIP)
			{
				// Just dump the data
				$this->_fwrite($this->fp, $sourceNameOrData);
				if ($this->getError())
				{
					return false;
				}
			}
			else
			{
				// Split JPA. Check if we need to split the part in the middle of the data.
				clearstatcache();
				$current_part_size = @filesize($this->_dataFileName);
				$free_space        = $this->_fragmentSize - ($current_part_size === false ? 0 : $current_part_size);
				if ($free_space >= akstringlen($sourceNameOrData))
				{
					// Write in one part
					$this->_fwrite($this->fp, $sourceNameOrData);
					if ($this->getError())
					{
						return false;
					}
				}
				else
				{
					$bytes_left = akstringlen($sourceNameOrData);
					while ($bytes_left > 0)
					{
						clearstatcache();
						$current_part_size = @filesize($this->_dataFileName);
						$free_space        = $this->_fragmentSize - ($current_part_size === false ? 0 : $current_part_size);
						// Split between parts - Write first part
						$this->_fwrite($this->fp, $sourceNameOrData, min(akstringlen($sourceNameOrData), $free_space));
						if ($this->getError())
						{
							return false;
						}
						// Get the rest of the data
						$rest_size = akstringlen($sourceNameOrData) - $free_space;
						if ($rest_size > 0)
						{
							$this->_fclose($this->fp);
							$this->fp = null;
							// Create new part
							if (!$this->_createNewPart())
							{
								// Die if we couldn't create the new part
								$this->setError('Could not create new JPA part file ' . basename($this->_dataFileName));

								return false;
							}
							// Open data file for output
							$this->fp = $this->_fopen($this->_dataFileName, "ab");
							if ($this->fp === false)
							{
								$this->fp = null;
								$this->setError("Could not open archive file {$this->_dataFileName} for append!");

								return false;
							}
							// BUG FIX: the remainder must replace $sourceNameOrData (the buffer
							// written in the next iteration), not the unrelated $zdata variable —
							// otherwise each new part received the same leading bytes again.
							// This mirrors the compressed-data branch above.
							$sourceNameOrData = substr($sourceNameOrData, -$rest_size);
						}
						$bytes_left = $rest_size;
					} // end while
				}
			}
		}
		else
		{
			// IMPORTANT! Only this case can be spanned across steps: uncompressed, non-virtual data

			// Load cached data if we're resuming file packing
			if ($configuration->get('volatile.engine.archiver.processingfile', false))
			{
				$sourceNameOrData = $configuration->get('volatile.engine.archiver.sourceNameOrData', '');
				$unc_len          = $configuration->get('volatile.engine.archiver.unc_len', 0);
				$resume           = $configuration->get('volatile.engine.archiver.resume', 0);
			}

			// Copy the file contents, ignore directories
			$zdatafp = @fopen($sourceNameOrData, "rb");

			if ($zdatafp === false)
			{
				$this->setWarning('Unreadable file ' . $sourceNameOrData . '. Check permissions');

				return false;
			}
			else
			{
				// Seek to the resume point if required
				if ($configuration->get('volatile.engine.archiver.processingfile', false))
				{
					// Seek to new offset
					$seek_result = @fseek($zdatafp, $resume);
					if ($seek_result === -1)
					{
						// What?! We can't resume!
						$this->setError(sprintf('Could not resume packing of file %s. Your archive is damaged!', $sourceNameOrData));
						@fclose($zdatafp);

						return false;
					}
					// Doctor the uncompressed size to match the remainder of the data
					$unc_len = $unc_len - $resume;
				}

				if (!$this->_useSplitZIP)
				{
					// Non-split archive: chunked copy until EOF, all data written, or time runs out
					while (!feof($zdatafp) && $timer->getTimeLeft() > 0 && $unc_len > 0)
					{
						$zdata = fread($zdatafp, AKEEBA_CHUNK);
						$this->_fwrite($this->fp, $zdata, min(akstringlen($zdata), AKEEBA_CHUNK));
						$unc_len -= min(akstringlen($zdata), AKEEBA_CHUNK);
						if ($this->getError())
						{
							@fclose($zdatafp);

							return false;
						}
					}

					// WARNING!!! The extra $unc_len != 0 check is necessary as PHP won't reach EOF for 0-byte files.
					if (!feof($zdatafp) && $unc_len != 0)
					{
						// We have to break, or we'll time out!
						$resume = @ftell($zdatafp);
						$configuration->set('volatile.engine.archiver.resume', $resume);
						$configuration->set('volatile.engine.archiver.processingfile', true);
						@fclose($zdatafp);

						return true;
					}
				}
				else
				{
					// Split JPA - Do we have enough space to host the whole file?
					clearstatcache();
					$current_part_size = @filesize($this->_dataFileName);
					$free_space        = $this->_fragmentSize - ($current_part_size === false ? 0 : $current_part_size);
					if ($free_space >= $unc_len)
					{
						// Yes, it will fit inside this part, do quick copy
						while (!feof($zdatafp) && $timer->getTimeLeft() > 0 && $unc_len > 0)
						{
							$zdata = fread($zdatafp, AKEEBA_CHUNK);
							$this->_fwrite($this->fp, $zdata, min(akstringlen($zdata), AKEEBA_CHUNK));
							//$unc_len -= min(akstringlen($zdata), AKEEBA_CHUNK);
							$unc_len -= AKEEBA_CHUNK;
							if ($this->getError())
							{
								@fclose($zdatafp);

								return false;
							}
						}

						//if(!feof($zdatafp) && ($unc_len != 0))
						if (!feof($zdatafp) && $unc_len > 0)
						{
							// We have to break, or we'll time out!
							$resume = @ftell($zdatafp);
							$configuration->set('volatile.engine.archiver.resume', $resume);
							$configuration->set('volatile.engine.archiver.processingfile', true);
							@fclose($zdatafp);

							return true;
						}
					}
					else
					{
						// No, we'll have to split between parts. We'll loop until we run
						// out of space.
						while (!feof($zdatafp) && $timer->getTimeLeft() > 0)
						{
							clearstatcache();
							$current_part_size = @filesize($this->_dataFileName);
							$free_space        = $this->_fragmentSize - ($current_part_size === false ? 0 : $current_part_size);

							// Find optimal chunk size
							$chunk_size_primary = min(AKEEBA_CHUNK, $free_space);
							if ($chunk_size_primary <= 0)
							{
								$chunk_size_primary = max(AKEEBA_CHUNK, $free_space);
							}
							// Calculate if we have to read some more data (smaller chunk size)
							// and how many times we must read w/ the primary chunk size
							$chunk_size_secondary = $free_space % $chunk_size_primary;
							$loop_times           = ($free_space - $chunk_size_secondary) / $chunk_size_primary;

							// Read and write with the primary chunk size
							for ($i = 1; $i <= $loop_times; $i++)
							{
								$zdata = fread($zdatafp, $chunk_size_primary);
								$this->_fwrite($this->fp, $zdata, min(akstringlen($zdata), $chunk_size_primary));
								//$unc_len -= min(akstringlen($zdata), $chunk_size_primary);
								$unc_len -= $chunk_size_primary;
								if ($this->getError())
								{
									@fclose($zdatafp);

									return false;
								}

								// Do we have enough time to proceed?
								//if( (!feof($zdatafp)) && ($unc_len != 0) && ($timer->getTimeLeft() <= 0) ) {
								if (!feof($zdatafp) && $unc_len >= 0 && $timer->getTimeLeft() <= 0)
								{
									// No, we have to break, or we'll time out!
									$resume = @ftell($zdatafp);
									$configuration->set('volatile.engine.archiver.resume', $resume);
									$configuration->set('volatile.engine.archiver.processingfile', true);
									@fclose($zdatafp);

									return true;
								}
							}

							// Read and write w/ secondary chunk size, if non-zero
							if ($chunk_size_secondary > 0)
							{
								$zdata = fread($zdatafp, $chunk_size_secondary);
								$this->_fwrite($this->fp, $zdata, min(akstringlen($zdata), $chunk_size_secondary));
								//$unc_len -= min(akstringlen($zdata), $chunk_size_secondary);
								$unc_len -= $chunk_size_secondary;
								if ($this->getError())
								{
									@fclose($zdatafp);

									return false;
								}
							}

							// Do we have enough time to proceed?
							if (!feof($zdatafp) && $unc_len >= 0 && $timer->getTimeLeft() <= 0)
							{
								// No, we have to break, or we'll time out!
								$resume = @ftell($zdatafp);
								$configuration->set('volatile.engine.archiver.resume', $resume);
								$configuration->set('volatile.engine.archiver.processingfile', true);

								// ...and create a new part as well
								if (!$this->_createNewPart())
								{
									// Die if we couldn't create the new part
									$this->setError('Could not create new JPA part file ' . basename($this->_dataFileName));
									@fclose($zdatafp);

									return false;
								}
								// ...and make sure we can open the new part
								$this->fp = $this->_fopen($this->_dataFileName, "ab");
								if ($this->fp === false)
								{
									$this->fp = null;
									$this->setError("Could not open archive file {$this->_dataFileName} for append!");
									@fclose($zdatafp);

									return false;
								}
								// ...then, return
								@fclose($zdatafp);

								return true;
							}

							// Create new JPA part, but only if we'll have more data to write
							if (!feof($zdatafp) && $unc_len > 0)
							{
								$this->_fclose($this->fp);
								$this->fp = null;
								if (!$this->_createNewPart())
								{
									// Die if we couldn't create the new part
									$this->setError('Could not create new JPA part file ' . basename($this->_dataFileName));
									@fclose($zdatafp);

									return false;
								}

								// We have created the part. If the user asked for immediate post-proc, break step now.
								if ($configuration->get('engine.postproc.common.after_part', 0))
								{
									$resume = @ftell($zdatafp);
									$configuration->set('volatile.engine.archiver.resume', $resume);
									$configuration->set('volatile.engine.archiver.processingfile', true);
									$configuration->set('volatile.breakflag', true);
									@fclose($zdatafp);

									return true;
								}

								// Open data file for output
								$this->fp = $this->_fopen($this->_dataFileName, "ab");
								if ($this->fp === false)
								{
									$this->fp = null;
									$this->setError("Could not open archive file {$this->_dataFileName} for append!");
									@fclose($zdatafp);

									return false;
								}
							}
						} // end while
					}
				}
				@fclose($zdatafp);
			}
		}
	}
	elseif ($isSymlink)
	{
		// Symlinks store the link target string as their data
		$this->_fwrite($this->fp, @readlink($sourceNameOrData));
	}

	// Uncache data: the file has been fully written out
	$configuration->set('volatile.engine.archiver.sourceNameOrData', null);
	$configuration->set('volatile.engine.archiver.unc_len', null);
	$configuration->set('volatile.engine.archiver.resume', null);
	$configuration->set('volatile.engine.archiver.processingfile', false);

	// ... and return TRUE = success
	return true;
}
/**
 * Creates a dummy file of a given size and, on success, persists the matching
 * part size setting into the active backup profile. The size is read from the
 * 'blocks' query parameter as a number of 128KB blocks.
 */
public function partsize()
{
	$timer = Factory::getTimer();

	$blockCount = $this->input->get('blocks', 1, 'int');
	$success    = $this->createTempFile($blockCount);

	if ($success)
	{
		// Persist the setting. Anything over 200 blocks (25MB) is stored as the
		// 2GB-minus-128KB preset (16383 blocks), a safe setting for PHP builds
		// not running on 64-bit Linux.
		if ($blockCount > 200)
		{
			$blockCount = 16383;
		}

		$platform      = Platform::getInstance();
		$activeProfile = $platform->get_active_profile();

		Factory::getConfiguration()->set('engine.archiver.common.part_size', $blockCount * 131072);

		$platform->save_configuration($activeProfile);
	}

	// Enforce the minimum execution time before handing the result back
	$timer->enforce_min_exec_time(false);

	return $success;
}
/**
 * Performs one more step of dumping database data.
 *
 * Each invocation works on the current table ($this->nextTable). On the first
 * range (nextRange == 0) it writes the DROP/CREATE statements and fetches the
 * row count; afterwards it dumps up to one batch of rows as INSERT statements
 * (plain or extended, depending on $this->extendedInserts), then advances the
 * range/table pointers and, at end of the database, flips the state to postrun.
 *
 * @return void
 */
protected function stepDatabaseDump()
{
	// Initialize local variables
	$db = $this->getDB();

	if ($this->getError())
	{
		return;
	}

	if (!is_object($db) || $db === false)
	{
		$this->setError(__CLASS__ . '::_run() Could not connect to database?!');

		return;
	}

	$outData = ''; // Used for outputting INSERT INTO commands

	$this->enforceSQLCompatibility(); // Apply MySQL compatibility option

	if ($this->getError())
	{
		return;
	}

	// Touch SQL dump file
	$nada = "";
	$this->writeline($nada);

	// Get this table's information
	$tableName = $this->nextTable;
	$this->setStep($tableName);
	$this->setSubstep('');
	$tableAbstract = trim($this->table_name_map[$tableName]);
	$dump_records = $this->tables_data[$tableName]['dump_records'];

	// If it is the first run, find number of rows and get the CREATE TABLE command
	if ($this->nextRange == 0)
	{
		if ($this->getError())
		{
			return;
		}

		// Use the cached CREATE statement, if one exists
		$outCreate = '';

		if (is_array($this->tables_data[$tableName]))
		{
			if (array_key_exists('create', $this->tables_data[$tableName]))
			{
				$outCreate = $this->tables_data[$tableName]['create'];
			}
		}

		if (empty($outCreate) && !empty($tableName))
		{
			// The CREATE command wasn't cached. Time to create it. The $type and $dependencies
			// variables will be thrown away.
			$type = 'table';
			$dependencies = array();
			$outCreate = $this->get_create($tableAbstract, $tableName, $type, $dependencies);
		}

		// Create drop statements if required (the key is defined by the scripting engine)
		if (Factory::getEngineParamsProvider()->getScriptingParameter('db.dropstatements', 0))
		{
			if (array_key_exists('create', $this->tables_data[$tableName]))
			{
				$dropStatement = $this->createDrop($this->tables_data[$tableName]['create']);
			}
			else
			{
				$type = 'table';
				$createStatement = $this->get_create($tableAbstract, $tableName, $type, $dependencies);
				$dropStatement = $this->createDrop($createStatement);
			}

			if (!empty($dropStatement))
			{
				$dropStatement .= "\n";

				if (!$this->writeDump($dropStatement))
				{
					return;
				}
			}
		}

		// Write the CREATE command after any DROP command which might be necessary.
		if (!$this->writeDump($outCreate))
		{
			return;
		}

		if ($dump_records)
		{
			// We are dumping data from a table, get the row count
			$this->getRowCount($tableAbstract);
		}
		else
		{
			// We should not dump any data; force the range pointers past the end
			Factory::getLog()->log(LogLevel::INFO, "Skipping dumping data of " . $tableAbstract);
			$this->maxRange = 0;
			$this->nextRange = 1;
			$outData = '';
			$numRows = 0;
			$dump_records = false;
		}

		// Output any data preamble commands, e.g. SET IDENTITY_INSERT for SQL Server.
		// NOTE(review): the preamble (and the epilogue at the bottom) is gated on the
		// 'db.dropstatements' scripting parameter, not a dedicated one — presumably
		// intentional; confirm upstream.
		if ($dump_records && Factory::getEngineParamsProvider()->getScriptingParameter('db.dropstatements', 0))
		{
			Factory::getLog()->log(LogLevel::DEBUG, "Writing data dump preamble for " . $tableAbstract);
			$preamble = $this->getDataDumpPreamble($tableAbstract, $tableName, $this->maxRange);

			if (!empty($preamble))
			{
				if (!$this->writeDump($preamble))
				{
					return;
				}
			}
		}

		// Get the table's auto increment information
		if ($dump_records)
		{
			$this->setAutoIncrementInfo();
		}
	}

	// Check if we have more work to do on this table
	$configuration = Factory::getConfiguration();
	$batchsize = intval($configuration->get('engine.dump.common.batchsize', 1000));

	if ($batchsize <= 0)
	{
		$batchsize = 1000;
	}

	if ($this->nextRange < $this->maxRange)
	{
		$timer = Factory::getTimer();

		// Get the number of rows left to dump from the current table
		$sql = $db->getQuery(true)->select('*')->from($db->nameQuote($tableAbstract));

		if (!is_null($this->table_autoincrement['field']))
		{
			$sql->order($db->qn($this->table_autoincrement['field']) . ' ASC');
		}

		if ($this->nextRange == 0)
		{
			// First run, get a cursor to all records
			$db->setQuery($sql, 0, $batchsize);
			Factory::getLog()->log(LogLevel::INFO, "Beginning dump of " . $tableAbstract);
		}
		else
		{
			// Subsequent runs, get a cursor to the rest of the records
			$this->setSubstep($this->nextRange . ' / ' . $this->maxRange);

			// If we have an auto_increment value and the table has over $batchsize records use the indexed select instead of a plain limit
			if (!is_null($this->table_autoincrement['field']) && !is_null($this->table_autoincrement['value']))
			{
				Factory::getLog()->log(LogLevel::INFO, "Continuing dump of " . $tableAbstract . " from record #{$this->nextRange} using auto_increment column {$this->table_autoincrement['field']} and value {$this->table_autoincrement['value']}");
				$sql->where($db->qn($this->table_autoincrement['field']) . ' > ' . $db->q($this->table_autoincrement['value']));
				$db->setQuery($sql, 0, $batchsize);
			}
			else
			{
				Factory::getLog()->log(LogLevel::INFO, "Continuing dump of " . $tableAbstract . " from record #{$this->nextRange}");
				$db->setQuery($sql, $this->nextRange, $batchsize);
			}
		}

		$this->query = '';
		$numRows = 0;
		$use_abstract = Factory::getEngineParamsProvider()->getScriptingParameter('db.abstractnames', 1);

		$filters = Factory::getFilters();
		$mustFilter = $filters->hasFilterType('dbobject', 'children');

		// Run the batch query; a failed query yields a null cursor and zero fetched rows
		try
		{
			$cursor = $db->query();
		}
		catch (\Exception $exc)
		{
			$db->resetErrors();
			$cursor = null;
		}

		while (is_array($myRow = $db->fetchAssoc()) && $numRows < $this->maxRange - $this->nextRange)
		{
			$this->createNewPartIfRequired();
			$numRows++;
			$numOfFields = count($myRow);

			// On MS SQL Server there's always a RowNumber pseudocolumn added at the end, screwing up the backup (GRRRR!)
			if ($db->getDriverType() == 'mssql')
			{
				$numOfFields--;
			}

			// If row-level filtering is enabled, please run the filtering
			if ($mustFilter)
			{
				$isFiltered = $filters->isFiltered(array('table' => $tableAbstract, 'row' => $myRow), $configuration->get('volatile.database.root', '[SITEDB]'), 'dbobject', 'children');

				if ($isFiltered)
				{
					// Update the auto_increment value to avoid edge cases when the batch size is one
					if (!is_null($this->table_autoincrement['field']) && isset($myRow[$this->table_autoincrement['field']]))
					{
						$this->table_autoincrement['value'] = $myRow[$this->table_autoincrement['field']];
					}

					continue;
				}
			}

			if (!$this->extendedInserts || $this->extendedInserts && empty($this->query))
			{
				// Plain INSERTs, or the start of a new extended INSERT: emit the INSERT INTO prefix
				$newQuery = true;
				$fieldList = $this->getFieldListSQL(array_keys($myRow), $numOfFields);

				if ($numOfFields > 0)
				{
					$this->query = "INSERT INTO " . $db->nameQuote(!$use_abstract ? $tableName : $tableAbstract) . " {$fieldList} VALUES ";
				}
			}
			else
			{
				// On other cases, just mark that we should add a comma and start a new VALUES entry
				$newQuery = false;
			}

			$outData = '(';

			// Step through each of the row's values
			$fieldID = 0;

			// Used in running backup fix
			$isCurrentBackupEntry = false;

			// Fix 1.2a - NULL values were being skipped
			if ($numOfFields > 0)
			{
				foreach ($myRow as $value)
				{
					// The ID of the field, used to determine placement of commas
					$fieldID++;

					if ($fieldID > $numOfFields)
					{
						// This is required for SQL Server backups, do NOT remove!
						continue;
					}

					// Fix 2.0: Mark currently running backup as successful in the DB snapshot
					if ($tableAbstract == '#__ak_stats')
					{
						if ($fieldID == 1)
						{
							// Compare the ID to the currently running
							$statistics = Factory::getStatistics();
							$isCurrentBackupEntry = $value == $statistics->getId();
						}
						elseif ($fieldID == 6)
						{
							// Treat the status field
							$value = $isCurrentBackupEntry ? 'complete' : $value;
						}
					}

					// Post-process the value
					if (is_null($value))
					{
						$outData .= "NULL"; // Cope with null values
					}
					else
					{
						// Accommodate for runtime magic quotes
						$value = @get_magic_quotes_runtime() ? stripslashes($value) : $value;
						$value = $db->Quote($value);

						if ($this->postProcessValues)
						{
							$value = $this->postProcessQuotedValue($value);
						}

						$outData .= $value;
					}

					if ($fieldID < $numOfFields)
					{
						$outData .= ', ';
					}
				}
			}

			$outData .= ')';

			if ($numOfFields)
			{
				// If it's an existing query and we have extended inserts
				if ($this->extendedInserts && !$newQuery)
				{
					// Check the existing query size
					$query_length = strlen($this->query);
					$data_length = strlen($outData);

					if ($query_length + $data_length > $this->packetSize)
					{
						// We are about to exceed the packet size. Write the data so far.
						$this->query .= ";\n";

						if (!$this->writeDump($this->query))
						{
							return;
						}

						// Then, start a new query.
						// NOTE(review): this continuation INSERT omits the field list used by the
						// first INSERT of the batch — looks deliberate, but confirm upstream.
						$this->query = '';
						$this->query = "INSERT INTO " . $db->nameQuote(!$use_abstract ? $tableName : $tableAbstract) . " VALUES ";
						$this->query .= $outData;
					}
					else
					{
						// We have room for more data. Append $outData to the query.
						$this->query .= ', ';
						$this->query .= $outData;
					}
				}
				elseif ($this->extendedInserts && $newQuery)
				{
					// Append the data to the INSERT statement
					$this->query .= $outData;

					// Let's see the size of the dumped data...
					$query_length = strlen($this->query);

					if ($query_length >= $this->packetSize)
					{
						// This was a BIG query. Write the data to disk.
						$this->query .= ";\n";

						if (!$this->writeDump($this->query))
						{
							return;
						}

						// Then, start a new query
						$this->query = '';
					}
				}
				else
				{
					// Append the data to the INSERT statement
					$this->query .= $outData;

					// Write the data to disk.
					$this->query .= ";\n";

					if (!$this->writeDump($this->query))
					{
						return;
					}

					// Then, start a new query
					$this->query = '';
				}
			}

			$outData = '';

			// Update the auto_increment value to avoid edge cases when the batch size is one
			if (!is_null($this->table_autoincrement['field']))
			{
				$this->table_autoincrement['value'] = $myRow[$this->table_autoincrement['field']];
			}

			unset($myRow);

			// Check for imminent timeout
			if ($timer->getTimeLeft() <= 0)
			{
				Factory::getLog()->log(LogLevel::DEBUG, "Breaking dump of {$tableAbstract} after {$numRows} rows; will continue on next step");

				break;
			}
		}

		$db->freeResult($cursor);

		// Advance the _nextRange pointer (by at least 1, even on an empty/failed batch)
		$this->nextRange += $numRows != 0 ? $numRows : 1;

		$this->setStep($tableName);
		$this->setSubstep($this->nextRange . ' / ' . $this->maxRange);
	}

	// Finalize any pending query
	// WARNING! If we do not do that now, the query will be emptied in the next operation and all
	// accumulated data will go away...
	if (!empty($this->query))
	{
		$this->query .= ";\n";

		if (!$this->writeDump($this->query))
		{
			return;
		}

		$this->query = '';
	}

	// Check for end of table dump (so that it happens inside the same operation)
	if (!($this->nextRange < $this->maxRange))
	{
		// Tell the user we are done with the table
		Factory::getLog()->log(LogLevel::DEBUG, "Done dumping " . $tableAbstract);

		// Output any data epilogue commands, e.g. SET IDENTITY_INSERT for SQL Server
		if ($dump_records && Factory::getEngineParamsProvider()->getScriptingParameter('db.dropstatements', 0))
		{
			Factory::getLog()->log(LogLevel::DEBUG, "Writing data dump epilogue for " . $tableAbstract);
			$epilogue = $this->getDataDumpEpilogue($tableAbstract, $tableName, $this->maxRange);

			if (!empty($epilogue))
			{
				if (!$this->writeDump($epilogue))
				{
					return;
				}
			}
		}

		if (count($this->tables) == 0)
		{
			// We have finished dumping the database!
			Factory::getLog()->log(LogLevel::INFO, "End of database detected; flushing the dump buffers...");
			$null = null;
			$this->writeDump($null);
			Factory::getLog()->log(LogLevel::INFO, "Database has been successfully dumped to SQL file(s)");
			$this->setState('postrun');
			$this->setStep('');
			$this->setSubstep('');
			$this->nextTable = '';
			$this->nextRange = 0;
		}
		elseif (count($this->tables) != 0)
		{
			// Switch tables
			$this->nextTable = array_shift($this->tables);
			$this->nextRange = 0;
			$this->setStep($this->nextTable);
			$this->setSubstep('');
		}
	}
}
/**
 * The public interface to an engine part. Runs the correct phase handler for
 * the current state (initialisation - run - finalisation) and hands back a
 * response array, optionally batching several steps into one call.
 *
 * @param int $nesting
 *
 * @return array A response array
 */
public function tick($nesting = 0)
{
	$this->waitTimeMsec = 0;

	$config = Factory::getConfiguration();
	$timer  = Factory::getTimer();

	// Dispatch to the handler matching the engine part's state
	$state = $this->getState();

	if ($state == "init")
	{
		$this->_prepare();
	}
	elseif ($state == "prepared" || $state == "running")
	{
		$this->_run();
	}
	elseif ($state == "postrun")
	{
		$this->_finalize();
	}

	$mustBreak = $config->get('volatile.breakflag', false);

	// Batch another step in the same call when we're not done, have time left,
	// no break was requested and the nesting cap isn't hit. Only parts with
	// nest logging enabled (currently only Kettenrad) are ever batched.
	$canKeepStepping = !in_array($this->getState(), array('finished', 'error'))
		&& $timer->getTimeLeft() > 0
		&& !$mustBreak
		&& $nesting < 20
		&& $this->nest_logging;

	if ($canKeepStepping)
	{
		$nesting++;
		Factory::getLog()->log(LogLevel::DEBUG, "*** Batching successive steps (nesting level {$nesting})");

		return $this->tick($nesting);
	}

	// No more batching: build the return array
	$out = $this->_makeReturnTable();

	// Things to do for nest-logged parts (currently, only Kettenrad is)
	if ($this->nest_logging)
	{
		if ($mustBreak)
		{
			Factory::getLog()->log(LogLevel::DEBUG, "*** Engine steps batching: Break flag detected.");
		}

		// Reset the break flag
		$config->set('volatile.breakflag', false);

		// Log that we're breaking the step
		Factory::getLog()->log(LogLevel::DEBUG, "*** Batching of engine steps finished. I will now return control to the caller.");

		// Decide between server-side and client-side sleep
		$serverSideSleep = true;

		if (method_exists($this, 'getTag'))
		{
			$tag             = $this->getTag();
			$clientSideSleep = Factory::getConfiguration()->get('akeeba.basic.clientsidewait', 0);

			if (in_array($tag, array('backend', 'restorepoint')) && $clientSideSleep)
			{
				$serverSideSleep = false;
			}
		}

		// Enforce minimum execution time
		$timer              = Factory::getTimer();
		$this->waitTimeMsec = (int) $timer->enforce_min_exec_time(true, $serverSideSleep);
	}

	// Send a Return Table back to the caller
	return $out;
}
/**
 * Set up the Akeeba Restore engine for the current archive: nukes any previous
 * AKFactory state and seeds it with the options for a dry-run, direct-write
 * extraction of the first backup part into the site's temporary directory.
 */
private function setUpAkeebaRestore()
{
	$engineConfig = Factory::getConfiguration();

	// Never allow the restoration engine less than 2 whole seconds of runtime
	$timeAllowance = max(2, floor(Factory::getTimer()->getTimeLeft()));

	// The first backup part is the archive Akeeba Restore must read
	$stats       = Factory::getStatistics();
	$record      = $stats->getRecord();
	$partFiles   = $stats->get_all_filenames($record, false);
	$archivePath = array_shift($partFiles);

	// Extraction target is the site's temporary directory
	$stockDirs     = Platform::getInstance()->get_stock_directories();
	$extractTarget = $stockDirs['[SITETMP]'];

	// The archive type is the uppercased extension without the leading dot
	$archiveType = ltrim(strtoupper(Factory::getArchiverEngine()->getExtension()), '.');

	$restoreOptions = array(
		'kickstart.tuning.max_exec_time' => $timeAllowance,
		'kickstart.tuning.run_time_bias' => $engineConfig->get('akeeba.tuning.run_time_bias', 75),
		'kickstart.tuning.min_exec_time' => '0',
		'kickstart.procengine'           => 'direct',
		'kickstart.setup.sourcefile'     => $archivePath,
		'kickstart.setup.destdir'        => $extractTarget,
		'kickstart.setup.restoreperms'   => '0',
		'kickstart.setup.filetype'       => $archiveType,
		'kickstart.setup.dryrun'         => '1',
		'kickstart.jps.password'         => $engineConfig->get('engine.archiver.jps.key', '', false)
	);

	\AKFactory::nuke();

	foreach ($restoreOptions as $key => $value)
	{
		\AKFactory::set($key, $value);
	}

	\AKFactory::set('kickstart.enabled', true);
}
/**
 * Implements the _run() abstract method.
 *
 * One step of the database dump engine part: optionally post-processes a
 * finished archive part (when immediate post-processing is enabled), resumes
 * adding a dump part to the archive if that was interrupted, then performs one
 * more step of the actual SQL dump.
 */
protected function _run()
{
	// Check if we are already done
	if ($this->getState() == 'postrun')
	{
		Factory::getLog()->log(LogLevel::DEBUG, __CLASS__ . " :: Already finished");
		$this->setStep("");
		$this->setSubstep("");

		return;
	}

	// Mark ourselves as still running (we will test if we actually do towards the end ;) )
	$this->setState('running');

	// Check if we are still adding a database dump part to the archive, or if
	// we have to post-process a part
	if (Factory::getEngineParamsProvider()->getScriptingParameter('db.saveasname', 'normal') != 'output')
	{
		$archiver = Factory::getArchiverEngine();
		$configuration = Factory::getConfiguration();

		if ($configuration->get('engine.postproc.common.after_part', 0))
		{
			if (!empty($archiver->finishedPart))
			{
				// Take the oldest finished part off the queue and post-process it
				$filename = array_shift($archiver->finishedPart);
				Factory::getLog()->log(LogLevel::INFO, 'Preparing to post process ' . basename($filename));

				// Remember when this step started, so we can predict a timeout later
				$timer = Factory::getTimer();
				$startTime = $timer->getRunningTime();

				$post_proc = Factory::getPostprocEngine();
				$result = $post_proc->processPart($filename);
				$this->propagateFromObject($post_proc);

				if ($result === false)
				{
					// Post-processing failed: log and warn, but do not abort the backup
					Factory::getLog()->log(LogLevel::WARNING, 'Failed to process file ' . $filename);
					Factory::getLog()->log(LogLevel::WARNING, 'Error received from the post-processing engine:');
					Factory::getLog()->log(LogLevel::WARNING, implode("\n", array_merge($this->getWarnings(), $this->getErrors())));
					$this->setWarning('Failed to process file ' . basename($filename));
				}
				elseif ($result === true)
				{
					// Add this part's size to the volatile storage
					$volatileTotalSize = $configuration->get('volatile.engine.archiver.totalsize', 0);
					$volatileTotalSize += (int) @filesize($filename);
					$configuration->set('volatile.engine.archiver.totalsize', $volatileTotalSize);

					Factory::getLog()->log(LogLevel::INFO, 'Successfully processed file ' . basename($filename));
				}
				else
				{
					// More work required
					Factory::getLog()->log(LogLevel::INFO, 'More post-processing steps required for file ' . $filename);
					$configuration->set('volatile.postproc.filename', $filename);

					// Let's push back the file into the archiver stack
					array_unshift($archiver->finishedPart, $filename);

					// Do we need to break the step?
					$endTime = $timer->getRunningTime();
					$stepTime = $endTime - $startTime;
					$timeLeft = $timer->getTimeLeft();

					if ($timeLeft < $stepTime)
					{
						// We predict that running yet another step would cause a timeout
						$configuration->set('volatile.breakflag', true);
					}
					else
					{
						// We have enough time to run yet another step
						$configuration->set('volatile.breakflag', false);
					}
				}

				// Should we delete the file afterwards?
				if ($configuration->get('engine.postproc.common.delete_after', false) && $post_proc->allow_deletes && $result === true)
				{
					Factory::getLog()->log(LogLevel::DEBUG, 'Deleting already processed file ' . basename($filename));
					Platform::getInstance()->unlink($filename);
				}

				// NOTE(review): unlike postProcessDonePartFile() elsewhere in this file, break_after
				// is honoured here even when $result is not `true` — confirm the asymmetry is intentional.
				if ($post_proc->break_after)
				{
					$configuration->set('volatile.breakflag', true);

					return;
				}
			}
		}

		if ($configuration->get('volatile.engine.archiver.processingfile', false))
		{
			// We had already started archiving the db file, but it needs more time
			$finished = true;
			Factory::getLog()->log(LogLevel::DEBUG, "Continuing adding the SQL dump part to the archive");

			// null arguments: the archiver resumes from its volatile state
			$archiver->addFile(null, null, null);
			$this->propagateFromObject($archiver);

			if ($this->getError())
			{
				return;
			}

			// The archiver clears the 'processingfile' flag once the file is fully added
			$finished = !$configuration->get('volatile.engine.archiver.processingfile', false);

			if ($finished)
			{
				$this->getNextDumpPart();
			}
			else
			{
				return;
			}
		}
	}

	// Perform one more step of the actual database dump
	$this->stepDatabaseDump();

	// Flush the dump file buffers
	$null = null;
	$this->writeline($null);
}
/**
 * Put up to $fileLength bytes of the file pointer $sourceFilePointer into the backup archive.
 *
 * Returns true if we ran out of time and need to perform a step break (a resume offset is
 * stored in the volatile configuration). Returns false when the whole quantity of data has
 * been copied. Throws an ErrorException if the source file shrank or went away mid-copy.
 *
 * @param resource $sourceFilePointer The pointer to the input file
 * @param int      $fileLength        How many bytes to copy; decremented as data is written
 *
 * @return bool True to indicate we need to resume packing the file in the next step
 *
 * @throws ErrorException When the source file shrunk or went away (the archive is damaged)
 */
private function putDataFromFileIntoArchive(&$sourceFilePointer, &$fileLength)
{
	// Get references to engine objects we're going to be using
	$configuration = Factory::getConfiguration();
	$timer = Factory::getTimer();

	// Quick copy data into the archive, AKEEBA_CHUNK bytes at a time
	while (!feof($sourceFilePointer) && $timer->getTimeLeft() > 0 && $fileLength > 0)
	{
		// Normally I read up to AKEEBA_CHUNK bytes at a time
		$chunkSize = AKEEBA_CHUNK;

		// Do I have a split ZIP?
		if ($this->useSplitArchive)
		{
			// I must only read up to the free space in the part file if it's less than AKEEBA_CHUNK.
			$free_space = $this->getPartFreeSize();
			$chunkSize = min($free_space, AKEEBA_CHUNK);

			// If I ran out of free space I have to create a new part file.
			if ($free_space <= 0)
			{
				$this->createAndOpenNewPart();

				// We have created the part. If the user asked for immediate post-proc, break step now.
				if ($configuration->get('engine.postproc.common.after_part', 0))
				{
					// Remember where to resume reading the source file from
					$resumeOffset = @ftell($sourceFilePointer);
					@fclose($sourceFilePointer);

					$configuration->set('volatile.engine.archiver.resume', $resumeOffset);
					$configuration->set('volatile.engine.archiver.processingfile', true);
					$configuration->set('volatile.breakflag', true);

					// Always close the open part when immediate post-processing is requested
					@$this->fclose($this->fp);
					$this->fp = null;

					return true;
				}

				// No immediate post-proc. Recalculate the optimal chunk size.
				$free_space = $this->getPartFreeSize();
				$chunkSize = min($free_space, AKEEBA_CHUNK);
			}
		}

		// Read some data and write it to the backup archive part file
		$data = fread($sourceFilePointer, $chunkSize);
		$bytesWritten = $this->fwrite($this->fp, $data, akstrlen($data));

		// Subtract the written bytes from the bytes left to write
		$fileLength -= $bytesWritten;
	}

	/**
	 * According to the file size we read when we were writing the file header we have more data to write. However,
	 * we reached the end of the file. This means the file went away or shrunk. We can't reliably go back and
	 * change the file header since it may be in a previous part file that's already been post-processed. All we can
	 * do is try to warn the user.
	 *
	 * BUGFIX: this used to be a `while` loop whose condition also required $timer->getTimeLeft() > 0.
	 * When the timer had just expired, a shrunk/deleted file went UNDETECTED: execution fell through
	 * to the resume check below (which saw EOF and did nothing) and the method returned false as if
	 * the file had been archived completely, silently producing a damaged archive. A damaged archive
	 * must be reported unconditionally, so this is now a plain `if` with no timer guard.
	 */
	if (feof($sourceFilePointer) && $fileLength > 0)
	{
		throw new ErrorException('The file shrunk or went away while putting it in the backup archive. Your archive is damaged! If this is a temporary or cache file we advise you to exclude the contents of the temporary / cache folder it is contained in.');
	}

	// WARNING!!! The extra $fileLength != 0 check is necessary as PHP won't reach EOF for 0-byte files.
	if (!feof($sourceFilePointer) && $fileLength != 0)
	{
		// We have to break, or we'll time out!
		$resumeOffset = @ftell($sourceFilePointer);
		@fclose($sourceFilePointer);

		$configuration->set('volatile.engine.archiver.resume', $resumeOffset);
		$configuration->set('volatile.engine.archiver.processingfile', true);

		return true;
	}

	return false;
}
/**
 * The most basic file transaction: add a single entry (file or directory) to
 * the archive.
 *
 * This method is re-entrant. When an uncompressed, non-virtual file cannot be
 * fully written within the current step's time budget, its state is parked in
 * the volatile configuration keys (volatile.engine.archiver.*), the method
 * returns true with volatile.engine.archiver.processingfile set, and the next
 * call resumes the copy from that state.
 *
 * @param   bool    $isVirtual         If true, the next parameter contains file data instead of a file name
 * @param   string  $sourceNameOrData  Absolute file name to read data from, or the file data itself if $isVirtual is true
 * @param   string  $targetName        The (relative) file name under which to store the file in the archive
 *
 * @return  bool  True on success, false otherwise
 */
protected function _addFile($isVirtual, &$sourceNameOrData, $targetName)
{
	static $configuration;

	// Use byte-accurate string lengths (mb_strlen may be overloaded; '8bit' forces byte counts)
	$mb = function_exists('mb_strlen');

	// Note down the starting disk number for Split ZIP archives (0-based part index)
	if ($this->_useSplitZIP)
	{
		$starting_disk_number_for_this_file = $this->_currentFragment - 1;
	}
	else
	{
		$starting_disk_number_for_this_file = 0;
	}

	if (!$configuration)
	{
		$configuration = Factory::getConfiguration();
	}

	// Open data file for output
	if (is_null($this->fp))
	{
		$this->fp = @$this->_fopen($this->_dataFileName, "ab");
	}

	if ($this->fp === false)
	{
		$this->setError("Could not open archive file {$this->_dataFileName} for append!");

		return false;
	}

	if (!$configuration->get('volatile.engine.archiver.processingfile', false))
	{
		// ----- Fresh file: build and write the local file header -----

		// See if it's a directory
		$isDir = $isVirtual ? false : is_dir($sourceNameOrData);

		// See if it's a symlink (w/out dereference)
		$isSymlink = false;

		if ($this->_symlink_store_target && !$isVirtual)
		{
			$isSymlink = is_link($sourceNameOrData);
		}

		// Get real size before compression. For symlinks we store the link
		// target text, so that's the "file size".
		if ($isVirtual)
		{
			$fileSize = $mb ? mb_strlen($sourceNameOrData, '8bit') : strlen($sourceNameOrData);
		}
		elseif ($isSymlink)
		{
			$fileSize = $mb ? mb_strlen(@readlink($sourceNameOrData), '8bit') : strlen(@readlink($sourceNameOrData));
		}
		else
		{
			$fileSize = $isDir ? 0 : @filesize($sourceNameOrData);
		}

		// Get last modification time to store in archive
		$ftime = $isVirtual ? time() : @filemtime($sourceNameOrData);

		// Decide if we will compress
		if ($isDir || $isSymlink)
		{
			// Don't compress directories or symlink targets
			$compressionMethod = 0;
		}
		else
		{
			// Parse memory_limit into bytes. An empty value means "no limit".
			// BUGFIX: test emptiness on the raw string; the old code compared
			// the already-converted integer against "" (loose 0 == "" only
			// "worked" on PHP < 8).
			$memLimitRaw = ini_get("memory_limit");
			$noMemLimit  = ($memLimitRaw == "");

			if (strstr($memLimitRaw, 'M'))
			{
				$memLimit = (int) $memLimitRaw * 1048576;
			}
			elseif (strstr($memLimitRaw, 'K'))
			{
				$memLimit = (int) $memLimitRaw * 1024;
			}
			elseif (strstr($memLimitRaw, 'G'))
			{
				$memLimit = (int) $memLimitRaw * 1073741824;
			}
			else
			{
				$memLimit = (int) $memLimitRaw;
			}

			if ($noMemLimit || $fileSize >= _AKEEBA_COMPRESSION_THRESHOLD)
			{
				// No memory limit, or file at/over the threshold => only compress files up
				// to the threshold (compressing larger files in memory risks a timeout)
				$compressionMethod = ($fileSize <= _AKEEBA_COMPRESSION_THRESHOLD) ? 8 : 0;
			}
			elseif (function_exists("memory_get_usage"))
			{
				// PHP can report memory usage: compress only if ~2.5x the file size fits in
				// the available memory (the data is held in RAM while compressing; the
				// containing application / CMS alone eats several MB).
				// BUGFIX: $memLimit is already in bytes here; the original passed it
				// through _return_bytes() a second time.
				$availableRAM      = $memLimit - memory_get_usage();
				$compressionMethod = ($availableRAM / 2.5 >= $fileSize) ? 8 : 0;
			}
			else
			{
				// PHP can't report memory usage, compress only files up to 512KB
				// (conservative approach) and hope it doesn't break
				$compressionMethod = ($fileSize <= 524288) ? 8 : 0;
			}
		}

		// Without zlib support we can only store
		$compressionMethod = function_exists("gzcompress") ? $compressionMethod : 0;

		$storedName = $targetName;

		if ($isVirtual)
		{
			Factory::getLog()->log(LogLevel::DEBUG, ' Virtual add:' . $storedName . ' (' . $fileSize . ') - ' . $compressionMethod);
		}

		/* "Local file header" segment. */
		$unc_len = $fileSize;

		if (!$isDir)
		{
			// Get CRC for regular files, not dirs
			if ($isVirtual)
			{
				$crc = crc32($sourceNameOrData);
			}
			else
			{
				// Chunked CRC32: fast and avoids loading the whole (possibly large) file in memory.
				// NOTE(review): for a symlink-to-file this CRCs the *target's contents* while the
				// stored data below is the link text — verify against upstream intent.
				$crc = $this->crcCalculator->crc32_file($sourceNameOrData, $this->AkeebaPackerZIP_CHUNK_SIZE);

				// If the file was unreadable, $crc will be false, so we skip the file
				if ($crc === false)
				{
					$this->setWarning('Could not calculate CRC32 for ' . $sourceNameOrData);

					return false;
				}
			}
		}
		elseif ($isSymlink)
		{
			// Symlink to a directory: CRC of the link target text
			$crc = crc32(@readlink($sourceNameOrData));
		}
		else
		{
			// Dummy CRC for dirs; directory entries get a trailing slash and zero length
			$crc         = 0;
			$storedName .= "/";
			$unc_len     = 0;
		}

		// If we have to compress, read the data in memory and compress it
		if ($compressionMethod == 8)
		{
			// Get uncompressed data
			if ($isVirtual)
			{
				$udata =& $sourceNameOrData;
			}
			else
			{
				$udata = @file_get_contents($sourceNameOrData);
			}

			if ($udata === false)
			{
				// Unreadable file, skip it. Normally, we should have exited on the CRC code above
				$this->setWarning('Unreadable file ' . $sourceNameOrData . '. Check permissions');

				return false;
			}

			// Proceed with compression
			$zdata = @gzcompress($udata);

			if ($zdata === false)
			{
				// If compression fails, let it behave like no compression was available
				$c_len             = $unc_len;
				$compressionMethod = 0;
			}
			else
			{
				unset($udata);

				// Strip the 2-byte zlib header and the 4-byte Adler-32 trailer to get
				// the raw DEFLATE stream required by the ZIP format
				$zdata = substr(substr($zdata, 0, -4), 2);
				$c_len = $mb ? mb_strlen($zdata, '8bit') : strlen($zdata);
			}
		}
		else
		{
			$c_len = $unc_len;
		}

		/* Get the hex time. */
		$dtime = dechex($this->_unix2DosTime($ftime));

		if (($mb ? mb_strlen($dtime, '8bit') : strlen($dtime)) < 8)
		{
			$dtime = "00000000";
		}

		// Pack the DOS date/time as a little-endian 32-bit value
		$hexdtime = chr(hexdec($dtime[6] . $dtime[7]))
			. chr(hexdec($dtime[4] . $dtime[5]))
			. chr(hexdec($dtime[2] . $dtime[3]))
			. chr(hexdec($dtime[0] . $dtime[1]));

		// If it's a split ZIP file, we've got to make sure that the header can fit in the part
		if ($this->_useSplitZIP)
		{
			// Get header size, taking into account any extra header necessary
			// (30 = fixed size of the ZIP local file header)
			$header_size = 30 + ($mb ? mb_strlen($storedName, '8bit') : strlen($storedName));

			// Compare to free part space
			clearstatcache();
			$current_part_size = @filesize($this->_dataFileName);
			$free_space        = $this->_fragmentSize - (($current_part_size === false) ? 0 : $current_part_size);

			if ($free_space <= $header_size)
			{
				// Not enough space on current part, create new part
				if (!$this->_createNewPart())
				{
					$this->setError('Could not create new ZIP part file ' . basename($this->_dataFileName));

					return false;
				}

				// Open data file for output
				$this->fp = @$this->_fopen($this->_dataFileName, "ab");

				if ($this->fp === false)
				{
					$this->setError("Could not open archive file {$this->_dataFileName} for append!");

					return false;
				}
			}
		}

		$old_offset = @ftell($this->fp);

		if ($this->_useSplitZIP && $old_offset == 0)
		{
			// Because in split ZIPs we have the split ZIP marker in the first four bytes.
			@fseek($this->fp, 4);
			$old_offset = @ftell($this->fp);
		}

		// Get the file name length in bytes
		$fn_length = $mb ? mb_strlen($storedName, '8bit') : strlen($storedName);

		/* Begin creating the ZIP local file header. */
		$this->_fwrite($this->fp, $this->_fileHeader); /* Local file header signature. */

		/*
		 * Version needed to extract. NOTE(review): the two-byte binary literals at
		 * this point were corrupted in this copy of the file; they are reconstructed
		 * from the ZIP specification (2.0 for deflated entries, 1.0 for stored
		 * symlink targets) — verify against upstream.
		 */
		if (!$isSymlink)
		{
			$this->_fwrite($this->fp, pack('v', 20));
		}
		else
		{
			$this->_fwrite($this->fp, pack('v', 10));
		}

		$this->_fwrite($this->fp, pack('v', 2048)); /* General purpose bit flag. Bit 11 set = use UTF-8 encoding for filenames & comments */
		$this->_fwrite($this->fp, pack('v', ($compressionMethod == 8) ? 8 : 0)); /* Compression method. */
		$this->_fwrite($this->fp, $hexdtime); /* Last modification time/date. */
		$this->_fwrite($this->fp, pack('V', $crc)); /* CRC 32 information. */

		if (!isset($c_len))
		{
			// Defensive: the compression logic above should always have set this
			$c_len = $unc_len;
		}

		$this->_fwrite($this->fp, pack('V', $c_len)); /* Compressed filesize. */
		$this->_fwrite($this->fp, pack('V', $unc_len)); /* Uncompressed filesize. */
		$this->_fwrite($this->fp, pack('v', $fn_length)); /* Length of filename. */
		$this->_fwrite($this->fp, pack('v', 0)); /* Extra field length. */
		$this->_fwrite($this->fp, $storedName); /* File name. */

		// Cache useful information about the file in case we have to resume packing it
		if (!$isDir && !$isSymlink && !$isVirtual)
		{
			$configuration->set('volatile.engine.archiver.unc_len', $unc_len);
			$configuration->set('volatile.engine.archiver.hexdtime', $hexdtime);
			$configuration->set('volatile.engine.archiver.crc', $crc);
			$configuration->set('volatile.engine.archiver.c_len', $c_len);
			$configuration->set('volatile.engine.archiver.fn_length', $fn_length);
			$configuration->set('volatile.engine.archiver.old_offset', $old_offset);
			$configuration->set('volatile.engine.archiver.storedName', $storedName);
			$configuration->set('volatile.engine.archiver.sourceNameOrData', $sourceNameOrData);
		}
	}
	else
	{
		// Since we are continuing archiving, it's an uncompressed regular file. Set up the variables.
		// BUGFIX: the original set $compressionMethod = 1 here, which made the central
		// directory code below store the doctored (near-zero) $unc_len instead of the
		// real uncompressed size for files spanning multiple steps. 0 (store) is the
		// correct method and routes the size write through the $c_len trick.
		$compressionMethod = 0;
		$isDir             = false;
		$isSymlink         = false;
		$unc_len           = $configuration->get('volatile.engine.archiver.unc_len');
		$hexdtime          = $configuration->get('volatile.engine.archiver.hexdtime');
		$crc               = $configuration->get('volatile.engine.archiver.crc');
		$c_len             = $configuration->get('volatile.engine.archiver.c_len');
		$fn_length         = $configuration->get('volatile.engine.archiver.fn_length');
		$old_offset        = $configuration->get('volatile.engine.archiver.old_offset');
		$storedName        = $configuration->get('volatile.engine.archiver.storedName');
	}

	/* "File data" segment. */
	if ($compressionMethod == 8)
	{
		// Just dump the compressed data
		if (!$this->_useSplitZIP)
		{
			$this->_fwrite($this->fp, $zdata);

			if ($this->getError())
			{
				return false;
			}
		}
		else
		{
			// Split ZIP. Check if we need to split the part in the middle of the data.
			clearstatcache();
			$current_part_size = @filesize($this->_dataFileName);
			$free_space        = $this->_fragmentSize - (($current_part_size === false) ? 0 : $current_part_size);

			if ($free_space >= ($mb ? mb_strlen($zdata, '8bit') : strlen($zdata)))
			{
				// Write in one part
				$this->_fwrite($this->fp, $zdata);

				if ($this->getError())
				{
					return false;
				}
			}
			else
			{
				$bytes_left = $mb ? mb_strlen($zdata, '8bit') : strlen($zdata);

				while ($bytes_left > 0)
				{
					clearstatcache();
					$current_part_size = @filesize($this->_dataFileName);
					$free_space        = $this->_fragmentSize - (($current_part_size === false) ? 0 : $current_part_size);

					// Fill up the current part
					$this->_fwrite($this->fp, $zdata, min($mb ? mb_strlen($zdata, '8bit') : strlen($zdata), $free_space));

					if ($this->getError())
					{
						return false;
					}

					// How much data didn't fit in the current part?
					$bytes_left = ($mb ? mb_strlen($zdata, '8bit') : strlen($zdata)) - $free_space;

					if ($bytes_left > 0)
					{
						$this->_fclose($this->fp);
						$this->fp = null;

						// Create new part
						if (!$this->_createNewPart())
						{
							// Die if we couldn't create the new part
							$this->setError('Could not create new ZIP part file ' . basename($this->_dataFileName));

							return false;
						}

						// Open data file for output
						$this->fp = @$this->_fopen($this->_dataFileName, "ab");

						if ($this->fp === false)
						{
							$this->setError("Could not open archive file {$this->_dataFileName} for append!");

							return false;
						}

						// Keep only the unwritten remainder of the compressed data
						$zdata = substr($zdata, -$bytes_left);
					}
				}
			}
		}

		unset($zdata);
	}
	elseif (!($isDir || $isSymlink))
	{
		if ($isVirtual)
		{
			// Virtual file, just write the data!
			if (!$this->_useSplitZIP)
			{
				$this->_fwrite($this->fp, $sourceNameOrData);

				if ($this->getError())
				{
					return false;
				}
			}
			else
			{
				// Split ZIP. Check if we need to split the part in the middle of the data.
				clearstatcache();
				$current_part_size = @filesize($this->_dataFileName);
				$free_space        = $this->_fragmentSize - (($current_part_size === false) ? 0 : $current_part_size);

				if ($free_space >= ($mb ? mb_strlen($sourceNameOrData, '8bit') : strlen($sourceNameOrData)))
				{
					// Write in one part
					$this->_fwrite($this->fp, $sourceNameOrData);

					if ($this->getError())
					{
						return false;
					}
				}
				else
				{
					// Split the virtual data across parts. Work on a scratch copy:
					// $sourceNameOrData is passed by reference and must not be
					// truncated for the caller.
					$remainingData = $sourceNameOrData;
					$bytes_left    = $mb ? mb_strlen($remainingData, '8bit') : strlen($remainingData);

					while ($bytes_left > 0)
					{
						clearstatcache();
						$current_part_size = @filesize($this->_dataFileName);
						$free_space        = $this->_fragmentSize - (($current_part_size === false) ? 0 : $current_part_size);

						// Fill up the current part.
						// BUGFIX: the original measured the stale compressed buffer
						// $zdata here and rewrote the *full* virtual data on every
						// loop iteration instead of the unwritten remainder.
						$this->_fwrite($this->fp, $remainingData, min($mb ? mb_strlen($remainingData, '8bit') : strlen($remainingData), $free_space));

						if ($this->getError())
						{
							return false;
						}

						// How much data didn't fit in the current part?
						$bytes_left = ($mb ? mb_strlen($remainingData, '8bit') : strlen($remainingData)) - $free_space;

						if ($bytes_left > 0)
						{
							$this->_fclose($this->fp);
							$this->fp = null;

							// Create new part if required
							if (!$this->_createNewPart())
							{
								// Die if we couldn't create the new part
								$this->setError('Could not create new ZIP part file ' . basename($this->_dataFileName));

								return false;
							}

							// Open data file for output
							$this->fp = @$this->_fopen($this->_dataFileName, "ab");

							if ($this->fp === false)
							{
								$this->setError("Could not open archive file {$this->_dataFileName} for append!");

								return false;
							}

							// Keep only the unwritten remainder
							$remainingData = substr($remainingData, -$bytes_left);
						}
					}
				}
			}
		}
		else
		{
			// IMPORTANT! Only this case can be spanned across steps: uncompressed, non-virtual data

			// Restore the resume state, if any
			if ($configuration->get('volatile.engine.archiver.processingfile', false))
			{
				$sourceNameOrData = $configuration->get('volatile.engine.archiver.sourceNameOrData', '');
				$unc_len          = $configuration->get('volatile.engine.archiver.unc_len', 0);
				$resume           = $configuration->get('volatile.engine.archiver.resume', 0);
			}

			// Copy the file contents, ignore directories
			$zdatafp = @fopen($sourceNameOrData, "rb");

			if ($zdatafp === false)
			{
				$this->setWarning('Unreadable file ' . $sourceNameOrData . '. Check permissions');

				return false;
			}

			$timer = Factory::getTimer();

			// Seek to the resume point if required
			if ($configuration->get('volatile.engine.archiver.processingfile', false))
			{
				// Seek to new offset
				$seek_result = @fseek($zdatafp, $resume);

				if ($seek_result === -1)
				{
					// What?! We can't resume!
					$this->setError(sprintf('Could not resume packing of file %s. Your archive is damaged!', $sourceNameOrData));

					return false;
				}

				// Doctor the uncompressed size to match the remainder of the data
				$unc_len = $unc_len - $resume;
			}

			if (!$this->_useSplitZIP)
			{
				// For non Split ZIP, just dump the file very fast
				while (!feof($zdatafp) && ($timer->getTimeLeft() > 0) && ($unc_len > 0))
				{
					$zdata = fread($zdatafp, AKEEBA_CHUNK);
					$this->_fwrite($this->fp, $zdata, min($mb ? mb_strlen($zdata, '8bit') : strlen($zdata), AKEEBA_CHUNK));
					$unc_len -= AKEEBA_CHUNK;

					if ($this->getError())
					{
						return false;
					}
				}

				if (!feof($zdatafp) && ($unc_len != 0))
				{
					// We have to break, or we'll time out!
					$resume = @ftell($zdatafp);
					$configuration->set('volatile.engine.archiver.resume', $resume);
					$configuration->set('volatile.engine.archiver.processingfile', true);

					return true;
				}
			}
			else
			{
				// Split ZIP - Do we have enough space to host the whole file?
				clearstatcache();
				$current_part_size = @filesize($this->_dataFileName);
				$free_space        = $this->_fragmentSize - (($current_part_size === false) ? 0 : $current_part_size);

				if ($free_space >= $unc_len)
				{
					// Yes, it will fit inside this part, do quick copy
					while (!feof($zdatafp) && ($timer->getTimeLeft() > 0) && ($unc_len > 0))
					{
						$zdata = fread($zdatafp, AKEEBA_CHUNK);
						$this->_fwrite($this->fp, $zdata, min($mb ? mb_strlen($zdata, '8bit') : strlen($zdata), AKEEBA_CHUNK));
						$unc_len -= AKEEBA_CHUNK;

						if ($this->getError())
						{
							return false;
						}
					}

					if (!feof($zdatafp) && ($unc_len != 0))
					{
						// We have to break, or we'll time out!
						$resume = @ftell($zdatafp);
						$configuration->set('volatile.engine.archiver.resume', $resume);
						$configuration->set('volatile.engine.archiver.processingfile', true);

						return true;
					}
				}
				else
				{
					// No, we'll have to split between parts. We'll loop until we run out of data.
					while (!feof($zdatafp) && ($timer->getTimeLeft() > 0))
					{
						// Find the optimal chunk size for the free space left in this part
						clearstatcache();
						$current_part_size  = @filesize($this->_dataFileName);
						$free_space         = $this->_fragmentSize - (($current_part_size === false) ? 0 : $current_part_size);
						$chunk_size_primary = min(AKEEBA_CHUNK, $free_space);

						if ($chunk_size_primary <= 0)
						{
							$chunk_size_primary = max(AKEEBA_CHUNK, $free_space);
						}

						// Calculate if we have to read some more data (smaller chunk size)
						// and how many times we must read w/ the primary chunk size
						$chunk_size_secondary = $free_space % $chunk_size_primary;
						$loop_times           = ($free_space - $chunk_size_secondary) / $chunk_size_primary;

						// Read and write with the primary chunk size
						for ($i = 1; $i <= $loop_times; $i++)
						{
							$zdata = fread($zdatafp, $chunk_size_primary);
							$this->_fwrite($this->fp, $zdata, min($mb ? mb_strlen($zdata, '8bit') : strlen($zdata), $chunk_size_primary));
							$unc_len -= $chunk_size_primary;

							if ($this->getError())
							{
								return false;
							}

							// Do we have enough time to proceed?
							if (!feof($zdatafp) && ($unc_len != 0) && ($timer->getTimeLeft() <= 0))
							{
								// No, we have to break, or we'll time out!
								$resume = @ftell($zdatafp);
								$configuration->set('volatile.engine.archiver.resume', $resume);
								$configuration->set('volatile.engine.archiver.processingfile', true);

								return true;
							}
						}

						// Read and write w/ secondary chunk size, if non-zero
						if ($chunk_size_secondary > 0)
						{
							$zdata = fread($zdatafp, $chunk_size_secondary);
							$this->_fwrite($this->fp, $zdata, min($mb ? mb_strlen($zdata, '8bit') : strlen($zdata), $chunk_size_secondary));
							$unc_len -= $chunk_size_secondary;

							if ($this->getError())
							{
								return false;
							}
						}

						// Do we have enough time to proceed?
						if (!feof($zdatafp) && ($unc_len != 0) && ($timer->getTimeLeft() <= 0))
						{
							// No, we have to break, or we'll time out! The current part is
							// full at this point, so create a new part before returning.
							$resume = @ftell($zdatafp);
							$configuration->set('volatile.engine.archiver.resume', $resume);
							$configuration->set('volatile.engine.archiver.processingfile', true);

							if (!$this->_createNewPart())
							{
								// Die if we couldn't create the new part
								$this->setError('Could not create new ZIP part file ' . basename($this->_dataFileName));

								return false;
							}

							// Open data file for output
							$this->fp = @$this->_fopen($this->_dataFileName, "ab");

							if ($this->fp === false)
							{
								$this->setError("Could not open archive file {$this->_dataFileName} for append!");

								return false;
							}

							// ...then, return
							return true;
						}

						// Create new ZIP part, but only if we'll have more data to write
						if (!feof($zdatafp) && ($unc_len > 0))
						{
							// Create new ZIP part
							if (!$this->_createNewPart())
							{
								// Die if we couldn't create the new part
								$this->setError('Could not create new ZIP part file ' . basename($this->_dataFileName));

								return false;
							}

							// Close the old data file handle (the current part name has already advanced)
							$this->_fclose($this->fp);
							$this->fp = null;

							// We have created the part. If the user asked for immediate post-proc, break the step now.
							if ($configuration->get('engine.postproc.common.after_part', 0))
							{
								$resume = @ftell($zdatafp);
								$configuration->set('volatile.engine.archiver.resume', $resume);
								$configuration->set('volatile.engine.archiver.processingfile', true);
								$configuration->set('volatile.breakflag', true);
								@fclose($zdatafp);

								return true;
							}

							// Open data file for output
							$this->fp = @$this->_fopen($this->_dataFileName, "ab");

							if ($this->fp === false)
							{
								$this->setError("Could not open archive file {$this->_dataFileName} for append!");

								return false;
							}
						}
					} // end while
				}
			}

			@fclose($zdatafp);
		}
	}
	elseif ($isSymlink)
	{
		// Stored symlinks: the entry data is the link target text
		$this->_fwrite($this->fp, @readlink($sourceNameOrData));
	}

	// ----- Central directory entry -----

	// Open the central directory file for append
	if (is_null($this->cdfp))
	{
		$this->cdfp = @$this->_fopen($this->_ctrlDirFileName, "ab");
	}

	if ($this->cdfp === false)
	{
		$this->setError("Could not open Central Directory temporary file for append!");

		return false;
	}

	$this->_fwrite($this->cdfp, $this->_ctrlDirHeader); /* Central directory entry signature. */

	/*
	 * Version made by / version needed to extract. NOTE(review): the binary
	 * literals here were corrupted in this copy of the file; reconstructed per
	 * the ZIP specification (0x0314 = UNIX host byte + version 2.0, required so
	 * extractors honor the UNIX external attributes of symlinks) — verify
	 * against upstream.
	 */
	if (!$isSymlink)
	{
		$this->_fwrite($this->cdfp, pack('v', 20)); /* Version made by (2.0). */
		$this->_fwrite($this->cdfp, pack('v', 20)); /* Version needed to extract. */
		$this->_fwrite($this->cdfp, pack('v', 2048)); /* General purpose bit flag (bit 11 = UTF-8 names). */
		$this->_fwrite($this->cdfp, pack('v', ($compressionMethod == 8) ? 8 : 0)); /* Compression method. */
	}
	else
	{
		// Symlinks get special treatment
		$this->_fwrite($this->cdfp, pack('v', 0x0314)); /* Version made by (version 2.0 with UNIX extensions). */
		$this->_fwrite($this->cdfp, pack('v', 10)); /* Version needed to extract. */
		$this->_fwrite($this->cdfp, pack('v', 2048)); /* General purpose bit flag (bit 11 = UTF-8 names). */
		$this->_fwrite($this->cdfp, pack('v', 0)); /* Compression method (store). */
	}

	$this->_fwrite($this->cdfp, $hexdtime); /* Last mod time/date. */
	$this->_fwrite($this->cdfp, pack('V', $crc)); /* CRC 32 information. */
	$this->_fwrite($this->cdfp, pack('V', $c_len)); /* Compressed filesize. */

	if ($compressionMethod == 0)
	{
		// When we are not compressing, $unc_len is reduced towards 0 while backing up.
		// Stored entries have equal compressed and uncompressed sizes, so writing
		// $c_len here always stores the correct length.
		$this->_fwrite($this->cdfp, pack('V', $c_len)); /* Uncompressed filesize. */
	}
	else
	{
		// When compressing, the uncompressed length differs from the compressed
		// length and this line writes the correct value.
		$this->_fwrite($this->cdfp, pack('V', $unc_len)); /* Uncompressed filesize. */
	}

	$this->_fwrite($this->cdfp, pack('v', $fn_length)); /* Length of filename. */
	$this->_fwrite($this->cdfp, pack('v', 0)); /* Extra field length. */
	$this->_fwrite($this->cdfp, pack('v', 0)); /* File comment length. */
	$this->_fwrite($this->cdfp, pack('v', $starting_disk_number_for_this_file)); /* Disk number start. */
	$this->_fwrite($this->cdfp, pack('v', 0)); /* Internal file attributes. */

	if (!$isSymlink)
	{
		// External file attributes - 'archive' bit set.
		// BUGFIX: the original packed the float 0.0 (always yields 0) for regular
		// files; 32 (0x20) is the DOS "archive" attribute bit.
		$this->_fwrite($this->cdfp, pack('V', $isDir ? 0x41FF0010 : 32));
	}
	else
	{
		// External file attributes for symlinks: UNIX mode 0120777 (lrwxrwxrwx)
		// in the high word, DOS archive bit in the low word.
		// NOTE(review): reconstructed from the corrupted literal " \x80\xFF\xA1"
		// — verify against upstream.
		$this->_fwrite($this->cdfp, pack('V', 0xA1FF8020));
	}

	$this->_fwrite($this->cdfp, pack('V', $old_offset)); /* Relative offset of local header. */
	$this->_fwrite($this->cdfp, $storedName); /* File name. */
	/* Optional extra field, file comment goes here. */

	// Finally, increase the file counter by one
	$this->_totalFileEntries++;

	// Uncache resume data
	$configuration->set('volatile.engine.archiver.sourceNameOrData', null);
	$configuration->set('volatile.engine.archiver.unc_len', null);
	$configuration->set('volatile.engine.archiver.resume', null);
	$configuration->set('volatile.engine.archiver.hexdtime', null);
	$configuration->set('volatile.engine.archiver.crc', null);
	$configuration->set('volatile.engine.archiver.c_len', null);
	$configuration->set('volatile.engine.archiver.fn_length', null);
	$configuration->set('volatile.engine.archiver.old_offset', null);
	$configuration->set('volatile.engine.archiver.storedName', null);
	$configuration->set('volatile.engine.archiver.processingfile', false);

	// ... and return TRUE = success
	return true;
}