/**
 * Execute the JSON API task: return a backup record along with its archive part files.
 *
 * @param   array  $parameters  The parameters to this task:
 *                              - backup_id  int  Numeric ID of the backup statistics record
 *
 * @return  mixed  The backup statistics record with a 'filenames' key added; one entry
 *                 per archive part with its part number, base name and size in bytes.
 *
 * @throws  \RuntimeException  In case of an error (HTTP-style code 404 when the record
 *                             does not exist)
 */
public function execute(array $parameters = array())
{
    // Get the passed configuration values
    $defConfig = array(
        'backup_id' => 0,
    );

    $defConfig = array_merge($defConfig, $parameters);

    $backup_id = (int) $defConfig['backup_id'];

    // Get the basic statistics record. BUGFIX: the record was previously fetched twice
    // with two identical get_statistics() calls; a single query is enough.
    $record = Platform::getInstance()->get_statistics($backup_id);

    // Backup record doesn't exist
    if (empty($record))
    {
        throw new \RuntimeException('Invalid backup record identifier', 404);
    }

    // Get a list of filenames
    $filenames = Factory::getStatistics()->get_all_filenames($record);

    if (empty($filenames))
    {
        // Archives are not stored on the server or no files produced
        $record['filenames'] = array();
    }
    else
    {
        $filedata = array();
        $i        = 0;

        // Get file sizes per part
        foreach ($filenames as $file)
        {
            $i++;

            // filesize() returns false on failure; normalise to zero
            $size = @filesize($file);
            $size = is_numeric($size) ? $size : 0;

            $filedata[] = array(
                'part' => $i,
                'name' => basename($file),
                'size' => $size,
            );
        }

        // Add the file info to the record
        $record['filenames'] = $filedata;
    }

    return $record;
}
/**
 * Execute the JSON API task: perform one more step of an already running backup.
 *
 * @param   array  $parameters  The parameters to this task:
 *                              - profile   int     Optional backup profile to activate
 *                              - tag       string  Backup origin tag
 *                              - backupid  string  Backup ID (usually of the form id123)
 *
 * @return  mixed  The backup "tick" array, augmented with BackupID and a boolean HasRun
 *
 * @throws  \RuntimeException  In case of a backup error
 */
public function execute(array $parameters = array())
{
    $filter = \JFilterInput::getInstance();

    // Merge the caller's parameters over the defaults
    $config = array_merge(array(
        'profile'  => null,
        'tag'      => AKEEBA_BACKUP_ORIGIN,
        'backupid' => null,
    ), $parameters);

    $profile  = $filter->clean($config['profile'], 'int');
    $tag      = $filter->clean($config['tag'], 'cmd');
    $backupid = $filter->clean($config['backupid'], 'cmd');

    // Set the active profile
    $session = $this->container->session;

    // Try to set the profile from the setup parameters
    if (!empty($profile))
    {
        // Make sure $profile is a positive integer >= 1
        $profile = max(1, $profile);

        $session->set('profile', $profile);
        define('AKEEBA_PROFILE', $profile);
    }

    /** @var \Akeeba\Backup\Site\Model\Backup $model */
    $model = $this->container->factory->model('Backup')->tmpInstance();

    $model->setState('tag', $tag);
    $model->setState('backupid', $backupid);

    $array = $model->stepBackup(false);

    if ($array['Error'] != '')
    {
        throw new \RuntimeException('A backup error has occurred: ' . $array['Error'], 500);
    }

    // BackupID contains the numeric backup record ID. backupid contains the backup id (usually in the form id123)
    $statistics        = Factory::getStatistics();
    $array['BackupID'] = $statistics->getId();

    // Remote clients expect a boolean, not an integer.
    $array['HasRun'] = $array['HasRun'] === 0;

    return $array;
}
/**
 * Execute the JSON API task: start a brand new backup.
 *
 * @param   array  $parameters  The parameters to this task:
 *                              - profile      int     Backup profile to use (>= 1)
 *                              - description  string  Backup description
 *                              - comment      string  Backup comment
 *                              - backupid     string  Optional backup ID (usually id123)
 *                              - overrides    array   Configuration overrides
 *
 * @return  mixed  The backup "tick" array, augmented with BackupID and a boolean HasRun
 *
 * @throws  \RuntimeException  In case of a backup error
 */
public function execute(array $parameters = array())
{
    $filter = \JFilterInput::getInstance();

    // Merge the incoming parameters over the defaults
    $config = array_merge(array(
        'profile'     => 1,
        'description' => '',
        'comment'     => '',
        'backupid'    => null,
        'overrides'   => array(),
    ), $parameters);

    // Make sure $profile is a positive integer >= 1
    $profile = max(1, (int) $config['profile']);

    $description = $filter->clean($config['description'], 'string');
    $comment     = $filter->clean($config['comment'], 'string');
    $overrides   = $filter->clean($config['overrides'], 'array');

    // An empty backup ID has to become null, otherwise the Engine doesn't set a backup ID
    $backupid = $filter->clean($config['backupid'], 'cmd');

    if (empty($backupid))
    {
        $backupid = null;
    }

    $this->container->session->set('profile', $profile);
    define('AKEEBA_PROFILE', $profile);

    /**
     * DO NOT REMOVE!
     *
     * The Model will only try to load the configuration after nuking the factory. This causes Profile 1 to be
     * loaded first. Then it figures out it needs to load a different profile and it does – but the protected
     * keys are NOT replaced, meaning that certain configuration parameters are not replaced. Most notably, the
     * chain. This causes backups to behave weirdly. So, DON'T REMOVE THIS UNLESS WE REFACTOR THE MODEL.
     */
    Platform::getInstance()->load_configuration($profile);

    /** @var \Akeeba\Backup\Site\Model\Backup $model */
    $model = $this->container->factory->model('Backup')->tmpInstance();

    $model->setState('tag', AKEEBA_BACKUP_ORIGIN);
    $model->setState('backupid', $backupid);
    $model->setState('description', $description);
    $model->setState('comment', $comment);

    $array = $model->startBackup($overrides);

    if ($array['Error'] != '')
    {
        throw new \RuntimeException('A backup error has occurred: ' . $array['Error'], 500);
    }

    // BackupID contains the numeric backup record ID. backupid contains the backup id (usually in the form id123)
    $statistics        = Factory::getStatistics();
    $array['BackupID'] = $statistics->getId();

    // Remote clients expect a boolean, not an integer.
    $array['HasRun'] = $array['HasRun'] === 0;

    return $array;
}
/**
 * Execute the JSON API task: return one Base64-encoded segment of a backup archive part.
 *
 * Segments are laid out every chunk_size MiB inside the part file; segment N covers
 * bytes [(N-1) * chunk_size MiB, N * chunk_size MiB).
 *
 * @param   array  $parameters  The parameters to this task:
 *                              - backup_id   int  Numeric backup record ID
 *                              - part_id     int  1-based archive part number
 *                              - segment     int  1-based segment number inside the part
 *                              - chunk_size  int  Segment size in MiB
 *
 * @return  mixed  Base64-encoded segment data
 *
 * @throws  \RuntimeException  In case of an error (404 for bad identifiers, 500 for I/O errors)
 */
public function execute(array $parameters = array())
{
    // Get the passed configuration values
    $defConfig = array(
        'backup_id'  => 0,
        'part_id'    => 1,
        'segment'    => 1,
        'chunk_size' => 1,
    );

    $defConfig = array_merge($defConfig, $parameters);

    $backup_id  = (int) $defConfig['backup_id'];
    $part_id    = (int) $defConfig['part_id'];
    $segment    = (int) $defConfig['segment'];
    $chunk_size = (int) $defConfig['chunk_size'];

    $backup_stats = Platform::getInstance()->get_statistics($backup_id);

    if (empty($backup_stats))
    {
        // Backup record doesn't exist
        throw new \RuntimeException('Invalid backup record identifier', 404);
    }

    $files = Factory::getStatistics()->get_all_filenames($backup_stats);

    if ((count($files) < $part_id) || ($part_id <= 0))
    {
        // Invalid part
        throw new \RuntimeException('Invalid backup part', 404);
    }

    $file     = $files[$part_id - 1];
    $filesize = @filesize($file);

    // Segments are chunk_size MiB apart
    $chunkBytes = $chunk_size * 1048576;
    $seekPos    = $chunkBytes * ($segment - 1);

    if ($seekPos > $filesize)
    {
        // Trying to seek past end of file
        throw new \RuntimeException('Invalid segment', 404);
    }

    $fp = fopen($file, 'rb');

    if ($fp === false)
    {
        // Could not read file
        throw new \RuntimeException('Error reading backup archive', 500);
    }

    if (fseek($fp, $seekPos, SEEK_SET) === -1)
    {
        // Could not seek to position. BUGFIX: close the handle instead of leaking it.
        fclose($fp);

        throw new \RuntimeException('Error reading specified segment', 500);
    }

    // BUGFIX: read a full segment (chunk_size MiB). The old code always read a fixed
    // 1048576 bytes, silently truncating every segment whenever chunk_size > 1.
    $buffer = fread($fp, $chunkBytes);

    fclose($fp);

    if ($buffer === false)
    {
        throw new \RuntimeException('Error reading specified segment', 500);
    }

    return base64_encode($buffer);
}
/**
 * Performs one more step of dumping database data
 *
 * This is a resumable operation. State is carried between steps in $this->nextTable
 * (the table currently being dumped) and $this->nextRange (how many rows of it have
 * been dumped so far). Each invocation:
 *   1. On the first step for a table (nextRange == 0): writes the optional DROP
 *      statement, the CREATE statement, the data dump preamble, and fetches the
 *      table's row count and auto_increment information.
 *   2. Dumps up to one batch of rows as INSERT statements, stopping early when the
 *      step timer runs out.
 *   3. When the table is exhausted: writes the data dump epilogue and advances to
 *      the next table, or enters the 'postrun' state when no tables are left.
 *
 * @return void
 */
protected function stepDatabaseDump()
{
    // Initialize local variables
    $db = $this->getDB();

    if ($this->getError())
    {
        return;
    }

    if (!is_object($db) || $db === false)
    {
        $this->setError(__CLASS__ . '::_run() Could not connect to database?!');

        return;
    }

    $outData = ''; // Used for outputting INSERT INTO commands

    $this->enforceSQLCompatibility(); // Apply MySQL compatibility option

    if ($this->getError())
    {
        return;
    }

    // Touch SQL dump file (write an empty line so the file exists)
    $nada = "";
    $this->writeline($nada);

    // Get this table's information
    $tableName = $this->nextTable;
    $this->setStep($tableName);
    $this->setSubstep('');
    $tableAbstract = trim($this->table_name_map[$tableName]);
    $dump_records  = $this->tables_data[$tableName]['dump_records'];

    // If it is the first run, find number of rows and get the CREATE TABLE command
    if ($this->nextRange == 0)
    {
        if ($this->getError())
        {
            return;
        }

        // Use the cached CREATE statement, if one exists
        $outCreate = '';

        if (is_array($this->tables_data[$tableName]))
        {
            if (array_key_exists('create', $this->tables_data[$tableName]))
            {
                $outCreate = $this->tables_data[$tableName]['create'];
            }
        }

        if (empty($outCreate) && !empty($tableName))
        {
            // The CREATE command wasn't cached. Time to create it. The $type and $dependencies
            // variables will be thrown away.
            $type         = 'table';
            $dependencies = array();
            $outCreate    = $this->get_create($tableAbstract, $tableName, $type, $dependencies);
        }

        // Create drop statements if required (the key is defined by the scripting engine)
        if (Factory::getEngineParamsProvider()->getScriptingParameter('db.dropstatements', 0))
        {
            if (array_key_exists('create', $this->tables_data[$tableName]))
            {
                $dropStatement = $this->createDrop($this->tables_data[$tableName]['create']);
            }
            else
            {
                // NOTE(review): $dependencies is only initialised in the "CREATE wasn't
                // cached" branch above; this call may reference an undefined variable
                // when no cached CREATE exists either — verify.
                $type            = 'table';
                $createStatement = $this->get_create($tableAbstract, $tableName, $type, $dependencies);
                $dropStatement   = $this->createDrop($createStatement);
            }

            if (!empty($dropStatement))
            {
                $dropStatement .= "\n";

                if (!$this->writeDump($dropStatement))
                {
                    return;
                }
            }
        }

        // Write the CREATE command after any DROP command which might be necessary.
        if (!$this->writeDump($outCreate))
        {
            return;
        }

        if ($dump_records)
        {
            // We are dumping data from a table, get the row count
            $this->getRowCount($tableAbstract);
        }
        else
        {
            // We should not dump any data
            Factory::getLog()->log(LogLevel::INFO, "Skipping dumping data of " . $tableAbstract);
            $this->maxRange  = 0;
            $this->nextRange = 1; // nextRange > maxRange marks the table as already done
            $outData         = '';
            $numRows         = 0;
            $dump_records    = false;
        }

        // Output any data preamble commands, e.g. SET IDENTITY_INSERT for SQL Server
        if ($dump_records && Factory::getEngineParamsProvider()->getScriptingParameter('db.dropstatements', 0))
        {
            Factory::getLog()->log(LogLevel::DEBUG, "Writing data dump preamble for " . $tableAbstract);
            $preamble = $this->getDataDumpPreamble($tableAbstract, $tableName, $this->maxRange);

            if (!empty($preamble))
            {
                if (!$this->writeDump($preamble))
                {
                    return;
                }
            }
        }

        // Get the table's auto increment information
        if ($dump_records)
        {
            $this->setAutoIncrementInfo();
        }
    }

    // Check if we have more work to do on this table
    $configuration = Factory::getConfiguration();
    $batchsize     = intval($configuration->get('engine.dump.common.batchsize', 1000));

    if ($batchsize <= 0)
    {
        $batchsize = 1000;
    }

    if ($this->nextRange < $this->maxRange)
    {
        $timer = Factory::getTimer();

        // Get the number of rows left to dump from the current table
        $sql = $db->getQuery(true)->select('*')->from($db->nameQuote($tableAbstract));

        // Use a stable ordering by the auto_increment column, when one exists
        if (!is_null($this->table_autoincrement['field']))
        {
            $sql->order($db->qn($this->table_autoincrement['field']) . ' ASC');
        }

        if ($this->nextRange == 0)
        {
            // First run, get a cursor to all records
            $db->setQuery($sql, 0, $batchsize);
            Factory::getLog()->log(LogLevel::INFO, "Beginning dump of " . $tableAbstract);
        }
        else
        {
            // Subsequent runs, get a cursor to the rest of the records
            $this->setSubstep($this->nextRange . ' / ' . $this->maxRange);

            // If we have an auto_increment value and the table has over $batchsize records use the indexed select instead of a plain limit
            if (!is_null($this->table_autoincrement['field']) && !is_null($this->table_autoincrement['value']))
            {
                Factory::getLog()->log(LogLevel::INFO, "Continuing dump of " . $tableAbstract . " from record #{$this->nextRange} using auto_increment column {$this->table_autoincrement['field']} and value {$this->table_autoincrement['value']}");
                $sql->where($db->qn($this->table_autoincrement['field']) . ' > ' . $db->q($this->table_autoincrement['value']));
                $db->setQuery($sql, 0, $batchsize);
            }
            else
            {
                Factory::getLog()->log(LogLevel::INFO, "Continuing dump of " . $tableAbstract . " from record #{$this->nextRange}");
                $db->setQuery($sql, $this->nextRange, $batchsize);
            }
        }

        $this->query  = '';
        $numRows      = 0;
        $use_abstract = Factory::getEngineParamsProvider()->getScriptingParameter('db.abstractnames', 1);

        $filters    = Factory::getFilters();
        $mustFilter = $filters->hasFilterType('dbobject', 'children');

        // Run the query; on failure reset the error stack and carry on with a null cursor
        try
        {
            $cursor = $db->query();
        }
        catch (\Exception $exc)
        {
            $db->resetErrors();
            $cursor = null;
        }

        while (is_array($myRow = $db->fetchAssoc()) && $numRows < $this->maxRange - $this->nextRange)
        {
            $this->createNewPartIfRequired();
            $numRows++;
            $numOfFields = count($myRow);

            // On MS SQL Server there's always a RowNumber pseudocolumn added at the end, screwing up the backup (GRRRR!)
            if ($db->getDriverType() == 'mssql')
            {
                $numOfFields--;
            }

            // If row-level filtering is enabled, please run the filtering
            if ($mustFilter)
            {
                $isFiltered = $filters->isFiltered(
                    array('table' => $tableAbstract, 'row' => $myRow),
                    $configuration->get('volatile.database.root', '[SITEDB]'),
                    'dbobject',
                    'children'
                );

                if ($isFiltered)
                {
                    // Update the auto_increment value to avoid edge cases when the batch size is one
                    if (!is_null($this->table_autoincrement['field']) && isset($myRow[$this->table_autoincrement['field']]))
                    {
                        $this->table_autoincrement['value'] = $myRow[$this->table_autoincrement['field']];
                    }

                    continue;
                }
            }

            if (!$this->extendedInserts || $this->extendedInserts && empty($this->query))
            {
                // Start a new INSERT statement: always when extended inserts are off,
                // or when the accumulated extended-insert query is currently empty
                $newQuery  = true;
                $fieldList = $this->getFieldListSQL(array_keys($myRow), $numOfFields);

                if ($numOfFields > 0)
                {
                    $this->query = "INSERT INTO " . $db->nameQuote(!$use_abstract ? $tableName : $tableAbstract) . " {$fieldList} VALUES ";
                }
            }
            else
            {
                // On other cases, just mark that we should add a comma and start a new VALUES entry
                $newQuery = false;
            }

            $outData = '(';

            // Step through each of the row's values
            $fieldID = 0;

            // Used in running backup fix
            $isCurrentBackupEntry = false;

            // Fix 1.2a - NULL values were being skipped
            if ($numOfFields > 0)
            {
                foreach ($myRow as $value)
                {
                    // The ID of the field, used to determine placement of commas
                    $fieldID++;

                    if ($fieldID > $numOfFields)
                    {
                        // This is required for SQL Server backups, do NOT remove!
                        continue;
                    }

                    // Fix 2.0: Mark currently running backup as successful in the DB snapshot
                    if ($tableAbstract == '#__ak_stats')
                    {
                        if ($fieldID == 1)
                        {
                            // Compare the ID to the currently running backup's record ID
                            $statistics           = Factory::getStatistics();
                            $isCurrentBackupEntry = $value == $statistics->getId();
                        }
                        elseif ($fieldID == 6)
                        {
                            // Treat the status field: report the running backup as complete
                            $value = $isCurrentBackupEntry ? 'complete' : $value;
                        }
                    }

                    // Post-process the value
                    if (is_null($value))
                    {
                        $outData .= "NULL"; // Cope with null values
                    }
                    else
                    {
                        // Accommodate for runtime magic quotes
                        $value = @get_magic_quotes_runtime() ? stripslashes($value) : $value;
                        $value = $db->Quote($value);

                        if ($this->postProcessValues)
                        {
                            $value = $this->postProcessQuotedValue($value);
                        }

                        $outData .= $value;
                    }

                    if ($fieldID < $numOfFields)
                    {
                        $outData .= ', ';
                    }
                }
            }

            $outData .= ')';

            if ($numOfFields)
            {
                // If it's an existing query and we have extended inserts
                if ($this->extendedInserts && !$newQuery)
                {
                    // Check the existing query size
                    $query_length = strlen($this->query);
                    $data_length  = strlen($outData);

                    if ($query_length + $data_length > $this->packetSize)
                    {
                        // We are about to exceed the packet size. Write the data so far.
                        $this->query .= ";\n";

                        if (!$this->writeDump($this->query))
                        {
                            return;
                        }

                        // Then, start a new query.
                        // NOTE(review): unlike the initial INSERT above, the restarted
                        // query omits the {$fieldList} column list, relying on the
                        // values matching column order — verify this is intentional.
                        $this->query = '';
                        $this->query = "INSERT INTO " . $db->nameQuote(!$use_abstract ? $tableName : $tableAbstract) . " VALUES ";
                        $this->query .= $outData;
                    }
                    else
                    {
                        // We have room for more data. Append $outData to the query.
                        $this->query .= ', ';
                        $this->query .= $outData;
                    }
                }
                elseif ($this->extendedInserts && $newQuery)
                {
                    // Append the data to the INSERT statement
                    $this->query .= $outData;

                    // Let's see the size of the dumped data...
                    $query_length = strlen($this->query);

                    if ($query_length >= $this->packetSize)
                    {
                        // This was a BIG query. Write the data to disk.
                        $this->query .= ";\n";

                        if (!$this->writeDump($this->query))
                        {
                            return;
                        }

                        // Then, start a new query
                        $this->query = '';
                    }
                }
                else
                {
                    // Extended inserts disabled: each row is its own INSERT statement.
                    // Append the data to the INSERT statement
                    $this->query .= $outData;

                    // Write the data to disk.
                    $this->query .= ";\n";

                    if (!$this->writeDump($this->query))
                    {
                        return;
                    }

                    // Then, start a new query
                    $this->query = '';
                }
            }

            $outData = '';

            // Update the auto_increment value to avoid edge cases when the batch size is one
            if (!is_null($this->table_autoincrement['field']))
            {
                $this->table_autoincrement['value'] = $myRow[$this->table_autoincrement['field']];
            }

            unset($myRow);

            // Check for imminent timeout
            if ($timer->getTimeLeft() <= 0)
            {
                Factory::getLog()->log(LogLevel::DEBUG, "Breaking dump of {$tableAbstract} after {$numRows} rows; will continue on next step");

                break;
            }
        }

        $db->freeResult($cursor);

        // Advance the _nextRange pointer (always by at least one so we cannot loop forever)
        $this->nextRange += $numRows != 0 ? $numRows : 1;

        $this->setStep($tableName);
        $this->setSubstep($this->nextRange . ' / ' . $this->maxRange);
    }

    // Finalize any pending query
    // WARNING! If we do not do that now, the query will be emptied in the next operation and all
    // accumulated data will go away...
    if (!empty($this->query))
    {
        $this->query .= ";\n";

        if (!$this->writeDump($this->query))
        {
            return;
        }

        $this->query = '';
    }

    // Check for end of table dump (so that it happens inside the same operation)
    if (!($this->nextRange < $this->maxRange))
    {
        // Tell the user we are done with the table
        Factory::getLog()->log(LogLevel::DEBUG, "Done dumping " . $tableAbstract);

        // Output any data epilogue commands, e.g. SET IDENTITY_INSERT for SQL Server
        if ($dump_records && Factory::getEngineParamsProvider()->getScriptingParameter('db.dropstatements', 0))
        {
            Factory::getLog()->log(LogLevel::DEBUG, "Writing data dump epilogue for " . $tableAbstract);
            $epilogue = $this->getDataDumpEpilogue($tableAbstract, $tableName, $this->maxRange);

            if (!empty($epilogue))
            {
                if (!$this->writeDump($epilogue))
                {
                    return;
                }
            }
        }

        if (count($this->tables) == 0)
        {
            // We have finished dumping the database!
            Factory::getLog()->log(LogLevel::INFO, "End of database detected; flushing the dump buffers...");
            $null = null;
            $this->writeDump($null);
            Factory::getLog()->log(LogLevel::INFO, "Database has been successfully dumped to SQL file(s)");
            $this->setState('postrun');
            $this->setStep('');
            $this->setSubstep('');
            $this->nextTable = '';
            $this->nextRange = 0;
        }
        elseif (count($this->tables) != 0)
        {
            // Switch tables
            $this->nextTable = array_shift($this->tables);
            $this->nextRange = 0;
            $this->setStep($this->nextTable);
            $this->setSubstep('');
        }
    }
}
/**
 * Creates a new part for the spanned archive
 *
 * @param   bool  $finalPart  Is this the final archive part?
 *
 * @return  bool  True on success
 */
protected function createNewPartFile($finalPart = false)
{
    // Close any open file pointers
    if (is_resource($this->fp))
    {
        $this->fclose($this->fp);
    }

    if (is_resource($this->cdfp))
    {
        $this->fclose($this->cdfp);
    }

    // The finished part is no longer resumable
    $this->removeFromOffsetsList($this->_dataFileName);

    // Set the file pointers to null
    $this->fp   = null;
    $this->cdfp = null;

    // Push the previous part if we have to post-process it immediately
    if (Factory::getConfiguration()->get('engine.postproc.common.after_part', 0))
    {
        $this->finishedPart[] = $this->_dataFileName;
    }

    // Add the finished part's size to our rolling sum
    clearstatcache();
    $this->totalCompressedSize += filesize($this->_dataFileName);
    $this->totalParts++;
    $this->currentPartNumber = $this->totalParts;

    // The final part gets the .zip extension; intermediate parts get .zNN
    $this->_dataFileName = $finalPart
        ? $this->dataFileNameWithoutExtension . '.zip'
        : $this->dataFileNameWithoutExtension . '.z' . sprintf('%02d', $this->currentPartNumber);

    Factory::getLog()->log(LogLevel::INFO, 'Creating new ZIP part #' . $this->currentPartNumber . ', file ' . $this->_dataFileName);

    // Inform CUBE that we have changed the multipart number
    Factory::getStatistics()->updateMultipart($this->totalParts);

    // Try to remove any existing file, then create a fresh, world-writable one
    @unlink($this->_dataFileName);

    $result = @touch($this->_dataFileName);

    if (function_exists('chmod'))
    {
        chmod($this->_dataFileName, 0666);
    }

    return $result;
}
/**
 * Resets the engine state, wiping out any pending backups and/or stale
 * temporary data.
 *
 * Running backups that appear stale (their log has not been written to for
 * at least 'maxrun' seconds) are marked as failed, their archive files are
 * deleted and their temporary data is purged. The currently loaded factory
 * state is preserved across the operation (serialized before, restored after).
 *
 * @param array $config Configuration parameters for the reset operation:
 *                      - global  bool  True (default) to reset all origins;
 *                                      false to only reset the current origin
 *                      - log     bool  False (default) pauses logging while resetting
 *                      - maxrun  int   Age in seconds after which a running
 *                                      backup is considered stale (default 180)
 *
 * @return void
 */
public static function resetState($config = array())
{
    $default_config = array(
        'global' => true,
        'log'    => false,
        'maxrun' => 180,
    );

    // Merge defaults and cast to object for convenient property access
    $config = (object) array_merge($default_config, $config);

    // Pause logging if so desired
    if (!$config->log)
    {
        Factory::getLog()->pause();
    }

    $originTag = null;

    if (!$config->global)
    {
        // If we're not resetting globally, get a list of running backups per tag
        $originTag = Platform::getInstance()->get_backup_origin();
    }

    // Cache the factory before proceeding (restored at the end of this method)
    $factory = self::serialize();

    $runningList = Platform::getInstance()->get_running_backups($originTag);

    // Origins we have to clean
    $origins = array(Platform::getInstance()->get_backup_origin());

    // 1. Detect failed backups
    if (is_array($runningList) && !empty($runningList))
    {
        // The current timestamp
        $now = time();

        // Mark running backups as failed
        foreach ($runningList as $running)
        {
            if (empty($originTag))
            {
                // Check the timestamp of the log file to decide if it's stuck,
                // but only if a tag is not set
                $tstamp = Factory::getLog()->getLastTimestamp($running['origin']);

                if (!is_null($tstamp))
                {
                    // We can only check the timestamp if it's returned. If not, we assume the backup is stale
                    $difference = abs($now - $tstamp);

                    // Backups less than maxrun seconds old are not considered stale (default: 3 minutes)
                    if ($difference < $config->maxrun)
                    {
                        continue;
                    }
                }
            }

            $filenames = Factory::getStatistics()->get_all_filenames($running, false);

            $totalSize = 0;

            // Process if there are files to delete...
            if (!is_null($filenames))
            {
                // Delete the failed backup's archive, if exists
                foreach ($filenames as $failedArchive)
                {
                    if (file_exists($failedArchive))
                    {
                        $totalSize += (int) @filesize($failedArchive);
                        Platform::getInstance()->unlink($failedArchive);
                    }
                }
            }

            // Mark the backup failed; keep whatever size information we already had,
            // otherwise record the size of the files we just deleted
            if (!$running['total_size'])
            {
                $running['total_size'] = $totalSize;
            }

            $running['status']    = 'fail';
            $running['multipart'] = 0;
            $dummy                = null;
            Platform::getInstance()->set_or_update_statistics($running['id'], $running, $dummy);

            // Remember this backup's origin (origin + optional ".backupid" suffix) for cleanup below
            $backupId  = isset($running['backupid']) ? '.' . $running['backupid'] : '';
            $origins[] = $running['origin'] . $backupId;
        }
    }

    if (!empty($origins))
    {
        $origins = array_unique($origins);

        foreach ($origins as $originTag)
        {
            self::loadState($originTag);
            // Remove temporary files
            Factory::getTempFiles()->deleteTempFiles();
            // Delete any stale temporary data
            self::getFactoryStorage()->reset($originTag);
        }
    }

    // Reload the factory cached at the start of this method
    self::unserialize($factory);
    unset($factory);

    // Unpause logging if it was previously paused
    if (!$config->log)
    {
        Factory::getLog()->unpause();
    }
}
/**
 * Adds a log entry to the #__admintools_scanalerts table, marking a modified, added or suspicious file.
 *
 * @param \stdClass      $newFileRecord The record of the current version of the file
 * @param \stdClass|null $oldFileRecord The record of the old version of the file (or null if it's an added file)
 *
 * @return void
 */
private function _logFileChange(&$newFileRecord, &$oldFileRecord = null)
{
    // Start with a blank alert record for this file
    $alert = array(
        'path'         => $newFileRecord->path,
        'scan_id'      => \Akeeba\Engine\Factory::getStatistics()->getId(),
        'diff'         => '',
        'threat_score' => 0,
        'acknowledged' => 0,
    );

    if (is_null($oldFileRecord))
    {
        // New file: score its raw contents, there is nothing to diff against
        $contents = @file_get_contents($newFileRecord->sourcePath);
        $alert['threat_score'] = $this->_getThreatScore($contents);
        unset($contents);
    }
    elseif (!$this->generateDiff)
    {
        // Modified file, but diff generation is disabled: use a fixed marker and
        // score the current contents
        $alert['diff'] = "###MODIFIED FILE###\n";
        $contents = @file_get_contents($newFileRecord->sourcePath);
        $alert['threat_score'] = $this->_getThreatScore($contents);
        unset($contents);
    }
    else
    {
        // Modified file: inflate both stored versions, normalise line endings
        // (CRLF and bare CR both become LF) and split into lines for diffing
        $inflateToLines = function ($gzData) {
            $text = gzinflate($gzData);
            $text = str_replace("\r\n", "\n", $text);
            $text = str_replace("\r", "\n", $text);

            return explode("\n", $text);
        };

        $newLines = $inflateToLines($newFileRecord->data);
        $oldLines = $inflateToLines($oldFileRecord->data);

        // NOTE(review): the argument order (new, old) is kept exactly as in the
        // original implementation — confirm against Horde_Text_Diff's expected
        // (from, to) ordering.
        $diffObject = new \Horde_Text_Diff('native', array($newLines, $oldLines));
        $renderer   = new \Horde_Text_Diff_Renderer();

        $alert['diff'] = $renderer->render($diffObject);
        unset($renderer, $diffObject, $newLines, $oldLines);

        $alert['threat_score'] = $this->_getThreatScore($alert['diff']);
    }

    // Do not create a record for non-threat files
    if ($this->ignoreNonThreats && !$alert['threat_score'])
    {
        return;
    }

    $record = (object) $alert;
    $db     = \JFactory::getDbo();
    $db->insertObject('#__admintools_scanalerts', $record);
    unset($record);
}
/**
 * Delete the backup file(s) of the stats record whose ID is set in the model
 *
 * @return bool True on success
 *
 * @throws RecordNotLoaded When the model's 'id' state is not a positive integer
 */
public function deleteFile()
{
    JLoader::import('joomla.filesystem.file');

    $id = $this->getState('id', 0);

    if (!is_numeric($id) || ($id <= 0))
    {
        throw new RecordNotLoaded(JText::_('COM_AKEEBA_BUADMIN_ERROR_INVALIDID'));
    }

    // Get the backup statistics record and the files to delete
    $stat     = Platform::getInstance()->get_statistics($id);
    $allFiles = Factory::getStatistics()->get_all_filenames($stat, false);

    // Remove the custom log file if necessary
    $this->deleteLogs($stat);

    // No files? Nothing to do.
    if (empty($allFiles))
    {
        return true;
    }

    $status = true;

    foreach ($allFiles as $filename)
    {
        // Skip files which are already gone
        if (!@file_exists($filename))
        {
            continue;
        }

        // Try PHP's unlink() first; fall back to Joomla's JFile on failure
        $deleted = @unlink($filename) ?: JFile::delete($filename);

        // A single failed deletion flips the overall status to false for good
        $status = $status && $deleted;
    }

    return $status;
}
/**
 * Push a backup archive part straight to the browser as a raw download.
 *
 * Sends the appropriate MIME / caching headers and echoes the file contents in
 * chunks, then closes the application. On an invalid record or part it sends an
 * HTTP 500 status and closes the application. This method never returns normally.
 *
 * @param   array  $config  Configuration: backup_id (int), part_id (int, 1-based)
 *
 * @return  void
 */
private function _apiDownloadDirect($config)
{
    $defConfig = array(
        'backup_id' => 0,
        'part_id'   => 1,
    );

    $config = array_merge($defConfig, $config);

    $backup_id = $config['backup_id'];
    $part_id   = $config['part_id'];

    $backup_stats = Platform::getInstance()->get_statistics($backup_id);

    if (empty($backup_stats))
    {
        // Backup record doesn't exist
        $this->status        = self::STATUS_NOT_FOUND;
        $this->encapsulation = self::ENCAPSULATION_RAW;

        @ob_end_clean();
        header('HTTP/1.1 500 Invalid backup record identifier');
        flush();

        JFactory::getApplication()->close();
    }

    $files = Factory::getStatistics()->get_all_filenames($backup_stats);

    if ((count($files) < $part_id) || ($part_id <= 0))
    {
        // Invalid part
        $this->status        = self::STATUS_NOT_FOUND;
        $this->encapsulation = self::ENCAPSULATION_RAW;

        @ob_end_clean();
        header('HTTP/1.1 500 Invalid backup part');
        flush();

        JFactory::getApplication()->close();
    }

    $filename = $files[$part_id - 1];

    @clearstatcache();

    // For a certain unmentionable browser -- Thank you, Nooku, for the tip
    if (function_exists('ini_get') && function_exists('ini_set'))
    {
        if (ini_get('zlib.output_compression'))
        {
            ini_set('zlib.output_compression', 'Off');
        }
    }

    // Remove php's time limit -- Thank you, Nooku, for the tip
    if (function_exists('ini_get') && function_exists('set_time_limit'))
    {
        if (!ini_get('safe_mode'))
        {
            @set_time_limit(0);
        }
    }

    $basename  = @basename($filename);
    $filesize  = @filesize($filename);
    $extension = strtolower(str_replace(".", "", strrchr($filename, ".")));

    // Kill every output buffer so nothing pollutes the binary stream
    while (@ob_end_clean())
    {
    }

    @clearstatcache();

    // Send MIME headers
    header('MIME-Version: 1.0');
    header('Content-Disposition: attachment; filename="' . $basename . '"');
    header('Content-Transfer-Encoding: binary');
    header('Accept-Ranges: bytes');

    switch ($extension)
    {
        case 'zip':
            // ZIP MIME type
            header('Content-Type: application/zip');
            break;

        default:
            // Generic binary data MIME type
            header('Content-Type: application/octet-stream');
            break;
    }

    // Notify of filesize, if this info is available
    if ($filesize > 0)
    {
        // BUGFIX: reuse the size we already read instead of calling filesize() again
        header('Content-Length: ' . $filesize);
    }

    // Disable caching
    header("Cache-Control: must-revalidate, post-check=0, pre-check=0");
    header("Expires: 0");
    header('Pragma: no-cache');

    flush();

    if ($filesize > 0)
    {
        // If the filesize is reported, use 1M chunks for echoing the data to the browser.
        // BUGFIX: the old value 1048756 contradicted its own "1M" comment; 1 MiB is 1048576.
        $blocksize = 1048576;

        // BUGFIX: open in binary mode ('rb') so Windows hosts don't mangle the data
        $handle = @fopen($filename, "rb");

        // Now we need to loop through the file and echo out chunks of file data
        if ($handle !== false)
        {
            while (!@feof($handle))
            {
                echo @fread($handle, $blocksize);
                @ob_flush();
                flush();
            }

            @fclose($handle);
        }
    }
    else
    {
        // If the filesize is not reported, hope that readfile works
        @readfile($filename);
    }

    flush();
    JFactory::getApplication()->close();
}
/**
 * Find where to store the backup files
 *
 * Registers a fresh temporary file for the SQL dump and decides where the dump
 * will live inside the archive (or the output directory), based on the scripting
 * engine's db.saveasname parameter and the embedded installer's settings.
 *
 * @param   int  $partNumber  The SQL part number, default is 0 (.sql)
 *
 * @return  void
 */
protected function getBackupFilePaths($partNumber = 0)
{
    Factory::getLog()->log(LogLevel::DEBUG, __CLASS__ . " :: Getting temporary file");
    $this->tempFile = Factory::getTempFiles()->registerTempFile(dechex(crc32(microtime())) . '.sql');
    Factory::getLog()->log(LogLevel::DEBUG, __CLASS__ . " :: Temporary file is {$this->tempFile}");

    // Work out the base name of the dump file inside the archive
    $partNumber = intval($partNumber);
    $baseName   = $this->dumpFile;

    if ($partNumber > 0)
    {
        // The file names are in the format dbname.sql, dbname.s01, dbname.s02, etc
        $partSuffix = '.s' . sprintf('%02u', $partNumber);

        if (strtolower(substr($baseName, -4)) == '.sql')
        {
            $baseName = substr($baseName, 0, -4) . $partSuffix;
        }
        else
        {
            $baseName .= $partSuffix;
        }
    }

    if (empty($this->installerSettings))
    {
        // Fetch the installer settings, starting from sane defaults
        $this->installerSettings = (object) array(
            'installerroot' => 'installation',
            'sqlroot'       => 'installation/sql',
            'databasesini'  => 1,
            'readme'        => 1,
            'extrainfo'     => 1,
        );

        $config               = Factory::getConfiguration();
        $installerKey         = $config->get('akeeba.advanced.embedded_installer');
        $installerDescriptors = Factory::getEngineParamsProvider()->getInstallerList();

        if (array_key_exists($installerKey, $installerDescriptors))
        {
            // The selected installer exists, use it
            $this->installerSettings = (object) $installerDescriptors[$installerKey];
        }
        elseif (array_key_exists('angie', $installerDescriptors))
        {
            // The selected installer doesn't exist, but ANGIE exists; use that instead
            $this->installerSettings = (object) $installerDescriptors['angie'];
        }
    }

    switch (Factory::getEngineParamsProvider()->getScriptingParameter('db.saveasname', 'normal'))
    {
        case 'output':
            // The SQL file will be stored uncompressed in the output directory
            $statistics       = Factory::getStatistics();
            $statRecord       = $statistics->getRecord();
            $this->saveAsName = $statRecord['absolute_path'];
            break;

        case 'normal':
            // The SQL file will be stored in the SQL root of the archive, as
            // specified by the particular embedded installer's settings
            $this->saveAsName = $this->installerSettings->sqlroot . '/' . $baseName;
            break;

        case 'short':
            // The SQL file will be stored on archive's root
            $this->saveAsName = $baseName;
            break;
    }

    if ($partNumber > 0)
    {
        Factory::getLog()->log(LogLevel::DEBUG, "AkeebaDomainDBBackup :: Creating new SQL dump part #{$partNumber}");
    }

    Factory::getLog()->log(LogLevel::DEBUG, "AkeebaDomainDBBackup :: SQL temp file is " . $this->tempFile);
    Factory::getLog()->log(LogLevel::DEBUG, "AkeebaDomainDBBackup :: SQL file location in archive is " . $this->saveAsName);
}
/**
 * Enable split archive creation where possible
 *
 * Turns multi-part output on when a part size of at least 64 KiB is configured,
 * and renames the current data file to the first-part extension (e.g. .z01).
 *
 * @return void
 */
protected function enableSplitArchives()
{
    $splitSize = Factory::getConfiguration()->get('engine.archiver.common.part_size', 0);

    // If the part size is less than 64Kb we won't enable split archives
    if ($splitSize < 65536)
    {
        return;
    }

    $extension            = $this->getExtension();
    $altExtension         = substr($extension, 0, 2) . '01';
    $archiveTypeUppercase = strtoupper(substr($extension, 1));

    Factory::getLog()->log(LogLevel::INFO, __CLASS__ . " :: Split {$archiveTypeUppercase} creation enabled");

    $this->useSplitArchive = true;
    $this->partSize        = $splitSize;

    // The first part swaps its natural extension for the .x01-style one
    $this->dataFileNameWithoutExtension = dirname($this->_dataFileName) . '/' . basename($this->_dataFileName, $extension);
    $this->_dataFileName                = $this->dataFileNameWithoutExtension . $altExtension;

    // Indicate that we have at least 1 part
    Factory::getStatistics()->updateMultipart(1);
}
/**
 * Applies the size quota to System Restore Point (SRP) backup records,
 * removing the oldest restore point archives (and their log files) once the
 * total size exceeds the configured akeeba.quota.srp_size_quota value.
 *
 * @param   object  $parent  The caller; used for step/substep relaying and warnings
 *
 * @return  bool  Always true (deletion failures only raise warnings)
 */
public function apply_srp_quotas($parent)
{
	$parent->relayStep('Applying quotas');
	$parent->relaySubstep('');

	// If no quota settings are enabled, quit
	$registry = Factory::getConfiguration();
	$srpQuotas = $registry->get('akeeba.quota.srp_size_quota');

	if ($srpQuotas <= 0)
	{
		Factory::getLog()->log(LogLevel::DEBUG, "No restore point quotas were defined; old restore point files will be kept intact");

		return true; // No quota limits were requested
	}

	// Get valid-looking backup ID's
	$validIDs = Platform::getInstance()->get_valid_backup_records(true, array('restorepoint'));

	if (!empty($validIDs))
	{
		// Keep everything but the first record
		$validIDs = array_splice($validIDs, 1);
	}

	$statistics = Factory::getStatistics();
	$latestBackupId = $statistics->getId();

	// Create a list of valid files
	$allFiles = array();

	if (count($validIDs))
	{
		foreach ($validIDs as $id)
		{
			$stat = Platform::getInstance()->get_statistics($id);

			// Get the log file name
			$tag = $stat['tag'];
			$backupId = isset($stat['backupid']) ? $stat['backupid'] : '';
			$logName = '';

			if (!empty($backupId))
			{
				$logName = 'akeeba.' . $tag . '.' . $backupId . '.log';
			}

			// Multipart processing
			$filenames = Factory::getStatistics()->get_all_filenames($stat, true);

			if (!is_null($filenames))
			{
				// Only process existing files
				$filesize = 0;

				foreach ($filenames as $filename)
				{
					$filesize += @filesize($filename);
				}

				$allFiles[] = array('id' => $id, 'filenames' => $filenames, 'size' => $filesize, 'logname' => $logName);
			}
		}
	}

	unset($validIDs);

	// If there are no files, exit early
	if (count($allFiles) == 0)
	{
		Factory::getLog()->log(LogLevel::DEBUG, "There were no old restore points to apply quotas on");

		return true;
	}

	// Init arrays
	$killids = array();
	$killLogs = array();
	$ret = array();

	// Do we need to apply size quotas?
	Factory::getLog()->log(LogLevel::DEBUG, "Processing restore point size quotas");

	// OK, let's start counting bytes!
	$runningSize = 0;

	while (count($allFiles) > 0)
	{
		// Each time, remove the last element of the backup array and calculate
		// running size. If it's over the limit, add the archive to the return array.
		$def = array_pop($allFiles);
		$runningSize += $def['size'];

		if ($runningSize >= $srpQuotas)
		{
			if ($latestBackupId == $def['id'])
			{
				// Never delete the latest backup; don't count it against the quota
				$runningSize -= $def['size'];
			}
			else
			{
				$ret[] = $def['filenames'];

				/**
				 * Bug fix: this used to push $def['filenames'] (an array) into
				 * $killids, but the loop below passes each entry to
				 * set_or_update_statistics() as a record ID. Push the record's
				 * ID instead, mirroring get_remote_quotas().
				 */
				$killids[] = $def['id'];

				if (!empty($def['logname']))
				{
					$filePath = reset($def['filenames']);

					if (!empty($filePath))
					{
						$killLogs[] = dirname($filePath) . '/' . $def['logname'];
					}
				}
			}
		}
	}

	// Convert the $ret 2-dimensional array to single dimensional
	$quotaFiles = array();

	foreach ($ret as $temp)
	{
		foreach ($temp as $filename)
		{
			$quotaFiles[] = $filename;
		}
	}

	// Mark the purged records as having no files on disk
	if (!empty($killids))
	{
		foreach ($killids as $id)
		{
			$data = array('filesexist' => '0');
			Platform::getInstance()->set_or_update_statistics($id, $data, $parent);
		}
	}

	// Apply quotas to SRP backup archives
	if (count($quotaFiles) > 0)
	{
		Factory::getLog()->log(LogLevel::DEBUG, "Applying quotas");
		\JLoader::import('joomla.filesystem.file');

		foreach ($quotaFiles as $file)
		{
			if (!@Platform::getInstance()->unlink($file))
			{
				$parent->setWarning("Failed to remove old system restore point file " . $file);
			}
		}
	}

	// Apply quotas to log files
	if (!empty($killLogs))
	{
		Factory::getLog()->log(LogLevel::DEBUG, "Removing obsolete log files");

		foreach ($killLogs as $logPath)
		{
			@Platform::getInstance()->unlink($logPath);
		}
	}

	return true;
}
/**
 * Calculates which remotely stored backup archives must be removed due to the
 * configured count / size / maximum-age quotas, and clears the
 * remote_filename field of the affected statistics records.
 *
 * Note: the original docblock claimed a boolean return; the method actually
 * returns the flat list of remote file names which must be deleted.
 *
 * @return array List of remote file names to delete (empty if nothing to do)
 */
protected function get_remote_quotas()
{
	// Get all records with a remote filename
	$allRecords = Platform::getInstance()->get_valid_remote_records();

	// Bail out if no records found
	if (empty($allRecords))
	{
		return array();
	}

	// Try to find the files to be deleted due to quota settings
	$statistics = Factory::getStatistics();
	$latestBackupId = $statistics->getId();

	// Filter out the current record and resolve each record's remote files
	$temp = array();

	foreach ($allRecords as $item)
	{
		if ($item['id'] == $latestBackupId)
		{
			continue;
		}

		$item['files'] = $this->get_remote_files($item['remote_filename'], $item['multipart']);
		$temp[] = $item;
	}

	$allRecords = $temp;

	// Bail out if only the current backup was included in the list
	if (count($allRecords) == 0)
	{
		return array();
	}

	// Get quota values
	$registry = Factory::getConfiguration();
	$countQuota = $registry->get('akeeba.quota.count_quota');
	$sizeQuota = $registry->get('akeeba.quota.size_quota');
	$useCountQuotas = $registry->get('akeeba.quota.enable_count_quota');
	$useSizeQuotas = $registry->get('akeeba.quota.enable_size_quota');
	$useDayQuotas = $registry->get('akeeba.quota.maxage.enable');
	$daysQuota = $registry->get('akeeba.quota.maxage.maxdays');
	$preserveDay = $registry->get('akeeba.quota.maxage.keepday');

	$leftover = array();
	$ret = array();
	$killids = array();

	// Maximum age ("day") quotas take precedence over count/size quotas
	if ($useDayQuotas)
	{
		$killDatetime = new \DateTime();
		$killDatetime->modify('-' . $daysQuota . ($daysQuota == 1 ? ' day' : ' days'));
		$killTS = $killDatetime->format('U');

		foreach ($allRecords as $def)
		{
			$backupstart = new \DateTime($def['backupstart']);
			$backupTS = $backupstart->format('U');
			$backupDay = $backupstart->format('d');

			// Is this on a preserve day?
			if ($preserveDay > 0)
			{
				if ($preserveDay == $backupDay)
				{
					$leftover[] = $def;

					continue;
				}
			}

			// Otherwise, check the timestamp
			if ($backupTS < $killTS)
			{
				$ret[] = $def['files'];
				$killids[] = $def['id'];
			}
			else
			{
				$leftover[] = $def;
			}
		}
	}

	// Do we need to apply count quotas?
	if ($useCountQuotas && $countQuota >= 1 && !$useDayQuotas)
	{
		$countQuota--;

		// Are there more files than the quota limit?
		if (!(count($allRecords) > $countQuota))
		{
			// No, effectively skip the quota checking
			$leftover = $allRecords;
		}
		else
		{
			Factory::getLog()->log(LogLevel::DEBUG, "Processing remote count quotas");

			// Yes, apply the quota setting.
			$totalRecords = count($allRecords);

			/**
			 * Bug fix: the condition used to be $count <= $totalRecords, which
			 * ran one extra iteration, popped null off the (by then empty)
			 * array and pushed junk entries into $ret / $killids. A strict
			 * less-than processes each record exactly once.
			 */
			for ($count = 0; $count < $totalRecords; $count++)
			{
				$def = array_pop($allRecords);

				if (count($leftover) >= $countQuota)
				{
					$ret[] = $def['files'];
					$killids[] = $def['id'];
				}
				else
				{
					$leftover[] = $def;
				}
			}

			unset($allRecords);
		}
	}
	else
	{
		// No count quotas are applied. (When day quotas are active this
		// overwrites the day-quota leftovers, but that is harmless: the size
		// quota block below is skipped in that case and $leftover is unused.)
		$leftover = $allRecords;
	}

	// Do we need to apply size quotas?
	if ($useSizeQuotas && $sizeQuota > 0 && count($leftover) > 0 && !$useDayQuotas)
	{
		Factory::getLog()->log(LogLevel::DEBUG, "Processing remote size quotas");

		// OK, let's start counting bytes!
		$runningSize = 0;

		while (count($leftover) > 0)
		{
			// Each time, remove the last element of the backup array and calculate
			// running size. If it's over the limit, add the archive to the $ret array.
			$def = array_pop($leftover);
			$runningSize += $def['total_size'];

			if ($runningSize >= $sizeQuota)
			{
				$ret[] = $def['files'];
				$killids[] = $def['id'];
			}
		}
	}

	// Convert the $ret 2-dimensional array to single dimensional
	$quotaFiles = array();

	foreach ($ret as $temp)
	{
		// Defensive: skip malformed entries
		if (!is_array($temp) || empty($temp))
		{
			continue;
		}

		foreach ($temp as $filename)
		{
			$quotaFiles[] = $filename;
		}
	}

	// Update the statistics record with the removed remote files
	if (!empty($killids))
	{
		foreach ($killids as $id)
		{
			if (empty($id))
			{
				continue;
			}

			$data = array('remote_filename' => '');
			Platform::getInstance()->set_or_update_statistics($id, $data, $this);
		}
	}

	return $quotaFiles;
}
/**
 * Checks whether a file should be excluded from the backup because of the
 * incremental backup filter (the filter.incremental scripting parameter).
 *
 * On first call it caches the filter switch and the timestamp of the last
 * complete backup of the active profile in static variables; subsequent
 * calls only compare the file's mtime against that cached timestamp.
 *
 * @param   string  $test  The path to test, relative to the current filesystem root
 * @param   string  $root  The root symbol (unused here; the filesystem root is
 *                         read from volatile configuration instead)
 *
 * @return  bool  True when the file must be excluded (not modified since the last backup)
 */
protected function is_excluded_by_api($test, $root)
{
	static $filter_switch = null;
	static $last_backup = null;

	if (is_null($filter_switch))
	{
		$config = Factory::getConfiguration();
		$filter_switch = Factory::getEngineParamsProvider()->getScriptingParameter('filter.incremental', 0);
		$filter_switch = $filter_switch == 1;
		$last_backup = $config->get('volatile.filter.last_backup', null);

		if (is_null($last_backup) && $filter_switch)
		{
			// Get a list of backups on this profile
			$backups = Platform::getInstance()->get_statistics_list(array('filters' => array(array('field' => 'profile_id', 'value' => Platform::getInstance()->get_active_profile()))));

			// Find this backup's ID
			$model = Factory::getStatistics();
			$id = $model->getId();

			if (is_null($id))
			{
				$id = -1;
			}

			// Initialise: use "now" as a sentinel so we can detect whether a
			// suitable previous backup was found below
			$last_backup = time();
			$now = $last_backup;

			// Find the last time a successful backup with this profile was made
			if (count($backups))
			{
				foreach ($backups as $backup)
				{
					// Skip the current backup
					if ($backup['id'] == $id)
					{
						continue;
					}

					// Skip non-complete backups
					if ($backup['status'] != 'complete')
					{
						continue;
					}

					$tzUTC = new \DateTimeZone('UTC');
					$dateTime = new \DateTime($backup['backupstart'], $tzUTC);
					$backuptime = $dateTime->getTimestamp();

					// First matching record wins — presumably the list is
					// ordered newest-first; TODO confirm against
					// get_statistics_list's ordering
					$last_backup = $backuptime;

					break;
				}
			}

			if ($last_backup == $now)
			{
				// No suitable backup found; disable this filter
				$config->set('volatile.scripting.incfile.filter.incremental', 0);
				$filter_switch = false;
			}
			else
			{
				// Cache the last backup timestamp
				$config->set('volatile.filter.last_backup', $last_backup);
			}
		}
	}

	// Incremental filtering disabled: never exclude anything
	if (!$filter_switch)
	{
		return false;
	}

	// Get the filesystem path for $root
	$config = Factory::getConfiguration();
	$fsroot = $config->get('volatile.filesystem.current_root', '');
	$ds = $fsroot == '' || $fsroot == '/' ? '' : DIRECTORY_SEPARATOR;
	$filename = $fsroot . $ds . $test;

	// Get the timestamp of the file
	$timestamp = @filemtime($filename);

	// If we could not get this information, include the file in the archive
	if ($timestamp === false)
	{
		return false;
	}

	// Compare it with the last backup timestamp and exclude if it's older than the last backup
	if ($timestamp <= $last_backup)
	{
		//Factory::getLog()->log(LogLevel::DEBUG, "Excluding $filename due to incremental backup restrictions");
		return true;
	}

	// No match? Just include the file!
	return false;
}
/** * Creates a new part for the spanned archive * * @param bool $finalPart Is this the final archive part? * * @return bool True on success */ protected function _createNewPart($finalPart = false) { // Close any open file pointers if (is_resource($this->fp)) { $this->_fclose($this->fp); } if (is_resource($this->cdfp)) { $this->_fclose($this->cdfp); } // Remove the just finished part from the list of resumable offsets $this->_removeFromOffsetsList($this->_dataFileName); // Set the file pointers to null $this->fp = null; $this->cdfp = null; // Push the previous part if we have to post-process it immediately $configuration = Factory::getConfiguration(); if ($configuration->get('engine.postproc.common.after_part', 0)) { // The first part needs its header overwritten during archive // finalization. Skip it from immediate processing. if ($this->_currentFragment != 1) { $this->finishedPart[] = $this->_dataFileName; } } $this->_totalFragments++; $this->_currentFragment = $this->_totalFragments; if ($finalPart) { $this->_dataFileName = $this->_dataFileNameBase . '.jpa'; } else { $this->_dataFileName = $this->_dataFileNameBase . '.j' . sprintf('%02d', $this->_currentFragment); } Factory::getLog()->log(LogLevel::INFO, 'Creating new JPA part #' . $this->_currentFragment . ', file ' . $this->_dataFileName); $statistics = Factory::getStatistics(); $statistics->updateMultipart($this->_totalFragments); // Try to remove any existing file @unlink($this->_dataFileName); // Touch the new file $result = @touch($this->_dataFileName); if (function_exists('chmod')) { chmod($this->_dataFileName, 0666); } // Try to write 6 bytes to it if ($result) { $result = @file_put_contents($this->_dataFileName, 'AKEEBA') == 6; } if ($result) { @unlink($this->_dataFileName); $result = @touch($this->_dataFileName); if (function_exists('chmod')) { chmod($this->_dataFileName, 0666); } } return $result; }
/**
 * Downloads the backup file of a specific backup attempt, if it's available.
 *
 * Reads the record ID from the model (falling back to the first element of
 * the 'cid' request array) and the optional 'part' request parameter, then
 * streams the matching backup archive part to the browser with attachment
 * headers. On validation failure it redirects back to the backup
 * administration view with an error message.
 *
 * @return bool|void True on the redirect paths; terminates the request via
 *                   exit(0) after streaming a file
 */
public function download()
{
	$model = $this->getThisModel();
	$id = $model->getId();

	// Which archive part was requested? -1 means "the single/only part"
	$part = $this->input->get('part', -1, 'int');

	// NOTE(review): both branches are identical — presumably one was meant to
	// differ for non-F0FInput objects; confirm against version history
	if ($this->input instanceof F0FInput)
	{
		$cid = $this->input->get('cid', array(), 'array');
	}
	else
	{
		$cid = $this->input->get('cid', array(), 'array');
	}

	// Fall back to the first cid entry when the model carries no ID
	if (empty($id))
	{
		if (is_array($cid) && !empty($cid))
		{
			$id = $cid[0];
		}
		else
		{
			$id = -1;
		}
	}

	// Invalid record ID: redirect back with an error message
	if ($id <= 0)
	{
		$session = JFactory::getSession();
		$task = $session->get('buadmin.task', 'browse', 'akeeba');
		$this->setRedirect(JUri::base() . 'index.php?option=com_akeeba&view=buadmin&task=' . $task, JText::_('STATS_ERROR_INVALIDID'), 'error');
		parent::display();

		return true;
	}

	$stat = Platform::getInstance()->get_statistics($id);
	$allFilenames = Factory::getStatistics()->get_all_filenames($stat);

	// Check single part files
	if (count($allFilenames) == 1 && $part == -1)
	{
		$filename = array_shift($allFilenames);
	}
	elseif (count($allFilenames) > 0 && count($allFilenames) > $part && $part >= 0)
	{
		$filename = $allFilenames[$part];
	}
	else
	{
		$filename = null;
	}

	if (is_null($filename) || empty($filename) || !@file_exists($filename))
	{
		// The requested part does not exist on disk: redirect back with an error
		$session = JFactory::getSession();
		$task = $session->get('buadmin.task', 'browse', 'akeeba');
		$this->setRedirect(JUri::base() . 'index.php?option=com_akeeba&view=buadmin&task=' . $task, JText::_('STATS_ERROR_INVALIDDOWNLOAD'), 'error');
		parent::display();

		return true;
	}
	else
	{
		// For a certain unmentionable browser -- Thank you, Nooku, for the tip
		if (function_exists('ini_get') && function_exists('ini_set'))
		{
			if (ini_get('zlib.output_compression'))
			{
				ini_set('zlib.output_compression', 'Off');
			}
		}

		// Remove php's time limit -- Thank you, Nooku, for the tip
		if (function_exists('ini_get') && function_exists('set_time_limit'))
		{
			if (!ini_get('safe_mode'))
			{
				@set_time_limit(0);
			}
		}

		$basename = @basename($filename);
		$filesize = @filesize($filename);
		$extension = strtolower(str_replace(".", "", strrchr($filename, ".")));

		// Drain any pending output buffers so raw file data can be emitted
		while (@ob_end_clean())
		{
		}

		@clearstatcache();

		// Send MIME headers
		header('MIME-Version: 1.0');
		header('Content-Disposition: attachment; filename="' . $basename . '"');
		header('Content-Transfer-Encoding: binary');
		header('Accept-Ranges: bytes');

		switch ($extension)
		{
			case 'zip':
				// ZIP MIME type
				header('Content-Type: application/zip');
				break;

			default:
				// Generic binary data MIME type
				header('Content-Type: application/octet-stream');
				break;
		}

		// Notify of filesize, if this info is available
		if ($filesize > 0)
		{
			header('Content-Length: ' . @filesize($filename));
		}

		// Disable caching
		header("Cache-Control: must-revalidate, post-check=0, pre-check=0");
		header("Expires: 0");
		header('Pragma: no-cache');

		flush();

		if ($filesize > 0)
		{
			// If the filesize is reported, use 1M chunks for echoing the data to the browser
			// NOTE(review): 1048756 is not exactly 1MiB (1048576) — kept as-is
			$blocksize = 1048756; //1M chunks
			$handle = @fopen($filename, "r");

			// Now we need to loop through the file and echo out chunks of file data
			if ($handle !== false)
			{
				while (!@feof($handle))
				{
					echo @fread($handle, $blocksize);
					@ob_flush();
					flush();
				}
			}

			if ($handle !== false)
			{
				@fclose($handle);
			}
		}
		else
		{
			// If the filesize is not reported, hope that readfile works
			@readfile($filename);
		}

		exit(0);
	}
}
/**
 * Implements the _run() abstract method.
 *
 * Writes the log header (engine version, system information, configuration
 * quirks), initialises the statistics record for this backup attempt and
 * initialises the archiver engine, then sets the domain state to 'postrun'.
 *
 * @return void
 */
protected function _run()
{
	if ($this->getState() == 'postrun')
	{
		Factory::getLog()->log(LogLevel::DEBUG, __CLASS__ . " :: Already finished");
		$this->setStep('');
		$this->setSubstep('');

		return;
	}
	else
	{
		$this->setState('running');
	}

	// Initialise the extra notes variable, used by platform classes to return warnings and errors
	$extraNotes = null;

	// Load the version defines
	Platform::getInstance()->load_version_defines();

	$registry = Factory::getConfiguration();

	// Write log file's header
	$version = defined('AKEEBABACKUP_VERSION') ? AKEEBABACKUP_VERSION : AKEEBA_VERSION;
	$date = defined('AKEEBABACKUP_DATE') ? AKEEBABACKUP_DATE : AKEEBA_DATE;

	Factory::getLog()->log(LogLevel::INFO, "--------------------------------------------------------------------------------");
	Factory::getLog()->log(LogLevel::INFO, "Akeeba Backup " . $version . ' (' . $date . ')');
	Factory::getLog()->log(LogLevel::INFO, "Got backup?");
	Factory::getLog()->log(LogLevel::INFO, "--------------------------------------------------------------------------------");

	// PHP configuration variables are tried to be logged only for debug and info log levels
	if ($registry->get('akeeba.basic.log_level') >= 2)
	{
		Factory::getLog()->log(LogLevel::INFO, "--- System Information ---");
		Factory::getLog()->log(LogLevel::INFO, "PHP Version :" . PHP_VERSION);
		Factory::getLog()->log(LogLevel::INFO, "PHP OS :" . PHP_OS);
		Factory::getLog()->log(LogLevel::INFO, "PHP SAPI :" . PHP_SAPI);

		if (function_exists('php_uname'))
		{
			Factory::getLog()->log(LogLevel::INFO, "OS Version :" . php_uname('s'));
		}

		$db = Factory::getDatabase();
		Factory::getLog()->log(LogLevel::INFO, "DB Version :" . $db->getVersion());

		if (isset($_SERVER['SERVER_SOFTWARE']))
		{
			$server = $_SERVER['SERVER_SOFTWARE'];
		}
		elseif ($sf = getenv('SERVER_SOFTWARE'))
		{
			$server = $sf;
		}
		else
		{
			$server = 'n/a';
		}

		Factory::getLog()->log(LogLevel::INFO, "Web Server :" . $server);

		$platformData = Platform::getInstance()->getPlatformVersion();
		Factory::getLog()->log(LogLevel::INFO, $platformData['name'] . " version :" . $platformData['version']);

		if (isset($_SERVER['HTTP_USER_AGENT']))
		{
			/**
			 * Bug fix: the original expression was parsed as
			 * (("User agent :" . phpversion()) <= "4.2.1") ? getenv(...) : $_SERVER[...]
			 * because string concatenation binds tighter than comparison and the
			 * ternary consumed the rest, so the "User agent :" label was never
			 * logged. Parentheses restore the intended label + value output.
			 */
			Factory::getLog()->log(LogLevel::INFO, "User agent :" . (phpversion() <= "4.2.1" ? getenv("HTTP_USER_AGENT") : $_SERVER['HTTP_USER_AGENT']));
		}

		Factory::getLog()->log(LogLevel::INFO, "Safe mode :" . ini_get("safe_mode"));
		Factory::getLog()->log(LogLevel::INFO, "Display errors :" . ini_get("display_errors"));
		Factory::getLog()->log(LogLevel::INFO, "Error reporting :" . self::error2string());
		Factory::getLog()->log(LogLevel::INFO, "Error display :" . self::errordisplay());
		Factory::getLog()->log(LogLevel::INFO, "Disabled functions :" . ini_get("disable_functions"));
		Factory::getLog()->log(LogLevel::INFO, "open_basedir restr.:" . ini_get('open_basedir'));
		Factory::getLog()->log(LogLevel::INFO, "Max. exec. time :" . ini_get("max_execution_time"));
		Factory::getLog()->log(LogLevel::INFO, "Memory limit :" . ini_get("memory_limit"));

		if (function_exists("memory_get_usage"))
		{
			Factory::getLog()->log(LogLevel::INFO, "Current mem. usage :" . memory_get_usage());
		}

		if (function_exists("gzcompress"))
		{
			Factory::getLog()->log(LogLevel::INFO, "GZIP Compression : available (good)");
		}
		else
		{
			Factory::getLog()->log(LogLevel::INFO, "GZIP Compression : n/a (no compression)");
		}

		$extraNotes = Platform::getInstance()->log_platform_special_directories();

		if (!empty($extraNotes) && is_array($extraNotes))
		{
			if (isset($extraNotes['warnings']) && is_array($extraNotes['warnings']))
			{
				foreach ($extraNotes['warnings'] as $warning)
				{
					$this->setWarning($warning);
				}
			}

			if (isset($extraNotes['errors']) && is_array($extraNotes['errors']))
			{
				foreach ($extraNotes['errors'] as $error)
				{
					$this->setError($error);
				}
			}
		}

		Factory::getLog()->log(LogLevel::INFO, "Output directory :" . $registry->get('akeeba.basic.output_directory'));
		Factory::getLog()->log(LogLevel::INFO, "Part size (bytes) :" . $registry->get('engine.archiver.common.part_size', 0));
		Factory::getLog()->log(LogLevel::INFO, "--------------------------------------------------------------------------------");
	}

	// Quirks reporting
	$quirks = Factory::getConfigurationChecks()->getDetailedStatus(true);

	if (!empty($quirks))
	{
		Factory::getLog()->log(LogLevel::INFO, "Akeeba Backup has detected the following potential problems:");

		foreach ($quirks as $q)
		{
			Factory::getLog()->log(LogLevel::INFO, '- ' . $q['code'] . ' ' . $q['description'] . ' (' . $q['severity'] . ')');
		}

		Factory::getLog()->log(LogLevel::INFO, "You probably do not have to worry about them, but you should be aware of them.");
		Factory::getLog()->log(LogLevel::INFO, "--------------------------------------------------------------------------------");
	}

	if (!version_compare(PHP_VERSION, '5.4.0', 'ge'))
	{
		Factory::getLog()->log(LogLevel::WARNING, "You are using an outdated version of PHP. Akeeba Engine may not work properly. Please upgrade to PHP 5.4.0 or later.");
	}

	// Report profile ID
	$profile_id = Platform::getInstance()->get_active_profile();
	Factory::getLog()->log(LogLevel::INFO, "Loaded profile #{$profile_id}");

	// Get archive name
	list($relativeArchiveName, $absoluteArchiveName) = $this->getArchiveName();

	// ==== Stats initialisation ===
	// Get backup origin. ($profile_id and $registry were fetched above; the
	// original code redundantly re-fetched them here.)
	$origin = Platform::getInstance()->get_backup_origin();
	$backupType = $registry->get('akeeba.basic.backup_type');
	Factory::getLog()->log(LogLevel::DEBUG, "Backup type is now set to '" . $backupType . "'");

	// Substitute "variables" in the archive name
	$fsUtils = Factory::getFilesystemTools();
	$description = $fsUtils->replace_archive_name_variables($this->description);
	$comment = $fsUtils->replace_archive_name_variables($this->comment);

	if ($registry->get('volatile.writer.store_on_server', true))
	{
		// Archive files are stored on our server
		$stat_relativeArchiveName = $relativeArchiveName;
		$stat_absoluteArchiveName = $absoluteArchiveName;
	}
	else
	{
		// Archive files are not stored on our server (FTP backup, cloud backup, sent by email, etc)
		$stat_relativeArchiveName = '';
		$stat_absoluteArchiveName = '';
	}

	$kettenrad = Factory::getKettenrad();

	$temp = array(
		'description'   => $description,
		'comment'       => $comment,
		'backupstart'   => Platform::getInstance()->get_timestamp_database(),
		'status'        => 'run',
		'origin'        => $origin,
		'type'          => $backupType,
		'profile_id'    => $profile_id,
		'archivename'   => $stat_relativeArchiveName,
		'absolute_path' => $stat_absoluteArchiveName,
		'multipart'     => 0,
		'filesexist'    => 1,
		'tag'           => $kettenrad->getTag(),
		'backupid'      => $kettenrad->getBackupId()
	);

	// Save the entry
	$statistics = Factory::getStatistics();
	$statistics->setStatistics($temp);

	if ($statistics->getError())
	{
		$this->setError($statistics->getError());

		return;
	}

	$statistics->release_multipart_lock();

	// Initialize the archive.
	if (Factory::getEngineParamsProvider()->getScriptingParameter('core.createarchive', true))
	{
		Factory::getLog()->log(LogLevel::DEBUG, "Expanded archive file name: " . $absoluteArchiveName);
		Factory::getLog()->log(LogLevel::DEBUG, "Initializing archiver engine");
		$archiver = Factory::getArchiverEngine();
		$archiver->initialize($absoluteArchiveName);
		$archiver->setComment($comment); // Add the comment to the archive itself.
		$archiver->propagateToObject($this);

		if ($this->getError())
		{
			return;
		}
	}

	$this->setState('postrun');
}
/** * Set up the Akeeba Restore engine for the current archive */ private function setUpAkeebaRestore() { $config = Factory::getConfiguration(); $maxTime = Factory::getTimer()->getTimeLeft(); $maxTime = floor($maxTime); $maxTime = max(2, $maxTime); $statistics = Factory::getStatistics(); $stat = $statistics->getRecord(); $backup_parts = Factory::getStatistics()->get_all_filenames($stat, false); $filePath = array_shift($backup_parts); $specialDirs = Platform::getInstance()->get_stock_directories(); $tmpPath = $specialDirs['[SITETMP]']; $archiver = Factory::getArchiverEngine(); $extension = $archiver->getExtension(); $extension = strtoupper($extension); $extension = ltrim($extension, '.'); $ksOptions = array('kickstart.tuning.max_exec_time' => $maxTime, 'kickstart.tuning.run_time_bias' => $config->get('akeeba.tuning.run_time_bias', 75), 'kickstart.tuning.min_exec_time' => '0', 'kickstart.procengine' => 'direct', 'kickstart.setup.sourcefile' => $filePath, 'kickstart.setup.destdir' => $tmpPath, 'kickstart.setup.restoreperms' => '0', 'kickstart.setup.filetype' => $extension, 'kickstart.setup.dryrun' => '1', 'kickstart.jps.password' => $config->get('engine.archiver.jps.key', '', false)); \AKFactory::nuke(); foreach ($ksOptions as $k => $v) { \AKFactory::set($k, $v); } \AKFactory::set('kickstart.enabled', true); }
/** * Returns a copy of the class's status array * * @return array */ public function getStatusArray() { if (empty($this->array_cache)) { // Get the default table $array = $this->_makeReturnTable(); // Get the current step number $stepCounter = Factory::getConfiguration()->get('volatile.step_counter', 0); // Add the archive name $statistics = Factory::getStatistics(); $record = $statistics->getRecord(); $array['Archive'] = isset($record['archivename']) ? $record['archivename'] : ''; // Translate HasRun to what the rest of the suite expects $array['HasRun'] = $this->getState() == 'finished' ? 1 : 0; // Translate no errors $array['Error'] = $array['Error'] == false ? '' : $array['Error']; $array['tag'] = $this->tag; $array['Progress'] = $this->getProgress(); $array['backupid'] = $this->getBackupId(); $array['sleepTime'] = $this->waitTimeMsec; $array['stepNumber'] = $stepCounter; $array['stepState'] = $this->getState(); $this->array_cache = $array; } return $this->array_cache; }
/**
 * Emails the results of the latest filesystem scan to the configured
 * recipient (the admintools.scanner.email setting).
 *
 * Builds an HTML report with scan totals plus tables of new and modified
 * files (unacknowledged alerts only), fetching the alert list in segments of
 * 100 records, then sends it via $this->_send_email().
 *
 * @param   object  $parent  The caller; when it is a Finalization instance,
 *                           step/substep progress is relayed through it
 *
 * @return  bool  True on success, or when no email address is configured
 */
public function send_scan_email($parent)
{
	if ($parent instanceof Finalization)
	{
		$parent->relayStep('Sending email');
		$parent->relaySubstep('');
	}

	// If no email is set, quit
	Factory::getLog()->log(LogLevel::DEBUG, __CLASS__ . ": Getting email addresses");
	$registry = Factory::getConfiguration();
	$email = $registry->get('admintools.scanner.email', '');
	$email = trim($email);

	if (empty($email))
	{
		// NOTE(review): the message has a grammar slip ("will not sent") —
		// left intact since it is a runtime log string
		Factory::getLog()->log(LogLevel::DEBUG, "No email is set. Scan results will not sent by email.");

		return true;
	}

	Factory::getLog()->log(LogLevel::DEBUG, __CLASS__ . ": Email address set to {$email}");

	// Get the ID of the scan
	$statistics = Factory::getStatistics();
	$latestBackupId = $statistics->getId();
	Factory::getLog()->log(LogLevel::DEBUG, __CLASS__ . ": Latest scan ID is {$latestBackupId}");

	// Get scan statistics
	Factory::getLog()->log(LogLevel::DEBUG, __CLASS__ . ": Getting scan statistics");
	$items = \F0FModel::getTmpInstance('Scans', 'AdmintoolsModel')->id($latestBackupId)->getItemList();
	$item = array_pop($items);

	// Populate table data for new, modified and suspicious files
	Factory::getLog()->log(LogLevel::DEBUG, __CLASS__ . ": Populating table");
	$body_new = '';
	$body_modified = '';

	// Page through the unacknowledged alerts in chunks of 100 records
	$totalFiles = \F0FModel::getTmpInstance('Scanalerts', 'AdmintoolsModel')->scan_id($latestBackupId)->acknowledged(0)->getTotal();
	$segments = (int) ($totalFiles / 100) + 1;
	Factory::getLog()->log(LogLevel::DEBUG, __CLASS__ . ": Processing file list in {$segments} segment(s)");

	for ($i = 0; $i < $segments; $i++)
	{
		$limitstart = 100 * $i;
		$files = \F0FModel::getTmpInstance('Scanalerts', 'AdmintoolsModel')->scan_id($latestBackupId)->acknowledged(0)->limit(100)->limitstart($limitstart)->getItemList();

		if (!empty($files))
		{
			foreach ($files as $file)
			{
				// NOTE(review): $file->path is interpolated into HTML without
				// escaping — confirm scanned paths cannot contain markup
				$fileRow = "<tr><td>{$file->path}</td><td>{$file->threat_score}</td></tr>\n";

				if ($file->newfile)
				{
					$body_new .= $fileRow;
				}
				else
				{
					$body_modified .= $fileRow;
				}
			}
		}
	}

	Factory::getLog()->log(LogLevel::DEBUG, __CLASS__ . ": Preparing email text");

	// Prepare the email body
	// NOTE(review): the heading text is emitted inside <head> before <title>,
	// which is invalid HTML — confirm whether this is intentional
	$body = '<html><head>' . JText::_('COM_ADMINTOOLS_SCANS_EMAIL_HEADING') . '<title></title></head><body>';
	$body .= '<h1>' . JText::_('COM_ADMINTOOLS_SCANS_EMAIL_HEADING') . "</h1><hr/>\n";
	$body .= '<h2>' . JText::_('COM_ADMINTOOLS_SCANS_EMAIL_OVERVIEW') . "</h2>\n";
	$body .= "<p>\n";
	// Presumably the scan record reuses the backup statistics "multipart"
	// column for the total file count — confirm against the Scans model
	$body .= '<strong>' . JText::_('COM_ADMINTOOLS_LBL_SCANS_TOTAL') . "</strong>: " . $item->multipart . "<br/>\n";
	$body .= '<strong>' . JText::_('COM_ADMINTOOLS_LBL_SCANS_MODIFIED') . "</strong>: " . $item->files_modified . "<br/>\n";
	$body .= '<strong>' . JText::_('COM_ADMINTOOLS_LBL_SCANS_ADDED') . "</strong>: " . $item->files_new . "<br/>\n";
	$body .= '<strong>' . JText::_('COM_ADMINTOOLS_LBL_SCANS_SUSPICIOUS') . "</strong>: " . $item->files_suspicious . "<br/>\n";
	$body .= "</p>\n";

	// Table of newly added files
	$body .= '<hr/><h2>' . JText::_('COM_ADMINTOOLS_LBL_SCANS_ADDED') . "</h2>\n";
	$body .= "<table width=\"100%\">\n";
	$body .= "\t<thead>\n";
	$body .= "\t<tr>\n";
	$body .= "\t\t<th>" . JText::_('COM_ADMINTOOLS_LBL_SCANALERTS_PATH') . "</th>\n";
	$body .= "\t\t<th width=\"50\">" . JText::_('COM_ADMINTOOLS_LBL_SCANALERTS_THREAT_SCORE') . "</th>\n";
	$body .= "\t</tr>\n";
	$body .= "\t</thead>\n";
	$body .= "\t<tbody>\n";
	$body .= $body_new;
	unset($body_new);
	$body .= "\t</tbody>\n";
	$body .= '</table>';

	// Table of modified files
	$body .= '<hr/><h2>' . JText::_('COM_ADMINTOOLS_LBL_SCANS_MODIFIED') . "</h2>\n";
	$body .= "<table width=\"100%\">\n";
	$body .= "\t<thead>\n";
	$body .= "\t<tr>\n";
	$body .= "\t\t<th>" . JText::_('COM_ADMINTOOLS_LBL_SCANALERTS_PATH') . "</th>\n";
	$body .= "\t\t<th width=\"50\">" . JText::_('COM_ADMINTOOLS_LBL_SCANALERTS_THREAT_SCORE') . "</th>\n";
	$body .= "\t</tr>\n";
	$body .= "\t</thead>\n";
	$body .= "\t<tbody>\n";
	$body .= $body_modified;
	unset($body_modified);
	$body .= "\t</tbody>\n";
	$body .= '</table>';
	$body .= '</body></html>';

	// Prepare the email subject
	$config = \JFactory::getConfig();
	$sitename = $config->get('sitename', 'Unknown Site');
	$subject = JText::sprintf('COM_ADMINTOOLS_SCANS_EMAIL_SUBJECT', $sitename);

	// Send the email
	Factory::getLog()->log(LogLevel::DEBUG, __CLASS__ . ": Ready to send out emails");
	$this->_send_email($email, $subject, $body);

	return true;
}