Example #1
 /**
  * Build the list of .sql backup files under data/backup and
  * render the database-restore template.
  */
 function restoredblist()
 {
     require_once SYS_PATH . 'lib/class/class.file.php';
     if (class_exists('FileOp')) {
         $filedir = SYS_PATH . 'data' . DS . 'backup';
         $fileobj = new FileOp();
         $ar = $fileobj->list_files($filedir);
         $art_ = array();
         if (!empty($ar)) {
             foreach ($ar as $var) {
                 if (!empty($var)) {
                     $type = substr($var, -4);
                     if ($type != '.sql') {
                         continue;
                     }
                 } else {
                     continue;
                 }
                 $isize = filesize($var);
                 if ($isize < 1000000) {
                     $size = sprintf("%.2f", $isize / 1024) . 'KB';
                 } else {
                     $size = sprintf("%.2f", $isize / 1024 / 1024) . 'MB';
                 }
             $art_[] = array(
                 'filename' => basename($var),
                 'size'     => $size,
                 'time'     => date('Y-m-d H:i:s', filemtime($var)),
                 'filedir'  => $var,
             );
             }
         }
         unset($ar);
         $art = Import::basic()->array_sort($art_, 'time', 'desc');
         $this->set('restoredblist', $art);
         unset($art_);
     } else {
         die("请你检查你的文件处理类是否存在!=>FileOp");
     }
     $this->template('database_restore');
 }
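
The KB/MB branching above is self-contained enough to factor out. A minimal sketch, assuming a hypothetical formatFileSize() helper (not part of the original class); note it deliberately keeps the original's mix of a decimal threshold with binary divisors:

 // Hypothetical helper mirroring the size-formatting logic above.
 function formatFileSize($bytes)
 {
     if ($bytes < 1000000) {
         return sprintf('%.2f', $bytes / 1024) . 'KB';
     }
     return sprintf('%.2f', $bytes / 1024 / 1024) . 'MB';
 }

 echo formatFileSize(512000);  // 500.00KB
 echo formatFileSize(5242880); // 5.00MB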
Example #2
 /**
  * Attempt a series of file operations.
  * Callers are responsible for handling file locking.
  * 
  * $opts is an array of options, including:
  * 'force'      : Errors that would normally cause a rollback do not.
  *                The remaining operations are still attempted if any fail.
  * 'allowStale' : Don't require the latest available data.
  *                This can increase performance for non-critical writes.
  *                This has no effect unless the 'force' flag is set.
  *
  * The resulting Status will be "OK" unless:
  *     a) unexpected operation errors occurred (network partitions, disk full...)
  *     b) significant operation errors occurred and 'force' was not set
  * 
  * @param array $performOps List of FileOp operations
  * @param array $opts Batch operation options
  * @return Status
  */
 public static final function attemptBatch(array $performOps, array $opts)
 {
     $status = Status::newGood();
     $allowStale = !empty($opts['allowStale']);
     $ignoreErrors = !empty($opts['force']);
     $n = count($performOps);
     if ($n > self::MAX_BATCH_SIZE) {
         $status->fatal('backend-fail-batchsize', $n, self::MAX_BATCH_SIZE);
         return $status;
     }
     $predicates = FileOp::newPredicates(); // account for previous ops in prechecks
     // Do pre-checks for each operation; abort on failure...
     foreach ($performOps as $index => $fileOp) {
         if ($allowStale) {
             $fileOp->allowStaleReads(); // allow potentially stale reads
         }
         $subStatus = $fileOp->precheck($predicates);
         $status->merge($subStatus);
         if (!$subStatus->isOK()) { // operation failed?
             $status->success[$index] = false;
             ++$status->failCount;
             if (!$ignoreErrors) {
                 return $status; // abort
             }
         }
     }
     if ($ignoreErrors) { // treat all precheck() fatals as merely warnings
         $status->setResult(true, $status->value);
     }
     // Restart PHP's execution timer and set the timeout to a safe amount.
     // This handles cases where the operations take a long time or where we are
     // already running low on time left. The old timeout is restored afterwards.
     // @TODO: re-enable this for cases where the number of batches is high.
     //$scopedTimeLimit = new FileOpScopedPHPTimeout( self::TIME_LIMIT_SEC );
     // Attempt each operation...
     foreach ($performOps as $index => $fileOp) {
         if ($fileOp->failed()) {
             continue; // nothing to do
         }
         $subStatus = $fileOp->attempt();
         $status->merge($subStatus);
         if ($subStatus->isOK()) {
             $status->success[$index] = true;
             ++$status->successCount;
         } else {
             $status->success[$index] = false;
             ++$status->failCount;
             // We can't continue (even with $ignoreErrors) as $predicates is wrong.
             // Log the remaining ops as failed for recovery...
             for ($i = $index + 1; $i < count($performOps); $i++) {
                 $performOps[$i]->logFailure('attempt_aborted');
             }
             return $status; // bail out
         }
     }
     return $status;
 }
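
A hedged usage sketch for attemptBatch(): the FileOp instances ($createOp, $moveOp) are hypothetical stand-ins for concrete subclasses built elsewhere, and only Status fields shown in the example above are used:

 $status = FileOp::attemptBatch(
     array($createOp, $moveOp),                     // hypothetical FileOp instances
     array('force' => false, 'allowStale' => false)
 );
 if (!$status->isOK()) {
     // success[] maps each op index to a boolean; failCount summarizes.
     error_log("Batch failed: {$status->failCount} operation(s) failed.");
 }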
Example #3
	/**
	 * Attempt to perform a series of file operations.
	 * Callers are responsible for handling file locking.
	 *
	 * $opts is an array of options, including:
	 *   - force        : Errors that would normally cause a rollback do not.
	 *                    The remaining operations are still attempted if any fail.
	 *   - nonJournaled : Don't log this operation batch in the file journal.
	 *   - concurrency  : Try to do this many operations in parallel when possible.
	 *
	 * The resulting Status will be "OK" unless:
	 *   - a) unexpected operation errors occurred (network partitions, disk full...)
	 *   - b) significant operation errors occurred and 'force' was not set
	 *
	 * @param array $performOps List of FileOp operations
	 * @param array $opts Batch operation options
	 * @param FileJournal $journal Journal to log operations to
	 * @return Status
	 */
	public static function attempt( array $performOps, array $opts, FileJournal $journal ) {
		wfProfileIn( __METHOD__ );
		$status = Status::newGood();

		$n = count( $performOps );
		if ( $n > self::MAX_BATCH_SIZE ) {
			$status->fatal( 'backend-fail-batchsize', $n, self::MAX_BATCH_SIZE );
			wfProfileOut( __METHOD__ );
			return $status;
		}

		$batchId = $journal->getTimestampedUUID();
		$ignoreErrors = !empty( $opts['force'] );
		$journaled = empty( $opts['nonJournaled'] );
		$maxConcurrency = isset( $opts['concurrency'] ) ? $opts['concurrency'] : 1;

		$entries = array(); // file journal entry list
		$predicates = FileOp::newPredicates(); // account for previous ops in prechecks
		$curBatch = array(); // concurrent FileOp sub-batch accumulation
		$curBatchDeps = FileOp::newDependencies(); // paths used in FileOp sub-batch
		$pPerformOps = array(); // ordered list of concurrent FileOp sub-batches
		$lastBackend = null; // last op backend name
		// Do pre-checks for each operation; abort on failure...
		foreach ( $performOps as $index => $fileOp ) {
			$backendName = $fileOp->getBackend()->getName();
			$fileOp->setBatchId( $batchId ); // transaction ID
			// Decide if this op can be done concurrently within this sub-batch
			// or if a new concurrent sub-batch must be started after this one...
			if ( $fileOp->dependsOn( $curBatchDeps )
				|| count( $curBatch ) >= $maxConcurrency
				|| ( $backendName !== $lastBackend && count( $curBatch ) )
			) {
				$pPerformOps[] = $curBatch; // push this batch
				$curBatch = array(); // start a new sub-batch
				$curBatchDeps = FileOp::newDependencies();
			}
			$lastBackend = $backendName;
			$curBatch[$index] = $fileOp; // keep index
			// Update list of affected paths in this batch
			$curBatchDeps = $fileOp->applyDependencies( $curBatchDeps );
			// Simulate performing the operation...
			$oldPredicates = $predicates;
			$subStatus = $fileOp->precheck( $predicates ); // updates $predicates
			$status->merge( $subStatus );
			if ( $subStatus->isOK() ) {
				if ( $journaled ) { // journal log entries
					$entries = array_merge( $entries,
						$fileOp->getJournalEntries( $oldPredicates, $predicates ) );
				}
			} else { // operation failed?
				$status->success[$index] = false;
				++$status->failCount;
				if ( !$ignoreErrors ) {
					wfProfileOut( __METHOD__ );
					return $status; // abort
				}
			}
		}
		// Push the last sub-batch
		if ( count( $curBatch ) ) {
			$pPerformOps[] = $curBatch;
		}

		// Log the operations in the file journal...
		if ( count( $entries ) ) {
			$subStatus = $journal->logChangeBatch( $entries, $batchId );
			if ( !$subStatus->isOK() ) {
				wfProfileOut( __METHOD__ );
				return $subStatus; // abort
			}
		}

		if ( $ignoreErrors ) { // treat precheck() fatals as mere warnings
			$status->setResult( true, $status->value );
		}

		// Attempt each operation (in parallel if allowed and possible)...
		self::runParallelBatches( $pPerformOps, $status );

		wfProfileOut( __METHOD__ );
		return $status;
	}
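
A hedged usage sketch for this journaled variant. The enclosing class name is not shown in the example, so FileOpBatch below is an assumption, as are $ops (a prepared FileOp list) and $journal (a FileJournal implementation):

	// Hypothetical usage under the assumptions noted above
	$status = FileOpBatch::attempt( $ops, array(
		'force'        => false,
		'nonJournaled' => false, // record the batch in the file journal
		'concurrency'  => 4      // up to 4 parallel ops per sub-batch
	), $journal );
	if ( !$status->isOK() ) {
		// Inspect per-op results as in the non-journaled variant
		var_dump( $status->success );
	}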
Example #4
 /**
  * AJAX action: recursively clear the cache directory, counting deleted
  * files ($i) and directories ($j); $k caps the number of passes.
  */
 function ajax_clearcache($i = 0, $j = 0, $k = 0)
 {
     @set_time_limit(600); // maximum execution time
     $k++;
     // Delete all files under the temp/ajin folder
     require_once SYS_PATH . 'lib/class/class.file.php';
     if (class_exists('FileOp')) {
         $ajincachedir = SYS_PATH . 'cache';
         $fileobj = new FileOp();
         $ar = $fileobj->list_files($ajincachedir);
         if (!empty($ar)) {
             foreach ($ar as $filename) {
                 if (is_file($filename)) {
                     if ($fileobj->delete_file($filename)) {
                         $i++;
                     }
                 } else {
                     if (is_dir($filename)) {
                         if ($fileobj->delete_dir($filename)) {
                             $j++;
                         }
                     }
                 }
                 $fileobj->dir2delete($filename);
             }
         }
         unset($ar);
         // Re-scan; recurse (up to 5 passes) if anything was left behind.
         // Kept inside the class_exists() guard so $fileobj is always defined.
         $ar = $fileobj->list_files($ajincachedir);
         if (!empty($ar) && $k < 5) {
             $this->ajax_clearcache($i, $j, $k);
         }
     }
     echo $str = "删除了" . $i . "个文件,删除了" . $j . "个目录!";
     exit;
 }
Example #5
 /**
  * @see FileBackend::doOperationsInternal()
  */
 protected final function doOperationsInternal(array $ops, array $opts)
 {
     $status = Status::newGood();
     $performOps = array(); // list of FileOp objects
     $filesRead = $filesChanged = array(); // storage paths used
     // Build up a list of FileOps. The list will have all the ops
     // for one backend, then all the ops for the next, and so on.
     // These batches of ops are all part of a continuous array.
     // Also build up a list of files read/changed...
     foreach ($this->backends as $index => $backend) {
         $backendOps = $this->substOpBatchPaths($ops, $backend);
         // Add on the operation batch for this backend
         $performOps = array_merge($performOps, $backend->getOperations($backendOps));
         if ($index == 0) { // first batch
             // Get the files used for these operations. Each backend has a batch of
             // the same operations, so we only need to get them from the first batch.
             foreach ($performOps as $fileOp) {
                 $filesRead = array_merge($filesRead, $fileOp->storagePathsRead());
                 $filesChanged = array_merge($filesChanged, $fileOp->storagePathsChanged());
             }
             // Get the paths under the proxy backend's name
             $filesRead = $this->unsubstPaths($filesRead);
             $filesChanged = $this->unsubstPaths($filesChanged);
         }
     }
     // Try to lock those files for the scope of this function...
     if (empty($opts['nonLocking'])) {
         $filesLockSh = array_diff($filesRead, $filesChanged); // optimization
         $filesLockEx = $filesChanged;
         // Get a shared lock on the parent directory of each path changed
         $filesLockSh = array_merge($filesLockSh, array_map('dirname', $filesLockEx));
         // Try to lock those files for the scope of this function...
         $scopeLockS = $this->getScopedFileLocks($filesLockSh, LockManager::LOCK_UW, $status);
         $scopeLockE = $this->getScopedFileLocks($filesLockEx, LockManager::LOCK_EX, $status);
         if (!$status->isOK()) {
             return $status; // abort
         }
     }
     // Clear any cache entries (after locks acquired)
     $this->clearCache();
     // Do a consistency check to see if the backends agree
     if (count($this->backends) > 1) {
         $status->merge($this->consistencyCheck(array_merge($filesRead, $filesChanged)));
         if (!$status->isOK()) {
             return $status; // abort
         }
     }
     // Actually attempt the operation batch...
     $subStatus = FileOp::attemptBatch($performOps, $opts);
     $success = array();
     $failCount = $successCount = 0;
     // Make 'success', 'successCount', and 'failCount' fields reflect
     // the overall operation, rather than all the batches for each backend.
     // Do this by only using success values from the master backend's batch.
     $batchStart = $this->masterIndex * count($ops);
     $batchEnd = $batchStart + count($ops) - 1;
     for ($i = $batchStart; $i <= $batchEnd; $i++) {
         if (!isset($subStatus->success[$i])) {
             break; // failed out before trying this op
         } elseif ($subStatus->success[$i]) {
             ++$successCount;
         } else {
             ++$failCount;
         }
         $success[] = $subStatus->success[$i];
     }
     $subStatus->success = $success;
     $subStatus->successCount = $successCount;
     $subStatus->failCount = $failCount;
     // Merge errors into status fields
     $status->merge($subStatus);
     $status->success = $subStatus->success; // not done in merge()
     return $status;
 }
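
The master-batch slicing above is easiest to see with concrete numbers. A minimal sketch (all values hypothetical):

 // With 3 backends and 4 ops each, $performOps holds 12 ops in
 // backend order; if masterIndex is 1, the master's slice is 4..7.
 $masterIndex = 1;
 $opsPerBackend = 4;
 $batchStart = $masterIndex * $opsPerBackend;  // 4
 $batchEnd = $batchStart + $opsPerBackend - 1; // 7
 echo "Master backend results occupy indexes {$batchStart}..{$batchEnd}\n";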
Example #6
 /**
  * @see FileBackend::doOperationsInternal()
  */
 protected function doOperationsInternal(array $ops, array $opts)
 {
     wfProfileIn(__METHOD__);
     $status = Status::newGood();
     // Build up a list of FileOps...
     $performOps = $this->getOperations($ops);
     // Acquire any locks as needed...
     if (empty($opts['nonLocking'])) {
         // Build up a list of files to lock...
         $filesLockEx = $filesLockSh = array();
         foreach ($performOps as $fileOp) {
             $filesLockSh = array_merge($filesLockSh, $fileOp->storagePathsRead());
             $filesLockEx = array_merge($filesLockEx, $fileOp->storagePathsChanged());
         }
         // Optimization: if doing an EX lock anyway, don't also set an SH one
         $filesLockSh = array_diff($filesLockSh, $filesLockEx);
         // Get a shared lock on the parent directory of each path changed
         $filesLockSh = array_merge($filesLockSh, array_map('dirname', $filesLockEx));
         // Try to lock those files for the scope of this function...
         $scopeLockS = $this->getScopedFileLocks($filesLockSh, LockManager::LOCK_UW, $status);
         $scopeLockE = $this->getScopedFileLocks($filesLockEx, LockManager::LOCK_EX, $status);
         if (!$status->isOK()) {
             wfProfileOut(__METHOD__);
             return $status; // abort
         }
     }
     // Clear any cache entries (after locks acquired)
     $this->clearCache();
     // Actually attempt the operation batch...
     $subStatus = FileOp::attemptBatch($performOps, $opts);
     // Merge errors into status fields
     $status->merge($subStatus);
     $status->success = $subStatus->success; // not done in merge()
     wfProfileOut(__METHOD__);
     return $status;
 }
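
The lock-set derivation above is a small, self-contained step. A minimal sketch with hypothetical storage paths:

 $filesLockSh = array('mwstore://local/a.txt', 'mwstore://local/b.txt');
 $filesLockEx = array('mwstore://local/b.txt');
 // Don't also SH-lock a path that already gets an EX lock...
 $filesLockSh = array_diff($filesLockSh, $filesLockEx);
 // ...but do SH-lock the parent directory of each changed path.
 $filesLockSh = array_merge($filesLockSh, array_map('dirname', $filesLockEx));
 // $filesLockSh now holds 'mwstore://local/a.txt' and 'mwstore://local'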
Example #7
 function ajax_clearcache($i = 0, $j = 0, $k = 0)
 {
     @set_time_limit(600); // maximum execution time
     $k++;
     // Delete all files under the temp/ajin folder
     // Derive a host-based prefix used to match cache file names
     $t = '';
     $x = $_SERVER["HTTP_HOST"];
     $x1 = explode('.', $x);
     if (count($x1) == 2) {
         $t = $x1[0];
     } elseif (count($x1) > 2) {
         $t = $x1[0] . $x1[1];
     }
     require_once SYS_PATH . 'lib/class/class.file.php';
     if (class_exists('FileOp')) {
         $ajincachedir = SYS_PATH . 'cache';
         $fileobj = new FileOp();
         $ar = $fileobj->list_files($ajincachedir);
         if (!empty($ar)) {
             foreach ($ar as $filename) {
                 if (!empty($t) && strpos($filename, $t) == false) {
                     continue;
                 }
                 if (is_file($filename)) {
                     if ($fileobj->delete_file($filename)) {
                         $i++;
                     }
                 } else {
                     if (is_dir($filename)) {
                         if ($fileobj->delete_dir($filename)) {
                             $j++;
                         }
                     }
                 }
                 $fileobj->dir2delete($filename);
             }
         }
         unset($ar);
         // Re-scan; recurse (up to 5 passes) if anything was left behind.
         // Kept inside the class_exists() guard so $fileobj is always defined.
         $ar = $fileobj->list_files($ajincachedir);
         if (!empty($ar) && $k < 5) {
             $this->ajax_clearcache($i, $j, $k);
         }
     }
     echo $str = "删除了" . $i . "个文件,删除了" . $j . "个目录!";
     exit;
 }
Example #8
 /**
  * Recursively map a directory tree into an array keyed by directory,
  * skipping any entries listed in $unread.
  */
 function dir2arr($dir, $unread = array())
 {
     $result = array();
     $result[$dir] = array();
     $resource = opendir($dir);
     // Strict comparison so an entry named "0" doesn't end the loop
     while (($file = readdir($resource)) !== false) {
         if (!empty($unread)) {
             if (in_array($file, $unread)) {
                 continue;
             }
         }
         if ($file != '..' && $file != '.') {
             if (is_dir($dir . DS . $file)) {
                 // Recurse, propagating the skip list
                 $result = array_merge($result, FileOp::dir2arr($dir . DS . $file, $unread));
             } else {
                 $result[$dir][] = $dir . DS . $file;
             }
         }
     }
     closedir($resource);
     return $result;
 }
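
A hedged usage sketch (the path and skip list are hypothetical):

 // Map a tree, skipping VCS metadata directories.
 $tree = FileOp::dir2arr('/var/www/data', array('.git', '.svn'));
 // $tree is keyed by directory, each value listing the files directly
 // inside it, e.g.:
 //   '/var/www/data'      => array('/var/www/data/a.txt'),
 //   '/var/www/data/logs' => array('/var/www/data/logs/b.log'),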