/**
 * Attempt a series of file operations.
 * Callers are responsible for handling file locking.
 *
 * $opts is an array of options, including:
 * 'force'      : Errors that would normally cause a rollback do not.
 *                The remaining operations are still attempted if any fail.
 * 'allowStale' : Don't require the latest available data.
 *                This can increase performance for non-critical writes.
 *                This has no effect unless the 'force' flag is set.
 *
 * The resulting Status will be "OK" unless:
 * a) unexpected operation errors occurred (network partitions, disk full...)
 * b) significant operation errors occurred and 'force' was not set
 *
 * @param array $performOps List of FileOp operations
 * @param array $opts Batch operation options
 * @return Status
 */
final public static function attemptBatch( array $performOps, array $opts ) {
	$status = Status::newGood();

	$allowStale = !empty( $opts['allowStale'] );
	$ignoreErrors = !empty( $opts['force'] );

	// Reject oversized batches up front before doing any per-op work
	$n = count( $performOps );
	if ( $n > self::MAX_BATCH_SIZE ) {
		$status->fatal( 'backend-fail-batchsize', $n, self::MAX_BATCH_SIZE );
		return $status;
	}

	$predicates = FileOp::newPredicates(); // account for previous op in prechecks
	// Do pre-checks for each operation; abort on failure...
	foreach ( $performOps as $index => $fileOp ) {
		if ( $allowStale ) {
			$fileOp->allowStaleReads(); // allow potentially stale reads
		}
		$subStatus = $fileOp->precheck( $predicates );
		$status->merge( $subStatus );
		if ( !$subStatus->isOK() ) { // operation failed?
			$status->success[$index] = false;
			++$status->failCount;
			if ( !$ignoreErrors ) {
				return $status; // abort
			}
		}
	}

	if ( $ignoreErrors ) {
		// Treat all precheck() fatals as merely warnings
		$status->setResult( true, $status->value );
	}

	// Restart PHP's execution timer and set the timeout to safe amount.
	// This handles cases where the operations take a long time or where we are
	// already running low on time left. The old timeout is restored afterwards.
	// @TODO: re-enable this for when the number of batches is high.
	// $scopedTimeLimit = new FileOpScopedPHPTimeout( self::TIME_LIMIT_SEC );

	// Attempt each operation...
	foreach ( $performOps as $index => $fileOp ) {
		if ( $fileOp->failed() ) {
			continue; // nothing to do
		}
		$subStatus = $fileOp->attempt();
		$status->merge( $subStatus );
		if ( $subStatus->isOK() ) {
			$status->success[$index] = true;
			++$status->successCount;
		} else {
			$status->success[$index] = false;
			++$status->failCount;
			// We can't continue (even with $ignoreErrors) as $predicates is wrong.
			// Log the remaining ops as failed for recovery...
			$total = count( $performOps ); // hoist loop-invariant count
			for ( $i = $index + 1; $i < $total; $i++ ) {
				$performOps[$i]->logFailure( 'attempt_aborted' );
			}
			return $status; // bail out
		}
	}

	return $status;
}
/**
 * Attempt to perform a series of file operations.
 * Callers are responsible for handling file locking.
 *
 * $opts is an array of options, including:
 * - force : Errors that would normally cause a rollback do not.
 *           The remaining operations are still attempted if any fail.
 * - nonJournaled : Don't log this operation batch in the file journal.
 * - concurrency : Try to do this many operations in parallel when possible.
 *
 * The resulting Status will be "OK" unless:
 * - a) unexpected operation errors occurred (network partitions, disk full...)
 * - b) significant operation errors occurred and 'force' was not set
 *
 * @param array $performOps List of FileOp operations
 * @param array $opts Batch operation options
 * @param FileJournal $journal Journal to log operations to
 * @return Status
 */
public static function attempt( array $performOps, array $opts, FileJournal $journal ) {
	wfProfileIn( __METHOD__ );
	$status = Status::newGood();

	// Reject oversized batches up front before doing any per-op work
	$n = count( $performOps );
	if ( $n > self::MAX_BATCH_SIZE ) {
		$status->fatal( 'backend-fail-batchsize', $n, self::MAX_BATCH_SIZE );
		wfProfileOut( __METHOD__ );
		return $status;
	}

	$batchId = $journal->getTimestampedUUID(); // ID shared by all ops in this batch
	$ignoreErrors = !empty( $opts['force'] );
	$journaled = empty( $opts['nonJournaled'] );
	$maxConcurrency = isset( $opts['concurrency'] ) ? $opts['concurrency'] : 1;

	$entries = array(); // file journal entry list
	$predicates = FileOp::newPredicates(); // account for previous ops in prechecks
	$curBatch = array(); // concurrent FileOp sub-batch accumulation
	$curBatchDeps = FileOp::newDependencies(); // paths used in FileOp sub-batch
	$pPerformOps = array(); // ordered list of concurrent FileOp sub-batches
	$lastBackend = null; // last op backend name

	// Do pre-checks for each operation; abort on failure...
	foreach ( $performOps as $index => $fileOp ) {
		$backendName = $fileOp->getBackend()->getName();
		$fileOp->setBatchId( $batchId ); // transaction ID
		// Decide if this op can be done concurrently within this sub-batch
		// or if a new concurrent sub-batch must be started after this one...
		// A sub-batch is closed when: the op depends on a path already touched
		// by this sub-batch, the concurrency limit is reached, or the backend
		// differs from the previous op's backend (ops never mix backends within
		// a sub-batch).
		if ( $fileOp->dependsOn( $curBatchDeps )
			|| count( $curBatch ) >= $maxConcurrency
			|| ( $backendName !== $lastBackend && count( $curBatch ) )
		) {
			$pPerformOps[] = $curBatch; // push this batch
			$curBatch = array(); // start a new sub-batch
			$curBatchDeps = FileOp::newDependencies();
		}
		$lastBackend = $backendName;
		$curBatch[$index] = $fileOp; // keep index
		// Update list of affected paths in this batch
		$curBatchDeps = $fileOp->applyDependencies( $curBatchDeps );
		// Simulate performing the operation...
		// Snapshot the predicates before precheck() mutates them; PHP arrays
		// are copied on assignment, so $oldPredicates is a true snapshot.
		$oldPredicates = $predicates;
		$subStatus = $fileOp->precheck( $predicates ); // updates $predicates
		$status->merge( $subStatus );
		if ( $subStatus->isOK() ) {
			if ( $journaled ) { // journal log entries
				// Record the before/after state transition for recovery
				$entries = array_merge( $entries,
					$fileOp->getJournalEntries( $oldPredicates, $predicates ) );
			}
		} else { // operation failed?
			$status->success[$index] = false;
			++$status->failCount;
			if ( !$ignoreErrors ) {
				wfProfileOut( __METHOD__ );
				return $status; // abort
			}
		}
	}
	// Push the last sub-batch
	if ( count( $curBatch ) ) {
		$pPerformOps[] = $curBatch;
	}
	// Log the operations in the file journal...
	// This happens before any op is attempted, so the journal always covers
	// every operation that might run.
	if ( count( $entries ) ) {
		$subStatus = $journal->logChangeBatch( $entries, $batchId );
		if ( !$subStatus->isOK() ) {
			wfProfileOut( __METHOD__ );
			return $subStatus; // abort
		}
	}
	if ( $ignoreErrors ) { // treat precheck() fatals as mere warnings
		$status->setResult( true, $status->value );
	}
	// Attempt each operation (in parallel if allowed and possible)...
	// runParallelBatches() updates $status in place with per-op results.
	self::runParallelBatches( $pPerformOps, $status );
	wfProfileOut( __METHOD__ );
	return $status;
}