/**
 * Release the locks when this goes out of scope
 */
function __destruct() {
	$wasOk = $this->status->isOK();
	$this->status->merge( $this->manager->unlockByType( $this->pathsByType ) );
	if ( $wasOk ) {
		// Make sure status is OK, despite any unlockByType() fatals
		$this->status->setResult( true, $this->status->value );
	}
}
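// A minimal usage sketch (the lock manager, paths, and surrounding function are
// hypothetical): the locks acquired by ScopedLock::factory() are held only for
// the lifetime of the returned object, and the destructor above merges any
// unlock errors into the caller's Status without flipping an OK result to failure.
$status = Status::newGood();
$lock = ScopedLock::factory( $lockManager, $paths, LockManager::LOCK_EX, $status );
if ( !$status->isOK() ) {
	return $status; // could not acquire the locks; nothing to release
}
// ... operate on the locked paths ...
ScopedLock::release( $lock ); // or just let $lock fall out of scope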
/**
 * @covers Status::merge
 */
public function testMergeWithOverwriteValue() {
	$status1 = new Status();
	$status2 = new Status();
	$message1 = $this->getMockMessage('warn1');
	$message2 = $this->getMockMessage('error2');

	$status1->warning($message1);
	$status2->error($message2);
	$status2->value = 'FooValue';

	$status1->merge($status2, true);
	$this->assertEquals(
		2,
		count($status1->getWarningsArray()) + count($status1->getErrorsArray())
	);
	$this->assertEquals('FooValue', $status1->getValue());
}
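// For contrast, a small sketch of the default behaviour implied by the
// $overwriteValue flag exercised above (an assumption based on that flag, not a
// quote of the implementation): without the second argument, merge() combines
// errors and warnings but leaves the receiving Status's value untouched.
$a = new Status();
$a->value = 'KeepMe';
$b = new Status();
$b->value = 'Ignored';
$a->merge($b);          // no overwrite flag
echo $a->getValue();    // 'KeepMe'
$a->merge($b, true);    // overwrite requested
echo $a->getValue();    // 'Ignored'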
/**
 * Attempt a list of file operations sub-batches in series.
 *
 * The operations *in* each sub-batch will be done in parallel.
 * The caller is responsible for making sure the operations
 * within any given sub-batch do not depend on each other.
 * This will abort remaining ops on failure.
 *
 * @param array $pPerformOps
 * @param Status $status
 * @return Status
 */
protected static function runParallelBatches( array $pPerformOps, Status $status ) {
	$aborted = false; // set to true on unexpected errors
	foreach ( $pPerformOps as $performOpsBatch ) {
		if ( $aborted ) { // check batch op abort flag...
			// We can't continue (even with $ignoreErrors) as $predicates is wrong.
			// Log the remaining ops as failed for recovery...
			foreach ( $performOpsBatch as $i => $fileOp ) {
				$performOpsBatch[$i]->logFailure( 'attempt_aborted' );
			}
			continue;
		}
		$statuses = array();
		$opHandles = array();
		// Get the backend; all sub-batch ops belong to a single backend
		$backend = reset( $performOpsBatch )->getBackend();
		// Get the operation handles or actually do it if there is just one.
		// If attemptAsync() returns a Status, it was either due to an error
		// or the backend does not support async ops and did it synchronously.
		foreach ( $performOpsBatch as $i => $fileOp ) {
			if ( !$fileOp->failed() ) { // failed => already has Status
				// If the batch is just one operation, it's faster to avoid
				// pipelining as that can involve creating new TCP connections.
				$subStatus = ( count( $performOpsBatch ) > 1 )
					? $fileOp->attemptAsync()
					: $fileOp->attempt();
				if ( $subStatus->value instanceof FileBackendStoreOpHandle ) {
					$opHandles[$i] = $subStatus->value; // deferred
				} else {
					$statuses[$i] = $subStatus; // done already
				}
			}
		}
		// Try to do all the operations concurrently...
		$statuses = $statuses + $backend->executeOpHandlesInternal( $opHandles );
		// Marshall and merge all the responses (blocking)...
		foreach ( $performOpsBatch as $i => $fileOp ) {
			if ( !$fileOp->failed() ) { // failed => already has Status
				$subStatus = $statuses[$i];
				$status->merge( $subStatus );
				if ( $subStatus->isOK() ) {
					$status->success[$i] = true;
					++$status->successCount;
				} else {
					$status->success[$i] = false;
					++$status->failCount;
					$aborted = true; // set abort flag; we can't continue
				}
			}
		}
	}

	return $status;
}
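// A compact sketch of the async contract this batch runner relies on (the loop
// body and the literal array key are simplifications, not real backend code):
// attemptAsync() yields a Status whose ->value is a FileBackendStoreOpHandle
// when the work was deferred, and executeOpHandlesInternal() later resolves
// handles into Status objects keyed exactly like its input array. That is why
// the array union (+) in runParallelBatches() above can safely recombine
// deferred and already-done results by original op index.
$subStatus = $fileOp->attemptAsync();
if ( $subStatus->value instanceof FileBackendStoreOpHandle ) {
	$resolved = $backend->executeOpHandlesInternal( array( 7 => $subStatus->value ) );
	$subStatus = $resolved[7]; // same key in, same key out
}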
/**
 * Does various sanity checks that the move is
 * valid. Only things based on the two titles
 * should be checked here.
 *
 * @return Status
 */
public function isValidMove() {
	global $wgContentHandlerUseDB;
	$status = new Status();

	if ($this->oldTitle->equals($this->newTitle)) {
		$status->fatal('selfmove');
	}
	if (!$this->oldTitle->isMovable()) {
		$status->fatal('immobile-source-namespace', $this->oldTitle->getNsText());
	}
	if ($this->newTitle->isExternal()) {
		$status->fatal('immobile-target-namespace-iw');
	}
	if (!$this->newTitle->isMovable()) {
		$status->fatal('immobile-target-namespace', $this->newTitle->getNsText());
	}

	$oldid = $this->oldTitle->getArticleID();

	if (strlen($this->newTitle->getDBkey()) < 1) {
		$status->fatal('articleexists');
	}
	if ($this->oldTitle->getDBkey() == '' || !$oldid || $this->newTitle->getDBkey() == '') {
		$status->fatal('badarticleerror');
	}

	# The move is allowed only if (1) the target doesn't exist, or
	# (2) the target is a redirect to the source, and has no history
	# (so we can undo bad moves right after they're done).
	if ($this->newTitle->getArticleID() && !$this->isValidMoveTarget()) {
		$status->fatal('articleexists');
	}

	// Content model checks
	if (!$wgContentHandlerUseDB &&
		$this->oldTitle->getContentModel() !== $this->newTitle->getContentModel()
	) {
		// can't move a page if that would change the page's content model
		$status->fatal(
			'bad-target-model',
			ContentHandler::getLocalizedName($this->oldTitle->getContentModel()),
			ContentHandler::getLocalizedName($this->newTitle->getContentModel())
		);
	}

	// Image-specific checks
	if ($this->oldTitle->inNamespace(NS_FILE)) {
		$status->merge($this->isValidFileMove());
	}

	if ($this->newTitle->inNamespace(NS_FILE) && !$this->oldTitle->inNamespace(NS_FILE)) {
		$status->fatal('nonfile-cannot-move-to-file');
	}

	// Hook for extensions to say a title can't be moved for technical reasons
	Hooks::run('MovePageIsValidMove', array($this->oldTitle, $this->newTitle, $status));

	return $status;
}
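// A sketch of how an extension can use the MovePageIsValidMove hook fired above
// to veto a move (the handler name and the 'mymove-blocked' message key are
// hypothetical): any fatal added to $status makes isValidMove() return a bad
// Status, so the move is rejected before any database work happens.
public static function onMovePageIsValidMove(Title $oldTitle, Title $newTitle, Status $status) {
	if ($newTitle->inNamespace(NS_PROJECT)) {
		$status->fatal('mymove-blocked'); // hypothetical message key
	}
	return true; // let other handlers run
}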
/**
 * Attempt a list of file operations sub-batches in series.
 *
 * The operations *in* each sub-batch will be done in parallel.
 * The caller is responsible for making sure the operations
 * within any given sub-batch do not depend on each other.
 * This will abort remaining ops on failure.
 *
 * @param array $pPerformOps Batches of file ops (batches use original indexes)
 * @param Status $status
 */
protected static function runParallelBatches(array $pPerformOps, Status $status) {
	$aborted = false; // set to true on unexpected errors
	foreach ($pPerformOps as $performOpsBatch) {
		/** @var FileOp[] $performOpsBatch */
		if ($aborted) { // check batch op abort flag...
			// We can't continue (even with $ignoreErrors) as $predicates is wrong.
			// Log the remaining ops as failed for recovery...
			foreach ($performOpsBatch as $i => $fileOp) {
				$status->success[$i] = false;
				++$status->failCount;
				$performOpsBatch[$i]->logFailure('attempt_aborted');
			}
			continue;
		}
		/** @var Status[] $statuses */
		$statuses = array();
		$opHandles = array();
		// Get the backend; all sub-batch ops belong to a single backend
		$backend = reset($performOpsBatch)->getBackend();
		// Get the operation handles or actually do it if there is just one.
		// If attemptAsync() returns a Status, it was either due to an error
		// or the backend does not support async ops and did it synchronously.
		foreach ($performOpsBatch as $i => $fileOp) {
			if (!isset($status->success[$i])) { // didn't already fail in precheck()
				// Parallel ops may be disabled in config due to missing dependencies
				// (e.g. needing popen()). When they are, $performOpsBatch has size 1.
				$subStatus = count($performOpsBatch) > 1
					? $fileOp->attemptAsync()
					: $fileOp->attempt();
				if ($subStatus->value instanceof FileBackendStoreOpHandle) {
					$opHandles[$i] = $subStatus->value; // deferred
				} else {
					$statuses[$i] = $subStatus; // done already
				}
			}
		}
		// Try to do all the operations concurrently...
		$statuses = $statuses + $backend->executeOpHandlesInternal($opHandles);
		// Marshall and merge all the responses (blocking)...
		foreach ($performOpsBatch as $i => $fileOp) {
			if (!isset($status->success[$i])) { // didn't already fail in precheck()
				$subStatus = $statuses[$i];
				$status->merge($subStatus);
				if ($subStatus->isOK()) {
					$status->success[$i] = true;
					++$status->successCount;
				} else {
					$status->success[$i] = false;
					++$status->failCount;
					$aborted = true; // set abort flag; we can't continue
				}
			}
		}
	}
}
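// A caller-side sketch of consuming the per-op bookkeeping that
// runParallelBatches() fills in (the op list and journal wiring are
// assumptions): $status->success is keyed by the original op indexes, so a
// caller can report exactly which operations went through and which failed or
// were aborted.
$status = FileOpBatch::attempt($fileOps, array(), $journal);
foreach ($status->success as $i => $ok) {
	if (!$ok) {
		wfDebug("file op #$i failed or was aborted\n");
	}
}
echo "{$status->successCount} succeeded, {$status->failCount} failed\n";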
/**
 * Common implementation for the APIEditBeforeSave, EditFilterMerged
 * and EditFilterMergedContent hooks.
 *
 * @param IContextSource $context The context of the edit
 * @param Content|null $content The new Content generated by the edit
 * @param string $text New page content (subject of filtering)
 * @param Status $status Error message to return
 * @param string $summary Edit summary for page
 * @param bool $minoredit Whether this is a minor edit according to the user
 *
 * @return bool
 */
public static function filterEdit(IContextSource $context, $content, $text, Status $status, $summary, $minoredit) {
	// Load vars
	$vars = new AbuseFilterVariableHolder();

	$title = $context->getTitle();

	// Some edits run through multiple hooks, but we only want to filter them once
	if (isset($title->editAlreadyFiltered)) {
		return true;
	} elseif ($title) {
		$title->editAlreadyFiltered = true;
	}

	self::$successful_action_vars = false;
	self::$last_edit_page = false;

	$user = $context->getUser();

	// Check for null edits.
	$oldtext = '';
	$oldcontent = null;

	if ($title instanceof Title && $title->canExist() && $title->exists()) {
		// Make sure we load the latest text saved in database (bug 31656)
		$page = $context->getWikiPage();
		$revision = $page->getRevision();
		if (!$revision) {
			return true;
		}

		if (defined('MW_SUPPORTS_CONTENTHANDLER')) {
			$oldcontent = $revision->getContent(Revision::RAW);
			$oldtext = AbuseFilter::contentToString($oldcontent);
		} else {
			$oldtext = AbuseFilter::revisionToString($revision, Revision::RAW);
		}

		// Cache article object so we can share a parse operation
		$articleCacheKey = $title->getNamespace() . ':' . $title->getText();
		AFComputedVariable::$articleCache[$articleCacheKey] = $page;
	} else {
		$page = null;
	}

	// Don't trigger for null edits.
	if ($content && $oldcontent && $oldcontent->equals($content)) {
		// Compare Content objects if available
		return true;
	} elseif (strcmp($oldtext, $text) == 0) {
		// Otherwise, compare strings
		return true;
	}

	$vars->addHolders(
		AbuseFilter::generateUserVars($user),
		AbuseFilter::generateTitleVars($title, 'ARTICLE')
	);

	$vars->setVar('action', 'edit');
	$vars->setVar('summary', $summary);
	$vars->setVar('minor_edit', $minoredit);

	$vars->setVar('old_wikitext', $oldtext);
	$vars->setVar('new_wikitext', $text);

	// TODO: set old_content and new_content vars, use them

	$vars->addHolders(AbuseFilter::getEditVars($title, $page));

	$filter_result = AbuseFilter::filterAction($vars, $title);

	if (!$filter_result->isOK()) {
		$status->merge($filter_result);

		return true; // re-show edit form
	}

	self::$successful_action_vars = $vars;
	self::$last_edit_page = $page;

	return true;
}
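// A sketch of how a handler like this can be wired up (the closure wrapper and
// the serialize() call are assumptions; real registration details live in the
// extension setup file): because filterEdit() always returns true, a filter hit
// surfaces to the user through the fatals merged into $status rather than by
// aborting the hook chain.
$wgHooks['EditFilterMergedContent'][] = function (
	IContextSource $context, Content $content, Status $status, $summary, User $user, $minoredit
) {
	return AbuseFilterHooks::filterEdit(
		$context, $content, $content->serialize(), $status, $summary, $minoredit
	);
};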
/**
 * Attempt a list of file operations sub-batches in series.
 *
 * The operations *in* each sub-batch will be done in parallel.
 * The caller is responsible for making sure the operations
 * within any given sub-batch do not depend on each other.
 * This will abort remaining ops on failure.
 *
 * @param array $pPerformOps
 * @param Status $status
 * @return Status
 */
protected static function runBatchParallel(array $pPerformOps, Status $status) {
	$aborted = false;
	foreach ($pPerformOps as $performOpsBatch) {
		if ($aborted) { // check batch op abort flag...
			// We can't continue (even with $ignoreErrors) as $predicates is wrong.
			// Log the remaining ops as failed for recovery...
			foreach ($performOpsBatch as $i => $fileOp) {
				$performOpsBatch[$i]->logFailure('attempt_aborted');
			}
			continue;
		}
		$statuses = array();
		$opHandles = array();
		// Get the backend; all sub-batch ops belong to a single backend
		$backend = reset($performOpsBatch)->getBackend();
		// If attemptAsync() returns synchronously, it was either an
		// error Status or the backend just doesn't support async ops.
		foreach ($performOpsBatch as $i => $fileOp) {
			if (!$fileOp->failed()) { // failed => already has Status
				$subStatus = $fileOp->attemptAsync();
				if ($subStatus->value instanceof FileBackendStoreOpHandle) {
					$opHandles[$i] = $subStatus->value; // deferred
				} else {
					$statuses[$i] = $subStatus; // done already
				}
			}
		}
		// Try to do all the operations concurrently...
		$statuses = $statuses + $backend->executeOpHandlesInternal($opHandles);
		// Marshall and merge all the responses (blocking)...
		foreach ($performOpsBatch as $i => $fileOp) {
			if (!$fileOp->failed()) { // failed => already has Status
				$subStatus = $statuses[$i];
				$status->merge($subStatus);
				if ($subStatus->isOK()) {
					$status->success[$i] = true;
					++$status->successCount;
				} else {
					$status->success[$i] = false;
					++$status->failCount;
					$aborted = true; // set abort flag; we can't continue
				}
			}
		}
	}

	return $status;
}