/**
 * Get contents of a javascript file for inline use.
 *
 * Roughly based on MediaWiki core methods:
 * - ResourceLoader::filter()
 * - ResourceLoaderFileModule::readScriptFiles()
 *
 * @param string $name Path to file relative to /modules/inline/
 * @return string Minified script
 * @throws Exception If file doesn't exist
 */
protected static function getInlineScript($name) {
	// Resolve the script inside the extension's inline-modules directory.
	$filePath = __DIR__ . '/../../modules/inline/' . $name;
	if (!file_exists($filePath)) {
		throw new Exception(__METHOD__ . ": file not found: \"{$filePath}\"");
	}
	$contents = file_get_contents($filePath);

	// The cache key is content-addressed (md5 of the file), so editing the
	// file automatically invalidates the cached minified copy.
	$key = wfMemcKey('centralauth', 'minify-js', md5($contents));
	$cache = wfGetCache(CACHE_ANYTHING);
	$cached = $cache->get($key);
	if (is_string($cached)) {
		return $cached;
	}

	// Cache miss: minify now and store for the next request.
	try {
		$minified = JavaScriptMinifier::minify($contents) . "\n/* cache key: {$key} */";
		$cache->set($key, $minified);
		return $minified;
	} catch (Exception $e) {
		// Minification failed: log it and fall back to serving the raw
		// script prefixed with a formatted exception comment.
		MWExceptionHandler::logException($e);
		wfDebugLog('CentralAuth', __METHOD__ . ": minification failed for {$name}: {$e}");
		return ResourceLoader::formatException($e) . "\n" . $contents;
	}
}
/**
 * Run the update: record one page view for page id $this->id.
 *
 * Two strategies:
 * - Low update frequency (<= 1) or SQLite: bump page.page_counter directly,
 *   deferred until the current transaction is idle.
 * - Otherwise: insert a row into the `hitcounter` buffer table and, with
 *   probability ~1/$checkfreq, fold buffered hits back via collect().
 */
public function doUpdate() {
	global $wgHitcounterUpdateFreq;
	$dbw = wfGetDB(DB_MASTER);
	if ($wgHitcounterUpdateFreq <= 1 || $dbw->getType() == 'sqlite') {
		$id = $this->id;
		$method = __METHOD__;
		// Defer the direct counter bump until no transaction is open so it
		// neither holds row locks nor gets rolled back with other changes.
		$dbw->onTransactionIdle(function () use($dbw, $id, $method) {
			try {
				$dbw->update('page', array('page_counter = page_counter + 1'), array('page_id' => $id), $method);
			} catch (DBError $e) {
				// A lost hit is not worth surfacing an error to the reader.
				MWExceptionHandler::logException($e);
			}
		});
		return;
	}
	# Not important enough to warrant an error page in case of failure
	try {
		// Since `hitcounter` is non-transactional, the contention is minimal
		$dbw->insert('hitcounter', array('hc_id' => $this->id), __METHOD__);
		// Roughly every $checkfreq-th hit, flush the buffer table into `page`.
		$checkfreq = intval($wgHitcounterUpdateFreq / 25 + 1);
		if (rand() % $checkfreq == 0 && $dbw->lastErrno() == 0) {
			$this->collect();
		}
	} catch (DBError $e) {
		MWExceptionHandler::logException($e);
	}
}
/**
 * Fetch the revision history of a topic, newest first.
 *
 * @param UUID $postId Topic root post id
 * @param int $limit Maximum number of history entries to return
 * @param UUID|null $offset Pagination cursor (exclusive; see 'offset-include')
 * @param string $direction 'rev' or 'fwd'
 * @return FormatterRow[]
 */
public function getResults(UUID $postId, $limit = 50, UUID $offset = null, $direction = 'fwd') {
	$history = $this->storage->find('TopicHistoryEntry', array('topic_root_id' => $postId), array('sort' => 'rev_id', 'order' => 'DESC', 'limit' => $limit, 'offset-id' => $offset, 'offset-dir' => $direction, 'offset-include' => false, 'offset-elastic' => false));
	if (!$history) {
		return array();
	}
	// Warm metadata caches for the whole batch before building rows.
	$this->loadMetadataBatch($history);
	$results = $replies = array();
	foreach ($history as $revision) {
		try {
			if ($this->excludeFromHistory($revision)) {
				continue;
			}
			// NOTE(review): the row is appended before buildResult() runs, so
			// a row whose build throws FlowException remains in $results
			// partially populated — pre-existing behavior, confirm intended.
			$results[] = $row = new TopicRow();
			$this->buildResult($revision, null, $row);
			if ($revision instanceof PostRevision) {
				$replyToId = $revision->getReplyToId();
				if ($replyToId) {
					// $revisionId into the key rather than value prevents
					// duplicate insertion
					$replies[$replyToId->getAlphadecimal()][$revision->getPostId()->getAlphadecimal()] = true;
				}
			}
		} catch (FlowException $e) {
			\MWExceptionHandler::logException($e);
		}
	}
	// Attach the collected child-reply ids to each post row.
	foreach ($results as $result) {
		if ($result->revision instanceof PostRevision) {
			$alpha = $result->revision->getPostId()->getAlphadecimal();
			$result->replies = isset($replies[$alpha]) ? array_keys($replies[$alpha]) : array();
		}
	}
	return $results;
}
/**
 * All queries must be against the same index. Results are equivalent to
 * array_map, maintaining order and key relationship between input $queries
 * and $result.
 *
 * @param array $queries
 * @param array $options
 * @return array|null null is query failure. empty array is no result. array is success
 */
public function findMulti(array $queries, array $options = array()) {
	if (!$queries) {
		return array();
	}
	// All queries are required to share the same keys; use the first as template.
	$keys = array_keys(reset($queries));
	if (isset($options['sort']) && !is_array($options['sort'])) {
		$options['sort'] = ObjectManager::makeArray($options['sort']);
	}
	try {
		$index = $this->getIndexFor($keys, $options);
		$res = $index->findMulti($queries, $options);
	} catch (NoIndexException $e) {
		// FIX: previously used array_search(), which returns the matching
		// *offset* — 0 (falsy) when 'topic_root_id' is the first key — so
		// the full logging branch was silently skipped in exactly the case
		// it was meant to catch. in_array() with strict comparison is the
		// correct membership test.
		if (in_array('topic_root_id', $keys, true)) {
			wfDebugLog('Flow', __METHOD__ . ': ' . json_encode($keys) . ' : ' . json_encode($options) . ' : ' . json_encode(array_map('get_class', $this->indexes)));
			\MWExceptionHandler::logException($e);
		} else {
			wfDebugLog('FlowDebug', __METHOD__ . ': ' . $e->getMessage());
		}
		// No usable index: fall back to querying primary storage directly.
		$res = $this->storage->findMulti($queries, $this->convertToDbOptions($options));
	}
	if ($res === null) {
		return null;
	}
	// Hydrate raw rows into objects, preserving query/row key relationships;
	// falsy rows are dropped.
	$output = array();
	foreach ($res as $index => $queryOutput) {
		foreach ($queryOutput as $k => $v) {
			if ($v) {
				$output[$index][$k] = $this->load($v);
			}
		}
	}
	return $output;
}
/**
 * Build formatter rows for a set of topics: title posts with their
 * summaries, watch status, and reply links.
 *
 * @param UUID[]|TopicListEntry[] $topicIdsOrEntries
 * @return FormatterRow[]
 */
public function getResults(array $topicIdsOrEntries) {
	$topicIds = $this->getTopicIds($topicIdsOrEntries);
	$allPostIds = $this->collectPostIds($topicIds);
	$topicSummary = $this->collectSummary($topicIds);
	$posts = $this->collectRevisions($allPostIds);
	$watchStatus = $this->collectWatchStatus($topicIds);
	// Post ids present in the tree but missing from storage get stand-in
	// "fake" posts so the topic structure stays complete.
	$missing = array_diff(array_keys($allPostIds), array_keys($posts));
	if ($missing) {
		$needed = array();
		foreach ($missing as $alpha) {
			// convert alpha back into UUID object
			$needed[] = $allPostIds[$alpha];
		}
		$posts += $this->createFakePosts($needed);
	}
	// Warm metadata caches for the whole batch before building rows.
	$this->loadMetadataBatch($posts);
	$results = array();
	$replies = array();
	foreach ($posts as $post) {
		try {
			if (!$this->permissions->isAllowed($post, 'view')) {
				continue;
			}
			$row = new TopicRow();
			$this->buildResult($post, null, $row);
			/** @var PostRevision $revision */
			$revision = $row->revision;
			$replyToId = $revision->getReplyToId();
			$replyToId = $replyToId ? $replyToId->getAlphadecimal() : null;
			$postId = $revision->getPostId()->getAlphadecimal();
			// NOTE(review): later posts sharing the same parent overwrite this
			// entry, so each parent keeps only the last child's id, and the
			// final loop below assigns a single id (string) rather than a
			// list — confirm whether a list of replies was intended.
			$replies[$replyToId] = $postId;
			if ($post->isTopicTitle()) {
				// Attach the summary
				if (isset($topicSummary[$postId])) {
					$row->summary = $this->buildResult($topicSummary[$postId], 'rev_id');
				}
				// Attach the watch status
				if (isset($watchStatus[$postId]) && $watchStatus[$postId]) {
					$row->isWatched = true;
				}
			}
			$results[] = $row;
		} catch (FlowException $e) {
			\MWExceptionHandler::logException($e);
		}
	}
	foreach ($results as $result) {
		$alpha = $result->revision->getPostId()->getAlphadecimal();
		$result->replies = isset($replies[$alpha]) ? $replies[$alpha] : array();
	}
	return $results;
}
/**
 * Creates a flow board.
 * Archives any pre-existing wikitext talk page.
 *
 * @param array $data Form data
 * @return Status Status indicating result
 */
public function onSubmit(array $data) {
	$page = $data['page'];
	$title = Title::newFromText($page);
	if (!$title) {
		return Status::newFatal('flow-special-enableflow-invalid-title', $page);
	}
	// Canonicalize so the error or confirmation message looks nicer (no underscores).
	$page = $title->getPrefixedText();
	if ($this->occupationController->isTalkpageOccupied($title, true)) {
		return Status::newFatal('flow-special-enableflow-board-already-exists', $page);
	}
	$status = Status::newGood();
	if ($title->exists(Title::GAID_FOR_UPDATE)) {
		// Existing wikitext page: convert/archive it into a Flow board —
		// unless it is managed by LiquidThreads, which cannot be converted here.
		if (class_exists('LqtDispatch') && \LqtDispatch::isLqtPage($title)) {
			return Status::newFatal('flow-special-enableflow-page-is-liquidthreads', $page);
		}
		$logger = Container::get('default_logger');
		$converter = new Converter(wfGetDB(DB_MASTER), Container::get('importer'), $logger, $this->occupationController->getTalkpageManager(), new EnableFlowWikitextConversionStrategy(Container::get('parser'), new NullImportSourceStore(), $logger, array(), $data['header']));
		try {
			$converter->convert($title);
		} catch (\Exception $e) {
			// Surface conversion failures as a form error rather than a fatal page.
			\MWExceptionHandler::logException($e);
			$status->fatal('flow-error-external', $e->getMessage());
		}
	} else {
		// Fresh page: create a new Flow board seeded with the supplied header.
		$allowCreationStatus = $this->occupationController->allowCreation($title, $this->getUser(), false);
		if (!$allowCreationStatus->isGood()) {
			return Status::newFatal('flow-special-enableflow-board-creation-not-allowed', $page);
		}
		$loader = $this->loaderFactory->createWorkflowLoader($title);
		$blocks = $loader->getBlocks();
		$action = 'edit-header';
		$params = array('header' => array('content' => $data['header'], 'format' => 'wikitext'));
		$blocksToCommit = $loader->handleSubmit($this->getContext(), $action, $params);
		// Collect any block-level validation errors into the returned status.
		foreach ($blocks as $block) {
			if ($block->hasErrors()) {
				$errors = $block->getErrors();
				foreach ($errors as $errorKey) {
					$status->fatal($block->getErrorMessage($errorKey));
				}
			}
		}
		$loader->commit($blocksToCommit);
	}
	// Remember the raw (pre-canonicalization) page name for later display.
	$this->page = $data['page'];
	return $status;
}
/**
 * Fetch the revision history of a single post, newest first.
 *
 * @param UUID $postId
 * @param int $limit Maximum number of revisions to return
 * @param UUID|null $offset Pagination cursor (exclusive)
 * @param string $direction 'rev' or 'fwd'
 * @return FormatterRow[]
 */
public function getResults(UUID $postId, $limit = 50, UUID $offset = null, $direction = 'fwd') {
	$found = $this->storage->find(
		'PostRevision',
		array('rev_type_id' => $postId),
		array(
			'sort' => 'rev_id',
			'order' => 'DESC',
			'limit' => $limit,
			'offset-id' => $offset,
			'offset-dir' => $direction,
			'offset-include' => false,
			'offset-elastic' => false,
		)
	);
	if (!$found) {
		return array();
	}

	// Warm the metadata caches for the whole batch before formatting.
	$this->loadMetadataBatch($found);

	$rows = array();
	foreach ($found as $rev) {
		try {
			// The row is appended before it is populated; a failed build
			// leaves the blank row in place (matches existing behavior).
			$rows[] = $row = new FormatterRow();
			$this->buildResult($rev, null, $row);
		} catch (FlowException $ex) {
			\MWExceptionHandler::logException($ex);
		}
	}
	return $rows;
}
/**
 * Handle an exception as an API response
 *
 * Order matters here: rollback first, then the hook, then logging, then
 * build and emit the formatted error result.
 *
 * @since 1.23
 * @param Exception $e
 */
protected function handleException(Exception $e) {
	// Bug 63145: Rollback any open database transactions
	if (!$e instanceof UsageException) {
		// UsageExceptions are intentional, so don't rollback if that's the case
		try {
			MWExceptionHandler::rollbackMasterChangesAndLog($e);
		} catch (DBError $e2) {
			// Rollback threw an exception too. Log it, but don't interrupt
			// our regularly scheduled exception handling.
			MWExceptionHandler::logException($e2);
		}
	}
	// Allow extra cleanup and logging
	Hooks::run('ApiMain::onException', array($this, $e));
	// Log it
	if (!$e instanceof UsageException) {
		MWExceptionHandler::logException($e);
	}
	// Handle any kind of exception by outputting properly formatted error message.
	// If this fails, an unhandled exception should be thrown so that global error
	// handler will process and log it.
	$errCode = $this->substituteResultWithError($e);
	// Error results should not be cached
	$this->setCacheMode('private');
	$response = $this->getRequest()->response();
	$headerStr = 'MediaWiki-API-Error: ' . $errCode;
	if ($e->getCode() === 0) {
		$response->header($headerStr);
	} else {
		$response->header($headerStr, true, $e->getCode());
	}
	// Reset and print just the error message
	ob_clean();
	// Printer may not be initialized if the extractRequestParams() fails for the main module
	$this->createErrorPrinter();
	try {
		$this->printResult(true);
	} catch (UsageException $ex) {
		// The error printer itself is failing. Try suppressing its request
		// parameters and redo.
		$this->setWarning('Error printer failed (will retry without params): ' . $ex->getMessage());
		$this->mPrinter = null;
		$this->createErrorPrinter();
		$this->mPrinter->forceDefaultParams();
		$this->printResult(true);
	}
}
/**
 * Do standard deferred updates after page view (existing or missing page)
 *
 * @param User $user The relevant user
 * @param int $oldid Revision id being viewed; if not given or 0, latest revision is assumed
 */
public function doViewUpdates(User $user, $oldid = 0) {
	// Nothing to record while the wiki is read-only.
	if (wfReadOnly()) {
		return;
	}

	Hooks::run('PageViewUpdates', [$this, $user]);

	// Update newtalk / watchlist notification status. A database error here
	// must not take down the page view (e.g. master unreachable), so it is
	// logged and swallowed.
	try {
		$user->clearNotification($this->mTitle, $oldid);
	} catch (DBError $dbError) {
		MWExceptionHandler::logException($dbError);
	}
}
/**
 * Save this user's settings into the database.
 *
 * Uses a compare-and-set on user_touched to detect concurrent writes and
 * stale loads; throws MWException if the CAS fails.
 *
 * @todo Only rarely do all these fields need to be set!
 */
public function saveSettings() {
	global $wgAuth;
	if (wfReadOnly()) {
		// @TODO: caller should deal with this instead!
		// This should really just be an exception.
		MWExceptionHandler::logException(new DBExpectedError(null, "Could not update user with ID '{$this->mId}'; DB is read-only."));
		return;
	}
	$this->load();
	$this->loadPasswords();
	if (0 == $this->mId) {
		return; // anon
	}
	// Get a new user_touched that is higher than the old one.
	// This will be used for a CAS check as a last-resort safety
	// check against race conditions and slave lag.
	$oldTouched = $this->mTouched;
	$newTouched = $this->newTouchedTimestamp();
	if (!$wgAuth->allowSetLocalPassword()) {
		// External auth owns the password; blank out the local one.
		$this->mPassword = self::getPasswordFactory()->newFromCiphertext(null);
	}
	$dbw = wfGetDB(DB_MASTER);
	// The user_touched condition makes this a compare-and-set: the row is
	// only written if nobody else updated it since we loaded it.
	$dbw->update('user', array('user_name' => $this->mName, 'user_password' => $this->mPassword->toString(), 'user_newpassword' => $this->mNewpassword->toString(), 'user_newpass_time' => $dbw->timestampOrNull($this->mNewpassTime), 'user_real_name' => $this->mRealName, 'user_email' => $this->mEmail, 'user_email_authenticated' => $dbw->timestampOrNull($this->mEmailAuthenticated), 'user_touched' => $dbw->timestamp($newTouched), 'user_token' => strval($this->mToken), 'user_email_token' => $this->mEmailToken, 'user_email_token_expires' => $dbw->timestampOrNull($this->mEmailTokenExpires), 'user_password_expires' => $dbw->timestampOrNull($this->mPasswordExpires)), array('user_id' => $this->mId, 'user_touched' => $dbw->timestamp($oldTouched)), __METHOD__);
	if (!$dbw->affectedRows()) {
		// Maybe the problem was a missed cache update; clear it to be safe
		$this->clearSharedCache();
		// User was changed in the meantime or loaded with stale data
		$from = $this->queryFlagsUsed & self::READ_LATEST ? 'master' : 'slave';
		throw new MWException("CAS update failed on user_touched for user ID '{$this->mId}' (read from {$from});" . " the version of the user to be saved is older than the current version.");
	}
	// CAS succeeded: finish bookkeeping and invalidate caches.
	$this->mTouched = $newTouched;
	$this->saveOptions();
	Hooks::run('UserSaveSettings', array($this));
	$this->clearSharedCache();
	$this->getUserPage()->invalidateCache();
}
/**
 * Flush profiling data at the end of a request: write profiler output,
 * push stats to statsd (if configured), and log a 'profileoutput' entry
 * with request context (elapsed time, proxy headers, URL).
 *
 * @todo document
 */
function wfLogProfilingData() {
	global $wgDebugLogGroups, $wgDebugRawPage;
	$context = RequestContext::getMain();
	$request = $context->getRequest();
	$profiler = Profiler::instance();
	$profiler->setContext($context);
	$profiler->logData();
	$config = $context->getConfig();
	// Send buffered stats to the configured statsd server ("host[:port]").
	if ($config->get('StatsdServer')) {
		try {
			$statsdServer = explode(':', $config->get('StatsdServer'));
			$statsdHost = $statsdServer[0];
			$statsdPort = isset($statsdServer[1]) ? $statsdServer[1] : 8125;
			$statsdSender = new SocketSender($statsdHost, $statsdPort);
			$statsdClient = new SamplingStatsdClient($statsdSender, true, false);
			$statsdClient->send($context->getStats()->getBuffer());
		} catch (Exception $ex) {
			// Stats delivery failures must never break the request.
			MWExceptionHandler::logException($ex);
		}
	}
	# Profiling must actually be enabled...
	if ($profiler instanceof ProfilerStub) {
		return;
	}
	if (isset($wgDebugLogGroups['profileoutput']) && $wgDebugLogGroups['profileoutput'] === false) {
		// Explicitly disabled
		return;
	}
	if (!$wgDebugRawPage && wfIsDebugRawPage()) {
		return;
	}
	// Build the log context: elapsed time plus any proxy-related headers.
	$ctx = array('elapsed' => $request->getElapsedTime());
	if (!empty($_SERVER['HTTP_X_FORWARDED_FOR'])) {
		$ctx['forwarded_for'] = $_SERVER['HTTP_X_FORWARDED_FOR'];
	}
	if (!empty($_SERVER['HTTP_CLIENT_IP'])) {
		$ctx['client_ip'] = $_SERVER['HTTP_CLIENT_IP'];
	}
	if (!empty($_SERVER['HTTP_FROM'])) {
		$ctx['from'] = $_SERVER['HTTP_FROM'];
	}
	if (isset($ctx['forwarded_for']) || isset($ctx['client_ip']) || isset($ctx['from'])) {
		$ctx['proxy'] = $_SERVER['REMOTE_ADDR'];
	}
	// Don't load $wgUser at this late stage just for statistics purposes
	// @todo FIXME: We can detect some anons even if it is not loaded.
	// See User::getId()
	$user = $context->getUser();
	$ctx['anon'] = $user->isItemLoaded('id') && $user->isAnon();
	// Command line script uses a FauxRequest object which does not have
	// any knowledge about an URL and throw an exception instead.
	try {
		$ctx['url'] = urldecode($request->getRequestURL());
	} catch (Exception $ignored) {
		// no-op
	}
	$ctx['output'] = $profiler->getOutput();
	$log = LoggerFactory::getInstance('profileoutput');
	$log->info("Elapsed: {elapsed}; URL: <{url}>\n{output}", $ctx);
}
/**
 * Potentially open a socket and sent an HTTP request back to the server
 * to run a specified number of jobs. This registers a callback to cleanup
 * the socket once it's done.
 *
 * Async mode fires a signed POST at Special:RunJobs and does not wait for
 * completion; on any failure it falls back to running jobs synchronously
 * while the user waits.
 */
public function triggerJobs() {
	$jobRunRate = $this->config->get('JobRunRate');
	if ($this->getTitle()->isSpecial('RunJobs')) {
		return; // recursion guard
	} elseif ($jobRunRate <= 0 || wfReadOnly()) {
		return;
	}
	if ($jobRunRate < 1) {
		// Fractional rate: run one job with probability $jobRunRate.
		$max = mt_getrandmax();
		if (mt_rand(0, $max) > $max * $jobRunRate) {
			return; // the higher the job run rate, the less likely we return here
		}
		$n = 1;
	} else {
		$n = intval($jobRunRate);
	}
	$runJobsLogger = LoggerFactory::getInstance('runJobs');
	// Fall back to running the job(s) while the user waits if needed
	if (!$this->config->get('RunJobsAsync')) {
		$runner = new JobRunner($runJobsLogger);
		$runner->run(['maxJobs' => $n]);
		return;
	}
	// Do not send request if there are probably no jobs
	try {
		$group = JobQueueGroup::singleton();
		if (!$group->queuesHaveJobs(JobQueueGroup::TYPE_DEFAULT)) {
			return;
		}
	} catch (JobQueueError $e) {
		MWExceptionHandler::logException($e);
		return; // do not make the site unavailable
	}
	// Build a short-lived signed request so only we can trigger job runs.
	$query = ['title' => 'Special:RunJobs', 'tasks' => 'jobs', 'maxjobs' => $n, 'sigexpiry' => time() + 5];
	$query['signature'] = SpecialRunJobs::getQuerySignature($query, $this->config->get('SecretKey'));
	$errno = $errstr = null;
	$info = wfParseUrl($this->config->get('CanonicalServer'));
	$host = $info ? $info['host'] : null;
	$port = 80;
	if (isset($info['scheme']) && $info['scheme'] == 'https') {
		// Use a TLS stream wrapper for https servers.
		$host = "tls://" . $host;
		$port = 443;
	}
	if (isset($info['port'])) {
		$port = $info['port'];
	}
	MediaWiki\suppressWarnings();
	$sock = $host ? fsockopen($host, $port, $errno, $errstr, 0.1) : false;
	MediaWiki\restoreWarnings();
	$invokedWithSuccess = true;
	if ($sock) {
		$special = SpecialPageFactory::getPage('RunJobs');
		$url = $special->getPageTitle()->getCanonicalURL($query);
		$req = "POST {$url} HTTP/1.1\r\n" . "Host: {$info['host']}\r\n" . "Connection: Close\r\n" . "Content-Length: 0\r\n\r\n";
		$runJobsLogger->info("Running {$n} job(s) via '{$url}'");
		// Send a cron API request to be performed in the background.
		// Give up if this takes too long to send (which should be rare).
		stream_set_timeout($sock, 2);
		$bytes = fwrite($sock, $req);
		if ($bytes !== strlen($req)) {
			$invokedWithSuccess = false;
			$runJobsLogger->error("Failed to start cron API (socket write error)");
		} else {
			// Do not wait for the response (the script should handle client aborts).
			// Make sure that we don't close before that script reaches ignore_user_abort().
			$start = microtime(true);
			$status = fgets($sock);
			$sec = microtime(true) - $start;
			if (!preg_match('#^HTTP/\\d\\.\\d 202 #', $status)) {
				$invokedWithSuccess = false;
				$runJobsLogger->error("Failed to start cron API: received '{$status}' ({$sec})");
			}
		}
		fclose($sock);
	} else {
		$invokedWithSuccess = false;
		$runJobsLogger->error("Failed to start cron API (socket error {$errno}): {$errstr}");
	}
	// Fall back to running the job(s) while the user waits if needed
	if (!$invokedWithSuccess) {
		$runJobsLogger->warning("Jobs switched to blocking; Special:RunJobs disabled");
		$runner = new JobRunner($runJobsLogger);
		$runner->run(['maxJobs' => $n]);
	}
}
/**
 * Sum per-type queue sizes across all partition queues.
 *
 * Returns null if any partition does not support the operation; throws via
 * throwErrorIfAllPartitionsDown() when every partition errored out.
 */
protected function doGetSiblingQueueSizes(array $types) {
	$totals = array();
	$failures = 0;
	/** @var JobQueue $partition */
	foreach ($this->partitionQueues as $partition) {
		try {
			$partitionSizes = $partition->doGetSiblingQueueSizes($types);
			// A non-array result means this partition cannot answer, which
			// makes the aggregate meaningless — bail out entirely.
			if (!is_array($partitionSizes)) {
				return null;
			}
			foreach ($partitionSizes as $type => $count) {
				if (isset($totals[$type])) {
					$totals[$type] += $count;
				} else {
					$totals[$type] = $count;
				}
			}
		} catch (JobQueueError $err) {
			++$failures;
			MWExceptionHandler::logException($err);
		}
	}
	// Raise an error if every single partition failed.
	$this->throwErrorIfAllPartitionsDown($failures);
	return $totals;
}
/**
 * Run every update in $queue, looping until updates stop enqueuing new
 * updates. DataUpdates are batched through DataUpdate::runUpdates(); other
 * updates run individually with a master commit after each, and a rollback
 * (plus logging) if one throws.
 */
public static function execute(array &$queue, $mode) {
	$updates = $queue; // snapshot of queue
	// Keep doing rounds of updates until none get enqueued
	while (count($updates)) {
		$queue = array(); // clear the queue
		// Split the snapshot: DataUpdates run as a batch, the rest one-by-one.
		/** @var DataUpdate[] $dataUpdates */
		$dataUpdates = array();
		/** @var DeferrableUpdate[] $otherUpdates */
		$otherUpdates = array();
		foreach ($updates as $update) {
			if ($update instanceof DataUpdate) {
				$dataUpdates[] = $update;
			} else {
				$otherUpdates[] = $update;
			}
		}
		// Delegate DataUpdate execution to the DataUpdate class
		DataUpdate::runUpdates($dataUpdates, $mode);
		// Execute the non-DataUpdate tasks
		foreach ($otherUpdates as $update) {
			try {
				$update->doUpdate();
				wfGetLBFactory()->commitMasterChanges(__METHOD__);
			} catch (Exception $e) {
				// We don't want exceptions thrown during deferred updates to
				// be reported to the user since the output is already sent
				if (!$e instanceof ErrorPageError) {
					MWExceptionHandler::logException($e);
				}
				// Make sure incomplete transactions are not committed and end any
				// open atomic sections so that other DB updates have a chance to run
				wfGetLBFactory()->rollbackMasterChanges(__METHOD__);
			}
		}
		$updates = $queue; // new snapshot of queue (check for new entries)
	}
}
/**
 * Output a report about the exception and takes care of formatting.
 * It will be either HTML or plain text based on isCommandLine().
 */
function report() {
	global $wgMimeType;

	// Always log the exception, whichever rendering path we take below.
	MWExceptionHandler::logException($this);

	if (defined('MW_API')) {
		// Unhandled API exception, we can't be sure that format printer is alive
		header('MediaWiki-API-Error: internal_api_error_' . get_class($this));
		wfHttpError(500, 'Internal Server Error', $this->getText());
		return;
	}

	if (self::isCommandLine()) {
		// Plain-text output for CLI scripts.
		MWExceptionHandler::printError($this->getText());
		return;
	}

	// Regular web request: emit a 500 and render the HTML error page.
	header('HTTP/1.1 500 MediaWiki exception');
	header('Status: 500 MediaWiki exception', true);
	header("Content-Type: {$wgMimeType}; charset=utf-8", true);
	$this->reportHTML();
}
/**
 * Potentially open a socket and sent an HTTP request back to the server
 * to run a specified number of jobs. This registers a callback to cleanup
 * the socket once it's done.
 *
 * Async mode posts a signed request to Special:RunJobs; if the socket cannot
 * be opened, jobs run synchronously while the user waits.
 */
public function triggerJobs() {
	$jobRunRate = $this->config->get('JobRunRate');
	if ($jobRunRate <= 0 || wfReadOnly()) {
		return;
	} elseif ($this->getTitle()->isSpecial('RunJobs')) {
		return; // recursion guard
	}
	if ($jobRunRate < 1) {
		// Fractional rate: run one job with probability $jobRunRate.
		$max = mt_getrandmax();
		if (mt_rand(0, $max) > $max * $jobRunRate) {
			return; // the higher the job run rate, the less likely we return here
		}
		$n = 1;
	} else {
		$n = intval($jobRunRate);
	}
	$runJobsLogger = LoggerFactory::getInstance('runJobs');
	if (!$this->config->get('RunJobsAsync')) {
		// Fall back to running the job here while the user waits
		$runner = new JobRunner($runJobsLogger);
		$runner->run(array('maxJobs' => $n));
		return;
	}
	try {
		if (!JobQueueGroup::singleton()->queuesHaveJobs(JobQueueGroup::TYPE_DEFAULT)) {
			return; // do not send request if there are probably no jobs
		}
	} catch (JobQueueError $e) {
		MWExceptionHandler::logException($e);
		return; // do not make the site unavailable
	}
	// Build a short-lived signed request so only we can trigger job runs.
	$query = array('title' => 'Special:RunJobs', 'tasks' => 'jobs', 'maxjobs' => $n, 'sigexpiry' => time() + 5);
	$query['signature'] = SpecialRunJobs::getQuerySignature($query, $this->config->get('SecretKey'));
	$errno = $errstr = null;
	$info = wfParseUrl($this->config->get('Server'));
	MediaWiki\suppressWarnings();
	$sock = fsockopen($info['host'], isset($info['port']) ? $info['port'] : 80, $errno, $errstr, 0.1);
	MediaWiki\restoreWarnings();
	if (!$sock) {
		$runJobsLogger->error("Failed to start cron API (socket error {$errno}): {$errstr}");
		// Fall back to running the job here while the user waits
		$runner = new JobRunner($runJobsLogger);
		$runner->run(array('maxJobs' => $n));
		return;
	}
	$url = wfAppendQuery(wfScript('index'), $query);
	$req = "POST {$url} HTTP/1.1\r\n" . "Host: {$info['host']}\r\n" . "Connection: Close\r\n" . "Content-Length: 0\r\n\r\n";
	$runJobsLogger->info("Running {$n} job(s) via '{$url}'");
	// Send a cron API request to be performed in the background.
	// Give up if this takes too long to send (which should be rare).
	stream_set_timeout($sock, 1);
	$bytes = fwrite($sock, $req);
	if ($bytes !== strlen($req)) {
		$runJobsLogger->error("Failed to start cron API (socket write error)");
	} else {
		// Do not wait for the response (the script should handle client aborts).
		// Make sure that we don't close before that script reaches ignore_user_abort().
		$status = fgets($sock);
		if (!preg_match('#^HTTP/\\d\\.\\d 202 #', $status)) {
			$runJobsLogger->error("Failed to start cron API: received '{$status}'");
		}
	}
	fclose($sock);
}
/**
 * Run any "on transaction pre-commit" callbacks.
 *
 * Callbacks may themselves register further callbacks; the loop keeps
 * draining the list until it stays empty. If callbacks throw, all remaining
 * callbacks still run: earlier exceptions are logged and only the last one
 * is re-thrown at the end.
 *
 * @since 1.22
 */
protected function runOnTransactionPreCommitCallbacks() {
	$e = $ePrior = null; // last exception
	do {
		// callbacks may add callbacks :)
		$callbacks = $this->mTrxPreCommitCallbacks;
		$this->mTrxPreCommitCallbacks = array(); // recursion guard
		foreach ($callbacks as $callback) {
			try {
				list($phpCallback) = $callback;
				call_user_func($phpCallback);
			} catch (Exception $e) {
				// Log the previously-held exception; keep only the newest
				// one in $e for the final re-throw below.
				if ($ePrior) {
					MWExceptionHandler::logException($ePrior);
				}
				$ePrior = $e;
			}
		}
	} while (count($this->mTrxPreCommitCallbacks));
	if ($e instanceof Exception) {
		throw $e; // re-throw any last exception
	}
}
/**
 * Do any deferred updates and clear the list
 *
 * Loops because updates may enqueue further updates; exceptions from
 * individual updates are logged (not ErrorPageError) and do not stop
 * the remaining updates.
 *
 * @param string $commit Set to 'commit' to commit after every update to
 *   prevent lock contention
 */
public static function doUpdates($commit = '') {
	global $wgDeferredUpdateList;
	$updates = array_merge($wgDeferredUpdateList, self::$updates);
	// No need to get master connections in case of empty updates array
	if (!count($updates)) {
		return;
	}
	$dbw = false;
	$doCommit = $commit == 'commit';
	if ($doCommit) {
		$dbw = wfGetDB(DB_MASTER);
	}
	while ($updates) {
		self::clearPendingUpdates();
		/** @var DeferrableUpdate $update */
		foreach ($updates as $update) {
			try {
				$update->doUpdate();
				// Flush after each update when requested, to release locks early.
				if ($doCommit && $dbw->trxLevel()) {
					$dbw->commit(__METHOD__, 'flush');
				}
			} catch (Exception $e) {
				// We don't want exceptions thrown during deferred updates to
				// be reported to the user since the output is already sent.
				// Instead we just log them.
				if (!$e instanceof ErrorPageError) {
					MWExceptionHandler::logException($e);
				}
			}
		}
		// Pick up anything the updates above enqueued.
		$updates = array_merge($wgDeferredUpdateList, self::$updates);
	}
}
/**
 * Sum per-type sibling queue sizes over every partition queue.
 *
 * Returns null as soon as one partition does not support the operation;
 * partitions that throw JobQueueError are logged and skipped.
 */
protected function doGetSiblingQueueSizes( array $types ) {
	$aggregate = array();
	foreach ( $this->partitionQueues as $partitionQueue ) {
		try {
			$partSizes = $partitionQueue->doGetSiblingQueueSizes( $types );
			if ( !is_array( $partSizes ) ) {
				// Not supported on all partitions; bail
				return null;
			}
			foreach ( $partSizes as $type => $size ) {
				$aggregate[$type] = isset( $aggregate[$type] ) ? $aggregate[$type] + $size : $size;
			}
		} catch ( JobQueueError $err ) {
			// Skip this partition but keep aggregating the others
			MWExceptionHandler::logException( $err );
		}
	}
	return $aggregate;
}
/**
 * Push all jobs buffered via lazyPush() into their respective queues
 *
 * @return void
 * @since 1.26
 */
public static function pushLazyJobs() {
	foreach (self::$instances as $jobQueueGroup) {
		try {
			$jobQueueGroup->push($jobQueueGroup->bufferedJobs);
			// Only drain the buffer once the push has succeeded.
			$jobQueueGroup->bufferedJobs = [];
		} catch (Exception $pushError) {
			// Get in as many jobs as possible and let other post-send updates happen
			MWExceptionHandler::logException($pushError);
		}
	}
}
/**
 * Generate code for a response.
 *
 * @param ResourceLoaderContext $context Context in which to generate a response
 * @param array $modules List of module objects keyed by module name
 * @param array $missing List of requested module names that are unregistered (optional)
 * @return string Response data
 */
public function makeModuleResponse(ResourceLoaderContext $context, array $modules, array $missing = array()) {
	$out = '';
	$states = array();
	if (!count($modules) && !count($missing)) {
		return <<<MESSAGE
/* This file is the Web entry point for MediaWiki's ResourceLoader:
   <https://www.mediawiki.org/wiki/ResourceLoader>. In this request,
   no modules were requested. Max made me put this here. */
MESSAGE;
	}
	// Image requests short-circuit: return raw image data (or an error).
	$image = $context->getImageObj();
	if ($image) {
		$data = $image->getImageData($context);
		if ($data === false) {
			$data = '';
			$this->errors[] = 'Image generation failed';
		}
		return $data;
	}
	// Pre-fetch blobs
	if ($context->shouldIncludeMessages()) {
		try {
			$blobs = $this->blobStore->get($this, $modules, $context->getLanguage());
		} catch (Exception $e) {
			MWExceptionHandler::logException($e);
			wfDebugLog('resourceloader', __METHOD__ . ": pre-fetching blobs from MessageBlobStore failed: {$e}");
			$this->errors[] = self::formatExceptionNoComment($e);
		}
	} else {
		$blobs = array();
	}
	foreach ($missing as $name) {
		$states[$name] = 'missing';
	}
	// Generate output
	$isRaw = false;
	foreach ($modules as $name => $module) {
		/**
		 * @var $module ResourceLoaderModule
		 */
		try {
			$scripts = '';
			if ($context->shouldIncludeScripts()) {
				// If we are in debug mode, we'll want to return an array of URLs if possible
				// However, we can't do this if the module doesn't support it
				// We also can't do this if there is an only= parameter, because we have to give
				// the module a way to return a load.php URL without causing an infinite loop
				if ($context->getDebug() && !$context->getOnly() && $module->supportsURLLoading()) {
					$scripts = $module->getScriptURLsForDebug($context);
				} else {
					$scripts = $module->getScript($context);
					// rtrim() because there are usually a few line breaks
					// after the last ';'. A new line at EOF, a new line
					// added by ResourceLoaderFileModule::readScriptFiles, etc.
					if (is_string($scripts) && strlen($scripts) && substr(rtrim($scripts), -1) !== ';') {
						// Append semicolon to prevent weird bugs caused by files not
						// terminating their statements right (bug 27054)
						$scripts .= ";\n";
					}
				}
			}
			// Styles
			$styles = array();
			if ($context->shouldIncludeStyles()) {
				// Don't create empty stylesheets like array( '' => '' ) for modules
				// that don't *have* any stylesheets (bug 38024).
				$stylePairs = $module->getStyles($context);
				if (count($stylePairs)) {
					// If we are in debug mode without &only= set, we'll want to return an array of URLs
					// See comment near shouldIncludeScripts() for more details
					if ($context->getDebug() && !$context->getOnly() && $module->supportsURLLoading()) {
						$styles = array('url' => $module->getStyleURLsForDebug($context));
					} else {
						// Minify CSS before embedding in mw.loader.implement call
						// (unless in debug mode)
						if (!$context->getDebug()) {
							foreach ($stylePairs as $media => $style) {
								// Can be either a string or an array of strings.
								if (is_array($style)) {
									$stylePairs[$media] = array();
									foreach ($style as $cssText) {
										if (is_string($cssText)) {
											$stylePairs[$media][] = $this->filter('minify-css', $cssText);
										}
									}
								} elseif (is_string($style)) {
									$stylePairs[$media] = $this->filter('minify-css', $style);
								}
							}
						}
						// Wrap styles into @media groups as needed and flatten into a numerical array
						$styles = array('css' => self::makeCombinedStyles($stylePairs));
					}
				}
			}
			// Messages
			$messagesBlob = isset($blobs[$name]) ? $blobs[$name] : '{}';
			// Append output
			switch ($context->getOnly()) {
				case 'scripts':
					if (is_string($scripts)) {
						// Load scripts raw...
						$out .= $scripts;
					} elseif (is_array($scripts)) {
						// ...except when $scripts is an array of URLs
						$out .= self::makeLoaderImplementScript($name, $scripts, array(), array());
					}
					break;
				case 'styles':
					// We no longer seperate into media, they are all combined now with
					// custom media type groups into @media .. {} sections as part of the css string.
					// Module returns either an empty array or a numerical array with css strings.
					$out .= isset($styles['css']) ? implode('', $styles['css']) : '';
					break;
				case 'messages':
					$out .= self::makeMessageSetScript(new XmlJsCode($messagesBlob));
					break;
				case 'templates':
					$out .= Xml::encodeJsCall('mw.templates.set', array($name, (object) $module->getTemplates()), ResourceLoader::inDebugMode());
					break;
				default:
					$out .= self::makeLoaderImplementScript($name, $scripts, $styles, new XmlJsCode($messagesBlob), $module->getTemplates());
					break;
			}
		} catch (Exception $e) {
			MWExceptionHandler::logException($e);
			wfDebugLog('resourceloader', __METHOD__ . ": generating module package failed: {$e}");
			$this->errors[] = self::formatExceptionNoComment($e);
			// Respond to client with error-state instead of module implementation
			$states[$name] = 'error';
			unset($modules[$name]);
		}
		$isRaw |= $module->isRaw();
	}
	// Update module states
	if ($context->shouldIncludeScripts() && !$context->getRaw() && !$isRaw) {
		if (count($modules) && $context->getOnly() === 'scripts') {
			// Set the state of modules loaded as only scripts to ready as
			// they don't have an mw.loader.implement wrapper that sets the state
			foreach ($modules as $name => $module) {
				$states[$name] = 'ready';
			}
		}
		// Set the state of modules we didn't respond to with mw.loader.implement
		if (count($states)) {
			$out .= self::makeLoaderStateScript($states);
		}
	} else {
		if (count($states)) {
			$this->errors[] = 'Problematic modules: ' . FormatJson::encode($states, ResourceLoader::inDebugMode());
		}
	}
	// Minify the final payload unless the request is in debug mode.
	if (!$context->getDebug()) {
		if ($context->getOnly() === 'styles') {
			$out = $this->filter('minify-css', $out);
		} else {
			$out = $this->filter('minify-js', $out);
		}
	}
	return $out;
}
/**
 * Do a job from the job queue.
 *
 * Opportunistically runs jobs at the tail end of a request, either by
 * spawning a background runJobs.php process (when shell exec is available
 * and $wgPhpCli is executable) or by running the jobs in-process while the
 * user waits. Does nothing when $wgJobRunRate is disabled or the wiki is
 * in read-only mode.
 */
private function doJobs() {
	global $wgJobRunRate, $wgPhpCli, $IP;

	if ( $wgJobRunRate <= 0 || wfReadOnly() ) {
		return;
	}

	if ( $wgJobRunRate < 1 ) {
		// Fractional rate: run a single job with probability $wgJobRunRate
		$max = mt_getrandmax();
		if ( mt_rand( 0, $max ) > $max * $wgJobRunRate ) {
			return; // the higher $wgJobRunRate, the less likely we return here
		}
		$n = 1;
	} else {
		// Integer rate: run that many jobs per request
		$n = intval( $wgJobRunRate );
	}

	if ( !wfShellExecDisabled() && is_executable( $wgPhpCli ) ) {
		// Start a background process to run some of the jobs
		wfProfileIn( __METHOD__ . '-exec' );
		$retVal = 1;
		$cmd = wfShellWikiCmd( "$IP/maintenance/runJobs.php", array( '--maxjobs', $n ) );
		$cmd .= " >" . wfGetNull() . " 2>&1"; // don't hang PHP on pipes
		if ( wfIsWindows() ) {
			// Using START makes this async and also works around a bug where using
			// wfShellExec() with a quoted script name causes a filename syntax error.
			$cmd = "START /B \"bg\" $cmd";
		} else {
			// POSIX: detach with '&' so the request does not wait for the runner
			$cmd = "$cmd &";
		}
		wfShellExec( $cmd, $retVal );
		wfProfileOut( __METHOD__ . '-exec' );
	} else {
		try {
			// Fallback to running the jobs here while the user waits
			$group = JobQueueGroup::singleton();
			do {
				$job = $group->pop( JobQueueGroup::USE_CACHE ); // job from any queue
				if ( $job ) {
					$output = $job->toString() . "\n";
					// Negative start time trick: $t becomes elapsed time after +=
					$t = - microtime( true );
					wfProfileIn( __METHOD__ . '-' . get_class( $job ) );
					$success = $job->run();
					wfProfileOut( __METHOD__ . '-' . get_class( $job ) );
					$group->ack( $job ); // done
					$t += microtime( true );
					$t = round( $t * 1000 ); // run time in milliseconds
					if ( $success === false ) {
						$output .= "Error: " . $job->getLastError() . ", Time: $t ms\n";
					} else {
						$output .= "Success, Time: $t ms\n";
					}
					wfDebugLog( 'jobqueue', $output );
				}
			} while ( --$n && $job );
		} catch ( MWException $e ) {
			// We don't want exceptions thrown during job execution to
			// be reported to the user since the output is already sent.
			// Instead we just log them.
			MWExceptionHandler::logException( $e );
		}
	}
}
/**
 * Execute any due periodic queue maintenance tasks for all queues.
 *
 * A task is "due" if the time elapsed since its last recorded run exceeds
 * the task's configured period. Concurrent calls to this function may cause
 * tasks to be attempted twice, so tasks may need their own methods of
 * mutual exclusion. Last-run timestamps are tracked in memcached and merged
 * so concurrent runners only ever advance them forward.
 *
 * @return int Number of tasks run
 */
public function executeReadyPeriodicTasks() {
	global $wgMemc;

	list($db, $prefix) = wfSplitWikiID($this->wiki);
	$cacheKey = wfForeignMemcKey($db, $prefix, 'jobqueuegroup', 'taskruns', 'v1');
	$lastRuns = $wgMemc->get($cacheKey); // (queue => task => UNIX timestamp)

	$ranCount = 0;
	$justRan = array(); // (queue => task => UNIX timestamp)
	foreach ($this->getQueueTypes() as $queueType) {
		$queue = $this->get($queueType);
		foreach ($queue->getPeriodicTasks() as $taskName => $taskDef) {
			if ($taskDef['period'] <= 0) {
				continue; // task disabled
			}
			$isDue = !isset($lastRuns[$queueType][$taskName])
				|| $lastRuns[$queueType][$taskName] < time() - $taskDef['period'];
			if (!$isDue) {
				continue; // ran recently enough
			}
			try {
				// A null return means the callback declined to run
				if (call_user_func($taskDef['callback']) !== null) {
					$justRan[$queueType][$taskName] = time();
					++$ranCount;
				}
			} catch (JobQueueError $e) {
				MWExceptionHandler::logException($e);
			}
		}
	}

	if ($ranCount === 0) {
		return $ranCount; // nothing to update
	}

	// Merge our timestamps into the shared record, keeping the newest value
	// per task so concurrent runners never move a timestamp backwards.
	$wgMemc->merge($cacheKey, function ($cache, $key, $stored) use ($justRan) {
		if (!is_array($stored)) {
			return $justRan;
		}
		foreach ($justRan as $queueType => $tasks) {
			foreach ($tasks as $taskName => $ts) {
				if (!isset($stored[$queueType][$taskName]) || $ts > $stored[$queueType][$taskName]) {
					$stored[$queueType][$taskName] = $ts;
				}
			}
		}
		return $stored;
	});

	return $ranCount;
}
/**
 * Formats a notification.
 *
 * @param $event EchoEvent that the notification is for.
 * @param $user User to format the notification for.
 * @param $format string The format to show the notification in: text, html, or email
 * @param $type string The type of notification being distributed (e.g. email, web)
 * @return string|array The formatted notification, or an array of subject
 *   and body (for emails), or an error message
 */
public static function formatNotification($event, $user, $format = 'text', $type = 'web') {
	global $wgEchoNotifications;

	$eventType = $event->getType();

	$res = '';
	if (isset($wgEchoNotifications[$eventType])) {
		// Trap PHP errors raised by the formatter so one bad notification
		// cannot take down the whole page; restored below.
		set_error_handler(array(__CLASS__, 'formatterErrorHandler'), -1);
		try {
			$params = $wgEchoNotifications[$eventType];
			$notifier = EchoNotificationFormatter::factory($params);
			$notifier->setOutputFormat($format);
			$res = $notifier->format($event, $user, $type);
		} catch (Exception $e) {
			// Log enough context to identify the failing event; $res stays
			// empty so the generic error span below is returned.
			$meta = array(
				'id' => $event->getId(),
				'eventType' => $eventType,
				'format' => $format,
				'type' => $type,
				'user' => $user ? $user->getName() : 'no user',
			);
			wfDebugLog(__CLASS__, __FUNCTION__ . ": Error formatting " . FormatJson::encode($meta));
			MWExceptionHandler::logException($e);
		}
		restore_error_handler();
	} else {
		// No formatter is configured for this event type
		$res = 'whoops - ' . $eventType;
	}

	if ($res) {
		return $res;
	} else {
		return Xml::tags('span', array('class' => 'error'), wfMessage('echo-error-no-formatter', $event->getType())->escaped());
	}
}
/**
 * Issue ROLLBACK only on master, only if queries were done on connection.
 *
 * Walks every tracked master connection and rolls back any open transaction
 * with pending writes or callbacks. Rollback failures are logged and
 * collected; if any server failed, a single DBExpectedError naming all
 * failed servers is thrown after the loop completes.
 *
 * @since 1.23
 */
public function rollbackMasterChanges() {
	$brokenServers = array();

	$writerIndex = $this->getWriterIndex();
	foreach ($this->mConns as $connsByServer) {
		if (empty($connsByServer[$writerIndex])) {
			continue; // no master connections in this group
		}
		/** @var DatabaseBase $conn */
		foreach ($connsByServer[$writerIndex] as $conn) {
			if (!$conn->trxLevel() || !$conn->writesOrCallbacksPending()) {
				continue; // nothing to roll back on this connection
			}
			try {
				$conn->rollback(__METHOD__, 'flush');
			} catch (DBError $e) {
				MWExceptionHandler::logException($e);
				$brokenServers[] = $conn->getServer();
			}
		}
	}

	if ($brokenServers) {
		throw new DBExpectedError(null, "Rollback failed on server(s) " . implode(', ', array_unique($brokenServers)));
	}
}
/**
 * Do any deferred updates and clear the list.
 *
 * Loops until no new updates have been queued, partitioning each batch into
 * DataUpdate instances (delegated to DataUpdate::runUpdates) and all other
 * DeferrableUpdate instances (run directly here).
 *
 * @param string $commit Set to 'commit' to commit after every update to
 *   prevent lock contention
 * @param string $mode Use "enqueue" to use the job queue when possible [Default: run]
 */
public static function doUpdates($commit = '', $mode = 'run') {
	$pending = self::$updates;

	while (count($pending)) {
		self::clearPendingUpdates();

		/** @var DataUpdate[] $dataUpdates */
		$dataUpdates = array();
		/** @var DeferrableUpdate[] $miscUpdates */
		$miscUpdates = array();
		foreach ($pending as $update) {
			if ($update instanceof DataUpdate) {
				$dataUpdates[] = $update;
			} else {
				$miscUpdates[] = $update;
			}
		}

		// Delegate DataUpdate execution to the DataUpdate class
		DataUpdate::runUpdates($dataUpdates, $mode);

		// Execute the non-DataUpdate tasks
		foreach ($miscUpdates as $update) {
			try {
				$update->doUpdate();
				if ($commit === 'commit') {
					wfGetLBFactory()->commitMasterChanges();
				}
			} catch (Exception $e) {
				// We don't want exceptions thrown during deferred updates to
				// be reported to the user since the output is already sent.
				// Instead we just log them.
				if (!$e instanceof ErrorPageError) {
					MWExceptionHandler::logException($e);
				}
			}
		}

		// Updates may have queued further updates; pick them up on the next pass
		$pending = self::$updates;
	}
}
/**
 * Generate code for a response.
 *
 * Builds the combined JS/CSS payload for the requested modules, appends
 * mw.loader state updates where appropriate, and minifies the result
 * unless the request is in debug mode.
 *
 * @param ResourceLoaderContext $context Context in which to generate a response
 * @param array $modules List of module objects keyed by module name
 * @param array $missing List of requested module names that are unregistered (optional)
 * @return string Response data
 */
public function makeModuleResponse(ResourceLoaderContext $context, array $modules, array $missing = array()) {
	$out = '';
	$states = array();

	// Nothing requested at all: emit an explanatory comment instead of empty output
	if (!count($modules) && !count($missing)) {
		return <<<MESSAGE
/* This file is the Web entry point for MediaWiki's ResourceLoader:
   <https://www.mediawiki.org/wiki/ResourceLoader>. In this request,
   no modules were requested. Max made me put this here. */
MESSAGE;
	}

	// Image requests short-circuit the normal module packaging below
	$image = $context->getImageObj();
	if ($image) {
		$data = $image->getImageData($context);
		if ($data === false) {
			$data = '';
			$this->errors[] = 'Image generation failed';
		}
		return $data;
	}

	// Pre-fetch blobs
	if ($context->shouldIncludeMessages()) {
		try {
			$this->blobStore->get($this, $modules, $context->getLanguage());
		} catch (Exception $e) {
			MWExceptionHandler::logException($e);
			$this->logger->warning('Prefetching MessageBlobStore failed: {exception}', array('exception' => $e));
			$this->errors[] = self::formatExceptionNoComment($e);
		}
	}

	// Mark unregistered modules so the client does not retry them
	foreach ($missing as $name) {
		$states[$name] = 'missing';
	}

	// Generate output
	$isRaw = false;
	foreach ($modules as $name => $module) {
		try {
			$content = $module->getModuleContent($context);

			// Append output
			switch ($context->getOnly()) {
				case 'scripts':
					$scripts = $content['scripts'];
					if (is_string($scripts)) {
						// Load scripts raw...
						$out .= $scripts;
					} elseif (is_array($scripts)) {
						// ...except when $scripts is an array of URLs
						$out .= self::makeLoaderImplementScript($name, $scripts, array(), array());
					}
					break;
				case 'styles':
					$styles = $content['styles'];
					// We no longer seperate into media, they are all combined now with
					// custom media type groups into @media .. {} sections as part of the css string.
					// Module returns either an empty array or a numerical array with css strings.
					$out .= isset($styles['css']) ? implode('', $styles['css']) : '';
					break;
				default:
					// Full implement call: scripts, styles, messages and templates,
					// with safe fallbacks for any piece the module did not produce
					$out .= self::makeLoaderImplementScript(
						$name,
						isset($content['scripts']) ? $content['scripts'] : '',
						isset($content['styles']) ? $content['styles'] : array(),
						isset($content['messagesBlob']) ? new XmlJsCode($content['messagesBlob']) : array(),
						isset($content['templates']) ? $content['templates'] : array()
					);
					break;
			}
		} catch (Exception $e) {
			MWExceptionHandler::logException($e);
			$this->logger->warning('Generating module package failed: {exception}', array('exception' => $e));
			$this->errors[] = self::formatExceptionNoComment($e);

			// Respond to client with error-state instead of module implementation
			$states[$name] = 'error';
			unset($modules[$name]);
		}
		$isRaw |= $module->isRaw();
	}

	// Update module states
	if ($context->shouldIncludeScripts() && !$context->getRaw() && !$isRaw) {
		if (count($modules) && $context->getOnly() === 'scripts') {
			// Set the state of modules loaded as only scripts to ready as
			// they don't have an mw.loader.implement wrapper that sets the state
			foreach ($modules as $name => $module) {
				$states[$name] = 'ready';
			}
		}

		// Set the state of modules we didn't respond to with mw.loader.implement
		if (count($states)) {
			$out .= self::makeLoaderStateScript($states);
		}
	} else {
		// Raw/script-less response: states can't be sent, record them as errors
		if (count($states)) {
			$this->errors[] = 'Problematic modules: ' . FormatJson::encode($states, ResourceLoader::inDebugMode());
		}
	}

	$enableFilterCache = true;
	if (count($modules) === 1 && reset($modules) instanceof ResourceLoaderUserTokensModule) {
		// If we're building the embedded user.tokens, don't cache (T84960)
		$enableFilterCache = false;
	}

	// Minify the combined response unless in debug mode
	if (!$context->getDebug()) {
		if ($context->getOnly() === 'styles') {
			$out = $this->filter('minify-css', $out);
		} else {
			$out = $this->filter('minify-js', $out, array('cache' => $enableFilterCache));
		}
	}

	return $out;
}
/**
 * Run a single popped job, commit its DB changes, and record stats.
 *
 * On exception the master changes are rolled back and the job is marked
 * failed; the caller decides whether to ack or retry based on the
 * returned status.
 *
 * @param Job $job
 * @param BufferingStatsdDataFactory $stats
 * @param float $popTime UNIX timestamp when the job was popped from the queue
 * @return array Map of status/error/timeMs
 */
private function executeJob(Job $job, $stats, $popTime) {
	$jType = $job->getType();
	$msg = $job->toString() . " STARTING";
	$this->logger->debug($msg);
	$this->debugCallback($msg);

	// Run the job...
	$rssStart = $this->getMaxRssKb();
	$jobStartTime = microtime(true);
	try {
		$status = $job->run();
		$error = $job->getLastError();
		// Commit, flush deferred updates those writes queued, then commit again
		$this->commitMasterChanges($job);
		DeferredUpdates::doUpdates();
		$this->commitMasterChanges($job);
	} catch (Exception $e) {
		// Roll back any half-done writes and report the job as failed
		MWExceptionHandler::rollbackMasterChangesAndLog($e);
		$status = false;
		$error = get_class($e) . ': ' . $e->getMessage();
		MWExceptionHandler::logException($e);
	}
	// Commit all outstanding connections that are in a transaction
	// to get a fresh repeatable read snapshot on every connection.
	// Note that jobs are still responsible for handling slave lag.
	wfGetLBFactory()->commitAll(__METHOD__);
	// Clear out title cache data from prior snapshots
	LinkCache::singleton()->clear();
	$timeMs = intval((microtime(true) - $jobStartTime) * 1000);
	$rssEnd = $this->getMaxRssKb();

	// Record how long jobs wait before getting popped
	$readyTs = $job->getReadyTimestamp();
	if ($readyTs) {
		// max(0, ...) guards against clock skew producing a negative delay
		$pickupDelay = max(0, $popTime - $readyTs);
		$stats->timing('jobqueue.pickup_delay.all', 1000 * $pickupDelay);
		$stats->timing("jobqueue.pickup_delay.{$jType}", 1000 * $pickupDelay);
	}
	// Record root job age for jobs being run
	$root = $job->getRootJobParams();
	if ($root['rootJobTimestamp']) {
		$age = max(0, $popTime - wfTimestamp(TS_UNIX, $root['rootJobTimestamp']));
		$stats->timing("jobqueue.pickup_root_age.{$jType}", 1000 * $age);
	}
	// Track the execution time for jobs
	$stats->timing("jobqueue.run.{$jType}", $timeMs);
	// Track RSS increases for jobs (in case of memory leaks)
	if ($rssStart && $rssEnd) {
		$stats->increment("jobqueue.rss_delta.{$jType}", $rssEnd - $rssStart);
	}

	if ($status === false) {
		$msg = $job->toString() . " t={$timeMs} error={$error}";
		$this->logger->error($msg);
		$this->debugCallback($msg);
	} else {
		$msg = $job->toString() . " t={$timeMs} good";
		$this->logger->info($msg);
		$this->debugCallback($msg);
	}

	return array('status' => $status, 'error' => $error, 'timeMs' => $timeMs);
}
/**
 * Run jobs of the specified number/type for the specified time
 *
 * The response map has a 'job' field that lists status of each job, including:
 *   - type : the job type
 *   - status : ok/failed
 *   - error : any error message string
 *   - time : the job run time in ms
 * The response map also has:
 *   - backoffs : the (job type => seconds) map of backoff times
 *   - elapsed : the total time spent running tasks in ms
 *   - reached : the reason the script finished, one of (none-ready, job-limit, time-limit)
 *
 * This method outputs status information only if a debug handler was set.
 * Any exceptions are caught and logged, but are not reported as output.
 *
 * @param array $options Map of parameters:
 *    - type : the job type (or false for the default types)
 *    - maxJobs : maximum number of jobs to run
 *    - maxTime : maximum time in seconds before stopping
 *    - throttle : whether to respect job backoff configuration
 * @return array Summary response that can easily be JSON serialized
 */
public function run(array $options) {
	global $wgJobClasses, $wgTrxProfilerLimits;

	$response = array('jobs' => array(), 'reached' => 'none-ready');

	$type = isset($options['type']) ? $options['type'] : false;
	$maxJobs = isset($options['maxJobs']) ? $options['maxJobs'] : false;
	$maxTime = isset($options['maxTime']) ? $options['maxTime'] : false;
	$noThrottle = isset($options['throttle']) && !$options['throttle'];

	if ($type !== false && !isset($wgJobClasses[$type])) {
		// Unknown job type was requested
		$response['reached'] = 'none-possible';
		return $response;
	}
	// Bail out if in read-only mode
	if (wfReadOnly()) {
		$response['reached'] = 'read-only';
		return $response;
	}
	// Catch huge single updates that lead to slave lag
	$trxProfiler = Profiler::instance()->getTransactionProfiler();
	$trxProfiler->setLogger(LoggerFactory::getInstance('DBPerformance'));
	$trxProfiler->setExpectations($wgTrxProfilerLimits['JobRunner'], __METHOD__);
	// Bail out if there is too much DB lag.
	// This check should not block as we want to try other wiki queues.
	$maxAllowedLag = 3;
	list(, $maxLag) = wfGetLB(wfWikiID())->getMaxLag();
	if ($maxLag >= $maxAllowedLag) {
		$response['reached'] = 'slave-lag-limit';
		return $response;
	}

	$group = JobQueueGroup::singleton();

	// Flush any pending DB writes for sanity
	wfGetLBFactory()->commitAll();

	// Some jobs types should not run until a certain timestamp
	$backoffs = array(); // map of (type => UNIX expiry)
	$backoffDeltas = array(); // map of (type => seconds)
	$wait = 'wait'; // block to read backoffs the first time

	$stats = RequestContext::getMain()->getStats();
	$jobsPopped = 0;
	$timeMsTotal = 0;
	$flags = JobQueueGroup::USE_CACHE;
	$startTime = microtime(true); // time since jobs started running
	$checkLagPeriod = 1.0; // check slave lag this many seconds
	$lastCheckTime = 1; // timestamp of last slave check
	do {
		// Sync the persistent backoffs with concurrent runners
		$backoffs = $this->syncBackoffDeltas($backoffs, $backoffDeltas, $wait);
		$blacklist = $noThrottle ? array() : array_keys($backoffs);
		$wait = 'nowait'; // less important now

		if ($type === false) {
			$job = $group->pop(JobQueueGroup::TYPE_DEFAULT, $flags, $blacklist);
		} elseif (in_array($type, $blacklist)) {
			$job = false; // requested queue in backoff state
		} else {
			$job = $group->pop($type); // job from a single queue
		}

		if ($job) { // found a job
			$popTime = time();
			$jType = $job->getType();

			// Back off of certain jobs for a while (for throttling and for errors)
			$ttw = $this->getBackoffTimeToWait($job);
			if ($ttw > 0) {
				// Always add the delta for other runners in case the time running the
				// job negated the backoff for each individually but not collectively.
				$backoffDeltas[$jType] = isset($backoffDeltas[$jType]) ? $backoffDeltas[$jType] + $ttw : $ttw;
				$backoffs = $this->syncBackoffDeltas($backoffs, $backoffDeltas, $wait);
			}

			$msg = $job->toString() . " STARTING";
			$this->logger->debug($msg);
			$this->debugCallback($msg);

			// Run the job...
			$jobStartTime = microtime(true);
			try {
				++$jobsPopped;
				$status = $job->run();
				$error = $job->getLastError();
				// Commit job writes, flush deferred updates, then commit those too
				$this->commitMasterChanges($job);
				DeferredUpdates::doUpdates();
				$this->commitMasterChanges($job);
			} catch (Exception $e) {
				MWExceptionHandler::rollbackMasterChangesAndLog($e);
				$status = false;
				$error = get_class($e) . ': ' . $e->getMessage();
				MWExceptionHandler::logException($e);
			}
			// Commit all outstanding connections that are in a transaction
			// to get a fresh repeatable read snapshot on every connection.
			// Note that jobs are still responsible for handling slave lag.
			wfGetLBFactory()->commitAll();
			// Clear out title cache data from prior snapshots
			LinkCache::singleton()->clear();
			$timeMs = intval((microtime(true) - $jobStartTime) * 1000);
			$timeMsTotal += $timeMs;

			// Record how long jobs wait before getting popped
			$readyTs = $job->getReadyTimestamp();
			if ($readyTs) {
				$pickupDelay = $popTime - $readyTs;
				$stats->timing('jobqueue.pickup_delay.all', 1000 * $pickupDelay);
				$stats->timing("jobqueue.pickup_delay.{$jType}", 1000 * $pickupDelay);
			}
			// Record root job age for jobs being run
			$root = $job->getRootJobParams();
			if ($root['rootJobTimestamp']) {
				$age = $popTime - wfTimestamp(TS_UNIX, $root['rootJobTimestamp']);
				$stats->timing("jobqueue.pickup_root_age.{$jType}", 1000 * $age);
			}
			// Track the execution time for jobs
			$stats->timing("jobqueue.run.{$jType}", $timeMs);

			// Mark the job as done on success or when the job cannot be retried
			if ($status !== false || !$job->allowRetries()) {
				$group->ack($job); // done
			}

			// Back off of certain jobs for a while (for throttling and for errors)
			if ($status === false && mt_rand(0, 49) == 0) {
				$ttw = max($ttw, 30); // too many errors
				$backoffDeltas[$jType] = isset($backoffDeltas[$jType]) ? $backoffDeltas[$jType] + $ttw : $ttw;
			}

			if ($status === false) {
				$msg = $job->toString() . " t={$timeMs} error={$error}";
				$this->logger->error($msg);
				$this->debugCallback($msg);
			} else {
				$msg = $job->toString() . " t={$timeMs} good";
				$this->logger->info($msg);
				$this->debugCallback($msg);
			}

			$response['jobs'][] = array('type' => $jType, 'status' => $status === false ? 'failed' : 'ok', 'error' => $error, 'time' => $timeMs);

			// Break out if we hit the job count or wall time limits...
			if ($maxJobs && $jobsPopped >= $maxJobs) {
				$response['reached'] = 'job-limit';
				break;
			} elseif ($maxTime && microtime(true) - $startTime > $maxTime) {
				$response['reached'] = 'time-limit';
				break;
			}

			// Don't let any of the main DB slaves get backed up.
			// This only waits for so long before exiting and letting
			// other wikis in the farm (on different masters) get a chance.
			$timePassed = microtime(true) - $lastCheckTime;
			if ($timePassed >= $checkLagPeriod || $timePassed < 0) {
				if (!wfWaitForSlaves($lastCheckTime, false, '*', $maxAllowedLag)) {
					$response['reached'] = 'slave-lag-limit';
					break;
				}
				$lastCheckTime = microtime(true);
			}
			// Don't let any queue slaves/backups fall behind
			if ($jobsPopped > 0 && $jobsPopped % 100 == 0) {
				$group->waitForBackups();
			}

			// Bail if near-OOM instead of in a job
			if (!$this->checkMemoryOK()) {
				$response['reached'] = 'memory-limit';
				break;
			}
		}
	} while ($job); // stop when there are no jobs

	// Sync the persistent backoffs for the next runJobs.php pass
	if ($backoffDeltas) {
		$this->syncBackoffDeltas($backoffs, $backoffDeltas, 'wait');
	}

	$response['backoffs'] = $backoffs;
	$response['elapsed'] = $timeMsTotal;

	return $response;
}
/**
 * Inform the bloom filter of a new member in order to keep it up to date.
 *
 * Members are namespaced under "{$domain}:{$type}" before being added to
 * the shared filter. If no "BloomFilter{$type}" class provides
 * mergeAndCheck(), the call is a no-op that reports success.
 *
 * @param string $domain
 * @param string $type
 * @param string|array $members
 * @return bool Success
 */
public final function insert($domain, $type, $members) {
	$section = new ProfileSection(get_class($this) . '::' . __FUNCTION__);

	if (!method_exists("BloomFilter{$type}", 'mergeAndCheck')) {
		return true; // nothing to track for this type
	}

	try {
		$virtualKey = "{$domain}:{$type}";
		$namespaced = array();
		foreach ((array) $members as $member) {
			$namespaced[] = "{$virtualKey}:{$member}";
		}
		return $this->add('shared', $namespaced);
	} catch (MWException $e) {
		MWExceptionHandler::logException($e);
		return false;
	}
}