public function run() {
    /** @noinspection PhpUnusedLocalVariableInspection */
    $scope = RequestContext::importScopedSession($this->params['session']);
    $context = RequestContext::getMain();
    $user = $context->getUser();
    try {
        if (!$user->isLoggedIn()) {
            $this->setLastError("Could not load the author user from session.");

            return false;
        }

        UploadBase::setSessionStatus(
            $user,
            $this->params['filekey'],
            array('result' => 'Poll', 'stage' => 'publish', 'status' => Status::newGood())
        );

        $upload = new UploadFromStash($user);
        // @todo initialize() causes a GET, ideally we could frontload the antivirus
        // checks and anything else to the stash stage (which includes concatenation and
        // the local file is thus already there). That way, instead of GET+PUT, there could
        // just be a COPY operation from the stash to the public zone.
        $upload->initialize($this->params['filekey'], $this->params['filename']);

        // Check if the local file checks out (this is generally a no-op)
        $verification = $upload->verifyUpload();
        if ($verification['status'] !== UploadBase::OK) {
            $status = Status::newFatal('verification-error');
            $status->value = array('verification' => $verification);
            UploadBase::setSessionStatus(
                $user,
                $this->params['filekey'],
                array('result' => 'Failure', 'stage' => 'publish', 'status' => $status)
            );
            $this->setLastError("Could not verify upload.");

            return false;
        }

        // Upload the stashed file to a permanent location
        $status = $upload->performUpload(
            $this->params['comment'],
            $this->params['text'],
            $this->params['watch'],
            $user
        );
        if (!$status->isGood()) {
            UploadBase::setSessionStatus(
                $user,
                $this->params['filekey'],
                array('result' => 'Failure', 'stage' => 'publish', 'status' => $status)
            );
            $this->setLastError($status->getWikiText());

            return false;
        }

        // Build the image info array while we have the local reference handy
        $apiMain = new ApiMain(); // dummy object (XXX)
        $imageInfo = $upload->getImageInfo($apiMain->getResult());

        // Cleanup any temporary local file
        $upload->cleanupTempFile();

        // Cache the info so the user doesn't have to wait forever to get the final info
        UploadBase::setSessionStatus(
            $user,
            $this->params['filekey'],
            array(
                'result' => 'Success',
                'stage' => 'publish',
                'filename' => $upload->getLocalFile()->getName(),
                'imageinfo' => $imageInfo,
                'status' => Status::newGood()
            )
        );
    } catch (Exception $e) {
        UploadBase::setSessionStatus(
            $user,
            $this->params['filekey'],
            array(
                'result' => 'Failure',
                'stage' => 'publish',
                'status' => Status::newFatal('api-error-publishfailed')
            )
        );
        $this->setLastError(get_class($e) . ": " . $e->getMessage());
        // To prevent potential database referential integrity issues.
        // See bug 32551.
        MWExceptionHandler::rollbackMasterChangesAndLog($e);

        return false;
    }

    return true;
}
public function run() {
    $scope = RequestContext::importScopedSession($this->params['session']);
    $context = RequestContext::getMain();
    try {
        $user = $context->getUser();
        if (!$user->isLoggedIn()) {
            $this->setLastError("Could not load the author user from session.");

            return false;
        }

        if (count($_SESSION) === 0) {
            // Empty session probably indicates that we didn't associate
            // with the session correctly. Note that being able to load
            // the user does not necessarily mean the session was loaded.
            // Most likely caused by suhosin.session.encrypt = On.
            $this->setLastError("Error associating with user session. " .
                "Try setting suhosin.session.encrypt = Off");

            return false;
        }

        UploadBase::setSessionStatus(
            $this->params['filekey'],
            array('result' => 'Poll', 'stage' => 'assembling', 'status' => Status::newGood())
        );

        $upload = new UploadFromChunks($user);
        $upload->continueChunks(
            $this->params['filename'],
            $this->params['filekey'],
            $context->getRequest()
        );

        // Combine all of the chunks into a local file and upload that to a new stash file
        $status = $upload->concatenateChunks();
        if (!$status->isGood()) {
            UploadBase::setSessionStatus(
                $this->params['filekey'],
                array('result' => 'Failure', 'stage' => 'assembling', 'status' => $status)
            );
            $this->setLastError($status->getWikiText());

            return false;
        }

        // We have a new filekey for the fully concatenated file
        $newFileKey = $upload->getLocalFile()->getFileKey();

        // Remove the old stash file row and first chunk file
        $upload->stash->removeFileNoAuth($this->params['filekey']);

        // Build the image info array while we have the local reference handy
        $apiMain = new ApiMain(); // dummy object (XXX)
        $imageInfo = $upload->getImageInfo($apiMain->getResult());

        // Cleanup any temporary local file
        $upload->cleanupTempFile();

        // Cache the info so the user doesn't have to wait forever to get the final info
        UploadBase::setSessionStatus(
            $this->params['filekey'],
            array(
                'result' => 'Success',
                'stage' => 'assembling',
                'filekey' => $newFileKey,
                'imageinfo' => $imageInfo,
                'status' => Status::newGood()
            )
        );
    } catch (MWException $e) {
        UploadBase::setSessionStatus(
            $this->params['filekey'],
            array(
                'result' => 'Failure',
                'stage' => 'assembling',
                'status' => Status::newFatal('api-error-stashfailed')
            )
        );
        $this->setLastError(get_class($e) . ": " . $e->getText());
        // To be extra robust.
        MWExceptionHandler::rollbackMasterChangesAndLog($e);

        return false;
    }

    return true;
}
public function run() {
    $scope = RequestContext::importScopedSession($this->params['session']);
    $this->addTeardownCallback(function () use (&$scope) {
        ScopedCallback::consume($scope); // T126450
    });

    $context = RequestContext::getMain();
    $user = $context->getUser();
    try {
        if (!$user->isLoggedIn()) {
            $this->setLastError("Could not load the author user from session.");

            return false;
        }

        UploadBase::setSessionStatus(
            $user,
            $this->params['filekey'],
            ['result' => 'Poll', 'stage' => 'assembling', 'status' => Status::newGood()]
        );

        $upload = new UploadFromChunks($user);
        $upload->continueChunks(
            $this->params['filename'],
            $this->params['filekey'],
            new WebRequestUpload($context->getRequest(), 'null')
        );

        // Combine all of the chunks into a local file and upload that to a new stash file
        $status = $upload->concatenateChunks();
        if (!$status->isGood()) {
            UploadBase::setSessionStatus(
                $user,
                $this->params['filekey'],
                ['result' => 'Failure', 'stage' => 'assembling', 'status' => $status]
            );
            $this->setLastError($status->getWikiText(false, false, 'en'));

            return false;
        }

        // We can only get warnings like 'duplicate' after concatenating the chunks
        $status = Status::newGood();
        $status->value = ['warnings' => $upload->checkWarnings()];

        // We have a new filekey for the fully concatenated file
        $newFileKey = $upload->getStashFile()->getFileKey();

        // Remove the old stash file row and first chunk file
        $upload->stash->removeFileNoAuth($this->params['filekey']);

        // Build the image info array while we have the local reference handy
        $apiMain = new ApiMain(); // dummy object (XXX)
        $imageInfo = $upload->getImageInfo($apiMain->getResult());

        // Cleanup any temporary local file
        $upload->cleanupTempFile();

        // Cache the info so the user doesn't have to wait forever to get the final info
        UploadBase::setSessionStatus(
            $user,
            $this->params['filekey'],
            [
                'result' => 'Success',
                'stage' => 'assembling',
                'filekey' => $newFileKey,
                'imageinfo' => $imageInfo,
                'status' => $status
            ]
        );
    } catch (Exception $e) {
        UploadBase::setSessionStatus(
            $user,
            $this->params['filekey'],
            [
                'result' => 'Failure',
                'stage' => 'assembling',
                'status' => Status::newFatal('api-error-stashfailed')
            ]
        );
        $this->setLastError(get_class($e) . ": " . $e->getMessage());
        // To be extra robust.
        MWExceptionHandler::rollbackMasterChangesAndLog($e);

        return false;
    }

    return true;
}
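/*
 * For context: each of the jobs above reports progress through UploadBase::setSessionStatus(),
 * and the API layer reads that status back when the client polls. Below is a minimal sketch
 * of the read side, assuming the two-argument UploadBase::getSessionStatus($user, $statusKey)
 * signature used by the newer jobs; the function name is hypothetical, and the array shape
 * mirrors exactly what the jobs above write.
 */
function pollUploadStatus(User $user, $filekey) {
    // Read whatever the job last wrote for this filekey (null/false if nothing yet)
    $progress = UploadBase::getSessionStatus($user, $filekey);
    if (!$progress) {
        return 'unknown'; // the job has not reported yet
    }
    // 'result' is one of 'Poll', 'Success', or 'Failure', as set by the jobs above
    if ($progress['result'] === 'Poll') {
        return "in progress (stage: {$progress['stage']})"; // e.g. 'assembling' or 'publish'
    }

    return ($progress['result'] === 'Success') ? 'done' : 'failed';
}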
/**
 * Run jobs of the specified number/type for the specified time
 *
 * The response map has a 'jobs' field that lists the status of each job, including:
 *   - type   : the job type
 *   - status : ok/failed
 *   - error  : any error message string
 *   - time   : the job run time in ms
 * The response map also has:
 *   - backoffs : the (job type => seconds) map of backoff times
 *   - elapsed  : the total time spent running tasks in ms
 *   - reached  : the reason the script finished, one of (none-ready, none-possible,
 *                read-only, slave-lag-limit, job-limit, time-limit, memory-limit)
 *
 * This method outputs status information only if a debug handler was set.
 * Any exceptions are caught and logged, but are not reported as output.
 *
 * @param array $options Map of parameters:
 *    - type     : the job type (or false for the default types)
 *    - maxJobs  : maximum number of jobs to run
 *    - maxTime  : maximum time in seconds before stopping
 *    - throttle : whether to respect job backoff configuration
 * @return array Summary response that can easily be JSON serialized
 */
public function run(array $options) {
    global $wgJobClasses, $wgTrxProfilerLimits;

    $response = array('jobs' => array(), 'reached' => 'none-ready');

    $type = isset($options['type']) ? $options['type'] : false;
    $maxJobs = isset($options['maxJobs']) ? $options['maxJobs'] : false;
    $maxTime = isset($options['maxTime']) ? $options['maxTime'] : false;
    $noThrottle = isset($options['throttle']) && !$options['throttle'];

    if ($type !== false && !isset($wgJobClasses[$type])) {
        $response['reached'] = 'none-possible';
        return $response;
    }

    // Bail out if in read-only mode
    if (wfReadOnly()) {
        $response['reached'] = 'read-only';
        return $response;
    }

    // Catch huge single updates that lead to slave lag
    $trxProfiler = Profiler::instance()->getTransactionProfiler();
    $trxProfiler->setLogger(LoggerFactory::getInstance('DBPerformance'));
    $trxProfiler->setExpectations($wgTrxProfilerLimits['JobRunner'], __METHOD__);

    // Bail out if there is too much DB lag.
    // This check should not block as we want to try other wiki queues.
    $maxAllowedLag = 3;
    list(, $maxLag) = wfGetLB(wfWikiID())->getMaxLag();
    if ($maxLag >= $maxAllowedLag) {
        $response['reached'] = 'slave-lag-limit';
        return $response;
    }

    $group = JobQueueGroup::singleton();

    // Flush any pending DB writes for sanity
    wfGetLBFactory()->commitAll();

    // Some job types should not run until a certain timestamp
    $backoffs = array(); // map of (type => UNIX expiry)
    $backoffDeltas = array(); // map of (type => seconds)
    $wait = 'wait'; // block to read backoffs the first time

    $stats = RequestContext::getMain()->getStats();
    $jobsPopped = 0;
    $timeMsTotal = 0;
    $flags = JobQueueGroup::USE_CACHE;
    $startTime = microtime(true); // time since jobs started running
    $checkLagPeriod = 1.0; // check slave lag every this many seconds
    $lastCheckTime = 1; // timestamp of last slave check (epoch value forces an initial check)
    do {
        // Sync the persistent backoffs with concurrent runners
        $backoffs = $this->syncBackoffDeltas($backoffs, $backoffDeltas, $wait);
        $blacklist = $noThrottle ? array() : array_keys($backoffs);
        $wait = 'nowait'; // less important now

        if ($type === false) {
            $job = $group->pop(JobQueueGroup::TYPE_DEFAULT, $flags, $blacklist);
        } elseif (in_array($type, $blacklist)) {
            $job = false; // requested queue in backoff state
        } else {
            $job = $group->pop($type); // job from a single queue
        }

        if ($job) { // found a job
            $popTime = time();
            $jType = $job->getType();

            // Back off of certain jobs for a while (for throttling and for errors)
            $ttw = $this->getBackoffTimeToWait($job);
            if ($ttw > 0) {
                // Always add the delta for other runners in case the time running the
                // job negated the backoff for each individually but not collectively.
                $backoffDeltas[$jType] = isset($backoffDeltas[$jType])
                    ? $backoffDeltas[$jType] + $ttw
                    : $ttw;
                $backoffs = $this->syncBackoffDeltas($backoffs, $backoffDeltas, $wait);
            }

            $msg = $job->toString() . " STARTING";
            $this->logger->debug($msg);
            $this->debugCallback($msg);

            // Run the job...
            $jobStartTime = microtime(true);
            try {
                ++$jobsPopped;
                $status = $job->run();
                $error = $job->getLastError();
                $this->commitMasterChanges($job);

                DeferredUpdates::doUpdates();
                $this->commitMasterChanges($job);
            } catch (Exception $e) {
                MWExceptionHandler::rollbackMasterChangesAndLog($e);
                $status = false;
                $error = get_class($e) . ': ' . $e->getMessage();
                MWExceptionHandler::logException($e);
            }
            // Commit all outstanding connections that are in a transaction
            // to get a fresh repeatable read snapshot on every connection.
            // Note that jobs are still responsible for handling slave lag.
            wfGetLBFactory()->commitAll();
            // Clear out title cache data from prior snapshots
            LinkCache::singleton()->clear();
            $timeMs = intval((microtime(true) - $jobStartTime) * 1000);
            $timeMsTotal += $timeMs;

            // Record how long jobs wait before getting popped
            $readyTs = $job->getReadyTimestamp();
            if ($readyTs) {
                $pickupDelay = $popTime - $readyTs;
                $stats->timing('jobqueue.pickup_delay.all', 1000 * $pickupDelay);
                $stats->timing("jobqueue.pickup_delay.{$jType}", 1000 * $pickupDelay);
            }
            // Record root job age for jobs being run
            $root = $job->getRootJobParams();
            if ($root['rootJobTimestamp']) {
                $age = $popTime - wfTimestamp(TS_UNIX, $root['rootJobTimestamp']);
                $stats->timing("jobqueue.pickup_root_age.{$jType}", 1000 * $age);
            }
            // Track the execution time for jobs
            $stats->timing("jobqueue.run.{$jType}", $timeMs);

            // Mark the job as done on success or when the job cannot be retried
            if ($status !== false || !$job->allowRetries()) {
                $group->ack($job); // done
            }

            // Back off of certain jobs for a while (for throttling and for errors)
            if ($status === false && mt_rand(0, 49) == 0) {
                $ttw = max($ttw, 30); // too many errors
                $backoffDeltas[$jType] = isset($backoffDeltas[$jType])
                    ? $backoffDeltas[$jType] + $ttw
                    : $ttw;
            }

            if ($status === false) {
                $msg = $job->toString() . " t={$timeMs} error={$error}";
                $this->logger->error($msg);
                $this->debugCallback($msg);
            } else {
                $msg = $job->toString() . " t={$timeMs} good";
                $this->logger->info($msg);
                $this->debugCallback($msg);
            }

            $response['jobs'][] = array(
                'type' => $jType,
                'status' => ($status === false) ? 'failed' : 'ok',
                'error' => $error,
                'time' => $timeMs
            );

            // Break out if we hit the job count or wall time limits...
            if ($maxJobs && $jobsPopped >= $maxJobs) {
                $response['reached'] = 'job-limit';
                break;
            } elseif ($maxTime && (microtime(true) - $startTime) > $maxTime) {
                $response['reached'] = 'time-limit';
                break;
            }

            // Don't let any of the main DB slaves get backed up.
            // This only waits for so long before exiting and letting
            // other wikis in the farm (on different masters) get a chance.
            $timePassed = microtime(true) - $lastCheckTime;
            if ($timePassed >= $checkLagPeriod || $timePassed < 0) {
                if (!wfWaitForSlaves($lastCheckTime, false, '*', $maxAllowedLag)) {
                    $response['reached'] = 'slave-lag-limit';
                    break;
                }
                $lastCheckTime = microtime(true);
            }
            // Don't let any queue slaves/backups fall behind
            if ($jobsPopped > 0 && ($jobsPopped % 100) == 0) {
                $group->waitForBackups();
            }

            // Bail if near-OOM instead of in a job
            if (!$this->checkMemoryOK()) {
                $response['reached'] = 'memory-limit';
                break;
            }
        }
    } while ($job); // stop when there are no jobs

    // Sync the persistent backoffs for the next runJobs.php pass
    if ($backoffDeltas) {
        $this->syncBackoffDeltas($backoffs, $backoffDeltas, 'wait');
    }

    $response['backoffs'] = $backoffs;
    $response['elapsed'] = $timeMsTotal;

    return $response;
}
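/*
 * A minimal usage sketch for JobRunner::run(). The options map and the shape of the
 * returned summary come straight from the docblock above; passing a logger to the
 * constructor is an assumption about the JobRunner versions shown here.
 */
$runner = new JobRunner(LoggerFactory::getInstance('runJobs')); // logger arg assumed optional
$summary = $runner->run(array(
    'type' => false,    // run the default job types
    'maxJobs' => 100,   // stop after 100 jobs...
    'maxTime' => 30,    // ...or after 30 seconds of wall time
    'throttle' => true  // respect per-type backoff configuration
));
// The response is designed to be easy to JSON serialize for machine-readable output
print json_encode($summary) . "\n";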
/**
 * Handle an exception as an API response
 *
 * @since 1.23
 * @param Exception $e
 */
protected function handleException(Exception $e) {
    // Bug 63145: Rollback any open database transactions
    if (!($e instanceof UsageException)) {
        // UsageExceptions are intentional, so don't rollback if that's the case
        try {
            MWExceptionHandler::rollbackMasterChangesAndLog($e);
        } catch (DBError $e2) {
            // Rollback threw an exception too. Log it, but don't interrupt
            // our regularly scheduled exception handling.
            MWExceptionHandler::logException($e2);
        }
    }

    // Allow extra cleanup and logging
    Hooks::run('ApiMain::onException', array($this, $e));

    // Log it
    if (!($e instanceof UsageException)) {
        MWExceptionHandler::logException($e);
    }

    // Handle any kind of exception by outputting properly formatted error message.
    // If this fails, an unhandled exception should be thrown so that global error
    // handler will process and log it.
    $errCode = $this->substituteResultWithError($e);

    // Error results should not be cached
    $this->setCacheMode('private');

    $response = $this->getRequest()->response();
    $headerStr = 'MediaWiki-API-Error: ' . $errCode;
    if ($e->getCode() === 0) {
        $response->header($headerStr);
    } else {
        $response->header($headerStr, true, $e->getCode());
    }

    // Reset and print just the error message
    ob_clean();

    // Printer may not be initialized if the extractRequestParams() fails for the main module
    $this->createErrorPrinter();

    try {
        $this->printResult(true);
    } catch (UsageException $ex) {
        // The error printer itself is failing. Try suppressing its request
        // parameters and redo.
        $this->setWarning(
            'Error printer failed (will retry without params): ' . $ex->getMessage()
        );
        $this->mPrinter = null;
        $this->createErrorPrinter();
        $this->mPrinter->forceDefaultParams();
        $this->printResult(true);
    }
}
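/*
 * From a client's point of view, the effect of handleException() is a normally formatted
 * API payload plus a MediaWiki-API-Error response header. A minimal sketch of detecting it
 * with plain PHP; the endpoint URL and the error-triggering 'nosuchaction' parameter are
 * placeholders for illustration.
 */
$ctx = stream_context_create(array('http' => array('ignore_errors' => true)));
$body = file_get_contents(
    'https://example.org/w/api.php?action=nosuchaction&format=json', // hypothetical request
    false,
    $ctx
);
// $http_response_header is populated by the HTTP stream wrapper after the call above
foreach ($http_response_header as $header) {
    if (stripos($header, 'MediaWiki-API-Error:') === 0) {
        // e.g. "MediaWiki-API-Error: unknown_action" (the $errCode set above)
        echo "Got API error header: $header\n";
    }
}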
/**
 * @param Job $job
 * @param BufferingStatsdDataFactory $stats
 * @param float $popTime
 * @return array Map of status/error/timeMs
 */
private function executeJob(Job $job, $stats, $popTime) {
    $jType = $job->getType();
    $msg = $job->toString() . " STARTING";
    $this->logger->debug($msg);
    $this->debugCallback($msg);

    // Run the job...
    $rssStart = $this->getMaxRssKb();
    $jobStartTime = microtime(true);
    try {
        $status = $job->run();
        $error = $job->getLastError();
        $this->commitMasterChanges($job);

        DeferredUpdates::doUpdates();
        $this->commitMasterChanges($job);
    } catch (Exception $e) {
        MWExceptionHandler::rollbackMasterChangesAndLog($e);
        $status = false;
        $error = get_class($e) . ': ' . $e->getMessage();
        MWExceptionHandler::logException($e);
    }
    // Commit all outstanding connections that are in a transaction
    // to get a fresh repeatable read snapshot on every connection.
    // Note that jobs are still responsible for handling slave lag.
    wfGetLBFactory()->commitAll(__METHOD__);
    // Clear out title cache data from prior snapshots
    LinkCache::singleton()->clear();
    $timeMs = intval((microtime(true) - $jobStartTime) * 1000);
    $rssEnd = $this->getMaxRssKb();

    // Record how long jobs wait before getting popped
    $readyTs = $job->getReadyTimestamp();
    if ($readyTs) {
        $pickupDelay = max(0, $popTime - $readyTs);
        $stats->timing('jobqueue.pickup_delay.all', 1000 * $pickupDelay);
        $stats->timing("jobqueue.pickup_delay.{$jType}", 1000 * $pickupDelay);
    }
    // Record root job age for jobs being run
    $root = $job->getRootJobParams();
    if ($root['rootJobTimestamp']) {
        $age = max(0, $popTime - wfTimestamp(TS_UNIX, $root['rootJobTimestamp']));
        $stats->timing("jobqueue.pickup_root_age.{$jType}", 1000 * $age);
    }
    // Track the execution time for jobs
    $stats->timing("jobqueue.run.{$jType}", $timeMs);
    // Track RSS increases for jobs (in case of memory leaks)
    if ($rssStart && $rssEnd) {
        $stats->increment("jobqueue.rss_delta.{$jType}", $rssEnd - $rssStart);
    }

    if ($status === false) {
        $msg = $job->toString() . " t={$timeMs} error={$error}";
        $this->logger->error($msg);
        $this->debugCallback($msg);
    } else {
        $msg = $job->toString() . " t={$timeMs} good";
        $this->logger->info($msg);
        $this->debugCallback($msg);
    }

    return array('status' => $status, 'error' => $error, 'timeMs' => $timeMs);
}
public function execute() {
    if (wfReadOnly()) {
        $this->error("Unable to run jobs; the wiki is in read-only mode.", 1); // die
    }

    if ($this->hasOption('procs')) {
        $procs = intval($this->getOption('procs'));
        if ($procs < 1 || $procs > 1000) {
            $this->error("Invalid argument to --procs", true);
        } elseif ($procs != 1) {
            $fc = new ForkController($procs);
            if ($fc->start() != 'child') {
                exit(0);
            }
        }
    }

    $type = $this->getOption('type', false);
    $maxJobs = $this->getOption('maxjobs', false);
    $maxTime = $this->getOption('maxtime', false);
    $noThrottle = $this->hasOption('nothrottle');
    $startTime = time();

    $group = JobQueueGroup::singleton();
    // Handle any required periodic queue maintenance
    $count = $group->executeReadyPeriodicTasks();
    if ($count > 0) {
        $this->runJobsLog("Executed {$count} periodic queue task(s).");
    }

    $backoffs = $this->loadBackoffs(); // map of (type => UNIX expiry)
    $startingBackoffs = $backoffs; // avoid unnecessary writes
    $backoffExpireFunc = function ($t) {
        return $t > time();
    };

    $jobsRun = 0; // counter
    $flags = JobQueueGroup::USE_CACHE;
    $lastTime = time(); // time since last slave check
    do {
        $backoffs = array_filter($backoffs, $backoffExpireFunc);
        $blacklist = $noThrottle ? array() : array_keys($backoffs);
        if ($type === false) {
            $job = $group->pop(JobQueueGroup::TYPE_DEFAULT, $flags, $blacklist);
        } elseif (in_array($type, $blacklist)) {
            $job = false; // requested queue in backoff state
        } else {
            $job = $group->pop($type); // job from a single queue
        }
        if ($job) { // found a job
            ++$jobsRun;
            $this->runJobsLog($job->toString() . " STARTING");

            // Set timer to stop the job if too much CPU time is used
            set_time_limit($maxTime ?: 0);
            // Run the job...
            wfProfileIn(__METHOD__ . '-' . get_class($job));
            $t = microtime(true);
            try {
                $status = $job->run();
                $error = $job->getLastError();
            } catch (MWException $e) {
                MWExceptionHandler::rollbackMasterChangesAndLog($e);
                $status = false;
                $error = get_class($e) . ': ' . $e->getMessage();
                $e->report(); // write error to STDERR and the log
            }
            $timeMs = intval((microtime(true) - $t) * 1000);
            wfProfileOut(__METHOD__ . '-' . get_class($job));
            // Disable the timer
            set_time_limit(0);

            // Mark the job as done on success or when the job cannot be retried
            if ($status !== false || !$job->allowRetries()) {
                $group->ack($job); // done
            }

            if ($status === false) {
                $this->runJobsLog($job->toString() . " t={$timeMs} error={$error}");
            } else {
                $this->runJobsLog($job->toString() . " t={$timeMs} good");
            }

            // Back off of certain jobs for a while
            $ttw = $this->getBackoffTimeToWait($job);
            if ($ttw > 0) {
                $jType = $job->getType();
                $backoffs[$jType] = isset($backoffs[$jType]) ? $backoffs[$jType] : 0;
                $backoffs[$jType] = max($backoffs[$jType], time() + $ttw);
            }

            // Break out if we hit the job count or wall time limits...
            if ($maxJobs && $jobsRun >= $maxJobs) {
                break;
            } elseif ($maxTime && (time() - $startTime) > $maxTime) {
                break;
            }

            // Don't let any of the main DB slaves get backed up
            $timePassed = time() - $lastTime;
            if ($timePassed >= 5 || $timePassed < 0) {
                wfWaitForSlaves();
                $lastTime = time();
            }
            // Don't let any queue slaves/backups fall behind
            if ($jobsRun > 0 && ($jobsRun % 100) == 0) {
                $group->waitForBackups();
            }

            // Bail if near-OOM instead of in a job
            $this->assertMemoryOK();
        }
    } while ($job); // stop when there are no jobs

    // Sync the persistent backoffs for the next runJobs.php pass
    $backoffs = array_filter($backoffs, $backoffExpireFunc);
    if ($backoffs !== $startingBackoffs) {
        $this->syncBackoffs($backoffs);
    }
}
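/*
 * Typical invocations of this maintenance script, matching the options read above
 * (--type, --maxjobs, --maxtime, --procs, --nothrottle); the job type name is just
 * an example of a standard MediaWiki job type:
 *
 *   php maintenance/runJobs.php                                   # drain the default queues
 *   php maintenance/runJobs.php --type refreshLinks --maxjobs 50  # 50 jobs of one type
 *   php maintenance/runJobs.php --procs 4 --maxtime 300           # 4 forked runners, 5 min cap
 */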
/**
 * @param DeferrableUpdate $update
 * @param LBFactory $lbFactory
 * @param integer $stage
 * @return ErrorPageError|null
 */
private static function runUpdate(DeferrableUpdate $update, LBFactory $lbFactory, $stage) {
    $guiError = null;
    try {
        $fnameTrxOwner = get_class($update) . '::doUpdate';
        $lbFactory->beginMasterChanges($fnameTrxOwner);
        $update->doUpdate();
        $lbFactory->commitMasterChanges($fnameTrxOwner);
    } catch (Exception $e) {
        // Reporting GUI exceptions does not work post-send
        if ($e instanceof ErrorPageError && $stage === self::PRESEND) {
            $guiError = $e;
        }
        MWExceptionHandler::rollbackMasterChangesAndLog($e);
    }

    return $guiError;
}
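/*
 * runUpdate() brackets DeferrableUpdate::doUpdate() in its own master transaction round,
 * so an update class only has to perform its writes. A minimal sketch of such a class;
 * the class name, table, and columns are hypothetical examples, not part of MediaWiki.
 */
class ExampleCounterUpdate implements DeferrableUpdate {
    /** @var int */
    private $pageId;

    public function __construct($pageId) {
        $this->pageId = $pageId;
    }

    public function doUpdate() {
        // Writes here run between beginMasterChanges() and commitMasterChanges();
        // if this throws, runUpdate() rolls the whole round back and logs it.
        wfGetDB(DB_MASTER)->update(
            'example_counters', // hypothetical table
            array('ec_views = ec_views + 1'),
            array('ec_page' => $this->pageId),
            __METHOD__
        );
    }
}

// Queued like any other deferred update and later passed through runUpdate():
DeferredUpdates::addUpdate(new ExampleCounterUpdate(1234));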
/**
 * Handle an exception as an API response
 *
 * @since 1.23
 * @param Exception $e
 */
protected function handleException(Exception $e) {
    // Bug 63145: Rollback any open database transactions
    if (!($e instanceof UsageException)) {
        // UsageExceptions are intentional, so don't rollback if that's the case
        MWExceptionHandler::rollbackMasterChangesAndLog($e);
    }

    // Allow extra cleanup and logging
    wfRunHooks('ApiMain::onException', array($this, $e));

    // Log it
    if (!($e instanceof UsageException)) {
        MWExceptionHandler::logException($e);
    }

    // Handle any kind of exception by outputting properly formatted error message.
    // If this fails, an unhandled exception should be thrown so that global error
    // handler will process and log it.
    $errCode = $this->substituteResultWithError($e);

    // Error results should not be cached
    $this->setCacheMode('private');

    $response = $this->getRequest()->response();
    $headerStr = 'MediaWiki-API-Error: ' . $errCode;
    if ($e->getCode() === 0) {
        $response->header($headerStr);
    } else {
        $response->header($headerStr, true, $e->getCode());
    }

    // Reset and print just the error message
    ob_clean();

    // If the error occurred during printing, do a printer->profileOut()
    $this->mPrinter->safeProfileOut();
    $this->printResult(true);
}
/**
 * Run jobs of the specified number/type for the specified time
 *
 * The response map has a 'jobs' field that lists the status of each job, including:
 *   - type   : the job type
 *   - status : ok/failed
 *   - error  : any error message string
 *   - time   : the job run time in ms
 * The response map also has:
 *   - backoffs : the (job type => seconds) map of backoff times
 *   - elapsed  : the total time spent running tasks in ms
 *   - reached  : the reason the script finished, one of (none-ready, none-possible,
 *                read-only, slave-lag-limit, job-limit, time-limit)
 *
 * This method outputs status information only if a debug handler was set.
 * Any exceptions are caught and logged, but are not reported as output.
 *
 * @param array $options Map of parameters:
 *    - type     : the job type (or false for the default types)
 *    - maxJobs  : maximum number of jobs to run
 *    - maxTime  : maximum time in seconds before stopping
 *    - throttle : whether to respect job backoff configuration
 * @return array Summary response that can easily be JSON serialized
 */
public function run(array $options) {
    global $wgJobClasses;

    $response = array('jobs' => array(), 'reached' => 'none-ready');

    $type = isset($options['type']) ? $options['type'] : false;
    $maxJobs = isset($options['maxJobs']) ? $options['maxJobs'] : false;
    $maxTime = isset($options['maxTime']) ? $options['maxTime'] : false;
    $noThrottle = isset($options['throttle']) && !$options['throttle'];

    if ($type !== false && !isset($wgJobClasses[$type])) {
        $response['reached'] = 'none-possible';
        return $response;
    }

    $group = JobQueueGroup::singleton();
    // Handle any required periodic queue maintenance
    $count = $group->executeReadyPeriodicTasks();
    if ($count > 0) {
        $msg = "Executed {$count} periodic queue task(s).";
        $this->logger->debug($msg);
        $this->debugCallback($msg);
    }

    // Bail out if in read-only mode
    if (wfReadOnly()) {
        $response['reached'] = 'read-only';
        return $response;
    }
    // Bail out if there is too much DB lag
    list(, $maxLag) = wfGetLBFactory()->getMainLB(wfWikiID())->getMaxLag();
    if ($maxLag >= 5) {
        $response['reached'] = 'slave-lag-limit';
        return $response;
    }

    // Flush any pending DB writes for sanity
    wfGetLBFactory()->commitMasterChanges();

    // Some job types should not run until a certain timestamp
    $backoffs = array(); // map of (type => UNIX expiry)
    $backoffDeltas = array(); // map of (type => seconds)
    $wait = 'wait'; // block to read backoffs the first time

    $jobsRun = 0;
    $timeMsTotal = 0;
    $flags = JobQueueGroup::USE_CACHE;
    $checkPeriod = 5.0; // seconds
    $checkPhase = mt_rand(0, 1000 * $checkPeriod) / 1000; // avoid stampedes
    $startTime = microtime(true); // time since jobs started running
    $lastTime = microtime(true) - $checkPhase; // time since last slave check
    do {
        // Sync the persistent backoffs with concurrent runners
        $backoffs = $this->syncBackoffDeltas($backoffs, $backoffDeltas, $wait);
        $blacklist = $noThrottle ? array() : array_keys($backoffs);
        $wait = 'nowait'; // less important now

        if ($type === false) {
            $job = $group->pop(JobQueueGroup::TYPE_DEFAULT, $flags, $blacklist);
        } elseif (in_array($type, $blacklist)) {
            $job = false; // requested queue in backoff state
        } else {
            $job = $group->pop($type); // job from a single queue
        }
        if ($job) { // found a job
            $jType = $job->getType();

            // Back off of certain jobs for a while (for throttling and for errors)
            $ttw = $this->getBackoffTimeToWait($job);
            if ($ttw > 0) {
                // Always add the delta for other runners in case the time running the
                // job negated the backoff for each individually but not collectively.
                $backoffDeltas[$jType] = isset($backoffDeltas[$jType])
                    ? $backoffDeltas[$jType] + $ttw
                    : $ttw;
                $backoffs = $this->syncBackoffDeltas($backoffs, $backoffDeltas, $wait);
            }

            $msg = $job->toString() . " STARTING";
            $this->logger->info($msg);
            $this->debugCallback($msg);

            // Run the job...
            $jobStartTime = microtime(true);
            try {
                ++$jobsRun;
                $status = $job->run();
                $error = $job->getLastError();
                wfGetLBFactory()->commitMasterChanges();
            } catch (Exception $e) {
                MWExceptionHandler::rollbackMasterChangesAndLog($e);
                $status = false;
                $error = get_class($e) . ': ' . $e->getMessage();
                MWExceptionHandler::logException($e);
            }
            $timeMs = intval((microtime(true) - $jobStartTime) * 1000);
            $timeMsTotal += $timeMs;

            // Mark the job as done on success or when the job cannot be retried
            if ($status !== false || !$job->allowRetries()) {
                $group->ack($job); // done
            }

            // Back off of certain jobs for a while (for throttling and for errors)
            if ($status === false && mt_rand(0, 49) == 0) {
                $ttw = max($ttw, 30); // too many errors
                $backoffDeltas[$jType] = isset($backoffDeltas[$jType])
                    ? $backoffDeltas[$jType] + $ttw
                    : $ttw;
            }

            if ($status === false) {
                $msg = $job->toString() . " t={$timeMs} error={$error}";
                $this->logger->error($msg);
                $this->debugCallback($msg);
            } else {
                $msg = $job->toString() . " t={$timeMs} good";
                $this->logger->info($msg);
                $this->debugCallback($msg);
            }

            $response['jobs'][] = array(
                'type' => $jType,
                'status' => ($status === false) ? 'failed' : 'ok',
                'error' => $error,
                'time' => $timeMs
            );

            // Break out if we hit the job count or wall time limits...
            if ($maxJobs && $jobsRun >= $maxJobs) {
                $response['reached'] = 'job-limit';
                break;
            } elseif ($maxTime && (microtime(true) - $startTime) > $maxTime) {
                $response['reached'] = 'time-limit';
                break;
            }

            // Don't let any of the main DB slaves get backed up.
            // This only waits for so long before exiting and letting
            // other wikis in the farm (on different masters) get a chance.
            $timePassed = microtime(true) - $lastTime;
            if ($timePassed >= 5 || $timePassed < 0) {
                if (!wfWaitForSlaves($lastTime, false, '*', 5)) {
                    $response['reached'] = 'slave-lag-limit';
                    break;
                }
                $lastTime = microtime(true);
            }
            // Don't let any queue slaves/backups fall behind
            if ($jobsRun > 0 && ($jobsRun % 100) == 0) {
                $group->waitForBackups();
            }

            // Bail if near-OOM instead of in a job
            $this->assertMemoryOK();
        }
    } while ($job); // stop when there are no jobs

    // Sync the persistent backoffs for the next runJobs.php pass
    if ($backoffDeltas) {
        $this->syncBackoffDeltas($backoffs, $backoffDeltas, 'wait');
    }

    $response['backoffs'] = $backoffs;
    $response['elapsed'] = $timeMsTotal;

    return $response;
}
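/*
 * For clarity on the backoff bookkeeping used by both run() variants above: each runner
 * accumulates local (type => seconds) deltas and periodically folds them into the shared
 * (type => UNIX expiry) map via syncBackoffDeltas(). A standalone sketch of that merge
 * rule as I read it from the surrounding code; the function name is hypothetical, and the
 * real method also persists the map and coordinates with concurrent runners.
 */
function mergeBackoffDeltas(array $backoffs, array $deltas) {
    $now = time();
    foreach ($deltas as $type => $seconds) {
        // Extend an unexpired backoff, or start a new one from now
        $base = isset($backoffs[$type]) ? max($backoffs[$type], $now) : $now;
        $backoffs[$type] = $base + $seconds;
    }
    // Drop expired entries so the blacklist used in the pop loop stays minimal
    return array_filter($backoffs, function ($expiry) use ($now) {
        return $expiry > $now;
    });
}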
/**
 * @param Job $job
 * @param LBFactory $lbFactory
 * @param StatsdDataFactory $stats
 * @param float $popTime
 * @return array Map of status/error/timeMs
 */
private function executeJob(Job $job, LBFactory $lbFactory, $stats, $popTime) {
    $jType = $job->getType();
    $msg = $job->toString() . " STARTING";
    $this->logger->debug($msg);
    $this->debugCallback($msg);

    // Run the job...
    $rssStart = $this->getMaxRssKb();
    $jobStartTime = microtime(true);
    try {
        $fnameTrxOwner = get_class($job) . '::run'; // give run() outer scope
        $lbFactory->beginMasterChanges($fnameTrxOwner);
        $status = $job->run();
        $error = $job->getLastError();
        $this->commitMasterChanges($lbFactory, $job, $fnameTrxOwner);
        // Run any deferred update tasks; doUpdates() manages transactions itself
        DeferredUpdates::doUpdates();
    } catch (Exception $e) {
        MWExceptionHandler::rollbackMasterChangesAndLog($e);
        $status = false;
        $error = get_class($e) . ': ' . $e->getMessage();
    }
    // Always attempt to call teardown() even if Job throws exception.
    try {
        $job->teardown($status);
    } catch (Exception $e) {
        MWExceptionHandler::logException($e);
    }

    // Commit all outstanding connections that are in a transaction
    // to get a fresh repeatable read snapshot on every connection.
    // Note that jobs are still responsible for handling replica DB lag.
    $lbFactory->flushReplicaSnapshots(__METHOD__);
    // Clear out title cache data from prior snapshots
    MediaWikiServices::getInstance()->getLinkCache()->clear();
    $timeMs = intval((microtime(true) - $jobStartTime) * 1000);
    $rssEnd = $this->getMaxRssKb();

    // Record how long jobs wait before getting popped
    $readyTs = $job->getReadyTimestamp();
    if ($readyTs) {
        $pickupDelay = max(0, $popTime - $readyTs);
        $stats->timing('jobqueue.pickup_delay.all', 1000 * $pickupDelay);
        $stats->timing("jobqueue.pickup_delay.{$jType}", 1000 * $pickupDelay);
    }
    // Record root job age for jobs being run
    $rootTimestamp = $job->getRootJobParams()['rootJobTimestamp'];
    if ($rootTimestamp) {
        $age = max(0, $popTime - wfTimestamp(TS_UNIX, $rootTimestamp));
        $stats->timing("jobqueue.pickup_root_age.{$jType}", 1000 * $age);
    }
    // Track the execution time for jobs
    $stats->timing("jobqueue.run.{$jType}", $timeMs);
    // Track RSS increases for jobs (in case of memory leaks)
    if ($rssStart && $rssEnd) {
        $stats->updateCount("jobqueue.rss_delta.{$jType}", $rssEnd - $rssStart);
    }

    if ($status === false) {
        $msg = $job->toString() . " t={$timeMs} error={$error}";
        $this->logger->error($msg);
        $this->debugCallback($msg);
    } else {
        $msg = $job->toString() . " t={$timeMs} good";
        $this->logger->info($msg);
        $this->debugCallback($msg);
    }

    return ['status' => $status, 'error' => $error, 'timeMs' => $timeMs];
}
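/*
 * The Job contract that executeJob() relies on: run() returns boolean success,
 * getLastError() explains a failure, allowRetries() decides whether a failed job gets
 * ack()'ed anyway, and teardown($status) always runs afterwards. A minimal sketch of
 * a job class; the class name, job type string, and its "work" are hypothetical.
 */
class ExampleTouchPageJob extends Job {
    public function __construct(Title $title, array $params) {
        parent::__construct('exampleTouchPage', $title, $params); // hypothetical type name
    }

    public function run() {
        $page = WikiPage::factory($this->title);
        if (!$page->exists()) {
            // Signal failure; the runner logs this string as error=... and may retry
            $this->setLastError('Page does not exist: ' . $this->title->getPrefixedText());
            return false;
        }
        // The actual work; DB writes here happen inside the runner's transaction round
        $page->doPurge();
        return true;
    }
}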