function execute() {
	$totalOnly = $this->hasOption( 'totalonly' );
	$pendingDBs = JobQueueAggregator::singleton()->getAllReadyWikiQueues();

	$sizeByWiki = array(); // (wiki => type => count) map
	foreach ( $pendingDBs as $type => $wikis ) {
		foreach ( $wikis as $wiki ) {
			$sizeByWiki[$wiki][$type] =
				JobQueueGroup::singleton( $wiki )->get( $type )->getSize();
		}
	}

	if ( $this->hasOption( 'grouponly' ) ) {
		$this->output( FormatJson::encode( $sizeByWiki, true ) . "\n" );
	} else {
		$total = 0;
		foreach ( $sizeByWiki as $wiki => $counts ) {
			$count = array_sum( $counts );
			if ( $count > 0 ) {
				if ( !$totalOnly ) {
					$this->output( "{$wiki} {$count}\n" );
				}
				$total += $count;
			}
		}
		if ( !$this->hasOption( 'nototal' ) ) {
			$this->output( "Total {$total}\n" );
		}
	}
}
function run() {
	global $wgUpdateRowsPerJob;

	// Job to update all (or a range of) backlink pages for a page
	if ( !empty( $this->params['recursive'] ) ) {
		// Carry over information for de-duplication
		$extraParams = $this->getRootJobParams();

		// Avoid slave lag when fetching templates.
		// When the outermost job is run, we know that the caller that enqueued it must have
		// committed the relevant changes to the DB by now. At that point, record the master
		// position and pass it along as the job recursively breaks into smaller range jobs.
		// Hopefully, when leaf jobs are popped, the slaves will have reached that position.
		if ( isset( $this->params['masterPos'] ) ) {
			$extraParams['masterPos'] = $this->params['masterPos'];
		} elseif ( wfGetLB()->getServerCount() > 1 ) {
			$extraParams['masterPos'] = wfGetLB()->getMasterPos();
		} else {
			$extraParams['masterPos'] = false;
		}

		// Convert this into no more than $wgUpdateRowsPerJob RefreshLinks per-title
		// jobs and possibly a recursive RefreshLinks job for the rest of the backlinks
		$jobs = BacklinkJobUtils::partitionBacklinkJob(
			$this,
			$wgUpdateRowsPerJob,
			1, // job-per-title
			array( 'params' => $extraParams )
		);
		JobQueueGroup::singleton()->push( $jobs );
	// Job to update link tables for a set of titles
	} elseif ( isset( $this->params['pages'] ) ) {
		foreach ( $this->params['pages'] as $pageId => $nsAndKey ) {
			list( $ns, $dbKey ) = $nsAndKey;
			$this->runForTitle( Title::makeTitleSafe( $ns, $dbKey ) );
		}
	// Job to update link tables for a given title
	} else {
		$this->runForTitle( $this->title );
	}

	return true;
}
/**
 * Insert jobs into the job queue to fix redirects to the given title
 * @param string $reason The reason for the fix, see message
 *   "double-redirect-fixed-<reason>"
 * @param Title $redirTitle The title which has changed; redirects
 *   pointing to this title are fixed
 * @param bool $destTitle Not used
 */
public static function fixRedirects( $reason, $redirTitle, $destTitle = false ) {
	# Need to use the master to get the redirect table updated in the same transaction
	$dbw = wfGetDB( DB_MASTER );
	$res = $dbw->select(
		array( 'redirect', 'page' ),
		array( 'page_namespace', 'page_title' ),
		array(
			'page_id = rd_from',
			'rd_namespace' => $redirTitle->getNamespace(),
			'rd_title' => $redirTitle->getDBkey()
		),
		__METHOD__
	);
	if ( !$res->numRows() ) {
		return;
	}

	$jobs = array();
	foreach ( $res as $row ) {
		$title = Title::makeTitle( $row->page_namespace, $row->page_title );
		if ( !$title ) {
			continue;
		}

		$jobs[] = new self( $title, array(
			'reason' => $reason,
			'redirTitle' => $redirTitle->getPrefixedDBkey()
		) );
		# Avoid excessive memory usage
		if ( count( $jobs ) > 10000 ) {
			JobQueueGroup::singleton()->push( $jobs );
			$jobs = array();
		}
	}
	JobQueueGroup::singleton()->push( $jobs );
}
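
// Example call site (an illustrative sketch, not from the source): after a page
// move turns $oldTitle into a redirect, queue jobs that retarget any redirects
// now pointing through it. The 'move' reason selects the corresponding
// "double-redirect-fixed-move" summary; $oldTitle is an assumed Title instance,
// and DoubleRedirectJob as the owning class is inferred from the `new self` call
// and the message name above.
DoubleRedirectJob::fixRedirects( 'move', $oldTitle );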
public function execute() {
	$group = JobQueueGroup::singleton();

	if ( $this->hasOption( 'list' ) ) {
		foreach ( $group->getQueueTypes() as $type ) {
			$queue = $group->get( $type );
			foreach ( $queue->getAllQueuedJobs() as $job ) {
				$this->output( $job->toString() . " status=unclaimed\n" );
			}
			foreach ( $queue->getAllDelayedJobs() as $job ) {
				$this->output( $job->toString() . " status=delayed\n" );
			}
		}
	} elseif ( $this->hasOption( 'group' ) ) {
		foreach ( $group->getQueueTypes() as $type ) {
			$queue = $group->get( $type );
			$delayed = $queue->getDelayedCount();
			$pending = $queue->getSize();
			$claimed = $queue->getAcquiredCount();
			$abandoned = $queue->getAbandonedCount();
			$active = max( 0, $claimed - $abandoned );
			if ( ( $pending + $claimed + $delayed ) > 0 ) {
				$this->output(
					"{$type}: {$pending} queued; " .
					"{$claimed} claimed ({$active} active, {$abandoned} abandoned); " .
					"{$delayed} delayed\n"
				);
			}
		}
	} else {
		$count = 0;
		foreach ( $group->getQueueTypes() as $type ) {
			$count += $group->get( $type )->getSize();
		}
		$this->output( "{$count}\n" );
	}
}
/**
 * Purges the list of URLs passed to the constructor.
 */
public function doUpdate() {
	global $wgCdnReboundPurgeDelay;

	self::purge( $this->urls );

	if ( $wgCdnReboundPurgeDelay > 0 ) {
		JobQueueGroup::singleton()->lazyPush( new CdnPurgeJob(
			Title::makeTitle( NS_SPECIAL, 'Badtitle/' . __CLASS__ ),
			array(
				'urls' => $this->urls,
				'jobReleaseTimestamp' => time() + $wgCdnReboundPurgeDelay
			)
		) );
	}
}
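
// Configuration sketch (illustrative value): the rebound purge exists to beat
// races where a CDN node re-caches a stale copy just after the first purge, so
// the delayed CdnPurgeJob purges the same URLs a second time. A delay of 0
// disables the follow-up job entirely.
$wgCdnReboundPurgeDelay = 11; // seconds to wait before the second CDN purge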
/**
 * Usually this job is fast enough to be executed immediately,
 * in which case routing it through the job queue only causes problems
 * in installations with errant job queue processing.
 * @override
 */
public function insert() {
	global $wgTranslateDelayedMessageIndexRebuild;
	if ( $wgTranslateDelayedMessageIndexRebuild ) {
		JobQueueGroup::singleton()->push( $this );
	} else {
		$this->run();
	}
}
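
// Configuration sketch: the flag consumed by insert() above. When enabled,
// message index rebuilds are queued (useful on large wikis where the rebuild
// is slow); when disabled, they run inline during the request.
$wgTranslateDelayedMessageIndexRebuild = true;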
public function testInsertDelayed() {
	global $wgTranslateDelayedMessageIndexRebuild;
	$wgTranslateDelayedMessageIndexRebuild = true;

	MessageIndexRebuildJob::newJob()->insert();
	$job = JobQueueGroup::singleton()->get( 'MessageIndexRebuildJob' )->pop();
	$this->assertInstanceOf( 'MessageIndexRebuildJob', $job,
		'There is a job in the JobQueue' );
	$this->assertTrue( $job->run(), 'Job is executed successfully' );
}
public function run() {
	foreach ( $this->params['jobsByWiki'] as $wiki => $jobMaps ) {
		$jobSpecs = array();
		foreach ( $jobMaps as $jobMap ) {
			$jobSpecs[] = JobSpecification::newFromArray( $jobMap );
		}
		JobQueueGroup::singleton( $wiki )->push( $jobSpecs );
	}

	return true;
}
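
// Illustrative sketch (not from the source) of building the 'jobsByWiki'
// parameter consumed by run() above. It assumes JobSpecification's
// toSerializableArray() produces the array form that newFromArray() accepts;
// 'enwiki' is an example wiki ID.
$spec = new JobSpecification( 'null', array( 'lives' => 1 ), array(), Title::newMainPage() );
$params = array(
	'jobsByWiki' => array(
		'enwiki' => array( $spec->toSerializableArray() ),
	),
);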
public function execute() {
	global $wgTitle;

	if ( $this->hasOption( 'procs' ) ) {
		$procs = intval( $this->getOption( 'procs' ) );
		if ( $procs < 1 || $procs > 1000 ) {
			$this->error( "Invalid argument to --procs", true );
		}
		$fc = new ForkController( $procs );
		if ( $fc->start() != 'child' ) {
			exit( 0 );
		}
	}

	$maxJobs = $this->getOption( 'maxjobs', false );
	$maxTime = $this->getOption( 'maxtime', false );
	$startTime = time();
	$type = $this->getOption( 'type', false );
	$wgTitle = Title::newFromText( 'RunJobs.php' );
	$dbw = wfGetDB( DB_MASTER );
	$n = 0;

	$group = JobQueueGroup::singleton();
	do {
		$job = ( $type === false )
			? $group->pop() // job from any queue
			: $group->get( $type )->pop(); // job from a single queue
		if ( $job ) { // found a job
			// Perform the job (logging success/failure and runtime)...
			$t = microtime( true );
			$this->runJobsLog( $job->toString() . " STARTING" );
			$status = $job->run();
			$group->ack( $job ); // done
			$t = microtime( true ) - $t;
			$timeMs = intval( $t * 1000 );
			if ( !$status ) {
				$this->runJobsLog( $job->toString() . " t={$timeMs} error={$job->error}" );
			} else {
				$this->runJobsLog( $job->toString() . " t={$timeMs} good" );
			}
			// Break out if we hit the job count or wall time limits...
			if ( $maxJobs && ++$n >= $maxJobs ) {
				break;
			}
			if ( $maxTime && time() - $startTime > $maxTime ) {
				break;
			}
			// Don't let any slaves/backups fall behind...
			// (use the popped job's type here: $type may be false, and
			// $group->get( false ) would not name a valid queue)
			$group->get( $job->getType() )->waitForBackups();
		}
	} while ( $job ); // stop when there are no jobs
}
public function run() {
	if ( $this->params['usleep'] > 0 ) {
		usleep( $this->params['usleep'] );
	}
	if ( $this->params['lives'] > 1 ) {
		$params = $this->params;
		$params['lives']--;
		$job = new self( $this->title, $params );
		JobQueueGroup::singleton()->push( $job );
	}

	return true;
}
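
// Testing sketch (illustrative): enqueue a job that sleeps briefly and then
// re-enqueues itself twice more via the run() method above. This assumes the
// class above is the core NullJob, which takes exactly these parameters.
JobQueueGroup::singleton()->push(
	new NullJob( Title::newMainPage(), array( 'usleep' => 100, 'lives' => 3 ) )
);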
/**
 * Queue some more jobs!
 *
 * @return bool
 */
public function run() {
	$data = $this->params['data'];
	$pages = $this->params['pages'];

	$jobsByTarget = array();
	foreach ( $pages as $page ) {
		$title = Title::newFromText( $page['title'] );
		$jobsByTarget[$page['wiki']][] = new MassMessageJob( $title, $data );
	}

	foreach ( $jobsByTarget as $wiki => $jobs ) {
		JobQueueGroup::singleton( $wiki )->push( $jobs );
	}

	return true;
}
public function doUpdate() {
	$job = new HTMLCacheUpdateJob(
		$this->mTitle,
		array(
			'table' => $this->mTable,
			'recursive' => true
		) + Job::newRootJobParams(
			"htmlCacheUpdate:{$this->mTable}:{$this->mTitle->getPrefixedText()}"
		)
	);

	$count = $this->mTitle->getBacklinkCache()->getNumLinks( $this->mTable, 100 );
	if ( $count >= 100 ) { // many backlinks
		JobQueueGroup::singleton()->lazyPush( $job );
	} else { // few backlinks ($count might be off even if 0)
		$dbw = wfGetDB( DB_MASTER );
		$dbw->onTransactionIdle( function () use ( $job ) {
			$job->run(); // just do the purge query now
		} );
	}
}
/**
 * Run a refreshLinks2 job
 * @return bool Success
 */
function run() {
	global $wgUpdateRowsPerJob;

	$linkCache = LinkCache::singleton();
	$linkCache->clear();

	if ( is_null( $this->title ) ) {
		$this->error = "refreshLinks2: Invalid title";
		return false;
	}

	// Back compat for pre-r94435 jobs
	$table = isset( $this->params['table'] ) ? $this->params['table'] : 'templatelinks';

	// Avoid slave lag when fetching templates.
	// When the outermost job is run, we know that the caller that enqueued it must have
	// committed the relevant changes to the DB by now. At that point, record the master
	// position and pass it along as the job recursively breaks into smaller range jobs.
	// Hopefully, when leaf jobs are popped, the slaves will have reached that position.
	if ( isset( $this->params['masterPos'] ) ) {
		$masterPos = $this->params['masterPos'];
	} elseif ( wfGetLB()->getServerCount() > 1 ) {
		$masterPos = wfGetLB()->getMasterPos();
	} else {
		$masterPos = false;
	}

	$tbc = $this->title->getBacklinkCache();

	$jobs = array(); // jobs to insert
	if ( isset( $this->params['start'] ) && isset( $this->params['end'] ) ) {
		# This is a partition job to trigger the insertion of leaf jobs...
		$jobs = array_merge( $jobs, $this->getSingleTitleJobs( $table, $masterPos ) );
	} else {
		# This is a base job to trigger the insertion of partitioned jobs...
		if ( $tbc->getNumLinks( $table, $wgUpdateRowsPerJob + 1 ) <= $wgUpdateRowsPerJob ) {
			# Just directly insert the single per-title jobs
			$jobs = array_merge( $jobs, $this->getSingleTitleJobs( $table, $masterPos ) );
		} else {
			# Insert the partition jobs to make per-title jobs
			foreach ( $tbc->partition( $table, $wgUpdateRowsPerJob ) as $batch ) {
				list( $start, $end ) = $batch;
				$jobs[] = new RefreshLinksJob2(
					$this->title,
					array(
						'table' => $table,
						'start' => $start,
						'end' => $end,
						'masterPos' => $masterPos,
					) + $this->getRootJobParams()
				);
			}
		}
	}

	if ( count( $jobs ) ) {
		JobQueueGroup::singleton()->push( $jobs );
	}

	return true;
}
/**
 * Show the special page
 *
 * @param mixed $params Parameter(s) passed to the page, or null
 */
public function execute( $params ) {
	$out = $this->getOutput();
	$request = $this->getRequest();
	$user = $this->getUser();

	// If the user doesn't have the required 'SendToFollowers' permission, display an error
	if ( !$user->isAllowed( 'SendToFollowers' ) ) {
		$out->permissionRequired( 'SendToFollowers' );
		return;
	}

	// Set the page title, robot policies, etc.
	$this->setHeaders();

	// This feature is available only to logged-in users.
	if ( !$user->isLoggedIn() ) {
		$out->setPageTitle( $this->msg( 'boardblastlogintitle' )->plain() );
		$out->addWikiMsg( 'boardblastlogintext' );
		return '';
	}

	// Is the database locked?
	if ( wfReadOnly() ) {
		$out->readOnlyPage();
		return false;
	}

	// Blocked through Special:Block? No access for you!
	if ( $user->isBlocked() ) {
		$out->blockedPage( false );
		return false;
	}

	// Add CSS & JS
	$out->addModuleStyles( 'ext.socialprofile.userboard.boardblast.css' );
	$out->addModules( 'ext.socialprofile.userboard.boardblast.js' );

	$output = '';

	if ( $request->wasPosted() ) {
		$out->setPageTitle( $this->msg( 'messagesenttitle' )->plain() );

		$message = $request->getVal( 'message' );
		$user_ids_to = explode( ',', $request->getVal( 'ids' ) );

		$jobParams = array(
			'user_ids_to' => $user_ids_to,
			'message' => $message,
			'sender' => $user->getId()
		);
		$job = new BoardBlastJobs( $this->getTitle(), $jobParams );
		JobQueueGroup::singleton()->push( $job );

		$output .= $this->msg( 'messagesentsuccess' )->plain();
	} else {
		$out->setPageTitle( $this->msg( 'boardblasttitle' )->plain() );
		$output .= $this->displayForm();
	}

	$out->addHTML( $output );
}
public function doUpdate() {
	global $wgMaxBacklinksInvalidate;
	wfProfileIn( __METHOD__ );

	$job = new HTMLCacheUpdateJob(
		$this->mTitle,
		array(
			'table' => $this->mTable,
		) + Job::newRootJobParams(
			"htmlCacheUpdate:{$this->mTable}:{$this->mTitle->getPrefixedText()}"
		)
	);

	$count = $this->mTitle->getBacklinkCache()->getNumLinks( $this->mTable, 200 );
	if ( $wgMaxBacklinksInvalidate !== false && $count > $wgMaxBacklinksInvalidate ) {
		wfDebug( "Skipped HTML cache invalidation of {$this->mTitle->getPrefixedText()}." );
	} elseif ( $count >= 200 ) { // many backlinks
		JobQueueGroup::singleton()->push( $job );
		JobQueueGroup::singleton()->deduplicateRootJob( $job );
	} else { // few backlinks ($count might be off even if 0)
		$job->run(); // just do the purge query now
	}

	wfProfileOut( __METHOD__ );
}
public function execute() {
	$typeFilter = $this->getOption( 'type', '' );
	$stateFilter = $this->getOption( 'status', '' );
	$stateLimit = (float)$this->getOption( 'limit', INF );

	$group = JobQueueGroup::singleton();

	$filteredTypes = $typeFilter
		? array( $typeFilter )
		: $group->getQueueTypes();
	$filteredStates = $stateFilter
		? array_intersect_key( self::$stateMethods, array( $stateFilter => 1 ) )
		: self::$stateMethods;

	if ( $this->hasOption( 'list' ) ) {
		$count = 0;
		foreach ( $filteredTypes as $type ) {
			$queue = $group->get( $type );
			foreach ( $filteredStates as $state => $method ) {
				foreach ( $queue->{$method}() as $job ) {
					/** @var Job $job */
					$this->output( $job->toString() . " status={$state}\n" );
					if ( ++$count >= $stateLimit ) {
						return;
					}
				}
			}
		}
	} elseif ( $this->hasOption( 'group' ) ) {
		foreach ( $filteredTypes as $type ) {
			$queue = $group->get( $type );
			$delayed = $queue->getDelayedCount();
			$pending = $queue->getSize();
			$claimed = $queue->getAcquiredCount();
			$abandoned = $queue->getAbandonedCount();
			$active = max( 0, $claimed - $abandoned );
			if ( ( $pending + $claimed + $delayed + $abandoned ) > 0 ) {
				$this->output(
					"{$type}: {$pending} queued; " .
					"{$claimed} claimed ({$active} active, {$abandoned} abandoned); " .
					"{$delayed} delayed\n"
				);
			}
		}
	} else {
		$count = 0;
		foreach ( $filteredTypes as $type ) {
			$count += $group->get( $type )->getSize();
		}
		$this->output( "{$count}\n" );
	}
}
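
// Sketch of the (state => JobQueue getter method) map that execute() above
// reads from self::$stateMethods. The exact contents are an assumption,
// inferred from the getters used in the simpler --list variant earlier in
// this section; real code may map further states such as claimed/abandoned.
protected static $stateMethods = array(
	'unclaimed' => 'getAllQueuedJobs',
	'delayed' => 'getAllDelayedJobs',
);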
public function execute() {
	$group = JobQueueGroup::singleton();

	if ( $this->hasOption( 'group' ) ) {
		foreach ( $group->getQueueTypes() as $type ) {
			$queue = $group->get( $type );
			$pending = $queue->getSize();
			$claimed = $queue->getAcquiredCount();
			if ( ( $pending + $claimed ) > 0 ) {
				$this->output( "{$type}: {$pending} queued; {$claimed} acquired\n" );
			}
		}
	} else {
		$count = 0;
		foreach ( $group->getQueueTypes() as $type ) {
			$count += $group->get( $type )->getSize();
		}
		$this->output( "{$count}\n" );
	}
}
function run() {
	global $wgUpdateRowsPerJob, $wgUpdateRowsPerQuery;

	static $expected = array( 'recursive', 'pages' ); // new jobs have one of these

	$oldRangeJob = false;
	if ( !array_intersect( array_keys( $this->params ), $expected ) ) {
		// B/C for older job params formats that lack these fields:
		// a) base jobs with just ("table") and b) range jobs with ("table","start","end")
		if ( isset( $this->params['start'] ) && isset( $this->params['end'] ) ) {
			$oldRangeJob = true;
		} else {
			$this->params['recursive'] = true; // base job
		}
	}

	// Job to purge all (or a range of) backlink pages for a page
	if ( !empty( $this->params['recursive'] ) ) {
		// Convert this into no more than $wgUpdateRowsPerJob HTMLCacheUpdateJob per-title
		// jobs and possibly a recursive HTMLCacheUpdateJob job for the rest of the backlinks
		$jobs = BacklinkJobUtils::partitionBacklinkJob(
			$this,
			$wgUpdateRowsPerJob,
			$wgUpdateRowsPerQuery,
			array( 'params' => $this->getRootJobParams() )
		);
		JobQueueGroup::singleton()->push( $jobs );
	// Job to purge pages for a set of titles
	} elseif ( isset( $this->params['pages'] ) ) {
		$this->invalidateTitles( $this->params['pages'] );
	// B/C for job to purge a range of backlink pages for a given page
	} elseif ( $oldRangeJob ) {
		$titleArray = $this->title->getBacklinkCache()->getLinks(
			$this->params['table'], $this->params['start'], $this->params['end'] );

		$pages = array(); // same format BacklinkJobUtils uses
		foreach ( $titleArray as $tl ) {
			$pages[$tl->getArticleID()] = array( $tl->getNamespace(), $tl->getDBkey() );
		}

		$jobs = array();
		// Preserve the array keys: 'pages' is keyed by page ID
		foreach ( array_chunk( $pages, $wgUpdateRowsPerJob, true ) as $pageChunk ) {
			$jobs[] = new HTMLCacheUpdateJob(
				$this->title,
				array(
					'table' => $this->params['table'],
					'pages' => $pageChunk
				) + $this->getRootJobParams()
			);
		}
		JobQueueGroup::singleton()->push( $jobs );
	}

	return true;
}
public function execute() {
	global $wgJobQueueMigrationConfig;

	$srcKey = $this->getOption( 'src' );
	$dstKey = $this->getOption( 'dst' );
	if ( !isset( $wgJobQueueMigrationConfig[$srcKey] ) ) {
		$this->error( "\$wgJobQueueMigrationConfig not set for '{$srcKey}'.", 1 );
	} elseif ( !isset( $wgJobQueueMigrationConfig[$dstKey] ) ) {
		$this->error( "\$wgJobQueueMigrationConfig not set for '{$dstKey}'.", 1 );
	}

	$types = ( $this->getOption( 'type' ) === 'all' )
		? JobQueueGroup::singleton()->getQueueTypes()
		: array( $this->getOption( 'type' ) );

	foreach ( $types as $type ) {
		$baseConfig = array( 'type' => $type, 'wiki' => wfWikiID() );
		$src = JobQueue::factory( $baseConfig + $wgJobQueueMigrationConfig[$srcKey] );
		$dst = JobQueue::factory( $baseConfig + $wgJobQueueMigrationConfig[$dstKey] );

		list( $total, $totalOK ) = $this->copyJobs( $src, $dst, $src->getAllQueuedJobs() );
		$this->output( "Copied {$totalOK}/{$total} queued {$type} jobs.\n" );

		list( $total, $totalOK ) = $this->copyJobs( $src, $dst, $src->getAllDelayedJobs() );
		$this->output( "Copied {$totalOK}/{$total} delayed {$type} jobs.\n" );
	}
}
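
// Configuration sketch (assumed shape): named backend configs that --src and
// --dst select between; execute() above merges each with the per-type base
// config before handing it to JobQueue::factory(). The server address and key
// names 'db'/'redis' are illustrative values, not from the source.
$wgJobQueueMigrationConfig = array(
	'db' => array( 'class' => 'JobQueueDB' ),
	'redis' => array(
		'class' => 'JobQueueRedis',
		'redisServer' => 'rdb1:6379',
		'redisConfig' => array( 'password' => null ),
	),
);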
function run() {
	global $wgUpdateRowsPerJob, $wgUpdateRowsPerQuery;

	if ( isset( $this->params['table'] ) && !isset( $this->params['pages'] ) ) {
		$this->params['recursive'] = true; // b/c; base job
	}

	// Job to purge all (or a range of) backlink pages for a page
	if ( !empty( $this->params['recursive'] ) ) {
		// Convert this into no more than $wgUpdateRowsPerJob HTMLCacheUpdateJob per-title
		// jobs and possibly a recursive HTMLCacheUpdateJob job for the rest of the backlinks
		$jobs = BacklinkJobUtils::partitionBacklinkJob(
			$this,
			$wgUpdateRowsPerJob,
			$wgUpdateRowsPerQuery,
			array( 'params' => $this->getRootJobParams() )
		);
		JobQueueGroup::singleton()->push( $jobs );
	// Job to purge pages for a set of titles
	} elseif ( isset( $this->params['pages'] ) ) {
		$this->invalidateTitles( $this->params['pages'] );
	// Job to update a single title
	} else {
		$t = $this->title;
		$this->invalidateTitles( array(
			$t->getArticleID() => array( $t->getNamespace(), $t->getDBkey() )
		) );
	}

	return true;
}
function modifyPages( $source, $editSummary, $forPagesThatExist ) {
	$text = "";
	$xml_parser = new DTXMLParser( $source );
	$xml_parser->doParse();

	$jobs = array();
	$job_params = array();
	$job_params['user_id'] = $this->getUser()->getId();
	$job_params['edit_summary'] = $editSummary;
	$job_params['for_pages_that_exist'] = $forPagesThatExist;

	foreach ( $xml_parser->mPages as $page ) {
		$title = Title::newFromText( $page->getName() );
		$job_params['text'] = $page->createText();
		$jobs[] = new DTImportJob( $title, $job_params );
	}

	if ( class_exists( 'JobQueueGroup' ) ) {
		// MW 1.21+
		JobQueueGroup::singleton()->push( $jobs );
	} else {
		// MW <= 1.20
		Job::batchInsert( $jobs );
	}

	$text .= $this->msg( 'dt_import_success' )->numParams( count( $jobs ) )
		->params( 'XML' )->parseAsBlock();

	return $text;
}
/**
 * Writes the data in this object to the database
 * @param bool $noudp
 */
public function save( $noudp = false ) {
	global $wgPutIPinRC, $wgUseEnotif, $wgShowUpdatedMarker, $wgContLang;

	$dbw = wfGetDB( DB_MASTER );
	if ( !is_array( $this->mExtra ) ) {
		$this->mExtra = array();
	}

	if ( !$wgPutIPinRC ) {
		$this->mAttribs['rc_ip'] = '';
	}

	# If our database is strict about IP addresses, use NULL instead of an empty string
	if ( $dbw->strictIPs() && $this->mAttribs['rc_ip'] == '' ) {
		unset( $this->mAttribs['rc_ip'] );
	}

	# Trim spaces on user supplied text
	$this->mAttribs['rc_comment'] = trim( $this->mAttribs['rc_comment'] );

	# Make sure summary is truncated (whole multibyte characters)
	$this->mAttribs['rc_comment'] = $wgContLang->truncate( $this->mAttribs['rc_comment'], 255 );

	# Fixup database timestamps
	$this->mAttribs['rc_timestamp'] = $dbw->timestamp( $this->mAttribs['rc_timestamp'] );
	$this->mAttribs['rc_id'] = $dbw->nextSequenceValue( 'recentchanges_rc_id_seq' );

	# If we are using foreign keys, an entry of 0 for the page_id will fail, so use NULL
	if ( $dbw->cascadingDeletes() && $this->mAttribs['rc_cur_id'] == 0 ) {
		unset( $this->mAttribs['rc_cur_id'] );
	}

	# Insert new row
	$dbw->insert( 'recentchanges', $this->mAttribs, __METHOD__ );

	# Set the ID
	$this->mAttribs['rc_id'] = $dbw->insertId();

	# Notify extensions
	Hooks::run( 'RecentChange_save', array( &$this ) );

	# Notify external application via UDP
	if ( !$noudp ) {
		$this->notifyRCFeeds();
	}

	# E-mail notifications
	if ( $wgUseEnotif || $wgShowUpdatedMarker ) {
		$editor = $this->getPerformer();
		$title = $this->getTitle();

		// Never send an RC notification email about categorization changes
		if ( $this->mAttribs['rc_type'] != RC_CATEGORIZE ) {
			if ( Hooks::run( 'AbortEmailNotification', array( $editor, $title, $this ) ) ) {
				# @todo FIXME: This would be better as an extension hook
				$enotif = new EmailNotification();
				$enotif->notifyOnPageChange(
					$editor,
					$title,
					$this->mAttribs['rc_timestamp'],
					$this->mAttribs['rc_comment'],
					$this->mAttribs['rc_minor'],
					$this->mAttribs['rc_last_oldid'],
					$this->mExtra['pageStatus']
				);
			}
		}
	}

	// Update the cached list of active users
	if ( $this->mAttribs['rc_user'] > 0 ) {
		JobQueueGroup::singleton()->lazyPush( RecentChangesUpdateJob::newCacheUpdateJob() );
	}
}
/**
 * Queue a RefreshLinks job for any table.
 *
 * @param Title $title Title to do job for
 * @param string $table Table to use (e.g. 'templatelinks')
 */
public static function queueRecursiveJobsForTable( Title $title, $table ) {
	if ( $title->getBacklinkCache()->hasLinks( $table ) ) {
		$job = new RefreshLinksJob(
			$title,
			array(
				'table' => $table,
				'recursive' => true,
			) + Job::newRootJobParams(
				"refreshlinks:{$table}:{$title->getPrefixedText()}"
			)
		);

		JobQueueGroup::singleton()->push( $job );
	}
}
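
// Example call site (illustrative sketch): after a template is edited, refresh
// the link tables of every page that transcludes it. $template is an assumed
// Title for the edited template page; the class name is an assumption based on
// where core keeps this helper.
LinksUpdate::queueRecursiveJobsForTable( $template, 'templatelinks' );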
/**
 * Run jobs of the specified number/type for the specified time
 *
 * The response map has a 'jobs' field that lists the status of each job, including:
 *   - type : the job type
 *   - status : ok/failed
 *   - error : any error message string
 *   - time : the job run time in ms
 * The response map also has:
 *   - backoffs : the (job type => seconds) map of backoff times
 *   - elapsed : the total time spent running tasks in ms
 *   - reached : the reason the script finished, one of (none-ready, job-limit, time-limit)
 *
 * This method outputs status information only if a debug handler was set.
 * Any exceptions are caught and logged, but are not reported as output.
 *
 * @param array $options Map of parameters:
 *   - type : the job type (or false for the default types)
 *   - maxJobs : maximum number of jobs to run
 *   - maxTime : maximum time in seconds before stopping
 *   - throttle : whether to respect job backoff configuration
 * @return array Summary response that can easily be JSON serialized
 */
public function run( array $options ) {
	global $wgJobClasses, $wgTrxProfilerLimits;

	$response = array( 'jobs' => array(), 'reached' => 'none-ready' );

	$type = isset( $options['type'] ) ? $options['type'] : false;
	$maxJobs = isset( $options['maxJobs'] ) ? $options['maxJobs'] : false;
	$maxTime = isset( $options['maxTime'] ) ? $options['maxTime'] : false;
	$noThrottle = isset( $options['throttle'] ) && !$options['throttle'];

	if ( $type !== false && !isset( $wgJobClasses[$type] ) ) {
		$response['reached'] = 'none-possible';
		return $response;
	}

	// Bail out if in read-only mode
	if ( wfReadOnly() ) {
		$response['reached'] = 'read-only';
		return $response;
	}

	// Catch huge single updates that lead to slave lag
	$trxProfiler = Profiler::instance()->getTransactionProfiler();
	$trxProfiler->setLogger( LoggerFactory::getInstance( 'DBPerformance' ) );
	$trxProfiler->setExpectations( $wgTrxProfilerLimits['JobRunner'], __METHOD__ );

	// Bail out if there is too much DB lag.
	// This check should not block as we want to try other wiki queues.
	$maxAllowedLag = 3;
	list( , $maxLag ) = wfGetLB( wfWikiID() )->getMaxLag();
	if ( $maxLag >= $maxAllowedLag ) {
		$response['reached'] = 'slave-lag-limit';
		return $response;
	}

	$group = JobQueueGroup::singleton();

	// Flush any pending DB writes for sanity
	wfGetLBFactory()->commitAll();

	// Some job types should not run until a certain timestamp
	$backoffs = array(); // map of (type => UNIX expiry)
	$backoffDeltas = array(); // map of (type => seconds)
	$wait = 'wait'; // block to read backoffs the first time

	$stats = RequestContext::getMain()->getStats();
	$jobsPopped = 0;
	$timeMsTotal = 0;
	$flags = JobQueueGroup::USE_CACHE;
	$startTime = microtime( true ); // time since jobs started running
	$checkLagPeriod = 1.0; // check slave lag this often, in seconds
	$lastCheckTime = 1; // timestamp of last slave check
	do {
		// Sync the persistent backoffs with concurrent runners
		$backoffs = $this->syncBackoffDeltas( $backoffs, $backoffDeltas, $wait );
		$blacklist = $noThrottle ? array() : array_keys( $backoffs );
		$wait = 'nowait'; // less important now

		if ( $type === false ) {
			$job = $group->pop( JobQueueGroup::TYPE_DEFAULT, $flags, $blacklist );
		} elseif ( in_array( $type, $blacklist ) ) {
			$job = false; // requested queue in backoff state
		} else {
			$job = $group->pop( $type ); // job from a single queue
		}

		if ( $job ) { // found a job
			$popTime = time();
			$jType = $job->getType();

			// Back off of certain jobs for a while (for throttling and for errors)
			$ttw = $this->getBackoffTimeToWait( $job );
			if ( $ttw > 0 ) {
				// Always add the delta for other runners in case the time running the
				// job negated the backoff for each individually but not collectively.
				$backoffDeltas[$jType] = isset( $backoffDeltas[$jType] )
					? $backoffDeltas[$jType] + $ttw
					: $ttw;
				$backoffs = $this->syncBackoffDeltas( $backoffs, $backoffDeltas, $wait );
			}

			$msg = $job->toString() . " STARTING";
			$this->logger->debug( $msg );
			$this->debugCallback( $msg );

			// Run the job...
			$jobStartTime = microtime( true );
			try {
				++$jobsPopped;
				$status = $job->run();
				$error = $job->getLastError();
				$this->commitMasterChanges( $job );

				DeferredUpdates::doUpdates();
				$this->commitMasterChanges( $job );
			} catch ( Exception $e ) {
				MWExceptionHandler::rollbackMasterChangesAndLog( $e );
				$status = false;
				$error = get_class( $e ) . ': ' . $e->getMessage();
				MWExceptionHandler::logException( $e );
			}
			// Commit all outstanding connections that are in a transaction
			// to get a fresh repeatable read snapshot on every connection.
			// Note that jobs are still responsible for handling slave lag.
			wfGetLBFactory()->commitAll();
			// Clear out title cache data from prior snapshots
			LinkCache::singleton()->clear();
			$timeMs = intval( ( microtime( true ) - $jobStartTime ) * 1000 );
			$timeMsTotal += $timeMs;

			// Record how long jobs wait before getting popped
			$readyTs = $job->getReadyTimestamp();
			if ( $readyTs ) {
				$pickupDelay = $popTime - $readyTs;
				$stats->timing( 'jobqueue.pickup_delay.all', 1000 * $pickupDelay );
				$stats->timing( "jobqueue.pickup_delay.{$jType}", 1000 * $pickupDelay );
			}
			// Record root job age for jobs being run
			$root = $job->getRootJobParams();
			if ( $root['rootJobTimestamp'] ) {
				$age = $popTime - wfTimestamp( TS_UNIX, $root['rootJobTimestamp'] );
				$stats->timing( "jobqueue.pickup_root_age.{$jType}", 1000 * $age );
			}
			// Track the execution time for jobs
			$stats->timing( "jobqueue.run.{$jType}", $timeMs );

			// Mark the job as done on success or when the job cannot be retried
			if ( $status !== false || !$job->allowRetries() ) {
				$group->ack( $job ); // done
			}

			// Back off of certain jobs for a while (for throttling and for errors)
			if ( $status === false && mt_rand( 0, 49 ) == 0 ) {
				$ttw = max( $ttw, 30 ); // too many errors
				$backoffDeltas[$jType] = isset( $backoffDeltas[$jType] )
					? $backoffDeltas[$jType] + $ttw
					: $ttw;
			}

			if ( $status === false ) {
				$msg = $job->toString() . " t={$timeMs} error={$error}";
				$this->logger->error( $msg );
				$this->debugCallback( $msg );
			} else {
				$msg = $job->toString() . " t={$timeMs} good";
				$this->logger->info( $msg );
				$this->debugCallback( $msg );
			}

			$response['jobs'][] = array(
				'type' => $jType,
				'status' => ( $status === false ) ? 'failed' : 'ok',
				'error' => $error,
				'time' => $timeMs
			);

			// Break out if we hit the job count or wall time limits...
			if ( $maxJobs && $jobsPopped >= $maxJobs ) {
				$response['reached'] = 'job-limit';
				break;
			} elseif ( $maxTime && ( microtime( true ) - $startTime ) > $maxTime ) {
				$response['reached'] = 'time-limit';
				break;
			}

			// Don't let any of the main DB slaves get backed up.
			// This only waits for so long before exiting and letting
			// other wikis in the farm (on different masters) get a chance.
			$timePassed = microtime( true ) - $lastCheckTime;
			if ( $timePassed >= $checkLagPeriod || $timePassed < 0 ) {
				if ( !wfWaitForSlaves( $lastCheckTime, false, '*', $maxAllowedLag ) ) {
					$response['reached'] = 'slave-lag-limit';
					break;
				}
				$lastCheckTime = microtime( true );
			}
			// Don't let any queue slaves/backups fall behind
			if ( $jobsPopped > 0 && ( $jobsPopped % 100 ) == 0 ) {
				$group->waitForBackups();
			}

			// Bail if near-OOM instead of in a job
			if ( !$this->checkMemoryOK() ) {
				$response['reached'] = 'memory-limit';
				break;
			}
		}
	} while ( $job ); // stop when there are no jobs

	// Sync the persistent backoffs for the next runJobs.php pass
	if ( $backoffDeltas ) {
		$this->syncBackoffDeltas( $backoffs, $backoffDeltas, 'wait' );
	}

	$response['backoffs'] = $backoffs;
	$response['elapsed'] = $timeMsTotal;

	return $response;
}
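
// Usage sketch (illustrative): run up to 100 jobs of any default type for at
// most 30 wall-clock seconds and report why the runner stopped. Passing a
// logger to the JobRunner constructor is an assumption about the surrounding
// class, not something shown in the source.
$runner = new JobRunner( LoggerFactory::getInstance( 'runJobs' ) );
$summary = $runner->run( array(
	'type' => false,
	'maxJobs' => 100,
	'maxTime' => 30,
	'throttle' => true,
) );
echo "Stopped because: {$summary['reached']}\n";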
/**
 * @depends testClearQueue
 */
public function testSyncDownload( $data ) {
	$token = $this->user->getEditToken();

	$job = JobQueueGroup::singleton()->pop();
	$this->assertFalse( $job, 'Starting with an empty jobqueue' );

	$this->user->addGroup( 'users' );
	$data = $this->doApiRequest( array(
		'action' => 'upload',
		'filename' => 'UploadFromUrlTest.png',
		'url' => 'http://upload.wikimedia.org/wikipedia/mediawiki/b/bc/Wiki.png',
		'ignorewarnings' => true,
		'token' => $token,
	), $data );

	$job = JobQueueGroup::singleton()->pop();
	$this->assertFalse( $job );

	$this->assertEquals( 'Success', $data[0]['upload']['result'] );
	$this->deleteFile( 'UploadFromUrlTest.png' );

	return $data;
}
/**
 * Do a job from the job queue
 */
private function doJobs() {
	global $wgJobRunRate, $wgPhpCli, $IP;

	if ( $wgJobRunRate <= 0 || wfReadOnly() ) {
		return;
	}

	if ( $wgJobRunRate < 1 ) {
		$max = mt_getrandmax();
		if ( mt_rand( 0, $max ) > $max * $wgJobRunRate ) {
			return; // the higher $wgJobRunRate, the less likely we return here
		}
		$n = 1;
	} else {
		$n = intval( $wgJobRunRate );
	}

	if ( !wfShellExecDisabled() && is_executable( $wgPhpCli ) ) {
		// Start a background process to run some of the jobs.
		// This will be asynchronous on *nix though not on Windows.
		wfProfileIn( __METHOD__ . '-exec' );
		$retVal = 1;
		$cmd = wfShellWikiCmd( "{$IP}/maintenance/runJobs.php", array( '--maxjobs', $n ) );
		wfShellExec( "{$cmd} &", $retVal );
		wfProfileOut( __METHOD__ . '-exec' );
	} else {
		// Fallback to running the jobs here while the user waits
		$group = JobQueueGroup::singleton();
		do {
			// Pop a job from any default queue, using the queue-type cache.
			// (The original passed USE_CACHE as the first argument, where the
			// queue *type* belongs; that only worked because its value happens
			// to equal TYPE_DEFAULT.)
			$job = $group->pop( JobQueueGroup::TYPE_DEFAULT, JobQueueGroup::USE_CACHE );
			if ( $job ) {
				$output = $job->toString() . "\n";
				$t = -microtime( true );
				wfProfileIn( __METHOD__ . '-' . get_class( $job ) );
				$success = $job->run();
				wfProfileOut( __METHOD__ . '-' . get_class( $job ) );
				$group->ack( $job ); // done
				$t += microtime( true );
				$t = round( $t * 1000 );
				if ( $success === false ) {
					$output .= "Error: " . $job->getLastError() . ", Time: {$t} ms\n";
				} else {
					$output .= "Success, Time: {$t} ms\n";
				}
				wfDebugLog( 'jobqueue', $output );
			}
		} while ( --$n && $job );
	}
}
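
// Configuration sketch: $wgJobRunRate is the average number of jobs run per
// web request. doJobs() above interprets fractional values probabilistically,
// so 0.01 runs one job on roughly every hundredth request; 0 (or read-only
// mode) disables in-request job execution entirely.
$wgJobRunRate = 0.01;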
protected function doResolveRequest( $approved, $data ) {
	$request = GlobalRenameRequest::newFromId( $data['rid'] );
	$oldUser = User::newFromName( $request->getName() );

	if ( $request->userIsGlobal() || $request->getWiki() === wfWikiID() ) {
		$notifyEmail = MailAddress::newFromUser( $oldUser );
	} else {
		$notifyEmail = $this->getRemoteUserMailAddress(
			$request->getWiki(), $request->getName() );
	}

	$newUser = User::newFromName( $request->getNewName(), 'creatable' );
	$status = new Status();
	$session = $this->getContext()->exportSession();
	if ( $approved ) {
		if ( $request->userIsGlobal() ) {
			// Trigger a global rename job
			$globalRenameUser = new GlobalRenameUser(
				$this->getUser(),
				$oldUser,
				CentralAuthUser::getInstance( $oldUser ),
				$newUser,
				CentralAuthUser::getInstance( $newUser ),
				new GlobalRenameUserStatus( $newUser->getName() ),
				'JobQueueGroup::singleton',
				new GlobalRenameUserDatabaseUpdates(),
				new GlobalRenameUserLogger( $this->getUser() ),
				$session
			);
			$status = $globalRenameUser->rename( $data );
		} else {
			// If the user is local-only:
			// * rename the local user using LocalRenameUserJob
			// * create a global user attached only to the local wiki
			$job = new LocalRenameUserJob(
				Title::newFromText( 'Global rename job' ),
				array(
					'from' => $oldUser->getName(),
					'to' => $newUser->getName(),
					'renamer' => $this->getUser()->getName(),
					'movepages' => true,
					'suppressredirects' => true,
					'promotetoglobal' => true,
					'reason' => $data['reason'],
					'session' => $session,
				)
			);
			JobQueueGroup::singleton( $request->getWiki() )->push( $job );
			// Now log it
			$this->logPromotionRename(
				$oldUser->getName(),
				$request->getWiki(),
				$newUser->getName(),
				$data['reason']
			);
			$status = Status::newGood();
		}
	}

	if ( $status->isGood() ) {
		$request->setStatus(
			$approved ? GlobalRenameRequest::APPROVED : GlobalRenameRequest::REJECTED
		);
		$request->setCompleted( wfTimestampNow() );
		$request->setPerformer(
			CentralAuthUser::getInstance( $this->getUser() )->getId()
		);
		$request->setComments( $data['comments'] );

		if ( $request->save() ) {
			// Send email to the user about the change in status.
			if ( $approved ) {
				$subject = $this->msg(
					'globalrenamequeue-email-subject-approved'
				)->inContentLanguage()->text();
				$body = $this->msg(
					'globalrenamequeue-email-body-approved',
					array( $oldUser->getName(), $newUser->getName() )
				)->inContentLanguage()->text();
			} else {
				$subject = $this->msg(
					'globalrenamequeue-email-subject-rejected'
				)->inContentLanguage()->text();
				$body = $this->msg(
					'globalrenamequeue-email-body-rejected',
					array(
						$oldUser->getName(),
						$newUser->getName(),
						$request->getComments(),
					)
				)->inContentLanguage()->text();
			}

			if ( $notifyEmail !== null && $notifyEmail->address ) {
				$type = $approved ? 'approval' : 'rejection';
				wfDebugLog(
					'CentralAuthRename',
					"Sending {$type} email to User:{$oldUser->getName()}/{$notifyEmail->address}"
				);
				$this->sendNotificationEmail( $notifyEmail, $subject, $body );
			}
		} else {
			$status->fatal( 'globalrenamequeue-request-savefailed' );
		}
	}
	return $status;
}
public static function createPageWithForm( $title, $formName ) {
	global $sfgFormPrinter;

	$formTitle = Title::makeTitleSafe( SF_NS_FORM, $formName );
	$formDefinition = SFUtils::getPageText( $formTitle );
	$preloadContent = null;

	// Allow outside code to set/change the preloaded text.
	Hooks::run( 'sfEditFormPreloadText', array( &$preloadContent, $title, $formTitle ) );

	list( $formText, $javascriptText, $dataText, $formPageTitle, $generatedPageName ) =
		$sfgFormPrinter->formHTML(
			$formDefinition, false, false, null, $preloadContent,
			'Some very long page name that will hopefully never get created ABCDEF123',
			null
		);
	$params = array();

	// Get user "responsible" for all auto-generated
	// pages from red links.
	$userID = 1;
	global $sfgAutoCreateUser;
	if ( !is_null( $sfgAutoCreateUser ) ) {
		$user = User::newFromName( $sfgAutoCreateUser );
		if ( !is_null( $user ) ) {
			$userID = $user->getId();
		}
	}
	$params['user_id'] = $userID;
	$params['page_text'] = $dataText;

	$job = new SFCreatePageJob( $title, $params );
	$jobs = array( $job );
	if ( class_exists( 'JobQueueGroup' ) ) {
		// MW 1.21+
		JobQueueGroup::singleton()->push( $jobs );
	} else {
		// MW <= 1.20
		Job::batchInsert( $jobs );
	}
}
/**
 * Opportunistically enqueue link update jobs given fresh parser output if useful
 *
 * @param ParserOutput $parserOutput Current version page output
 * @since 1.25
 */
public function triggerOpportunisticLinksUpdate( ParserOutput $parserOutput ) {
	if ( wfReadOnly() ) {
		return;
	}

	if ( !Hooks::run( 'OpportunisticLinksUpdate', [ $this, $this->mTitle, $parserOutput ] ) ) {
		return;
	}

	$config = RequestContext::getMain()->getConfig();
	$params = [
		'isOpportunistic' => true,
		'rootJobTimestamp' => $parserOutput->getCacheTime()
	];

	if ( $this->mTitle->areRestrictionsCascading() ) {
		// If the page is cascade protecting, the links should really be up-to-date
		JobQueueGroup::singleton()->lazyPush(
			RefreshLinksJob::newPrioritized( $this->mTitle, $params )
		);
	} elseif ( !$config->get( 'MiserMode' ) && $parserOutput->hasDynamicContent() ) {
		// Assume the output contains "dynamic" time/random based magic words.
		// Only update pages that expired due to dynamic content and NOT due to edits
		// to referenced templates/files. When the cache expires due to dynamic content,
		// page_touched is unchanged. We want to avoid triggering redundant jobs due to
		// views of pages that were just purged via HTMLCacheUpdateJob. In that case, the
		// template/file edit already triggered recursive RefreshLinksJob jobs.
		if ( $this->getLinksTimestamp() > $this->getTouched() ) {
			// If a page is uncacheable, do not keep spamming a job for it.
			// Although it would be de-duplicated, it would still waste I/O.
			$cache = ObjectCache::getLocalClusterInstance();
			$key = $cache->makeKey( 'dynamic-linksupdate', 'last', $this->getId() );
			$ttl = max( $parserOutput->getCacheExpiry(), 3600 );
			if ( $cache->add( $key, time(), $ttl ) ) {
				JobQueueGroup::singleton()->lazyPush(
					RefreshLinksJob::newDynamic( $this->mTitle, $params )
				);
			}
		}
	}
}
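
// The $cache->add() gate above is a general "at most once per TTL" idiom:
// add() only succeeds when the key is absent, so concurrent requests race to
// create it and exactly one wins. A minimal standalone sketch, assuming any
// BagOStuff instance and hypothetical $taskId/$ttl values:
$cache = ObjectCache::getLocalClusterInstance();
$key = $cache->makeKey( 'some-task', 'last', $taskId );
if ( $cache->add( $key, time(), $ttl ) ) {
	// First caller within $ttl seconds: safe to enqueue the job exactly once
}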
private function runAllRelatedJobs() {
	$queueGroup = JobQueueGroup::singleton();
	while ( $job = $queueGroup->pop( 'refreshLinksPrioritized' ) ) {
		$job->run();
		$queueGroup->ack( $job );
	}
	while ( $job = $queueGroup->pop( 'categoryMembershipChange' ) ) {
		$job->run();
		$queueGroup->ack( $job );
	}
}
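
// Test-helper sketch (an illustrative generalization of the method above, not
// from the source): drain every queued job of one type. pop() returns false
// once the queue is empty, ending the loop; ack() marks each popped job done.
private function runAllJobsOfType( $type ) {
	$queueGroup = JobQueueGroup::singleton();
	while ( $job = $queueGroup->pop( $type ) ) {
		$job->run();
		$queueGroup->ack( $job );
	}
}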