/**
 * Test the type getter/setter pair: the default type is returned, the
 * setter is fluent (returns the entity), and the new value is read back.
 */
public function testGetSetType() {
	$this->assertEquals( self::TYPE, $this->jobInstance->getType() );

	$newType = 'import';
	$this->assertEntity( $this->jobInstance->setType( $newType ) );
	$this->assertEquals( $newType, $this->jobInstance->getType() );
}
/**
 * Get a duplicate no-op version of a job
 *
 * @param Job $job
 * @return Job
 */
public static function newFromJob( Job $job ) {
	$copy = new self( $job->getTitle(), $job->getParams() );
	$copy->command = $job->getType();
	// Normalize params to an array before merging in the duplicate marker
	if ( !is_array( $copy->params ) ) {
		$copy->params = array();
	}
	// '+' keeps any existing 'isDuplicate' key from being overwritten twice;
	// the marker is what makes this job a no-op when run
	$copy->params = array( 'isDuplicate' => true ) + $copy->params;
	$copy->metadata = $job->metadata;

	return $copy;
}
/**
 * @param Job $job
 * @return int Seconds for this runner to avoid doing more jobs of this type
 * @see $wgJobBackoffThrottling
 */
private function getBackoffTimeToWait( Job $job ) {
	global $wgJobBackoffThrottling;

	$type = $job->getType();
	// Duplicate (no-op) jobs are never throttled, nor are unconfigured types
	if ( $job instanceof DuplicateJob || !isset( $wgJobBackoffThrottling[$type] ) ) {
		return 0; // not throttled
	}

	$itemsPerSecond = $wgJobBackoffThrottling[$type];
	if ( $itemsPerSecond <= 0 ) {
		return 0; // not throttled
	}

	$delay = 0;
	if ( $job->workItemCount() > 0 ) {
		$exactSeconds = $job->workItemCount() / $itemsPerSecond;
		// Randomized rounding: round up with probability equal to the
		// fractional part, so the expected wait matches the exact value
		$delay = floor( $exactSeconds );
		$fraction = $exactSeconds - $delay;
		if ( mt_rand() / mt_getrandmax() < $fraction ) {
			$delay += 1;
		}
	}

	return (int)$delay;
}
/**
 * Acknowledge (delete) a completed job row.
 *
 * Flushes any pending transaction and temporarily disables DBO_TRX so the
 * DELETE commits immediately, avoiding row locks being held over round trips.
 *
 * @see JobQueue::doAck()
 * @param Job $job
 * @throws MWException If the job carries no 'id' metadata (was never claimed from this queue)
 * @return Job|bool
 */
protected function doAck(Job $job) {
	if (!isset($job->metadata['id'])) {
		throw new MWException("Job of type '{$job->getType()}' has no ID.");
	}
	$dbw = $this->getMasterDB();
	try {
		$dbw->commit(__METHOD__, 'flush'); // flush existing transaction
		$autoTrx = $dbw->getFlag(DBO_TRX); // get current setting
		$dbw->clearFlag(DBO_TRX); // make each query its own transaction
		// The ScopedCallback restores the saved DBO_TRX setting when
		// $scopedReset goes out of scope (i.e. on return or exception) —
		// do not remove the seemingly-unused variable.
		$scopedReset = new ScopedCallback(function () use($dbw, $autoTrx) {
			$dbw->setFlag($autoTrx ? DBO_TRX : 0); // restore old setting
		});
		// Delete a row with a single DELETE without holding row locks over RTTs...
		$dbw->delete('job',
			array('job_cmd' => $this->type, 'job_id' => $job->metadata['id']),
			__METHOD__);
		JobQueue::incrStats('acks', $this->type);
	} catch (DBError $e) {
		// throwDBException() rethrows as a queue-level error
		$this->throwDBException($e);
	}
	return true;
}
/**
 * Check if the "root" job of a given job has been superseded by a newer one
 *
 * @param Job $job
 * @throws MWException If the job type does not match this queue's type
 * @return bool
 */
protected final function isRootJobOldDuplicate( Job $job ) {
	// Guard: this queue only handles jobs of its own type
	if ( $this->type !== $job->getType() ) {
		throw new MWException( "Got '{$job->getType()}' job; expected '{$this->type}'." );
	}

	wfProfileIn( __METHOD__ );
	$result = $this->doIsRootJobOldDuplicate( $job );
	wfProfileOut( __METHOD__ );

	return $result;
}
/**
 * Build the row of job table fields for the given job.
 *
 * @param $job Job
 * @return array
 */
protected function insertFields(Job $job) {
	// getMasterDB() returns the handle plus a scope guard in this variant
	list($dbw, $scope) = $this->getMasterDB();
	$title = $job->getTitle();

	return array(
		// Fields describing the nature of the job
		'job_cmd' => $job->getType(),
		'job_namespace' => $title->getNamespace(),
		'job_title' => $title->getDBkey(),
		'job_params' => self::makeBlob($job->getParams()),
		// Additional job metadata
		'job_id' => $dbw->nextSequenceValue('job_job_id_seq'),
		'job_timestamp' => $dbw->timestamp(),
		// Base-36 SHA-1 of the de-duplication info, truncated to 31 chars
		'job_sha1' => wfBaseConvert(
			sha1(serialize($job->getDeduplicationInfo())), 16, 36, 31),
		'job_random' => mt_rand(0, self::MAX_JOB_RANDOM)
	);
}
/**
 * Register the "root job" of a given job into the queue for de-duplication.
 * This should only be called right *after* all the new jobs have been inserted.
 * This is used to turn older, duplicate, job entries into no-ops. The root job
 * information will remain in the registry until it simply falls out of cache.
 *
 * This requires that $job has two special fields in the "params" array:
 *   - rootJobSignature : hash (e.g. SHA1) that identifies the task
 *   - rootJobTimestamp : TS_MW timestamp of this instance of the task
 *
 * A "root job" is a conceptual task (e.g. "update links of pages that use
 * template X") whose each instance fans out into many concrete queue entries.
 * For example, editing a template spawns many refreshLinks2 jobs that carry
 * page ID ranges and DB master positions and recursively morph into smaller
 * jobs, so per-job duplicate detection (like job_sha1) is not useful there.
 *
 * When the template is edited again while old refreshLinks jobs are still
 * queued, those jobs belong to a superseded root job and should become no-ops,
 * which can greatly reduce server load since refreshLinks involves parsing.
 *
 * This does nothing for certain queue classes.
 *
 * @param $job Job
 * @return bool
 * @throws MWException If the job type does not match this queue's type
 */
public final function deduplicateRootJob( Job $job ) {
	// Guard: this queue only handles jobs of its own type
	if ( $this->type !== $job->getType() ) {
		throw new MWException( "Got '{$job->getType()}' job; expected '{$this->type}'." );
	}

	wfProfileIn( __METHOD__ );
	$registered = $this->doDeduplicateRootJob( $job );
	wfProfileOut( __METHOD__ );

	return $registered;
}
/**
 * Acknowledge (delete) a completed job from the Redis-backed queue.
 *
 * Runs a single atomic Lua script that removes the job's claim marker,
 * attempt counter, and payload in one round trip.
 *
 * @see JobQueue::doAck()
 * @param Job $job
 * @return Job|bool
 * @throws UnexpectedValueException If the job carries no 'uuid' metadata
 * @throws JobQueueError
 */
protected function doAck(Job $job) {
	if (!isset($job->metadata['uuid'])) {
		throw new UnexpectedValueException("Job of type '{$job->getType()}' has no UUID.");
	}
	$uuid = $job->metadata['uuid'];
	$conn = $this->getConnection();
	try {
		// Atomic cleanup: runs server-side so no other client can observe
		// a half-acknowledged job between these three operations
		static $script = <<<LUA
\t\t\tlocal kClaimed, kAttempts, kData = unpack(KEYS)
\t\t\tlocal uuid = unpack(ARGV)
\t\t\t-- Unmark the job as claimed
\t\t\tredis.call('zRem',kClaimed,uuid)
\t\t\tredis.call('hDel',kAttempts,uuid)
\t\t\t-- Delete the job data itself
\t\t\treturn redis.call('hDel',kData,uuid)
LUA;
		$res = $conn->luaEval($script,
			array(
				$this->getQueueKey('z-claimed'), // KEYS[1]
				$this->getQueueKey('h-attempts'), // KEYS[2]
				$this->getQueueKey('h-data'), // KEYS[3]
				$uuid // ARGV[1]
			),
			3 // number of KEYS entries
		);
		// hDel returns 0 if the data row was already gone
		if (!$res) {
			wfDebugLog('JobQueueRedis', "Could not acknowledge {$this->type} job {$uuid}.");
			return false;
		}
		JobQueue::incrStats('acks', $this->type);
	} catch (RedisException $e) {
		// throwRedisException() rethrows as a queue-level error
		$this->throwRedisException($conn, $e);
	}
	return true;
}
/**
 * @param Job $job
 * @return integer Seconds for this runner to avoid doing more jobs of this type
 * @see $wgJobBackoffThrottling
 */
private function getBackoffTimeToWait(Job $job) {
	global $wgJobBackoffThrottling;

	if (!isset($wgJobBackoffThrottling[$job->getType()])) {
		return 0; // not throttled
	}

	$itemsPerSecond = $wgJobBackoffThrottling[$job->getType()];
	if ($itemsPerSecond <= 0) {
		return 0; // not throttled
	}

	$seconds = 0;
	if ($job->workItemCount() > 0) {
		// Use randomized rounding on the exact quotient: round up with
		// probability equal to the fractional part, so the expected wait
		// equals workItemCount / itemsPerSecond. Unlike the old integer
		// modulo + mt_rand(1, $itemsPerSecond) approach, this also works
		// when $itemsPerSecond is fractional (e.g. 0.5 items/sec), where
		// mt_rand() would be given an invalid integer range.
		$exactSeconds = $job->workItemCount() / $itemsPerSecond;
		$seconds = floor($exactSeconds);
		$remainder = $exactSeconds - $seconds;
		$seconds += mt_rand() / mt_getrandmax() < $remainder ? 1 : 0;
	}

	return (int) $seconds;
}
/**
 * Build the associative field array stored for a newly pushed job.
 *
 * @param $job Job
 * @return array
 */
protected function getNewJobFields(Job $job) {
	$title = $job->getTitle();

	// Fields describing the nature of the job
	$fields = array();
	$fields['type'] = $job->getType();
	$fields['namespace'] = $title->getNamespace();
	$fields['title'] = $title->getDBkey();
	$fields['params'] = $job->getParams();
	// Jobs with a "release timestamp" cannot run before that time
	$fields['rtimestamp'] = $job->getReleaseTimestamp() ?: 0;
	// Additional job metadata
	$fields['uuid'] = UIDGenerator::newRawUUIDv4(UIDGenerator::QUICK_RAND);
	if ($job->ignoreDuplicates()) {
		// Base-36 SHA-1 of the de-duplication info, truncated to 31 chars
		$fields['sha1'] = wfBaseConvert(
			sha1(serialize($job->getDeduplicationInfo())), 16, 36, 31);
	} else {
		$fields['sha1'] = '';
	}
	$fields['timestamp'] = time(); // UNIX timestamp

	return $fields;
}
/**
 * Build the associative field array stored for a newly pushed job.
 *
 * @param $job Job
 * @return array
 */
protected function getNewJobFields( Job $job ) {
	$title = $job->getTitle();

	// Only de-duplicatable jobs carry a content hash; others store ''
	$sha1 = '';
	if ( $job->ignoreDuplicates() ) {
		$sha1 = wfBaseConvert(
			sha1( serialize( $job->getDeduplicationInfo() ) ), 16, 36, 31 );
	}

	return array(
		// Fields that describe the nature of the job
		'type' => $job->getType(),
		'namespace' => $title->getNamespace(),
		'title' => $title->getDBkey(),
		'params' => $job->getParams(),
		// Some jobs cannot run until a "release timestamp"
		'rtimestamp' => $job->getReleaseTimestamp() ?: 0,
		// Additional job metadata
		'uuid' => UIDGenerator::newRawUUIDv4( UIDGenerator::QUICK_RAND ),
		'sha1' => $sha1,
		'timestamp' => time() // UNIX timestamp
	);
}
/**
 * Build the row of job table fields for the given job.
 *
 * @param $job Job
 * @return array
 */
protected function insertFields( Job $job ) {
	$dbw = $this->getMasterDB();
	$title = $job->getTitle();

	// Fields that describe the nature of the job
	$row = array();
	$row['job_cmd'] = $job->getType();
	$row['job_namespace'] = $title->getNamespace();
	$row['job_title'] = $title->getDBkey();
	$row['job_params'] = self::makeBlob( $job->getParams() );
	// Additional job metadata
	$row['job_id'] = $dbw->nextSequenceValue( 'job_job_id_seq' );
	$row['job_timestamp'] = $dbw->timestamp();
	// Base-36 SHA-1 of the de-duplication info, truncated to 31 chars
	$row['job_sha1'] = wfBaseConvert(
		sha1( serialize( $job->getDeduplicationInfo() ) ), 16, 36, 31 );
	$row['job_random'] = mt_rand( 0, self::MAX_JOB_RANDOM );

	return $row;
}
/**
 * Build the row of job table fields for the given job, with job_random
 * chosen according to the queue's configured pop order.
 *
 * @param $job Job
 * @return array
 */
protected function insertFields(Job $job) {
	$title = $job->getTitle();

	// Rows that describe the nature of the job
	$jobFields = array(
		'job_cmd' => $job->getType(),
		'job_namespace' => $title->getNamespace(),
		'job_title' => $title->getDBkey(),
		'job_params' => self::makeBlob($job->getParams())
	);

	// Additional job metadata: 'timestamp' order pops oldest first by using
	// elapsed seconds as the sort key; otherwise pop in random order
	if ($this->order === 'timestamp') { // oldest first
		$random = time() - 1325376000; // seconds since "January 1, 2012"
	} else { // random first
		$random = mt_rand(0, self::MAX_JOB_RANDOM);
	}

	$dbw = $this->getMasterDB();
	$metaFields = array(
		'job_id' => $dbw->nextSequenceValue('job_job_id_seq'),
		'job_timestamp' => $dbw->timestamp(),
		// Base-36 SHA-1 of the descriptive fields, truncated to 32 chars
		'job_sha1' => wfBaseConvert(sha1(serialize($jobFields)), 16, 36, 32),
		'job_random' => $random
	);

	// '+' keeps the descriptive fields' values if any key were duplicated
	return $jobFields + $metaFields;
}