public function execute() {
	if (wfReadOnly()) {
		$this->error("Unable to run jobs; the wiki is in read-only mode.", 1); // die
	}
	if ($this->hasOption('procs')) {
		$procs = intval($this->getOption('procs'));
		if ($procs < 1 || $procs > 1000) {
			$this->error("Invalid argument to --procs", true);
		} elseif ($procs != 1) {
			$fc = new ForkController($procs);
			if ($fc->start() != 'child') {
				exit(0);
			}
		}
	}
	$json = $this->getOption('result') === 'json';

	$runner = new JobRunner();
	if (!$json) {
		$runner->setDebugHandler(array($this, 'debugInternal'));
	}
	$response = $runner->run(array(
		'type'     => $this->getOption('type', false),
		'maxJobs'  => $this->getOption('maxjobs', false),
		'maxTime'  => $this->getOption('maxtime', false),
		'throttle' => $this->hasOption('nothrottle') ? false : true,
	));
	if ($json) {
		$this->output(FormatJson::encode($response, true));
	}
}
public function execute() {
	global $wgCommandLineMode;

	if ($this->hasOption('procs')) {
		$procs = intval($this->getOption('procs'));
		if ($procs < 1 || $procs > 1000) {
			$this->error("Invalid argument to --procs", true);
		} elseif ($procs != 1) {
			$fc = new ForkController($procs);
			if ($fc->start() != 'child') {
				exit(0);
			}
		}
	}
	$outputJSON = $this->getOption('result') === 'json';

	// Enable DBO_TRX for atomicity; JobRunner manages transactions
	// and works well in web server mode already (@TODO: this is a hack)
	$wgCommandLineMode = false;

	$runner = new JobRunner(LoggerFactory::getInstance('runJobs'));
	if (!$outputJSON) {
		$runner->setDebugHandler(array($this, 'debugInternal'));
	}
	$response = $runner->run(array(
		'type'     => $this->getOption('type', false),
		'maxJobs'  => $this->getOption('maxjobs', false),
		'maxTime'  => $this->getOption('maxtime', false),
		'throttle' => $this->hasOption('nothrottle') ? false : true,
	));
	if ($outputJSON) {
		$this->output(FormatJson::encode($response, true));
	}

	$wgCommandLineMode = true;
}
/**
 * {@inheritDoc}
 */
public function start() {
	if ($this->procsToStart > 0) {
		// Fork the requested number of worker processes; only the
		// child processes go on to consume work.
		$status = parent::start();
		if ($status === 'child') {
			$this->consume();
		}
	} else {
		// No forking requested: consume work in the current process.
		$this->consume();
	}
}
public function execute() {
	global $wgTitle;

	// Fork into multiple worker processes if --procs was given
	if ( $this->hasOption( 'procs' ) ) {
		$procs = intval( $this->getOption( 'procs' ) );
		if ( $procs < 1 || $procs > 1000 ) {
			$this->error( "Invalid argument to --procs", true );
		}
		$fc = new ForkController( $procs );
		if ( $fc->start() != 'child' ) {
			exit( 0 );
		}
	}
	$maxJobs = $this->getOption( 'maxjobs', false );
	$maxTime = $this->getOption( 'maxtime', false );
	$startTime = time();
	$type = $this->getOption( 'type', false );
	$wgTitle = Title::newFromText( 'RunJobs.php' );
	$dbw = wfGetDB( DB_MASTER );
	$n = 0;
	$conds = '';
	if ( $type !== false ) {
		$conds = "job_cmd = " . $dbw->addQuotes( $type );
	}

	// Keep popping and running jobs until the queue is empty or a limit is hit
	while ( $dbw->selectField( 'job', 'job_id', $conds, 'runJobs.php' ) ) {
		$offset = 0;
		for ( ; ; ) {
			$job = !$type ? Job::pop( $offset ) : Job::pop_type( $type );
			if ( !$job ) {
				break;
			}
			wfWaitForSlaves( 5 );
			$t = microtime( true );
			$offset = $job->id;
			$status = $job->run();
			$t = microtime( true ) - $t;
			$timeMs = intval( $t * 1000 );
			if ( !$status ) {
				$this->runJobsLog( $job->toString() . " t=$timeMs error={$job->error}" );
			} else {
				$this->runJobsLog( $job->toString() . " t=$timeMs good" );
			}
			// Stop when the job-count or wall-clock limits are reached
			if ( $maxJobs && ++$n > $maxJobs ) {
				break 2;
			}
			if ( $maxTime && time() - $startTime > $maxTime ) {
				break 2;
			}
		}
	}
}
public function execute() {
	global $wgTitle;

	if ($this->hasOption('procs')) {
		$procs = intval($this->getOption('procs'));
		if ($procs < 1 || $procs > 1000) {
			$this->error("Invalid argument to --procs", true);
		}
		$fc = new ForkController($procs);
		if ($fc->start() != 'child') {
			exit(0);
		}
	}
	$maxJobs = $this->getOption('maxjobs', false);
	$maxTime = $this->getOption('maxtime', false);
	$startTime = time();
	$type = $this->getOption('type', false);
	$wgTitle = Title::newFromText('RunJobs.php');
	$dbw = wfGetDB(DB_MASTER);
	$n = 0;
	$conds = '';
	if ($type !== false) {
		$conds = "job_cmd = " . $dbw->addQuotes($type);
	}

	while ($dbw->selectField('job', 'job_id', $conds, 'runJobs.php')) {
		$offset = 0;
		for (;;) {
			$job = !$type ? Job::pop($offset) : Job::pop_type($type);
			if (!$job) {
				break;
			}
			// Delete similar (but not identical; differing timestamps are allowed) jobs
			$dbw->delete('job', array(
				'job_cmd'   => $job->command,
				'job_title' => $job->title->getDBkey()
			), __METHOD__);
			wfWaitForSlaves();
			$t = microtime(true);
			$offset = $job->id;
			$status = $job->run();
			$t = microtime(true) - $t;
			$timeMs = intval($t * 1000);
			if (!$status) {
				$this->runJobsLog($job->toString() . " t={$timeMs} error={$job->error}");
			} else {
				$this->runJobsLog($job->toString() . " t={$timeMs} good");
			}
			if ($maxJobs && ++$n > $maxJobs) {
				break 2;
			}
			if ($maxTime && time() - $startTime > $maxTime) {
				break 2;
			}
		}
	}
}
public function execute() {
	global $wgTitle;

	if ($this->hasOption('procs')) {
		$procs = intval($this->getOption('procs'));
		if ($procs < 1 || $procs > 1000) {
			$this->error("Invalid argument to --procs", true);
		}
		$fc = new ForkController($procs);
		if ($fc->start() != 'child') {
			exit(0);
		}
	}
	$maxJobs = $this->getOption('maxjobs', false);
	$maxTime = $this->getOption('maxtime', false);
	$startTime = time();
	$type = $this->getOption('type', false);
	$wgTitle = Title::newFromText('RunJobs.php');
	$dbw = wfGetDB(DB_MASTER);
	$n = 0;

	$group = JobQueueGroup::singleton();
	do {
		$job = $type === false
			? $group->pop()
			: $group->get($type)->pop(); // job from a single queue
		if ($job) { // found a job
			// Perform the job (logging success/failure and runtime)...
			$t = microtime(true);
			$this->runJobsLog($job->toString() . " STARTING");
			$status = $job->run();
			$group->ack($job); // done
			$t = microtime(true) - $t;
			$timeMs = intval($t * 1000);
			if (!$status) {
				$this->runJobsLog($job->toString() . " t={$timeMs} error={$job->error}");
			} else {
				$this->runJobsLog($job->toString() . " t={$timeMs} good");
			}
			// Break out if we hit the job count or wall time limits...
			if ($maxJobs && ++$n >= $maxJobs) {
				break;
			}
			if ($maxTime && time() - $startTime > $maxTime) {
				break;
			}
			// Don't let any slaves/backups fall behind...
			// (use the type of the job that ran, since $type may be false)
			$group->get($job->getType())->waitForBackups();
		}
	} while ($job); // stop when there are no jobs
}
public function execute() {
	global $wgCommandLineMode;

	if ($this->hasOption('procs')) {
		$procs = intval($this->getOption('procs'));
		if ($procs < 1 || $procs > 1000) {
			$this->error("Invalid argument to --procs", true);
		} elseif ($procs != 1) {
			$fc = new ForkController($procs);
			if ($fc->start() != 'child') {
				exit(0);
			}
		}
	}

	$outputJSON = $this->getOption('result') === 'json';
	$wait = $this->hasOption('wait');

	// Enable DBO_TRX for atomicity; JobRunner manages transactions
	// and works well in web server mode already (@TODO: this is a hack)
	$wgCommandLineMode = false;

	$runner = new JobRunner(LoggerFactory::getInstance('runJobs'));
	if (!$outputJSON) {
		$runner->setDebugHandler([$this, 'debugInternal']);
	}

	$type = $this->getOption('type', false);
	$maxJobs = $this->getOption('maxjobs', false);
	$maxTime = $this->getOption('maxtime', false);
	$throttle = !$this->hasOption('nothrottle');

	while (true) {
		$response = $runner->run([
			'type'     => $type,
			'maxJobs'  => $maxJobs,
			'maxTime'  => $maxTime,
			'throttle' => $throttle,
		]);

		if ($outputJSON) {
			$this->output(FormatJson::encode($response, true));
		}

		if (!$wait
			|| $response['reached'] === 'time-limit'
			|| $response['reached'] === 'job-limit'
			|| $response['reached'] === 'memory-limit'
		) {
			break;
		}

		if ($maxJobs !== false) {
			$maxJobs -= count($response['jobs']);
		}

		sleep(1);
	}

	$wgCommandLineMode = true;
}
public function execute() {
	if ($this->hasOption('procs')) {
		$procs = intval($this->getOption('procs'));
		if ($procs < 1 || $procs > 1000) {
			$this->error("Invalid argument to --procs", true);
		} elseif ($procs != 1) {
			$fc = new ForkController($procs);
			if ($fc->start() != 'child') {
				exit(0);
			}
		}
	}

	$outputJSON = $this->getOption('result') === 'json';
	$wait = $this->hasOption('wait');

	$runner = new JobRunner(LoggerFactory::getInstance('runJobs'));
	if (!$outputJSON) {
		$runner->setDebugHandler([$this, 'debugInternal']);
	}

	$type = $this->getOption('type', false);
	$maxJobs = $this->getOption('maxjobs', false);
	$maxTime = $this->getOption('maxtime', false);
	$throttle = !$this->hasOption('nothrottle');

	while (true) {
		$response = $runner->run([
			'type'     => $type,
			'maxJobs'  => $maxJobs,
			'maxTime'  => $maxTime,
			'throttle' => $throttle,
		]);

		if ($outputJSON) {
			$this->output(FormatJson::encode($response, true));
		}

		if (!$wait
			|| $response['reached'] === 'time-limit'
			|| $response['reached'] === 'job-limit'
			|| $response['reached'] === 'memory-limit'
		) {
			break;
		}

		if ($maxJobs !== false) {
			$maxJobs -= count($response['jobs']);
		}

		sleep(1);
	}
}
<?php

$optionsWithArgs = array('fake-job', 'procs');
require dirname(__FILE__) . '/../commandLine.inc';
require dirname(__FILE__) . '/gearman.inc';

if (isset($options['procs'])) {
	$procs = $options['procs'];
	if ($procs < 1 || $procs > 1000) {
		echo "Invalid number of processes, please specify a number between 1 and 1000\n";
		exit(1);
	}
	$fc = new ForkController($procs, ForkController::RESTART_ON_ERROR);
	if ($fc->start() != 'child') {
		exit(0);
	}
}

if (!$args) {
	$args = array('localhost');
}

if (isset($options['fake-job'])) {
	$params = unserialize($options['fake-job']);
	MWGearmanJob::runNoSwitch($params);
}

$worker = new NonScaryGearmanWorker($args);
$worker->addAbility('mw_job');
$worker->beginWork('wfGearmanMonitor');

function wfGearmanMonitor($idle, $lastJob) {
	static $lastSleep = 0;
	$interval = 5;
	$now = time();
public function execute() {
	if (wfReadOnly()) {
		$this->error("Unable to run jobs; the wiki is in read-only mode.", 1); // die
	}
	if ($this->hasOption('procs')) {
		$procs = intval($this->getOption('procs'));
		if ($procs < 1 || $procs > 1000) {
			$this->error("Invalid argument to --procs", true);
		} elseif ($procs != 1) {
			$fc = new ForkController($procs);
			if ($fc->start() != 'child') {
				exit(0);
			}
		}
	}
	$type = $this->getOption('type', false);
	$maxJobs = $this->getOption('maxjobs', false);
	$maxTime = $this->getOption('maxtime', false);
	$noThrottle = $this->hasOption('nothrottle');
	$startTime = time();

	$group = JobQueueGroup::singleton();
	// Handle any required periodic queue maintenance
	$count = $group->executeReadyPeriodicTasks();
	if ($count > 0) {
		$this->runJobsLog("Executed {$count} periodic queue task(s).");
	}

	$backoffs = $this->loadBackoffs(); // map of (type => UNIX expiry)
	$startingBackoffs = $backoffs; // avoid unnecessary writes
	$backoffExpireFunc = function ($t) {
		return $t > time();
	};

	$jobsRun = 0; // counter
	$flags = JobQueueGroup::USE_CACHE;
	$lastTime = time(); // time since last slave check
	do {
		$backoffs = array_filter($backoffs, $backoffExpireFunc);
		$blacklist = $noThrottle ? array() : array_keys($backoffs);
		if ($type === false) {
			$job = $group->pop(JobQueueGroup::TYPE_DEFAULT, $flags, $blacklist);
		} elseif (in_array($type, $blacklist)) {
			$job = false; // requested queue in backoff state
		} else {
			$job = $group->pop($type); // job from a single queue
		}
		if ($job) { // found a job
			++$jobsRun;
			$this->runJobsLog($job->toString() . " STARTING");

			// Set timer to stop the job if too much CPU time is used
			set_time_limit($maxTime ?: 0);
			// Run the job...
			wfProfileIn(__METHOD__ . '-' . get_class($job));
			$t = microtime(true);
			try {
				$status = $job->run();
				$error = $job->getLastError();
			} catch (MWException $e) {
				MWExceptionHandler::rollbackMasterChangesAndLog($e);
				$status = false;
				$error = get_class($e) . ': ' . $e->getMessage();
				$e->report(); // write error to STDERR and the log
			}
			$timeMs = intval((microtime(true) - $t) * 1000);
			wfProfileOut(__METHOD__ . '-' . get_class($job));
			// Disable the timer
			set_time_limit(0);

			// Mark the job as done on success or when the job cannot be retried
			if ($status !== false || !$job->allowRetries()) {
				$group->ack($job); // done
			}

			if ($status === false) {
				$this->runJobsLog($job->toString() . " t={$timeMs} error={$error}");
			} else {
				$this->runJobsLog($job->toString() . " t={$timeMs} good");
			}

			// Back off of certain jobs for a while
			$ttw = $this->getBackoffTimeToWait($job);
			if ($ttw > 0) {
				$jType = $job->getType();
				$backoffs[$jType] = isset($backoffs[$jType]) ? $backoffs[$jType] : 0;
				$backoffs[$jType] = max($backoffs[$jType], time() + $ttw);
			}

			// Break out if we hit the job count or wall time limits...
			if ($maxJobs && $jobsRun >= $maxJobs) {
				break;
			} elseif ($maxTime && time() - $startTime > $maxTime) {
				break;
			}

			// Don't let any of the main DB slaves get backed up
			$timePassed = time() - $lastTime;
			if ($timePassed >= 5 || $timePassed < 0) {
				wfWaitForSlaves();
				$lastTime = time();
			}
			// Don't let any queue slaves/backups fall behind
			if ($jobsRun > 0 && $jobsRun % 100 == 0) {
				$group->waitForBackups();
			}
			// Bail if near-OOM instead of in a job
			$this->assertMemoryOK();
		}
	} while ($job); // stop when there are no jobs

	// Sync the persistent backoffs for the next runJobs.php pass
	$backoffs = array_filter($backoffs, $backoffExpireFunc);
	if ($backoffs !== $startingBackoffs) {
		$this->syncBackoffs($backoffs);
	}
}
public function execute() {
	global $wgTitle;

	if ( wfReadOnly() ) {
		$this->error( "Unable to run jobs; the wiki is in read-only mode.", 1 ); // die
	}
	if ( $this->hasOption( 'procs' ) ) {
		$procs = intval( $this->getOption( 'procs' ) );
		if ( $procs < 1 || $procs > 1000 ) {
			$this->error( "Invalid argument to --procs", true );
		} elseif ( $procs != 1 ) {
			$fc = new ForkController( $procs );
			if ( $fc->start() != 'child' ) {
				exit( 0 );
			}
		}
	}
	$maxJobs = $this->getOption( 'maxjobs', false );
	$maxTime = $this->getOption( 'maxtime', false );
	$startTime = time();
	$type = $this->getOption( 'type', false );
	$wgTitle = Title::newFromText( 'RunJobs.php' );
	$jobsRun = 0; // counter

	$group = JobQueueGroup::singleton();
	// Handle any required periodic queue maintenance
	$count = $group->executeReadyPeriodicTasks();
	if ( $count > 0 ) {
		$this->runJobsLog( "Executed $count periodic queue task(s)." );
	}

	$flags = JobQueueGroup::USE_CACHE | JobQueueGroup::USE_PRIORITY;
	$lastTime = time(); // time since last slave check
	do {
		$job = ( $type === false )
			? $group->pop( JobQueueGroup::TYPE_DEFAULT, $flags )
			: $group->pop( $type ); // job from a single queue
		if ( $job ) { // found a job
			++$jobsRun;
			$this->runJobsLog( $job->toString() . " STARTING" );

			// Set timer to stop the job if too much CPU time is used
			set_time_limit( $maxTime ?: 0 );
			// Run the job...
			wfProfileIn( __METHOD__ . '-' . get_class( $job ) );
			$t = microtime( true );
			try {
				$status = $job->run();
				$error = $job->getLastError();
			} catch ( MWException $e ) {
				$status = false;
				$error = get_class( $e ) . ': ' . $e->getMessage();
				$e->report(); // write error to STDERR and the log
			}
			$timeMs = intval( ( microtime( true ) - $t ) * 1000 );
			wfProfileOut( __METHOD__ . '-' . get_class( $job ) );
			// Disable the timer
			set_time_limit( 0 );

			// Mark the job as done on success or when the job cannot be retried
			if ( $status !== false || !$job->allowRetries() ) {
				$group->ack( $job ); // done
			}

			if ( $status === false ) {
				$this->runJobsLog( $job->toString() . " t=$timeMs error={$error}" );
			} else {
				$this->runJobsLog( $job->toString() . " t=$timeMs good" );
			}

			// Break out if we hit the job count or wall time limits...
			if ( $maxJobs && $jobsRun >= $maxJobs ) {
				break;
			} elseif ( $maxTime && ( time() - $startTime ) > $maxTime ) {
				break;
			}

			// Don't let any of the main DB slaves get backed up
			$timePassed = time() - $lastTime;
			if ( $timePassed >= 5 || $timePassed < 0 ) {
				wfWaitForSlaves();
				$lastTime = time();
			}
			// Don't let any queue slaves/backups fall behind
			if ( $jobsRun > 0 && ( $jobsRun % 100 ) == 0 ) {
				$group->waitForBackups();
			}
			// Bail if near-OOM instead of in a job
			$this->assertMemoryOK();
		}
	} while ( $job ); // stop when there are no jobs
}
public function execute() {
	global $wgTitle;

	if (wfReadOnly()) {
		$this->error("Unable to run jobs; the wiki is in read-only mode.", 1); // die
	}
	if ($this->hasOption('procs')) {
		$procs = intval($this->getOption('procs'));
		if ($procs < 1 || $procs > 1000) {
			$this->error("Invalid argument to --procs", true);
		}
		$fc = new ForkController($procs);
		if ($fc->start() != 'child') {
			exit(0);
		}
	}
	$maxJobs = $this->getOption('maxjobs', false);
	$maxTime = $this->getOption('maxtime', false);
	$startTime = time();
	$type = $this->getOption('type', false);
	$wgTitle = Title::newFromText('RunJobs.php');
	$dbw = wfGetDB(DB_MASTER);
	$jobsRun = 0; // counter

	$group = JobQueueGroup::singleton();
	// Handle any required periodic queue maintenance
	$count = $group->executeReadyPeriodicTasks();
	if ($count > 0) {
		$this->runJobsLog("Executed {$count} periodic queue task(s).");
	}

	$lastTime = time();
	do {
		$job = $type === false
			? $group->pop(JobQueueGroup::TYPE_DEFAULT, JobQueueGroup::USE_CACHE)
			: $group->pop($type); // job from a single queue
		if ($job) { // found a job
			++$jobsRun;
			$this->runJobsLog($job->toString() . " STARTING");

			// Run the job...
			$t = microtime(true);
			try {
				$status = $job->run();
				$error = $job->getLastError();
			} catch (MWException $e) {
				$status = false;
				$error = get_class($e) . ': ' . $e->getMessage();
			}
			$timeMs = intval((microtime(true) - $t) * 1000);

			// Mark the job as done on success or when the job cannot be retried
			if ($status !== false || !$job->allowRetries()) {
				$group->ack($job); // done
			}

			if (!$status) {
				$this->runJobsLog($job->toString() . " t={$timeMs} error={$error}");
			} else {
				$this->runJobsLog($job->toString() . " t={$timeMs} good");
			}

			// Break out if we hit the job count or wall time limits...
			if ($maxJobs && $jobsRun >= $maxJobs) {
				break;
			} elseif ($maxTime && time() - $startTime > $maxTime) {
				break;
			}

			// Don't let any of the main DB slaves get backed up
			$timePassed = time() - $lastTime;
			if ($timePassed >= 5 || $timePassed < 0) {
				wfWaitForSlaves();
				$lastTime = time(); // reset the timer so slaves are not polled on every job
			}
			// Don't let any queue slaves/backups fall behind
			if ($jobsRun > 0 && $jobsRun % 100 == 0) {
				$group->waitForBackups();
			}
		}
	} while ($job); // stop when there are no jobs
}