/**
 * Cleans up batch jobs held under an exclusive lock.
 *
 * Jobs whose exclusive lock expired are moved to FATAL status; jobs left in an
 * inconsistent status are deleted outright.
 *
 * @return int Always 0.
 */
public static function cleanExclusiveJobs()
{
    // Fail every job whose exclusive lock has expired.
    $expiredJobs = kBatchExclusiveLock::getExpiredJobs();
    foreach ($expiredJobs as $expiredJob) {
        KalturaLog::log("Cleaning job id[" . $expiredJob->getId() . "]");
        $expiredJob->setMessage("Job was cleaned up.");
        kJobsManager::updateBatchJob($expiredJob, BatchJob::BATCHJOB_STATUS_FATAL);
    }

    // Remove jobs whose status is inconsistent.
    $inconsistentJobs = kBatchExclusiveLock::getStatusInconsistentJob();
    foreach ($inconsistentJobs as $inconsistentJob) {
        KalturaLog::log("Fixing batch job Inconsistency [" . $inconsistentJob->getId() . "]");
        // The job should have been deleted already; it only reached this state
        // because the update process failed fatally, so there is no point in
        // retrying to save it.
        $inconsistentJob->delete();
    }

    return 0;
}
/**
 * Cleans up batch jobs held under an exclusive lock.
 *
 * Jobs whose exclusive lock expired are moved to FATAL status. Then any job
 * already in a closed status that still carries batch scheduling information
 * (scheduler/worker/batch index/processor expiration) — and that finished more
 * than 10 minutes ago — has that scheduling information cleared.
 *
 * @return int Number of closed jobs whose scheduling fields were cleared.
 */
public static function cleanExclusiveJobs()
{
    // Fail every job whose exclusive lock has expired.
    $expiredJobs = kBatchExclusiveLock::getExpiredJobs();
    foreach ($expiredJobs as $expiredJob) {
        KalturaLog::log("Cleaning job id[" . $expiredJob->getId() . "]");
        kJobsManager::updateBatchJob($expiredJob, BatchJob::BATCHJOB_STATUS_FATAL);
    }

    // Select jobs that are already closed but still hold batch scheduling info.
    $criteria = new Criteria();
    $criteria->add(BatchJobPeer::STATUS, BatchJobPeer::getClosedStatusList(), Criteria::IN);
    $criteria->add(BatchJobPeer::BATCH_INDEX, null, Criteria::ISNOTNULL);
    // The 'closed' jobs should be done for at least 10 minutes before the
    // cleanup starts touching them. This solves cases where a job (convert)
    // completes successfully, but the next job (closure) does not get a chance
    // to take over due to the clean-up.
    $criteria->add(BatchJobPeer::FINISH_TIME, time() - 600, Criteria::LESS_THAN);

    // MUST read from the master DB connection.
    $closedJobs = BatchJobPeer::doSelect($criteria, myDbHelper::getConnection(myDbHelper::DB_HELPER_CONN_PROPEL2));
    foreach ($closedJobs as $closedJob) {
        KalturaLog::log("Cleaning job id[" . $closedJob->getId() . "]");
        // Detach the job from its scheduler/worker so it no longer looks live.
        $closedJob->setSchedulerId(null);
        $closedJob->setWorkerId(null);
        $closedJob->setBatchIndex(null);
        $closedJob->setProcessorExpiration(null);
        $closedJob->save();
    }

    return count($closedJobs);
}