/**
 * Creates and saves a pending FLATTEN batch job for the given entry.
 *
 * @param string $puser_id Partner-side user id, embedded in the job data.
 * @param entry $entry Entry object to flatten (the old docblock said string,
 *        but the code calls getId()/getPartner()/getSubpId() on it — it must
 *        be a non-null entry object).
 * @param string $version Entry version to flatten; falls back to
 *        $entry->getVersion() when empty.
 * @param string $file_format Target file format, embedded in the job data.
 * @return BatchJob the newly saved pending job
 */
public static function addJob($puser_id, $entry, $version, $file_format)
{
	// Fix: the original dereferenced $entry (getId/getIntId/getVersion) BEFORE
	// its if($entry) guard, so the guard never protected anything and $email
	// was used undefined whenever the guard failed. Guard the partner/email
	// lookup properly and default $email to null.
	$email = null;
	if ($entry)
	{
		$partner = $entry->getPartner();
		if ($partner)
			$email = $partner->getAdminEmail();
	}

	$entryId = $entry->getId();
	$entryIntId = $entry->getIntId();
	// An explicitly requested version wins; otherwise use the entry's current one.
	$entryVersion = $version ? $version : $entry->getVersion();

	$data = json_encode(array(
		'puserId' => $puser_id,
		'entryId' => $entryId,
		'entryIntId' => $entryIntId,
		'entryVersion' => $entryVersion,
		'fileFormat' => $file_format,
		'email' => $email,
	));

	$job = new BatchJob();
	$job->setJobType(BatchJobType::FLATTEN);
	$job->setData($data, true);
	$job->setStatus(BatchJob::BATCHJOB_STATUS_PENDING);
	// Poll again in 10 seconds.
	$job->setCheckAgainTimeout(time() + 10);
	$job->setProgress(0);
	$job->setMessage('Queued');
	$job->setDescription('Queued, waiting to run');
	$job->setUpdatesCount(0);
	$job->setEntryId($entryId);
	$job->setPartnerId($entry->getPartnerId());
	$job->setSubpId($entry->getSubpId());
	$job->save();

	return $job;
}
/**
 * Queues a pending DVDCREATOR batch job for the entry identified by the
 * "entry_id" request parameter, and adds the job's detailed wrapper to the
 * response. Adds an INVALID_ENTRY_ID error instead when the entry is missing.
 */
public function executeImpl($partner_id, $subp_id, $puser_id, $partner_prefix, $puser_kuser)
{
	$entryId = $this->getPM("entry_id");
	$entry = entryPeer::retrieveByPK($entryId);

	// Guard clause: unknown entry id -> report the error and stop.
	if (!$entry)
	{
		$this->addError(APIErrors::INVALID_ENTRY_ID, "entry", $entryId);
		return;
	}

	$job = new BatchJob();
	$job->setJobType(BatchJobType::DVDCREATOR);
	$job->setStatus(BatchJob::BATCHJOB_STATUS_PENDING);
	$job->setCheckAgainTimeout(time() + 10);
	$job->setProgress(0);
	$job->setUpdatesCount(0);
	$job->setEntryId($entryId);
	$job->setPartnerId($entry->getPartnerId());
	$job->setSubpId($entry->getSubpId());
	$job->save();

	$wrapper = objectWrapperBase::getWrapperClass($job, objectWrapperBase::DETAIL_LEVEL_DETAILED);
	// TODO - remove this code when cache works properly when saving objects (in their save method)
	$wrapper->removeFromCache("batch_job", $job->getId());
	$this->addMsg("batchjob", $wrapper);
}
/**
 * Queues a delayed DELETE_FILE batch job for a local file sync.
 *
 * The job is created in RETRY status with a long check-again timeout, so the
 * actual deletion happens only after the delay elapses.
 *
 * @param BatchJob $parentJob Optional parent; when given the job is created
 *        as its child (entry/partner inherited from the parent).
 * @param string $entryId Entry id for a standalone job (ignored when
 *        $parentJob is set).
 * @param Partner $partner Owning partner; used for a standalone job's
 *        partner id.
 * @param mixed $syncKey Sync key of the file to delete — presumably a
 *        FileSyncKey; confirm against kDeleteFileJobData::setSyncKey().
 * @param string $localFileSyncPath Full local path of the file to delete.
 * @param int $dc Data center id the deletion must run on.
 * @return BatchJob the queued job
 */
public static function addFutureDeletionJob(BatchJob $parentJob = null, $entryId = null, Partner $partner, $syncKey, $localFileSyncPath, $dc)
{
	// NOTE(review): optional parameters before required ones is deprecated in
	// PHP 8; the signature is kept as-is for backward compatibility with
	// existing callers.
	$deleteFileData = new kDeleteFileJobData();
	$deleteFileData->setLocalFileSyncPath($localFileSyncPath);
	$deleteFileData->setSyncKey($syncKey);

	if ($parentJob)
	{
		$batchJob = $parentJob->createChild(false);
	}
	else
	{
		$batchJob = new BatchJob();
		$batchJob->setEntryId($entryId);
		$batchJob->setPartnerId($partner->getId());
	}

	// RETRY status plus the check-again timeout is what delays the deletion.
	// NOTE(review): other call sites pass an absolute time (time() + timeout)
	// to setCheckAgainTimeout; here a bare 12h duration is passed — kept
	// as-is, but confirm which form this setter expects.
	$batchJob->setStatus(BatchJob::BATCHJOB_STATUS_RETRY);
	$batchJob->setCheckAgainTimeout(12 * 60 * 60);
	$batchJob->setDc($dc);

	// Fix: the original logged $deleteFileData->getDC(), but the DC was only
	// ever set on the batch job (setDc above), never on the job data, so the
	// logged data-center id was always empty. Log the actual $dc argument.
	KalturaLog::log("Creating File Delete job, from data center id: " . $dc . " with source file: " . $deleteFileData->getLocalFileSyncPath());

	return self::addJob($batchJob, $deleteFileData, BatchJobType::DELETE_FILE);
}
/**
 * Batch-job event consumer: reacts to a job row having been updated.
 *
 * Order matters here: bookkeeping timestamps are stamped first (queue time,
 * finish time, retry scheduling), then a per-job-type handler runs (which may
 * replace $dbBatchJob with an updated instance), then twin jobs are synced
 * and exhausted retries are failed — both based on the post-handler job.
 *
 * @param BatchJob $dbBatchJob the job that was just updated
 * @param BatchJob $twinJob a duplicate job sharing the same work, if any
 * @return bool true if should continue to the next consumer
 */
public function updatedJob(BatchJob $dbBatchJob, BatchJob $twinJob = null)
{
	try
	{
		$jobType = $dbBatchJob->getJobType();

		// First time the job leaves PENDING: record when it entered the queue.
		if (is_null($dbBatchJob->getQueueTime()) && $dbBatchJob->getStatus() != BatchJob::BATCHJOB_STATUS_PENDING)
		{
			$dbBatchJob->setQueueTime(time());
			$dbBatchJob->save();
		}

		if ($dbBatchJob->getStatus() == BatchJob::BATCHJOB_STATUS_FINISHED)
		{
			$dbBatchJob->setFinishTime(time());
			$dbBatchJob->save();
		}

		// RETRY: schedule the next attempt per job type, and clear the queue
		// time so it gets re-stamped when the job is picked up again.
		if ($dbBatchJob->getStatus() == BatchJob::BATCHJOB_STATUS_RETRY)
		{
			$dbBatchJob->setCheckAgainTimeout(time() + BatchJobPeer::getCheckAgainTimeout($jobType));
			$dbBatchJob->setQueueTime(null);
			$dbBatchJob->save();
		}

		if ($dbBatchJob->getStatus() == BatchJob::BATCHJOB_STATUS_ALMOST_DONE)
		{
			$dbBatchJob->setCheckAgainTimeout(time() + BatchJobPeer::getCheckAgainTimeout($jobType));
			$dbBatchJob->save();
		}

		// Terminal failure: stamp finish time and cascade-abort child jobs.
		if ($dbBatchJob->getStatus() == BatchJob::BATCHJOB_STATUS_FAILED || $dbBatchJob->getStatus() == BatchJob::BATCHJOB_STATUS_FATAL)
		{
			$dbBatchJob->setFinishTime(time());
			$dbBatchJob->save();
			// TODO - don't abort if it's bulk upload
			kJobsManager::abortChildJobs($dbBatchJob);
		}

		// Dispatch to the type-specific handler. Each handler may return a
		// new/updated BatchJob instance, which replaces $dbBatchJob for the
		// checks below.
		switch ($jobType)
		{
			case BatchJobType::IMPORT:
				$dbBatchJob = $this->updatedImport($dbBatchJob, $dbBatchJob->getData(), $twinJob);
				break;

			case BatchJobType::EXTRACT_MEDIA:
				$dbBatchJob = $this->updatedExtractMedia($dbBatchJob, $dbBatchJob->getData(), $twinJob);
				break;

			case BatchJobType::CONVERT:
				$dbBatchJob = $this->updatedConvert($dbBatchJob, $dbBatchJob->getData(), $twinJob);
				break;

			case BatchJobType::POSTCONVERT:
				$dbBatchJob = $this->updatedPostConvert($dbBatchJob, $dbBatchJob->getData(), $twinJob);
				break;

			case BatchJobType::BULKUPLOAD:
				$dbBatchJob = $this->updatedBulkUpload($dbBatchJob, $dbBatchJob->getData(), $twinJob);
				break;

			case BatchJobType::CONVERT_PROFILE:
				$dbBatchJob = $this->updatedConvertProfile($dbBatchJob, $dbBatchJob->getData(), $twinJob);
				break;

			case BatchJobType::BULKDOWNLOAD:
				$dbBatchJob = $this->updatedBulkDownload($dbBatchJob, $dbBatchJob->getData(), $twinJob);
				break;

			case BatchJobType::PROVISION_PROVIDE:
				$dbBatchJob = $this->updatedProvisionProvide($dbBatchJob, $dbBatchJob->getData(), $twinJob);
				break;

			case BatchJobType::PROVISION_DELETE:
				$dbBatchJob = $this->updatedProvisionDelete($dbBatchJob, $dbBatchJob->getData(), $twinJob);
				break;

			case BatchJobType::CONVERT_COLLECTION:
				$dbBatchJob = $this->updatedConvertCollection($dbBatchJob, $dbBatchJob->getData(), $twinJob);
				break;

			case BatchJobType::STORAGE_EXPORT:
				$dbBatchJob = $this->updatedStorageExport($dbBatchJob, $dbBatchJob->getData(), $twinJob);
				break;

			case BatchJobType::STORAGE_DELETE:
				$dbBatchJob = $this->updatedStorageDelete($dbBatchJob, $dbBatchJob->getData(), $twinJob);
				break;

			case BatchJobType::CAPTURE_THUMB:
				$dbBatchJob = $this->updatedCaptureThumb($dbBatchJob, $dbBatchJob->getData(), $twinJob);
				break;

			default:
				// Unknown job types fall through untouched.
				break;
		}

		// Duplicate-job syncing: when this job finished, mark all of its twin
		// jobs finished as well (unless duplication handling is disabled).
		if (!kConf::get("batch_ignore_duplication"))
		{
			if ($dbBatchJob->getStatus() == BatchJob::BATCHJOB_STATUS_FINISHED)
			{
				$twinBatchJobs = $dbBatchJob->getTwinJobs();
				// update status at all twin jobs
				foreach ($twinBatchJobs as $twinBatchJob)
				{
					if ($twinBatchJob->getStatus() != BatchJob::BATCHJOB_STATUS_FINISHED)
					{
						kJobsManager::updateBatchJob($twinBatchJob, BatchJob::BATCHJOB_STATUS_FINISHED);
					}
				}
			}
		}

		// Out of retries: a job still in RETRY after the handler ran, having
		// used up its per-type execution attempts, is marked failed.
		if ($dbBatchJob->getStatus() == BatchJob::BATCHJOB_STATUS_RETRY && $dbBatchJob->getExecutionAttempts() >= BatchJobPeer::getMaxExecutionAttempts($jobType))
		{
			$dbBatchJob = kJobsManager::updateBatchJob($dbBatchJob, BatchJob::BATCHJOB_STATUS_FAILED);
		}
	}
	catch (Exception $ex)
	{
		// Never let a consumer exception break the event chain: alert + log,
		// then still return true so the next consumer runs.
		self::alert($dbBatchJob, $ex);
		KalturaLog::err("Error:" . $ex->getMessage());
	}

	return true;
}