Example no. 1
0
 /**
  * Perform the move.
  * @return FileRepoStatus
  */
 function execute()
 {
     $repo = $this->file->repo;
     $status = $repo->newGood();
     $triplets = $this->getMoveTriplets();
     $triplets = $this->removeNonexistentFiles($triplets);
     $destFile = wfLocalFile($this->target);
     $this->file->lock(); // begin
     $destFile->lock(); // quickly fail if destination is not available
     // Rename the file versions metadata in the DB.
     // This implicitly locks the destination file, which avoids race conditions.
     // If we moved the files from A -> C before DB updates, another process could
     // move files from B -> C at this point, causing storeBatch() to fail and thus
     // cleanupTarget() to trigger. It would delete the C files and cause data loss.
     $statusDb = $this->doDBUpdates();
     if (!$statusDb->isGood()) {
         $this->file->unlockAndRollback();
         $statusDb->ok = false;
         return $statusDb;
     }
     wfDebugLog('imagemove', "Renamed {$this->file->getName()} in database: " . "{$statusDb->successCount} successes, {$statusDb->failCount} failures");
     // Copy the files into their new location.
     // If a prior process hit a fatal error while copying or cleaning up files, we
     // tolerate any existing files that are identical to the ones being stored.
     $statusMove = $repo->storeBatch($triplets, FileRepo::OVERWRITE_SAME);
     wfDebugLog('imagemove', "Moved files for {$this->file->getName()}: " . "{$statusMove->successCount} successes, {$statusMove->failCount} failures");
     if (!$statusMove->isGood()) {
         // Delete any files copied over (while the destination is still locked)
         $this->cleanupTarget($triplets);
         $this->file->unlockAndRollback(); // unlocks the destination
         wfDebugLog('imagemove', "Error in moving files: " . $statusMove->getWikiText());
         $statusMove->ok = false;
         return $statusMove;
     }
     $destFile->unlock();
     $this->file->unlock(); // done
     // Everything went ok, remove the source files
     $this->cleanupSource($triplets);
     $status->merge($statusDb);
     $status->merge($statusMove);
     return $status;
 }
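
The part of Example no. 1 worth generalizing is its ordering: lock both files, rename the metadata in the DB first (which implicitly locks the destination), copy the files second, and delete anything already copied while the destination is still locked before rolling back. A minimal sketch of that ordering follows; moveWithDbFirst() and its callable parameters are illustrative stand-ins, not MediaWiki APIs, and each callable is assumed to return true on success.

 /**
  * Illustrative sketch only. $renameInDb, $copyFiles, $deleteCopiedFiles and
  * $rollbackDb stand in for doDBUpdates(), $repo->storeBatch(), cleanupTarget()
  * and unlockAndRollback(); each is assumed to return true on success.
  */
 function moveWithDbFirst(
     callable $renameInDb,
     callable $copyFiles,
     callable $deleteCopiedFiles,
     callable $rollbackDb
 ) {
     // 1. Rename the metadata first; this reserves the destination name.
     if (!$renameInDb()) {
         // Nothing has been copied yet, so undoing the DB change is enough.
         $rollbackDb();
         return false;
     }
     // 2. Copy the files. On failure, remove whatever was copied *before*
     //    releasing the destination, mirroring cleanupTarget() above.
     if (!$copyFiles()) {
         $deleteCopiedFiles();
         $rollbackDb();
         return false;
     }
     // 3. Both steps succeeded; the caller can now unlock and remove the source.
     return true;
 }

Passing the steps in as callables keeps the sketch self-contained; in the code above those steps are the doDBUpdates(), storeBatch() and cleanupTarget() calls.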
Example no. 2
0
 /**
  * Run the transaction
  */
 function execute()
 {
     global $wgUseSquid;
     wfProfileIn(__METHOD__);
     $this->file->lock();
     // Leave private files alone
     $privateFiles = array();
     list($oldRels, $deleteCurrent) = $this->getOldRels();
     $dbw = $this->file->repo->getMasterDB();
     if (!empty($oldRels)) {
         $res = $dbw->select(
             'oldimage',
             array('oi_archive_name'),
             array(
                 'oi_name' => $this->file->getName(),
                 'oi_archive_name IN (' . $dbw->makeList(array_keys($oldRels)) . ')',
                 $dbw->bitAnd('oi_deleted', File::DELETED_FILE) => File::DELETED_FILE
             ),
             __METHOD__
         );
         foreach ($res as $row) {
             $privateFiles[$row->oi_archive_name] = 1;
         }
     }
     // Prepare deletion batch
     $hashes = $this->getHashes();
     $this->deletionBatch = array();
     $ext = $this->file->getExtension();
     $dotExt = $ext === '' ? '' : ".{$ext}";
     foreach ($this->srcRels as $name => $srcRel) {
         // Skip files that have no hash (missing source).
         // Keep private files where they are.
         if (isset($hashes[$name]) && !array_key_exists($name, $privateFiles)) {
             $hash = $hashes[$name];
             $key = $hash . $dotExt;
             $dstRel = $this->file->repo->getDeletedHashPath($key) . $key;
             $this->deletionBatch[$name] = array($srcRel, $dstRel);
         }
     }
     // Lock the filearchive rows so that the files don't get deleted by a cleanup operation
     // We acquire this lock by running the inserts now, before the file operations.
     //
     // This potentially has poor lock contention characteristics -- an alternative
     // scheme would be to insert stub filearchive entries with no fa_name and commit
     // them in a separate transaction, then run the file ops, then update the fa_name fields.
     $this->doDBInserts();
     // Remove non-existent files from the batch, so we don't get errors.
     $this->deletionBatch = $this->removeNonexistentFiles($this->deletionBatch);
     // Execute the file deletion batch
     $status = $this->file->repo->deleteBatch($this->deletionBatch);
     if (!$status->isGood()) {
         $this->status->merge($status);
     }
     if (!$this->status->ok) {
         // Critical file deletion error
         // Roll back inserts, release lock and abort
         // TODO: delete the defunct filearchive rows if we are using a non-transactional DB
         $this->file->unlockAndRollback();
         wfProfileOut(__METHOD__);
         return $this->status;
     }
     // Purge squid
     if ($wgUseSquid) {
         $urls = array();
         foreach ($this->srcRels as $srcRel) {
             $urlRel = str_replace('%2F', '/', rawurlencode($srcRel));
             $urls[] = $this->file->repo->getZoneUrl('public') . '/' . $urlRel;
         }
         SquidUpdate::purge($urls);
     }
     // Delete image/oldimage rows
     $this->doDBDeletes();
     // Commit and return
     $this->file->unlock();
     wfProfileOut(__METHOD__);
     return $this->status;
 }
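
The Squid purge loop in Example no. 2 builds one URL per source file: the relative path is percent-encoded with rawurlencode(), and the encoded slashes are then restored so the directory structure of the path survives. A standalone sketch of just that URL construction is below; the zone URL and file names are made-up values, not taken from the code above.

 // Stand-ins for $this->file->repo->getZoneUrl('public') and $this->srcRels.
 $zoneUrl = 'https://uploads.example.org/images';
 $srcRels = array('a/ab/Some file.png', 'archive/0/01/Old version!.jpg');

 $urls = array();
 foreach ($srcRels as $srcRel) {
     // rawurlencode() also encodes '/' (as %2F); put the slashes back so only
     // the path segments themselves stay escaped.
     $urlRel = str_replace('%2F', '/', rawurlencode($srcRel));
     $urls[] = $zoneUrl . '/' . $urlRel;
 }
 // $urls[0] is 'https://uploads.example.org/images/a/ab/Some%20file.png'
 // $urls[1] is 'https://uploads.example.org/images/archive/0/01/Old%20version%21.jpg'

In the code above, the resulting list is what gets passed to SquidUpdate::purge().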