/**
 * Freeze or thaw writes to the cluster, depending on the --thaw option.
 */
public function execute() {
	$sender = new DataSender( $this->getConnection() );
	if ( $this->hasOption( 'thaw' ) ) {
		$sender->thawIndexes();
		$this->output( "Thawed any existing cluster-wide freeze\n\n" );
	} else {
		$sender->freezeIndexes();
		$this->output( "Applied cluster-wide freeze\n\n" );
	}
}
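The --thaw switch read by hasOption() above has to be registered when the maintenance script is constructed. A minimal sketch of how that would typically look, assuming a standard MediaWiki Maintenance subclass (addOption() is the stock Maintenance API; the description string is my own wording):

public function __construct() {
	parent::__construct();
	// Registers the --thaw flag consumed by execute() above.
	$this->addOption( 'thaw', 'Remove the cluster-wide freeze instead of applying one' );
}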
/**
 * Dump everything from the live index into the one being worked on.
 *
 * @param int $processes
 * @param int $refreshInterval
 * @param int $retryAttempts
 * @param int $chunkSize
 * @param float $acceptableCountDeviation
 */
public function reindex( $processes = 1, $refreshInterval = 1, $retryAttempts = 5,
	$chunkSize = 100, $acceptableCountDeviation = 0.05
) {
	global $wgCirrusSearchWikimediaExtraPlugin;

	// Set some settings that should help I/O load during bulk indexing. We'll have to
	// optimize after this to consolidate down to a proper number of shards, but that
	// is worth the price. total_shards_per_node will help to make sure that each shard
	// has as few neighbors as possible.
	$settings = $this->index->getSettings();
	$maxShardsPerNode = $this->decideMaxShardsPerNodeForReindex();
	$settings->set( array(
		'refresh_interval' => -1,
		'merge.policy.segments_per_tier' => 40,
		'merge.policy.max_merge_at_once' => 40,
		'routing.allocation.total_shards_per_node' => $maxShardsPerNode,
	) );

	$sender = new DataSender( $this->connection );
	$frozenIndexes = $this->connection->indexToIndexTypes( $this->types );
	$sender->freezeIndexes( $frozenIndexes );
	if ( $processes > 1 ) {
		if ( !isset( $wgCirrusSearchWikimediaExtraPlugin['id_hash_mod_filter'] ) ||
				!$wgCirrusSearchWikimediaExtraPlugin['id_hash_mod_filter'] ) {
			$this->error( "Can't use multiple processes without \$wgCirrusSearchWikimediaExtraPlugin[ 'id_hash_mod_filter' ] = true", 1 );
		}
		$fork = new ForkController( $processes );
		$forkResult = $fork->start();
		// We don't want to share sockets between forks, so destroy the client.
		$this->connection->destroyClient();
		// Destroying the client resets the timeout so we have to reinstate it.
		$this->setConnectionTimeout();

		switch ( $forkResult ) {
			case 'child':
				foreach ( $this->types as $i => $type ) {
					$oldType = $this->oldTypes[$i];
					$this->reindexInternal( $type, $oldType, $processes,
						$fork->getChildNumber(), $chunkSize, $retryAttempts );
				}
				die( 0 );
			case 'done':
				break;
			default:
				$this->error( "Unexpected result while forking: {$forkResult}", 1 );
		}

		$this->outputIndented( "Verifying counts..." );
		// We can't verify counts are exactly equal because they won't be - we still
		// push updates into the old index while reindexing the new one.
		foreach ( $this->types as $i => $type ) {
			$oldType = $this->oldTypes[$i];
			$oldCount = (double)$oldType->count();
			$this->index->refresh();
			$newCount = (double)$type->count();
			$difference = $oldCount > 0 ? abs( $oldCount - $newCount ) / $oldCount : 0;
			if ( $difference > $acceptableCountDeviation ) {
				$this->output( "Not close enough! old={$oldCount} new={$newCount} difference={$difference}\n" );
				$this->error( 'Failed to load index - counts not close enough. ' .
					"old={$oldCount} new={$newCount} difference={$difference}. " .
					'Check for warnings above.', 1 );
			}
		}
		$this->output( "done\n" );
	} else {
		foreach ( $this->types as $i => $type ) {
			$oldType = $this->oldTypes[$i];
			$this->reindexInternal( $type, $oldType, 1, 1, $chunkSize, $retryAttempts );
		}
	}

	// Revert settings changed just for reindexing
	$settings->set( array(
		'refresh_interval' => $refreshInterval . 's',
		'merge.policy' => $this->mergeSettings,
	) );
	$sender->thawIndexes( $frozenIndexes );
}
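One property of reindex() worth noting: if anything throws between freezeIndexes() and thawIndexes(), the indexes stay frozen and writes stay blocked until someone thaws them by hand. A minimal sketch of a more defensive shape, assuming the same DataSender wiring as above and PHP 5.5+ for finally; this is not how the method is actually written:

$sender = new DataSender( $this->connection );
$frozenIndexes = $this->connection->indexToIndexTypes( $this->types );
$sender->freezeIndexes( $frozenIndexes );
try {
	// ... the bulk reindexing work from the body above goes here ...
} finally {
	// Thaw even on failure so the cluster is never left read-only.
	$sender->thawIndexes( $frozenIndexes );
}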
/**
 * Check whether the cluster/index is currently frozen for writes.
 * @return bool true if the cluster/index is not frozen, false otherwise
 */
private function canWrite() {
	$name = $this->getConnection()->getIndexName( $this->indexBaseName,
		Connection::TITLE_SUGGEST_TYPE_NAME );
	// Reuse DataSender even if we don't send anything with it.
	$sender = new DataSender( $this->getConnection() );
	return $sender->areIndexesAvailableForWrites( array( $name ) );
}
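A caller would typically consult canWrite() before pushing suggester updates. The retry loop below is a hypothetical usage sketch, not code from the extension; the attempt limit and 30-second backoff are arbitrary assumptions:

// Hypothetical guard: wait out a freeze before sending updates.
$attempts = 0;
while ( !$this->canWrite() ) {
	if ( ++$attempts > 10 ) {
		$this->error( "Index is still frozen after {$attempts} checks; giving up.\n", 1 );
	}
	sleep( 30 ); // arbitrary backoff while waiting for a thaw
}
// Safe to send updates now.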