public function __construct() { parent::__construct(); $this->addDescription('Run one or more searches against the specified cluster. ' . 'Search queries are read from stdin.'); $this->addOption('baseName', 'What basename to use for all indexes. ' . 'Defaults to the wiki id.', false, true); $this->addOption('type', 'What type of search to run: prefix, suggest or full_text. ' . 'Defaults to full_text.', false, true); $this->addOption('options', 'A JSON object mapping from global variable to ' . 'its test value.'); $this->addOption('fork', 'Fork multiple processes to run queries from. ' . 'Defaults to false.', false, true); }
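// A minimal sketch of the stdin-driven loop that "Search queries are read from stdin"
// implies, assuming the standard Maintenance::getOption() API; runSearchOfType() is an
// illustrative helper name, not part of the source.
while ( ( $line = fgets( STDIN ) ) !== false ) {
	$query = trim( $line );
	if ( $query === '' ) {
		continue;
	}
	// Run one query using the configured --type (prefix, suggest or full_text).
	$this->runSearchOfType( $this->getOption( 'type', 'full_text' ), $query );
}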
public function __construct() { parent::__construct(); $this->mDescription = "Make the index sane. Always operates on a single cluster."; $this->addOption('fromId', 'Start sanitizing at a specific page_id. Default to 0.', false, true); $this->addOption('toId', 'Stop sanitizing at a specific page_id. Default to the maximum id in the db + 100.', false, true); $this->addOption('noop', 'Rather then queue remediation actions do nothing.'); $this->addOption('logSane', 'Print all sane pages.'); $this->addOption('buildChunks', 'Instead of running the script spit out commands that can be farmed out to ' . 'different processes or machines to check the index. If specified as a number then chunks no larger than ' . 'that size are spat out. If specified as a number followed by the word "total" without a space between them ' . 'then that many chunks will be spat out sized to cover the entire wiki.', false, true); }
public function __construct() { parent::__construct(); $this->addDescription("Copy index from one cluster to another.\nThe index name and index type should be the same on both clusters."); $this->addOption('indexType', 'Source index. Either content or general.', true, true); $this->addOption('targetCluster', 'Target Cluster.', true, true); $this->addOption('reindexChunkSize', 'Documents per shard to reindex in a batch. ' . 'Note when changing the number of shards that the old shard size is used, not the new ' . 'one. If you see many errors submitting documents in bulk but the automatic retry as ' . 'singles works then lower this number. Defaults to 100.', false, true); $this->addOption('reindexRetryAttempts', 'Number of times to back off and retry ' . 'per failure. Note that failures are not common but if Elasticsearch is in the process ' . 'of moving a shard this can time out. This will retry the attempt after some backoff ' . 'rather than failing the whole reindex process. Defaults to 5.', false, true); $this->addOption('processes', 'Number of processes to copy with. Defaults to 1', false, true); }
public function __construct() { parent::__construct(); $this->addDescription("Create a new suggester index. Always operates on a single cluster."); $this->addOption('baseName', 'What basename to use for all indexes, ' . 'defaults to wiki id', false, true); $this->addOption('indexChunkSize', 'Documents per shard to index in a batch. ' . 'Note when changing the number of shards that the old shard size is used, not the new ' . 'one. If you see many errors submitting documents in bulk but the automatic retry as ' . 'singles works then lower this number. Defaults to 100.', false, true); $this->addOption('indexRetryAttempts', 'Number of times to back off and retry ' . 'per failure. Note that failures are not common but if Elasticsearch is in the process ' . 'of moving a shard this can time out. This will retry the attempt after some backoff ' . 'rather than failing the whole reindex process. Defaults to 5.', false, true); $this->addOption('optimize', 'Optimize the index to 1 segment. Defaults to false.', false, false); $this->addOption('with-geo', 'Build geo contextualized suggestions. Defaults to false.', false, false); $this->addOption('scoringMethod', 'The scoring method to use when computing suggestion weights. ' . 'Detauls to quality.', false, true); }
/** * @param string $message * @param int $die */ private function error($message, $die = 0) { // @todo: I'll want to get rid of this method, but this patch will be big enough already // @todo: I'll probably want to throw exceptions and/or return Status objects instead, later if ($this->out) { $this->out->error($message, $die); } $die = intval($die); if ($die > 0) { die($die); } }
public function __construct() { parent::__construct(); $this->mDescription = "Force indexing some pages. Setting --from or --to will switch from id based indexing to " . "date based indexing which uses less efficient queries and follows redirects.\n\n" . "Note: All froms are _exclusive_ and all tos are _inclusive_.\n" . "Note 2: Setting fromId and toId use the efficient query so those are ok.\n" . "Note 3: Operates on all clusters unless --cluster is provided.\n"; $this->setBatchSize(10); $this->addOption('from', 'Start date of reindex in YYYY-mm-ddTHH:mm:ssZ (exc. Defaults to 0 epoch.', false, true); $this->addOption('to', 'Stop date of reindex in YYYY-mm-ddTHH:mm:ssZ. Defaults to now.', false, true); $this->addOption('fromId', 'Start indexing at a specific page_id. Not useful with --deletes.', false, true); $this->addOption('toId', 'Stop indexing at a specific page_id. Not useful with --deletes or --from or --to.', false, true); $this->addOption('deletes', 'If this is set then just index deletes, not updates or creates.', false); $this->addOption('limit', 'Maximum number of pages to process before exiting the script. Default to unlimited.', false, true); $this->addOption('buildChunks', 'Instead of running the script spit out commands that can be farmed out to ' . 'different processes or machines to rebuild the index. Works with fromId and toId, not from and to. ' . 'If specified as a number then chunks no larger than that size are spat out. If specified as a number ' . 'followed by the word "total" without a space between them then that many chunks will be spat out sized to ' . 'cover the entire wiki.', false, true); $this->addOption('queue', 'Rather than perform the indexes in process add them to the job queue. Ignored for delete.'); $this->addOption('maxJobs', 'If there are more than this many index jobs in the queue then pause before adding ' . 'more. This is only checked every ' . self::SECONDS_BETWEEN_JOB_QUEUE_LENGTH_CHECKS . ' seconds. Not meaningful ' . 'without --queue.', false, true); $this->addOption('pauseForJobs', 'If paused adding jobs then wait for there to be less than this many before ' . 'starting again. Defaults to the value specified for --maxJobs. Not meaningful without --queue.', false, true); $this->addOption('indexOnSkip', 'When skipping either parsing or links send the document as an index. ' . 'This replaces the contents of the index for that entry with the entry built from a skipped process.' . 'Without this if the entry does not exist then it will be skipped entirely. Only set this when running ' . 'the first pass of building the index. Otherwise, don\'t tempt fate by indexing half complete documents.'); $this->addOption('skipParse', 'Skip parsing the page. This is really only good for running the second half ' . 'of the two phase index build. If this is specified then the default batch size is actually 50.'); $this->addOption('skipLinks', 'Skip looking for links to the page (counting and finding redirects). Use ' . 'this with --indexOnSkip for the first half of the two phase index build.'); $this->addOption('namespace', 'Only index pages in this given namespace', false, true); }
public function __construct() { parent::__construct(); $this->mDescription = "Freeze/thaw writes to the elasticsearch cluster. This effects " . "all wikis in a multi-wiki environment. This always operates on a single cluster."; $this->addOption('thaw', 'Re-allow writes to the elasticsearch cluster.'); }
public function __construct() { parent::__construct(); $this->addDescription("Update the configuration or contents of all search indecies. Always operates on a single cluster."); }
public function error($err, $die = 0) { parent::error($err, $die); }
public function output($message, $channel = NULL) { if ($this->mQuiet) { return; } if ($this->logToStderr) { // We must log to stderr fwrite(STDERR, $message); } else { parent::output($message); } }
/** * @param Maintenance $maintenance */ public static function addSharedOptions($maintenance) { $maintenance->addOption('startOver', 'Blow away the identified index and rebuild it with ' . 'no data.'); $maintenance->addOption('indexIdentifier', "Set the identifier of the index to work on. " . "You'll need this if you have an index in production serving queries and you have " . "to alter some portion of its configuration that cannot safely be done without " . "rebuilding it. Once you specify a new indexIdentifier for this wiki you'll have to " . "run this script with the same identifier each time. Defaults to 'current' which " . "infers the currently in use identifier. You can also use 'now' to set the identifier " . "to the current time in seconds which should give you a unique identifier.", false, true); $maintenance->addOption('reindexAndRemoveOk', "If the alias is held by another index then " . "reindex all documents from that index (via the alias) to this one, swing the " . "alias to this index, and then remove the other index. Updates performed while this " . "operation is in progress will be queued up in the job queue. Defaults to false."); $maintenance->addOption('reindexProcesses', 'Number of processes to use in reindex. ' . 'Not supported on Windows. Defaults to 1 on Windows and 5 otherwise.', false, true); $maintenance->addOption('reindexAcceptableCountDeviation', 'How much the reindexed ' . 'copy of an index is allowed to deviate from the current copy without triggering a ' . 'reindex failure. Defaults to 5%.', false, true); $maintenance->addOption('reindexChunkSize', 'Documents per shard to reindex in a batch. ' . 'Note when changing the number of shards that the old shard size is used, not the new ' . 'one. If you see many errors submitting documents in bulk but the automatic retry as ' . 'singles works then lower this number. Defaults to 100.', false, true); $maintenance->addOption('reindexRetryAttempts', 'Number of times to back off and retry ' . 'per failure. Note that failures are not common but if Elasticsearch is in the process ' . 'of moving a shard this can time out. This will retry the attempt after some backoff ' . 'rather than failing the whole reindex process. Defaults to 5.', false, true); $maintenance->addOption('baseName', 'What basename to use for all indexes. ' . 'Defaults to the wiki id.', false, true); $maintenance->addOption('debugCheckConfig', 'Print the configuration as it is checked ' . 'to help debug unexpected configuration mismatches.'); $maintenance->addOption('justCacheWarmers', 'Just validate that the cache warmers are correct ' . 'and perform no additional checking. Use when you need to apply new cache warmers but ' . "want to be sure that you won't apply any other changes at an inopportune time."); $maintenance->addOption('justAllocation', 'Just validate the shard allocation settings. Use ' . "when you need to apply new shard allocation settings but want to be sure that you won't apply any other " . 'changes at an inopportune time.'); }
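// Hypothetical consumer of addSharedOptions(): a maintenance script pulling the shared
// options into its own constructor. The surrounding class, its description text, and the
// assumption that addSharedOptions() is reachable via self:: are illustrative only.
public function __construct() {
	parent::__construct();
	$this->addDescription( 'Validate one search index against the expected configuration.' );
	self::addSharedOptions( $this );
}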
public function __construct() { parent::__construct(); $this->mDescription = "Check that all Cirrus indexes report OK. This always operates on a single cluster."; $this->addOption('nagios', 'Output in nagios format'); }
public function __construct() { parent::__construct(); $this->addDescription("Create a new suggester index. Always operates on a single cluster."); $this->addOption('baseName', 'What basename to use for all indexes, ' . 'defaults to wiki id', false, true); $this->addOption('indexChunkSize', 'Documents per shard to index in a batch. ' . 'Note when changing the number of shards that the old shard size is used, not the new ' . 'one. If you see many errors submitting documents in bulk but the automatic retry as ' . 'singles works then lower this number. Defaults to 100.', false, true); $this->addOption('indexRetryAttempts', 'Number of times to back off and retry ' . 'per failure. Note that failures are not common but if Elasticsearch is in the process ' . 'of moving a shard this can time out. This will retry the attempt after some backoff ' . 'rather than failing the whole reindex process. Defaults to 5.', false, true); $this->addOption('optimize', 'Optimize the index to 1 segment. Defaults to false.', false, false); $this->addOption('with-geo', 'Build geo contextualized suggestions. Defaults to false.', false, false); $this->addOption('scoringMethod', 'The scoring method to use when computing suggestion weights. ' . 'Detauls to quality.', false, true); $this->addOption('masterTimeout', 'The amount of time to wait for the master to respond to mapping ' . 'updates before failing. Defaults to $wgCirrusSearchMasterTimeout.', false, true); $this->addOption('replicationTimeout', 'The amount of time (seconds) to wait for the replica shards to initialize. ' . 'Defaults to 3600 seconds.', false, true); $this->addOption('allocationIncludeTag', 'Set index.routing.allocation.include.tag on the created index. ' . 'Useful if you want to force the suggester index not to be allocated on a specific set of nodes.', false, true); $this->addOption('allocationExcludeTag', 'Set index.routing.allocation.exclude.tag on the created index. ' . 'Useful if you want to force the suggester index not to be allocated on a specific set of nodes.', false, true); }