/**
 * @covers HashRing
 */
public function testHashRing() {
	// Ring with six servers of varying weight (s6 heaviest at 3)
	$ring = new HashRing( array(
		's1' => 1, 's2' => 1, 's3' => 2, 's4' => 2, 's5' => 2, 's6' => 3
	) );

	// Single-location lookups for 20 sample keys must be stable
	$actual = array();
	foreach ( range( 0, 19 ) as $n ) {
		$actual["hello{$n}"] = $ring->getLocation( "hello{$n}" );
	}
	$expected = array(
		"hello0" => "s5", "hello1" => "s6", "hello2" => "s2", "hello3" => "s5",
		"hello4" => "s6", "hello5" => "s4", "hello6" => "s5", "hello7" => "s4",
		"hello8" => "s5", "hello9" => "s5", "hello10" => "s3", "hello11" => "s6",
		"hello12" => "s1", "hello13" => "s3", "hello14" => "s3", "hello15" => "s5",
		"hello16" => "s4", "hello17" => "s6", "hello18" => "s6", "hello19" => "s3"
	);
	$this->assertEquals( $expected, $actual, 'Items placed at proper locations' );

	// Multi-location lookups (primary + one fallback) must also be stable
	$actual = array();
	foreach ( range( 0, 4 ) as $n ) {
		$actual["hello{$n}"] = $ring->getLocations( "hello{$n}", 2 );
	}
	$expected = array(
		"hello0" => array( "s5", "s6" ),
		"hello1" => array( "s6", "s4" ),
		"hello2" => array( "s2", "s1" ),
		"hello3" => array( "s5", "s6" ),
		"hello4" => array( "s6", "s4" )
	);
	$this->assertEquals( $expected, $actual, 'Items placed at proper locations' );
}
/**
 * Register a root job on its consistent-hash partition, with one fallback.
 *
 * The push ring yields a primary partition and one backup for the root job
 * signature; the backup is only tried if the primary throws JobQueueError.
 *
 * @param Job $job
 * @return bool Whether the root job was registered (false if all attempts failed)
 */
protected function doDeduplicateRootJob( Job $job ) {
	$rootParams = $job->getRootJobParams();
	// Primary partition plus one fallback for this root job signature
	$candidates = $this->partitionPushRing->getLocations( $rootParams['rootJobSignature'], 2 );
	try {
		// Attempt the primary partition first
		return $this->partitionQueues[$candidates[0]]->doDeduplicateRootJob( $job );
	} catch ( JobQueueError $e ) {
		if ( isset( $candidates[1] ) ) { // fallback partition available
			return $this->partitionQueues[$candidates[1]]->doDeduplicateRootJob( $job );
		}
	}
	return false;
}
/**
 * Register a root job on the live partition for its signature.
 *
 * If the chosen partition throws JobQueueError, it is ejected from the live
 * ring (for 5 seconds) and the call is retried once on the next live partition.
 *
 * @param IJobSpecification $job
 * @return bool Whether the root job was registered (false if the retry path was exhausted)
 */
protected function doDeduplicateRootJob(IJobSpecification $job) {
	$rootSig = $job->getRootJobParams()['rootJobSignature'];
	$loc = $this->partitionRing->getLiveLocation($rootSig);
	try {
		return $this->partitionQueues[$loc]->doDeduplicateRootJob($job);
	} catch (JobQueueError $e) {
		// Temporarily eject the failed partition and retry on the replacement
		if ($this->partitionRing->ejectFromLiveRing($loc, 5)) {
			$loc = $this->partitionRing->getLiveLocation($rootSig);
			return $this->partitionQueues[$loc]->doDeduplicateRootJob($job);
		}
	}
	return false;
}
/**
 * Get (and memoize) a pooled connection for this key.
 *
 * The first three candidate servers from the hash ring are tried in
 * consistent-hash order; the first server that yields a connection wins.
 *
 * @return Status Uses RediConnRef as value on success
 */
protected function getConnection() {
	if (isset($this->conn)) {
		// Reuse the cached connection from a prior call
		return Status::newGood($this->conn);
	}
	$candidates = $this->ring->getLocations($this->key, 3);
	// Order candidates deterministically for this key
	ArrayUtils::consistentHashSort($candidates, $this->key);
	$handle = false;
	foreach ($candidates as $label) {
		$handle = $this->pool->getConnection($this->serversByLabel[$label], $this->logger);
		if ($handle) {
			break;
		}
	}
	if (!$handle) {
		// No candidate server was reachable
		return Status::newFatal('pool-servererror', implode(', ', $candidates));
	}
	$this->conn = $handle;
	return Status::newGood($this->conn);
}
/**
 * @todo These tests should be moved into Predis\Cluster\Distribution\DistributionStrategyTestCase
 * @group disconnected
 */
public function testCallbackToGetNodeHash() {
	$node = '127.0.0.1:7000';
	$replicas = HashRing::DEFAULT_REPLICAS;

	// The node-hash callback must be invoked exactly once, with the node itself
	$hashCallback = $this->getMock('stdClass', array('__invoke'));
	$hashCallback->expects($this->once())
		->method('__invoke')
		->with($node)
		->will($this->returnValue($node));

	$ring = new HashRing($replicas, $hashCallback);
	$ring->add($node);

	// Force ring initialization so the callback fires
	$this->getNodes($ring);
}
/**
 * Initialize the ring with the class's default replica count.
 *
 * Uses static:: (late static binding) so subclasses that override
 * DEFAULT_REPLICAS are honored — equivalent to the previous $this::
 * form, but the conventional spelling.
 */
public function __construct() {
	parent::__construct(static::DEFAULT_REPLICAS);
}
/**
 * Initialize the ring with the class's default replica count.
 *
 * Uses static:: (late static binding) so subclasses that override
 * DEFAULT_REPLICAS are honored — equivalent to the previous $this::
 * form, but the conventional spelling.
 *
 * @param callable|null $nodeHashCallback Callback returning the string used to calculate the hash of a node.
 */
public function __construct($nodeHashCallback = null) {
	parent::__construct(static::DEFAULT_REPLICAS, $nodeHashCallback);
}
/**
 * Attempt to push a batch of jobs into the given candidate partitions.
 *
 * De-duplicated jobs are routed by consistent hash of their deduplication
 * info; all other jobs are spread over random partitions (or kept as one
 * batch under QOS_ATOMIC). Partitions that fail a push are removed from
 * $partitionsTry (passed by reference) so later batches avoid them.
 *
 * @param array $jobs Job objects to insert
 * @param array &$partitionsTry Map of (partition name => weight); failing partitions are unset
 * @param integer $flags Bit field; self::QOS_ATOMIC forces all-or-nothing batching
 * @return array List of Job object that could not be inserted
 */
protected function tryJobInsertions(array $jobs, array &$partitionsTry, $flags) {
	if (!count($partitionsTry)) {
		return $jobs; // can't insert anything
	}
	$jobsLeft = array();
	// Ring over only the currently-viable partitions
	$partitionRing = new HashRing($partitionsTry);
	// Because jobs are spread across partitions, per-job de-duplication needs
	// to use a consistent hash to avoid allowing duplicate jobs per partition.
	// When inserting a batch of de-duplicated jobs, QOS_ATOMIC is disregarded.
	$uJobsByPartition = array(); // (partition name => job list)
	foreach ($jobs as $key => $job) {
		if ($job->ignoreDuplicates()) {
			// Hash the job's deduplication info so duplicates always land
			// on the same partition
			$sha1 = sha1(serialize($job->getDeduplicationInfo()));
			$uJobsByPartition[$partitionRing->getLocation($sha1)][] = $job;
			unset($jobs[$key]); // handled; leave only non-deduplicated jobs
		}
	}
	// Get the batches of jobs that are not de-duplicated
	if ($flags & self::QOS_ATOMIC) {
		$nuJobBatches = array($jobs); // all or nothing
	} else {
		// Split the jobs into batches and spread them out over servers if there
		// are many jobs. This helps keep the partitions even. Otherwise, send all
		// the jobs to a single partition queue to avoid the extra connections.
		$nuJobBatches = array_chunk($jobs, 300);
	}
	// Insert the de-duplicated jobs into the queues...
	foreach ($uJobsByPartition as $partition => $jobBatch) {
		$queue = $this->partitionQueues[$partition];
		if ($queue->doBatchPush($jobBatch, $flags)) {
			// Queue is no longer empty; update the cached emptiness flag
			$key = $this->getCacheKey('empty');
			$this->cache->set($key, 'false', JobQueueDB::CACHE_TTL_LONG);
		} else {
			unset($partitionsTry[$partition]); // blacklist partition
			$jobsLeft = array_merge($jobsLeft, $jobBatch); // not inserted
		}
	}
	// Insert the jobs that are not de-duplicated into the queues...
	foreach ($nuJobBatches as $jobBatch) {
		// Weighted random choice among the remaining viable partitions
		$partition = ArrayUtils::pickRandom($partitionsTry);
		if ($partition === false) { // all partitions at 0 weight?
			$jobsLeft = array_merge($jobsLeft, $jobBatch); // not inserted
		} else {
			$queue = $this->partitionQueues[$partition];
			if ($queue->doBatchPush($jobBatch, $flags)) {
				// Queue is no longer empty; update the cached emptiness flag
				$key = $this->getCacheKey('empty');
				$this->cache->set($key, 'false', JobQueueDB::CACHE_TTL_LONG);
			} else {
				unset($partitionsTry[$partition]); // blacklist partition
				$jobsLeft = array_merge($jobsLeft, $jobBatch); // not inserted
			}
		}
	}
	return $jobsLeft;
}