// NOTE(review): this fragment is truncated — the foreach/if blocks opened here
// are closed outside the visible chunk. Code is unchanged; only formatting
// restored and comments added. $bucketConfs, $configs, $objs, $failedDiskArg,
// $totalTime and $lookups are presumably defined earlier in the file — verify.
require_once 'PHPDFS/DataLocator/RUSHr.php';
// For each bucket configuration: build a RUSHr locator, record the clamped
// failed-disk index, then time object lookups and (for every config after the
// first) diff placements against the previous config to measure data movement.
foreach ($bucketConfs as $currConfig => &$confStats) {
    $hm = new PHPDFS_DataLocator_RUSHr($configs['data'][$currConfig]);
    echo "total nodes: " . $hm->getTotalNodes() . "\nfailed disk: " . $failedDiskArg . "\n";
    // Clamp the requested failed-disk index into the range [0, totalNodes].
    $failedDisk = $failedDiskArg;
    if ($failedDiskArg > $hm->getTotalNodes()) {
        $failedDisk = $hm->getTotalNodes();
    } else {
        if ($failedDiskArg < 0) {
            $failedDisk = 0;
        }
    }
    $confStats['stats']['failedDisk'] = $failedDisk;
    if ($currConfig > 0) {
        // make the locator for the previous config
        $hmPrev = new PHPDFS_DataLocator_RUSHr($configs['data'][$currConfig - 1]);
    }
    foreach ($objs as $objId) {
        // Time a single findNodes() placement lookup for this object id.
        $time = microtime(1);
        $disks = $hm->findNodes($objId);
        $totalTime += microtime(1) - $time;
        $lookups++;
        $movedFrom = null;
        $movedTo = null;
        if ($currConfig > 0) {
            // Compare this config's placement with the previous config's:
            // $movedFrom = disks the object left; $movedTo = disks it gained.
            $prevDisks = $hmPrev->findNodes($objId);
            $movedFrom = array_diff($prevDisks, $disks);
            $movedTo = array_diff($disks, $prevDisks);
            if (count($movedFrom)) {
                // debugging aids left by the original author:
                //print_r( array( $prevDisks, $disks, $movedFrom, $movedTo) );
                //exit();
<?php
// Benchmark script: measures the average findNode() lookup time of the
// PHPDFS RUSHr data locator across a series of generated configurations.
// NOTE(review): formatting restored from a collapsed line; code unchanged.

// the number of objects to generate and test for data distribution
$totalObjs = isset($argv[1]) ? $argv[1] : 1000;

// now we loop on each config
// and take stats on the distribution
// and data movement
$configs = makeConfigs(100);
$totalConfs = count($configs);
$timings = array();
require_once 'PHPDFS/DataLocator/RUSHr.php';
for ($n = 0; $n < $totalConfs; $n++) {
    echo "processing conf {$n} with " . count($configs[$n]['clusters']) . " clusters.\n";
    $totalTime = 0;
    $hm = new PHPDFS_DataLocator_RUSHr($configs[$n]);
    for ($i = 0; $i < $totalObjs; $i++) {
        // uuid_create() (PECL uuid extension) supplies a fresh random key
        // per lookup; only the findNode() call itself is timed.
        $objKey = uuid_create();
        $time = microtime(1);
        $hm->findNode($objKey);
        $time2 = microtime(1);
        $totalTime += $time2 - $time;
    }
    // Average seconds per lookup for this configuration.
    $timings[$n] = $totalTime / $totalObjs;
    echo "avgtime:" . $timings[$n] . "\n";
}
print_r($timings);

// Builds $numConfigs locator configurations for the benchmark above.
// NOTE(review): definition is truncated in this chunk — the loop body and
// return continue past the visible source; do not assume its output shape
// beyond the 'clusters' key used by the caller.
function makeConfigs($numConfigs = 1) {
    $configs = array();
    $replicationDegree = 3;
    for ($num = 0; $num < $numConfigs; $num++) {