/**
 * Purges a list of Squids defined in $wgSquidServers.
 * $urlArr should contain the full URLs to purge as values
 * (example: $urlArr[] = 'http://my.host/something')
 * XXX report broken Squids per mail or log
 *
 * @param string[] $urlArr List of full URLs to purge
 * @return void
 */
static function purge( $urlArr ) {
	global $wgSquidServers, $wgHTCPMulticastAddress, $wgHTCPPort;

	if ( !$urlArr ) {
		return;
	}

	// Remove duplicates so each URL is purged only once per server/transport
	$urlArr = array_unique( $urlArr );

	// wikia change start
	global $wgPurgeSquidViaScribe;
	if ( $wgPurgeSquidViaScribe ) {
		// Hand the whole batch to the Scribe relay instead of purging directly
		SquidUpdate::ScribePurge( $urlArr );
		return;
	}
	// wikia change end

	// Lossy UDP multicast purge, if configured
	if ( $wgHTCPMulticastAddress && $wgHTCPPort ) {
		SquidUpdate::HTCPPurge( $urlArr );
	}

	wfProfileIn( __METHOD__ );

	// socket cap per Squid
	$maxSocketsPerSquid = 8;
	// 400 seems to be a good tradeoff, opening a socket takes a while
	$urlsPerSocket = 400;
	$socketsPerSquid = ceil( count( $urlArr ) / $urlsPerSocket );
	if ( $socketsPerSquid > $maxSocketsPerSquid ) {
		$socketsPerSquid = $maxSocketsPerSquid;
	}

	// Queue each chunk of URLs on its own client (socket) per server,
	// then drive all the sockets in parallel.
	$pool = new SquidPurgeClientPool();
	$chunks = array_chunk( $urlArr, ceil( count( $urlArr ) / $socketsPerSquid ) );
	foreach ( $wgSquidServers as $server ) {
		foreach ( $chunks as $chunk ) {
			$client = new SquidPurgeClient( $server );
			foreach ( $chunk as $url ) {
				$client->queuePurge( $url );
			}
			$pool->addClient( $client );
		}
	}
	$pool->run();

	wfProfileOut( __METHOD__ );
}
/**
 * Purges a list of CDN nodes defined in $wgSquidServers.
 * $urlArr should contain the full URLs to purge as values
 * (example: $urlArr[] = 'http://my.host/something')
 *
 * @param string[] $urlArr List of full URLs to purge
 */
public static function purge( array $urlArr ) {
	global $wgSquidServers, $wgHTCPRouting;

	if ( !$urlArr ) {
		return;
	}

	// Each URL only needs to be purged once
	$urlArr = array_unique( $urlArr );

	wfDebugLog( 'squid', __METHOD__ . ': ' . implode( ' ', $urlArr ) );

	// Reliably broadcast the purge to all edge nodes via the event relayer
	EventRelayerGroup::singleton()
		->getRelayer( 'cdn-url-purges' )
		->notify( 'cdn-url-purges', array(
			'urls' => array_values( $urlArr ),
			'timestamp' => microtime( true )
		) );

	// Lossy UDP broadcasting, if enabled
	if ( $wgHTCPRouting ) {
		self::HTCPPurge( $urlArr );
	}

	if ( !$wgSquidServers ) {
		return;
	}

	// Direct per-server purges (does not scale very well).
	// At most 8 parallel sockets per squid; ~400 URLs per socket is a good
	// tradeoff since opening a socket takes a while.
	$socketCount = ceil( count( $urlArr ) / 400 );
	$socketCount = min( 8, $socketCount );

	$batchSize = ceil( count( $urlArr ) / $socketCount );
	$batches = array_chunk( $urlArr, $batchSize );

	$pool = new SquidPurgeClientPool();
	foreach ( $wgSquidServers as $server ) {
		foreach ( $batches as $batch ) {
			$client = new SquidPurgeClient( $server );
			foreach ( $batch as $url ) {
				$client->queuePurge( $url );
			}
			$pool->addClient( $client );
		}
	}
	$pool->run();
}
/**
 * Purges a list of Squids defined in $wgSquidServers.
 * $urlArr should contain the full URLs to purge as values
 * (example: $urlArr[] = 'http://my.host/something')
 * XXX report broken Squids per mail or log
 *
 * @param string[] $urlArr List of full URLs to purge
 * @return void
 */
static function purge( $urlArr ) {
	global $wgSquidServers, $wgHTCPMulticastRouting;

	if ( !$urlArr ) {
		return;
	}

	// Remove duplicates up front so the debug log, the HTCP purge and the
	// direct socket purge all operate on the deduplicated list.
	$urlArr = array_unique( $urlArr );

	wfDebug( "Squid purge: " . implode( ' ', $urlArr ) . "\n" );

	// Lossy UDP multicast purge, if configured
	if ( $wgHTCPMulticastRouting ) {
		SquidUpdate::HTCPPurge( $urlArr );
	}

	wfProfileIn( __METHOD__ );

	// socket cap per Squid
	$maxSocketsPerSquid = 8;
	// 400 seems to be a good tradeoff, opening a socket takes a while
	$urlsPerSocket = 400;
	$socketsPerSquid = ceil( count( $urlArr ) / $urlsPerSocket );
	if ( $socketsPerSquid > $maxSocketsPerSquid ) {
		$socketsPerSquid = $maxSocketsPerSquid;
	}

	// Queue each chunk of URLs on its own client (socket) per server,
	// then drive all the sockets in parallel.
	$pool = new SquidPurgeClientPool();
	$chunks = array_chunk( $urlArr, ceil( count( $urlArr ) / $socketsPerSquid ) );
	foreach ( $wgSquidServers as $server ) {
		foreach ( $chunks as $chunk ) {
			$client = new SquidPurgeClient( $server );
			foreach ( $chunk as $url ) {
				$client->queuePurge( $url );
			}
			$pool->addClient( $client );
		}
	}
	$pool->run();

	wfProfileOut( __METHOD__ );
}
/**
 * Purges a list of CDN nodes defined in $wgSquidServers.
 * $urlArr should contain the full URLs to purge as values
 * (example: $urlArr[] = 'http://my.host/something')
 *
 * @param string[] $urlArr List of full URLs to purge
 */
public static function purge( array $urlArr ) {
	global $wgSquidServers, $wgHTCPRouting;

	if ( !$urlArr ) {
		return;
	}

	// Purging the same URL twice is pointless
	$urlArr = array_unique( $urlArr );

	wfDebugLog( 'squid', __METHOD__ . ': ' . implode( ' ', $urlArr ) );

	// Lossy UDP broadcasting, if enabled
	if ( $wgHTCPRouting ) {
		self::HTCPPurge( $urlArr );
	}

	if ( !$wgSquidServers ) {
		return;
	}

	// Direct per-server purges. Use at most 8 parallel sockets per squid;
	// ~400 URLs per socket is a good tradeoff since opening a socket takes
	// a while.
	$sockets = min( 8, ceil( count( $urlArr ) / 400 ) );
	$batches = array_chunk( $urlArr, ceil( count( $urlArr ) / $sockets ) );

	$pool = new SquidPurgeClientPool();
	foreach ( $wgSquidServers as $server ) {
		foreach ( $batches as $batch ) {
			$client = new SquidPurgeClient( $server );
			foreach ( $batch as $url ) {
				$client->queuePurge( $url );
			}
			$pool->addClient( $client );
		}
	}
	$pool->run();
}