	/**
	 * @return EventRelayerGroup
	 */
	public static function singleton() {
		if ( !self::$instance ) {
			self::$instance = new self( RequestContext::getMain()->getConfig() );
		}

		return self::$instance;
	}
	/**
	 * Purges a list of URLs from the CDN nodes defined in $wgSquidServers.
	 * $urlArr should contain the full URLs to purge as values
	 * (example: $urlArr[] = 'http://my.host/something')
	 *
	 * @param string[] $urlArr List of full URLs to purge
	 */
	public static function purge( array $urlArr ) {
		global $wgSquidServers, $wgHTCPRouting;

		if ( !$urlArr ) {
			return;
		}

		// Remove duplicate URLs from the list
		$urlArr = array_unique( $urlArr );

		wfDebugLog( 'squid', __METHOD__ . ': ' . implode( ' ', $urlArr ) );

		// Reliably broadcast the purge to all edge nodes
		$relayer = EventRelayerGroup::singleton()->getRelayer( 'cdn-url-purges' );
		$relayer->notify( 'cdn-url-purges', array(
			'urls' => array_values( $urlArr ),
			'timestamp' => microtime( true )
		) );

		// Send lossy UDP broadcasting if enabled
		if ( $wgHTCPRouting ) {
			self::HTCPPurge( $urlArr );
		}

		// Do direct server purges if enabled (this does not scale very well)
		if ( $wgSquidServers ) {
			// Maximum number of parallel connections per squid
			$maxSocketsPerSquid = 8;
			// Number of requests to send per socket
			// 400 seems to be a good tradeoff, opening a socket takes a while
			$urlsPerSocket = 400;
			$socketsPerSquid = ceil( count( $urlArr ) / $urlsPerSocket );
			if ( $socketsPerSquid > $maxSocketsPerSquid ) {
				$socketsPerSquid = $maxSocketsPerSquid;
			}

			$pool = new SquidPurgeClientPool();
			$chunks = array_chunk( $urlArr, ceil( count( $urlArr ) / $socketsPerSquid ) );
			foreach ( $wgSquidServers as $server ) {
				foreach ( $chunks as $chunk ) {
					$client = new SquidPurgeClient( $server );
					foreach ( $chunk as $url ) {
						$client->queuePurge( $url );
					}
					$pool->addClient( $client );
				}
			}

			$pool->run();
		}
	}
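	// Usage sketch (illustrative only, not part of this class): callers gather
	// the full URLs affected by a change and pass them to purge(). The enclosing
	// class name (SquidUpdate) and the Title-based URL building below are
	// assumptions based on typical MediaWiki callers, not something shown above.
	//
	//     $urls = array();
	//     $urls[] = $title->getInternalURL();
	//     $urls[] = $title->getInternalURL( 'action=history' );
	//     SquidUpdate::purge( $urls );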