get() public method

This method will automatically set the following options:

- CURLINFO_HEADER_OUT - TRUE
- CURLOPT_HEADER - TRUE
- CURLOPT_HTTPGET - TRUE
- CURLOPT_NOBODY - FALSE

...and will unset the following options:

- CURLOPT_BINARYTRANSFER
- CURLOPT_CUSTOMREQUEST
- CURLOPT_FILE
- CURLOPT_POST
- CURLOPT_POSTFIELDS

Multiple requests are processed asynchronously, in parallel, and the callback function is called for each request as soon as it finishes. The number of parallel requests to be processed at all times can be set through the {@link threads} property. See also the {@link pause_interval} property. Note that requests may not finish in the same order as they were initiated!

The callback function to be executed for each request, as soon as it finishes, receives as its argument an object with 4 properties (info, headers, body and response):

function mycallback($result) {
    // everything went well at cURL level
    if ($result->response[1] == CURLE_OK) {
        // if the server responded with code 200 (meaning that everything went well)
        // see http://httpstatus.es/ for a list of possible response codes
        if ($result->info['http_code'] == 200) {
            // see all the returned data
            print_r('<pre>');
            print_r($result);
        // show the server's response code
        } else die('Server responded with code ' . $result->info['http_code']);
    // something went wrong
    // ($result still contains all data that could be gathered)
    } else die('cURL responded with: ' . $result->response[0]);
}

// include the Zebra_cURL library
require 'path/to/Zebra_cURL';

// instantiate the Zebra_cURL object
$curl = new Zebra_cURL();

// cache results in the "cache" folder and for 3600 seconds (one hour)
$curl->cache('cache', 3600);

// let's fetch the RSS feeds of some popular websites and
// execute the "mycallback" function for each request, as soon as it finishes
$curl->get(array(
    'http://feeds.feedburner.com/alistapart/main',
    'http://feeds.feedburner.com/TechCrunch',
    'http://feeds.mashable.com/mashable',
), 'mycallback');
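
Because requests are processed in parallel, you will usually tune the {@link threads} property (and, optionally, {@link pause_interval}) before calling get(). A minimal sketch, assuming pause_interval is the number of seconds to wait between batches of requests (check the property's documentation for the exact semantics):

// include the Zebra_cURL library
require 'path/to/Zebra_cURL';

$curl = new Zebra_cURL();

// process up to 20 requests at the same time
$curl->threads = 20;

// assumption: wait 5 seconds between batches of requests
$curl->pause_interval = 5;

$curl->get(array(
    'http://feeds.feedburner.com/alistapart/main',
    'http://feeds.feedburner.com/TechCrunch',
), 'mycallback');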
        
public get ( mixed $urls, mixed $callback = '' ) : void
$urls mixed A single URL or an array of URLs to process.

$callback mixed (Optional) Callback function to be called as soon as a request finishes. May be given as a string representing the name of an existing function, as an anonymous function created on the fly via {@link http://www.php.net/manual/en/function.create-function.php create_function}, or via a {@link http://www.php.net/manual/en/functions.anonymous.php closure}.

The callback function receives as its first argument an object with 4 properties, as described below, while any further arguments passed to the {@link get} method will be passed as extra arguments to the callback function:

- info - an associative array containing information about the request that just finished, as returned by PHP's {@link http://php.net/manual/en/function.curl-getinfo.php curl_getinfo} function;

- headers - an associative array with 2 items:
  - last_request - an array with a single entry containing the request headers generated by the last request; so, remember, if there are redirects involved, more requests are made, but only information about the last one will be available;
  - responses - an array with one or more entries (more if there are redirects involved) containing the response headers of all the requests made; each entry in the headers array is an associative array in the form of property => value;

- body - the response of the request (the content of the page at the URL). Unless disabled via the {@link __construct() constructor}, all applicable characters will be converted to HTML entities via PHP's {@link http://php.net/manual/en/function.htmlentities.php htmlentities} function, so remember to use PHP's {@link http://www.php.net/manual/en/function.html-entity-decode.php html_entity_decode} function to reverse this, if needed;

- response - the response given by the cURL library, as an array with 2 entries: the first entry is the textual representation of the result's code, while the second is the result's code itself; if the request was successful, these values will be array(CURLE_OK, 0); consult {@link http://www.php.net/manual/en/function.curl-errno.php#103128 this list} to see the possible values of this property.

If the callback function returns FALSE while {@link cache} is enabled, the library will not cache the respective request, making it easy to retry failed requests without having to clear the entire cache (see the sketch after the return type below).
Returns void
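
Since returning FALSE from the callback prevents a request from being cached when {@link cache} is enabled, a common pattern is to reject failed responses so that they are retried on the next run instead of being served from cache. A minimal sketch (the URL and the success criteria below are just placeholders, not part of the library):

// include the Zebra_cURL library
require 'path/to/Zebra_cURL';

function mycallback($result)
{
    // treat anything other than a cURL success with an HTTP 200 as a failure
    if ($result->response[1] != CURLE_OK || $result->info['http_code'] != 200) {
        // returning FALSE tells the library not to cache this request,
        // so it will be attempted again (instead of served from cache) on the next run
        return false;
    }
    // otherwise, process the body (decode the HTML entities added by the library first)
    $content = html_entity_decode($result->body);
}

$curl = new Zebra_cURL();
$curl->cache('cache', 3600);
$curl->get('http://example.com/', 'mycallback');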
Example #1
 public static function do_sync()
 {
     global $wpdb;
     $debug_mode = defined('KIGO_DEBUG') && KIGO_DEBUG;
     // Do not log into New Relic, because this function is slow and we know why
     if (extension_loaded('newrelic')) {
         newrelic_ignore_transaction();
     }
      // Check that cron is "enabled" and that the secret is correct
     if (!defined('KIGO_CRON_SECRET') || !isset($_GET[self::GET_PARAM_CRON_SECRET]) || $_GET[self::GET_PARAM_CRON_SECRET] !== KIGO_CRON_SECRET) {
         self::log(array('message' => 'Missing/Invalid cron secret', 'info' => $_SERVER));
         self::handle_logs($debug_mode);
         exit;
     }
     // Ensure that no other cron will run concurrently by acquiring an advisory lock (at MySQL database)
     if (!$wpdb->get_var($wpdb->prepare('SELECT GET_LOCK(%s, 0)', self::ADV_LOCK_PROCESSING))) {
         self::log('Previous cron execution is not finished, could not acquire cron lock');
         self::handle_logs($debug_mode);
         exit;
     }
     $prevTimeTotal = microtime(true);
     if (is_multisite()) {
         require_once dirname(__FILE__) . '/ext/class-zebra-curl.php';
          // Change the default value of wp_is_large_network, which becomes necessary once the number of sites reaches 10000
         add_filter('wp_is_large_network', array('Kigo_Network_Cron', 'custom_wp_is_large_network'), 1, 3);
         // Initialize the list of sites
         $sites = wp_get_sites(array('limit' => self::CUSTOM_WP_IS_LARGE_NETWORK, 'deleted' => 0, 'archived' => 0));
         shuffle($sites);
          // Filter the sites so that we do not trigger a sync for sites whose solution data has not been updated for X months
         self::filter_old_sites($sites);
         self::log(array('nb_sites' => count($sites)));
          // Do the Zebra cURL call (asynchronous calls)
         $curl = new Zebra_cURL();
         $curl->option(CURLOPT_TIMEOUT, self::CURL_TIMEOUT);
         $curl->threads = self::CURL_PARALLEL_CALLS;
          // Prepare URLs to be called
         $urls = array_map(array('Kigo_Network_Cron', 'generate_curl_urls'), $sites);
         $urls = array_filter($urls, function ($url) {
             return is_string($url);
         });
         $curl->get($urls, array('Kigo_Network_Cron', 'zebra_curl_callback'));
     } else {
         set_error_handler(array('Kigo_Network_Cron', 'php_error_handler'));
          // Add our custom handler for wp_die(), because some functions die on error and we don't want the script to die!
         add_filter('wp_die_ajax_handler', array('Kigo_Network_Cron', 'kigo_cron_wp_die_handler_filter'));
         $site_cron = new Kigo_Site_Cron();
         self::log($site_cron->sync_entities() ? true : $site_cron->_errors);
         restore_error_handler();
     }
     self::log(array('total_execution_time' => microtime(true) - $prevTimeTotal));
     if (!$wpdb->query($wpdb->prepare('SELECT RELEASE_LOCK(%s)', self::ADV_LOCK_PROCESSING))) {
         self::log('Could not release cron lock');
     }
     // Echo the logs in debug mode or send them by mail
     self::handle_logs($debug_mode);
     exit;
 }
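
The Kigo_Network_Cron::zebra_curl_callback method referenced above is not shown in this example; below is a purely hypothetical sketch of the shape such a callback could take, logging failed sync requests through the class's own log() helper (the real implementation may differ):

 public static function zebra_curl_callback($result)
 {
     // hypothetical sketch: treat anything other than a cURL success with an HTTP 200 as a failed sync
     if ($result->response[1] !== CURLE_OK || $result->info['http_code'] !== 200) {
         self::log(array(
             'message'   => 'Site sync request failed',
             'url'       => $result->info['url'],
             'http_code' => $result->info['http_code'],
             'curl'      => $result->response[0],
         ));
     }
 }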
Example #2
function callback($result)
{
    // everything went well at cURL level
    if ($result->response[1] == CURLE_OK) {
        // if server responded with code 200 (meaning that everything went well)
        // see http://httpstatus.es/ for a list of possible response codes
        if ($result->info['http_code'] == 200) {
            // see all the returned data
            // remember, that the "body" property of $result, unless specifically disabled in the library's constructor,
            // is run through "htmlentities()", so you may want to "html_entity_decode" it
            print_r('<pre>');
            print_r($result->info);
            // show the server's response code
        } else {
            die('Server responded with code ' . $result->info['http_code']);
        }
        // something went wrong
        // ($result still contains all data that could be gathered)
    } else {
        die('cURL responded with: ' . $result->response[0]);
    }
}
// include the library
require '../Zebra_cURL.php';
// instantiate the Zebra_cURL class
$curl = new Zebra_cURL();
// cache results for 3600 seconds (one hour)
$curl->cache('cache', 3600);
// get RSS feeds of some popular tech websites
$curl->get(array(
    'http://rss1.smashingmagazine.com/feed/',
    'http://allthingsd.com/feed/',
    'http://feeds.feedburner.com/nettuts',
    'http://feeds.feedburner.com/alistapart/main',
), 'callback');
Example #3
<?php

function callback($result)
{
    // results from Twitter are JSON-encoded;
    // remember, the "body" property of $result is run through "htmlentities()" so we need to "html_entity_decode" it
    $result->body = json_decode(html_entity_decode($result->body));
    // show everything
    print_r('<pre>');
    print_r($result->info);
}
// include the library
require '../Zebra_cURL.php';
// instantiate the Zebra_cURL class
$curl = new Zebra_cURL();
// cache results for 60 seconds
$curl->cache('cache', 60);
// search twitter for the "jquery" hashtag
$curl->get('http://search.twitter.com/search.json?q=' . urlencode('#jquery'), 'callback');
Example #4
<?php

function callback($result, $feeds)
{
    // everything went well at cURL level
    if ($result->response[1] == CURLE_OK) {
        // if server responded with code 200 (meaning that everything went well)
        // see http://httpstatus.es/ for a list of possible response codes
        if ($result->info['http_code'] == 200) {
            // remember, the "body" property is run through "htmlentities()", so decode it before parsing the feed
            $xml = simplexml_load_string(html_entity_decode($result->body));
            // if the feed could not be parsed
            if ($xml === false) {
                die('Could not parse feed at ' . $result->info['original_url']);
            } else {
                // show title and date for each entry
                foreach ($xml->entry as $entry) {
                    echo '<h2><span>' . $feeds[$result->info['original_url']] . '</span> <a href="' . $entry->link['href'] . '">' . $entry->title . '</a></h2>';
                    echo '<p>' . $entry->updated . '</p><hr>';
                }
            }
            // show the server's response code
        } else {
            die('Server responded with code ' . $result->info['http_code']);
        }
        // something went wrong
        // ($result still contains all data that could be gathered)
    } else {
        die('cURL responded with: ' . $result->response[0]);
    }
}
// include the library
require '../Zebra_cURL.php';
// instantiate the Zebra_cURL class
$curl = new Zebra_cURL();
// cache results for 3600 seconds (one hour)
$curl->cache('cache', 3600);
$feeds = array('http://rss1.smashingmagazine.com/feed/' => 'Smashing Magazine', 'http://feeds.feedburner.com/nettuts' => 'TutsPlus', 'http://feeds.feedburner.com/alistapart/main' => 'A List Apart');
// get RSS feeds of some popular tech websites
$curl->get(array_keys($feeds), 'callback', $feeds);
?>
