Esempio n. 1
1
 /**
  * Create producer and consumer handles for a Kafka topic and register
  * them in $this->topics under the topic name.
  *
  * @param string $name Topic name; also used as the registry key.
  * @param array  $conf Configuration; $conf['brokers'] must be a
  *                     non-empty array of broker address strings.
  *
  * @throws \InvalidArgumentException When the broker list is missing,
  *                                   empty, or not an array.
  */
 public function addTopic($name, array $conf)
 {
     if (empty($conf['brokers']) || !is_array($conf['brokers'])) {
         throw new \InvalidArgumentException(
             "\$conf['brokers'] must be a non-empty array of broker addresses."
         );
     }
     // Build the broker list once; both handles connect to the same cluster.
     $brokers = implode(',', $conf['brokers']);

     $producer = new \RdKafka\Producer();
     $producer->addBrokers($brokers);
     $producerTopicConf = new \RdKafka\TopicConf();
     $producerTopic = $producer->newTopic($name, $producerTopicConf);

     $consumer = new \RdKafka\Consumer();
     $consumer->addBrokers($brokers);
     $consumerTopicConf = new \RdKafka\TopicConf();
     // librdkafka configuration values are strings; pass them explicitly
     // instead of relying on implicit float-to-string coercion.
     $consumerTopicConf->set('auto.commit.interval.ms', '1000');
     $consumerTopicConf->set('offset.store.sync.interval.ms', '60000');
     $consumerTopic = $consumer->newTopic($name, $consumerTopicConf);

     $this->topics[$name] = new Topic($name, $producer, $producerTopic, $consumer, $consumerTopic);
 }
Esempio n. 2
0
<?php

// Tail partition 0 of topic "test1" from the earliest available offset,
// printing each message payload. Stops only on a real (non-transient) error.

$rk = new RdKafka\Consumer();
$rk->setLogLevel(LOG_DEBUG);
$rk->addBrokers("127.0.0.1");
$topic = $rk->newTopic("test1");
$topic->consumeStart(0, RD_KAFKA_OFFSET_BEGINNING);
while (true) {
    $msg = $topic->consume(0, 1000);
    // NOTE(review): consume() can return null on timeout in some
    // php-rdkafka versions — guard before dereferencing.
    if ($msg === null) {
        continue;
    }
    if ($msg->err === RD_KAFKA_RESP_ERR_NO_ERROR) {
        echo $msg->payload, "\n";
    } elseif ($msg->err === RD_KAFKA_RESP_ERR__PARTITION_EOF
        || $msg->err === RD_KAFKA_RESP_ERR__TIMED_OUT) {
        // End of partition / poll timeout are transient: keep waiting
        // for new messages instead of exiting the loop.
        continue;
    } else {
        echo $msg->errstr(), "\n";
        break;
    }
}
Esempio n. 3
0
// Point the consumer at the cluster and configure topic-level settings.
$consumer->addBrokers($brokersAddr);
$consumer->setLogLevel(LOG_DEBUG);
$topicConf = new RdKafka\TopicConf();
// Auto-commit consumed offsets once per second.
$topicConf->set('auto.commit.interval.ms', 1000);
// Set the offset store method to 'file'
$topicConf->set('offset.store.method', 'file');
$topicConf->set('offset.store.path', sys_get_temp_dir());
//$topicConf->set('api.version.request', true);
//$topicConf->set('broker.version.fallback', '0.8.2.2');
// Alternatively, set the offset store method to 'broker'
// $topicConf->set('offset.store.method', 'broker');
// Set where to start consuming messages when there is no initial offset in
// offset store or the desired offset is out of range.
// 'smallest': start from the beginning; 'largest': start from the newest offset.
$topicConf->set('auto.offset.reset', 'largest');
$topic = $consumer->newTopic("Topic_Name", $topicConf);
// Fetch metadata for this topic only (all_topics=false) to discover how
// many partitions it has, then start consuming each one from the stored offset.
$metaData = $consumer->getMetadata(false, $topic, 1000);
$partitions = $metaData->getTopics()->current()->getPartitions();
$partition = count($partitions);
// NOTE(review): `$i <= $partition` is an off-by-one — it also starts a
// non-existent partition with index count($partitions); it should be
// `$i < $partition`. Left unchanged here because the consume loop below
// uses the same bound and the two must be corrected together.
for ($i = 0; $i <= $partition; $i++) {
    $topic->consumeStart($i, RD_KAFKA_OFFSET_STORED);
}
while (true) {
    for ($i = 0; $i <= $partition; $i++) {
        $message = $topic->consume($i, 120 * 10000);
        switch ($message->err) {
            case RD_KAFKA_RESP_ERR_NO_ERROR:
                echo "resutl:" . $i;
                var_dump($message->offset);
                var_dump($message->payload);