/**
  * connect() against an unreachable host must raise a socket exception.
  *
  * @expectedException Kafka_Exception_Socket
  */
 public function testConnectFailure()
 {
     // Host name cannot resolve and the port is out of the valid range,
     // so the connection attempt is guaranteed to fail.
     $badHost = 'invalid-host-name';
     $badPort = 1234567890;
     $consumer = new Kafka_SimpleConsumer($badHost, $badPort, 10, 1000000);
     $consumer->connect();
     // Only reached if no exception was thrown — force a failure in that case.
     $this->fail('The above call should throw an exception');
 }
 /**
  * Get an approximate measure of the amount of data still to be consumed
  *
  * @return integer
  */
 public function getRemainingSize()
 {
     try {
         if (0 == $this->nIterators) {
             $this->rewind();
             // initialise simple consumers
         }
     } catch (Kafka_Exception_InvalidTopic $e) {
         $logMsg = 'Invalid topic from ZookeeperConsumer::rewind(): Most likely cause is no topic yet as there is no data';
         error_log($logMsg);
     }
     $totalSize = 0;
     foreach ($this->iterators as $it) {
         $readBytes = $it->offset + $it->uncommittedOffset;
         if (null !== $it->messages) {
             $readBytes += $it->messages->validBytes();
         }
         $consumer = new Kafka_SimpleConsumer($it->host, $it->port, $this->socketTimeout, $this->maxBatchSize);
         $offsets = $consumer->getOffsetsBefore($this->topic, $it->partition, Kafka_SimpleConsumer::OFFSET_LAST, 1);
         if (count($offsets) > 0) {
             $remaining = $offsets[0] - $readBytes;
             // remaining bytes for this broker/partition
             if ($remaining > 0) {
                 $totalSize += $remaining;
             }
         }
         $consumer->close();
     }
     return $totalSize;
 }
Example #3
 * limitations under the License.
 */
// Bootstrap: make the library's lib/ directory resolvable, then register the autoloader.
set_include_path(implode(PATH_SEPARATOR, array(realpath(__DIR__ . '/../lib'), get_include_path())));
require 'autoloader.php';
// Connection settings — assumes local Zookeeper/Kafka instances on default ports.
$host = 'localhost';
$zkPort = 2181;
//zookeeper
$kPort = 9092;
//kafka server
$topic = 'test';
$maxSize = 10000000;
$socketTimeout = 2;
$offset = 0;
$partition = 0;
$nMessages = 0;
$consumer = new Kafka_SimpleConsumer($host, $kPort, $socketTimeout, $maxSize);
// Poll forever: fetch a batch, print each message, advance the offset.
// NOTE(review): this excerpt is truncated here — the try block opened below is
// closed (catch + end of the while loop) outside this view.
while (true) {
    try {
        //create a fetch request for topic "test", partition 0, current offset and fetch size of 1MB
        $fetchRequest = new Kafka_FetchRequest($topic, $partition, $offset, $maxSize);
        //get the message set from the consumer and print them out
        $partialOffset = 0;
        $messages = $consumer->fetch($fetchRequest);
        foreach ($messages as $msg) {
            ++$nMessages;
            echo "\nconsumed[{$offset}][{$partialOffset}][msg #{$nMessages}]: " . $msg->payload();
            // Bytes of valid messages seen so far in the current batch.
            $partialOffset = $messages->validBytes();
        }
        //advance the offset after consuming each message
        $offset += $messages->validBytes();
        //echo "\n---[Advancing offset to $offset]------(".date('H:i:s').")";
Example #4
 /**
  * Get an approximate measure of the amount of data still to be consumed
  *
  * @return integer
  */
 public function getRemainingSize()
 {
     if (0 == $this->nIterators) {
         // First call: set up the per-partition simple consumers.
         $this->rewind();
     }
     $total = 0;
     foreach ($this->iterators as $state) {
         // How many bytes have already been read on this broker/partition.
         $alreadyRead = $state->offset + $state->uncommittedOffset;
         $alreadyRead += (null !== $state->messages) ? $state->messages->validBytes() : 0;
         // Ask the broker for its most recent offset on this partition.
         $probe = new Kafka_SimpleConsumer($state->host, $state->port, $this->socketTimeout, $this->maxBatchSize);
         $latest = $probe->getOffsetsBefore($this->topic, $state->partition, Kafka_SimpleConsumer::OFFSET_LAST, 1);
         if (count($latest) > 0 && ($latest[0] - $alreadyRead) > 0) {
             // Positive gap between the broker's latest offset and what we have read.
             $total += $latest[0] - $alreadyRead;
         }
         $probe->close();
     }
     return $total;
 }