// Console consumer: endlessly polls one partition of a Kafka topic,
// echoing each message payload and advancing a locally-tracked offset
// by the number of valid bytes consumed per fetch.
$kPort = 9092; // Kafka broker port
$topic = 'test';
$maxSize = 10000000;  // max fetch size in bytes
$socketTimeout = 2;
$offset = 0;          // byte offset into the partition log
$partition = 0;
$nMessages = 0;
$consumer = new Kafka_SimpleConsumer($host, $kPort, $socketTimeout, $maxSize);
while (true) {
    try {
        // Build a fetch request for the current offset in this partition.
        $fetchRequest = new Kafka_FetchRequest($topic, $partition, $offset, $maxSize);
        $partialOffset = 0;
        $messages = $consumer->fetch($fetchRequest);
        foreach ($messages as $msg) {
            ++$nMessages;
            echo "\nconsumed[{$offset}][{$partialOffset}][msg #{$nMessages}]: " . $msg->payload();
            // validBytes() grows as the set is iterated; record progress so far.
            $partialOffset = $messages->validBytes();
        }
        // Advance the offset past everything consumed in this message set.
        $offset += $messages->validBytes();
        //echo "\n---[Advancing offset to $offset]------(".date('H:i:s').")";
        unset($fetchRequest);
        //sleep(2);
    } catch (Exception $e) {
        // Most likely all items in the queue were consumed; report and back off.
        echo "\nERROR: " . get_class($e) . ': ' . $e->getMessage() . "\n" . $e->getTraceAsString() . "\n";
        sleep(2);
    }
/**
 * Resync invalid offsets to the first valid position.
 *
 * For each partition iterator, probe the broker with a fetch at the
 * position we currently believe is live (committed + uncommitted). If the
 * broker rejects that offset as out of range, rewind the iterator to the
 * earliest offset the broker still holds, commit it to the offset
 * registry, and discard any uncommitted progress.
 *
 * @return integer Number of partitions/broker resync'ed
 */
public function resyncOffsets()
{
    $resetCount = 0;
    foreach ($this->iterators as $iterator) {
        $probe = new Kafka_SimpleConsumer(
            $iterator->host,
            $iterator->port,
            $this->socketTimeout,
            $this->maxBatchSize
        );
        try {
            // Attempt a fetch at the presumed-current position; success
            // means the offset is still valid on the broker.
            $candidateOffset = $iterator->offset + $iterator->uncommittedOffset;
            $request = new Kafka_FetchRequest(
                $this->topic,
                $iterator->partition,
                $candidateOffset,
                $this->maxBatchSize
            );
            $iterator->messages = $probe->fetch($request);
        } catch (Kafka_Exception_OffsetOutOfRange $e) {
            // Offset fell off the broker's retention window: rewind to the
            // earliest offset still available for this partition.
            $earliest = $probe->getOffsetsBefore(
                $this->topic,
                $iterator->partition,
                Kafka_SimpleConsumer::OFFSET_FIRST,
                1
            );
            if (count($earliest) > 0) {
                $this->offsetRegistry->commit(
                    $this->topic,
                    $iterator->broker,
                    $iterator->partition,
                    $earliest[0]
                );
                $iterator->uncommittedOffset = 0;
                $iterator->offset = $earliest[0];
                ++$resetCount;
            }
        }
    }

    return $resetCount;
}