Compare commits

...

2 Commits

Author SHA1 Message Date
Rajiv Singh
5c34138dab
Merge 832a5de75880bf6ccdc5b34e15f5aad86708521a into fb54d8b54984f89f7dba90a18e7c3048421464c3 2023-05-22 17:40:27 -05:00
Rajiv Ranjan Singh
832a5de758 optimize code for initializing Kafka consumers and processors
Signed-off-by: Rajiv Ranjan Singh <rajivperfect007@gmail.com>
2023-04-01 09:30:37 +05:30

View File

@ -85,50 +85,40 @@ trait UnifiedGraphWriter[
} }
} }
/**
 * Creates and starts the Kafka consumers and at-least-once processors that feed
 * the recos hose into the write queue.
 *
 * On success the `consumers` and `processors` member vars hold the live clients.
 * On any failure everything created so far is closed and both fields are reset
 * to `Seq.empty`, so a subsequent call starts from a clean state.
 *
 * @param queue      shared queue the buffered edge collectors write batches into
 * @param queuelimit semaphore bounding the number of outstanding batches in `queue`
 */
private def initRecosHoseKafka(
  queue: java.util.Queue[Array[RecosHoseMessage]],
  queuelimit: Semaphore,
): Unit = {
  try {
    // One consumer per configured thread. Auto-commit is disabled because the
    // AtLeastOnceProcessor commits offsets itself after successful processing.
    consumers = (0 until consumerNum).map { index =>
      new ThreadSafeKafkaConsumerClient(
        kafkaConsumerBuilder.clientId(s"clientId-$index").enableAutoCommit(false).config)
    }
    // Pair each consumer with its own buffered writer + edge processor so the
    // pipelines are independent.
    processors = consumers.zipWithIndex.map {
      case (consumer, index) =>
        val bufferedWriter = BufferedEdgeCollector(bufferSize, queue, queuelimit, statsReceiver)
        val processor = RecosEdgeProcessor(bufferedWriter)(statsReceiver)
        AtLeastOnceProcessor[String, RecosHoseMessage](
          s"recos-injector-kafka-$index",
          hosename,
          consumer,
          processor.process,
          maxPendingRequests = MaxPendingRequests * bufferSize,
          workerThreads = ProcessorThreads,
          commitIntervalMs = CommitIntervalMs,
          statsReceiver = statsReceiver
        )
    }
    log.info(s"starting ${processors.size} recosKafka processors")
    processors.foreach(_.start())
  } catch {
    case e: Throwable =>
      // NOTE(review): Throwable (not NonFatal) is caught deliberately so that
      // partially-constructed consumers/processors are always closed; it does
      // mean fatal errors (e.g. OOM) are swallowed here — confirm intended.
      e.printStackTrace()
      log.error(e, e.toString)
      // Roll back: close everything created so far and reset the member vars
      // so the writer is left in a consistent, retryable state. The fields
      // must be assigned (not try-local vals) for the catch block to see them.
      processors.foreach(_.close())
      processors = Seq.empty
      consumers.foreach(_.close())
      consumers = Seq.empty
  }
}
/** /**
* Initialize the graph writers, * Initialize the graph writers,