How to get the logsize of a Kafka topic

Java Code Examples for kafka.common.TopicAndPartition
The following are top voted examples showing how to use
kafka.common.TopicAndPartition. These examples are extracted from open source projects.
You can vote up the examples you like; your votes will be used in our system to produce
more good examples.
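All of the examples on this page use the legacy Scala SimpleConsumer API that shipped with Kafka 0.8/0.9. If you only need a topic's logsize (the log-end offset of each partition), the newer Java consumer can do it directly. The sketch below is not taken from any of the projects quoted here; it assumes Kafka clients 0.10.1+, a broker at localhost:9092, and a topic named my-topic.

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class TopicLogSize {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // assumption: local broker
        props.put("key.deserializer", ByteArrayDeserializer.class.getName());
        props.put("value.deserializer", ByteArrayDeserializer.class.getName());
        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            String topic = "my-topic"; // assumption: topic name
            List<TopicPartition> partitions = new ArrayList<>();
            for (PartitionInfo p : consumer.partitionsFor(topic)) {
                partitions.add(new TopicPartition(topic, p.partition()));
            }
            // endOffsets returns the log-end offset ("logsize") of each partition
            long total = 0;
            for (Map.Entry<TopicPartition, Long> e : consumer.endOffsets(partitions).entrySet()) {
                System.out.println(e.getKey() + " logsize = " + e.getValue());
                total += e.getValue();
            }
            System.out.println("total logsize = " + total);
        }
    }
}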
Example 1
public long[] getOffsets(int partition, long time) throws FetchingOffsetException {
    String topic = this.properties.getProperty("topic");
    TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
    Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
    requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(time, 1));
    kafka.javaapi.OffsetRequest request = new kafka.javaapi.OffsetRequest(requestInfo, kafka.api.OffsetRequest.CurrentVersion(), this.clientName);
    OffsetResponse response = consumer.getOffsetsBefore(request);
    if (response.hasError()) {
        throw new FetchingOffsetException("Error fetching offset data from the broker. Reason: " + response.errorCode(topic, partition));
    }
    return response.offsets(topic, partition);
}
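The time argument follows kafka.api.OffsetRequest semantics: LatestTime() (-1) asks for the log-end offset, EarliestTime() (-2) for the first offset still available. A hypothetical usage sketch, assuming fetcher is an instance of the class that declares getOffsets above:

// assumption: `fetcher` holds the class above, wired to a live SimpleConsumer
long logEnd = fetcher.getOffsets(0, kafka.api.OffsetRequest.LatestTime())[0];      // logsize of partition 0
long logStart = fetcher.getOffsets(0, kafka.api.OffsetRequest.EarliestTime())[0];  // oldest retained offset
long retained = logEnd - logStart; // number of messages currently on disk for this partition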
Example 2
protected final JavaInputDStream<MessageAndMetadata<K,M>> buildInputDStream(
    JavaStreamingContext streamingContext) {
  Preconditions.checkArgument(
      com.cloudera.oryx.kafka.util.KafkaUtils.topicExists(inputTopicLockMaster, inputTopic),
      "Topic %s does not exist; did you create it?", inputTopic);
  Preconditions.checkArgument(
      com.cloudera.oryx.kafka.util.KafkaUtils.topicExists(updateTopicLockMaster, updateTopic),
      "Topic %s does not exist; did you create it?", updateTopic);
  Map<String,String> kafkaParams = new HashMap<>();
  //kafkaParams.put("zookeeper.connect", inputTopicLockMaster);
  String groupID = getGroupID();
  kafkaParams.put("group.id", groupID);
  // Don't re-consume old messages from input by default
  kafkaParams.put("auto.offset.reset", "largest");
  kafkaParams.put("metadata.broker.list", inputBroker);
  // Newer version of metadata.broker.list:
  kafkaParams.put("bootstrap.servers", inputBroker);
  Map<TopicAndPartition,Long> offsets =
      com.cloudera.oryx.kafka.util.KafkaUtils.getOffsets(inputTopicLockMaster,
                                                         inputTopic);
  fillInLatestOffsets(offsets, kafkaParams);
  log.info("Initial offsets: {}", offsets);
  // Ugly compiler-pleasing acrobatics:
  @SuppressWarnings("unchecked")
  Class<MessageAndMetadata<K,M>> streamClass =
      (Class<MessageAndMetadata<K,M>>) (Class<?>) MessageAndMetadata.class;
  return KafkaUtils.createDirectStream(streamingContext,
                                       messageClass,
                                       keyDecoderClass,
                                       messageDecoderClass,
                                       streamClass,
                                       kafkaParams,
                                       Functions.<MessageAndMetadata<K,M>>identity());
}
Example 3
public static long getLastOffset(SimpleConsumer consumer, String topic,
        int partition, long whichTime, String clientName) {
    TopicAndPartition topicAndPartition = new TopicAndPartition(topic,
            partition);
    Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
    requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(
            whichTime, 1));
    kafka.javaapi.OffsetRequest request = new kafka.javaapi.OffsetRequest(
            requestInfo, kafka.api.OffsetRequest.CurrentVersion(),
            clientName);
    OffsetResponse response = consumer.getOffsetsBefore(request);
    if (response.hasError()) {
        TRACE.log(TraceLevel.ERROR,
                "Error fetching offset data from the broker. Reason: "
                        + response.errorCode(topic, partition));
        return 0; // assumption: bail out with 0 on error (the original return was truncated)
    }
    long[] offsets = response.offsets(topic, partition);
    TRACE.log(TraceLevel.TRACE,
            "Retrieving offsets: " + Arrays.toString(offsets));
    return offsets[0];
}
Example 4
private static long[] findAllOffsets(SimpleConsumer consumer, String topicName, int partitionId) {
    TopicAndPartition topicAndPartition = new TopicAndPartition(topicName, partitionId);
    // The API implies that this will always return all of the offsets, so it seems a partition cannot
    // have more than Integer.MAX_VALUE-1 segments.
    // This also assumes that the lowest value returned will be the first segment available, so if
    // segments have been dropped off, this value should not be 0.
    PartitionOffsetRequestInfo partitionOffsetRequestInfo = new PartitionOffsetRequestInfo(kafka.api.OffsetRequest.LatestTime(), 10000);
    OffsetRequest offsetRequest = new OffsetRequest(ImmutableMap.of(topicAndPartition, partitionOffsetRequestInfo), kafka.api.OffsetRequest.CurrentVersion(), consumer.clientId());
    OffsetResponse offsetResponse = consumer.getOffsetsBefore(offsetRequest);
    if (offsetResponse.hasError()) {
        short errorCode = offsetResponse.errorCode(topicName, partitionId);
        LOGGER.warn(format("Offset response has error: %d", errorCode));
        throw new RakamException("could not fetch data from Kafka, error code is '" + errorCode + "'", HttpResponseStatus.INTERNAL_SERVER_ERROR);
    }
    return offsetResponse.offsets(topicName, partitionId);
}
Example 5
private static OffsetInfo getOffset(String topic, PartitionMetadata partition) {
    Broker broker = partition.leader();
    // SimpleConsumer(host, port, soTimeout, bufferSize, clientId); the timeout and buffer
    // size were truncated in the original listing, so these values are assumptions.
    SimpleConsumer consumer = new SimpleConsumer(broker.host(), broker.port(), 10000, 64 * 1024,
        "com.rekko.newrelic.storm.kafka");
    try {
        TopicAndPartition topicAndPartition =
            new TopicAndPartition(topic, partition.partitionId());
        PartitionOffsetRequestInfo request = new PartitionOffsetRequestInfo(-1, 1);
        Map<TopicAndPartition, PartitionOffsetRequestInfo> map =
            new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
        map.put(topicAndPartition, request);
        OffsetRequest req = new OffsetRequest(map, (short) 0, "com.rekko.newrelic.storm.kafka");
        OffsetResponse resp = consumer.getOffsetsBefore(req);
        OffsetInfo offset = new OffsetInfo();
        offset.offset = resp.offsets(topic, partition.partitionId())[0];
        return offset;
    } finally {
        consumer.close();
    }
}
Example 6
/**
 * Retrieves the last offset before the given timestamp for a given topic partition.
 *
 * @return The last offset before the given timestamp or {@code 0} if failed to do so.
 */
private long getLastOffset(TopicPartition topicPart, long timestamp) {
    BrokerInfo brokerInfo = brokerService.getLeader(topicPart.getTopic(), topicPart.getPartition());
    SimpleConsumer consumer = brokerInfo == null ? null : consumers.getUnchecked(brokerInfo);
    // If no broker, treat it as failure attempt.
    if (consumer == null) {
        LOG.warn("Failed to talk to any broker. Default offset to 0 for {}", topicPart);
        return 0L;
    }
    // Fire offset request
    OffsetRequest request = new OffsetRequest(ImmutableMap.of(
        new TopicAndPartition(topicPart.getTopic(), topicPart.getPartition()),
        new PartitionOffsetRequestInfo(timestamp, 1)
    ), kafka.api.OffsetRequest.CurrentVersion(), consumer.clientId());
    OffsetResponse response = consumer.getOffsetsBefore(request);
    // Retrieve offsets from response
    long[] offsets = response.hasError() ? null : response.offsets(topicPart.getTopic(), topicPart.getPartition());
    if (offsets == null || offsets.length <= 0) {
        short errorCode = response.errorCode(topicPart.getTopic(), topicPart.getPartition());
        // If the topic partition doesn't exist, use offset 0 without logging error.
        if (errorCode != ErrorMapping.UnknownTopicOrPartitionCode()) {
            consumers.refresh(brokerInfo);
            LOG.warn("Failed to fetch offset for {} with timestamp {}. Error: {}. Default offset to 0.",
                     topicPart, timestamp, errorCode);
        }
        return 0L;
    }
    LOG.debug("Offset {} fetched for {} with timestamp {}.", offsets[0], topicPart, timestamp);
    return offsets[0];
}
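On the modern Java consumer the closest equivalent is offsetsForTimes (Kafka 0.10.1+), though the semantics differ slightly: it returns the first offset whose timestamp is at or after the query, not the last one before it. A minimal sketch, assuming an already-configured KafkaConsumer and a hypothetical topic my-topic:

// assumption: `consumer` is an already-configured org.apache.kafka.clients.consumer.KafkaConsumer
TopicPartition tp = new TopicPartition("my-topic", 0);       // hypothetical topic/partition
long timestamp = System.currentTimeMillis() - 3600 * 1000L;  // e.g. one hour ago
Map<TopicPartition, OffsetAndTimestamp> result =
        consumer.offsetsForTimes(Collections.singletonMap(tp, timestamp));
OffsetAndTimestamp oat = result.get(tp);
long offset = (oat == null) ? 0L : oat.offset(); // null when no message exists at/after the timestamp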
Example 7
public long getOffset(String topic, int partition, long startOffsetTime) {
    SimpleConsumer simpleConsumer = findLeaderConsumer(partition);
    if (simpleConsumer == null) {
        LOG.error("Error: consumer is null, cannot get offset from partition: " + partition);
        return -1;
    }
    TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
    Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
    requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(startOffsetTime, 1));
    OffsetRequest request = new OffsetRequest(requestInfo, kafka.api.OffsetRequest.CurrentVersion(), simpleConsumer.clientId());
    long[] offsets = simpleConsumer.getOffsetsBefore(request).offsets(topic, partition);
    if (offsets.length > 0) {
        return offsets[0];
    }
    return NO_OFFSET;
}
Example 8
public long getLastOffset(SimpleConsumer consumer, String topic, int partition,
        long whichTime, String clientName) {
    long lastOffset = 0;
    try {
        List<String> topics = Collections.singletonList(topic);
        TopicMetadataRequest req = new TopicMetadataRequest(topics);
        kafka.javaapi.TopicMetadataResponse topicMetadataResponse = consumer.send(req);
        TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
        for (TopicMetadata topicMetadata : topicMetadataResponse.topicsMetadata()) {
            for (PartitionMetadata partitionMetadata : topicMetadata.partitionsMetadata()) {
                if (partitionMetadata.partitionId() == partition) {
                    String partitionHost = partitionMetadata.leader().host();
                    consumer = getConsumer(partitionHost, partitionMetadata.leader().port(), clientName);
                    Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
                    requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(whichTime, 1));
                    kafka.javaapi.OffsetRequest request = new kafka.javaapi.OffsetRequest(requestInfo, kafka.api.OffsetRequest.CurrentVersion(), clientName);
                    OffsetResponse response = consumer.getOffsetsBefore(request);
                    if (response.hasError()) {
                        LOG.error("Error fetching offset data from the broker. Reason: " + response.errorCode(topic, partition));
                        lastOffset = 0;
                    } else {
                        long[] offsets = response.offsets(topic, partition);
                        lastOffset = offsets[0];
                    }
                }
            }
        }
    } catch (Exception e) {
        LOG.error("Error while collecting the log size for topic: " + topic + ", and partition: " + partition, e);
    }
    return lastOffset;
}
Example 9
public static long getOffset(SimpleConsumer consumer, String topic,
        int partition, long startOffsetTime) {
    TopicAndPartition topicAndPartition = new TopicAndPartition(topic,
            partition);
    Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
    requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(
            startOffsetTime, 1));
    OffsetRequest request = new OffsetRequest(requestInfo,
            kafka.api.OffsetRequest.CurrentVersion(), consumer.clientId());
    long[] offsets = consumer.getOffsetsBefore(request).offsets(topic,
            partition);
    if (offsets.length > 0) {
        return offsets[0];
    }
    return NO_OFFSET;
}
Example 10
/**
 * A Java transliteration of what the Scala implementation does, which unfortunately is declared as private.
 */
protected void flushDirtyLogs() {
    LOG.debug("Checking for dirty logs to flush...");
    final Set<Map.Entry<TopicAndPartition, Log>> entries = JavaConversions.mapAsJavaMap(logManager.logsByTopicPartition()).entrySet();
    for (final Map.Entry<TopicAndPartition, Log> topicAndPartitionLogEntry : entries) {
        final TopicAndPartition topicAndPartition = topicAndPartitionLogEntry.getKey();
        final Log kafkaLog = topicAndPartitionLogEntry.getValue();
        final long timeSinceLastFlush = JODA_TIME.milliseconds() - kafkaLog.lastFlushTime();
        try {
            LOG.debug(
                "Checking if flush is needed on {} flush interval {} last flushed {} time since last flush: {}",
                topicAndPartition.topic(),
                kafkaLog.config().flushInterval(),
                kafkaLog.lastFlushTime(),
                timeSinceLastFlush);
            if (timeSinceLastFlush >= kafkaLog.config().flushMs()) {
                kafkaLog.flush();
            }
        } catch (Exception e) {
            LOG.error("Error flushing topic " + topicAndPartition.topic(), e);
        }
    }
}
Example 11
/**
 * @param consumer
 * @param topic
 * @param partition
 * @param whichTime
 * @param clientName
 * @return 0 if consumer is null at this time
 */
public static long getLastOffset(SimpleConsumer consumer, String topic, int partition, long whichTime, String clientName) {
    if (consumer == null) {
        return 0;
    }
    TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
    Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
    requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(whichTime, 1));
    OffsetRequest request = new OffsetRequest(requestInfo, kafka.api.OffsetRequest.CurrentVersion(), clientName);
    OffsetResponse response = consumer.getOffsetsBefore(request);
    if (response.hasError()) {
        logger.error("Error fetching offset data from the broker. Reason: " + response.errorCode(topic, partition));
        return 0; // assumption: fall back to 0 on error (the original return was truncated)
    }
    long[] offsets = response.offsets(topic, partition);
    return offsets[0];
}
Example 12
private static long[] findAllOffsets(SimpleConsumer consumer, KafkaPartition partition) {
    TopicAndPartition topicAndPartition = new TopicAndPartition(partition.getTopicName(), partition.getPartitionIdAsInt());
    // The API implies that this will always return all of the offsets, so it seems a partition cannot
    // have more than Integer.MAX_VALUE-1 segments.
    // This also assumes that the lowest value returned will be the first segment available, so if
    // segments have been dropped off, this value should not be 0.
    PartitionOffsetRequestInfo partitionOffsetRequestInfo = new PartitionOffsetRequestInfo(kafka.api.OffsetRequest.LatestTime(), Integer.MAX_VALUE);
    OffsetRequest offsetRequest = new OffsetRequest(ImmutableMap.of(topicAndPartition, partitionOffsetRequestInfo), kafka.api.OffsetRequest.CurrentVersion(), consumer.clientId());
    OffsetResponse offsetResponse = consumer.getOffsetsBefore(offsetRequest);
    if (offsetResponse.hasError()) {
        short errorCode = offsetResponse.errorCode(partition.getTopicName(), partition.getPartitionIdAsInt());
        log.warn("Offset response has error: %d", errorCode);
        throw new PrestoException(KAFKA_SPLIT_ERROR, "could not fetch data from Kafka, error code is '" + errorCode + "'");
    }
    return offsetResponse.offsets(partition.getTopicName(), partition.getPartitionIdAsInt());
}
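When maxNumOffsets is large like this, the broker returns the boundary offset of every log segment, and the array comes back in descending order: index 0 is the log-end offset and the last element is the earliest offset still on disk. A usage sketch, assuming Example 12's findAllOffsets and its inputs:

// assumption: `consumer` and `partition` are set up as in Example 12
long[] all = findAllOffsets(consumer, partition);
long latestOffset = all[0];                 // the "logsize" (log-end offset)
long earliestOffset = all[all.length - 1];  // first offset still retained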
Example 13
private long getOffset(boolean earliest) throws InterruptedException {
    TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partitionId);
    Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
    requestInfo.put(
        topicAndPartition,
        new PartitionOffsetRequestInfo(
            earliest ? kafka.api.OffsetRequest.EarliestTime() : kafka.api.OffsetRequest.LatestTime(), 1
        )
    );
    OffsetRequest request = new OffsetRequest(
        requestInfo, kafka.api.OffsetRequest.CurrentVersion(), clientId
    );
    OffsetResponse response;
    try {
        response = consumer.getOffsetsBefore(request);
    }
    catch (Exception e) {
        ensureNotInterrupted(e);
        log.error(e, "caught exception in getOffsetsBefore [%s] - [%s]", topic, partitionId);
        return -1;
    }
    if (response.hasError()) {
        log.error(
            "error fetching offset data from the broker [%s]. reason: [%s]", leaderBroker.host(),
            response.errorCode(topic, partitionId)
        );
        return -1;
    }
    long[] offsets = response.offsets(topic, partitionId);
    return earliest ? offsets[0] : offsets[offsets.length - 1];
}
Example 14
private void mockSimpleConsumerForEarliestOffset(
        SimpleConsumer mockConsumer, String topic, int partition,
        long earliestOffset) {
    TopicAndPartition tp = new TopicAndPartition(topic, partition);
    OffsetResponse earliestResponse = mock(OffsetResponse.class);
    when(earliestResponse.hasError()).thenReturn(false);
    when(earliestResponse.offsets(topic, partition)).thenReturn(
            new long[] { earliestOffset });
    when(mockConsumer
            .getOffsetsBefore(argThat(new IsEarliestOffsetRequest(
                    tp)))).thenReturn(earliestResponse);
}
Example 15
public static long getLastOffset(SimpleConsumer consumer, String topic, int partition,
        long whichTime, String clientName) throws StageException {
    try {
        TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
        Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<>();
        requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(whichTime, 1));
        kafka.javaapi.OffsetRequest request = new kafka.javaapi.OffsetRequest(
            requestInfo, kafka.api.OffsetRequest.CurrentVersion(), clientName);
        OffsetResponse response = consumer.getOffsetsBefore(request);
        if (response.hasError()) {
            LOG.error(KafkaErrors.KAFKA_22.getMessage(), consumer.host() + ":" + consumer.port(),
                response.errorCode(topic, partition));
            return 0; // assumption: fall back to 0 when the response has an error
        }
        long[] offsets = response.offsets(topic, partition);
        return offsets[0];
    } catch (Exception e) {
        LOG.error(KafkaErrors.KAFKA_30.getMessage(), e);
        throw new StageException(KafkaErrors.KAFKA_30, e.toString(), e);
    }
}
Example 16
private long getTopicLogSize(String topic, int pid) {
    Option<Object> o = ZkUtils.getLeaderForPartition(zkClient, topic, pid);
    if (o.isEmpty() || o.get() == null) {
        log.error("No broker for partition %s - %s", topic, pid);
        return 0; // assumption: bail out when the partition has no leader
    }
    Integer leaderId = Int.unbox(o.get());
    SimpleConsumer consumer = consumerMap.get(leaderId);
    if (consumer == null) {
        consumer = createSimpleConsumer(leaderId);
    }
    // createSimpleConsumer may fail.
    if (consumer == null) {
        return 0;
    }
    consumerMap.put(leaderId, consumer);
    TopicAndPartition topicAndPartition = new TopicAndPartition(topic, pid);
    PartitionOffsetRequestInfo requestInfo = new PartitionOffsetRequestInfo(OffsetRequest.LatestTime(), 1);
    OffsetRequest request = new OffsetRequest(
        new Map1<TopicAndPartition, PartitionOffsetRequestInfo>(topicAndPartition, requestInfo),
        0, // assumption: a correlation-id argument was dropped from the original listing
        Request.OrdinaryConsumerId());
    OffsetResponse response = consumer.getOffsetsBefore(request);
    PartitionOffsetsResponse offsetsResponse = response.partitionErrorAndOffsets().get(topicAndPartition).get();
    return scala.Long.unbox(offsetsResponse.offsets().head());
}
Example 17
public static long getLastOffset(SimpleConsumer consumer, String topic, int partition,
        long whichTime, String clientName) {
    TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
    Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
    requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(whichTime, 1));
    kafka.javaapi.OffsetRequest request = new kafka.javaapi.OffsetRequest(
            requestInfo, kafka.api.OffsetRequest.CurrentVersion(), clientName);
    OffsetResponse response = consumer.getOffsetsBefore(request);
    if (response.hasError()) {
        throw new RuntimeException("Error fetching offset data from the broker. Reason: " + response.errorCode(topic, partition));
    }
    long[] offsets = response.offsets(topic, partition);
    return offsets[0];
}
Example 18
/**
 * Request latest offsets for a set of partitions, via a Kafka consumer.
 *
 * @param consumer The consumer connected to lead broker
 * @param topic The topic name
 * @param partitions The list of partitions we need offsets for
 * @param whichTime The type of time we are requesting. -1 and -2 are special constants (See OffsetRequest)
 */
private static void getLastOffset(SimpleConsumer consumer, String topic, List<FetchPartition> partitions, long whichTime) {
    Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<>();
    for (FetchPartition fp : partitions) {
        TopicAndPartition topicAndPartition = new TopicAndPartition(topic, fp.partition);
        requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(whichTime, 1));
    }
    kafka.javaapi.OffsetRequest request = new kafka.javaapi.OffsetRequest(requestInfo, kafka.api.OffsetRequest.CurrentVersion(), consumer.clientId());
    OffsetResponse response = consumer.getOffsetsBefore(request);
    if (response.hasError()) {
        String exception = "";
        short code;
        for (FetchPartition fp : partitions) {
            if ((code = response.errorCode(topic, fp.partition)) != ErrorMapping.NoError()) {
                exception += "\nException for partition " + fp.partition + ": " + StringUtils.stringifyException(ErrorMapping.exceptionFor(code));
            }
        }
        throw new RuntimeException("Unable to get last offset for topic " + topic + " and partitions " + partitions
                + ". " + exception);
    }
    for (FetchPartition fp : partitions) {
        // the resulting offset is the next offset we are going to read
        // for not-yet-consumed partitions, it is 0.
        fp.nextOffsetToRead = response.offsets(topic, fp.partition)[0];
    }
}
Example 19
long getLastCommitOffset() {
    TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partitionMetadata.partitionId());
    List<TopicAndPartition> topicAndPartitions = new ArrayList<>();
    topicAndPartitions.add(topicAndPartition);
    OffsetFetchRequest oRequest = new OffsetFetchRequest(name, topicAndPartitions, (short) 0, 0, name);
    OffsetFetchResponse oResponse = consumer.fetchOffsets(oRequest);
    Map<TopicAndPartition, OffsetMetadataAndError> offsets = oResponse.offsets();
    OffsetMetadataAndError offset = offsets.get(topicAndPartition);
    long currOffset = offset.offset();
    if (currOffset < 0) currOffset = 0;
    return currOffset;
}
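Combining a committed offset like this with a log-end offset (Example 17) gives the consumer lag for the partition. A sketch under the assumption that both methods shown on this page live on the same class:

// assumption: getLastOffset(...) is Example 17's helper and getLastCommitOffset() is the method above
long logSize = getLastOffset(consumer, topic, partitionMetadata.partitionId(),
        kafka.api.OffsetRequest.LatestTime(), name);
long committed = getLastCommitOffset();
long lag = logSize - committed; // messages the consumer group has not yet read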
Example 20
/**
 * Test that getOrCreateLog on a non-existent log creates a new log and that we can append to the new log.
 */
public void testCreateLog() {
    Log log = logManager.createLog(new TopicAndPartition(name, 0), logConfig);
    File logFile = new File(logDir, name + "-0");
    assertTrue(logFile.exists());
    log.append(TestUtils.singleMessageSet("test".getBytes()));
}
