
Commit 65451d9

fetchTopicOffsetsByTimestamp API implemented (#206)
* added fetchTopicOffsetsByTimeStamp
* Requested changes
* Requested changes
* requested changes
* Changelog changes
1 parent 3d8c258 commit 65451d9

7 files changed: +411 −0 lines changed

CHANGELOG.md

+9

@@ -1,3 +1,12 @@
+# confluent-kafka-javascript v1.1.0
+
+v1.1.0 is a feature release. It is supported for all usage.
+
+## Enhancements
+
+1. Add support for an Admin API to fetch topic offsets by timestamp (#206).
+
+
 # confluent-kafka-javascript v1.0.0

 v1.0.0 is a feature release. It is supported for all usage.

MIGRATION.md

+2

@@ -339,6 +339,8 @@ The admin-client only has support for a limited subset of methods, with more to
   and `includeAuthorizedOperations` options. Fetching for all topics is not advisable.
 * The `fetchTopicOffsets` method is supported with additional `timeout`
   and `isolationLevel` options.
+* The `fetchTopicOffsetsByTimestamp` method is supported with additional `timeout`
+  and `isolationLevel` options.

 ### Using the Schema Registry
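For orientation before the full example below, here is a minimal sketch of a call that uses both options. The broker address, topic name, and timestamp are placeholders; the method and option names come from this commit.

const { Kafka, IsolationLevel } = require('@confluentinc/kafka-javascript').KafkaJS;

async function sketch() {
  const kafka = new Kafka({ kafkaJS: { brokers: ['localhost:9092'] } });
  const admin = kafka.admin();
  await admin.connect();
  try {
    // Per partition: the earliest offset at/after the timestamp; the latest
    // offset is substituted where no such offset exists.
    const offsets = await admin.fetchTopicOffsetsByTimestamp(
      'my-topic',
      Date.now() - 60 * 60 * 1000, // one hour ago
      { timeout: 5000, isolationLevel: IsolationLevel.READ_COMMITTED }
    );
    console.log(offsets); // e.g. [{ partition: 0, offset: '42' }, ...]
  } finally {
    await admin.disconnect();
  }
}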
(New example file; its path is not shown in this view.)

+90

@@ -0,0 +1,90 @@
+const { Kafka, IsolationLevel } = require('@confluentinc/kafka-javascript').KafkaJS;
+const { parseArgs } = require('node:util');
+
+async function fetchOffsetsByTimestamp() {
+  // Parse command-line arguments
+  const args = parseArgs({
+    allowPositionals: true,
+    options: {
+      'bootstrap-servers': {
+        type: 'string',
+        short: 'b',
+        default: 'localhost:9092',
+      },
+      'timeout': {
+        type: 'string',
+        short: 't',
+        default: '5000',
+      },
+      'isolation-level': {
+        type: 'string',
+        short: 'i',
+        default: '0', // Default to '0' for read_uncommitted
+      },
+      'timestamp': {
+        type: 'string',
+        short: 's',
+      },
+    },
+  });
+
+  const {
+    'bootstrap-servers': bootstrapServers,
+    timeout,
+    'isolation-level': isolationLevel,
+    timestamp,
+  } = args.values;
+
+  const [topic] = args.positionals;
+
+  if (!topic) {
+    console.error('Topic name is required');
+    process.exit(1);
+  }
+
+  // Determine the isolation level
+  let isolationLevelValue;
+  if (isolationLevel === '0') {
+    isolationLevelValue = IsolationLevel.READ_UNCOMMITTED;
+  } else if (isolationLevel === '1') {
+    isolationLevelValue = IsolationLevel.READ_COMMITTED;
+  } else {
+    console.error('Invalid isolation level. Use 0 for READ_UNCOMMITTED or 1 for READ_COMMITTED.');
+    process.exit(1);
+  }
+
+  // Parse the timestamp if provided
+  const timestampValue = timestamp ? Number(timestamp) : undefined;
+
+  const kafka = new Kafka({
+    kafkaJS: {
+      brokers: [bootstrapServers],
+    },
+  });
+
+  const admin = kafka.admin();
+  await admin.connect();
+
+  try {
+    // Prepare options
+    const options = {
+      isolationLevel: isolationLevelValue,
+      timeout: Number(timeout),
+    };
+
+    // Fetch offsets by timestamp for the specified topic
+    const offsets = await admin.fetchTopicOffsetsByTimestamp(
+      topic,
+      timestampValue, // Only pass timestamp if provided
+      options
+    );
+
+    console.log(`Offsets for topic "${topic}" with timestamp ${timestampValue || 'not provided'}:`, JSON.stringify(offsets, null, 2));
+  } catch (err) {
+    console.error('Error fetching topic offsets by timestamp:', err);
+  } finally {
+    await admin.disconnect();
+  }
+}
+
+fetchOffsetsByTimestamp();
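To try the example, a run might look like the following; the script's path is not shown in this view, so the filename here is hypothetical:

node fetch-topic-offsets-by-timestamp.js --bootstrap-servers localhost:9092 --isolation-level 1 --timestamp 1700000000000 my-topic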

lib/admin.js

+1

@@ -67,6 +67,7 @@ const IsolationLevel = {
  * Either a timestamp can be used, or else, one of the special, pre-defined values
  * (EARLIEST, LATEST, MAX_TIMESTAMP) can be used while passing an OffsetSpec to listOffsets.
  * @param {number} timestamp - The timestamp to list offsets at.
+ * @memberof RdKafka
  * @constructor
  */
 function OffsetSpec(timestamp) {
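To illustrate the doc comment above: an OffsetSpec is either a concrete timestamp or one of the pre-defined values, and is passed per partition to listOffsets. The snippet below mirrors how this commit's lib/kafkajs/_admin.js constructs specs; how OffsetSpec is imported depends on the consuming module, so treat that part as assumed.

// A timestamp-based spec: list the earliest offset at/after this time.
const atTimestamp = new OffsetSpec(1700000000000);

// The special, pre-defined values named in the doc comment.
const earliest = OffsetSpec.EARLIEST;
const latest = OffsetSpec.LATEST;
const maxTimestamp = OffsetSpec.MAX_TIMESTAMP;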

lib/kafkajs/_admin.js

+125

@@ -872,6 +872,131 @@ class Admin {
       });
     });
   }
+
+  /**
+   * List offsets for the topic partition(s) by timestamp.
+   *
+   * @param {string} topic - The topic to fetch offsets for.
+   * @param {number?} timestamp - The timestamp to fetch offsets for.
+   * @param {object?} options
+   * @param {number?} options.timeout - The request timeout in milliseconds.
+   *                                    May be unset (default: 5000).
+   * @param {KafkaJS.IsolationLevel?} options.isolationLevel - The isolation level for reading the offsets.
+   *                                                           (default: READ_UNCOMMITTED)
+   *
+   * The returned topic partitions contain the earliest offset whose timestamp is greater than or equal to
+   * the given timestamp. If there is no such offset, or if the timestamp is unset, the latest offset is returned instead.
+   *
+   * @returns {Promise<Array<{partition: number, offset: string}>>}
+   */
+  async fetchTopicOffsetsByTimestamp(topic, timestamp, options = {}) {
+    if (this.#state !== AdminState.CONNECTED) {
+      throw new error.KafkaJSError("Admin client is not connected.", { code: error.ErrorCodes.ERR__STATE });
+    }
+
+    if (!Object.hasOwn(options, 'timeout')) {
+      options.timeout = 5000;
+    }
+
+    let topicData;
+    let startTime, endTime, timeTaken;
+
+    try {
+      // Measure time taken for fetchTopicMetadata.
+      startTime = hrtime.bigint();
+      topicData = await this.fetchTopicMetadata({ topics: [topic], timeout: options.timeout });
+      endTime = hrtime.bigint();
+      timeTaken = Number(endTime - startTime) / 1e6; // Convert nanoseconds to milliseconds.
+
+      // Deduct the elapsed time from the remaining timeout budget.
+      options.timeout -= timeTaken;
+      if (options.timeout <= 0) {
+        throw new error.KafkaJSError("Timeout exceeded while fetching topic metadata.", { code: error.ErrorCodes.ERR__TIMED_OUT });
+      }
+    } catch (err) {
+      throw createKafkaJsErrorFromLibRdKafkaError(err);
+    }
+
+    const partitionIds = topicData.flatMap(topic =>
+      topic.partitions.map(partition => partition.partitionId)
+    );
+
+    // Without a timestamp, look up the latest offset per partition directly;
+    // otherwise look up the earliest offset at/after the timestamp.
+    let topicPartitionOffset = [];
+    if (typeof timestamp === 'undefined') {
+      topicPartitionOffset = partitionIds.map(partitionId => ({
+        topic,
+        partition: partitionId,
+        offset: OffsetSpec.LATEST
+      }));
+    } else {
+      topicPartitionOffset = partitionIds.map(partitionId => ({
+        topic,
+        partition: partitionId,
+        offset: new OffsetSpec(timestamp)
+      }));
+    }
+
+    const topicPartitionOffsetsLatest = partitionIds.map(partitionId => ({
+      topic,
+      partition: partitionId,
+      offset: OffsetSpec.LATEST
+    }));
+
+    try {
+      // Measure time taken for listOffsets (by timestamp).
+      startTime = hrtime.bigint();
+      const offsetsByTimeStamp = await this.#listOffsets(topicPartitionOffset, options);
+      endTime = hrtime.bigint();
+      timeTaken = Number(endTime - startTime) / 1e6; // Convert nanoseconds to milliseconds.
+
+      // Deduct the elapsed time from the remaining timeout budget.
+      options.timeout -= timeTaken;
+      if (options.timeout <= 0) {
+        throw new error.KafkaJSError("Timeout exceeded while fetching offsets.", { code: error.ErrorCodes.ERR__TIMED_OUT });
+      }
+
+      if (typeof timestamp === 'undefined') {
+        // No timestamp was given, so the single (latest) lookup is the result.
+        return offsetsByTimeStamp.map(offset => ({
+          partition: offset.partition,
+          offset: offset.offset.toString(),
+        }));
+      } else {
+        // Measure time taken for listOffsets (latest).
+        startTime = hrtime.bigint();
+        const latestOffsets = await this.#listOffsets(topicPartitionOffsetsLatest, options);
+        endTime = hrtime.bigint();
+        timeTaken = Number(endTime - startTime) / 1e6; // Convert nanoseconds to milliseconds.
+
+        // Deduct the elapsed time from the remaining timeout budget.
+        options.timeout -= timeTaken;
+        if (options.timeout <= 0) {
+          throw new error.KafkaJSError("Timeout exceeded while fetching latest offsets.", { code: error.ErrorCodes.ERR__TIMED_OUT });
+        }
+
+        const combinedResults = partitionIds.map(partitionId => {
+          const latest = latestOffsets.find(offset => offset.partition === partitionId);
+          const timestampOffset = offsetsByTimeStamp.find(offset => offset.partition === partitionId);
+
+          // An offset of -1 means no offset matched the timestamp; fall back to the latest offset.
+          if (timestampOffset.offset === -1) {
+            return {
+              partition: partitionId,
+              offset: latest.offset.toString(),
+            };
+          } else {
+            return {
+              partition: partitionId,
+              offset: timestampOffset.offset.toString(),
+            };
+          }
+        });
+
+        return combinedResults;
+      }
+    } catch (err) {
+      throw createKafkaJsErrorFromLibRdKafkaError(err);
+    }
+  }
 }

 module.exports = {
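Two design points in this method are worth noting. First, the fallback is implemented client-side: when a timestamp is given, offsets are listed twice (once with the timestamp spec, once with LATEST), and any partition with no matching offset takes the latest value. Second, the caller's single timeout is treated as a budget across the metadata fetch and the one or two listOffsets calls. Below is a minimal standalone sketch of that budgeting pattern; the withBudget helper and its step functions are hypothetical, not part of the library.

const { hrtime } = require('node:process');

// Run async steps sequentially under one shared timeout budget; each step
// receives the time it has left, and its measured duration is deducted.
async function withBudget(budgetMs, steps) {
  let remaining = budgetMs;
  const results = [];
  for (const step of steps) {
    const start = hrtime.bigint();
    results.push(await step(remaining));
    remaining -= Number(hrtime.bigint() - start) / 1e6; // ns -> ms
    if (remaining <= 0)
      throw new Error('Timeout exceeded');
  }
  return results;
}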
