@@ -872,6 +872,131 @@ class Admin {
      });
    });
  }
+
+  /**
+   * List offsets for the topic partition(s) by timestamp.
+   *
+   * @param {string} topic - The topic to fetch offsets for.
+   * @param {number?} timestamp - The timestamp to fetch offsets for.
+   * @param {object?} options
+   * @param {number?} options.timeout - The request timeout in milliseconds.
+   *                                    May be unset (default: 5000)
+   * @param {KafkaJS.IsolationLevel?} options.isolationLevel - The isolation level for reading the offsets.
+   *                                                           (default: READ_UNCOMMITTED)
+   *
+   * The returned topic partitions contain the earliest offset whose timestamp is greater than or equal to
+   * the given timestamp. If there is no such offset, or if the timestamp is unset, the latest offset is
+   * returned instead.
+   *
+   * @returns {Promise<Array<{partition: number, offset: string}>>}
+   */
+  async fetchTopicOffsetsByTimestamp(topic, timestamp, options = {}) {
+    if (this.#state !== AdminState.CONNECTED) {
+      throw new error.KafkaJSError("Admin client is not connected.", { code: error.ErrorCodes.ERR__STATE });
+    }
+
+    if (!Object.hasOwn(options, 'timeout')) {
+      options.timeout = 5000;
+    }
+
+    let topicData;
+    let startTime, endTime, timeTaken;
+
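+    // options.timeout acts as a total budget: each request's elapsed time is
+    // subtracted from it before the next request is issued.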
+    try {
+      // Measure time taken for fetchTopicMetadata.
+      startTime = hrtime.bigint();
+      topicData = await this.fetchTopicMetadata({ topics: [topic], timeout: options.timeout });
+      endTime = hrtime.bigint();
+      timeTaken = Number(endTime - startTime) / 1e6; // Convert nanoseconds to milliseconds.
+
+      // Adjust timeout for the next request.
+      options.timeout -= timeTaken;
+      if (options.timeout <= 0) {
+        throw new error.KafkaJSError("Timeout exceeded while fetching topic metadata.", { code: error.ErrorCodes.ERR__TIMED_OUT });
+      }
+    } catch (err) {
+      throw createKafkaJsErrorFromLibRdKafkaError(err);
+    }
+
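+    // Build one OffsetSpec per partition: the requested timestamp when given,
+    // otherwise LATEST.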
+    const partitionIds = topicData.flatMap(topic =>
+      topic.partitions.map(partition => partition.partitionId)
+    );
+
+    let topicPartitionOffset = [];
+    if (typeof timestamp === 'undefined') {
+      topicPartitionOffset = partitionIds.map(partitionId => ({
+        topic,
+        partition: partitionId,
+        offset: OffsetSpec.LATEST
+      }));
+    } else {
+      topicPartitionOffset = partitionIds.map(partitionId => ({
+        topic,
+        partition: partitionId,
+        offset: new OffsetSpec(timestamp)
+      }));
+    }
+
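+    // A parallel list of LATEST specs, used as a fallback for partitions where
+    // no offset matches the requested timestamp.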
+    const topicPartitionOffsetsLatest = partitionIds.map(partitionId => ({
+      topic,
+      partition: partitionId,
+      offset: OffsetSpec.LATEST
+    }));
+
+    try {
+      // Measure time taken for listOffsets (by timestamp).
+      startTime = hrtime.bigint();
+      const offsetsByTimestamp = await this.#listOffsets(topicPartitionOffset, options);
+      endTime = hrtime.bigint();
+      timeTaken = Number(endTime - startTime) / 1e6; // Convert nanoseconds to milliseconds.
+
+      // Adjust timeout for the next request.
+      options.timeout -= timeTaken;
+      if (options.timeout <= 0) {
+        throw new error.KafkaJSError("Timeout exceeded while fetching offsets.", { code: error.ErrorCodes.ERR__TIMED_OUT });
+      }
+
+      if (typeof timestamp === 'undefined') {
+        // No timestamp was given, so the specs above were already LATEST and
+        // this result can be returned directly.
+        return offsetsByTimestamp.map(offset => ({
+          partition: offset.partition,
+          offset: offset.offset.toString(),
+        }));
+      } else {
+        // Measure time taken for listOffsets (latest).
+        startTime = hrtime.bigint();
+        const latestOffsets = await this.#listOffsets(topicPartitionOffsetsLatest, options);
+        endTime = hrtime.bigint();
+        timeTaken = Number(endTime - startTime) / 1e6; // Convert nanoseconds to milliseconds.
+
+        // Adjust timeout for the next request.
+        options.timeout -= timeTaken;
+        if (options.timeout <= 0) {
+          throw new error.KafkaJSError("Timeout exceeded while fetching latest offsets.", { code: error.ErrorCodes.ERR__TIMED_OUT });
+        }
+
+        const combinedResults = partitionIds.map(partitionId => {
+          const latest = latestOffsets.find(offset => offset.partition === partitionId);
+          const timestampOffset = offsetsByTimestamp.find(offset => offset.partition === partitionId);
+
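+          // An offset of -1 means the partition has no message at or after the
+          // requested timestamp; fall back to the latest offset.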
+          if (timestampOffset.offset === -1) {
+            return {
+              partition: partitionId,
+              offset: latest.offset.toString(),
+            };
+          } else {
+            return {
+              partition: partitionId,
+              offset: timestampOffset.offset.toString(),
+            };
+          }
+        });
+
+        return combinedResults;
+      }
+    } catch (err) {
+      throw createKafkaJsErrorFromLibRdKafkaError(err);
+    }
+  }
}

module.exports = {
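A minimal usage sketch of the new method (the import path, broker address, and topic name below are illustrative, following the library's KafkaJS-compatible wrapper API):

const { Kafka } = require('@confluentinc/kafka-javascript').KafkaJS;

async function example() {
  const kafka = new Kafka({ kafkaJS: { brokers: ['localhost:9092'] } });
  const admin = kafka.admin();
  await admin.connect();
  try {
    // Offsets of the earliest messages at or after one minute ago;
    // partitions with no such message report their latest offset instead.
    const offsets = await admin.fetchTopicOffsetsByTimestamp(
      'test-topic',
      Date.now() - 60 * 1000,
      { timeout: 5000 }
    );
    console.log(offsets); // e.g. [{ partition: 0, offset: '42' }]
  } finally {
    await admin.disconnect();
  }
}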