diff --git a/.gitignore b/.gitignore index 3160d58..6f25fdb 100644 --- a/.gitignore +++ b/.gitignore @@ -18,3 +18,20 @@ bin/ doc/ /.metadata/ + +.classpath +.project +.settings/ +clients/.classpath +clients/.project +clients/.settings/ +examples/.project +examples/.settings/ +examples/ojdbc.properties +examples/consumer/.classpath +examples/consumer/.project +examples/consumer/.settings/ +examples/producer/.classpath +examples/producer/.project +examples/producer/.settings/ +okafka-github/ diff --git a/LICENSE.txt b/LICENSE.txt index 7e182dc..0fce3de 100644 --- a/LICENSE.txt +++ b/LICENSE.txt @@ -1,4 +1,4 @@ -Copyright (c) 2019, 2023 Oracle and/or its affiliates. +Copyright (c) 2019, 2024 Oracle and/or its affiliates. The Universal Permissive License (UPL), Version 1.0 @@ -873,4 +873,4 @@ GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. -_______________________ +_______________________ \ No newline at end of file diff --git a/README.md b/README.md index 5e915c7..7eee562 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,5 @@ -# Kafka Java Client for Oracle Transactional Event Queues (Preview v0.8) + +# Kafka Java Client for Oracle Transactional Event Queues ## Building the Kafka Java Client for Oracle TxEventQ distribution @@ -6,170 +7,99 @@ This distribution contains Java source code to provide Kafka Java client compati You need to have [Gradle 7.3 or above](http://www.gradle.org/installation) and [Java](http://www.oracle.com/technetwork/java/javase/downloads/index.html) installed. -This distribution contains preview version 0.8 of the `Kafka Java Client for Oracle Transactional Event Queues` project. This is tested with [JRE 8u162](https://www.oracle.com/technetwork/java/javase/downloads/jre8-downloads-2133155.html) but we recommend using the latest version. - -Java 8u162 or above(recommended is 8U251) should be used for building in order to support both Java 8 and Java 10 at runtime. +This distribution contains version 23.4.0.0 of the `Kafka Java Client for Oracle Transactional Event Queues` project. It will be referred as OKafka-23.4.0.0 henceforth. This is tested with JDK 11.0.22 but we recommend using the latest version. -The Kafka Java Client works with Oracle Database 20c Cloud Preview (Database Cloud Service), although some features will work with Autonomous Database for testing the simpler producer/consumer examples. +The Kafka Java Client works with Oracle Database 23ai Free version as well as Oracle Database 23ai available on Oracle Autonomous Cloud platform. To test this distribution in free Oracle Cloud environment create [Oracle Cloud account](https://docs.cloud.oracle.com/en-us/iaas/Content/FreeTier/freetier.htm) then create [Oracle Autonomous Transaction Processing Database instance](https://docs.oracle.com/en/cloud/paas/autonomous-data-warehouse-cloud/tutorial-getting-started-autonomous-db/index.html) in cloud. -A database user should be created and should be granted the privileges mentioned in configuration section. Then create a Transactional Event Queue to produce and consume messages. +A database user should be created and should be granted the privileges mentioned in Database user configuration section. 
Then create a Transactional Event Queue to produce and consume messages.
-Finally, build `okafka.jar` and run [Producer.java](./examples/producer/src/main/java/org/oracle/okafka/examples/Producer.java) to produce into Oracle TxEventQ, [Consumer.java](./examples/consumer/src/main/java/org/oracle/okafka/examples/Consumer.java) to consume from Oracle TxEventQ.
-
-### Configuration ###
-To run `okafka.jar` against Oracle Database, a database user should be created and should be granted below privileges.
+### Database user configuration ###
+
+To run an `OKafka` application against Oracle Database, a database user must be created and granted the privileges listed below.
```roomsql
-create user identified by
-grant connect, resource to user
-grant execute on dbms_aqadm to use`
-grant execute on dbms_aqin to user
-grant execute on dbms_aqjms to user
-grant select_catalog_role to user
+create user identified by
+
+GRANT AQ_USER_ROLE to user;
+GRANT CONNECT, RESOURCE, unlimited tablespace to user;
+GRANT EXECUTE on DBMS_AQ to user;
+GRANT EXECUTE on DBMS_AQADM to user;
+GRANT EXECUTE on DBMS_AQIN to user;
+GRANT EXECUTE on DBMS_TEQK to user;
+GRANT SELECT on GV_$SESSION to user;
+GRANT SELECT on V_$SESSION to user;
+GRANT SELECT on GV_$INSTANCE to user;
+GRANT SELECT on GV_$LISTENER_NETWORK to user;
+GRANT SELECT on GV_$PDBS to user;
+GRANT SELECT on USER_QUEUE_PARTITION_ASSIGNMENT_TABLE to user;
+GRANT SELECT on SYS.DBA_RSRC_PLAN_DIRECTIVES to user;
+EXEC DBMS_AQADM.GRANT_PRIV_FOR_RM_PLAN('user');
+```
+
+Note:
+In general it is preferable to grant a database user a specific quota on a tablespace rather than unlimited quota on the default tablespace. One can create a tablespace and use the following command to grant quota on a specific tablespace to a database user.
+
+```roomsql
+ALTER USER user QUOTA UNLIMITED /* or size-clause */ on tablespace_name;
```
-Once user is created and above privileges are granted, connect to Oracle Database as this user and create a Transactional Event Queue using below PL/SQL script. For this preview release, upper case Topic/queue names are only allowed. Also this preview supports TxEventQ with only 1 partition hence, in below script `SHARD_NUM` parameter for TxEventQ is set to 1.
+Once the user is created and the above privileges are granted, connect to Oracle Database as this user and create a Transactional Event Queue using the PL/SQL script below. One can also use the `KafkaAdmin` interface, as shown in `CreateTopic.java` in the `examples` directory, to create a Transactional Event Queue.
```roomsql
+-- Create an OKafka topic named 'TXEQ' with 5 partitions and a retention time of 7 days.
begin
-    sys.dbms_aqadm.create_sharded_queue(queue_name=>"TxEventQ", multiple_consumers => TRUE);
-    sys.dbms_aqadm.set_queue_parameter('TxEventQ', 'SHARD_NUM', 1);
-    sys.dbms_aqadm.set_queue_parameter('TxEventQ', 'STICKY_DEQUEUE', 1);
-    sys.dbms_aqadm.set_queue_parameter('TxEventQ', 'KEY_BASED_ENQUEUE', 1);
-    sys.dbms_aqadm.start_queue('TxEventQ');
+    dbms_aqadm.create_database_kafka_topic( topicname=> 'TXEQ', partition_num=>5, retentiontime => 7*24*3600);
end;
```
#### Connection configuration ####
-This project uses JDBC(thin driver) connection to connect to Oracle Database instance using any one of two security protocols.
+`OKafka` uses a JDBC (thin driver) connection to connect to the Oracle Database instance, using one of two security protocols.
1. PLAINTEXT
-2. SSL.
-
-The following properties have to be provided to use these protocols.
-
-1. 
PLAINTEXT: In this protocol a JDBC connection uses user_name and password to connect to Oracle instance. To use PLAINTEXT protocol then user must provide following properties through application. - - security.protocol = "PLAINTEXT" - oracle.net.tns_admin = "location of tnsnames.ora file" (for parsing JDBC connection string) - oracle.service.name = "name of the service running on the instance" - oracle.instance.name = "name of the oracle database instance" - bootstrap.servers = "host:port" - - and following properties in `ojdbc.properties` file and `ojdbc.properties` file should be in location `oracle.net.tns_admin`. - - user(in lowercase) = "user name of database user" - password(in lowercase) = "user password" - - - -2. SSL: To use SSL secured connections to connect to Autonomous Database on Oracle Cloud follow these steps. - * JDBC Thin Driver Connection prerequisites for SSL security: Use JDK8u162 or higher(recommended latest). Use 18.3 JDBC Thin driver or higher(recommended latest) - - * To leverage JDBC SSL security to connect to Oracle Database instance the following properties have to be set. - JDBC supports SSL secured connections to Oracle Database in two ways 1. Wallets 2. Java Key Store. - + Using wallets: - - Add the following required dependent jars for using Oracle Wallets in classpath. Download oraclepki.jar, osdt_cert.jar, and osdt_core.jar files along with JDBC thin driver from [JDBC and UCP download page](https://www.oracle.com/database/technologies/appdev/jdbc-downloads.html) and add these jars to classpath. - - Enable Oracle PKI provider: Enable it statically as follows, Add OraclePKIProvider at the end of file `java.security` located at `$JRE_HOME/jre/lib/security/java.security`. If SSO wallet i.e cwallet.sso is used for providing SSL security. - - security.provider.1=sun.security.provider.Sun - security.provider.2=sun.security.rsa.SunRsaSign - security.provider.3=com.sun.net.ssl.internal.ssl.Provider - security.provider.4=com.sun.crypto.provider.SunJCE - security.provider.5=sun.security.jgss.SunProvider - security.provider.6=com.sun.security.sasl.Provider - security.provider.7=oracle.security.pki.OraclePKIProvider - - To use ewallet.p12 for SSL security then place OraclePKIProvider before sun provider in file `java.security`. - - security.provider.1=sun.security.provider.Sun - security.provider.2=sun.security.rsa.SunRsaSign - security.provider.3=oracle.security.pki.OraclePKIProvider - security.provider.4=com.sun.net.ssl.internal.ssl.Provider - security.provider.5=com.sun.crypto.provider.SunJCE - security.provider.6=sun.security.jgss.SunProvider - security.provider.7=com.sun.security.sasl.Provider - - Also, it is possible enabling it dynamically by code including Oracle PKI library in project dependencies - - ``` - implementation group: 'com.oracle.database.security', name: 'oraclepki', version: '21.5.0.0' - ``` - - and the following code in your project. - - ```java - private static void addOraclePKIProvider(){ - System.out.println("Installing Oracle PKI provider."); - Provider oraclePKI = new oracle.security.pki.OraclePKIProvider(); - Security.insertProviderAt(oraclePKI,3); - } - ``` - - - Must provide following properties through application. 
- - security.protocol = "SSL" - oracle.net.tns_admin = "location of tnsnames.ora file" (for parsing JDBC connection string) - tns.alias = "alias of connection string in tnsnames.ora" - - and following properties in `ojdbc.properties` file and `ojdbc.properties` file should be in location `oracle.net.tns_admin` - - user(in lowercase) = "name of database user" - password(in lowercase) = "user password" - oracle.net.ssl_server_dn_match=true - oracle.net.wallet_location="(SOURCE=(METHOD=FILE)(METHOD_DATA=(DIRECTORY=/location../wallet_dbname)))" - - + using Java Key Store: - To Provide JDBC SSL security with Java Key Store then provide following properties through application. - - security.protocol = "SSL" - oracle.net.tns_admin = "location of tnsnames.ora file" - tns.alias = "alias of connection string in tnsnames.ora" - - and following properties in `ojdbc.properties` file and `ojdbc.properties` file should be in location `oracle.net.tns_admin` - - user(in lowercase) = "user name of database user" - password(in lowercase) = "user password" - oracle.net.ssl_server_dn_match=true - javax.net.ssl.trustStore==${TNS_ADMIN}/truststore.jks - javax.net.ssl.trustStorePassword = password - javax.net.ssl.keyStore= ${TNS_ADMIN}/keystore.jks - javax.net.ssl.keyStorePassword="password "" - -Note: tnsnames.ora file in wallet downloaded from Oracle Autonomous Database contains JDBC connection string which is used for establishing JDBC connection. +2. SSL -Learn more about [JDBC SSL security](https://docs.oracle.com/en/cloud/paas/atp-cloud/atpug/connect-jdbc-thin-wallet.html#GUID-5ED3C08C-1A84-4E5A-B07A-A5114951AA9E) to establish SSL secured JDBC connections. - - -### First bootstrap and download the wrapper ### -``` -cd okafka_source_dir -gradle wrapper -``` +1.PLAINTEXT: In this protocol a JDBC connection is setup by providing username and password in plain text in ojdbc.prperties file. To use PLAINTEXT protocol user must provide following properties through application. -### Building okafka.jar + security.protocol = "PLAINTEXT" + bootstrap.servers = "host:port" + oracle.service.name = "name of the service running on the instance" + oracle.net.tns_admin = "location of ojdbc.properties file" + +`ojdbc.properties` file must have below properties + + user(in lowercase)=DatabaseUserName + password(in lowercase)=Password -Simplest way to build the `okafka.jar` file is by using Gradle build tool. -This distribution contains gradle build files which will work for Gradle 7.3 or higher. +2.SSL: This protocol requires that, while connecting to Oracle Database, the JDBC driver authenticates database user using Oracle Wallet or Java KeyStore(JKS) files. This protocol is typically used to o connect to Oracle database 23ai instance in Oracle Autonomous cloud. To use this protocol `Okafka` application must specify following properties. -To build the `okafka.jar` file which includes all the dependent jar files in itself. + security.protocol = "SSL" + oracle.net.tns_admin = "location containing Oracle Wallet, tnsname.ora and ojdbc.properties file" + tns.alias = "alias of connection string in tnsnames.ora" -``` -./gradlew fullJar -``` -This generates `okafka-0.8-full.jar` in `okafka_source_dir/clients/build/libs`. +Directory location provided in `oracle.net.tns_admin` property should have +1. Oracle Wallet +2. tnsnames.ora file +3. ojdbc.properties file (optional) +This depends on how the Oracle Wallet is configured. -To build the `okafka.jar` file without including the dependent jar files in the `okafka.jar` itself. 
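As a quick illustration of the connection settings described above, the sketch below builds the two kinds of `Properties` objects an OKafka client might be configured with. Only the property keys come from this README; the host, port, service name, directory paths, and TNS alias are placeholders, not values from this repository.

```java
import java.util.Properties;

public class OKafkaConnectionProperties {

    // PLAINTEXT: the database user name and password are read from the
    // ojdbc.properties file located in the directory given by oracle.net.tns_admin.
    public static Properties plaintext() {
        Properties props = new Properties();
        props.put("security.protocol", "PLAINTEXT");
        props.put("bootstrap.servers", "dbhost:1521");             // database listener host:port (placeholder)
        props.put("oracle.service.name", "freepdb1");              // database service name (placeholder)
        props.put("oracle.net.tns_admin", "/home/appuser/okafka"); // directory containing ojdbc.properties (placeholder)
        return props;
    }

    // SSL: the same directory holds the Oracle Wallet, tnsnames.ora and, optionally,
    // ojdbc.properties; tns.alias selects the connect descriptor from tnsnames.ora.
    public static Properties ssl() {
        Properties props = new Properties();
        props.put("security.protocol", "SSL");
        props.put("oracle.net.tns_admin", "/home/appuser/wallet"); // wallet directory (placeholder)
        props.put("tns.alias", "mydb_tp");                         // alias from tnsnames.ora (placeholder)
        return props;
    }
}
```

The resulting `Properties` object is what an application would pass to the OKafka producer, consumer, or admin constructor; in both modes the database credentials stay in `ojdbc.properties` or the wallet rather than in application code.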
+Learn more about [JDBC Thin Connections with a Wallet (mTLS)](https://docs.oracle.com/en/cloud/paas/atp-cloud/atpug/connect-jdbc-thin-wallet.html#GUID-5ED3C08C-1A84-4E5A-B07A-A5114951AA9E) to establish secured JDBC connections. + +Note: tnsnames.ora file in wallet downloaded from Oracle Autonomous Database contains JDBC connection string which is used for establishing JDBC connection. + +### Building okafka.jar + +Simplest way to build the `okafka.jar` file is by using Gradle build tool. +This distribution contains gradle build files which will work for Gradle 7.3 or higher. ``` -./gradlew jar +./gradle jar ``` -This generates `okafka-0.8.jar` in `okafka_source_dir/clients/build/libs` and `okafka-0.8-[producer|consumer].jar` in `okafka_source_dir/examples/[producer|consumer]/build/libs`. +This generates `okafka-23.4.0.0.jar` in `okafka_source_dir/clients/build/libs`. **Project Dependency:** @@ -183,252 +113,42 @@ Mandatory jar files for this project to work. * `javax.jms-api-.jar` * `jta-.jar` * `slf4j-api-.jar` +* `kafka-clients-3.7.1.jar` All these jars are downloaded from Maven Repository during gradle build. -If one is using the `okafka-0.8-full.jar` file generated using `./gradlew fullJar` command, then it is not required to add other jar files in the classpath while running the Oracle Kafka application. - -## Using the okafka-0.8-full.jar - -This section describes the sample Producer and Consumer application that uses `okafka-0.8-full.jar` file. These files are available in the examples directory. Assuming user has built the `okafka-0.8-full.jar` file using the `./gradlew fullJar` build command, so that no other jar file is required to be placed in the classpath. - -To compile `Producer.java` you can use java, for example: - -```java -javac -classpath .:okafka-0.8-full.jar Producer.java -``` - -or gradle - -```bash -gradle :examples:producer:build -x test -``` - -To run `Producer.java` - -```java -java -classpath .:okafka-0.8-full.jar Producer -``` - -or using gradle +To build the `okafka.jar` file which includes all the dependent jar files in itself. -```bash -gradle :examples:producer:run ``` - -To compile `Consumer.java` - -```java -javac -classpath .:okafka-0.8-full.jar Consumer.java +./gradle fullJar ``` +This generates `okafka-full-23.4.0.0.jar` in `okafka_source_dir/clients/build/libs`. -or gradle + +## Build javadoc -```bash -gradle :examples:consumer:build -x test -``` -To run `Consumer.java` +This command generates javadoc in `okafka_source_dir/clients/build/docs/javadoc` ``` -java -classpath .:okafka-0.8-full.jar Consumer 10 +gradle javadoc ``` -or gradle +## Examples -```bash -gradle :examples:consumer:run -``` +Repository contains 2 common OKafka application examples in `examples` folder. -## Build javadoc +`1. ProducerOKafka.java` +Produces 10 messages into TxEQ topic. -This command generates javadoc in `okafka_source_dir/clients/build/docs/javadoc`. +`2. ConsumerOKafka.java` +Consumes 10 messages from TxEQ topic. -``` -./gradlew javadoc -``` - -Bellow, there is a sample code for the Producer and one for the Consumer. Both are available in examples folder, to use fill the properties externalized on application.properties to point to your Oracle Database. 
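For orientation alongside the `ProducerOKafka.java` example mentioned above, here is a minimal producer sketch assuming the `TXEQ` topic created earlier and PLAINTEXT connectivity. The connection values are placeholders, and the package split shown (only `KafkaProducer` from `org.oracle.okafka`, the record and serializer classes from `org.apache.kafka`) is an assumption based on the `kafka-clients` dependency added in this change; the bundled example in the `examples` directory is the authoritative reference.

```java
import java.util.Properties;

import org.apache.kafka.clients.producer.ProducerRecord;
import org.oracle.okafka.clients.producer.KafkaProducer;

public class SimpleOKafkaProducer {

    public static void main(String[] args) {
        Properties props = new Properties();
        // Connection settings as described in the "Connection configuration" section;
        // host, port, service name and directory are placeholders.
        props.put("security.protocol", "PLAINTEXT");
        props.put("bootstrap.servers", "dbhost:1521");
        props.put("oracle.service.name", "freepdb1");
        props.put("oracle.net.tns_admin", "/home/appuser/okafka"); // directory containing ojdbc.properties

        // Plain Kafka serializers; the database user name and password are read from ojdbc.properties.
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

        // The OKafka producer publishes each record into a partition of the TXEQ topic
        // (a Transactional Event Queue) over its JDBC connection.
        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            for (int i = 0; i < 10; i++) {
                producer.send(new ProducerRecord<>("TXEQ", Integer.toString(i), "test message " + i));
            }
        }
    }
}
```

A consumer follows the same pattern with the deserializer properties, a `group.id`, and the usual `subscribe`/`poll` loop, as shown in `ConsumerOKafka.java`.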
- -**Producer.java - A simple Producer application that uses okafka.jar** - -```java -import java.util.Properties; -import org.oracle.okafka.clients.producer.*; - -public class Producer { - -public static void main(String[] args) { - - KafkaProducer prod = null; - int msgCnt =10; - Properties props = new Properties(); - - /* change the properties to point to the Oracle Database */ - props.put("oracle.service.name", "serviceid.regress.rdbms.dev.us.oracle.com"); //name of the service running on the instance - props.put("oracle.instance.name", "instancename"); //name of the Oracle Database instance - props.put("oracle.net.tns_admin", "location of tnsnames.ora/ojdbc.properties file"); //eg: "/user/home" if ojdbc.properies file is in home - props.put("bootstrap.servers", "host:port"); //ip address or host name where instance running : port where instance listener running - props.put("linger.ms", 1000); - props.put("key.serializer", "org.oracle.okafka.common.serialization.StringSerializer"); - props.put("value.serializer", "org.oracle.okafka.common.serialization.StringSerializer"); - - prod=new KafkaProducer(props); - try { - System.out.println("Producing messages " + msgCnt); - for(int j=0;j < msgCnt; j++) { - prod.send(new ProducerRecord("TOPIC1" , "Key","This is new message"+j)); - } - - System.out.println("Messages sent " ); - - } catch(Exception ex) { - ex.printStackTrace(); - } finally { - prod.close(); - } - } -} -``` - -**Consumer.java - A simple Consumer application that uses okafka.jar** - -```java -import java.util.Properties; -import java.time.Duration; -import java.util.Arrays; - -import org.oracle.okafka.clients.consumer.*; - -public class Consumer { - -public static void main(String[] args) { - Properties props = new Properties(); - - /*change the bootstrap server to point to the Oracle Database*/ - props.put("oracle.service.name", "serviceid.regress.rdbms.dev.us.oracle.com"); //name of the service running on the instance - props.put("oracle.instance.name", "instancename"); //name of Oracle Database instance - props.put("oracle.net.tns_admin", "location of tnsnames.ora/ojdbc.properties file"); //eg: "/user/home" if ojdbc.properies file is in home - props.put("bootstrap.servers", "host:port"); //ip address or host name where instance running : port where instance listener running - props.put("group.id", "subscriber"); - props.put("enable.auto.commit", "false"); - props.put("key.deserializer", "org.oracle.okafka.common.serialization.StringDeserializer"); - props.put("value.deserializer", "org.oracle.okafka.common.serialization.StringDeserializer"); - props.put("max.poll.records", 500); - - KafkaConsumer consumer = null; - - try { - consumer = new KafkaConsumer(props); - consumer.subscribe(Arrays.asList("TOPIC1")); - ConsumerRecords records; - - records = consumer.poll(Duration.ofMillis(1000)); - - for (ConsumerRecord record : records) - { - System.out.println("topic = , key = , value = \n" + - record.topic()+ "\t" + record.key()+ "\t" + record.value()); - } - consumer.commitSync(); - } catch(Exception ex) { - ex.printStackTrace(); - } finally { - consumer.close(); - } - } -} -``` ## Kafka Java Client APIs supported -### KafkaProducer APIs supported - -* `KafkaProducer`: Constructor that creates a producer object and internal AQ JMS objects. KafkaProducer class has 4 types of constructor defined which all take configuration parameters as input. - -* `send(ProducerRecord)`: Produces a message into Oracle Transactional Event Queue (Oracle TxEventQ). 
A message is called 'Producer Record' for Kafka application and called 'Event' for Oracle TxEventQ. Both the overloaded versions of send, that is, `send(ProducerRecord)` and `send(ProducerRecord, Callback)` will be supported. Records will be published into the topic using AQJMS. - -* `close`: Closes the producer, its sender thread and frees the accumulator. It also closes internal AQJMS objects like connection, session JMS producer etc. - -* `ProducerRecord`: Class that represents a message in Kafka platform. It will be translated into an 'event' for Oracle TxEventQ Platform which is an AQJMS Message. Relevant fields like payload and key can be directly translated into Oracle TxEventQ payload and message key for Oracle TxEventQ. - -* `RecordMetadata`: This class contains metadata of the record like offset, timestamp etc. of the Record in Kafka platform. This will be assigned value relevant for Oracle TxEventQ. An event id of Oracle TxEventQ will be converted into an offset of RecordMetadata. - -* `Callback Interface`: A callback function which will be executed once a Record is successfully published into Oracle TxEventQ Event Stream. - -* `Partitioner Interface`: An Interface which maps a key of the message to a partition number of the topic. A partition number is analogous to a Event Stream Id of Oracle TxEventQ. Application developer can implement their own Partitioner interface to map messages to a partition. The partition of a topic is analogous to a 'Event Stream' of Oracle TxEventQ. Thus a message is published into the assigned Event Stream of Oracle TxEventQ. - -* `Property: bootstrap.servers`: IP address and port of a machine where database instance is running. - -* `Property: Key Serializer and Value serializer`: Converts key and payload into byte array respectively. - -* `Property: acks`: For this project, only value relevant for acks property is 'all'. Any other field set by the user is ignored. - -* `Property: linger.ms`: Time in milliseconds for which sender thread will wait before publishing the records in Oracle TxEventQ. - -* `Property: batch.size`: Size of accumulator buffer in bytes for which sender thread will wait before publishing records in Oracle TxEventQ. - -* `Property: buffer.memory`: Total memory in bytes the accumulator can hold. - -* `Property: max.block.ms`: If buffer.memory size is full in accumulator then wait for max.block.ms amount of time before send() method can receive out of memory error. - -* `Property: retries` : This property enables producer to resend the record in case of transient errors. This value is an upper limit on how many resends. -* `Property: retry.backoff.ms` : The amount of time to wait before attempting to retry a failed request to a given topic partition. This avoids repeatedly sending requests in a tight loop under some failure scenarios. - -### KafkaConsumer APIs supported - -* `KafkaConsumer`: Constructor that creates a consumer that allows application to consume messages from a queue of Oracle TxEventQ. Internal to client, Oracle AQJMS objects will be created which will not be visible to client application. All variations of the KafkaConsumer constructor are supported in this version. - -* `Subscribe(java.util.Collection)`: This method takes a list of topics to subscribe to. In this version only the first topic of the list will be subscribed to. An exception will be thrown if size of list is greater than one. 
This method will create a durable subscriber on Transactional Event Queue at Oracle TxEventQ server side with Group-Id as subscriber name. - -* `Poll(java.time.Duration)`: Poll attempts to dequeue messages from the Oracle TxEventQ for the subscriber. It dequeues a batch of messages from the Oracle TxEventQ within a timeout provided as argument in poll. Size of the batch depends on the parameter max.poll.records set by the Kafka client application. In this preview release, when poll is invoked for the first time, Oracle TxEventQ assigns a single available partition to this consumer. This assignment will stay for the lifetime of the consumer and no other partition is assigned to this consumer. Each poll to this consumer returns messages belonging to the partition assigned to this consumer. It is the responsibility of the application developer to start as many consumers as number of partitions of the queue. If number of consumers are less than number of partitions then messages from unassigned partitions will never be consumed. If number of consumers are more than number of partitions then extra consumers will not be assigned any partition and hence will not be able to consume any messages. No two consumer applications will consume from same partition at the same time. - -* `commitSync()`: Commit all consumed messages. Commit to an offset is not supported in this version. This call will directly call commit on database which will commit all consumed messages from Oracle TxEventQ. `Kafka Java Client for Oracle Transactional Event Queues` maintains only a single session for a connection. And this session is transactional, calling commit on session() either succeeds or rolls back. So commit is not retried in case of commit failure. - -* `commitSync(java.time.Duration)`: Commit all consumed messages. Commit to an offset is not supported in this preview release. This call will directly call commit on database which will commit all consumed messages from Oracle TxEventQ. `Kafka Java Client for Oracle Transactional Event Queues` maintains only a single session for a connection. And this session is transactional, calling commit on session() either succeeds or rolls back. commit is not retried in case of commit failure. - -* `commitAsync()`: This call is translated into `commitSync` internally. - -* `commitAsync(OffsetCommitCallback)`: This call is translated into `commitSync`. A callback function passed as argument will get executed once the commit is successful. - -* `Unsubscribe`: Unsubscribes the topic it has subscribed to. Unsubscribed consumer can no longer consume messages from unsubscribed topics. Other consumer applications in same group can still continue to consume. - -* `seekToBeginning(Collection)`: Seek to first available offset . - -* `seek(TopicPartition, long)`: Seek to offset for a given topic partition. - -* `seekToEnd(Collection)`: Seek to last offset for a given list of topic partitions. - -* `close()`: closes the consumer i.e close the connection, release resources. - -* `ConsumerRecord`: A class representing a consumed record in Kafka Platform. In this implementation, AQJMS Message will be converted into ConsumerRecord. - -* `Property: bootstrap.servers`: IP address and port of a machine where database instance is running. - -* `Property: key.deserializer and value.deserialzer`: In Oracle TxEventQ's queue key, value are stored as byte array in user property, payload of JMS message respectively. 
On consuming these byte arrays are deserialized into key, value having user provided format internally by the consumer using `key.deserializer` and `value.deserializer` respectively. - -* `Property: group.id:` This is a Consumer Group name for which messages are consumed from the Kafka topic. This property will be used as a durable subscriber name for Oracle TxEventQ's queue. - -* `Property: max.poll.records`: Max number of records to fetch from Oracle TxEventQ server in a single poll call. - -* `Property: fetch.max.wait.ms`: Maximum amount of time in milliseconds to wait for fetching messages if not available. - -* `Property: enable.auto.commit`: Enables auto commit of consumed messages for every specified interval. - -* `Property: auto.commit.interval.ms`: Interval in milliseconds for auto commit of messages. - -### Kafka Admin APIs supported - -* `create(props) & create(config)`: Create an object of KafkaAdmin class which uses passed parameters. For this implementation, we will create a database session which will be used for further operations. Client application should provide Oracle specific properties which are `bootstrap.servers`, `oracle.servicename`, `oracle.instancename`, `user`, `password` that will be used to setup the database connection. - -* `close`: Closes database session. - -* `createTopic(Collection, CreateTopicsOptions)`: Create an Oracle TxEventQ with initial partition count (or Event Streams count) passed by the application as an argument into the function. This method is not supported in preview release. - -* `deleteTopic(Collection, DeleteTopicsOptions)`: Stop and Drop Oracle TxEventQ queue. - -* `Property: bootstrap.servers`: IP address and port of a machine where database instance is running. +For detailed documentation of OKafka please refer to [Kafka API for Oracle Transactional Event Queues](https://docs.oracle.com/en/database/oracle/oracle-database/23/adque/Kafka_cient_interface_TEQ.html) documentation. -* `Property: retention.ms`: Amount of time in milliseconds a message is retained in queue after all consumer groups or subscribers dequeued a message. This is a topic level config. +For list of APIs supported with Oracle 23.4.0.0 version of OKafka please refer to [OKafka 23ai javadoc](https://docs.oracle.com/en/database/oracle/oracle-database/23/okjdc/). ## Contributing @@ -440,7 +160,7 @@ Please consult the [security guide](./SECURITY.md) for our responsible security ## License -Copyright (c) 2019, 2023 Oracle and/or its affiliates. +Copyright (c) 2019, 2024 Oracle and/or its affiliates. Released under the Universal Permissive License v1.0 as shown at . diff --git a/build.gradle b/build.gradle index 1197426..18cea7c 100644 --- a/build.gradle +++ b/build.gradle @@ -1,7 +1,7 @@ /* -** OKafka Java Client version 0.8. +** OKafka Java Client version 23.4. ** -** Copyright (c) 2019, 2022 Oracle and/or its affiliates. +** Copyright (c) 2019, 2024 Oracle and/or its affiliates. ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. */ @@ -18,21 +18,22 @@ allprojects { } group = 'org.oracle.okafka' - version = '0.8' + version = '23.4.0.0' tasks.withType(Javadoc) { // disable the crazy super-strict doclint tool in Java 8 // noinspection SpellCheckingInspection + title ="Oracle Kafka 23.4.0.0 API" options.addStringOption('Xdoclint:none', '-quiet') options.windowTitle = "Oracle Database Transactional Event Queues Java API Reference" - options.header = """Oracle® Database Transactional Event Queues Java API Reference
20c

F30355-02
""" - options.bottom = """
Copyright © 2001, 2020, Oracle and/or its affiliates. All rights reserved.


""" + options.header = """Oracle® Database Transactional Event Queues Java API Reference
23ai

FF46992-04
""" + options.bottom = """
Copyright © 2001, 2024, Oracle and/or its affiliates. All rights reserved.


""" } } ext { - gradleVersion = '7.3' - minJavaVersion = JavaVersion.VERSION_1_8 + gradleVersion = '8.8' + minJavaVersion = JavaVersion.VERSION_17 mavenUrl = project.hasProperty('mavenUrl') ? project.mavenUrl : '' mavenUsername = project.hasProperty('mavenUsername') ? project.mavenUsername : '' @@ -45,50 +46,61 @@ project(':clients') { sourceCompatibility = minJavaVersion targetCompatibility = minJavaVersion - println 'project okafka client lib' + sourceSets { + main { + java { + srcDir 'src/main/java' + exclude 'tests/**' + exclude 'test/**' + } + } + } + + println 'Building okafka 23.4.0.0 Java API jar' dependencies { - // Test dependencies - testImplementation group: 'org.easymock', name: 'easymock', version: '4.3' - testImplementation group: 'org.powermock', name: 'powermock-api-support', version: '2.0.9' - testImplementation group: 'org.powermock', name: 'powermock-api-easymock', version: '2.0.9' - testImplementation group: 'org.powermock', name: 'powermock-module-junit4', version: '2.0.9' - testImplementation group: 'org.slf4j', name: 'slf4j-simple', version: '1.7.36' // These dependencies are used by the application. - implementation group: 'com.oracle.database.jdbc', name: 'ojdbc11', version: '21.5.0.0' - implementation group: 'com.oracle.database.messaging', name: 'aqapi', version: '19.3.0.0' - implementation group: 'com.oracle.database.security', name: 'oraclepki', version: '21.5.0.0' - implementation group: 'com.oracle.database.security', name: 'osdt_core', version: '21.5.0.0' - implementation group: 'com.oracle.database.security', name: 'osdt_cert', version: '21.5.0.0' - implementation group: 'javax.jms', name: 'javax.jms-api', version: '2.0.1' + implementation group: 'com.oracle.database.jdbc', name: 'ojdbc11', version: '23.4.0.24.05' + implementation group: 'com.oracle.database.messaging', name: 'aqapi', version: '23.3.0.0' implementation group: 'javax.transaction', name: 'jta', version: '1.1' - implementation group: 'org.slf4j', name: 'slf4j-api', version: '1.7.36' - implementation group: 'org.slf4j', name: 'slf4j-simple', version: '1.7.36' + implementation group: 'javax.jms', name: 'javax.jms-api', version: '2.0' + implementation group: 'com.oracle.database.security', name: 'oraclepki', version: '23.4.0.24.05' + implementation group: 'org.slf4j', name: 'slf4j-api', version: '2.0.0-alpha0' + implementation group: 'org.apache.kafka', name: 'kafka-clients', version: '3.7.1' + // Use JUnit test framework + implementation group: 'junit', name: 'junit', version: '4.12' + + // Test dependencies + testImplementation group: 'org.easymock', name: 'easymock', version: '3.6' + testImplementation group: 'org.powermock', name: 'powermock-module-junit4', version: '2.0.0-beta.5' + testImplementation group: 'org.powermock', name: 'powermock-api-support', version: '2.0.5' + testImplementation group: 'org.powermock', name: 'powermock-api-easymock', version: '2.0.0-beta.5' + } javadoc { - include "**/org/oracle/okafka/clients/admin/*" - include "**/org/oracle/okafka/clients/consumer/*" - include "**/org/oracle/okafka/clients/producer/*" - include "**/org/oracle/okafka/common/*" - include "**/org/oracle/okafka/common/annotation/*" - include "**/org/oracle/okafka/common/errors/*" - include "**/org/oracle/okafka/common/resource/*" - include "**/org/oracle/okafka/common/serialization/*" - include "**/org/oracle/okafka/common/config/*" + include "org/oracle/okafka/clients/admin/*" + include "org/oracle/okafka/clients/consumer/*" + include "org/oracle/okafka/clients/producer/*" + include 
"org/oracle/okafka/common/errors/*" } tasks.named('jar') { - description('Generates okafka client jar ') + description('Generates okafka 23.4.0.0 API jar ') archiveBaseName = 'okafka' + archiveVersion = '23.4.0.0' from "${rootProject.projectDir}/LICENSE.txt" from "${rootProject.projectDir}/NOTICE" manifest { - attributes( 'Implementation-Title' : 'okafka', - 'Implementation-Version': project.version) + attributes ( + 'Implementation-Title' : 'okafka', + 'Implementation-Version': project.version, + 'Version': '23.4.0.0', + 'Build-Time-ISO-8601':new Date().format("yyyy-MM-dd HH:mm:ss") + ) } } @@ -98,7 +110,7 @@ project(':clients') { manifest { attributes( 'Implementation-Title' : 'okafka', - 'Implementation-Version': project.version) + 'Implementation-Version': project.version) } from "${rootProject.projectDir}/LICENSE.txt" @@ -115,6 +127,7 @@ project(':clients') { } } + project(':examples:consumer') { apply plugin : 'java' apply plugin : 'application' @@ -125,7 +138,9 @@ project(':examples:consumer') { dependencies { // These dependencies are used by the application. implementation project(':clients') - implementation group: 'com.oracle.database.security', name: 'oraclepki', version: '21.5.0.0' + implementation group: 'com.oracle.database.security', name: 'oraclepki', version: '23.4.0.24.05' + implementation group: 'org.apache.kafka', name: 'kafka-clients', version: '3.7.1' + implementation group: 'ch.qos.logback', name: 'logback-classic', version: '1.5.6' } @@ -139,14 +154,14 @@ project(':examples:consumer') { manifest { attributes( 'Implementation-Title' : 'okafka consumer', - 'Implementation-Version': project.version) + 'Implementation-Version': project.version) } } tasks.named('run') { - description('Run okafka client consumer') + description('Run okafka client simple consumer') application { - mainClass = 'org.oracle.okafka.examples.Consumer' + mainClass = 'org.oracle.okafka.examples.ConsumerOKafka' } } } @@ -161,11 +176,13 @@ project(':examples:producer') { dependencies { // These dependencies are used by the application. 
implementation project(':clients') - implementation group: 'com.oracle.database.security', name: 'oraclepki', version: '21.5.0.0' + implementation group: 'com.oracle.database.security', name: 'oraclepki', version: '23.4.0.24.05' + implementation group: 'org.apache.kafka', name: 'kafka-clients', version: '3.7.1' + implementation group: 'ch.qos.logback', name: 'logback-classic', version: '1.5.6' } tasks.named('jar') { - description('Generates okafka client producer jar ') + description('Generates okafka client simple producer jar ') archiveBaseName = 'okafka' archiveClassifier = 'producer' @@ -174,14 +191,14 @@ project(':examples:producer') { manifest { attributes( 'Implementation-Title' : 'okafka producer', - 'Implementation-Version': project.version) + 'Implementation-Version': project.version) } } tasks.named('run') { - description('Run okafka client producer') + description('Run okafka client simple producer') application { - mainClass = 'org.oracle.okafka.examples.Producer' + mainClass = 'org.oracle.okafka.examples.ProducerOKafka' } } } diff --git a/clients/okafka-0.8.pom b/clients/okafka.pom similarity index 84% rename from clients/okafka-0.8.pom rename to clients/okafka.pom index 160854c..a0b3a43 100644 --- a/clients/okafka-0.8.pom +++ b/clients/okafka.pom @@ -3,12 +3,12 @@ 4.0.0 com.oracle.database.messaging okafka - 0.8 + 23.4.0.0 okafka Oracle's implementation of Kafka Java Client for Oracle Transactional Event Queues - https://docs.oracle.com/en/database/oracle/oracle-database/21/okjdc/index.html + https://docs.oracle.com/en/database/oracle/oracle-database/23/okjdc/index.html @@ -38,8 +38,8 @@ 11 ${java.version} ${java.version} - 21.5.0.0 - 19.3.0.0 + 23.4.0.24.05 + 23.3.0.0 2.0.1 1.3 @@ -60,16 +60,6 @@ oraclepki ${oracle-jdbc.version} - - com.oracle.database.security - osdt_core - ${oracle-jdbc.version} - - - com.oracle.database.security - osdt_cert - ${oracle-jdbc.version} - javax.jms javax.jms-api @@ -90,5 +80,11 @@ aqapi ${oracle-db-messaging.version} + + + org.apache.kafka + kafka-clients + 3.7.1 + \ No newline at end of file diff --git a/clients/pom.xml b/clients/pom.xml index 797f4bc..30b9598 100644 --- a/clients/pom.xml +++ b/clients/pom.xml @@ -4,7 +4,7 @@ com.oracle.database.messaging okafka - 0.8 + 23.4.0.0 jar @@ -32,8 +32,8 @@ ${java.version} 11 1.3.12 - 21.5.0.0 - 19.3.0.0 + 23.4.0.24.05 + 23.3.0.0 2.0.1 1.3 @@ -61,16 +61,6 @@ oraclepki ${oracle-jdbc.version} - - com.oracle.database.security - osdt_core - ${oracle-jdbc.version} - - - com.oracle.database.security - osdt_cert - ${oracle-jdbc.version} - javax.jms javax.jms-api @@ -91,6 +81,11 @@ aqapi ${oracle-db-messaging.version} + + org.apache.kafka + kafka-clients + 3.7.1 + junit diff --git a/clients/src/main/java/org/oracle/okafka/clients/ClientRequest.java b/clients/src/main/java/org/oracle/okafka/clients/ClientRequest.java deleted file mode 100644 index 68f81c8..0000000 --- a/clients/src/main/java/org/oracle/okafka/clients/ClientRequest.java +++ /dev/null @@ -1,120 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.clients; - -import org.oracle.okafka.common.Node; -import org.oracle.okafka.common.protocol.ApiKeys; -import org.oracle.okafka.common.requests.AbstractRequest; -import org.oracle.okafka.common.requests.RequestHeader; - -/** - * A request being sent to the server. - */ -public final class ClientRequest { - - private final Node destination; - private final AbstractRequest.Builder requestBuilder; - private final int correlationId; - private final String clientId; - private final long createdTimeMs; - private final boolean expectResponse; - private final int requestTimeoutMs; - private final RequestCompletionHandler callback; - - /** - * @param destination The brokerId to send the request to - * @param requestBuilder The builder for the request to make - * @param correlationId The correlation id for this client request - * @param clientId The client ID to use for the header - * @param createdTimeMs The unix timestamp in milliseconds for the time at which this request was created. - * @param expectResponse Should we expect a response message or is this request complete once it is sent? - * @param callback A callback to execute when the response has been received (or null if no callback is necessary) - */ - public ClientRequest(Node destination, - AbstractRequest.Builder requestBuilder, - int correlationId, - String clientId, - long createdTimeMs, - boolean expectResponse, - int requestTimeoutMs, - RequestCompletionHandler callback) { - this.destination = destination; - this.requestBuilder = requestBuilder; - this.correlationId = correlationId; - this.clientId = clientId; - this.createdTimeMs = createdTimeMs; - this.expectResponse = expectResponse; - this.requestTimeoutMs = requestTimeoutMs; - this.callback = callback; - } - - @Override - public String toString() { - return "ClientRequest(expectResponse=" + expectResponse + - ", callback=" + callback + - ", destination=" + destination + - ", correlationId=" + correlationId + - ", clientId=" + clientId + - ", createdTimeMs=" + createdTimeMs + - ", requestBuilder=" + requestBuilder + - ")"; - } - - public boolean expectResponse() { - return expectResponse; - } - - public ApiKeys apiKey() { - return requestBuilder.apiKey(); - } - - public AbstractRequest.Builder requestBuilder() { - return requestBuilder; - } - - public Node destination() { - return destination; - } - - public RequestCompletionHandler callback() { - return callback; - } - - public RequestHeader makeHeader() { - return new RequestHeader(apiKey(), clientId, correlationId); - } - - public long createdTimeMs() { - return createdTimeMs; - } - - public int correlationId() { - return correlationId; - } - - public int requestTimeoutMs() { - return requestTimeoutMs; - } -} diff --git a/clients/src/main/java/org/oracle/okafka/clients/ClientResponse.java b/clients/src/main/java/org/oracle/okafka/clients/ClientResponse.java deleted file mode 100644 index 778dcce..0000000 --- 
a/clients/src/main/java/org/oracle/okafka/clients/ClientResponse.java +++ /dev/null @@ -1,123 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * 04/20/2020: This file is modified to support Kafka Java Client compatability to Oracle Transactional Event Queues. - * Methods removed: - * versionMismatch() - * authenticationEception() - */ - -package org.oracle.okafka.clients; - -import org.oracle.okafka.common.Node; -import org.oracle.okafka.common.requests.AbstractResponse; -import org.oracle.okafka.common.requests.RequestHeader; -/** - * A response from the server. Contains both the body of the response as well as the correlated request - * metadata that was originally sent. - */ -public class ClientResponse { - private final RequestHeader requestHeader; - private final RequestCompletionHandler callback; - private final Node destination; - private final long receivedTimeMs; - private final long latencyMs; - private final boolean disconnected; - private final AbstractResponse responseBody; - - /** - * @param requestHeader The header of the corresponding request - * @param callback The callback to be invoked - * @param createdTimeMs The unix timestamp when the corresponding request was created - * @param destination The node the corresponding request was sent to - * @param receivedTimeMs The unix timestamp when this response was received - * @param disconnected Whether the client disconnected before fully reading a response - * @param responseBody The response contents (or null) - */ - public ClientResponse(RequestHeader requestHeader, - RequestCompletionHandler callback, - Node destination, - long createdTimeMs, - long receivedTimeMs, - boolean disconnected, - AbstractResponse responseBody) { - this.requestHeader = requestHeader; - this.callback = callback; - this.destination = destination; - this.receivedTimeMs = receivedTimeMs; - this.latencyMs = receivedTimeMs - createdTimeMs; - this.disconnected = disconnected; - this.responseBody = responseBody; - } - - public long receivedTimeMs() { - return receivedTimeMs; - } - - public boolean wasDisconnected() { - return disconnected; - } - - public Node destination() { - return destination; - } - - public RequestHeader requestHeader() { - return requestHeader; - } - - public AbstractResponse responseBody() { - return responseBody; - } - - public boolean hasResponse() { - return responseBody != null; - } - - public long requestLatencyMs() { - return latencyMs; - } - - public void onComplete() { - if (callback != null) { - callback.onComplete(this); - } - - } - - @Override - 
public String toString() { - return "ClientResponse(receivedTimeMs=" + receivedTimeMs + - ", latencyMs=" + - latencyMs + - ", disconnected=" + - disconnected + - ", requestHeader=" + - ", responseBody=" + - responseBody + - ")"; - } - -} diff --git a/clients/src/main/java/org/oracle/okafka/clients/ClientUtils.java b/clients/src/main/java/org/oracle/okafka/clients/ClientUtils.java deleted file mode 100644 index 337cd0b..0000000 --- a/clients/src/main/java/org/oracle/okafka/clients/ClientUtils.java +++ /dev/null @@ -1,88 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * 04/20/2020: This file is modified to support Kafka Java Client compatability to Oracle Transactional Event Queues. - * Methods removed: - * createChannelBuilder(AbstractConfig) - */ - -package org.oracle.okafka.clients; - -import org.oracle.okafka.common.config.ConfigException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import static org.oracle.okafka.common.utils.Utils.getHost; -import static org.oracle.okafka.common.utils.Utils.getPort; - -import java.io.Closeable; -import java.net.InetSocketAddress; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.atomic.AtomicReference; - -public final class ClientUtils { - private static final Logger log = LoggerFactory.getLogger(ClientUtils.class); - - private ClientUtils() {} - - public static List parseAndValidateAddresses(List urls) { - List addresses = new ArrayList<>(); - for (String url : urls) { - if (url != null && !url.isEmpty()) { - try { - String host = getHost(url); - Integer port = getPort(url); - if (host == null || port == null) - throw new ConfigException("Invalid url in " + CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG + ": " + url); - - InetSocketAddress address = new InetSocketAddress(host, port); - - if (address.isUnresolved()) { - log.warn("Removing server {} from {} as DNS resolution failed for {}", url, CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, host); - } else { - addresses.add(address); - } - } catch (IllegalArgumentException e) { - throw new ConfigException("Invalid port in " + CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG + ": " + url); - } - } - } - if (addresses.isEmpty()) - throw new ConfigException("No resolvable bootstrap urls given in " + CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG); - return addresses; - } - - public static void closeQuietly(Closeable c, String name, AtomicReference firstException) { - if (c != null) { - try { - c.close(); - } catch (Throwable t) { - firstException.compareAndSet(null, t); - 
log.error("Failed to close " + name, t); - } - } - } -} diff --git a/clients/src/main/java/org/oracle/okafka/clients/ClusterConnectionStates.java b/clients/src/main/java/org/oracle/okafka/clients/ClusterConnectionStates.java index 51b0d0d..eb6ff9d 100644 --- a/clients/src/main/java/org/oracle/okafka/clients/ClusterConnectionStates.java +++ b/clients/src/main/java/org/oracle/okafka/clients/ClusterConnectionStates.java @@ -1,7 +1,7 @@ /* -** OKafka Java Client version 0.8. +** OKafka Java Client version 23.4. ** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. +** Copyright (c) 2019, 2024 Oracle and/or its affiliates. ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. */ @@ -33,7 +33,8 @@ import java.util.concurrent.ThreadLocalRandom; import org.oracle.okafka.common.Node; -import org.oracle.okafka.common.errors.AuthenticationException; +import org.apache.kafka.clients.ConnectionState; +import org.apache.kafka.common.errors.AuthenticationException; import java.util.HashMap; import java.util.Map; @@ -65,12 +66,14 @@ public ClusterConnectionStates(long reconnectBackoffMs, long reconnectBackoffMax */ public boolean canConnect(Node node, long now) { NodeConnectionState state = nodeState.get(node); - if (state == null) + { return true; - else + } + else { return state.state.isDisconnected() && now - state.lastConnectAttemptMs >= state.reconnectBackoffMs; + } } /** @@ -83,8 +86,16 @@ public boolean isBlackedOut(Node node, long now) { if (state == null) return false; else + { + /*if(state.state.isDisconnected()) + { + System.out.println("Disconnected Node " + node); + System.out.println("Now = " + now +" LastConnection AttemptMs =" + state.lastConnectAttemptMs + +" reconnectBackOff " +state.reconnectBackoffMs ); + }*/ return state.state.isDisconnected() && now - state.lastConnectAttemptMs < state.reconnectBackoffMs; + } } /** @@ -288,7 +299,6 @@ private NodeConnectionState nodeState(Node node) { * The state of our connection to a node. */ private static class NodeConnectionState { - ConnectionState state; AuthenticationException authenticationException; long lastConnectAttemptMs; diff --git a/clients/src/main/java/org/oracle/okafka/clients/CommonClientConfigs.java b/clients/src/main/java/org/oracle/okafka/clients/CommonClientConfigs.java index 83e4fc8..8bf80b9 100644 --- a/clients/src/main/java/org/oracle/okafka/clients/CommonClientConfigs.java +++ b/clients/src/main/java/org/oracle/okafka/clients/CommonClientConfigs.java @@ -1,7 +1,7 @@ /* -** OKafka Java Client version 0.8. +** OKafka Java Client version 23.4. ** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. +** Copyright (c) 2019, 2024 Oracle and/or its affiliates. ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
*/ @@ -31,15 +31,15 @@ import java.util.HashMap; import java.util.Map; -import org.oracle.okafka.common.config.AbstractConfig; +//import org.oracle.okafka.common.config.AbstractConfig; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * Some configurations shared by both producer and consumer */ -public class CommonClientConfigs { - private static final Logger log = LoggerFactory.getLogger(CommonClientConfigs.class); +public class CommonClientConfigs extends org.apache.kafka.clients.CommonClientConfigs { + //private static final Logger log = LoggerFactory.getLogger(CommonClientConfigs.class); /* * NOTE: DO NOT CHANGE EITHER CONFIG NAMES AS THESE ARE PART OF THE PUBLIC API AND CHANGE WILL BREAK USER CODE. @@ -55,6 +55,10 @@ public class CommonClientConfigs { public static final String ORACLE_INSTANCE_NAME_DOC = "instance name of the oracle database instance to connect to"; public static final String ORACLE_NET_TNS_ADMIN = "oracle.net.tns_admin"; public static final String ORACLE_NET_TNS_ADMIN_DOC = "location of file tnsnames.ora and ojdbc.properties"; + + public static final String ORACLE_TRANSACTIONAL_PRODUCER ="oracle.transactional.producer"; + + /* public static final String METADATA_MAX_AGE_CONFIG = "metadata.max.age.ms"; public static final String METADATA_MAX_AGE_DOC = "The period of time in milliseconds after which we force a refresh of metadata even if we haven't seen any partition leadership changes to proactively discover any new brokers or partitions."; @@ -102,6 +106,7 @@ public class CommonClientConfigs { + "for the response of a request. If the response is not received before the timeout " + "elapses the client will resend the request if necessary or fail the request if " + "retries are exhausted. This property is not yet supported."; + */ /** * Postprocess the configuration so that exponential backoff is disabled when reconnect backoff @@ -112,6 +117,7 @@ public class CommonClientConfigs { * * @return The new values which have been set as described in postProcessParsedConfig. */ + /* public static Map postProcessReconnectBackoffConfigs(AbstractConfig config, Map parsedValues) { HashMap rval = new HashMap<>(); @@ -122,5 +128,5 @@ public static Map postProcessReconnectBackoffConfigs(AbstractCon rval.put(RECONNECT_BACKOFF_MAX_MS_CONFIG, parsedValues.get(RECONNECT_BACKOFF_MS_CONFIG)); } return rval; - } + } */ } diff --git a/clients/src/main/java/org/oracle/okafka/clients/ConnectionState.java b/clients/src/main/java/org/oracle/okafka/clients/ConnectionState.java deleted file mode 100644 index 5f085eb..0000000 --- a/clients/src/main/java/org/oracle/okafka/clients/ConnectionState.java +++ /dev/null @@ -1,51 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * 04/20/2020: This file is modified to support Kafka Java Client compatability to Oracle Transactional Event Queues. - * - * Enum removed: CHECKING_API_VERSIONS - */ - -package org.oracle.okafka.clients; - -/** - * The states of a node connection - * - * DISCONNECTED: connection has not been successfully established yet - * CONNECTING: connection is under progress - * READY: connection is ready to send requests - * AUTHENTICATION_FAILED: connection failed due to an authentication error - */ -public enum ConnectionState { - DISCONNECTED, CONNECTING, READY, AUTHENTICATION_FAILED; - - public boolean isDisconnected() { - return this == AUTHENTICATION_FAILED || this == DISCONNECTED; - } - - public boolean isConnected() { - return this == READY; - } -} diff --git a/clients/src/main/java/org/oracle/okafka/clients/KafkaClient.java b/clients/src/main/java/org/oracle/okafka/clients/KafkaClient.java index efb6184..4816daa 100644 --- a/clients/src/main/java/org/oracle/okafka/clients/KafkaClient.java +++ b/clients/src/main/java/org/oracle/okafka/clients/KafkaClient.java @@ -1,7 +1,7 @@ /* -** OKafka Java Client version 0.8. +** OKafka Java Client version 23.4. ** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. +** Copyright (c) 2019, 2024 Oracle and/or its affiliates. ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. */ @@ -32,7 +32,10 @@ import java.io.Closeable; import org.oracle.okafka.common.Node; -import org.oracle.okafka.common.errors.AuthenticationException; +import org.apache.kafka.clients.ClientRequest; +import org.apache.kafka.clients.ClientResponse; +import org.apache.kafka.clients.RequestCompletionHandler; +import org.apache.kafka.common.errors.AuthenticationException; import org.oracle.okafka.common.requests.AbstractRequest; /** @@ -167,5 +170,10 @@ ClientRequest newClientRequest(Node node, boolean expectResponse, int requestTimeoutMs, RequestCompletionHandler callback); + + default void wakeup() + { + // NO-OP for all AQKafkaAdmin AQKafkaConsumer and AQKafkaProuducer + } } diff --git a/clients/src/main/java/org/oracle/okafka/clients/Metadata.java b/clients/src/main/java/org/oracle/okafka/clients/Metadata.java index f2c256e..9f27fd4 100644 --- a/clients/src/main/java/org/oracle/okafka/clients/Metadata.java +++ b/clients/src/main/java/org/oracle/okafka/clients/Metadata.java @@ -1,9 +1,9 @@ /* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ + ** OKafka Java Client version 23.4. + ** + ** Copyright (c) 2019, 2024 Oracle and/or its affiliates. + ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
+ */ /* * Licensed to the Apache Software Foundation (ASF) under one or more @@ -29,15 +29,16 @@ package org.oracle.okafka.clients; -import org.oracle.okafka.common.Cluster; -import org.oracle.okafka.common.KafkaException; +//import org.oracle.okafka.common.Cluster; +import org.apache.kafka.common.Cluster; +import org.apache.kafka.common.KafkaException; import org.oracle.okafka.common.Node; -import org.oracle.okafka.common.PartitionInfo; -import org.oracle.okafka.common.config.AbstractConfig; -import org.oracle.okafka.common.errors.AuthenticationException; -import org.oracle.okafka.common.errors.TimeoutException; -import org.oracle.okafka.common.internals.ClusterResourceListeners; -import org.oracle.okafka.common.utils.Time; +import org.apache.kafka.common.PartitionInfo; +import org.apache.kafka.common.config.AbstractConfig; +import org.apache.kafka.common.errors.AuthenticationException; +import org.apache.kafka.common.errors.TimeoutException; +import org.apache.kafka.common.internals.ClusterResourceListeners; +import org.apache.kafka.common.utils.Time; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -51,9 +52,11 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Optional; import java.util.Set; - +import java.util.function.Supplier; import java.sql.SQLException; +import java.util.StringTokenizer; /** * A class encapsulating some of the logic around metadata. @@ -69,194 +72,218 @@ */ public final class Metadata implements Closeable { - private static final Logger log = LoggerFactory.getLogger(Metadata.class); - - public static final long TOPIC_EXPIRY_MS = 5 * 60 * 1000; - private static final long TOPIC_EXPIRY_NEEDS_UPDATE = -1L; - - private final long refreshBackoffMs; - private final long metadataExpireMs; - private int version; - private long lastRefreshMs; - private long lastSuccessfulRefreshMs; - private AuthenticationException authenticationException; - private Cluster cluster; - private boolean needUpdate; - /* Topics with expiry time */ - private final Map topics; - private final List listeners; - private final ClusterResourceListeners clusterResourceListeners; - private boolean needMetadataForAllTopics; - private final boolean allowAutoTopicCreation; - private final boolean topicExpiryEnabled; - private boolean isClosed; - private final AbstractConfig configs; - public Metadata(long refreshBackoffMs, long metadataExpireMs, boolean allowAutoTopicCreation, AbstractConfig configs) { - this(refreshBackoffMs, metadataExpireMs, allowAutoTopicCreation, false, new ClusterResourceListeners(), configs); - } - - /** - * Create a new Metadata instance - * @param refreshBackoffMs The minimum amount of time that must expire between metadata refreshes to avoid busy - * polling - * @param metadataExpireMs The maximum amount of time that metadata can be retained without refresh - * @param allowAutoTopicCreation If this and the broker config 'auto.create.topics.enable' are true, topics that - * don't exist will be created by the broker when a metadata request is sent - * @param topicExpiryEnabled If true, enable expiry of unused topics - * @param clusterResourceListeners List of ClusterResourceListeners which will receive metadata updates. 
- */ - public Metadata(long refreshBackoffMs, long metadataExpireMs, boolean allowAutoTopicCreation, - boolean topicExpiryEnabled, ClusterResourceListeners clusterResourceListeners, AbstractConfig configs) { - this.refreshBackoffMs = refreshBackoffMs; - this.metadataExpireMs = metadataExpireMs; - this.allowAutoTopicCreation = allowAutoTopicCreation; - this.topicExpiryEnabled = topicExpiryEnabled; - this.lastRefreshMs = 0L; - this.lastSuccessfulRefreshMs = 0L; - this.version = 0; - this.cluster = Cluster.empty(); - this.needUpdate = false; - this.topics = new HashMap<>(); - this.listeners = new ArrayList<>(); - this.clusterResourceListeners = clusterResourceListeners; - this.needMetadataForAllTopics = false; - this.isClosed = false; - this.configs = configs; - } - - /** - * Get the current cluster info without blocking - */ - public synchronized Cluster fetch() { - return this.cluster; - } - - /** - * Add the topic to maintain in the metadata. If topic expiry is enabled, expiry time - * will be reset on the next update. - */ - public synchronized void add(String topic) { - Objects.requireNonNull(topic, "topic cannot be null"); - if (topics.put(topic, TOPIC_EXPIRY_NEEDS_UPDATE) == null) { - // requestUpdateForNewTopics(); - } - } - - /** - * The next time to update the cluster info is the maximum of the time the current info will expire and the time the - * current info can be updated (i.e. backoff time has elapsed); If an update has been request then the expiry time - * is now - */ - public synchronized long timeToNextUpdate(long nowMs) { - long timeToExpire = needUpdate ? 0 : Math.max(this.lastSuccessfulRefreshMs + this.metadataExpireMs - nowMs, 0); - long timeToAllowUpdate = this.lastRefreshMs + this.refreshBackoffMs - nowMs; - return Math.max(timeToExpire, timeToAllowUpdate); - } - - /** - * Request an update of the current cluster metadata info, return the current version before the update - */ - public synchronized int requestUpdate() { - this.needUpdate = true; - return this.version; - } - - /** - * Check whether an update has been explicitly requested. - * @return true if an update was requested, false otherwise - */ - public synchronized boolean updateRequested() { - return this.needUpdate; - } - - /** - * If any non-retriable authentication exceptions were encountered during - * metadata update, clear and return the exception. 
- */ - public synchronized AuthenticationException getAndClearAuthenticationException() { - if (authenticationException != null) { - AuthenticationException exception = authenticationException; - authenticationException = null; - return exception; - } else - return null; - } - - /** - * Wait for metadata update until the current version is larger than the last version we know of - */ - public synchronized void awaitUpdate(final int lastVersion, final long maxWaitMs) throws InterruptedException { - if (maxWaitMs < 0) - throw new IllegalArgumentException("Max time to wait for metadata updates should not be < 0 milliseconds"); - - long begin = System.currentTimeMillis(); - long remainingWaitMs = maxWaitMs; - while ((this.version <= lastVersion) && !isClosed()) { - AuthenticationException ex = getAndClearAuthenticationException(); - if (ex != null) - throw ex; - if (remainingWaitMs != 0) - wait(remainingWaitMs); - long elapsed = System.currentTimeMillis() - begin; - if (elapsed >= maxWaitMs) - throw new TimeoutException("Failed to update metadata after " + maxWaitMs + " ms."); - remainingWaitMs = maxWaitMs - elapsed; - } - if (isClosed()) - throw new KafkaException("Requested metadata update after close"); - } - - /** - * Replace the current set of topics maintained to the one provided. - * If topic expiry is enabled, expiry time of the topics will be - * reset on the next update. - * @param topics - */ - public synchronized void setTopics(Collection topics) { - if (!this.topics.keySet().containsAll(topics)) { - requestUpdateForNewTopics(); - } - this.topics.clear(); - for (String topic : topics) - this.topics.put(topic, TOPIC_EXPIRY_NEEDS_UPDATE); - } - - /** - * Get the list of topics we are currently maintaining metadata for - */ - public synchronized Set topics() { - return new HashSet<>(this.topics.keySet()); - } - - /** - * Check if a topic is already in the topic set. - * @param topic topic to check - * @return true if the topic exists, false otherwise - */ - public synchronized boolean containsTopic(String topic) { - return this.topics.containsKey(topic); - } - - /** - * Updates the cluster metadata. If topic expiry is enabled, expiry time - * is set for topics if required and expired topics are removed from the metadata. 
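The requestUpdate/awaitUpdate pair above is normally used as a handshake: record the current version when requesting a refresh, then block until the version advances. A minimal sketch, assuming an already-constructed Metadata instance from this package:

```java
// Sketch of the refresh handshake: request an update, then wait for the version to move past
// the remembered one, or let awaitUpdate() raise TimeoutException when maxWaitMs is exhausted.
import org.oracle.okafka.clients.Metadata;

public class MetadataRefreshSketch {
    public static void awaitFreshMetadata(Metadata metadata, long maxWaitMs) throws InterruptedException {
        int lastVersion = metadata.requestUpdate();   // mark that a refresh is wanted, remember the current version
        metadata.awaitUpdate(lastVersion, maxWaitMs); // returns once version > lastVersion
    }
}
```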
- * - * @param newCluster the cluster containing metadata for topics with valid metadata - * @param unavailableTopics topics which are non-existent or have one or more partitions whose - * leader is not known - * @param now current time in milliseconds - */ - public synchronized void update(Cluster newCluster, Set unavailableTopics, long now) { - Objects.requireNonNull(newCluster, "cluster should not be null"); - if (isClosed()) - throw new IllegalStateException("Update requested after metadata close"); - - this.needUpdate = false; - this.lastRefreshMs = now; - this.lastSuccessfulRefreshMs = now; - this.version += 1; - - if (topicExpiryEnabled) { + private static final Logger log = LoggerFactory.getLogger(Metadata.class); + + public static final long TOPIC_EXPIRY_MS = 5 * 60 * 1000; + private static final long TOPIC_EXPIRY_NEEDS_UPDATE = -1L; + + private final long refreshBackoffMs; + private final long metadataExpireMs; + private int version; + private long lastRefreshMs; + private long lastSuccessfulRefreshMs; + private AuthenticationException authenticationException; + private Cluster cluster; + private boolean isBootStrap; + private boolean needUpdate; + /* Topics with expiry time */ + private final Map topics; + private final List listeners; + private final ClusterResourceListeners clusterResourceListeners; + private boolean needMetadataForAllTopics; + private final boolean allowAutoTopicCreation; + private final boolean topicExpiryEnabled; + private boolean isClosed; + private final AbstractConfig configs; + HashMap clusterLeaderMap = new HashMap(); + private KafkaException fatalException; + int dbMajorVersion = 23; + int dbMinorVersion = 1; + public final HashMap topicParaMap = new HashMap<>(); + + public Metadata(long refreshBackoffMs, long metadataExpireMs, boolean allowAutoTopicCreation, AbstractConfig configs) { + this(refreshBackoffMs, metadataExpireMs, allowAutoTopicCreation, false, new ClusterResourceListeners(), configs); + } + + /** + * Create a new Metadata instance + * @param refreshBackoffMs The minimum amount of time that must expire between metadata refreshes to avoid busy + * polling + * @param metadataExpireMs The maximum amount of time that metadata can be retained without refresh + * @param allowAutoTopicCreation If this and the broker config 'auto.create.topics.enable' are true, topics that + * don't exist will be created by the broker when a metadata request is sent + * @param topicExpiryEnabled If true, enable expiry of unused topics + * @param clusterResourceListeners List of ClusterResourceListeners which will receive metadata updates. 
+ */ + public Metadata(long refreshBackoffMs, long metadataExpireMs, boolean allowAutoTopicCreation, + boolean topicExpiryEnabled, ClusterResourceListeners clusterResourceListeners, AbstractConfig configs) { + this.refreshBackoffMs = refreshBackoffMs; + this.metadataExpireMs = metadataExpireMs; + this.allowAutoTopicCreation = allowAutoTopicCreation; + this.topicExpiryEnabled = topicExpiryEnabled; + this.lastRefreshMs = 0L; + this.lastSuccessfulRefreshMs = 0L; + this.version = 0; + this.cluster = Cluster.empty(); + this.needUpdate = false; + this.topics = new HashMap<>(); + this.listeners = new ArrayList<>(); + this.clusterResourceListeners = clusterResourceListeners; + this.needMetadataForAllTopics = false; + this.isClosed = false; + this.configs = configs; + this.isBootStrap=true; + } + + /** + * Get the current cluster info without blocking + */ + public synchronized Cluster fetch() { + return this.cluster; + } + + /** + * Add the topic to maintain in the metadata. If topic expiry is enabled, expiry time + * will be reset on the next update. + */ + public synchronized void add(String topic) { + Objects.requireNonNull(topic, "topic cannot be null"); + if (topics.put(topic, TOPIC_EXPIRY_NEEDS_UPDATE) == null) { + // requestUpdateForNewTopics(); + } + } + // Changes for 2.8.1 See if requestUpdateForNewTopics needs to be invoked + /** + * Add the topic to maintain in the metadata. If topic expiry is enabled, expiry time + * will be reset on the next update. + */ + public synchronized void add(String topic, long timeout) { + Objects.requireNonNull(topic, "topic cannot be null"); + if (topics.put(topic, timeout) == null) { + // requestUpdateForNewTopics(); + } + } + + + /** + * The next time to update the cluster info is the maximum of the time the current info will expire and the time the + * current info can be updated (i.e. backoff time has elapsed); If an update has been request then the expiry time + * is now + */ + public synchronized long timeToNextUpdate(long nowMs) { + long timeToExpire = needUpdate ? 0 : Math.max(this.lastSuccessfulRefreshMs + this.metadataExpireMs - nowMs, 0); + + long timeToAllowUpdate = this.lastRefreshMs + this.refreshBackoffMs - nowMs; + return Math.max(timeToExpire, timeToAllowUpdate); + } + + /** + * Request an update of the current cluster metadata info, return the current version before the update + */ + public synchronized int requestUpdate() { + this.needUpdate = true; + return this.version; + } + + /** + * Check whether an update has been explicitly requested. + * @return true if an update was requested, false otherwise + */ + public synchronized boolean updateRequested() { + return this.needUpdate; + } + + /** + * If any non-retriable authentication exceptions were encountered during + * metadata update, clear and return the exception. 
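To make the timeToNextUpdate() arithmetic above concrete, here is a worked example with made-up timestamps:

```java
// Worked example of the timeToNextUpdate() calculation shown above (all values are made up):
// lastSuccessfulRefreshMs = 10_000, metadataExpireMs = 300_000, lastRefreshMs = 10_000,
// refreshBackoffMs = 100, now = 11_000, and no explicit update has been requested.
public class TimeToNextUpdateSketch {
    public static void main(String[] args) {
        boolean needUpdate = false;
        long lastSuccessfulRefreshMs = 10_000, metadataExpireMs = 300_000;
        long lastRefreshMs = 10_000, refreshBackoffMs = 100, now = 11_000;

        long timeToExpire = needUpdate ? 0 : Math.max(lastSuccessfulRefreshMs + metadataExpireMs - now, 0); // 299_000
        long timeToAllowUpdate = lastRefreshMs + refreshBackoffMs - now;                                    // -900
        System.out.println(Math.max(timeToExpire, timeToAllowUpdate)); // 299_000 ms until the next refresh is due
    }
}
```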
+ */ + public synchronized AuthenticationException getAndClearAuthenticationException() { + if (authenticationException != null) { + AuthenticationException exception = authenticationException; + authenticationException = null; + return exception; + } else + return null; + } + + /** + * Wait for metadata update until the current version is larger than the last version we know of + */ + public synchronized void awaitUpdate(final int lastVersion, final long maxWaitMs) throws InterruptedException { + if (maxWaitMs < 0) + throw new IllegalArgumentException("Max time to wait for metadata updates should not be < 0 milliseconds"); + + long begin = System.currentTimeMillis(); + long remainingWaitMs = maxWaitMs; + while ((this.version <= lastVersion) && !isClosed()) { + AuthenticationException ex = getAndClearAuthenticationException(); + if (ex != null) + throw ex; + if (remainingWaitMs != 0) + wait(remainingWaitMs); + long elapsed = System.currentTimeMillis() - begin; + if (elapsed >= maxWaitMs) + throw new TimeoutException("Failed to update metadata after " + maxWaitMs + " ms."); + remainingWaitMs = maxWaitMs - elapsed; + } + if (isClosed()) + throw new KafkaException("Requested metadata update after close"); + } + + /** + * Replace the current set of topics maintained to the one provided. + * If topic expiry is enabled, expiry time of the topics will be + * reset on the next update. + * @param topics + */ + public synchronized void setTopics(Collection topics) { + if (!this.topics.keySet().containsAll(topics)) { + requestUpdateForNewTopics(); + } + this.topics.clear(); + for (String topic : topics) + this.topics.put(topic, TOPIC_EXPIRY_NEEDS_UPDATE); + } + + /** + * Get the list of topics we are currently maintaining metadata for + */ + public synchronized Set topics() { + return new HashSet<>(this.topics.keySet()); + } + + /** + * Check if a topic is already in the topic set. + * @param topic topic to check + * @return true if the topic exists, false otherwise + */ + public synchronized boolean containsTopic(String topic) { + return this.topics.containsKey(topic); + } + + /** + * Updates the cluster metadata. If topic expiry is enabled, expiry time + * is set for topics if required and expired topics are removed from the metadata. + * + * @param newCluster the cluster containing metadata for topics with valid metadata + * @param unavailableTopics topics which are non-existent or have one or more partitions whose + * leader is not known + * @param now current time in milliseconds + */ + public synchronized void update(Cluster newCluster, Set unavailableTopics, long now, boolean bootstrap) { + + log.debug("Update Metadata. isBootstap? " + bootstrap); + Objects.requireNonNull(newCluster, "cluster should not be null"); + if (isClosed()) + throw new IllegalStateException("Update requested after metadata close"); + + this.needUpdate = false; + this.lastRefreshMs = now; + this.lastSuccessfulRefreshMs = now; + this.version += 1; + this.isBootStrap = bootstrap; + + /* if (topicExpiryEnabled) { // Handle expiry of topics from the metadata refresh set. 
for (Iterator> it = topics.entrySet().iterator(); it.hasNext(); ) { Map.Entry entry = it.next(); @@ -264,169 +291,313 @@ public synchronized void update(Cluster newCluster, Set unavailableTopic if (expireMs == TOPIC_EXPIRY_NEEDS_UPDATE) entry.setValue(now + TOPIC_EXPIRY_MS); else if (expireMs <= now) { - it.remove(); log.debug("Removing unused topic {} from the metadata list, expiryMs {} now {}", entry.getKey(), expireMs, now); } } - } - - for (Listener listener: listeners) - listener.onMetadataUpdate(newCluster, unavailableTopics); - - String previousClusterId = cluster.clusterResource().clusterId(); - - if (this.needMetadataForAllTopics) { - // the listener may change the interested topics, which could cause another metadata refresh. - // If we have already fetched all topics, however, another fetch should be unnecessary. - this.needUpdate = false; - this.cluster = getClusterForCurrentTopics(newCluster); - } else { - this.cluster = newCluster; - } - - // The bootstrap cluster is guaranteed not to have any useful information - if (!newCluster.isBootstrapConfigured()) { - String newClusterId = newCluster.clusterResource().clusterId(); - if (newClusterId == null ? previousClusterId != null : !newClusterId.equals(previousClusterId)) - log.info("Cluster ID: {}", newClusterId); - clusterResourceListeners.onUpdate(newCluster.clusterResource()); - } - - notifyAll(); - log.debug("Updated cluster metadata version {} to {}", this.version, this.cluster); - } - - /** - * Record an attempt to update the metadata that failed. We need to keep track of this - * to avoid retrying immediately. - */ - public synchronized void failedUpdate(long now, AuthenticationException authenticationException) { - this.lastRefreshMs = now; - this.authenticationException = authenticationException; - if (authenticationException != null) - this.notifyAll(); - } - - /** - * @return The current metadata version - */ - public synchronized int version() { - return this.version; - } - - /** - * The last time metadata was successfully updated. - */ - public synchronized long lastSuccessfulUpdate() { - return this.lastSuccessfulRefreshMs; - } - - public boolean allowAutoTopicCreation() { - return allowAutoTopicCreation; - } - - /** - * Set state to indicate if metadata for all topics in Kafka cluster is required or not. - * @param needMetadataForAllTopics boolean indicating need for metadata of all topics in cluster. - */ - public synchronized void needMetadataForAllTopics(boolean needMetadataForAllTopics) { - if (needMetadataForAllTopics && !this.needMetadataForAllTopics) { - // requestUpdateForNewTopics(); - } - this.needMetadataForAllTopics = needMetadataForAllTopics; - } - - /** - * Get whether metadata for all topics is needed or not - */ - public synchronized boolean needMetadataForAllTopics() { - return this.needMetadataForAllTopics; - } - - /** - * Add a Metadata listener that gets notified of metadata updates - */ - public synchronized void addListener(Listener listener) { - this.listeners.add(listener); - } - - /** - * Stop notifying the listener of metadata updates - */ - public synchronized void removeListener(Listener listener) { - this.listeners.remove(listener); - } - - /** - * "Close" this metadata instance to indicate that metadata updates are no longer possible. This is typically used - * when the thread responsible for performing metadata updates is exiting and needs a way to relay this information - * to any other thread(s) that could potentially wait on metadata update to come through. 
- */ - @Override - public synchronized void close() { - this.isClosed = true; - /*try { + } */ + + for (Listener listener: listeners) + listener.onMetadataUpdate(newCluster, unavailableTopics); + + if(bootstrap) + { + this.cluster = newCluster; + log.debug("Updated cluster metadata version {} to {}", this.version, this.cluster); + return; + } + + + String previousClusterId = cluster.clusterResource().clusterId(); + Node newLeaderNode = null; + + newLeaderNode = getLeaderNode(this.cluster, newCluster); + + /* If a node previously present is not available in newCluster, then + * we still want to keep it because + * existing nodes may go down and at the same time previously disconnected nodes may come up */ + ArrayList newClusterNodes = new ArrayList(); + newClusterNodes.addAll(newCluster.nodes()); + + boolean oldNodesAdded = false; + for(org.apache.kafka.common.Node oldNode : cluster.nodes()) + { + org.apache.kafka.common.Node nodeById = newCluster.nodeById(oldNode.id()); + if(nodeById == null) + { + newClusterNodes.add(oldNode); + //newCluster.nodes().add(oldNode); + oldNodesAdded = true; + //new Cluster(clusterId, cluster.nodes(), partitionInfos, unauthorizedTopics, internalTopics, controller); + + log.debug("Added Down Node " + oldNode ); + + } + } + if(oldNodesAdded) + { + // Create a new cluster with All previous and current Node. + // Maintain the PartitionInfo of the latest cluster + Cluster newClusterWithOldNodes = getClusterForCurrentTopics(newCluster, newClusterNodes); + newCluster = newClusterWithOldNodes; + } + + if (this.needMetadataForAllTopics) { + log.debug("needMetadataForAllTopics = " + needMetadataForAllTopics); + + // the listener may change the interested topics, which could cause another metadata refresh. + // If we have already fetched all topics, however, another fetch should be unnecessary. + this.needUpdate = false; + this.cluster = getClusterForCurrentTopics(newCluster, null); + } else { + this.cluster = newCluster; + } + //Changes for 2.8.1: + setLeader(newLeaderNode); + //clusterLeaderMap.put(cluster.clusterResource().clusterId()+"_"+version, newLeaderNode); + + // The bootstrap cluster is guaranteed not to have any useful information + if (!newCluster.isBootstrapConfigured()) { + String newClusterId = newCluster.clusterResource().clusterId(); + if (newClusterId == null ? previousClusterId != null : !newClusterId.equals(previousClusterId)) + log.info("Cluster ID: {}", newClusterId); + clusterResourceListeners.onUpdate(newCluster.clusterResource()); + } + + notifyAll(); + log.debug("Updated cluster metadata version {} to {}", this.version, this.cluster); + } + + private Node getLeaderNode(Cluster oldCluster, Cluster newCluster) + { + if(oldCluster == null || newCluster == null) + return null; + + Node oldLeader = getLeader(this.version-1); + + if(oldLeader != null) + { + log.debug("Update Metadata: OldLeaderNode as of version"+(this.version-1)+": " + oldLeader); + } + else + log.debug("Update Metadata: No old leader as of now for cluster " + + cluster.clusterResource().clusterId() + " for version " + (this.version-1)); + + if(oldLeader == null) + { + oldLeader = (org.oracle.okafka.common.Node)oldCluster.controller(); + log.debug("Update Metadata: Checking with cluster Controller node " + oldLeader); + } + + if(oldLeader == null) + { + log.debug("No Old Leader. 
Returning new cluster's controller node" + newCluster.controller()); + return (org.oracle.okafka.common.Node)newCluster.controller(); + } + + List newNodeList = NetworkClient.convertToOracleNodes(newCluster.nodes()); + if(newNodeList == null || newNodeList.size() == 0) + return null; + + boolean portChange = true; + boolean hostChange = true; + boolean serviceChange = true; + Node bestSoFar = null; + Node newLeader = null; + for(Node newNode: newNodeList) + { + portChange = false; + hostChange = false; + serviceChange = false; + if(newNode.hashCode() == oldLeader.hashCode()) + { + newLeader = newNode; + break; + } + else if(newNode.id() == oldLeader.id() && newNode.user().equalsIgnoreCase(oldLeader.user())) + { + if(!newNode.serviceName().equalsIgnoreCase(oldLeader.serviceName())) + { + serviceChange = true; + } + else if(newNode.port() != oldLeader.port()) { + portChange = true; + } + else if(!newNode.host().equalsIgnoreCase(oldLeader.host())) + { + hostChange = true; + } + + if(portChange || hostChange || serviceChange) + { + bestSoFar = newNode; + } + else + { + newLeader = newNode; + break; + } + } + } + if(newLeader != null) + { + // Remove the newly added node which is same as old leader. Keep the old leader object in use. + newNodeList.remove(newLeader); + newNodeList.add(0,oldLeader); + + //setLeader(oldLeader); + log.debug("Old Leader continuing " + oldLeader); + return oldLeader; + } + else if(bestSoFar != null) + { + /* Put Best So Far at the top. + Do not set leader here. The oldLeader is no more alive. + Connection to the old leader would have disconnected or will be disconnected. + New leader will be found eventually. */ + newNodeList.remove(bestSoFar); + newNodeList.add(0,bestSoFar); + log.debug("Update Metadata: New Leader to be chosen. Potential candidate " + bestSoFar); + } + else + { + return (org.oracle.okafka.common.Node)newCluster.controller(); + } + return null; + } + /** + * Record an attempt to update the metadata that failed. We need to keep track of this + * to avoid retrying immediately. + */ + public synchronized void failedUpdate(long now, AuthenticationException authenticationException) { + this.lastRefreshMs = now; + this.authenticationException = authenticationException; + if (authenticationException != null) + this.notifyAll(); + } + + /** + * @return The current metadata version + */ + public synchronized int version() { + return this.version; + } + + /** + * The last time metadata was successfully updated. + */ + public synchronized long lastSuccessfulUpdate() { + return this.lastSuccessfulRefreshMs; + } + + public boolean allowAutoTopicCreation() { + return allowAutoTopicCreation; + } + + /** + * Set state to indicate if metadata for all topics in Kafka cluster is required or not. + * @param needMetadataForAllTopics boolean indicating need for metadata of all topics in cluster. 
+ */ + public synchronized void needMetadataForAllTopics(boolean needMetadataForAllTopics) { + if (needMetadataForAllTopics && !this.needMetadataForAllTopics) { + // requestUpdateForNewTopics(); + } + this.needMetadataForAllTopics = needMetadataForAllTopics; + } + + /** + * Get whether metadata for all topics is needed or not + */ + public synchronized boolean needMetadataForAllTopics() { + return this.needMetadataForAllTopics; + } + + /** + * Add a Metadata listener that gets notified of metadata updates + */ + public synchronized void addListener(Listener listener) { + this.listeners.add(listener); + } + + /** + * Stop notifying the listener of metadata updates + */ + public synchronized void removeListener(Listener listener) { + this.listeners.remove(listener); + } + + /** + * "Close" this metadata instance to indicate that metadata updates are no longer possible. This is typically used + * when the thread responsible for performing metadata updates is exiting and needs a way to relay this information + * to any other thread(s) that could potentially wait on metadata update to come through. + */ + @Override + public synchronized void close() { + this.isClosed = true; + /*try { this.cluster.close(); } catch(SQLException sql) { log.error("failed to close cluster",sql); }*/ - this.notifyAll(); - } - - /** - * Check if this metadata instance has been closed. See {@link #close()} for more information. - * @return True if this instance has been closed; false otherwise - */ - public synchronized boolean isClosed() { - return this.isClosed; - } - - /** - * MetadataUpdate Listener - */ - public interface Listener { - /** - * Callback invoked on metadata update. - * - * @param cluster the cluster containing metadata for topics with valid metadata - * @param unavailableTopics topics which are non-existent or have one or more partitions whose - * leader is not known - */ - void onMetadataUpdate(Cluster cluster, Set unavailableTopics); - } - - private synchronized void requestUpdateForNewTopics() { - // Override the timestamp of last refresh to let immediate update. - this.lastRefreshMs = 0; - requestUpdate(); - } - - private Cluster getClusterForCurrentTopics(Cluster cluster) { - Set unauthorizedTopics = new HashSet<>(); - Collection partitionInfos = new ArrayList<>(); - List nodes = Collections.emptyList(); - Set internalTopics = Collections.emptySet(); - Node controller = null; - String clusterId = null; - if (cluster != null) { - clusterId = cluster.clusterResource().clusterId(); - internalTopics = cluster.internalTopics(); - unauthorizedTopics.addAll(cluster.unauthorizedTopics()); - unauthorizedTopics.retainAll(this.topics.keySet()); - - for (String topic : this.topics.keySet()) { - List partitionInfoList = cluster.partitionsForTopic(topic); - if (!partitionInfoList.isEmpty()) { - partitionInfos.addAll(partitionInfoList); - } - } - nodes = cluster.nodes(); - controller = cluster.controller(); - } - return new Cluster(clusterId, nodes, partitionInfos, unauthorizedTopics, internalTopics, controller, cluster.getConfigs()); - } - - /*public synchronized long getUpdate(String topic,Integer partition, long maxWaitMs) { + this.notifyAll(); + } + + /** + * Check if this metadata instance has been closed. See {@link #close()} for more information. + * @return True if this instance has been closed; false otherwise + */ + public synchronized boolean isClosed() { + return this.isClosed; + } + + /** + * MetadataUpdate Listener + */ + public interface Listener { + /** + * Callback invoked on metadata update. 
+ * + * @param cluster the cluster containing metadata for topics with valid metadata + * @param unavailableTopics topics which are non-existent or have one or more partitions whose + * leader is not known + */ + void onMetadataUpdate(Cluster cluster, Set unavailableTopics); + } + + private synchronized void requestUpdateForNewTopics() { + // Override the timestamp of last refresh to let immediate update. + this.lastRefreshMs = 0; + requestUpdate(); + } + + private Cluster getClusterForCurrentTopics(Cluster cluster, List newNodeList) { + Set unauthorizedTopics = new HashSet<>(); + + Collection partitionInfos = new ArrayList<>(); + + if(newNodeList == null) + newNodeList = cluster.nodes(); + + Set internalTopics = Collections.emptySet(); + Node controller = null; + String clusterId = null; + if (cluster != null) { + clusterId = cluster.clusterResource().clusterId(); + internalTopics = cluster.internalTopics(); + unauthorizedTopics.addAll(cluster.unauthorizedTopics()); + unauthorizedTopics.retainAll(this.topics.keySet()); + + for (String topic : this.topics.keySet()) { + List partitionInfoList = cluster.partitionsForTopic(topic); + if (!partitionInfoList.isEmpty()) { + partitionInfos.addAll(partitionInfoList); + } + } + controller = (org.oracle.okafka.common.Node)cluster.controller(); + } + return new Cluster(clusterId, newNodeList, partitionInfos, unauthorizedTopics, internalTopics, controller);//, cluster.getConfigs()); + } + + /*public synchronized long getUpdate(String topic,Integer partition, long maxWaitMs) { if (maxWaitMs < 0) throw new IllegalArgumentException("Max time to wait for metadata updates should not be < 0 milliseconds"); if (isClosed()) @@ -448,8 +619,104 @@ private Cluster getClusterForCurrentTopics(Cluster cluster) { } return elapsed; }*/ - - public AbstractConfig getConfigs() { - return this.configs; - } + + public AbstractConfig getConfigs() { + return this.configs; + } + + /* + * Return org.oracle.okafka.common.Node by Node Id + */ + public Node getNodeById(int id) + { + if(cluster!=null) + return (Node)cluster.nodeById(id); + return null; + } + + public void setLeader(Node leaderNode) + { + if(cluster != null) + { + clusterLeaderMap.put(cluster.clusterResource().clusterId()+"_"+version, leaderNode); + log.debug("Leader Node for Version " + + cluster.clusterResource().clusterId()+"_"+version + ":" + leaderNode); + } + } + + public Node getLeader() + { + return getLeader(this.version); + } + + public Node getLeader(int version) + { + if(cluster != null) + return getLeader(cluster.clusterResource().clusterId(), version); + else + return null; + } + + public Node getLeader(String clusterId, int version) + { + return clusterLeaderMap.get(clusterId+"_"+version); + } + + public boolean isBootstrap() + { + return isBootStrap; + } + + /** + * Propagate a fatal error which affects the ability to fetch metadata for the cluster. + * Two examples are authentication and unsupported version exceptions. + * + * @param exception The fatal exception + */ + public synchronized void fatalError(KafkaException exception) { + this.fatalException = exception; + } + + /** + * If any fatal exceptions were encountered during metadata update, throw the exception. This is used by + * the producer to abort waiting for metadata if there were fatal exceptions (e.g. authentication failures) + * in the last metadata update. 
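Since Metadata.Listener declares a single callback, a listener can be registered as a lambda. The sketch below assumes an existing Metadata instance; the log line is illustrative only.

```java
// Sketch: registering and later removing a Metadata.Listener to observe cluster refreshes.
import org.oracle.okafka.clients.Metadata;

public class MetadataListenerSketch {
    public static void register(Metadata metadata) {
        Metadata.Listener listener = (cluster, unavailableTopics) ->
                System.out.println("Metadata refreshed; nodes=" + cluster.nodes().size()
                        + ", unavailable topics=" + unavailableTopics);
        metadata.addListener(listener);
        // ... later, when updates are no longer of interest:
        metadata.removeListener(listener);
    }
}
```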
+ */ + protected synchronized void maybeThrowFatalException() { + KafkaException metadataException = this.fatalException; + if (metadataException != null) { + fatalException = null; + throw metadataException; + } + } + // Parse DB Major and Minor Version + public void setDBVersion(String dbVersion) + { + try { + StringTokenizer stn = new StringTokenizer(dbVersion,"."); + setDBMajorVersion(Integer.parseInt(stn.nextToken())); + setDBMinorVersion(Integer.parseInt(stn.nextToken())); + }catch(Exception e) { + + } + } + + public void setDBMajorVersion(int dbMVersion) + { + this.dbMajorVersion = dbMVersion; + } + + public void setDBMinorVersion(int dbMinorVersion) + { + this.dbMinorVersion = dbMinorVersion; + } + public int getDBMajorVersion() + { + return this.dbMajorVersion; + } + public int getDBMinorVersion() + { + return this.dbMinorVersion; + } + } diff --git a/clients/src/main/java/org/oracle/okafka/clients/MetadataUpdater.java b/clients/src/main/java/org/oracle/okafka/clients/MetadataUpdater.java deleted file mode 100644 index aa88f4b..0000000 --- a/clients/src/main/java/org/oracle/okafka/clients/MetadataUpdater.java +++ /dev/null @@ -1,100 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.clients; - -import java.io.Closeable; -import java.util.List; - -import org.oracle.okafka.common.Node; -import org.oracle.okafka.common.errors.AuthenticationException; -import org.oracle.okafka.common.requests.MetadataResponse; -import org.oracle.okafka.common.requests.RequestHeader; - -/** - * The interface used by `NetworkClient` to request cluster metadata info to be updated and to retrieve the cluster nodes - * from such metadata. This is an internal class. - *
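The setDBVersion() parsing above simply takes the first two dot-separated tokens of the version banner as the major and minor version; for example (the version string below is made up):

```java
// Example of the version-string parsing performed by setDBVersion() above.
import java.util.StringTokenizer;

public class DbVersionParseSketch {
    public static void main(String[] args) {
        StringTokenizer stn = new StringTokenizer("23.4.0.24.10", ".");
        int major = Integer.parseInt(stn.nextToken()); // 23
        int minor = Integer.parseInt(stn.nextToken()); // 4
        System.out.println(major + "." + minor);       // prints 23.4
    }
}
```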
- * This class is not thread-safe! - */ -public interface MetadataUpdater extends Closeable { - - /** - * Gets the current cluster info without blocking. - */ - List fetchNodes(); - - /** - * Returns true if an update to the cluster metadata info is due. - */ - boolean isUpdateDue(long now); - - /** - * Starts a cluster metadata update if needed and possible. Returns the time until the metadata update (which would - * be 0 if an update has been started as a result of this call). - * - * If the implementation relies on `NetworkClient` to send requests, `handleCompletedMetadataResponse` will be - * invoked after the metadata response is received. - * - * The semantics of `needed` and `possible` are implementation-dependent and may take into account a number of - * factors like node availability, how long since the last metadata update, etc. - */ - long maybeUpdate(long now); - - /** - * Handle disconnections for metadata requests. - * - * This provides a mechanism for the `MetadataUpdater` implementation to use the NetworkClient instance for its own - * requests with special handling for disconnections of such requests. - * @param destination - */ - void handleDisconnection(String destination); - - /** - * Handle authentication failure. Propagate the authentication exception if awaiting metadata. - * - * @param exception authentication exception from broker - */ - void handleAuthenticationFailure(AuthenticationException exception); - - /** - * Handle responses for metadata requests. - * - * This provides a mechanism for the `MetadataUpdater` implementation to use the NetworkClient instance for its own - * requests with special handling for completed receives of such requests. - */ - void handleCompletedMetadataResponse(RequestHeader requestHeader, long now, MetadataResponse metadataResponse); - - /** - * Schedules an update of the current cluster metadata info. A subsequent call to `maybeUpdate` would trigger the - * start of the update if possible (see `maybeUpdate` for more information). - */ - void requestUpdate(); - - /** - * Close this updater. - */ - @Override - void close(); -} diff --git a/clients/src/main/java/org/oracle/okafka/clients/NetworkClient.java b/clients/src/main/java/org/oracle/okafka/clients/NetworkClient.java index cdf1cc2..ad2d414 100644 --- a/clients/src/main/java/org/oracle/okafka/clients/NetworkClient.java +++ b/clients/src/main/java/org/oracle/okafka/clients/NetworkClient.java @@ -1,9 +1,9 @@ /* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ + ** OKafka Java Client version 23.4. + ** + ** Copyright (c) 2019, 2024 Oracle and/or its affiliates. + ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
+ */ /* * Licensed to the Apache Software Foundation (ASF) under one or more @@ -29,23 +29,31 @@ package org.oracle.okafka.clients; -import org.oracle.okafka.common.Cluster; +//import org.apache.kafka.clients.unused.ClusterConnectionStates; +import org.apache.kafka.clients.admin.internals.AdminMetadataManager; import org.oracle.okafka.common.Node; -import org.oracle.okafka.common.errors.AuthenticationException; +import org.apache.kafka.clients.ClientRequest; +import org.apache.kafka.clients.ClientResponse; +import org.apache.kafka.clients.MetadataUpdater; +import org.apache.kafka.clients.RequestCompletionHandler; +import org.apache.kafka.common.Cluster; +import org.apache.kafka.common.KafkaException; +import org.apache.kafka.common.errors.AuthenticationException; import org.oracle.okafka.common.errors.InvalidLoginCredentialsException; -import org.oracle.okafka.common.metrics.Sensor; +import org.apache.kafka.common.metrics.Sensor; import org.oracle.okafka.common.network.AQClient; import org.oracle.okafka.common.requests.AbstractRequest; import org.oracle.okafka.common.requests.MetadataRequest; import org.oracle.okafka.common.requests.MetadataResponse; -import org.oracle.okafka.common.requests.RequestHeader; -import org.oracle.okafka.common.utils.LogContext; -import org.oracle.okafka.common.utils.Time; +import org.apache.kafka.common.utils.LogContext; +import org.apache.kafka.common.utils.Time; import org.slf4j.Logger; import java.util.ArrayList; +import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.Random; import javax.jms.JMSException; @@ -59,549 +67,721 @@ */ public class NetworkClient implements KafkaClient { - private final Logger log; - - /* the selector used to perform network i/o */ - private final AQClient aqClient; - - private final MetadataUpdater metadataUpdater; - - private final Random randOffset; - - /* the state of each node's connection */ - private final ClusterConnectionStates connectionStates; - - /* the socket send buffer size in bytes */ - private final int socketSendBuffer; - - /* the socket receive size buffer in bytes */ - private final int socketReceiveBuffer; - - /* the client id used to identify this client in requests to the server */ - private final String clientId; - - /* the current correlation id to use when sending requests to servers */ - private int correlation; - - /* default timeout for individual requests to await acknowledgement from servers */ - private final int defaultRequestTimeoutMs; - - /* time in ms to wait before retrying to create connection to a server */ - private final long reconnectBackoffMs; - - private final Time time; - - private final Sensor throttleTimeSensor; - - public NetworkClient(AQClient aqClient, - Metadata metadata, - String clientId, - long reconnectBackoffMs, - long reconnectBackoffMax, - int socketSendBuffer, - int socketReceiveBuffer, - int defaultRequestTimeoutMs, - Time time, - LogContext logContext) { - this(null, - metadata, - aqClient, - clientId, - reconnectBackoffMs, - reconnectBackoffMax, - socketSendBuffer, - socketReceiveBuffer, - defaultRequestTimeoutMs, - time, - null, - logContext); - } - - public NetworkClient(AQClient aqClient, - Metadata metadata, - String clientId, - long reconnectBackoffMs, - long reconnectBackoffMax, - int socketSendBuffer, - int socketReceiveBuffer, - int defaultRequestTimeoutMs, - Time time, - Sensor throttleTimeSensor, - LogContext logContext) { - this(null, - metadata, - aqClient, - clientId, - reconnectBackoffMs, - 
reconnectBackoffMax, - socketSendBuffer, - socketReceiveBuffer, - defaultRequestTimeoutMs, - time, - throttleTimeSensor, - logContext); - } - - public NetworkClient(AQClient aqClient, - MetadataUpdater metadataUpdater, - String clientId, - long reconnectBackoffMs, - long reconnectBackoffMax, - int socketSendBuffer, - int socketReceiveBuffer, - int defaultRequestTimeoutMs, - Time time, - LogContext logContext) { - this(metadataUpdater, - null, - aqClient, - clientId, - reconnectBackoffMs, - reconnectBackoffMax, - socketSendBuffer, - socketReceiveBuffer, - defaultRequestTimeoutMs, - time, - null, - logContext); - } - - private NetworkClient(MetadataUpdater metadataUpdater, - Metadata metadata, - AQClient aqClient, - String clientId, - long reconnectBackoffMs, - long reconnectBackoffMax, - int socketSendBuffer, - int socketReceiveBuffer, - int defaultRequestTimeoutMs, - Time time, - Sensor throttleTimeSensor, - LogContext logContext) { - /* It would be better if we could pass `DefaultMetadataUpdater` from the public constructor, but it's not - * possible because `DefaultMetadataUpdater` is an inner class and it can only be instantiated after the - * super constructor is invoked. - */ - if (metadataUpdater == null) { - if (metadata == null) - throw new IllegalArgumentException("`metadata` must not be null"); - this.metadataUpdater = new DefaultMetadataUpdater(metadata); - } else { - this.metadataUpdater = metadataUpdater; - } - this.aqClient = aqClient; - this.clientId = clientId; - this.connectionStates = new ClusterConnectionStates(reconnectBackoffMs, reconnectBackoffMax); - this.socketSendBuffer = socketSendBuffer; - this.socketReceiveBuffer = socketReceiveBuffer; - this.correlation = 0; - this.randOffset = new Random(); - this.defaultRequestTimeoutMs = defaultRequestTimeoutMs; - this.reconnectBackoffMs = reconnectBackoffMs; - this.time = time; - this.throttleTimeSensor = throttleTimeSensor; - this.log = logContext.logger(NetworkClient.class); - } - - /** - * Begin connecting to the given node, return true if we are already connected and ready to send to that node. - * - * @param node The node to check - * @param now The current timestamp - * @return True if we are ready to send to the given node - */ - @Override - public boolean ready(Node node, long now) { - if (node.isEmpty()) - throw new IllegalArgumentException("Cannot connect to empty node " + node); - - if (isReady(node, now)) - return true; - if (connectionStates.canConnect(node, now)) { - // if we are interested in sending to a node and we don't have a connection to it, initiate one - return initiateConnect(node, now); - } - - return false; - } - - // Visible for testing - boolean canConnect(Node node, long now) { - return connectionStates.canConnect(node, now); - } - - /** - * Disconnects the connection to a particular node, if there is one. - * Any pending ClientRequests for this connection will receive disconnections. - * - * @param nodeId The id of the node - */ - @Override - public void disconnect(Node node) { - - } - - public ClusterConnectionStates getConnectionStates() { - return this.connectionStates; - } - - /** - * Closes the connection to a particular node (if there is one). - * - * @param node the node - */ - @Override - public void close(Node node) { - aqClient.close(node); - connectionStates.remove(node); - } - - /** - * Returns the number of milliseconds to wait, based on the connection state, before attempting to send data. When - * disconnected, this respects the reconnect backoff time. 
When connecting or connected, this handles slow/stalled - * connections. - * - * @param node The node to check - * @param now The current timestamp - * @return The number of milliseconds to wait. - */ - @Override - public long connectionDelay(Node node, long now) { - return connectionStates.connectionDelay(node, now); - } - - /** - * Return the poll delay in milliseconds based on both connection and throttle delay. - * @param node the connection to check - * @param now the current time in ms - */ - @Override - public long pollDelayMs(Node node, long now) { - return connectionStates.pollDelayMs(node, now); - } - - /** - * Check if the connection of the node has failed, based on the connection state. Such connection failure are - * usually transient and can be resumed in the next {@link #ready(org.oracle.okafka.common.Node, long)} } - * call, but there are cases where transient failures needs to be caught and re-acted upon. - * - * @param node the node to check - * @return true iff the connection has failed and the node is disconnected - */ - @Override - public boolean connectionFailed(Node node) { - return connectionStates.isDisconnected(node); - } - - /** - * Check if authentication to this node has failed, based on the connection state. Authentication failures are - * propagated without any retries. - * - * @param node the node to check - * @return an AuthenticationException iff authentication has failed, null otherwise - */ - @Override - public AuthenticationException authenticationException(Node node) { - return connectionStates.authenticationException(node); - } - - /** - * Check if the node is ready to send more requests. - * - * @param node The node - * @param now The current time in ms - * @return true if the node is ready - */ - @Override - public boolean isReady(Node node, long now) { - // if we need to update our metadata now declare all requests unready to make metadata requests first - // priority - return !metadataUpdater.isUpdateDue(now) && canSendRequest(node, now); - } - - /** - * Are we connected and ready and able to send more requests to the given connection? - * - * @param node The node - * @param now the current timestamp - */ - private boolean canSendRequest(Node node, long now) { - return this.connectionStates.isReady(node, now); - - } - - /** - * Send the given request. Requests can only be sent out to ready nodes. - * @param request The request - * @param now The current timestamp - */ - @Override - public ClientResponse send(ClientRequest request, long now) { - return doSend(request, false, now); - } - - private void sendInternalMetadataRequest(MetadataRequest.Builder builder, Node node, long now) { - ClientRequest clientRequest = newClientRequest(node, builder, now, true); - ClientResponse response = doSend(clientRequest, true, now); - log.debug("Got response for metadata request {} from node {}", builder, node); - metadataUpdater.handleCompletedMetadataResponse(response.requestHeader(), time.milliseconds(), (MetadataResponse)response.responseBody()); - } - - private ClientResponse doSend(ClientRequest clientRequest, boolean isInternalRequest, long now) { - Node node = clientRequest.destination(); - - if (node !=null && !isInternalRequest) { - // If this request came from outside the NetworkClient, validate - // that we can send data. If the request is internal, we trust - // that internal code has done this validation. 
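The ready/send contract shown above boils down to: hand a request to send() only after ready() has reported the node connected, otherwise doSend() raises IllegalStateException for external requests. A hedged sketch of a caller, assuming the KafkaClient methods as declared above:

```java
// Sketch of the send contract: check/establish readiness first, then send synchronously.
// Construction of the client, node and request objects is omitted here.
import org.apache.kafka.clients.ClientRequest;
import org.apache.kafka.clients.ClientResponse;
import org.oracle.okafka.clients.KafkaClient;
import org.oracle.okafka.common.Node;

public class SendWhenReadySketch {
    public static ClientResponse trySend(KafkaClient client, Node node, ClientRequest request, long now) {
        if (!client.ready(node, now)) {
            return null; // not connected yet; caller retries after client.connectionDelay(node, now)
        }
        return client.send(request, now);
    }
}
```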
Validation - // will be slightly different for some internal requests (for - // example, ApiVersionsRequests can be sent prior to being in - // READY state.) - if (!canSendRequest(node, now)) - throw new IllegalStateException("Attempt to send a request to node " + node + " which is not ready."); - } - ClientResponse response = aqClient.send(clientRequest); - handleDisconnection(node, response.wasDisconnected(), time.milliseconds()); - return response; - } - - @Override - public boolean hasReadyNodes(long now) { - return connectionStates.hasReadyNodes(now); - } - - @Override - public long maybeUpdateMetadata(long now) { - return metadataUpdater.maybeUpdate(now); - } - - /** - * Close the network client - */ - @Override - public void close() { - aqClient.close(); - this.metadataUpdater.close(); - } - - /** - * Choose first ready node. - * - * @return The node ready. - */ - @Override - public Node leastLoadedNode(long now) { - List nodes = this.metadataUpdater.fetchNodes(); - Node found = null; - int offset = this.randOffset.nextInt(nodes.size()); - for (int i = 0; i < nodes.size(); i++) { - int idx = (offset + i) % nodes.size(); - Node node = nodes.get(idx); - if (isReady(node, now)) { - // if we find an established connection with no in-flight requests we can stop right away - log.debug("Found least loaded node {}", node); - return node; - } else if (!this.connectionStates.isBlackedOut(node, now) ) { - // otherwise if this is the best we have found so far, record that - found = node; - } else if (log.isTraceEnabled()) { - log.debug("Removing node {} from least loaded node selection: is-blacked-out: {}", - node, this.connectionStates.isBlackedOut(node, now)); - } - } - - if (found != null) - log.debug("Found least loaded node {}", found); - else - log.debug("Least loaded node selection failed to find an available node"); - - return found; - } - - @Override - public void disconnected(Node node, long now) { - this.connectionStates.disconnected(node, now); - } - - - /** - * Initiate a connection to the given node - */ - private boolean initiateConnect(Node node, long now) { - try { - log.debug("Initiating connection to node {}", node); - this.connectionStates.connecting(node, now); - aqClient.connect(node); - this.connectionStates.ready(node); - log.trace("Connection is established to node {}", node); - } catch(Exception e) { - if(e instanceof JMSException) { - if(((JMSException)e).getErrorCode().equals("1405")) { - log.error("create session privilege is not assigned", e.getMessage()); - log.info("create session, execute on dbms_aqin, execute on dbms_aqadm privileges required for producer to work"); - } else if (((JMSException)e).getErrorCode().equals("6550")) { - log.error("execute on dbms_aqin is not assigned", e.getMessage()); - log.info("create session, execute on dbms_aqin, dbms_aqadm , dbms_aqjms privileges required for producer or consumer to work"); - } - - } - /* attempt failed, we'll try again after the backoff */ - connectionStates.disconnected(node, now); - /* maybe the problem is our metadata, update it */ - metadataUpdater.requestUpdate(); - - log.warn("Error connecting to node {}", node, e); - if(e instanceof JMSSecurityException || ((JMSException)e).getErrorCode().equals("12505")) - throw new InvalidLoginCredentialsException("Invalid login details provided:" + e.getMessage()); - return false; - } - return true; - } - - private void handleDisconnection(Node node, boolean disconnected, long now) { - if(disconnected) { - disconnected(node, now); - metadataUpdater.requestUpdate(); - 
} - } - - class DefaultMetadataUpdater implements MetadataUpdater { - - /* the current cluster metadata */ - private final Metadata metadata; - - /* true iff there is a metadata request that has been sent and for which we have not yet received a response */ - private boolean metadataFetchInProgress; - - DefaultMetadataUpdater(Metadata metadata) { - this.metadata = metadata; - this.metadataFetchInProgress = false; - } - - @Override - public List fetchNodes() { - return metadata.fetch().nodes(); - } - - @Override - public boolean isUpdateDue(long now) { - return !this.metadataFetchInProgress && this.metadata.timeToNextUpdate(now) == 0; - } - - @Override - public long maybeUpdate(long now) { - // should we update our metadata? - long timeToNextMetadataUpdate = metadata.timeToNextUpdate(now); - long waitForMetadataFetch = this.metadataFetchInProgress ? defaultRequestTimeoutMs : 0; - - long metadataTimeout = Math.max(timeToNextMetadataUpdate, waitForMetadataFetch); - - if (metadataTimeout > 0) { - return metadataTimeout; - } - - Node node = leastLoadedNode(now); - if (node == null) { - log.debug("Give up sending metadata request since no node is available"); - return reconnectBackoffMs; - } - - return maybeUpdate(now, node); - } - - @Override - public void handleCompletedMetadataResponse(RequestHeader requestHeader, long now, MetadataResponse response) { - this.metadataFetchInProgress = false; - Cluster cluster = response.cluster(metadata.getConfigs()); - - // check if any topics metadata failed to get updated - Map errors = response.topicErrors(); - if (!errors.isEmpty()) - log.warn("Error while fetching metadata : {}", errors); - - // don't update the cluster if there are no valid nodes...the topic we want may still be in the process of being - // created which means we will get errors and no nodes until it exists - if (cluster.nodes().size() > 0) { - this.metadata.update(cluster, null, now); - } else { - log.trace("Ignoring empty metadata response with correlation id {}.", requestHeader.correlationId()); - this.metadata.failedUpdate(now, null); - } - } - - @Override - public void handleDisconnection(String destination) { - //not used - } - - @Override - public void handleAuthenticationFailure(AuthenticationException exception) { - metadataFetchInProgress = false; - if (metadata.updateRequested()) - metadata.failedUpdate(time.milliseconds(), exception); - } - - @Override - public void requestUpdate() { - this.metadata.requestUpdate(); - } - - @Override - public void close() { - aqClient.close(); - this.metadata.close(); - } - - /** - * Add a metadata request to the list of sends if we can make one - */ - private long maybeUpdate(long now, Node node) { - - if (!canSendRequest(node, now)) { - if (connectionStates.canConnect(node, now)) { - // we don't have a connection to this node right now, make one - log.debug("Initialize connection to node {} for sending metadata request", node); - try { - if( !initiateConnect(node, now)) - return reconnectBackoffMs; - } catch(InvalidLoginCredentialsException ilc) { - log.error("Failed to connect to node {} with error {}", node, ilc.getMessage()); - this.metadata.failedUpdate(now, new AuthenticationException(ilc.getMessage())); - return reconnectBackoffMs; - } - } else return reconnectBackoffMs; - } - this.metadataFetchInProgress = true; - MetadataRequest.Builder metadataRequest; - if (metadata.needMetadataForAllTopics()) - metadataRequest = MetadataRequest.Builder.allTopics(); - else - metadataRequest = new MetadataRequest.Builder(new 
ArrayList<>(metadata.topics()), - metadata.allowAutoTopicCreation()); - log.debug("Sending metadata request {} to node {}", metadataRequest, node); - sendInternalMetadataRequest(metadataRequest, node, now); - return defaultRequestTimeoutMs; - } - - } - - @Override - public ClientRequest newClientRequest(Node node, - AbstractRequest.Builder requestBuilder, - long createdTimeMs, - boolean expectResponse) { - return newClientRequest(node, requestBuilder, createdTimeMs, expectResponse, defaultRequestTimeoutMs, null); - } - - @Override - public ClientRequest newClientRequest(Node node, - AbstractRequest.Builder requestBuilder, - long createdTimeMs, - boolean expectResponse, - int requestTimeoutMs, - RequestCompletionHandler callback) { - return new ClientRequest(node, requestBuilder, correlation++, clientId, createdTimeMs, expectResponse, - defaultRequestTimeoutMs, callback); - } + private final Logger log; + + /* the selector used to perform network i/o */ + private final AQClient aqClient; + + private Metadata metadata; + private AdminMetadataManager metadataManager; + private MetadataUpdater metadataUpdater; + + private final Random randOffset; + + /* the state of each node's connection */ + private final ClusterConnectionStates connectionStates; + + /* the socket send buffer size in bytes */ + private final int socketSendBuffer; + + /* the socket receive size buffer in bytes */ + private final int socketReceiveBuffer; + + /* the client id used to identify this client in requests to the server */ + private final String clientId; + + /* the current correlation id to use when sending requests to servers */ + private int correlation; + + /* default timeout for individual requests to await acknowledgement from servers */ + private final int defaultRequestTimeoutMs; + + /* time in ms to wait before retrying to create connection to a server */ + private final long reconnectBackoffMs; + + private final Time time; + + private final Sensor throttleTimeSensor; + + // Invoked from KafkaProducer and KafkaConsumer + public NetworkClient(AQClient aqClient, + Metadata metadata, + String clientId, + long reconnectBackoffMs, + long reconnectBackoffMax, + int socketSendBuffer, + int socketReceiveBuffer, + int defaultRequestTimeoutMs, + Time time, + LogContext logContext) { + this(null, + metadata, + aqClient, + clientId, + reconnectBackoffMs, + reconnectBackoffMax, + socketSendBuffer, + socketReceiveBuffer, + defaultRequestTimeoutMs, + time, + null, + logContext); + } + +/* public NetworkClient(AQClient aqClient, + Metadata metadata, + String clientId, + long reconnectBackoffMs, + long reconnectBackoffMax, + int socketSendBuffer, + int socketReceiveBuffer, + int defaultRequestTimeoutMs, + Time time, + Sensor throttleTimeSensor, + LogContext logContext) { + this(null, + metadata, + aqClient, + clientId, + reconnectBackoffMs, + reconnectBackoffMax, + socketSendBuffer, + socketReceiveBuffer, + defaultRequestTimeoutMs, + time, + throttleTimeSensor, + logContext); + }*/ + + //Invoked from KafkaAdmin. Passing metadataManger instead of metadata + //ToDo: Check if this is needed or not. 
+ + public NetworkClient(AQClient aqClient, + AdminMetadataManager metadataManger, + String clientId, + long reconnectBackoffMs, + long reconnectBackoffMax, + int socketSendBuffer, + int socketReceiveBuffer, + int defaultRequestTimeoutMs, + Time time, + LogContext logContext) { + this(metadataManger, + null, + aqClient, + clientId, + reconnectBackoffMs, + reconnectBackoffMax, + socketSendBuffer, + socketReceiveBuffer, + defaultRequestTimeoutMs, + time, + null, + logContext); + } + + private NetworkClient(AdminMetadataManager metadataManager, + Metadata metadata, + AQClient aqClient, + String clientId, + long reconnectBackoffMs, + long reconnectBackoffMax, + int socketSendBuffer, + int socketReceiveBuffer, + int defaultRequestTimeoutMs, + Time time, + Sensor throttleTimeSensor, + LogContext logContext) { + /* It would be better if we could pass `DefaultMetadataUpdater` from the public constructor, but it's not + * possible because `DefaultMetadataUpdater` is an inner class and it can only be instantiated after the + * super constructor is invoked. + */ + this.metadata = metadata; + this.metadataManager = metadataManager; + + if(metadataManager != null) + { + this.metadataUpdater = metadataManager.updater(); + }else + { + this.metadataManager = null; + this.metadataUpdater = null; + } + if (metadataUpdater == null) { + if (this.metadata == null) + throw new IllegalArgumentException("`metadata` must not be null"); + + this.metadataUpdater = new DefaultMetadataUpdater(metadata); + } + /*else { + this.metadataUpdater = metadataUpdater; + }*/ + this.aqClient = aqClient; + this.clientId = clientId; + this.connectionStates = new ClusterConnectionStates(reconnectBackoffMs, reconnectBackoffMax); + this.socketSendBuffer = socketSendBuffer; + this.socketReceiveBuffer = socketReceiveBuffer; + this.correlation = 0; + this.randOffset = new Random(); + this.defaultRequestTimeoutMs = defaultRequestTimeoutMs; + this.reconnectBackoffMs = reconnectBackoffMs; + this.time = time; + this.throttleTimeSensor = throttleTimeSensor; + this.log = logContext.logger(NetworkClient.class); + } + + /** + * Begin connecting to the given node, return true if we are already connected and ready to send to that node. + * + * @param node The node to check + * @param now The current timestamp + * @return True if we are ready to send to the given node + */ + // @Override + public boolean ready(Node node, long now) { + if (node.isEmpty()) + throw new IllegalArgumentException("Cannot connect to empty node " + node); + if (isReady(node, now)) + return true; + if (connectionStates.canConnect(node, now)) { + // if we are interested in sending to a node and we don't have a connection to it, initiate one + return initiateConnect(node, now); + } + + return false; + } + + // Visible for testing + boolean canConnect(Node node, long now) { + return connectionStates.canConnect(node, now); + } + + /** + * Disconnects the connection to a particular node, if there is one. + * Any pending ClientRequests for this connection will receive disconnections. + * + * @param nodeId The id of the node + */ + // @Override + public void disconnect(Node node) { + + } + + public ClusterConnectionStates getConnectionStates() { + return this.connectionStates; + } + + /** + * Closes the connection to a particular node (if there is one). 
+ * + * @param node the node + */ + //@Override + public void close(Node node) { + aqClient.close(node); + connectionStates.remove(node); + } + + /** + * Returns the number of milliseconds to wait, based on the connection state, before attempting to send data. When + * disconnected, this respects the reconnect backoff time. When connecting or connected, this handles slow/stalled + * connections. + * + * @param node The node to check + * @param now The current timestamp + * @return The number of milliseconds to wait. + */ + // @Override + public long connectionDelay(Node node, long now) { + return connectionStates.connectionDelay(node, now); + } + + /** + * Return the poll delay in milliseconds based on both connection and throttle delay. + * @param node the connection to check + * @param now the current time in ms + */ + // @Override + public long pollDelayMs(Node node, long now) { + return connectionStates.pollDelayMs(node, now); + } + + /** + * Check if the connection of the node has failed, based on the connection state. Such connection failure are + * usually transient and can be resumed in the next {@link #ready(org.oracle.okafka.common.Node, long)} } + * call, but there are cases where transient failures needs to be caught and re-acted upon. + * + * @param node the node to check + * @return true iff the connection has failed and the node is disconnected + */ + // @Override + public boolean connectionFailed(Node node) { + return connectionStates.isDisconnected(node); + } + + /** + * Check if authentication to this node has failed, based on the connection state. Authentication failures are + * propagated without any retries. + * + * @param node the node to check + * @return an AuthenticationException iff authentication has failed, null otherwise + */ + //@Override + public AuthenticationException authenticationException(Node node) { + return connectionStates.authenticationException(node); + } + + /** + * Check if the node is ready to send more requests. + * + * @param node The node + * @param now The current time in ms + * @return true if the node is ready + */ + // @Override + public boolean isReady(Node node, long now) { + // if we need to update our metadata now declare all requests unready to make metadata requests first + // priority + // isReady will return false if metadata is due for update. Alternative is to not check for this and handle on the caller partreturn canSendRequest(node, now); + + //return !metadataUpdater.isUpdateDue(now) && canSendRequest(node, now); + //No Point in stopping the world if MetaData update is due. We periodically check and update it. + return canSendRequest(node, now); + } + + /** + * Are we connected and ready and able to send more requests to the given connection? + * + * @param node The node + * @param now the current timestamp + */ + private boolean canSendRequest(Node node, long now) { + boolean connState = this.connectionStates.isReady(node, now); + return connState; + + } + + /** + * Send the given request. Requests can only be sent out to ready nodes. 
+ * @param request The request + * @param now The current timestamp + */ + //@Override + public ClientResponse send(ClientRequest request, long now) { + return doSend(request, false, now); + } + + private void sendInternalMetadataRequest(MetadataRequest.Builder builder, Node node, long now) { + ClientRequest clientRequest = newClientRequest(node, builder, now, true); + ClientResponse response = doSend(clientRequest, true, now); + log.debug("Got response for metadata request {} from node {}", builder, node); + ((DefaultMetadataUpdater)metadataUpdater).handleCompletedMetadataResponse( + response.requestHeader(), time.milliseconds(), (MetadataResponse)response.responseBody()); + } + + private ClientResponse doSend(ClientRequest clientRequest, boolean isInternalRequest, long now) { + ClientResponse response = null; + try { + Node node = null; + if (metadata != null) + { + node = (org.oracle.okafka.common.Node)metadata.getNodeById(Integer.parseInt(clientRequest.destination())); + } + else if(metadataManager != null) + { + node = (org.oracle.okafka.common.Node)metadataManager.nodeById(Integer.parseInt(clientRequest.destination())); + } + + if (node !=null && !isInternalRequest) { + // If this request came from outside the NetworkClient, validate + // that we can send data. If the request is internal, we trust + // that internal code has done this validation. Validation + // will be slightly different for some internal requests (for + // example, ApiVersionsRequests can be sent prior to being in + // READY state.) + if (!canSendRequest(node, now)) { + log.info("Attempt to send a request to node " + node + " which is not ready."); + throw new IllegalStateException("Attempt to send a request to node " + node + " which is not ready."); + } + } + log.debug("Sending Request: " + clientRequest.apiKey().name); + response = aqClient.send(clientRequest); + log.debug("Response Received " + clientRequest.apiKey().name); + handleDisconnection(node, response.wasDisconnected(), time.milliseconds()); + } catch(Exception e) + { + log.error("Exception from NetworkClient.doSend " + e,e); + throw e; + } + return response; + } + + // @Override + public boolean hasReadyNodes(long now) { + return connectionStates.hasReadyNodes(now); + } + + // @Override + public long maybeUpdateMetadata(long now) { + return metadataUpdater.maybeUpdate(now); + } + + /** + * Close the network client + */ + // @Override + public void close() { + aqClient.close(); + this.metadataUpdater.close(); + } + + /** + * Choose first ready node. + * + * @return The node ready. 
+ */ + // @Override + public Node leastLoadedNode(long now) { + + List nodes = convertToOracleNodes(this.metadataUpdater.fetchNodes()); + log.info("Available Nodes " + nodes.size()); + for(Node n : nodes) + { + log.debug(n.toString()); + } + Node found = null; + int offset = this.randOffset.nextInt(nodes.size()); + for (int i = 0; i < nodes.size(); i++) { + int idx = (offset + i) % nodes.size(); + Node node = nodes.get(idx); + /* Removed isMetadataUpdate pending check */ + //if (isReady(node, now)) { + if (canSendRequest(node, now)) { + // if we find an established connection with no in-flight requests we can stop right away + log.debug("Found connected node {}", node); + return node; + }/* else if (!this.connectionStates.isBlackedOut(node, now) ) { + // otherwise if this is the best we have found so far, record that + found = node; + } else if (log.isTraceEnabled()) { + log.debug("Removing node {} from least loaded node selection: is-blacked-out: {}", + node, this.connectionStates.isBlackedOut(node, now)); + } */ + } + + /* if (found != null) + log.debug("Found least loaded node {}", found); + else */ + { + log.info("All Known nodes are disconnected. Try one time to connect."); + //System.out.println("All known nodes are disconnected. Try to re-connect to each node one after the other"); + // If no node is reachable, try to connect one time + boolean connected = false; + for(Node node : nodes) + { + connected = initiateConnect(node,now); + if(connected) + { + log.info("Reconnect successful to node " + node); + found = node; + break; + } + /*else { + try + { + //Cannot connect to Oracle Database. Retry after reconnectBackoffMs seconds + Thread.sleep(reconnectBackoffMs); + } + catch(Exception ignoreE) {} + } */ + } + //If no known node is reachable, try without instnace_name. 
This is needed in case + //application is using SCAN-Listener and database service which gets migrated to available instance + /* if(!connected) + { + log.info("Not able to connect to any know instances."); + Node oldNode = nodes.get(0); + Node newNode = new Node(oldNode.host(), oldNode.port(), oldNode.serviceName()); + log.info("Trying to connect to: " + newNode); + connected = initiateConnect(newNode,now); + if(connected) { + log.info("Connected to "+ newNode); + found = newNode; + } + else { + log.error("Not able to reach Oracle Database:" + newNode); + } + } */ + if(found == null) + log.debug("Least loaded node selection failed to find an available node"); + } + + return found; + } + + // @Override + public void disconnected(Node node, long now) { + this.connectionStates.disconnected(node, now); + } + + + /** + * Initiate a connection to the given node + */ + public boolean initiateConnect(Node node, long now) { + try { + + log.info("Initiating connection to node {}", node); + //Thread.dumpStack(); + aqClient.connect(node); + this.connectionStates.connecting(node, now); + this.connectionStates.ready(node); + log.debug("Connection is established to node {}", node); + } catch(Exception e) { + if(e instanceof JMSException) { + JMSException jmsExcp = (JMSException)e; + String jmsError = jmsExcp.getErrorCode(); + log.error("Connection Error " +jmsExcp ); + if(jmsError != null && jmsError.equals("1405")) { + log.error("create session privilege is not assigned", e.getMessage()); + log.info("create session, execute on dbms_aqin, execute on dbms_aqadm privileges required for producer to work"); + } + + } + + /* attempt failed, we'll try again after the backoff */ + connectionStates.disconnected(node, now); + /* maybe the problem is our metadata, update it */ + ((DefaultMetadataUpdater)metadataUpdater).requestUpdate(); + + log.warn("Error connecting to node {}", node, e); + if(e instanceof JMSSecurityException || ((JMSException)e).getErrorCode().equals("12505")) + throw new InvalidLoginCredentialsException("Invalid login details provided:" + e.getMessage()); + return false; + } + return true; + } + + private void handleDisconnection(Node node, boolean disconnected, long now) { + if(disconnected) { + disconnected(node, now); + ((DefaultMetadataUpdater)metadataUpdater).requestUpdate(); + } + } + + public static List convertToKafkaNodes(List okafkaNodeList) + { + ArrayList kafkaNodeList = new ArrayList(); + for(Node n : okafkaNodeList) + { + kafkaNodeList.add((org.apache.kafka.common.Node)n); + } + return kafkaNodeList; + } + + public static List convertToOracleNodes(List apacheNodeList) + { + ArrayList oracleNodeList = new ArrayList(); + for(org.apache.kafka.common.Node n : apacheNodeList) + { + oracleNodeList.add((org.oracle.okafka.common.Node)n); + } + return oracleNodeList; + } + + + class DefaultMetadataUpdater implements MetadataUpdater { + + /* the current cluster metadata */ + private final Metadata metadata; + + /* true iff there is a metadata request that has been sent and for which we have not yet received a response */ + private boolean metadataFetchInProgress; + + DefaultMetadataUpdater(Metadata metadata) { + this.metadata = metadata; + this.metadataFetchInProgress = false; + } + + @Override + public List fetchNodes() { + return (metadata.fetch().nodes()); + } + + + @Override + public boolean isUpdateDue(long now) { + return !this.metadataFetchInProgress && this.metadata.timeToNextUpdate(now) == 0; + } + + @Override + public long maybeUpdate(long now) { + // should we update our 
metadata? + long timeToNextMetadataUpdate = metadata.timeToNextUpdate(now); + long waitForMetadataFetch = this.metadataFetchInProgress ? defaultRequestTimeoutMs : 0; + long metadataTimeout = Math.max(timeToNextMetadataUpdate, waitForMetadataFetch); + if (metadataTimeout > 0) { + return metadataTimeout; + } + Node node = leastLoadedNode(now); + if (node == null) + { + if(metadata != null && metadata.fetch() != null) + { + List nodes = convertToOracleNodes(metadata.fetch().nodes()); + if(nodes != null) + { + String oldClusterId = null; + try { + oldClusterId = metadata.fetch().clusterResource().clusterId(); + } catch(Exception ignoreExcp) {} + + Node oldNode = nodes.get(0); + Node newNode = new Node(oldNode.host(), oldNode.port(), oldNode.serviceName()); + log.info("MetaData Updater : Trying to connect to: " + newNode); + boolean connected = initiateConnect(newNode,now); + if(connected) { + log.info("Connection Successful. Using this node to fetch metadata"); + node = newNode; + Cluster renewCluster = new Cluster(oldClusterId, Collections.singletonList(newNode), new ArrayList<>(0), + Collections.emptySet(), Collections.emptySet()); + this.metadata.update(renewCluster, Collections.emptySet(), time.milliseconds(), true); + } + else { + log.info("Not able to connect to "+ newNode); + } + } + } + } + // If connection is not setup yet then return + if(node == null) + { + log.error("Give up sending metadata request since no node is available. Retry after " + reconnectBackoffMs); + return reconnectBackoffMs; + } + else { + log.debug("May Update matadata with node : " + node); + } + + return maybeUpdate(now, node); + } + + public void handleCompletedMetadataResponse(org.apache.kafka.common.requests.RequestHeader requestHeader, long now, MetadataResponse response) { + this.metadataFetchInProgress = false; + //org.apache.kafka.common.Cluster cluster = response.cluster(metadata.getConfigs()); + org.apache.kafka.common.Cluster cluster = response.cluster(); + + // check if any topics metadata failed to get updated + Map errors = response.topicErrors(); + if (!errors.isEmpty()) + log.warn("Error while fetching metadata : {}", errors); + + // don't update the cluster if there are no valid nodes...the topic we want may still be in the process of being + // created which means we will get errors and no nodes until it exists + if (cluster.nodes().size() > 0) { + this.metadata.update(cluster, null, now, false); + } else { + log.debug("Ignoring empty metadata response with correlation id {}.", requestHeader.correlationId()); + this.metadata.failedUpdate(now, null); + } + } + + //@Override + public void handleDisconnection(String destination) { + //not used + } + + // @Override + public void handleAuthenticationFailure(AuthenticationException exception) { + metadataFetchInProgress = false; + if (metadata.updateRequested()) + metadata.failedUpdate(time.milliseconds(), exception); + } + + // @Override + public void requestUpdate() { + this.metadata.requestUpdate(); + } + + @Override + public void close() { + aqClient.close(); + this.metadata.close(); + } + + /** + * Add a metadata request to the list of sends if we can make one + */ + private long maybeUpdate(long now, Node node) { + if (!canSendRequest(node, now)) { + log.debug("Cannot send Request. 
connect Now to node: " + node); + if (connectionStates.canConnect(node, now)) { + // we don't have a connection to this node right now, make one + //log.info(this.toString() + " Initialize connection to node {} for sending metadata request", node); + try { + if( !initiateConnect(node, now)) + return reconnectBackoffMs; + } catch(InvalidLoginCredentialsException ilc) { + log.error("Failed to connect to node {} with error {}", node, ilc.getMessage()); + this.metadata.failedUpdate(now, new AuthenticationException(ilc.getMessage())); + return reconnectBackoffMs; + } + } else return reconnectBackoffMs; + } + this.metadataFetchInProgress = true; + MetadataRequest.Builder metadataRequest; + if (metadata.needMetadataForAllTopics()) + metadataRequest = MetadataRequest.Builder.allTopics(); + else + { + List topicList = new ArrayList<>(metadata.topics()); + metadataRequest = new MetadataRequest.Builder(topicList, + metadata.allowAutoTopicCreation(), topicList); + } + log.debug("Sending metadata request {} to node {}", metadataRequest, node); + sendInternalMetadataRequest(metadataRequest, node, now); + return defaultRequestTimeoutMs; + } + + @Override + public void handleServerDisconnect(long now, String nodeId, + Optional maybeAuthException) { + // TODO Auto-generated method stub + + } + + @Override + public void handleFailedRequest(long now, Optional maybeFatalException) { + // TODO Auto-generated method stub + + } + + @Override + public void handleSuccessfulResponse(org.apache.kafka.common.requests.RequestHeader requestHeader, long now, + org.apache.kafka.common.requests.MetadataResponse metadataResponse) { + // TODO Auto-generated method stub + + } + + } + + @Override + public ClientRequest newClientRequest(Node node, + AbstractRequest.Builder requestBuilder, + long createdTimeMs, + boolean expectResponse) { + return newClientRequest(node, requestBuilder, createdTimeMs, expectResponse, defaultRequestTimeoutMs, null); + } + + @Override + public ClientRequest newClientRequest(Node node, + AbstractRequest.Builder requestBuilder, + long createdTimeMs, + boolean expectResponse, + int requestTimeoutMs, + RequestCompletionHandler callback) { + return new ClientRequest(""+node.id(), requestBuilder, correlation++, clientId, createdTimeMs, expectResponse, + defaultRequestTimeoutMs, callback); + } } diff --git a/clients/src/main/java/org/oracle/okafka/clients/RequestCompletionHandler.java b/clients/src/main/java/org/oracle/okafka/clients/RequestCompletionHandler.java deleted file mode 100644 index 7bc0829..0000000 --- a/clients/src/main/java/org/oracle/okafka/clients/RequestCompletionHandler.java +++ /dev/null @@ -1,35 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.clients; - -/** - * A callback interface for attaching an action to be executed when a request is complete and the corresponding response - * has been received. This handler will also be invoked if there is a disconnection while handling the request. - */ -public interface RequestCompletionHandler { - - public void onComplete(ClientResponse response); - -} diff --git a/clients/src/main/java/org/oracle/okafka/clients/TopicTeqParameters.java b/clients/src/main/java/org/oracle/okafka/clients/TopicTeqParameters.java new file mode 100644 index 0000000..dee90bd --- /dev/null +++ b/clients/src/main/java/org/oracle/okafka/clients/TopicTeqParameters.java @@ -0,0 +1,68 @@ +/* +** OKafka Java Client version 23.4. +** +** Copyright (c) 2019, 2024 Oracle and/or its affiliates. +** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +*/ + +package org.oracle.okafka.clients; + + +import java.util.HashMap; + +public class TopicTeqParameters { + + int keyBased; + int stickyDeq; + int shardNum; + int dbMajorVersion; + int dbMinorVersion; + int msgVersion; + + + public void setKeyBased(int keyBased) + { + this.keyBased = keyBased; + } + + public void setStickyDeq(int stickyDeq) + { + this.stickyDeq = stickyDeq; + } + + public void setShardNum(int shardNum) + { + this.shardNum = shardNum; + } + + private void setMsgVersion(int msgVersion) + { + this.msgVersion = msgVersion; + } + + public int getKeyBased() + { + return this.keyBased; + } + + public int getStickyDeq() + { + return this.stickyDeq; + } + + public int getShardNum() + { + return this.shardNum; + } + + public int getMsgVersion() + { + if(getStickyDeq()!=2) { + this.msgVersion = 1; + } + else { + this.msgVersion = 2; + } + return this.msgVersion; + } +} diff --git a/clients/src/main/java/org/oracle/okafka/clients/admin/AbstractOptions.java b/clients/src/main/java/org/oracle/okafka/clients/admin/AbstractOptions.java deleted file mode 100644 index 08191a1..0000000 --- a/clients/src/main/java/org/oracle/okafka/clients/admin/AbstractOptions.java +++ /dev/null @@ -1,53 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
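The new `TopicTeqParameters` holder introduced above derives the message version from the sticky-dequeue setting (any value other than 2 maps to version 1). A minimal sketch of how it might be populated follows; the parameter values are illustrative assumptions, not values taken from this patch.

```java
import org.oracle.okafka.clients.TopicTeqParameters;

// Illustrative use of the TopicTeqParameters holder added in this patch.
// The values set below are assumptions chosen only for the example.
public class TopicTeqParametersDemo {
    public static void main(String[] args) {
        TopicTeqParameters params = new TopicTeqParameters();
        params.setKeyBased(1);   // assumed: key-based enqueue enabled
        params.setStickyDeq(2);  // assumed: sticky dequeue mode 2
        params.setShardNum(5);   // assumed: five partitions/shards

        // getMsgVersion() reports 2 only when the sticky-dequeue mode is 2,
        // and 1 for every other value.
        System.out.println("Message version: " + params.getMsgVersion()); // prints 2
    }
}
```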
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.clients.admin; - -/* - * This class implements the common APIs that are shared by Options classes for various AdminClient commands - */ -public abstract class AbstractOptions { - - protected Integer timeoutMs = null; - - /** - * Set the request timeout in milliseconds for this operation or {@code null} if the default request timeout for the - * AdminClient should be used. - */ - @SuppressWarnings("unchecked") - public T timeoutMs(Integer timeoutMs) { - this.timeoutMs = timeoutMs; - return (T) this; - } - - /** - * The request timeout in milliseconds for this operation or {@code null} if the default request timeout for the - * AdminClient should be used. - */ - public Integer timeoutMs() { - return timeoutMs; - } - - -} diff --git a/clients/src/main/java/org/oracle/okafka/clients/admin/Admin.java b/clients/src/main/java/org/oracle/okafka/clients/admin/Admin.java new file mode 100644 index 0000000..934e60d --- /dev/null +++ b/clients/src/main/java/org/oracle/okafka/clients/admin/Admin.java @@ -0,0 +1,1596 @@ +/* + ** OKafka Java Client version 23.4. + ** + ** Copyright (c) 2019, 2024 Oracle and/or its affiliates. + ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. + */ + +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.oracle.okafka.clients.admin; + +import java.time.Duration; +import java.util.Collection; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Properties; +import java.util.Set; +import java.util.concurrent.TimeUnit; + +import org.apache.kafka.clients.admin.CreateTopicsOptions; +import org.apache.kafka.clients.admin.CreateTopicsResult; +import org.apache.kafka.clients.admin.DeleteTopicsOptions; +import org.apache.kafka.clients.admin.DeleteTopicsResult; +import org.apache.kafka.clients.admin.ListTopicsOptions; +import org.apache.kafka.clients.admin.ListTopicsResult; +import org.apache.kafka.clients.admin.NewPartitionReassignment; +import org.apache.kafka.clients.admin.NewTopic; +import org.apache.kafka.clients.admin.OffsetSpec; +import org.apache.kafka.clients.consumer.OffsetAndMetadata; +import org.apache.kafka.common.ElectionType; +import org.apache.kafka.common.KafkaFuture; +import org.apache.kafka.common.Metric; +import org.apache.kafka.common.MetricName; +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.TopicPartitionReplica; +import org.apache.kafka.common.acl.AclBinding; +import org.apache.kafka.common.acl.AclBindingFilter; +import org.apache.kafka.common.annotation.InterfaceStability; +import org.apache.kafka.common.config.ConfigResource; +import org.apache.kafka.common.errors.FeatureUpdateFailedException; +import org.apache.kafka.common.quota.ClientQuotaAlteration; +import org.apache.kafka.common.quota.ClientQuotaFilter; +import org.apache.kafka.common.requests.LeaveGroupResponse; +import org.oracle.okafka.common.errors.FeatureNotSupportedException; +import org.apache.kafka.clients.admin.AlterClientQuotasOptions; +import org.apache.kafka.clients.admin.AlterClientQuotasResult; +import org.apache.kafka.clients.admin.AlterConfigOp; +import org.apache.kafka.clients.admin.AlterConfigsOptions; +import org.apache.kafka.clients.admin.AlterConfigsResult; +import org.apache.kafka.clients.admin.AlterConsumerGroupOffsetsOptions; +import org.apache.kafka.clients.admin.AlterConsumerGroupOffsetsResult; +import org.apache.kafka.clients.admin.AlterPartitionReassignmentsOptions; +import org.apache.kafka.clients.admin.AlterPartitionReassignmentsResult; +import org.apache.kafka.clients.admin.AlterReplicaLogDirsOptions; +import org.apache.kafka.clients.admin.AlterReplicaLogDirsResult; +import org.apache.kafka.clients.admin.AlterUserScramCredentialsOptions; +import org.apache.kafka.clients.admin.AlterUserScramCredentialsResult; +import org.apache.kafka.clients.admin.Config; +import org.apache.kafka.clients.admin.CreateAclsOptions; +import org.apache.kafka.clients.admin.CreateAclsResult; +import org.apache.kafka.clients.admin.CreateDelegationTokenOptions; +import org.apache.kafka.clients.admin.CreateDelegationTokenResult; +import org.apache.kafka.clients.admin.CreatePartitionsOptions; +import org.apache.kafka.clients.admin.CreatePartitionsResult; +import org.apache.kafka.clients.admin.CreateTopicsOptions; +import org.apache.kafka.clients.admin.CreateTopicsResult; +import org.apache.kafka.clients.admin.DeleteAclsOptions; +import org.apache.kafka.clients.admin.DeleteAclsResult; +import org.apache.kafka.clients.admin.DeleteConsumerGroupOffsetsOptions; +import org.apache.kafka.clients.admin.DeleteConsumerGroupOffsetsResult; +import org.apache.kafka.clients.admin.DeleteConsumerGroupsOptions; +import org.apache.kafka.clients.admin.DeleteConsumerGroupsResult; +import 
org.apache.kafka.clients.admin.DeleteRecordsOptions; +import org.apache.kafka.clients.admin.DeleteRecordsResult; +import org.apache.kafka.clients.admin.DeleteTopicsOptions; +import org.apache.kafka.clients.admin.DeleteTopicsResult; +import org.apache.kafka.clients.admin.DescribeAclsOptions; +import org.apache.kafka.clients.admin.DescribeAclsResult; +import org.apache.kafka.clients.admin.DescribeClientQuotasOptions; +import org.apache.kafka.clients.admin.DescribeClientQuotasResult; +import org.apache.kafka.clients.admin.DescribeClusterOptions; +import org.apache.kafka.clients.admin.DescribeClusterResult; +import org.apache.kafka.clients.admin.DescribeConfigsOptions; +import org.apache.kafka.clients.admin.DescribeConfigsResult; +import org.apache.kafka.clients.admin.DescribeConsumerGroupsOptions; +import org.apache.kafka.clients.admin.DescribeConsumerGroupsResult; +import org.apache.kafka.clients.admin.DescribeDelegationTokenOptions; +import org.apache.kafka.clients.admin.DescribeDelegationTokenResult; +import org.apache.kafka.clients.admin.DescribeFeaturesOptions; +import org.apache.kafka.clients.admin.DescribeFeaturesResult; +import org.apache.kafka.clients.admin.DescribeLogDirsOptions; +import org.apache.kafka.clients.admin.DescribeLogDirsResult; +import org.apache.kafka.clients.admin.DescribeReplicaLogDirsOptions; +import org.apache.kafka.clients.admin.DescribeReplicaLogDirsResult; +import org.apache.kafka.clients.admin.DescribeTopicsOptions; +import org.apache.kafka.clients.admin.DescribeTopicsResult; +import org.apache.kafka.clients.admin.DescribeUserScramCredentialsOptions; +import org.apache.kafka.clients.admin.DescribeUserScramCredentialsResult; +import org.apache.kafka.clients.admin.ElectLeadersOptions; +import org.apache.kafka.clients.admin.ElectLeadersResult; +import org.apache.kafka.clients.admin.ExpireDelegationTokenOptions; +import org.apache.kafka.clients.admin.ExpireDelegationTokenResult; +import org.apache.kafka.clients.admin.FeatureUpdate; +import org.apache.kafka.clients.admin.ListConsumerGroupOffsetsOptions; +import org.apache.kafka.clients.admin.ListConsumerGroupOffsetsResult; +import org.apache.kafka.clients.admin.ListConsumerGroupsOptions; +import org.apache.kafka.clients.admin.ListConsumerGroupsResult; +import org.apache.kafka.clients.admin.ListOffsetsOptions; +import org.apache.kafka.clients.admin.ListOffsetsResult; +import org.apache.kafka.clients.admin.ListPartitionReassignmentsOptions; +import org.apache.kafka.clients.admin.ListPartitionReassignmentsResult; +import org.apache.kafka.clients.admin.ListTopicsOptions; +import org.apache.kafka.clients.admin.ListTopicsResult; +import org.apache.kafka.clients.admin.NewPartitions; +import org.apache.kafka.clients.admin.NewTopic; +import org.apache.kafka.clients.admin.RecordsToDelete; +import org.apache.kafka.clients.admin.RemoveMembersFromConsumerGroupOptions; +import org.apache.kafka.clients.admin.RemoveMembersFromConsumerGroupResult; +import org.apache.kafka.clients.admin.RenewDelegationTokenOptions; +import org.apache.kafka.clients.admin.RenewDelegationTokenResult; +import org.apache.kafka.clients.admin.UnregisterBrokerOptions; +import org.apache.kafka.clients.admin.UnregisterBrokerResult; +import org.apache.kafka.clients.admin.UpdateFeaturesOptions; +import org.apache.kafka.clients.admin.UpdateFeaturesResult; +import org.apache.kafka.clients.admin.UserScramCredentialAlteration; +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.TopicPartitionReplica; +import 
org.apache.kafka.common.acl.AclBinding; +import org.apache.kafka.common.acl.AclBindingFilter; +import org.apache.kafka.common.annotation.InterfaceStability; +import org.apache.kafka.common.config.ConfigResource; + +/** + * The administrative client for Kafka, which supports managing and inspecting topics. + *

+ * Instances returned from the {@code create} methods of this interface are guaranteed to be thread safe.
+ * However, the {@link KafkaFuture KafkaFutures} returned from request methods are executed
+ * by a single thread so it is important that any code which executes on that thread when they complete
+ * (using {@link KafkaFuture#thenApply(KafkaFuture.Function)}, for example) doesn't block
+ * for too long. If necessary, processing of results should be passed to another thread.
+ * <p>
+ * The operations exposed by Admin follow a consistent pattern:
+ * <ul>
+ *   <li>Admin instances should be created using {@link Admin#create(Properties)} or {@link Admin#create(Map)}</li>
+ *   <li>Each operation typically has two overloaded methods, one which uses a default set of options and an
+ *   overloaded method where the last parameter is an explicit options object.</li>
+ *   <li>The operation method's first parameter is a {@code Collection} of items to perform
+ *   the operation on. Batching multiple requests into a single call is more efficient and should be
+ *   preferred over multiple calls to the same method.</li>
+ *   <li>The operation methods execute asynchronously.</li>
+ *   <li>Each {@code xxx} operation method returns an {@code XxxResult} class with methods which expose
+ *   {@link KafkaFuture} for accessing the result(s) of the operation.</li>
+ *   <li>Typically an {@code all()} method is provided for getting the overall success/failure of the batch and a
+ *   {@code values()} method provides access to each item in a request batch.
+ *   Other methods may also be provided.</li>
+ *   <li>For synchronous behaviour use {@link KafkaFuture#get()}</li>
+ * </ul>
+ * <p>
+ * Here is a simple example of using an Admin client instance to create a new topic:
+ * <pre>
+ * {@code
+ * Properties props = new Properties();
+ * props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
+ *
+ * try (Admin admin = Admin.create(props)) {
+ *   String topicName = "my-topic";
+ *   int partitions = 12;
+ *   short replicationFactor = 3;
+ *   // Create a compacted topic
+ *   CreateTopicsResult result = admin.createTopics(Collections.singleton(
+ *     new NewTopic(topicName, partitions, replicationFactor)
+ *       .configs(Collections.singletonMap(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_COMPACT))));
+ *
+ *   // Call values() to get the result for a specific topic
+ *   KafkaFuture<Void> future = result.values().get(topicName);
+ *
+ *   // Call get() to block until the topic creation is complete or has failed
+ *   // if creation failed the ExecutionException wraps the underlying cause.
+ *   future.get();
+ * }
+ * }</pre>
+ *
+ * <h3>Bootstrap and balancing</h3>
+ * <p>
+ * The {@code bootstrap.servers} config in the {@code Map} or {@code Properties} passed
+ * to {@link Admin#create(Properties)} is only used for discovering the brokers in the cluster,
+ * which the client will then connect to as needed.
+ * As such, it is sufficient to include only two or three broker addresses to cope with the possibility of brokers
+ * being unavailable.
+ * <p>
+ * Different operations necessitate requests being sent to different nodes in the cluster. For example
+ * {@link #createTopics(Collection)} communicates with the controller, but {@link #describeTopics(Collection)}
+ * can talk to any broker. When the recipient does not matter the instance will try to use the broker with the
+ * fewest outstanding requests.
+ * <p>
+ * The client will transparently retry certain errors which are usually transient. + * For example if the request for {@code createTopics()} get sent to a node which was not the controller + * the metadata would be refreshed and the request re-sent to the controller. + */ +@InterfaceStability.Evolving +public interface Admin extends org.apache.kafka.clients.admin.Admin { + + /** + * Create a new Admin with the given configuration. + * + * @param props The configuration. + * @return The new KafkaAdminClient. + */ + static org.apache.kafka.clients.admin.Admin create(Properties props) { + return KafkaAdminClient.createInternal(new AdminClientConfig(props, true), null); + } + + /** + * Create a new Admin with the given configuration. + * + * @param conf The configuration. + * @return The new KafkaAdminClient. + */ + static org.apache.kafka.clients.admin.Admin create(Map conf) { + return KafkaAdminClient.createInternal(new AdminClientConfig(conf, true), null); + } + + /** + * Close the Admin and release all associated resources. + *
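For orientation, here is a hedged sketch of using the `create(Properties)` entry points above to create a Transactional Event Queue topic. The `NewTopic`/`CreateTopicsResult` calls are the standard Kafka Admin API that this interface extends; the connection property names and values (`bootstrap.servers`, `oracle.service.name`, `oracle.net.tns_admin`, `security.protocol`) are assumptions for illustration and should be taken from the project's configuration documentation.

```java
import java.util.Collections;
import java.util.Properties;
import java.util.concurrent.ExecutionException;

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.CreateTopicsResult;
import org.apache.kafka.clients.admin.NewTopic;

public class CreateTxEventQTopic {
    public static void main(String[] args) throws ExecutionException, InterruptedException {
        Properties props = new Properties();
        // Assumed OKafka-style connection settings; adjust to your database.
        props.put("bootstrap.servers", "dbhost:1521");                        // database listener host:port (assumption)
        props.put("oracle.service.name", "freepdb1");                         // database service name (assumption)
        props.put("oracle.net.tns_admin", "/path/to/wallet_or_tnsnames_dir"); // directory with ojdbc.properties/tnsnames.ora (assumption)
        props.put("security.protocol", "PLAINTEXT");

        // org.oracle.okafka.clients.admin.Admin.create(...) returns the standard
        // org.apache.kafka.clients.admin.Admin interface, used below.
        try (Admin admin = org.oracle.okafka.clients.admin.Admin.create(props)) {
            CreateTopicsResult result = admin.createTopics(
                    Collections.singletonList(new NewTopic("TXEQ", 5, (short) 1)));
            result.all().get(); // block until the topic (TEQ) is created or the call fails
        }
    }
}
```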

+ * See {@link #close(long, TimeUnit)} + */ + @Override + default void close() { + close(Long.MAX_VALUE, TimeUnit.MILLISECONDS); + } + + /** + * Close the Admin and release all associated resources. + *

+ * The close operation has a grace period during which current operations will be allowed to + * complete, specified by the given duration and time unit. + * New operations will not be accepted during the grace period. Once the grace period is over, + * all operations that have not yet been completed will be aborted with a {@link org.apache.kafka.common.errors.TimeoutException}. + * + * @param duration The duration to use for the wait time. + * @param unit The time unit to use for the wait time. + * @deprecated Since 2.2. Use {@link #close(Duration)} or {@link #close()}. + */ + @Deprecated + default void close(long duration, TimeUnit unit) { + close(Duration.ofMillis(unit.toMillis(duration))); + } + + /** + * Close the Admin client and release all associated resources. + *

+ * The close operation has a grace period during which current operations will be allowed to + * complete, specified by the given duration. + * New operations will not be accepted during the grace period. Once the grace period is over, + * all operations that have not yet been completed will be aborted with a {@link org.apache.kafka.common.errors.TimeoutException}. + * + * @param timeout The time to use for the wait time. + */ + void close(Duration timeout); + + /** + * Create a batch of new topics with the default options. + *

+ * This is a convenience method for {@link #createTopics(Collection, CreateTopicsOptions)} with default options. + * See the overload for more details. + *

+ * This operation is supported by brokers with version 0.10.1.0 or higher. + * + * @param newTopics The new topics to create. + * @return The CreateTopicsResult. + */ + default CreateTopicsResult createTopics(Collection newTopics) { + return createTopics(newTopics, new CreateTopicsOptions()); + } + + /** + * Create a batch of new topics. + *

+ * This operation is not transactional so it may succeed for some topics while fail for others. + *

+ * It may take several seconds after {@link CreateTopicsResult} returns + * success for all the brokers to become aware that the topics have been created. + * During this time, {@link #listTopics()} and {@link #describeTopics(Collection)} + * may not return information about the new topics. + *

+ * This operation is supported by brokers with version 0.10.1.0 or higher. The validateOnly option is supported + * from version 0.10.2.0. + * + * @param newTopics The new topics to create. + * @param options The options to use when creating the new topics. + * @return The CreateTopicsResult. + */ + CreateTopicsResult createTopics(Collection newTopics, CreateTopicsOptions options); + + /** + * This is a convenience method for {@link #deleteTopics(Collection, DeleteTopicsOptions)} + * with default options. See the overload for more details. + *

+ * This operation is supported by brokers with version 0.10.1.0 or higher. + * + * @param topics The topic names to delete. + * @return The DeleteTopicsResult. + */ + default DeleteTopicsResult deleteTopics(Collection topics) { + return deleteTopics(topics, new DeleteTopicsOptions()); + } + + /** + * Delete a batch of topics. + *
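A short, hedged companion sketch for the `deleteTopics` overloads documented here. The `Admin` instance is assumed to have been created as in the earlier sketch, and the topic name is illustrative.

```java
import java.util.Collections;
import java.util.concurrent.ExecutionException;

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.DeleteTopicsResult;

public final class DeleteTopicExample {
    // Deletes one topic and waits for the outcome; "TXEQ" is an illustrative name.
    static void deleteTopic(Admin admin) throws ExecutionException, InterruptedException {
        DeleteTopicsResult result = admin.deleteTopics(Collections.singletonList("TXEQ"));
        result.all().get(); // failures surface as ExecutionException
    }
}
```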

+ * This operation is not transactional so it may succeed for some topics while fail for others. + *

+ * It may take several seconds after the {@link DeleteTopicsResult} returns + * success for all the brokers to become aware that the topics are gone. + * During this time, {@link #listTopics()} and {@link #describeTopics(Collection)} + * may continue to return information about the deleted topics. + *

+ * If delete.topic.enable is false on the brokers, deleteTopics will mark + * the topics for deletion, but not actually delete them. The futures will + * return successfully in this case. + *

+ * This operation is supported by brokers with version 0.10.1.0 or higher. + * + * @param topics The topic names to delete. + * @param options The options to use when deleting the topics. + * @return The DeleteTopicsResult. + */ + DeleteTopicsResult deleteTopics(Collection topics, DeleteTopicsOptions options); + + /** + * List the topics available in the cluster with the default options. + *

+ * This is a convenience method for {@link #listTopics(ListTopicsOptions)} with default options. + * See the overload for more details. + * + * @return The ListTopicsResult. + */ + default ListTopicsResult listTopics() { + return listTopics(new ListTopicsOptions()); + } + + /** + * List the topics available in the cluster. + * + * @param options The options to use when listing the topics. + * @return The ListTopicsResult. + */ + ListTopicsResult listTopics(ListTopicsOptions options); + + /** + * Describe some topics in the cluster, with the default options. + *
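A hedged sketch of the `listTopics` flow declared above: the `ListTopicsResult` exposes a future of topic names. `ListTopicsOptions.listInternal(false)` mirrors the default behaviour and is shown only to illustrate the options overload.

```java
import java.util.Set;
import java.util.concurrent.ExecutionException;

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.ListTopicsOptions;

public final class ListTopicsExample {
    // Prints the visible topic names known to the cluster/database.
    static void printTopics(Admin admin) throws ExecutionException, InterruptedException {
        Set<String> names = admin.listTopics(new ListTopicsOptions().listInternal(false))
                                 .names()
                                 .get();
        names.forEach(System.out::println);
    }
}
```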

+ * This is a convenience method for {@link #describeTopics(Collection, DescribeTopicsOptions)} with + * default options. See the overload for more details. + * + * @param topicNames The names of the topics to describe. + * @return The DescribeTopicsResult. + */ + default DescribeTopicsResult describeTopics(Collection topicNames) { + return describeTopics(topicNames, new DescribeTopicsOptions()); + } + + /** + * Describe some topics in the cluster. + * + * @param topicNames The names of the topics to describe. + * @param options The options to use when describing the topic. + * @return The DescribeTopicsResult. + */ + DescribeTopicsResult describeTopics(Collection topicNames, DescribeTopicsOptions options); + + /** + * Get information about the nodes in the cluster, using the default options. + *

+ * This is a convenience method for {@link #describeCluster(DescribeClusterOptions)} with default options. + * See the overload for more details. + * + * @return The DescribeClusterResult. + */ + default DescribeClusterResult describeCluster() { + return describeCluster(new DescribeClusterOptions()); + } + + /** + * Get information about the nodes in the cluster. + * + * @param options The options to use when getting information about the cluster. + * @return The DescribeClusterResult. + */ + DescribeClusterResult describeCluster(DescribeClusterOptions options); + + /** + * This is a convenience method for {@link #describeAcls(AclBindingFilter, DescribeAclsOptions)} with + * default options. See the overload for more details. + *

+ * This operation is supported by brokers with version 0.11.0.0 or higher. + * + * @param filter The filter to use. + * @return The DeleteAclsResult. + */ + default DescribeAclsResult describeAcls(AclBindingFilter filter) { + return describeAcls(filter, new DescribeAclsOptions()); + } + + /** + * Lists access control lists (ACLs) according to the supplied filter. + *

+ * Note: it may take some time for changes made by {@code createAcls} or {@code deleteAcls} to be reflected + * in the output of {@code describeAcls}. + *

+ * This operation is supported by brokers with version 0.11.0.0 or higher. + * + * @param filter The filter to use. + * @param options The options to use when listing the ACLs. + * @return The DeleteAclsResult. + */ + DescribeAclsResult describeAcls(AclBindingFilter filter, DescribeAclsOptions options); + + /** + * This is a convenience method for {@link #createAcls(Collection, CreateAclsOptions)} with + * default options. See the overload for more details. + *

+ * This operation is supported by brokers with version 0.11.0.0 or higher. + * + * @param acls The ACLs to create + * @return The CreateAclsResult. + */ + default CreateAclsResult createAcls(Collection acls) { + return createAcls(acls, new CreateAclsOptions()); + } + + /** + * Creates access control lists (ACLs) which are bound to specific resources. + *

+ * This operation is not transactional so it may succeed for some ACLs while fail for others. + *

+ * If you attempt to add an ACL that duplicates an existing ACL, no error will be raised, but + * no changes will be made. + *

+ * This operation is supported by brokers with version 0.11.0.0 or higher. + * + * @param acls The ACLs to create + * @param options The options to use when creating the ACLs. + * @return The CreateAclsResult. + */ + CreateAclsResult createAcls(Collection acls, CreateAclsOptions options); + + /** + * This is a convenience method for {@link #deleteAcls(Collection, DeleteAclsOptions)} with default options. + * See the overload for more details. + *

+ * This operation is supported by brokers with version 0.11.0.0 or higher. + * + * @param filters The filters to use. + * @return The DeleteAclsResult. + */ + default DeleteAclsResult deleteAcls(Collection filters) { + return deleteAcls(filters, new DeleteAclsOptions()); + } + + /** + * Deletes access control lists (ACLs) according to the supplied filters. + *

+ * This operation is not transactional so it may succeed for some ACLs while fail for others. + *

+ * This operation is supported by brokers with version 0.11.0.0 or higher. + * + * @param filters The filters to use. + * @param options The options to use when deleting the ACLs. + * @return The DeleteAclsResult. + */ + DeleteAclsResult deleteAcls(Collection filters, DeleteAclsOptions options); + + + /** + * Get the configuration for the specified resources with the default options. + *

+ * This is a convenience method for {@link #describeConfigs(Collection, DescribeConfigsOptions)} with default options. + * See the overload for more details. + *

+ * This operation is supported by brokers with version 0.11.0.0 or higher. + * + * @param resources The resources (topic and broker resource types are currently supported) + * @return The DescribeConfigsResult + */ + default DescribeConfigsResult describeConfigs(Collection resources) { + return describeConfigs(resources, new DescribeConfigsOptions()); + } + + /** + * Get the configuration for the specified resources. + *

+ * The returned configuration includes default values and the isDefault() method can be used to distinguish them + * from user supplied values. + *

+ * The value of config entries where isSensitive() is true is always {@code null} so that sensitive information + * is not disclosed. + *

+ * Config entries where isReadOnly() is true cannot be updated. + *

+ * This operation is supported by brokers with version 0.11.0.0 or higher. + * + * @param resources The resources (topic and broker resource types are currently supported) + * @param options The options to use when describing configs + * @return The DescribeConfigsResult + */ + DescribeConfigsResult describeConfigs(Collection resources, DescribeConfigsOptions options); + + /** + * Update the configuration for the specified resources with the default options. + *
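A hedged sketch against the `describeConfigs` overloads documented above, using the inherited Kafka Admin API types (`ConfigResource`, `Config`). Whether the OKafka implementation supports every config operation is not established by this diff, so treat this as API shape only; the topic name is illustrative.

```java
import java.util.Collections;
import java.util.Map;
import java.util.concurrent.ExecutionException;

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.Config;
import org.apache.kafka.common.config.ConfigResource;

public final class DescribeTopicConfigExample {
    // Fetches the effective configuration of one topic resource and prints each entry.
    static void printTopicConfig(Admin admin) throws ExecutionException, InterruptedException {
        ConfigResource topic = new ConfigResource(ConfigResource.Type.TOPIC, "TXEQ");
        Map<ConfigResource, Config> configs =
                admin.describeConfigs(Collections.singleton(topic)).all().get();
        configs.get(topic).entries().forEach(entry ->
                System.out.println(entry.name() + " = " + entry.value()));
    }
}
```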

+ * This is a convenience method for {@link #alterConfigs(Map, AlterConfigsOptions)} with default options. + * See the overload for more details. + *

+ * This operation is supported by brokers with version 0.11.0.0 or higher. + * + * @param configs The resources with their configs (topic is the only resource type with configs that can + * be updated currently) + * @return The AlterConfigsResult + * @deprecated Since 2.3. Use {@link #incrementalAlterConfigs(Map)}. + */ + @Deprecated + default AlterConfigsResult alterConfigs(Map configs) { + return alterConfigs(configs, new AlterConfigsOptions()); + } + + /** + * Update the configuration for the specified resources with the default options. + *

+ * Updates are not transactional so they may succeed for some resources while fail for others. The configs for + * a particular resource are updated atomically. + *

+ * This operation is supported by brokers with version 0.11.0.0 or higher. + * + * @param configs The resources with their configs (topic is the only resource type with configs that can + * be updated currently) + * @param options The options to use when describing configs + * @return The AlterConfigsResult + * @deprecated Since 2.3. Use {@link #incrementalAlterConfigs(Map, AlterConfigsOptions)}. + */ + @Deprecated + AlterConfigsResult alterConfigs(Map configs, AlterConfigsOptions options); + + /** + * Incrementally updates the configuration for the specified resources with default options. + *

+ * This is a convenience method for {@link #incrementalAlterConfigs(Map, AlterConfigsOptions)} with default options. + * See the overload for more details. + *

+ * This operation is supported by brokers with version 2.3.0 or higher. + * + * @param configs The resources with their configs + * @return The AlterConfigsResult + */ + default AlterConfigsResult incrementalAlterConfigs(Map> configs) { + return incrementalAlterConfigs(configs, new AlterConfigsOptions()); + } + + /** + * Incrementally update the configuration for the specified resources. + *

+ * Updates are not transactional so they may succeed for some resources while fail for others. The configs for + * a particular resource are updated atomically. + *

+ * The following exceptions can be anticipated when calling {@code get()} on the futures obtained from + * the returned {@link AlterConfigsResult}: + *

+ * <ul>
+ *   <li>{@link org.apache.kafka.common.errors.ClusterAuthorizationException}
+ *   if the authenticated user didn't have alter access to the cluster.</li>
+ *   <li>{@link org.apache.kafka.common.errors.TopicAuthorizationException}
+ *   if the authenticated user didn't have alter access to the Topic.</li>
+ *   <li>{@link org.apache.kafka.common.errors.UnknownTopicOrPartitionException}
+ *   if the Topic doesn't exist.</li>
+ *   <li>{@link org.apache.kafka.common.errors.InvalidRequestException}
+ *   if the request details are invalid. e.g., a configuration key was specified more than once for a resource</li>
+ * </ul>
+ * <p>
+ * This operation is supported by brokers with version 2.3.0 or higher. + * + * @param configs The resources with their configs + * @param options The options to use when altering configs + * @return The AlterConfigsResult + */ + AlterConfigsResult incrementalAlterConfigs(Map> configs, AlterConfigsOptions options); + + /** + * Change the log directory for the specified replicas. If the replica does not exist on the broker, the result + * shows REPLICA_NOT_AVAILABLE for the given replica and the replica will be created in the given log directory on the + * broker when it is created later. If the replica already exists on the broker, the replica will be moved to the given + * log directory if it is not already there. For detailed result, inspect the returned {@link AlterReplicaLogDirsResult} instance. + *
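A hedged sketch of the `incrementalAlterConfigs` shape documented above. The config key and value are illustrative (seven days expressed in milliseconds); support for altering configs in the OKafka implementation is not established by this diff.

```java
import java.util.Collection;
import java.util.Collections;
import java.util.Map;
import java.util.concurrent.ExecutionException;

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AlterConfigOp;
import org.apache.kafka.clients.admin.ConfigEntry;
import org.apache.kafka.common.config.ConfigResource;

public final class IncrementalAlterConfigExample {
    // Sets a single config entry on a topic resource; key and value are illustrative.
    static void setRetention(Admin admin) throws ExecutionException, InterruptedException {
        ConfigResource topic = new ConfigResource(ConfigResource.Type.TOPIC, "TXEQ");
        Collection<AlterConfigOp> ops = Collections.singleton(
                new AlterConfigOp(new ConfigEntry("retention.ms", "604800000"), AlterConfigOp.OpType.SET));
        Map<ConfigResource, Collection<AlterConfigOp>> updates = Collections.singletonMap(topic, ops);
        admin.incrementalAlterConfigs(updates).all().get();
    }
}
```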

+ * This operation is not transactional so it may succeed for some replicas while fail for others. + *

+ * This is a convenience method for {@link #alterReplicaLogDirs(Map, AlterReplicaLogDirsOptions)} with default options. + * See the overload for more details. + *

+ * This operation is supported by brokers with version 1.1.0 or higher. + * + * @param replicaAssignment The replicas with their log directory absolute path + * @return The AlterReplicaLogDirsResult + */ + default AlterReplicaLogDirsResult alterReplicaLogDirs(Map replicaAssignment) { + return alterReplicaLogDirs(replicaAssignment, new AlterReplicaLogDirsOptions()); + } + + /** + * Change the log directory for the specified replicas. If the replica does not exist on the broker, the result + * shows REPLICA_NOT_AVAILABLE for the given replica and the replica will be created in the given log directory on the + * broker when it is created later. If the replica already exists on the broker, the replica will be moved to the given + * log directory if it is not already there. For detailed result, inspect the returned {@link AlterReplicaLogDirsResult} instance. + *

+ * This operation is not transactional so it may succeed for some replicas while fail for others. + *

+ * This operation is supported by brokers with version 1.1.0 or higher. + * + * @param replicaAssignment The replicas with their log directory absolute path + * @param options The options to use when changing replica dir + * @return The AlterReplicaLogDirsResult + */ + AlterReplicaLogDirsResult alterReplicaLogDirs(Map replicaAssignment, + AlterReplicaLogDirsOptions options); + + /** + * Query the information of all log directories on the given set of brokers + *

+ * This is a convenience method for {@link #describeLogDirs(Collection, DescribeLogDirsOptions)} with default options. + * See the overload for more details. + *

+ * This operation is supported by brokers with version 1.0.0 or higher. + * + * @param brokers A list of brokers + * @return The DescribeLogDirsResult + */ + default DescribeLogDirsResult describeLogDirs(Collection brokers) { + return describeLogDirs(brokers, new DescribeLogDirsOptions()); + } + + /** + * Query the information of all log directories on the given set of brokers + *

+ * This operation is supported by brokers with version 1.0.0 or higher. + * + * @param brokers A list of brokers + * @param options The options to use when querying log dir info + * @return The DescribeLogDirsResult + */ + DescribeLogDirsResult describeLogDirs(Collection brokers, DescribeLogDirsOptions options); + + /** + * Query the replica log directory information for the specified replicas. + *

+ * This is a convenience method for {@link #describeReplicaLogDirs(Collection, DescribeReplicaLogDirsOptions)} + * with default options. See the overload for more details. + *

+ * This operation is supported by brokers with version 1.0.0 or higher. + * + * @param replicas The replicas to query + * @return The DescribeReplicaLogDirsResult + */ + default DescribeReplicaLogDirsResult describeReplicaLogDirs(Collection replicas) { + return describeReplicaLogDirs(replicas, new DescribeReplicaLogDirsOptions()); + } + + /** + * Query the replica log directory information for the specified replicas. + *

+ * This operation is supported by brokers with version 1.0.0 or higher. + * + * @param replicas The replicas to query + * @param options The options to use when querying replica log dir info + * @return The DescribeReplicaLogDirsResult + */ + DescribeReplicaLogDirsResult describeReplicaLogDirs(Collection replicas, DescribeReplicaLogDirsOptions options); + + /** + * Increase the number of partitions of the topics given as the keys of {@code newPartitions} + * according to the corresponding values. If partitions are increased for a topic that has a key, + * the partition logic or ordering of the messages will be affected. + *

+ * This is a convenience method for {@link #createPartitions(Map, CreatePartitionsOptions)} with default options. + * See the overload for more details. + * + * @param newPartitions The topics which should have new partitions created, and corresponding parameters + * for the created partitions. + * @return The CreatePartitionsResult. + */ + default CreatePartitionsResult createPartitions(Map newPartitions) { + return createPartitions(newPartitions, new CreatePartitionsOptions()); + } + + /** + * Increase the number of partitions of the topics given as the keys of {@code newPartitions} + * according to the corresponding values. If partitions are increased for a topic that has a key, + * the partition logic or ordering of the messages will be affected. + *

+ * This operation is not transactional so it may succeed for some topics while fail for others. + *

+ * It may take several seconds after this method returns + * success for all the brokers to become aware that the partitions have been created. + * During this time, {@link #describeTopics(Collection)} + * may not return information about the new partitions. + *

+ * This operation is supported by brokers with version 1.0.0 or higher. + *

+ * The following exceptions can be anticipated when calling {@code get()} on the futures obtained from the + * {@link CreatePartitionsResult#values() values()} method of the returned {@link CreatePartitionsResult} + *

    + *
  • {@link org.apache.kafka.common.errors.AuthorizationException} + * if the authenticated user is not authorized to alter the topic
  • + *
  • {@link org.apache.kafka.common.errors.TimeoutException} + * if the request was not completed within the given {@link CreatePartitionsOptions#timeoutMs()}.
  • + *
  • {@link org.apache.kafka.common.errors.ReassignmentInProgressException} + * if a partition reassignment is currently in progress
  • + *
  • {@link org.apache.kafka.common.errors.BrokerNotAvailableException} + * if the requested {@link NewPartitions#assignments()} contain a broker that is currently unavailable.
  • + *
  • {@link org.apache.kafka.common.errors.InvalidReplicationFactorException} + * if no {@link NewPartitions#assignments()} are given and it is impossible for the broker to assign + * replicas with the topic's replication factor.
  • + *
  • Subclasses of {@link org.apache.kafka.common.KafkaException} + * if the request is invalid in some way.
  • + *
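+ * <p>A minimal sketch, assuming an existing client instance named {@code admin}; the topic name and
+ * partition count are placeholders:
+ * <pre>{@code
+ * // Grow the example topic "TXEQ" to 10 partitions and let the broker choose the new assignments.
+ * admin.createPartitions(Collections.singletonMap("TXEQ", NewPartitions.increaseTo(10)))
+ *      .all()
+ *      .get();
+ * }</pre>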
+ * + * @param newPartitions The topics which should have new partitions created, and corresponding parameters + * for the created partitions. + * @param options The options to use when creating the new partitions. + * @return The CreatePartitionsResult. + */ + CreatePartitionsResult createPartitions(Map newPartitions, + CreatePartitionsOptions options); + + /** + * Delete records whose offset is smaller than the given offset of the corresponding partition. + *

+ * This is a convenience method for {@link #deleteRecords(Map, DeleteRecordsOptions)} with default options. + * See the overload for more details. + *
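+ * <p>A minimal sketch, assuming an existing client instance named {@code admin}; the topic, partition
+ * and offset are placeholders:
+ * <pre>{@code
+ * // Delete everything before offset 100 in partition 0 of the example topic "TXEQ".
+ * admin.deleteRecords(Collections.singletonMap(new TopicPartition("TXEQ", 0), RecordsToDelete.beforeOffset(100L)))
+ *      .all()
+ *      .get();
+ * }</pre>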

+ * This operation is supported by brokers with version 0.11.0.0 or higher. + * + * @param recordsToDelete The topic partitions and related offsets from which records deletion starts. + * @return The DeleteRecordsResult. + */ + default DeleteRecordsResult deleteRecords(Map recordsToDelete) { + return deleteRecords(recordsToDelete, new DeleteRecordsOptions()); + } + + /** + * Delete records whose offset is smaller than the given offset of the corresponding partition. + *

+ * This operation is supported by brokers with version 0.11.0.0 or higher. + * + * @param recordsToDelete The topic partitions and related offsets from which records deletion starts. + * @param options The options to use when deleting records. + * @return The DeleteRecordsResult. + */ + DeleteRecordsResult deleteRecords(Map recordsToDelete, + DeleteRecordsOptions options); + + /** + * Create a Delegation Token. + *

+ * This is a convenience method for {@link #createDelegationToken(CreateDelegationTokenOptions)} with default options. + * See the overload for more details. + * + * @return The CreateDelegationTokenResult. + */ + default CreateDelegationTokenResult createDelegationToken() { + return createDelegationToken(new CreateDelegationTokenOptions()); + } + + + /** + * Create a Delegation Token. + *

+ * This operation is supported by brokers with version 1.1.0 or higher. + *

+ * The following exceptions can be anticipated when calling {@code get()} on the futures obtained from the + * {@link CreateDelegationTokenResult#delegationToken() delegationToken()} method of the returned {@link CreateDelegationTokenResult} + *

    + *
  • {@link org.apache.kafka.common.errors.UnsupportedByAuthenticationException} + * If the request is sent on PLAINTEXT/1-way SSL channels or delegation token authenticated channels.
  • + *
  • {@link org.apache.kafka.common.errors.InvalidPrincipalTypeException} + * if the renewer's principal type is not supported.
  • + *
  • {@link org.apache.kafka.common.errors.DelegationTokenDisabledException} + * if the delegation token feature is disabled.
  • + *
  • {@link org.apache.kafka.common.errors.TimeoutException} + * if the request was not completed within the given {@link CreateDelegationTokenOptions#timeoutMs()}.
  • + *
+ * + * @param options The options to use when creating a delegation token. + * @return The CreateDelegationTokenResult. + */ + CreateDelegationTokenResult createDelegationToken(CreateDelegationTokenOptions options); + + + /** + * Renew a Delegation Token. + *

+ * This is a convenience method for {@link #renewDelegationToken(byte[], RenewDelegationTokenOptions)} with default options. + * See the overload for more details. + * + * @param hmac HMAC of the Delegation token + * @return The RenewDelegationTokenResult. + */ + default RenewDelegationTokenResult renewDelegationToken(byte[] hmac) { + return renewDelegationToken(hmac, new RenewDelegationTokenOptions()); + } + + /** + * Renew a Delegation Token. + *

+ * This operation is supported by brokers with version 1.1.0 or higher. + *

+ * The following exceptions can be anticipated when calling {@code get()} on the futures obtained from the + * {@link RenewDelegationTokenResult#expiryTimestamp() expiryTimestamp()} method of the returned {@link RenewDelegationTokenResult} + *

    + *
  • {@link org.apache.kafka.common.errors.UnsupportedByAuthenticationException} + * If the request is sent on PLAINTEXT/1-way SSL channels or delegation token authenticated channels.
  • + *
  • {@link org.apache.kafka.common.errors.DelegationTokenDisabledException} + * if the delegation token feature is disabled.
  • + *
  • {@link org.apache.kafka.common.errors.DelegationTokenNotFoundException} + * if the delegation token is not found on server.
  • + *
  • {@link org.apache.kafka.common.errors.DelegationTokenOwnerMismatchException} + * if the authenticated user is not owner/renewer of the token.
  • + *
  • {@link org.apache.kafka.common.errors.DelegationTokenExpiredException} + * if the delegation token is expired.
  • + *
  • {@link org.apache.kafka.common.errors.TimeoutException} + * if the request was not completed within the given {@link RenewDelegationTokenOptions#timeoutMs()}.
  • + *
+ * + * @param hmac HMAC of the Delegation token + * @param options The options to use when renewing delegation token. + * @return The RenewDelegationTokenResult. + */ + RenewDelegationTokenResult renewDelegationToken(byte[] hmac, RenewDelegationTokenOptions options); + + /** + * Expire a Delegation Token. + *

+ * This is a convenience method for {@link #expireDelegationToken(byte[], ExpireDelegationTokenOptions)} with default options. + * This will expire the token immediately. See the overload for more details. + * + * @param hmac HMAC of the Delegation token + * @return The ExpireDelegationTokenResult. + */ + default ExpireDelegationTokenResult expireDelegationToken(byte[] hmac) { + return expireDelegationToken(hmac, new ExpireDelegationTokenOptions()); + } + + /** + * Expire a Delegation Token. + *

+ * This operation is supported by brokers with version 1.1.0 or higher. + *

+ * The following exceptions can be anticipated when calling {@code get()} on the futures obtained from the + * {@link ExpireDelegationTokenResult#expiryTimestamp() expiryTimestamp()} method of the returned {@link ExpireDelegationTokenResult} + *

    + *
  • {@link org.apache.kafka.common.errors.UnsupportedByAuthenticationException} + * If the request is sent on PLAINTEXT/1-way SSL channels or delegation token authenticated channels.
  • + *
  • {@link org.apache.kafka.common.errors.DelegationTokenDisabledException} + * if the delegation token feature is disabled.
  • + *
  • {@link org.apache.kafka.common.errors.DelegationTokenNotFoundException} + * if the delegation token is not found on server.
  • + *
  • {@link org.apache.kafka.common.errors.DelegationTokenOwnerMismatchException} + * if the authenticated user is not owner/renewer of the requested token.
  • + *
  • {@link org.apache.kafka.common.errors.DelegationTokenExpiredException} + * if the delegation token is expired.
  • + *
  • {@link org.apache.kafka.common.errors.TimeoutException} + * if the request was not completed within the given {@link ExpireDelegationTokenOptions#timeoutMs()}.
  • + *
+ * + * @param hmac HMAC of the Delegation token + * @param options The options to use when expiring delegation token. + * @return The ExpireDelegationTokenResult. + */ + ExpireDelegationTokenResult expireDelegationToken(byte[] hmac, ExpireDelegationTokenOptions options); + + /** + * Describe the Delegation Tokens. + *

+ * This is a convenience method for {@link #describeDelegationToken(DescribeDelegationTokenOptions)} with default options. + * This will return all the tokens owned by the user and the tokens for which the user has Describe permission. See the overload for more details. + * + * @return The DescribeDelegationTokenResult. + */ + default DescribeDelegationTokenResult describeDelegationToken() { + return describeDelegationToken(new DescribeDelegationTokenOptions()); + } + + /** + * Describe the Delegation Tokens. + *

+ * This operation is supported by brokers with version 1.1.0 or higher. + *

+ * The following exceptions can be anticipated when calling {@code get()} on the futures obtained from the + * {@link DescribeDelegationTokenResult#delegationTokens() delegationTokens()} method of the returned {@link DescribeDelegationTokenResult} + *

    + *
  • {@link org.apache.kafka.common.errors.UnsupportedByAuthenticationException} + * If the request is sent on PLAINTEXT/1-way SSL channels or delegation token authenticated channels.
  • + *
  • {@link org.apache.kafka.common.errors.DelegationTokenDisabledException} + * if the delegation token feature is disabled.
  • + *
  • {@link org.apache.kafka.common.errors.TimeoutException} + * if the request was not completed within the given {@link DescribeDelegationTokenOptions#timeoutMs()}.
  • + *
+ * + * @param options The options to use when describing delegation tokens. + * @return The DescribeDelegationTokenResult. + */ + DescribeDelegationTokenResult describeDelegationToken(DescribeDelegationTokenOptions options); + + /** + * Describe some group IDs in the cluster. + * + * @param groupIds The IDs of the groups to describe. + * @param options The options to use when describing the groups. + * @return The DescribeConsumerGroupResult. + */ + DescribeConsumerGroupsResult describeConsumerGroups(Collection groupIds, + DescribeConsumerGroupsOptions options); + + /** + * Describe some group IDs in the cluster, with the default options. + *

+ * This is a convenience method for {@link #describeConsumerGroups(Collection, DescribeConsumerGroupsOptions)} + * with default options. See the overload for more details. + * + * @param groupIds The IDs of the groups to describe. + * @return The DescribeConsumerGroupResult. + */ + default DescribeConsumerGroupsResult describeConsumerGroups(Collection groupIds) { + return describeConsumerGroups(groupIds, new DescribeConsumerGroupsOptions()); + } + + /** + * List the consumer groups available in the cluster. + * + * @param options The options to use when listing the consumer groups. + * @return The ListGroupsResult. + */ + ListConsumerGroupsResult listConsumerGroups(ListConsumerGroupsOptions options); + + /** + * List the consumer groups available in the cluster with the default options. + *

+ * This is a convenience method for {@link #listConsumerGroups(ListConsumerGroupsOptions)} with default options. + * See the overload for more details. + * + * @return The ListGroupsResult. + */ + default ListConsumerGroupsResult listConsumerGroups() { + return listConsumerGroups(new ListConsumerGroupsOptions()); + } + + /** + * List the consumer group offsets available in the cluster. + * + * @param options The options to use when listing the consumer group offsets. + * @return The ListGroupOffsetsResult + */ + ListConsumerGroupOffsetsResult listConsumerGroupOffsets(String groupId, ListConsumerGroupOffsetsOptions options); + + /** + * List the consumer group offsets available in the cluster with the default options. + *

+ * This is a convenience method for {@link #listConsumerGroupOffsets(String, ListConsumerGroupOffsetsOptions)} with default options. + * + * @return The ListGroupOffsetsResult. + */ + default ListConsumerGroupOffsetsResult listConsumerGroupOffsets(String groupId) { + return listConsumerGroupOffsets(groupId, new ListConsumerGroupOffsetsOptions()); + } + + /** + * Delete consumer groups from the cluster. + * + * @param options The options to use when deleting a consumer group. + * @return The DeletConsumerGroupResult. + */ + DeleteConsumerGroupsResult deleteConsumerGroups(Collection groupIds, DeleteConsumerGroupsOptions options); + + /** + * Delete consumer groups from the cluster with the default options. + * + * @return The DeleteConsumerGroupResult. + */ + default DeleteConsumerGroupsResult deleteConsumerGroups(Collection groupIds) { + return deleteConsumerGroups(groupIds, new DeleteConsumerGroupsOptions()); + } + + /** + * Delete committed offsets for a set of partitions in a consumer group. This will + * succeed at the partition level only if the group is not actively subscribed + * to the corresponding topic. + * + * @param options The options to use when deleting offsets in a consumer group. + * @return The DeleteConsumerGroupOffsetsResult. + */ + DeleteConsumerGroupOffsetsResult deleteConsumerGroupOffsets(String groupId, + Set partitions, + DeleteConsumerGroupOffsetsOptions options); + + /** + * Delete committed offsets for a set of partitions in a consumer group with the default + * options. This will succeed at the partition level only if the group is not actively + * subscribed to the corresponding topic. + * + * @return The DeleteConsumerGroupOffsetsResult. + */ + default DeleteConsumerGroupOffsetsResult deleteConsumerGroupOffsets(String groupId, Set partitions) { + return deleteConsumerGroupOffsets(groupId, partitions, new DeleteConsumerGroupOffsetsOptions()); + } + + /** + * Elect the preferred replica as leader for topic partitions. + *

+ * This is a convenience method for {@link #electLeaders(ElectionType, Set, ElectLeadersOptions)} + * with preferred election type and default options. + *

+ * This operation is supported by brokers with version 2.2.0 or higher. + * + * @param partitions The partitions for which the preferred leader should be elected. + * @return The ElectPreferredLeadersResult. + * @deprecated Since 2.4.0. Use {@link #electLeaders(ElectionType, Set)}. + */ + @Deprecated + default ElectLeadersResult electPreferredLeaders(Collection partitions) { + return electPreferredLeaders(partitions, new ElectLeadersOptions()); + } + + /** + * Elect the preferred replica as leader for topic partitions. + *

+ * This is a convenience method for {@link #electLeaders(ElectionType, Set, ElectLeadersOptions)} + * with preferred election type. + *

+ * This operation is supported by brokers with version 2.2.0 or higher. + * + * @param partitions The partitions for which the preferred leader should be elected. + * @param options The options to use when electing the preferred leaders. + * @return The ElectPreferredLeadersResult. + * @deprecated Since 2.4.0. Use {@link #electLeaders(ElectionType, Set, ElectLeadersOptions)}. + */ + @Deprecated + default ElectLeadersResult electPreferredLeaders(Collection partitions, + ElectLeadersOptions options) { + + throw new FeatureNotSupportedException("This feature is not supported for this release."); + /* + final ElectLeadersOptions newOptions = new ElectLeadersOptions(); + newOptions.timeoutMs(options.timeoutMs()); + final Set topicPartitions = partitions == null ? null : new HashSet<>(partitions); + + return new ElectPreferredLeadersResult(electLeaders(ElectionType.PREFERRED, topicPartitions, newOptions)); + */ + } + + /** + * Elect a replica as leader for topic partitions. + *

+ * This is a convenience method for {@link #electLeaders(ElectionType, Set, ElectLeadersOptions)} + * with default options. + * + * @param electionType The type of election to conduct. + * @param partitions The topics and partitions for which to conduct elections. + * @return The ElectLeadersResult. + */ + default ElectLeadersResult electLeaders(ElectionType electionType, Set partitions) { + return electLeaders(electionType, partitions, new ElectLeadersOptions()); + } + + /** + * Elect a replica as leader for the given {@code partitions}, or for all partitions if the argument + * to {@code partitions} is null. + *

+ * This operation is not transactional so it may succeed for some partitions while fail for others. + *

+ * It may take several seconds after this method returns success for all the brokers in the cluster + * to become aware that the partitions have new leaders. During this time, + * {@link #describeTopics(Collection)} may not return information about the partitions' + * new leaders. + *

+ * This operation is supported by brokers with version 2.2.0 or later if preferred election is used; + * otherwise the brokers must be 2.4.0 or higher. + *

+ * The following exceptions can be anticipated when calling {@code get()} on the future obtained + * from the returned {@link ElectLeadersResult}: + *

    + *
  • {@link org.apache.kafka.common.errors.ClusterAuthorizationException} + * if the authenticated user didn't have alter access to the cluster.
  • + *
  • {@link org.apache.kafka.common.errors.UnknownTopicOrPartitionException} + * if the topic or partition did not exist within the cluster.
  • + *
  • {@link org.apache.kafka.common.errors.InvalidTopicException} + * if the topic was already queued for deletion.
  • + *
  • {@link org.apache.kafka.common.errors.NotControllerException} + * if the request was sent to a broker that was not the controller for the cluster.
  • + *
  • {@link org.apache.kafka.common.errors.TimeoutException} + * if the request timed out before the election was complete.
  • + *
  • {@link org.apache.kafka.common.errors.LeaderNotAvailableException} + * if the preferred leader was not alive or not in the ISR.
  • + *
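+ * <p>A minimal sketch, assuming an existing client instance named {@code admin}; the topic partition is a placeholder:
+ * <pre>{@code
+ * // Trigger preferred-leader election for partition 0 of the example topic "TXEQ".
+ * Set<TopicPartition> partitions = Collections.singleton(new TopicPartition("TXEQ", 0));
+ * admin.electLeaders(ElectionType.PREFERRED, partitions).partitions().get();
+ * }</pre>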
+ * + * @param electionType The type of election to conduct. + * @param partitions The topics and partitions for which to conduct elections. + * @param options The options to use when electing the leaders. + * @return The ElectLeadersResult. + */ + ElectLeadersResult electLeaders( + ElectionType electionType, + Set partitions, + ElectLeadersOptions options); + + + /** + * Change the reassignments for one or more partitions. + * Providing an empty Optional (e.g via {@link Optional#empty()}) will revert the reassignment for the associated partition. + * + * This is a convenience method for {@link #alterPartitionReassignments(Map, AlterPartitionReassignmentsOptions)} + * with default options. See the overload for more details. + */ + default AlterPartitionReassignmentsResult alterPartitionReassignments( + Map> reassignments) { + return alterPartitionReassignments(reassignments, new AlterPartitionReassignmentsOptions()); + } + + /** + * Change the reassignments for one or more partitions. + * Providing an empty Optional (e.g via {@link Optional#empty()}) will revert the reassignment for the associated partition. + * + *

The following exceptions can be anticipated when calling {@code get()} on the futures obtained from + * the returned {@code AlterPartitionReassignmentsResult}:

+ *
    + *
  • {@link org.apache.kafka.common.errors.ClusterAuthorizationException} + * If the authenticated user didn't have alter access to the cluster.
  • + *
  • {@link org.apache.kafka.common.errors.UnknownTopicOrPartitionException} + * If the topic or partition does not exist within the cluster.
  • + *
  • {@link org.apache.kafka.common.errors.TimeoutException} + * if the request timed out before the controller could record the new assignments.
  • + *
  • {@link org.apache.kafka.common.errors.InvalidReplicaAssignmentException} + * If the specified assignment was not valid.
  • + *
  • {@link org.apache.kafka.common.errors.NoReassignmentInProgressException} + * If there was an attempt to cancel a reassignment for a partition which was not being reassigned.
  • + *
+ * + * @param reassignments The reassignments to add, modify, or remove. See {@link NewPartitionReassignment}. + * @param options The options to use. + * @return The result. + */ + AlterPartitionReassignmentsResult alterPartitionReassignments( + Map> reassignments, + AlterPartitionReassignmentsOptions options); + + + /** + * List all of the current partition reassignments + * + * This is a convenience method for {@link #listPartitionReassignments(ListPartitionReassignmentsOptions)} + * with default options. See the overload for more details. + */ + default ListPartitionReassignmentsResult listPartitionReassignments() { + return listPartitionReassignments(new ListPartitionReassignmentsOptions()); + } + + /** + * List the current reassignments for the given partitions + * + * This is a convenience method for {@link #listPartitionReassignments(Set, ListPartitionReassignmentsOptions)} + * with default options. See the overload for more details. + */ + default ListPartitionReassignmentsResult listPartitionReassignments(Set partitions) { + return listPartitionReassignments(partitions, new ListPartitionReassignmentsOptions()); + } + + /** + * List the current reassignments for the given partitions + * + *

The following exceptions can be anticipated when calling {@code get()} on the futures obtained from + * the returned {@code ListPartitionReassignmentsResult}:

+ *
    + *
  • {@link org.apache.kafka.common.errors.ClusterAuthorizationException} + * If the authenticated user doesn't have alter access to the cluster.
  • + *
  • {@link org.apache.kafka.common.errors.UnknownTopicOrPartitionException} + * If a given topic or partition does not exist.
  • + *
  • {@link org.apache.kafka.common.errors.TimeoutException} + * If the request timed out before the controller could list the current reassignments.
  • + *
+ * + * @param partitions The topic partitions to list reassignments for. + * @param options The options to use. + * @return The result. + */ + default ListPartitionReassignmentsResult listPartitionReassignments( + Set partitions, + ListPartitionReassignmentsOptions options) { + return listPartitionReassignments(Optional.of(partitions), options); + } + + /** + * List all of the current partition reassignments + * + *

The following exceptions can be anticipated when calling {@code get()} on the futures obtained from + * the returned {@code ListPartitionReassignmentsResult}:

+ *
    + *
  • {@link org.apache.kafka.common.errors.ClusterAuthorizationException} + * If the authenticated user doesn't have alter access to the cluster.
  • + *
  • {@link org.apache.kafka.common.errors.UnknownTopicOrPartitionException} + * If a given topic or partition does not exist.
  • + *
  • {@link org.apache.kafka.common.errors.TimeoutException} + * If the request timed out before the controller could list the current reassignments.
  • + *
+ * + * @param options The options to use. + * @return The result. + */ + default ListPartitionReassignmentsResult listPartitionReassignments(ListPartitionReassignmentsOptions options) { + return listPartitionReassignments(Optional.empty(), options); + } + + /** + * @param partitions the partitions we want to get reassignment for, or an empty optional if we want to get the reassignments for all partitions in the cluster + * @param options The options to use. + * @return The result. + */ + ListPartitionReassignmentsResult listPartitionReassignments(Optional> partitions, + ListPartitionReassignmentsOptions options); + + /** + * Remove members from the consumer group by given member identities. + *

+ * For possible error codes, refer to {@link LeaveGroupResponse}. + * + * @param groupId The ID of the group to remove member from. + * @param options The options to carry removing members' information. + * @return The MembershipChangeResult. + */ + RemoveMembersFromConsumerGroupResult removeMembersFromConsumerGroup(String groupId, RemoveMembersFromConsumerGroupOptions options); + + /** + *

Alters offsets for the specified group. In order to succeed, the group must be empty. + * + *

This is a convenience method for {@link #alterConsumerGroupOffsets(String, Map, AlterConsumerGroupOffsetsOptions)} with default options. + * See the overload for more details. + * + * @param groupId The group for which to alter offsets. + * @param offsets A map of offsets by partition with associated metadata. + * @return The AlterOffsetsResult. + */ + default AlterConsumerGroupOffsetsResult alterConsumerGroupOffsets(String groupId, Map offsets) { + return alterConsumerGroupOffsets(groupId, offsets, new AlterConsumerGroupOffsetsOptions()); + } + + /** + *

Alters offsets for the specified group. In order to succeed, the group must be empty. + * + *
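+ * <p>A minimal sketch, assuming an existing client instance named {@code admin}; the group id, topic
+ * partition and offset are placeholders:
+ * <pre>{@code
+ * Map<TopicPartition, OffsetAndMetadata> offsets =
+ *     Collections.singletonMap(new TopicPartition("TXEQ", 0), new OffsetAndMetadata(42L)); // example values
+ * admin.alterConsumerGroupOffsets("example-group", offsets).all().get();
+ * }</pre>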

This operation is not transactional so it may succeed for some partitions while fail for others. + * + * @param groupId The group for which to alter offsets. + * @param offsets A map of offsets by partition with associated metadata. Partitions not specified in the map are ignored. + * @param options The options to use when altering the offsets. + * @return The AlterOffsetsResult. + */ + AlterConsumerGroupOffsetsResult alterConsumerGroupOffsets(String groupId, Map offsets, AlterConsumerGroupOffsetsOptions options); + + /** + *

List offsets for the specified partitions and OffsetSpec. This operation makes it possible to find + * the beginning offset, end offset, as well as the offset matching a timestamp in partitions. + *
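+ * <p>A minimal sketch, assuming an existing client instance named {@code admin}; the topic partition is a placeholder:
+ * <pre>{@code
+ * TopicPartition tp = new TopicPartition("TXEQ", 0); // example partition
+ * ListOffsetsResult result = admin.listOffsets(Collections.singletonMap(tp, OffsetSpec.latest()));
+ * long endOffset = result.partitionResult(tp).get().offset();
+ * }</pre>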

This is a convenience method for {@link #listOffsets(Map, ListOffsetsOptions)} + * + * @param topicPartitionOffsets The mapping from partition to the OffsetSpec to look up. + * @return The ListOffsetsResult. + */ + default ListOffsetsResult listOffsets(Map topicPartitionOffsets) { + return listOffsets(topicPartitionOffsets, new ListOffsetsOptions()); + } + + /** + *

List offsets for the specified partitions. This operation makes it possible to find + * the beginning offset, end offset, as well as the offset matching a timestamp in partitions. + * + * @param topicPartitionOffsets The mapping from partition to the OffsetSpec to look up. + * @param options The options to use when retrieving the offsets + * @return The ListOffsetsResult. + */ + ListOffsetsResult listOffsets(Map topicPartitionOffsets, ListOffsetsOptions options); + + /** + * Describes all entities matching the provided filter that have at least one client quota configuration + * value defined. + *

+ * This is a convenience method for {@link #describeClientQuotas(ClientQuotaFilter, DescribeClientQuotasOptions)} + * with default options. See the overload for more details. + *

+ * This operation is supported by brokers with version 2.6.0 or higher. + * + * @param filter the filter to apply to match entities + * @return the DescribeClientQuotasResult containing the result + */ + default DescribeClientQuotasResult describeClientQuotas(ClientQuotaFilter filter) { + return describeClientQuotas(filter, new DescribeClientQuotasOptions()); + } + + /** + * Describes all entities matching the provided filter that have at least one client quota configuration + * value defined. + *

+ * The following exceptions can be anticipated when calling {@code get()} on the future from the + * returned {@link DescribeClientQuotasResult}: + *

    + *
  • {@link org.apache.kafka.common.errors.ClusterAuthorizationException} + * If the authenticated user didn't have describe access to the cluster.
  • + *
  • {@link org.apache.kafka.common.errors.InvalidRequestException} + * If the request details are invalid. e.g., an invalid entity type was specified.
  • + *
  • {@link org.apache.kafka.common.errors.TimeoutException} + * If the request timed out before the describe could finish.
  • + *
+ *

+ * This operation is supported by brokers with version 2.6.0 or higher. + * + * @param filter the filter to apply to match entities + * @param options the options to use + * @return the DescribeClientQuotasResult containing the result + */ + DescribeClientQuotasResult describeClientQuotas(ClientQuotaFilter filter, DescribeClientQuotasOptions options); + + /** + * Alters client quota configurations with the specified alterations. + *

+ * This is a convenience method for {@link #alterClientQuotas(Collection, AlterClientQuotasOptions)} + * with default options. See the overload for more details. + *

+ * This operation is supported by brokers with version 2.6.0 or higher. + * + * @param entries the alterations to perform + * @return the AlterClientQuotasResult containing the result + */ + default AlterClientQuotasResult alterClientQuotas(Collection entries) { + return alterClientQuotas(entries, new AlterClientQuotasOptions()); + } + + /** + * Alters client quota configurations with the specified alterations. + *

+ * Alterations for a single entity are atomic, but atomicity across entities is not guaranteed. The resulting + * per-entity error code should be evaluated to resolve the success or failure of all updates. + *

+ * The following exceptions can be anticipated when calling {@code get()} on the futures obtained from + * the returned {@link AlterClientQuotasResult}: + *

    + *
  • {@link org.apache.kafka.common.errors.ClusterAuthorizationException} + * If the authenticated user didn't have alter access to the cluster.
  • + *
  • {@link org.apache.kafka.common.errors.InvalidRequestException} + * If the request details are invalid. e.g., a configuration key was specified more than once for an entity.
  • + *
  • {@link org.apache.kafka.common.errors.TimeoutException} + * If the request timed out before the alterations could finish. It cannot be guaranteed whether the update + * succeeded or not.
  • + *
+ *

+ * This operation is supported by brokers with version 2.6.0 or higher. + * + * @param entries the alterations to perform + * @return the AlterClientQuotasResult containing the result + */ + AlterClientQuotasResult alterClientQuotas(Collection entries, AlterClientQuotasOptions options); + + /** + * Describe all SASL/SCRAM credentials. + * + *

This is a convenience method for {@link #describeUserScramCredentials(List, DescribeUserScramCredentialsOptions)} + * + * @return The DescribeUserScramCredentialsResult. + */ + default DescribeUserScramCredentialsResult describeUserScramCredentials() { + return describeUserScramCredentials(null, new DescribeUserScramCredentialsOptions()); + } + + /** + * Describe SASL/SCRAM credentials for the given users. + * + *

This is a convenience method for {@link #describeUserScramCredentials(List, DescribeUserScramCredentialsOptions)} + * + * @param users the users for which credentials are to be described; all users' credentials are described if null + * or empty. + * @return The DescribeUserScramCredentialsResult. + */ + default DescribeUserScramCredentialsResult describeUserScramCredentials(List users) { + return describeUserScramCredentials(users, new DescribeUserScramCredentialsOptions()); + } + + /** + * Describe SASL/SCRAM credentials. + *

+ * The following exceptions can be anticipated when calling {@code get()} on the futures from the + * returned {@link DescribeUserScramCredentialsResult}: + *

    + *
  • {@link org.apache.kafka.common.errors.ClusterAuthorizationException} + * If the authenticated user didn't have describe access to the cluster.
  • + *
  • {@link org.apache.kafka.common.errors.ResourceNotFoundException} + * If the user did not exist/had no SCRAM credentials.
  • + *
  • {@link org.apache.kafka.common.errors.DuplicateResourceException} + * If the user was requested to be described more than once in the original request.
  • + *
  • {@link org.apache.kafka.common.errors.TimeoutException} + * If the request timed out before the describe operation could finish.
  • + *
+ *

+ * This operation is supported by brokers with version 2.7.0 or higher. + * + * @param users the users for which credentials are to be described; all users' credentials are described if null + * or empty. + * @param options The options to use when describing the credentials + * @return The DescribeUserScramCredentialsResult. + */ + DescribeUserScramCredentialsResult describeUserScramCredentials(List users, DescribeUserScramCredentialsOptions options); + + /** + * Alter SASL/SCRAM credentials for the given users. + * + *

This is a convenience method for {@link #alterUserScramCredentials(List, AlterUserScramCredentialsOptions)} + * + * @param alterations the alterations to be applied + * @return The AlterUserScramCredentialsResult. + */ + default AlterUserScramCredentialsResult alterUserScramCredentials(List alterations) { + return alterUserScramCredentials(alterations, new AlterUserScramCredentialsOptions()); + } + + /** + * Alter SASL/SCRAM credentials. + * + *

+ * The following exceptions can be anticipated when calling {@code get()} any of the futures from the + * returned {@link AlterUserScramCredentialsResult}: + *

    + *
  • {@link org.apache.kafka.common.errors.NotControllerException} + * If the request is not sent to the Controller broker.
  • + *
  • {@link org.apache.kafka.common.errors.ClusterAuthorizationException} + * If the authenticated user didn't have alter access to the cluster.
  • + *
  • {@link org.apache.kafka.common.errors.UnsupportedByAuthenticationException} + * If the user authenticated with a delegation token.
  • + *
  • {@link org.apache.kafka.common.errors.UnsupportedSaslMechanismException} + * If the requested SCRAM mechanism is unrecognized or otherwise unsupported.
  • + *
  • {@link org.apache.kafka.common.errors.UnacceptableCredentialException} + * If the username is empty or the requested number of iterations is too small or too large.
  • + *
  • {@link org.apache.kafka.common.errors.TimeoutException} + * If the request timed out before the describe could finish.
  • + *
+ *

+ * This operation is supported by brokers with version 2.7.0 or higher. + * + * @param alterations the alterations to be applied + * @param options The options to use when altering the credentials + * @return The AlterUserScramCredentialsResult. + */ + AlterUserScramCredentialsResult alterUserScramCredentials(List alterations, + AlterUserScramCredentialsOptions options); + /** + * Describes finalized as well as supported features. + *

+ * This is a convenience method for {@link #describeFeatures(DescribeFeaturesOptions)} with default options. + * See the overload for more details. + * + * @return the {@link DescribeFeaturesResult} containing the result + */ + default DescribeFeaturesResult describeFeatures() { + return describeFeatures(new DescribeFeaturesOptions()); + } + + /** + * Describes finalized as well as supported features. The request is issued to any random + * broker. + *

+ * The following exceptions can be anticipated when calling {@code get()} on the future from the + * returned {@link DescribeFeaturesResult}: + *

    + *
  • {@link org.apache.kafka.common.errors.TimeoutException} + * If the request timed out before the describe operation could finish.
  • + *
+ *

+ * + * @param options the options to use + * @return the {@link DescribeFeaturesResult} containing the result + */ + DescribeFeaturesResult describeFeatures(DescribeFeaturesOptions options); + + /** + * Applies specified updates to finalized features. This operation is not transactional so some + * updates may succeed while the rest may fail. + *

+ * The API takes in a map of finalized feature names to {@link FeatureUpdate} that needs to be + * applied. Each entry in the map specifies the finalized feature to be added or updated or + * deleted, along with the new max feature version level value. This request is issued only to + * the controller since the API is only served by the controller. The return value contains an + * error code for each supplied {@link FeatureUpdate}, and the code indicates if the update + * succeeded or failed in the controller. + *

    + *
  • Downgrade of feature version level is not a regular operation/intent. It is only allowed + * in the controller if the {@link FeatureUpdate} has the allowDowngrade flag set. Setting this + * flag conveys user intent to attempt downgrade of a feature max version level. Note that + * despite the allowDowngrade flag being set, certain downgrades may be rejected by the + * controller if it is deemed impossible.
  • + *
  • Deletion of a finalized feature version is not a regular operation/intent. It could be + * done by setting the allowDowngrade flag to true in the {@link FeatureUpdate}, and, setting + * the max version level to a value less than 1.
  • + *
+ *

+ * The following exceptions can be anticipated when calling {@code get()} on the futures + * obtained from the returned {@link UpdateFeaturesResult}: + *

    + *
  • {@link org.apache.kafka.common.errors.ClusterAuthorizationException} + * If the authenticated user didn't have alter access to the cluster.
  • + *
  • {@link org.apache.kafka.common.errors.InvalidRequestException} + * If the request details are invalid. e.g., a non-existing finalized feature is attempted + * to be deleted or downgraded.
  • + *
  • {@link org.apache.kafka.common.errors.TimeoutException} + * If the request timed out before the updates could finish. It cannot be guaranteed whether + * the updates succeeded or not.
  • + *
  • {@link FeatureUpdateFailedException} + * This means there was an unexpected error encountered when the update was applied on + * the controller. There is no guarantee on whether the update succeeded or failed. The best + * way to find out is to issue a {@link Admin#describeFeatures(DescribeFeaturesOptions)} + * request.
  • + *
+ *

+ * This operation is supported by brokers with version 2.7.0 or higher. + + * @param featureUpdates the map of finalized feature name to {@link FeatureUpdate} + * @param options the options to use + * @return the {@link UpdateFeaturesResult} containing the result + */ + UpdateFeaturesResult updateFeatures(Map featureUpdates, UpdateFeaturesOptions options); + + /** + * Unregister a broker. + *

+ * This operation does not have any effect on partition assignments. It is supported + * only on Kafka clusters which use Raft to store metadata, rather than ZooKeeper. + * + * This is a convenience method for {@link #unregisterBroker(int, UnregisterBrokerOptions)} + * + * @param brokerId the broker id to unregister. + * + * @return the {@link UnregisterBrokerResult} containing the result + */ + @InterfaceStability.Unstable + default UnregisterBrokerResult unregisterBroker(int brokerId) { + return unregisterBroker(brokerId, new UnregisterBrokerOptions()); + } + + /** + * Unregister a broker. + *

+ * This operation does not have any effect on partition assignments. It is supported + * only on Kafka clusters which use Raft to store metadata, rather than ZooKeeper. + * + * The following exceptions can be anticipated when calling {@code get()} on the future from the + * returned {@link UnregisterBrokerResult}: + *

    + *
  • {@link org.apache.kafka.common.errors.TimeoutException} + * If the request timed out before the describe operation could finish.
  • + *
  • {@link org.apache.kafka.common.errors.UnsupportedVersionException} + * If the software is too old to support the unregistration API, or if the + * cluster is not using Raft to store metadata. + *
+ *

+ * + * @param brokerId the broker id to unregister. + * @param options the options to use. + * + * @return the {@link UnregisterBrokerResult} containing the result + */ + @InterfaceStability.Unstable + UnregisterBrokerResult unregisterBroker(int brokerId, UnregisterBrokerOptions options); + + /** + * Get the metrics kept by the adminClient + */ + Map metrics(); +} diff --git a/clients/src/main/java/org/oracle/okafka/clients/admin/AdminClient.java b/clients/src/main/java/org/oracle/okafka/clients/admin/AdminClient.java index 712aa32..ee458c5 100644 --- a/clients/src/main/java/org/oracle/okafka/clients/admin/AdminClient.java +++ b/clients/src/main/java/org/oracle/okafka/clients/admin/AdminClient.java @@ -1,7 +1,7 @@ /* -** OKafka Java Client version 0.8. +** OKafka Java Client version 23.4. ** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. +** Copyright (c) 2019, 2024 Oracle and/or its affiliates. ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. */ @@ -29,24 +29,76 @@ import java.util.Properties; import java.util.concurrent.TimeUnit; -import org.oracle.okafka.common.TopicPartition; -import org.oracle.okafka.common.TopicPartitionReplica; -import org.oracle.okafka.common.acl.AclBinding; -import org.oracle.okafka.common.acl.AclBindingFilter; -import org.oracle.okafka.common.annotation.InterfaceStability; -import org.oracle.okafka.common.config.ConfigResource; +//import org.apache.kafka.clients.admin.Admin; +import org.apache.kafka.clients.admin.AlterConfigsOptions; +import org.apache.kafka.clients.admin.AlterConfigsResult; +import org.apache.kafka.clients.admin.AlterReplicaLogDirsOptions; +import org.apache.kafka.clients.admin.AlterReplicaLogDirsResult; +import org.apache.kafka.clients.admin.Config; +import org.apache.kafka.clients.admin.CreateAclsOptions; +import org.apache.kafka.clients.admin.CreateAclsResult; +import org.apache.kafka.clients.admin.CreateDelegationTokenOptions; +import org.apache.kafka.clients.admin.CreateDelegationTokenResult; +import org.apache.kafka.clients.admin.CreatePartitionsOptions; +import org.apache.kafka.clients.admin.CreatePartitionsResult; +import org.apache.kafka.clients.admin.CreateTopicsOptions; +import org.apache.kafka.clients.admin.CreateTopicsResult; +import org.apache.kafka.clients.admin.DeleteAclsOptions; +import org.apache.kafka.clients.admin.DeleteAclsResult; +import org.apache.kafka.clients.admin.DeleteConsumerGroupsOptions; +import org.apache.kafka.clients.admin.DeleteConsumerGroupsResult; +import org.apache.kafka.clients.admin.DeleteRecordsOptions; +import org.apache.kafka.clients.admin.DeleteRecordsResult; +import org.apache.kafka.clients.admin.DeleteTopicsOptions; +import org.apache.kafka.clients.admin.DeleteTopicsResult; +import org.apache.kafka.clients.admin.DescribeAclsOptions; +import org.apache.kafka.clients.admin.DescribeAclsResult; +import org.apache.kafka.clients.admin.DescribeClusterOptions; +import org.apache.kafka.clients.admin.DescribeClusterResult; +import org.apache.kafka.clients.admin.DescribeConfigsOptions; +import org.apache.kafka.clients.admin.DescribeConfigsResult; +import org.apache.kafka.clients.admin.DescribeConsumerGroupsOptions; +import org.apache.kafka.clients.admin.DescribeConsumerGroupsResult; +import org.apache.kafka.clients.admin.DescribeDelegationTokenOptions; +import org.apache.kafka.clients.admin.DescribeDelegationTokenResult; +import org.apache.kafka.clients.admin.DescribeLogDirsOptions; +import 
org.apache.kafka.clients.admin.DescribeLogDirsResult; +import org.apache.kafka.clients.admin.DescribeReplicaLogDirsOptions; +import org.apache.kafka.clients.admin.DescribeReplicaLogDirsResult; +import org.apache.kafka.clients.admin.DescribeTopicsOptions; +import org.apache.kafka.clients.admin.DescribeTopicsResult; +import org.apache.kafka.clients.admin.ExpireDelegationTokenOptions; +import org.apache.kafka.clients.admin.ExpireDelegationTokenResult; +import org.apache.kafka.clients.admin.ListConsumerGroupOffsetsOptions; +import org.apache.kafka.clients.admin.ListConsumerGroupOffsetsResult; +import org.apache.kafka.clients.admin.ListConsumerGroupsOptions; +import org.apache.kafka.clients.admin.ListConsumerGroupsResult; +import org.apache.kafka.clients.admin.ListTopicsOptions; +import org.apache.kafka.clients.admin.ListTopicsResult; +import org.apache.kafka.clients.admin.NewPartitions; +import org.apache.kafka.clients.admin.NewTopic; +import org.apache.kafka.clients.admin.RecordsToDelete; +import org.apache.kafka.clients.admin.RenewDelegationTokenOptions; +import org.apache.kafka.clients.admin.RenewDelegationTokenResult; +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.TopicPartitionReplica; +import org.apache.kafka.common.acl.AclBinding; +import org.apache.kafka.common.acl.AclBindingFilter; +import org.apache.kafka.common.annotation.InterfaceStability; +import org.apache.kafka.common.config.ConfigResource; /** - * The administrative client for Transactional Event Queues(TEQ), which supports managing and inspecting topics. - * - * Topic can be created or altered with following configs. If these configs are not overriden by client then server default values are used. - * - * retention.ms: Amount of time in milliseconds messages stay in topic and are available for consumption. In kafka retention time starts after - * enqueue of a message whereas in TEQ retention starts after all subscribers(groups) of a topic consume a message. In TEQ retention.ms is rounded to seconds. This property is supported on or later 20c database. - * + * The administrative client for Transactional Event Queues(TXEQ), which supports managing and inspecting topics. + * For this release only creation of topic(s) and deletion of topic(s) is supported. + * A topic can be created by invoking {@code #createTopics(Collection)} and deleted by invoking {@code #deleteTopics(Collection)} method. + *

+ * A topic can be created with the following configuration. + *

+ * retention.ms: Amount of time in milliseconds for which records stay in the topic and are available for consumption. Internally, the retention.ms value is rounded to the second. The default value for this parameter is 7 days. + *
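+ * <p>For illustration, a minimal topic-creation sketch; the connection property values shown are
+ * placeholders for an actual database listener, service name, and configuration directory, and
+ * {@code get()} throws {@code ExecutionException}/{@code InterruptedException} which callers must handle:
+ * <pre>{@code
+ * Properties props = new Properties();
+ * props.put("bootstrap.servers", "localhost:1521");        // database listener host:port (placeholder)
+ * props.put("oracle.service.name", "FREEPDB1");            // database service name (placeholder)
+ * props.put("oracle.net.tns_admin", "/path/to/conf_dir");  // directory containing ojdbc.properties (placeholder)
+ *
+ * try (Admin admin = AdminClient.create(props)) {
+ *     NewTopic topic = new NewTopic("TXEQ", 5, (short) 1)
+ *         .configs(Collections.singletonMap("retention.ms", "604800000")); // 7 days
+ *     admin.createTopics(Collections.singletonList(topic)).all().get();
+ * }
+ * }</pre>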

*/ @InterfaceStability.Evolving -public abstract class AdminClient implements AutoCloseable { +public abstract class AdminClient implements Admin { /** * Create a new AdminClient with the given configuration. @@ -54,8 +106,19 @@ public abstract class AdminClient implements AutoCloseable { * @param props The configuration. * @return The new KafkaAdminClient. */ + final static String DUMMY_BOOTSTRAP ="localhost:1521"; public static AdminClient create(Properties props) { - return KafkaAdminClient.createInternal(new AdminClientConfig(props), null); + String bootStrap = (String)props.get(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG); + if(bootStrap== null) + { + String secProtocol = props.getProperty(AdminClientConfig.SECURITY_PROTOCOL_CONFIG); + if(secProtocol != null && secProtocol.equalsIgnoreCase("SSL")) { + // Connect using Oracle Wallet and tnsnames.ora. + // User does not need to know the database host ip and port. + props.setProperty(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, DUMMY_BOOTSTRAP); + } + } + return KafkaAdminClient.createInternal(new org.oracle.okafka.clients.admin.AdminClientConfig(props), new KafkaAdminClient.TimeoutProcessorFactory()); } /** @@ -65,6 +128,18 @@ public static AdminClient create(Properties props) { * @return The new KafkaAdminClient. */ public static AdminClient create(Map conf) { + + String bootStrap = (String)conf.get(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG); + if(bootStrap == null) + { + String setSecProtocol = (String)conf.get(AdminClientConfig.SECURITY_PROTOCOL_CONFIG); + if(setSecProtocol != null && setSecProtocol.equalsIgnoreCase("SSL")) + { + // Connect using Wallet and TNSNAMES.ora. + // User does not need to know the database host ip and port. + conf.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, DUMMY_BOOTSTRAP); + } + } return KafkaAdminClient.createInternal(new AdminClientConfig(conf), null); } @@ -140,6 +215,7 @@ public DeleteTopicsResult deleteTopics(Collection topics) { public abstract DeleteTopicsResult deleteTopics(Collection topics, DeleteTopicsOptions options); /** + * @hidden * This method is not yet supported. */ public ListTopicsResult listTopics() { @@ -147,11 +223,13 @@ public ListTopicsResult listTopics() { } /** + * @hidden * This method is not yet supported. */ public abstract ListTopicsResult listTopics(ListTopicsOptions options); /** + * @hidden * This method is not yet supported. */ public DescribeTopicsResult describeTopics(Collection topicNames) { @@ -159,12 +237,14 @@ public DescribeTopicsResult describeTopics(Collection topicNames) { } /** + * @hidden * This method is not yet supported. */ public abstract DescribeTopicsResult describeTopics(Collection topicNames, DescribeTopicsOptions options); /** + * @hidden * This method is not yet supported. */ public DescribeClusterResult describeCluster() { @@ -172,11 +252,13 @@ public DescribeClusterResult describeCluster() { } /** + * @hidden * This method is not yet supported. */ public abstract DescribeClusterResult describeCluster(DescribeClusterOptions options); /** + * @hidden * This method is not yet supported. */ public DescribeAclsResult describeAcls(AclBindingFilter filter) { @@ -184,11 +266,13 @@ public DescribeAclsResult describeAcls(AclBindingFilter filter) { } /** + * @hidden * This method is not yet supported. */ public abstract DescribeAclsResult describeAcls(AclBindingFilter filter, DescribeAclsOptions options); /** + * @hidden * This method is not yet supported. 
*/ public CreateAclsResult createAcls(Collection acls) { @@ -196,11 +280,13 @@ public CreateAclsResult createAcls(Collection acls) { } /** + * @hidden * This method is not yet supported. */ public abstract CreateAclsResult createAcls(Collection acls, CreateAclsOptions options); /** + * @hidden * This method is not yet supported. */ public DeleteAclsResult deleteAcls(Collection filters) { @@ -208,12 +294,14 @@ public DeleteAclsResult deleteAcls(Collection filters) { } /** + * @hidden * This method is not yet supported. */ public abstract DeleteAclsResult deleteAcls(Collection filters, DeleteAclsOptions options); /** + * @hidden * This method is not yet supported. */ public DescribeConfigsResult describeConfigs(Collection resources) { @@ -221,12 +309,14 @@ public DescribeConfigsResult describeConfigs(Collection resource } /** + * @hidden * This method is not yet supported. */ public abstract DescribeConfigsResult describeConfigs(Collection resources, DescribeConfigsOptions options); /** + * @hidden * This method is not yet supported. */ public AlterConfigsResult alterConfigs(Map configs) { @@ -234,11 +324,13 @@ public AlterConfigsResult alterConfigs(Map configs) { } /** + * @hidden * This method is not yet supported. */ public abstract AlterConfigsResult alterConfigs(Map configs, AlterConfigsOptions options); /** + * @hidden * This method is not yet supported. */ public AlterReplicaLogDirsResult alterReplicaLogDirs(Map replicaAssignment) { @@ -246,11 +338,13 @@ public AlterReplicaLogDirsResult alterReplicaLogDirs(Map replicaAssignment, AlterReplicaLogDirsOptions options); /** + * @hidden * This method is not yet supported. */ public DescribeLogDirsResult describeLogDirs(Collection brokers) { @@ -258,11 +352,13 @@ public DescribeLogDirsResult describeLogDirs(Collection brokers) { } /** + * @hidden * This method is not yet supported. */ public abstract DescribeLogDirsResult describeLogDirs(Collection brokers, DescribeLogDirsOptions options); /** + * @hidden * This method is not yet supported. */ public DescribeReplicaLogDirsResult describeReplicaLogDirs(Collection replicas) { @@ -270,11 +366,13 @@ public DescribeReplicaLogDirsResult describeReplicaLogDirs(Collection replicas, DescribeReplicaLogDirsOptions options); /** + * @hidden * This method is not yet supported. */ public CreatePartitionsResult createPartitions(Map newPartitions) { @@ -282,12 +380,14 @@ public CreatePartitionsResult createPartitions(Map newPar } /** + * @hidden * This method is not yet supported. */ public abstract CreatePartitionsResult createPartitions(Map newPartitions, CreatePartitionsOptions options); /** + * @hidden * This method is not yet supported. */ public DeleteRecordsResult deleteRecords(Map recordsToDelete) { @@ -295,12 +395,14 @@ public DeleteRecordsResult deleteRecords(Map re } /** + * @hidden * This method is not yet supported. */ public abstract DeleteRecordsResult deleteRecords(Map recordsToDelete, DeleteRecordsOptions options); /** + * @hidden * This method is not yet supported. */ public CreateDelegationTokenResult createDelegationToken() { @@ -309,12 +411,14 @@ public CreateDelegationTokenResult createDelegationToken() { /** + * @hidden * This method is not yet supported. */ public abstract CreateDelegationTokenResult createDelegationToken(CreateDelegationTokenOptions options); /** + * @hidden * This method is not yet supported. 
*/ public RenewDelegationTokenResult renewDelegationToken(byte[] hmac) { @@ -322,23 +426,27 @@ public RenewDelegationTokenResult renewDelegationToken(byte[] hmac) { } /** + * @hidden * This method is not yet supported. */ public abstract RenewDelegationTokenResult renewDelegationToken(byte[] hmac, RenewDelegationTokenOptions options); /** - * This method is not yet supported. + * @hidden + * groupIds, DescribeConsumerGroupsOptions options); /** + * @hidden * This method is not yet supported. */ public DescribeConsumerGroupsResult describeConsumerGroups(Collection groupIds) { @@ -364,11 +475,13 @@ public DescribeConsumerGroupsResult describeConsumerGroups(Collection gr } /** + * @hidden * This method is not yet supported. */ public abstract ListConsumerGroupsResult listConsumerGroups(ListConsumerGroupsOptions options); /** + * @hidden * This method is not yet supported. */ public ListConsumerGroupsResult listConsumerGroups() { @@ -376,11 +489,13 @@ public ListConsumerGroupsResult listConsumerGroups() { } /** + * @hidden * This method is not yet supported. */ public abstract ListConsumerGroupOffsetsResult listConsumerGroupOffsets(String groupId, ListConsumerGroupOffsetsOptions options); /** + * @hidden * This method is not yet supported. */ public ListConsumerGroupOffsetsResult listConsumerGroupOffsets(String groupId) { @@ -388,11 +503,13 @@ public ListConsumerGroupOffsetsResult listConsumerGroupOffsets(String groupId) { } /** + * @hidden * This method is not yet supported. */ public abstract DeleteConsumerGroupsResult deleteConsumerGroups(Collection groupIds, DeleteConsumerGroupsOptions options); /** + * @hidden * This method is not yet supported. */ public DeleteConsumerGroupsResult deleteConsumerGroups(Collection groupIds) { diff --git a/clients/src/main/java/org/oracle/okafka/clients/admin/AdminClientConfig.java b/clients/src/main/java/org/oracle/okafka/clients/admin/AdminClientConfig.java index 1767e88..ae3660b 100644 --- a/clients/src/main/java/org/oracle/okafka/clients/admin/AdminClientConfig.java +++ b/clients/src/main/java/org/oracle/okafka/clients/admin/AdminClientConfig.java @@ -1,7 +1,7 @@ /* -** OKafka Java Client version 0.8. +** OKafka Java Client version 23.4. ** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. +** Copyright (c) 2019, 2024 Oracle and/or its affiliates. ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
 */

@@ -27,14 +27,15 @@
 package org.oracle.okafka.clients.admin;

 import org.oracle.okafka.clients.CommonClientConfigs;
-import org.oracle.okafka.common.config.AbstractConfig;
-import org.oracle.okafka.common.config.ConfigDef;
-import org.oracle.okafka.common.config.ConfigDef.Importance;
-import org.oracle.okafka.common.config.ConfigDef.Type;
-import org.oracle.okafka.common.metrics.Sensor;
+import org.oracle.okafka.common.config.SslConfigs;
+import org.apache.kafka.common.config.AbstractConfig;
+import org.apache.kafka.common.config.ConfigDef;
+import org.apache.kafka.common.config.ConfigDef.Importance;
+import org.apache.kafka.common.config.ConfigDef.Type;
+import org.apache.kafka.common.metrics.Sensor;

-import static org.oracle.okafka.common.config.ConfigDef.Range.atLeast;
-import static org.oracle.okafka.common.config.ConfigDef.ValidString.in;
+import static org.apache.kafka.common.config.ConfigDef.Range.atLeast;
+import static org.apache.kafka.common.config.ConfigDef.ValidString.in;

 import java.util.Map;
 import java.util.Set;

@@ -179,7 +180,8 @@ public class AdminClientConfig extends AbstractConfig {
                 Importance.MEDIUM,
                 SECURITY_PROTOCOL_DOC)
             .withClientSslSupport()
-            .withClientSaslSupport()
+            //.withClientSaslSupport()
+            .define(SslConfigs.TNS_ALIAS, ConfigDef.Type.STRING, null, Importance.MEDIUM, SslConfigs.TNS_ALIAS_DOC)
             .define(ORACLE_SERVICE_NAME,
                 Type.STRING,
                 "",
@@ -204,6 +206,10 @@ public AdminClientConfig(Map props) {
         super(CONFIG, props);
     }
+
+    protected AdminClientConfig(Map props, boolean doLog) {
+        super(CONFIG, props, doLog);
+    }

     public static Set configNames() {
         return CONFIG.names();

diff --git a/clients/src/main/java/org/oracle/okafka/clients/admin/AlterConfigsOptions.java b/clients/src/main/java/org/oracle/okafka/clients/admin/AlterConfigsOptions.java
deleted file mode 100644
index c90e4ea..0000000
--- a/clients/src/main/java/org/oracle/okafka/clients/admin/AlterConfigsOptions.java
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
-** OKafka Java Client version 0.8.
-**
-** Copyright (c) 2019, 2020 Oracle and/or its affiliates.
-** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
-*/
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
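For orientation, a minimal sketch of how the reworked `AdminClientConfig` might be used from application code. The literal property keys, the `AdminClient.create(...)` factory, and the placeholder connection values are assumptions inferred from the constants visible in this patch (`ORACLE_SERVICE_NAME`, `SslConfigs.TNS_ALIAS`) rather than code shown here.

```java
// Illustrative sketch only -- not part of this patch.
// Assumes the okafka AdminClient factory mirrors Apache Kafka's Admin API and that
// the property keys below correspond to the ConfigDef entries defined above.
import java.util.Properties;

import org.apache.kafka.clients.admin.Admin;
import org.oracle.okafka.clients.admin.AdminClient;

public class AdminConfigSketch {

    // PLAINTEXT: point bootstrap.servers at the database listener (hypothetical values).
    public static Admin plainTextAdmin() {
        Properties props = new Properties();
        props.put("bootstrap.servers", "dbhost:1521");
        props.put("oracle.service.name", "FREEPDB1");   // assumed key for ORACLE_SERVICE_NAME
        props.put("security.protocol", "PLAINTEXT");
        return AdminClient.create(props);
    }

    // SSL: wallet directory plus a TNS alias (assumed key for SslConfigs.TNS_ALIAS).
    public static Admin sslAdmin() {
        Properties props = new Properties();
        props.put("security.protocol", "SSL");
        props.put("oracle.net.tns_admin", "/path/to/wallet");
        props.put("tns.alias", "mydb_high");
        return AdminClient.create(props);
    }
}
```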
- */ -@InterfaceStability.Evolving -public class AlterConfigsOptions extends AbstractOptions { - - private boolean validateOnly = false; - - /** - * Set the request timeout in milliseconds for this operation or {@code null} if the default request timeout for the - * AdminClient should be used. - * - */ - // This method is retained to keep binary compatibility with 0.11 - public AlterConfigsOptions timeoutMs(Integer timeoutMs) { - this.timeoutMs = timeoutMs; - return this; - } - - /** - * Return true if the request should be validated without altering the configs. - */ - public boolean shouldValidateOnly() { - return validateOnly; - } - - /** - * Set to true if the request should be validated without altering the configs. - */ - public AlterConfigsOptions validateOnly(boolean validateOnly) { - this.validateOnly = validateOnly; - return this; - } -} diff --git a/clients/src/main/java/org/oracle/okafka/clients/admin/AlterConfigsResult.java b/clients/src/main/java/org/oracle/okafka/clients/admin/AlterConfigsResult.java deleted file mode 100644 index f083aea..0000000 --- a/clients/src/main/java/org/oracle/okafka/clients/admin/AlterConfigsResult.java +++ /dev/null @@ -1,43 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * 04/20/2020: This file is modified to support Kafka Java Client compatability to Oracle Transactional Event Queues. - * - */ - -package org.oracle.okafka.clients.admin; - -import java.util.Map; - -import org.oracle.okafka.common.annotation.InterfaceStability; - -/** - * The result of the {@link AdminClient#alterConfigs(Map)} call. - * - * The API of this class is evolving, see {@link AdminClient} for details. - */ -@InterfaceStability.Evolving -public class AlterConfigsResult { -} diff --git a/clients/src/main/java/org/oracle/okafka/clients/admin/AlterReplicaLogDirsOptions.java b/clients/src/main/java/org/oracle/okafka/clients/admin/AlterReplicaLogDirsOptions.java deleted file mode 100644 index a87f4a4..0000000 --- a/clients/src/main/java/org/oracle/okafka/clients/admin/AlterReplicaLogDirsOptions.java +++ /dev/null @@ -1,37 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.clients.admin; - -import java.util.Map; - -import org.oracle.okafka.common.annotation.InterfaceStability; - -/** - * Options for {@link AdminClient#alterReplicaLogDirs(Map, AlterReplicaLogDirsOptions)}. - */ -@InterfaceStability.Evolving -public class AlterReplicaLogDirsOptions extends AbstractOptions { - -} diff --git a/clients/src/main/java/org/oracle/okafka/clients/admin/AlterReplicaLogDirsResult.java b/clients/src/main/java/org/oracle/okafka/clients/admin/AlterReplicaLogDirsResult.java deleted file mode 100644 index b1729d7..0000000 --- a/clients/src/main/java/org/oracle/okafka/clients/admin/AlterReplicaLogDirsResult.java +++ /dev/null @@ -1,43 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * 04/20/2020: This file is modified to support Kafka Java Client compatability to Oracle Transactional Event Queues. - * - */ - - -package org.oracle.okafka.clients.admin; - -import java.util.Map; - -import org.oracle.okafka.common.annotation.InterfaceStability; - - -/** - * The result of {@link AdminClient#alterReplicaLogDirs(Map, AlterReplicaLogDirsOptions)}. - */ -@InterfaceStability.Evolving -public class AlterReplicaLogDirsResult { -} diff --git a/clients/src/main/java/org/oracle/okafka/clients/admin/Config.java b/clients/src/main/java/org/oracle/okafka/clients/admin/Config.java deleted file mode 100644 index 0f535ac..0000000 --- a/clients/src/main/java/org/oracle/okafka/clients/admin/Config.java +++ /dev/null @@ -1,88 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.clients.admin; - -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.Map; - -import org.oracle.okafka.common.annotation.InterfaceStability; - -/** - * A configuration object containing the configuration entries for a resource. - *

- * The API of this class is evolving, see {@link AdminClient} for details. - */ -@InterfaceStability.Evolving -public class Config { - - private final Map entries = new HashMap<>(); - - /** - * Create a configuration instance with the provided entries. - */ - public Config(Collection entries) { - for (ConfigEntry entry : entries) { - this.entries.put(entry.name(), entry); - } - } - - /** - * Configuration entries for a resource. - */ - public Collection entries() { - return Collections.unmodifiableCollection(entries.values()); - } - - /** - * Get the configuration entry with the provided name or null if there isn't one. - */ - public ConfigEntry get(String name) { - return entries.get(name); - } - - @Override - public boolean equals(Object o) { - if (this == o) - return true; - if (o == null || getClass() != o.getClass()) - return false; - - Config config = (Config) o; - - return entries.equals(config.entries); - } - - @Override - public int hashCode() { - return entries.hashCode(); - } - - @Override - public String toString() { - return "Config(entries=" + entries.values() + ")"; - } -} diff --git a/clients/src/main/java/org/oracle/okafka/clients/admin/ConfigEntry.java b/clients/src/main/java/org/oracle/okafka/clients/admin/ConfigEntry.java deleted file mode 100644 index f0f05bc..0000000 --- a/clients/src/main/java/org/oracle/okafka/clients/admin/ConfigEntry.java +++ /dev/null @@ -1,273 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.clients.admin; - -import java.util.Collections; -import java.util.List; -import java.util.Objects; - -import org.oracle.okafka.common.annotation.InterfaceStability; - -/** - * A class representing a configuration entry containing name, value and additional metadata. - * - * The API of this class is evolving, see {@link AdminClient} for details. - */ -@InterfaceStability.Evolving -public class ConfigEntry { - - private final String name; - private final String value; - private final ConfigSource source; - private final boolean isSensitive; - private final boolean isReadOnly; - private final List synonyms; - - /** - * Create a configuration entry with the provided values. - * - * @param name the non-null config name - * @param value the config value or null - */ - public ConfigEntry(String name, String value) { - this(name, value, false, false, false); - } - - /** - * Create a configuration with the provided values. 
- * - * @param name the non-null config name - * @param value the config value or null - * @param isDefault whether the config value is the default or if it's been explicitly set - * @param isSensitive whether the config value is sensitive, the broker never returns the value if it is sensitive - * @param isReadOnly whether the config is read-only and cannot be updated - * @deprecated since 1.1.0. This constructor will be removed in a future release. - */ - //TODO return to this point to document when open the PR - @Deprecated - public ConfigEntry(String name, String value, boolean isDefault, boolean isSensitive, boolean isReadOnly) { - this(name, - value, - isDefault ? ConfigSource.DEFAULT_CONFIG : ConfigSource.UNKNOWN, - isSensitive, - isReadOnly, - Collections.emptyList()); - } - - /** - * Create a configuration with the provided values. - * - * @param name the non-null config name - * @param value the config value or null - * @param source the source of this config entry - * @param isSensitive whether the config value is sensitive, the broker never returns the value if it is sensitive - * @param isReadOnly whether the config is read-only and cannot be updated - * @param synonyms Synonym configs in order of precedence - */ - ConfigEntry(String name, String value, ConfigSource source, boolean isSensitive, boolean isReadOnly, - List synonyms) { - Objects.requireNonNull(name, "name should not be null"); - this.name = name; - this.value = value; - this.source = source; - this.isSensitive = isSensitive; - this.isReadOnly = isReadOnly; - this.synonyms = synonyms; - } - - /** - * Return the config name. - */ - public String name() { - return name; - } - - /** - * Return the value or null. Null is returned if the config is unset or if isSensitive is true. - */ - public String value() { - return value; - } - - /** - * Return the source of this configuration entry. - */ - public ConfigSource source() { - return source; - } - - /** - * Return whether the config value is the default or if it's been explicitly set. - */ - public boolean isDefault() { - return source == ConfigSource.DEFAULT_CONFIG; - } - - /** - * Return whether the config value is sensitive. The value is always set to null by the broker if the config value - * is sensitive. - */ - public boolean isSensitive() { - return isSensitive; - } - - /** - * Return whether the config is read-only and cannot be updated. - */ - public boolean isReadOnly() { - return isReadOnly; - } - - /** - * Returns all config values that may be used as the value of this config along with their source, - * in the order of precedence. The list starts with the value returned in this ConfigEntry. - * The list is empty if synonyms were not requested using {@link DescribeConfigsOptions#includeSynonyms(boolean)} - */ - public List synonyms() { - return synonyms; - } - - @Override - public boolean equals(Object o) { - if (this == o) - return true; - if (o == null || getClass() != o.getClass()) - return false; - - ConfigEntry that = (ConfigEntry) o; - - return this.name.equals(that.name) && - this.value != null ? this.value.equals(that.value) : that.value == null && - this.isSensitive == that.isSensitive && - this.isReadOnly == that.isReadOnly && - this.source == that.source && - Objects.equals(this.synonyms, that.synonyms); - } - - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + name.hashCode(); - result = prime * result + ((value == null) ? 0 : value.hashCode()); - result = prime * result + (isSensitive ? 
1 : 0); - result = prime * result + (isReadOnly ? 1 : 0); - result = prime * result + source.hashCode(); - result = prime * result + synonyms.hashCode(); - return result; - } - - @Override - public String toString() { - return "ConfigEntry(" + - "name=" + name + - ", value=" + value + - ", source=" + source + - ", isSensitive=" + isSensitive + - ", isReadOnly=" + isReadOnly + - ", synonyms=" + synonyms + - ")"; - } - - - /** - * Source of configuration entries. - */ - public enum ConfigSource { - DYNAMIC_TOPIC_CONFIG, // dynamic topic config that is configured for a specific topic - DYNAMIC_BROKER_CONFIG, // dynamic broker config that is configured for a specific broker - DYNAMIC_DEFAULT_BROKER_CONFIG, // dynamic broker config that is configured as default for all brokers in the cluster - STATIC_BROKER_CONFIG, // static broker config provided as broker properties at start up (e.g. server.properties file) - DEFAULT_CONFIG, // built-in default configuration for configs that have a default value - UNKNOWN // source unknown e.g. in the ConfigEntry used for alter requests where source is not set - } - - /** - * Class representing a configuration synonym of a {@link ConfigEntry}. - */ - public static class ConfigSynonym { - - private final String name; - private final String value; - private final ConfigSource source; - - /** - * Create a configuration synonym with the provided values. - * - * @param name Configuration name (this may be different from the name of the associated {@link ConfigEntry} - * @param value Configuration value - * @param source {@link ConfigSource} of this configuraton - */ - ConfigSynonym(String name, String value, ConfigSource source) { - this.name = name; - this.value = value; - this.source = source; - } - - /** - * Returns the name of this configuration. - */ - public String name() { - return name; - } - - /** - * Returns the value of this configuration, which may be null if the configuration is sensitive. - */ - public String value() { - return value; - } - - /** - * Returns the source of this configuration. - */ - public ConfigSource source() { - return source; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - - ConfigSynonym that = (ConfigSynonym) o; - return Objects.equals(name, that.name) && Objects.equals(value, that.value) && source == that.source; - } - - @Override - public int hashCode() { - return Objects.hash(name, value, source); - } - - @Override - public String toString() { - return "ConfigSynonym(" + - "name=" + name + - ", value=" + value + - ", source=" + source + - ")"; - } - } -} diff --git a/clients/src/main/java/org/oracle/okafka/clients/admin/CreateAclsOptions.java b/clients/src/main/java/org/oracle/okafka/clients/admin/CreateAclsOptions.java deleted file mode 100644 index c8eb43b..0000000 --- a/clients/src/main/java/org/oracle/okafka/clients/admin/CreateAclsOptions.java +++ /dev/null @@ -1,50 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.clients.admin; - -import java.util.Collection; - -import org.oracle.okafka.common.annotation.InterfaceStability; - -/** - * Options for {@link AdminClient#createAcls(Collection)}. - * - * The API of this class is evolving, see {@link AdminClient} for details. - */ -@InterfaceStability.Evolving -public class CreateAclsOptions extends AbstractOptions { - - /** - * Set the request timeout in milliseconds for this operation or {@code null} if the default request timeout for the - * AdminClient should be used. - * - */ - // This method is retained to keep binary compatibility with 0.11 - public CreateAclsOptions timeoutMs(Integer timeoutMs) { - this.timeoutMs = timeoutMs; - return this; - } - -} diff --git a/clients/src/main/java/org/oracle/okafka/clients/admin/CreateAclsResult.java b/clients/src/main/java/org/oracle/okafka/clients/admin/CreateAclsResult.java deleted file mode 100644 index ff5b6aa..0000000 --- a/clients/src/main/java/org/oracle/okafka/clients/admin/CreateAclsResult.java +++ /dev/null @@ -1,44 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * 04/20/2020: This file is modified to support Kafka Java Client compatability to Oracle Transactional Event Queues. - * - */ - - -package org.oracle.okafka.clients.admin; - -import java.util.Collection; - -import org.oracle.okafka.common.annotation.InterfaceStability; - -/** - * The result of the {@link AdminClient#createAcls(Collection)} call. - * - * The API of this class is evolving, see {@link AdminClient} for details. 
- */ -@InterfaceStability.Evolving -public class CreateAclsResult { -} diff --git a/clients/src/main/java/org/oracle/okafka/clients/admin/CreateDelegationTokenOptions.java b/clients/src/main/java/org/oracle/okafka/clients/admin/CreateDelegationTokenOptions.java deleted file mode 100644 index 2f705c5..0000000 --- a/clients/src/main/java/org/oracle/okafka/clients/admin/CreateDelegationTokenOptions.java +++ /dev/null @@ -1,42 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * 04/20/2020: This file is modified to support Kafka Java Client compatability to Oracle Transactional Event Queues. - * - */ - - -package org.oracle.okafka.clients.admin; - -import org.oracle.okafka.common.annotation.InterfaceStability; - -/** - * Options for {@link AdminClient#createDelegationToken(CreateDelegationTokenOptions)}. - * - * The API of this class is evolving, see {@link AdminClient} for details. - */ -@InterfaceStability.Evolving -public class CreateDelegationTokenOptions extends AbstractOptions { -} diff --git a/clients/src/main/java/org/oracle/okafka/clients/admin/CreateDelegationTokenResult.java b/clients/src/main/java/org/oracle/okafka/clients/admin/CreateDelegationTokenResult.java deleted file mode 100644 index 3d860fe..0000000 --- a/clients/src/main/java/org/oracle/okafka/clients/admin/CreateDelegationTokenResult.java +++ /dev/null @@ -1,42 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * 04/20/2020: This file is modified to support Kafka Java Client compatability to Oracle Transactional Event Queues. 
- * - */ - - -package org.oracle.okafka.clients.admin; - -import org.oracle.okafka.common.annotation.InterfaceStability; - -/** - * The result of the {@link KafkaAdminClient#createDelegationToken(CreateDelegationTokenOptions)} call. - * - * The API of this class is evolving, see {@link AdminClient} for details. - */ -@InterfaceStability.Evolving -public class CreateDelegationTokenResult { -} diff --git a/clients/src/main/java/org/oracle/okafka/clients/admin/CreatePartitionsOptions.java b/clients/src/main/java/org/oracle/okafka/clients/admin/CreatePartitionsOptions.java deleted file mode 100644 index 4026e4f..0000000 --- a/clients/src/main/java/org/oracle/okafka/clients/admin/CreatePartitionsOptions.java +++ /dev/null @@ -1,58 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.clients.admin; - -import java.util.Map; - -import org.oracle.okafka.common.annotation.InterfaceStability; - -/** - * Options for {@link AdminClient#createPartitions(Map)}. - * - * The API of this class is evolving, see {@link AdminClient} for details. - */ -@InterfaceStability.Evolving -public class CreatePartitionsOptions extends AbstractOptions { - - private boolean validateOnly = false; - - public CreatePartitionsOptions() { - } - - /** - * Return true if the request should be validated without creating new partitions. - */ - public boolean validateOnly() { - return validateOnly; - } - - /** - * Set to true if the request should be validated without creating new partitions. - */ - public CreatePartitionsOptions validateOnly(boolean validateOnly) { - this.validateOnly = validateOnly; - return this; - } -} \ No newline at end of file diff --git a/clients/src/main/java/org/oracle/okafka/clients/admin/CreatePartitionsResult.java b/clients/src/main/java/org/oracle/okafka/clients/admin/CreatePartitionsResult.java deleted file mode 100644 index ebbd3a8..0000000 --- a/clients/src/main/java/org/oracle/okafka/clients/admin/CreatePartitionsResult.java +++ /dev/null @@ -1,60 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.clients.admin; - -import java.util.Map; - -import org.oracle.okafka.common.KafkaFuture; -import org.oracle.okafka.common.annotation.InterfaceStability; - -/** - * The result of the {@link AdminClient#createPartitions(Map)} call. - * - * The API of this class is evolving, see {@link AdminClient} for details. - */ -@InterfaceStability.Evolving -public class CreatePartitionsResult { - - private final Map> values; - - CreatePartitionsResult(Map> values) { - this.values = values; - } - - /** - * Return a map from topic names to futures, which can be used to check the status of individual - * partition creations. - */ - public Map> values() { - return values; - } - - /** - * Return a future which succeeds if all the partition creations succeed. - */ - public KafkaFuture all() { - return KafkaFuture.allOf(values.values().toArray(new KafkaFuture[0])); - } -} diff --git a/clients/src/main/java/org/oracle/okafka/clients/admin/CreateTopicsOptions.java b/clients/src/main/java/org/oracle/okafka/clients/admin/CreateTopicsOptions.java deleted file mode 100644 index 9d49bc1..0000000 --- a/clients/src/main/java/org/oracle/okafka/clients/admin/CreateTopicsOptions.java +++ /dev/null @@ -1,67 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.clients.admin; - -import java.util.Collection; - -import org.oracle.okafka.common.annotation.InterfaceStability; - -/** - * This feature is not yet supported. - * Options for {@link AdminClient#createTopics(Collection)}. - * - * The API of this class is evolving, see {@link AdminClient} for details. - */ -@InterfaceStability.Evolving -public class CreateTopicsOptions extends AbstractOptions { - - private boolean validateOnly = false; - - /** - * Set the request timeout in milliseconds for this operation or {@code null} if the default request timeout for the - * AdminClient should be used. 
- * - */ - public CreateTopicsOptions timeoutMs(Integer timeoutMs) { - this.timeoutMs = timeoutMs; - return this; - } - - /** - * Set to true if the request should be validated without creating the topic. - */ - public CreateTopicsOptions validateOnly(boolean validateOnly) { - this.validateOnly = validateOnly; - return this; - } - - /** - * Return true if the request should be validated without creating the topic. - */ - public boolean shouldValidateOnly() { - return validateOnly; - } - -} diff --git a/clients/src/main/java/org/oracle/okafka/clients/admin/CreateTopicsResult.java b/clients/src/main/java/org/oracle/okafka/clients/admin/CreateTopicsResult.java index 471f8f6..afe67f9 100644 --- a/clients/src/main/java/org/oracle/okafka/clients/admin/CreateTopicsResult.java +++ b/clients/src/main/java/org/oracle/okafka/clients/admin/CreateTopicsResult.java @@ -1,9 +1,9 @@ /* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ + ** OKafka Java Client version 23.4. + ** + ** Copyright (c) 2019, 2024 Oracle and/or its affiliates. + ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. + */ /* * Licensed to the Apache Software Foundation (ASF) under one or more @@ -23,37 +23,22 @@ */ package org.oracle.okafka.clients.admin; +import org.apache.kafka.common.KafkaFuture; +import org.apache.kafka.common.annotation.InterfaceStability; import java.util.Collection; import java.util.Map; -import org.oracle.okafka.common.KafkaFuture; -import org.oracle.okafka.common.annotation.InterfaceStability; - /** - * The result of {@link AdminClient#createTopics(Collection)}. + * The result of {@link Admin#createTopics(Collection)}. * - * The API of this class is evolving, see {@link AdminClient} for details. + * The API of this class is evolving, see {@link Admin} for details. */ @InterfaceStability.Evolving -public class CreateTopicsResult { - private final Map> futures; - - CreateTopicsResult(Map> futures) { - this.futures = futures; - } - - /** - * Return a map from topic names to futures, which can be used to check the status of individual - * topic creations. - */ - public Map> values() { - return futures; - } - - /** - * Return a future which succeeds if all the topic creations succeed. - */ - public KafkaFuture all() { - return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0])); - } +public class CreateTopicsResult extends org.apache.kafka.clients.admin.CreateTopicsResult { + + public CreateTopicsResult(Map> futures) + { + super(futures); + } + } diff --git a/clients/src/main/java/org/oracle/okafka/clients/admin/DeleteAclsOptions.java b/clients/src/main/java/org/oracle/okafka/clients/admin/DeleteAclsOptions.java deleted file mode 100644 index 0e35f48..0000000 --- a/clients/src/main/java/org/oracle/okafka/clients/admin/DeleteAclsOptions.java +++ /dev/null @@ -1,50 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
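Because `CreateTopicsResult` now simply extends `org.apache.kafka.clients.admin.CreateTopicsResult`, topic creation can be driven through the standard Kafka `Admin` futures API. A minimal sketch, assuming an `Admin` instance configured as in the earlier sketch; the topic name and partition count are chosen purely for illustration.

```java
// Illustrative sketch only -- not part of this patch.
import java.util.Collections;

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.CreateTopicsResult;
import org.apache.kafka.clients.admin.NewTopic;

public class CreateTopicSketch {
    public static void createTopic(Admin admin) throws Exception {
        // name, partitions, replication factor (replication factor is nominal here)
        NewTopic topic = new NewTopic("TXEQ", 5, (short) 1);
        CreateTopicsResult result = admin.createTopics(Collections.singletonList(topic));
        // The futures come from the Apache Kafka base class this patch now extends.
        result.all().get();
    }
}
```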
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.clients.admin; - -import java.util.Collection; - -import org.oracle.okafka.common.annotation.InterfaceStability; - -/** - * Options for the {@link AdminClient#deleteAcls(Collection)} call. - * - * The API of this class is evolving, see {@link AdminClient} for details. - */ -@InterfaceStability.Evolving -public class DeleteAclsOptions extends AbstractOptions { - - /** - * Set the request timeout in milliseconds for this operation or {@code null} if the default request timeout for the - * AdminClient should be used. - * - */ - // This method is retained to keep binary compatibility with 0.11 - public DeleteAclsOptions timeoutMs(Integer timeoutMs) { - this.timeoutMs = timeoutMs; - return this; - } - -} diff --git a/clients/src/main/java/org/oracle/okafka/clients/admin/DeleteAclsResult.java b/clients/src/main/java/org/oracle/okafka/clients/admin/DeleteAclsResult.java deleted file mode 100644 index 37cc85f..0000000 --- a/clients/src/main/java/org/oracle/okafka/clients/admin/DeleteAclsResult.java +++ /dev/null @@ -1,41 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * 04/20/2020: This file is modified to support Kafka Java Client compatability to Oracle Transactional Event Queues. - * - */ - -package org.oracle.okafka.clients.admin; - -import org.oracle.okafka.common.annotation.InterfaceStability; - -/** - * The result of the {@link AdminClient#deleteAcls(Collection)} call. - * - * The API of this class is evolving, see {@link AdminClient} for details. 
- */ -@InterfaceStability.Evolving -public class DeleteAclsResult { -} diff --git a/clients/src/main/java/org/oracle/okafka/clients/admin/DeleteConsumerGroupsOptions.java b/clients/src/main/java/org/oracle/okafka/clients/admin/DeleteConsumerGroupsOptions.java deleted file mode 100644 index a8a3fd9..0000000 --- a/clients/src/main/java/org/oracle/okafka/clients/admin/DeleteConsumerGroupsOptions.java +++ /dev/null @@ -1,38 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.oracle.okafka.clients.admin; - -import java.util.Collection; - -import org.oracle.okafka.common.annotation.InterfaceStability; - -/** - * Options for the {@link AdminClient#deleteConsumerGroups(Collection)} call. - * - * The API of this class is evolving, see {@link AdminClient} for details. - */ -@InterfaceStability.Evolving -public class DeleteConsumerGroupsOptions extends AbstractOptions { - -} diff --git a/clients/src/main/java/org/oracle/okafka/clients/admin/DeleteConsumerGroupsResult.java b/clients/src/main/java/org/oracle/okafka/clients/admin/DeleteConsumerGroupsResult.java deleted file mode 100644 index 052a8ae..0000000 --- a/clients/src/main/java/org/oracle/okafka/clients/admin/DeleteConsumerGroupsResult.java +++ /dev/null @@ -1,60 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.oracle.okafka.clients.admin; - -import java.util.Collection; -import java.util.Map; - -import org.oracle.okafka.common.KafkaFuture; -import org.oracle.okafka.common.annotation.InterfaceStability; - -/** - * The result of the {@link AdminClient#deleteConsumerGroups(Collection)} call. - * - * The API of this class is evolving, see {@link AdminClient} for details. - */ -@InterfaceStability.Evolving -public class DeleteConsumerGroupsResult { - private final Map> futures; - - DeleteConsumerGroupsResult(final Map> futures) { - this.futures = futures; - } - - /** - * Return a map from group id to futures which can be used to check the status of - * individual deletions. - */ - public Map> deletedGroups() { - return futures; - } - - /** - * Return a future which succeeds only if all the consumer group deletions succeed. - */ - public KafkaFuture all() { - return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0])); - } -} diff --git a/clients/src/main/java/org/oracle/okafka/clients/admin/DeleteRecordsOptions.java b/clients/src/main/java/org/oracle/okafka/clients/admin/DeleteRecordsOptions.java deleted file mode 100644 index a1eff7b..0000000 --- a/clients/src/main/java/org/oracle/okafka/clients/admin/DeleteRecordsOptions.java +++ /dev/null @@ -1,39 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.clients.admin; - -import java.util.Map; - -import org.oracle.okafka.common.annotation.InterfaceStability; - -/** - * Options for {@link AdminClient#deleteRecords(Map, DeleteRecordsOptions)}. - * - * The API of this class is evolving, see {@link AdminClient} for details. - */ -@InterfaceStability.Evolving -public class DeleteRecordsOptions extends AbstractOptions { - -} diff --git a/clients/src/main/java/org/oracle/okafka/clients/admin/DeleteRecordsResult.java b/clients/src/main/java/org/oracle/okafka/clients/admin/DeleteRecordsResult.java deleted file mode 100644 index 8389957..0000000 --- a/clients/src/main/java/org/oracle/okafka/clients/admin/DeleteRecordsResult.java +++ /dev/null @@ -1,61 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.clients.admin; - -import java.util.Map; - -import org.oracle.okafka.common.KafkaFuture; -import org.oracle.okafka.common.TopicPartition; -import org.oracle.okafka.common.annotation.InterfaceStability; - -/** - * The result of the {@link AdminClient#deleteRecords(Map)} call. - * - * The API of this class is evolving, see {@link AdminClient} for details. - */ -@InterfaceStability.Evolving -public class DeleteRecordsResult { - - private final Map> futures; - - public DeleteRecordsResult(Map> futures) { - this.futures = futures; - } - - /** - * Return a map from topic partition to futures which can be used to check the status of - * individual deletions. - */ - public Map> lowWatermarks() { - return futures; - } - - /** - * Return a future which succeeds only if all the records deletions succeed. - */ - public KafkaFuture all() { - return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0])); - } -} diff --git a/clients/src/main/java/org/oracle/okafka/clients/admin/DeleteTopicsOptions.java b/clients/src/main/java/org/oracle/okafka/clients/admin/DeleteTopicsOptions.java index bcca17e..97f4549 100644 --- a/clients/src/main/java/org/oracle/okafka/clients/admin/DeleteTopicsOptions.java +++ b/clients/src/main/java/org/oracle/okafka/clients/admin/DeleteTopicsOptions.java @@ -1,9 +1,9 @@ /* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ + ** OKafka Java Client version 23.4. + ** + ** Copyright (c) 2019, 2024 Oracle and/or its affiliates. + ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. + */ /* * Licensed to the Apache Software Foundation (ASF) under one or more @@ -24,28 +24,16 @@ package org.oracle.okafka.clients.admin; -import java.util.Collection; +import org.apache.kafka.common.annotation.InterfaceStability; -import org.oracle.okafka.common.annotation.InterfaceStability; +import java.util.Collection; /** - * This feature is not yet supported. - * Options for {@link AdminClient#deleteTopics(Collection)}. + * Options for {@link Admin#deleteTopics(Collection)}. * - * The API of this class is evolving, see {@link AdminClient} for details. + * The API of this class is evolving, see {@link Admin} for details. */ @InterfaceStability.Evolving -public class DeleteTopicsOptions extends AbstractOptions { - - /** - * Set the request timeout in milliseconds for this operation or {@code null} if the default request timeout for the - * AdminClient should be used. 
- * - */ - // This method is retained to keep binary compatibility with 0.11 - public DeleteTopicsOptions timeoutMs(Integer timeoutMs) { - this.timeoutMs = timeoutMs; - return this; - } +public class DeleteTopicsOptions extends org.apache.kafka.clients.admin.DeleteTopicsOptions { } diff --git a/clients/src/main/java/org/oracle/okafka/clients/admin/DeleteTopicsResult.java b/clients/src/main/java/org/oracle/okafka/clients/admin/DeleteTopicsResult.java index 717cd5f..7d591e4 100644 --- a/clients/src/main/java/org/oracle/okafka/clients/admin/DeleteTopicsResult.java +++ b/clients/src/main/java/org/oracle/okafka/clients/admin/DeleteTopicsResult.java @@ -1,9 +1,9 @@ /* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ + ** OKafka Java Client version 23.4. + ** + ** Copyright (c) 2019, 2024 Oracle and/or its affiliates. + ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. + */ /* * Licensed to the Apache Software Foundation (ASF) under one or more @@ -24,16 +24,16 @@ package org.oracle.okafka.clients.admin; +import org.apache.kafka.common.KafkaFuture; +import org.apache.kafka.common.annotation.InterfaceStability; + import java.util.Collection; import java.util.Map; -import org.oracle.okafka.common.KafkaFuture; -import org.oracle.okafka.common.annotation.InterfaceStability; - /** - * The result of the {@link AdminClient#deleteTopics(Collection)} call. + * The result of the {@link Admin#deleteTopics(Collection)} call. * - * The API of this class is evolving, see {@link AdminClient} for details. + * The API of this class is evolving, see {@link Admin} for details. */ @InterfaceStability.Evolving public class DeleteTopicsResult { diff --git a/clients/src/main/java/org/oracle/okafka/clients/admin/DeletedRecords.java b/clients/src/main/java/org/oracle/okafka/clients/admin/DeletedRecords.java deleted file mode 100644 index ba159c7..0000000 --- a/clients/src/main/java/org/oracle/okafka/clients/admin/DeletedRecords.java +++ /dev/null @@ -1,54 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.clients.admin; - -import org.oracle.okafka.common.annotation.InterfaceStability; - -/** - * Represents information about deleted records - * - * The API for this class is still evolving and we may break compatibility in minor releases, if necessary. 
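Likewise, with `DeleteTopicsOptions` now extending the `org.apache.kafka.clients.admin` class, topic deletion goes through the standard `Admin` API, and the okafka options subclass can be passed wherever the Apache Kafka type is expected. A minimal sketch under the same assumptions as above.

```java
// Illustrative sketch only -- not part of this patch.
import java.util.Collections;

import org.apache.kafka.clients.admin.Admin;
import org.oracle.okafka.clients.admin.DeleteTopicsOptions;

public class DeleteTopicSketch {
    public static void deleteTopic(Admin admin, String topic) throws Exception {
        DeleteTopicsOptions options = new DeleteTopicsOptions();
        options.timeoutMs(10_000); // inherited from the Apache Kafka base class
        admin.deleteTopics(Collections.singletonList(topic), options).all().get();
    }
}
```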
- */ -@InterfaceStability.Evolving -public class DeletedRecords { - - private final long lowWatermark; - - /** - * Create an instance of this class with the provided parameters. - * - * @param lowWatermark "low watermark" for the topic partition on which the deletion was executed - */ - public DeletedRecords(long lowWatermark) { - this.lowWatermark = lowWatermark; - } - - /** - * Return the "low watermark" for the topic partition on which the deletion was executed - */ - public long lowWatermark() { - return lowWatermark; - } -} diff --git a/clients/src/main/java/org/oracle/okafka/clients/admin/DescribeAclsOptions.java b/clients/src/main/java/org/oracle/okafka/clients/admin/DescribeAclsOptions.java deleted file mode 100644 index 41c5932..0000000 --- a/clients/src/main/java/org/oracle/okafka/clients/admin/DescribeAclsOptions.java +++ /dev/null @@ -1,48 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.clients.admin; - -import org.oracle.okafka.common.annotation.InterfaceStability; - -/** - * Options for {@link AdminClient#describeAcls(AclBindingFilter)}. - * - * The API of this class is evolving, see {@link AdminClient} for details. - */ -@InterfaceStability.Evolving -public class DescribeAclsOptions extends AbstractOptions { - - /** - * Set the request timeout in milliseconds for this operation or {@code null} if the default request timeout for the - * AdminClient should be used. - * - */ - // This method is retained to keep binary compatibility with 0.11 - public DescribeAclsOptions timeoutMs(Integer timeoutMs) { - this.timeoutMs = timeoutMs; - return this; - } - -} diff --git a/clients/src/main/java/org/oracle/okafka/clients/admin/DescribeAclsResult.java b/clients/src/main/java/org/oracle/okafka/clients/admin/DescribeAclsResult.java deleted file mode 100644 index 41cc524..0000000 --- a/clients/src/main/java/org/oracle/okafka/clients/admin/DescribeAclsResult.java +++ /dev/null @@ -1,42 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * 04/20/2020: This file is modified to support Kafka Java Client compatability to Oracle Transactional Event Queues. - * - */ - - -package org.oracle.okafka.clients.admin; - -import org.oracle.okafka.common.annotation.InterfaceStability; - -/** - * The result of the {@link KafkaAdminClient#describeAcls(AclBindingFilter)} call. - * - * The API of this class is evolving, see {@link AdminClient} for details. - */ -@InterfaceStability.Evolving -public class DescribeAclsResult { -} diff --git a/clients/src/main/java/org/oracle/okafka/clients/admin/DescribeClusterOptions.java b/clients/src/main/java/org/oracle/okafka/clients/admin/DescribeClusterOptions.java deleted file mode 100644 index c47ab1c..0000000 --- a/clients/src/main/java/org/oracle/okafka/clients/admin/DescribeClusterOptions.java +++ /dev/null @@ -1,48 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.clients.admin; - -import org.oracle.okafka.common.annotation.InterfaceStability; - -/** - * Options for {@link AdminClient#describeCluster()}. - * - * The API of this class is evolving, see {@link AdminClient} for details. - */ -@InterfaceStability.Evolving -public class DescribeClusterOptions extends AbstractOptions { - - /** - * Set the request timeout in milliseconds for this operation or {@code null} if the default request timeout for the - * AdminClient should be used. - * - */ - // This method is retained to keep binary compatibility with 0.11 - public DescribeClusterOptions timeoutMs(Integer timeoutMs) { - this.timeoutMs = timeoutMs; - return this; - } - -} diff --git a/clients/src/main/java/org/oracle/okafka/clients/admin/DescribeClusterResult.java b/clients/src/main/java/org/oracle/okafka/clients/admin/DescribeClusterResult.java deleted file mode 100644 index acbd0fa..0000000 --- a/clients/src/main/java/org/oracle/okafka/clients/admin/DescribeClusterResult.java +++ /dev/null @@ -1,74 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
-*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.clients.admin; - -import java.util.Collection; - -import org.oracle.okafka.common.KafkaFuture; -import org.oracle.okafka.common.Node; -import org.oracle.okafka.common.annotation.InterfaceStability; - -/** - * The result of the {@link KafkaAdminClient#describeCluster()} call. - * - * The API of this class is evolving, see {@link AdminClient} for details. - */ -@InterfaceStability.Evolving -public class DescribeClusterResult { - private final KafkaFuture> nodes; - private final KafkaFuture controller; - private final KafkaFuture clusterId; - - DescribeClusterResult(KafkaFuture> nodes, - KafkaFuture controller, - KafkaFuture clusterId) { - this.nodes = nodes; - this.controller = controller; - this.clusterId = clusterId; - } - - /** - * Returns a future which yields a collection of nodes. - */ - public KafkaFuture> nodes() { - return nodes; - } - - /** - * Returns a future which yields the current controller id. - * Note that this may yield null, if the controller ID is not yet known. - */ - public KafkaFuture controller() { - return controller; - } - - /** - * Returns a future which yields the current cluster id. The future value will be non-null if the - * broker version is 0.10.1.0 or higher and null otherwise. - */ - public KafkaFuture clusterId() { - return clusterId; - } -} diff --git a/clients/src/main/java/org/oracle/okafka/clients/admin/DescribeConfigsOptions.java b/clients/src/main/java/org/oracle/okafka/clients/admin/DescribeConfigsOptions.java deleted file mode 100644 index fc54a10..0000000 --- a/clients/src/main/java/org/oracle/okafka/clients/admin/DescribeConfigsOptions.java +++ /dev/null @@ -1,67 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.oracle.okafka.clients.admin; - -import java.util.Collection; - -import org.oracle.okafka.common.annotation.InterfaceStability; - -/** - * Options for {@link AdminClient#describeConfigs(Collection)}. - * - * The API of this class is evolving, see {@link AdminClient} for details. - */ -@InterfaceStability.Evolving -public class DescribeConfigsOptions extends AbstractOptions { - - private boolean includeSynonyms = false; - - /** - * Set the request timeout in milliseconds for this operation or {@code null} if the default request timeout for the - * AdminClient should be used. - * - */ - // This method is retained to keep binary compatibility with 0.11 - public DescribeConfigsOptions timeoutMs(Integer timeoutMs) { - this.timeoutMs = timeoutMs; - return this; - } - - /** - * Return true if synonym configs should be returned in the response. - */ - public boolean includeSynonyms() { - return includeSynonyms; - } - - /** - * Set to true if synonym configs should be returned in the response. - */ - public DescribeConfigsOptions includeSynonyms(boolean includeSynonyms) { - this.includeSynonyms = includeSynonyms; - return this; - } - -} diff --git a/clients/src/main/java/org/oracle/okafka/clients/admin/DescribeConfigsResult.java b/clients/src/main/java/org/oracle/okafka/clients/admin/DescribeConfigsResult.java deleted file mode 100644 index 91b35dd..0000000 --- a/clients/src/main/java/org/oracle/okafka/clients/admin/DescribeConfigsResult.java +++ /dev/null @@ -1,41 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * 04/20/2020: This file is modified to support Kafka Java Client compatability to Oracle Transactional Event Queues. - * - */ - -package org.oracle.okafka.clients.admin; - -import org.oracle.okafka.common.annotation.InterfaceStability; - -/** - * The result of the {@link KafkaAdminClient#describeConfigs(Collection)} call. - * - * The API of this class is evolving, see {@link AdminClient} for details. - */ -@InterfaceStability.Evolving -public class DescribeConfigsResult { -} diff --git a/clients/src/main/java/org/oracle/okafka/clients/admin/DescribeConsumerGroupsOptions.java b/clients/src/main/java/org/oracle/okafka/clients/admin/DescribeConsumerGroupsOptions.java deleted file mode 100644 index a0bf698..0000000 --- a/clients/src/main/java/org/oracle/okafka/clients/admin/DescribeConsumerGroupsOptions.java +++ /dev/null @@ -1,38 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. 
-** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.clients.admin; - -import java.util.Collection; - -import org.oracle.okafka.common.annotation.InterfaceStability; - -/** - * Options for {@link AdminClient#describeConsumerGroups(Collection, DescribeConsumerGroupsOptions)}. - *
- * The API of this class is evolving, see {@link AdminClient} for details. - */ -@InterfaceStability.Evolving -public class DescribeConsumerGroupsOptions extends AbstractOptions { -} \ No newline at end of file diff --git a/clients/src/main/java/org/oracle/okafka/clients/admin/DescribeConsumerGroupsResult.java b/clients/src/main/java/org/oracle/okafka/clients/admin/DescribeConsumerGroupsResult.java deleted file mode 100644 index 5de8d2f..0000000 --- a/clients/src/main/java/org/oracle/okafka/clients/admin/DescribeConsumerGroupsResult.java +++ /dev/null @@ -1,41 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * 04/20/2020: This file is modified to support Kafka Java Client compatability to Oracle Transactional Event Queues. - * - */ - -package org.oracle.okafka.clients.admin; - -import org.oracle.okafka.common.annotation.InterfaceStability; - -/** - * The result of the {@link KafkaAdminClient#describeConsumerGroups(Collection, DescribeConsumerGroupsOptions)}} call. - * - * The API of this class is evolving, see {@link AdminClient} for details. - */ -@InterfaceStability.Evolving -public class DescribeConsumerGroupsResult { -} diff --git a/clients/src/main/java/org/oracle/okafka/clients/admin/DescribeDelegationTokenOptions.java b/clients/src/main/java/org/oracle/okafka/clients/admin/DescribeDelegationTokenOptions.java deleted file mode 100644 index 9797c85..0000000 --- a/clients/src/main/java/org/oracle/okafka/clients/admin/DescribeDelegationTokenOptions.java +++ /dev/null @@ -1,41 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -/* - * 04/20/2020: This file is modified to support Kafka Java Client compatability to Oracle Transactional Event Queues. - * - */ - -package org.oracle.okafka.clients.admin; - -import org.oracle.okafka.common.annotation.InterfaceStability; - -/** - * Options for {@link AdminClient#describeDelegationToken(DescribeDelegationTokenOptions)}. - * - * The API of this class is evolving, see {@link AdminClient} for details. - */ -@InterfaceStability.Evolving -public class DescribeDelegationTokenOptions extends AbstractOptions { -} diff --git a/clients/src/main/java/org/oracle/okafka/clients/admin/DescribeDelegationTokenResult.java b/clients/src/main/java/org/oracle/okafka/clients/admin/DescribeDelegationTokenResult.java deleted file mode 100644 index 0875617..0000000 --- a/clients/src/main/java/org/oracle/okafka/clients/admin/DescribeDelegationTokenResult.java +++ /dev/null @@ -1,41 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * 04/20/2020: This file is modified to support Kafka Java Client compatability to Oracle Transactional Event Queues. - * - */ - -package org.oracle.okafka.clients.admin; - -import org.oracle.okafka.common.annotation.InterfaceStability; - -/** - * The result of the {@link KafkaAdminClient#describeDelegationToken(DescribeDelegationTokenOptions)} call. - * - * The API of this class is evolving, see {@link AdminClient} for details. - */ -@InterfaceStability.Evolving -public class DescribeDelegationTokenResult { -} diff --git a/clients/src/main/java/org/oracle/okafka/clients/admin/DescribeLogDirsOptions.java b/clients/src/main/java/org/oracle/okafka/clients/admin/DescribeLogDirsOptions.java deleted file mode 100644 index 5943af0..0000000 --- a/clients/src/main/java/org/oracle/okafka/clients/admin/DescribeLogDirsOptions.java +++ /dev/null @@ -1,40 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.clients.admin; - -import java.util.Collection; - -import org.oracle.okafka.common.annotation.InterfaceStability; - - -/** - * Options for {@link AdminClient#describeLogDirs(Collection)} - * - * The API of this class is evolving, see {@link AdminClient} for details. - */ -@InterfaceStability.Evolving -public class DescribeLogDirsOptions extends AbstractOptions { - -} diff --git a/clients/src/main/java/org/oracle/okafka/clients/admin/DescribeLogDirsResult.java b/clients/src/main/java/org/oracle/okafka/clients/admin/DescribeLogDirsResult.java deleted file mode 100644 index 637b91d..0000000 --- a/clients/src/main/java/org/oracle/okafka/clients/admin/DescribeLogDirsResult.java +++ /dev/null @@ -1,41 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * 04/20/2020: This file is modified to support Kafka Java Client compatability to Oracle Transactional Event Queues. - * - */ - -package org.oracle.okafka.clients.admin; - -import org.oracle.okafka.common.annotation.InterfaceStability; - -/** - * The result of the {@link AdminClient#describeLogDirs(Collection)} call. - * - * The API of this class is evolving, see {@link AdminClient} for details. - */ -@InterfaceStability.Evolving -public class DescribeLogDirsResult { -} diff --git a/clients/src/main/java/org/oracle/okafka/clients/admin/DescribeReplicaLogDirsOptions.java b/clients/src/main/java/org/oracle/okafka/clients/admin/DescribeReplicaLogDirsOptions.java deleted file mode 100644 index 0f257ba..0000000 --- a/clients/src/main/java/org/oracle/okafka/clients/admin/DescribeReplicaLogDirsOptions.java +++ /dev/null @@ -1,39 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.clients.admin; - -import java.util.Collection; - -import org.oracle.okafka.common.annotation.InterfaceStability; - -/** - * Options for {@link AdminClient#describeReplicaLogDirs(Collection)}. - * - * The API of this class is evolving, see {@link AdminClient} for details. - */ -@InterfaceStability.Evolving -public class DescribeReplicaLogDirsOptions extends AbstractOptions { - -} diff --git a/clients/src/main/java/org/oracle/okafka/clients/admin/DescribeReplicaLogDirsResult.java b/clients/src/main/java/org/oracle/okafka/clients/admin/DescribeReplicaLogDirsResult.java deleted file mode 100644 index 14456fa..0000000 --- a/clients/src/main/java/org/oracle/okafka/clients/admin/DescribeReplicaLogDirsResult.java +++ /dev/null @@ -1,42 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * 04/20/2020: This file is modified to support Kafka Java Client compatability to Oracle Transactional Event Queues. - * - */ - -package org.oracle.okafka.clients.admin; - -import org.oracle.okafka.common.annotation.InterfaceStability; - -/** - * The result of {@link AdminClient#describeReplicaLogDirs(Collection)}. - * - * The API of this class is evolving, see {@link AdminClient} for details. - */ -@InterfaceStability.Evolving -public class DescribeReplicaLogDirsResult { - -} diff --git a/clients/src/main/java/org/oracle/okafka/clients/admin/DescribeTopicsOptions.java b/clients/src/main/java/org/oracle/okafka/clients/admin/DescribeTopicsOptions.java deleted file mode 100644 index 8dfc7f8..0000000 --- a/clients/src/main/java/org/oracle/okafka/clients/admin/DescribeTopicsOptions.java +++ /dev/null @@ -1,50 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.clients.admin; - -import java.util.Collection; - -import org.oracle.okafka.common.annotation.InterfaceStability; - -/** - * Options for {@link AdminClient#describeTopics(Collection)}. - * - * The API of this class is evolving, see {@link AdminClient} for details. - */ -@InterfaceStability.Evolving -public class DescribeTopicsOptions extends AbstractOptions { - - /** - * Set the request timeout in milliseconds for this operation or {@code null} if the default request timeout for the - * AdminClient should be used. - * - */ - // This method is retained to keep binary compatibility with 0.11 - public DescribeTopicsOptions timeoutMs(Integer timeoutMs) { - this.timeoutMs = timeoutMs; - return this; - } - -} diff --git a/clients/src/main/java/org/oracle/okafka/clients/admin/DescribeTopicsResult.java b/clients/src/main/java/org/oracle/okafka/clients/admin/DescribeTopicsResult.java deleted file mode 100644 index 9f5791b..0000000 --- a/clients/src/main/java/org/oracle/okafka/clients/admin/DescribeTopicsResult.java +++ /dev/null @@ -1,41 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * 04/20/2020: This file is modified to support Kafka Java Client compatability to Oracle Transactional Event Queues. - * - */ - -package org.oracle.okafka.clients.admin; - -import org.oracle.okafka.common.annotation.InterfaceStability; - -/** - * The result of the {@link KafkaAdminClient#describeTopics(Collection)} call. - * - * The API of this class is evolving, see {@link AdminClient} for details. 
- */ -@InterfaceStability.Evolving -public class DescribeTopicsResult { -} diff --git a/clients/src/main/java/org/oracle/okafka/clients/admin/ExpireDelegationTokenOptions.java b/clients/src/main/java/org/oracle/okafka/clients/admin/ExpireDelegationTokenOptions.java deleted file mode 100644 index d8c24e3..0000000 --- a/clients/src/main/java/org/oracle/okafka/clients/admin/ExpireDelegationTokenOptions.java +++ /dev/null @@ -1,46 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.clients.admin; - -import org.oracle.okafka.common.annotation.InterfaceStability; - -/** - * Options for {@link AdminClient#expireDelegationToken(byte[], ExpireDelegationTokenOptions)}. - * - * The API of this class is evolving, see {@link AdminClient} for details. - */ -@InterfaceStability.Evolving -public class ExpireDelegationTokenOptions extends AbstractOptions { - private long expiryTimePeriodMs = -1L; - - public ExpireDelegationTokenOptions expiryTimePeriodMs(long expiryTimePeriodMs) { - this.expiryTimePeriodMs = expiryTimePeriodMs; - return this; - } - - public long expiryTimePeriodMs() { - return expiryTimePeriodMs; - } -} diff --git a/clients/src/main/java/org/oracle/okafka/clients/admin/ExpireDelegationTokenResult.java b/clients/src/main/java/org/oracle/okafka/clients/admin/ExpireDelegationTokenResult.java deleted file mode 100644 index 032f886..0000000 --- a/clients/src/main/java/org/oracle/okafka/clients/admin/ExpireDelegationTokenResult.java +++ /dev/null @@ -1,49 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.oracle.okafka.clients.admin; - -import org.oracle.okafka.common.KafkaFuture; -import org.oracle.okafka.common.annotation.InterfaceStability; - -/** - * The result of the {@link KafkaAdminClient#expireDelegationToken(byte[], ExpireDelegationTokenOptions)} call. - * - * The API of this class is evolving, see {@link AdminClient} for details. - */ -@InterfaceStability.Evolving -public class ExpireDelegationTokenResult { - private final KafkaFuture expiryTimestamp; - - ExpireDelegationTokenResult(KafkaFuture expiryTimestamp) { - this.expiryTimestamp = expiryTimestamp; - } - - /** - * Returns a future which yields expiry timestamp - */ - public KafkaFuture expiryTimestamp() { - return expiryTimestamp; - } -} diff --git a/clients/src/main/java/org/oracle/okafka/clients/admin/KafkaAdminClient.java b/clients/src/main/java/org/oracle/okafka/clients/admin/KafkaAdminClient.java index b835996..b0d2371 100644 --- a/clients/src/main/java/org/oracle/okafka/clients/admin/KafkaAdminClient.java +++ b/clients/src/main/java/org/oracle/okafka/clients/admin/KafkaAdminClient.java @@ -1,9 +1,9 @@ /* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ + ** OKafka Java Client version 23.4. + ** + ** Copyright (c) 2019, 2024 Oracle and/or its affiliates. + ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. + */ /* * Licensed to the Apache Software Foundation (ASF) under one or more @@ -29,59 +29,181 @@ package org.oracle.okafka.clients.admin; -import org.oracle.okafka.clients.ClientRequest; -import org.oracle.okafka.clients.ClientResponse; -import org.oracle.okafka.clients.ClientUtils; +import org.apache.kafka.clients.ClientDnsLookup; +import org.apache.kafka.clients.ClientRequest; +import org.apache.kafka.clients.ClientResponse; +import org.apache.kafka.clients.ClientUtils; +import org.oracle.okafka.clients.admin.AdminClientConfig; +import org.oracle.okafka.clients.admin.KafkaAdminClient.Call; +import org.apache.kafka.clients.admin.AbortTransactionOptions; +import org.apache.kafka.clients.admin.AbortTransactionResult; +import org.apache.kafka.clients.admin.AbortTransactionSpec; +import org.apache.kafka.clients.admin.AlterClientQuotasOptions; +import org.apache.kafka.clients.admin.AlterClientQuotasResult; +import org.apache.kafka.clients.admin.AlterConfigOp; +import org.apache.kafka.clients.admin.AlterConfigsOptions; +import org.apache.kafka.clients.admin.AlterConfigsResult; +import org.apache.kafka.clients.admin.AlterConsumerGroupOffsetsOptions; +import org.apache.kafka.clients.admin.AlterConsumerGroupOffsetsResult; +import org.apache.kafka.clients.admin.AlterPartitionReassignmentsOptions; +import org.apache.kafka.clients.admin.AlterPartitionReassignmentsResult; +import org.apache.kafka.clients.admin.AlterReplicaLogDirsOptions; +import org.apache.kafka.clients.admin.AlterReplicaLogDirsResult; +import org.apache.kafka.clients.admin.AlterUserScramCredentialsOptions; +import org.apache.kafka.clients.admin.AlterUserScramCredentialsResult; +import org.apache.kafka.clients.admin.Config; +import org.apache.kafka.clients.admin.CreateAclsOptions; +import org.apache.kafka.clients.admin.CreateAclsResult; +import org.apache.kafka.clients.admin.CreateDelegationTokenOptions; +import org.apache.kafka.clients.admin.CreateDelegationTokenResult; +import 
org.apache.kafka.clients.admin.CreatePartitionsOptions; +import org.apache.kafka.clients.admin.CreatePartitionsResult; +import org.apache.kafka.clients.admin.CreateTopicsOptions; +import org.apache.kafka.clients.admin.CreateTopicsResult; +import org.apache.kafka.clients.admin.DeleteAclsOptions; +import org.apache.kafka.clients.admin.DeleteAclsResult; +import org.apache.kafka.clients.admin.DeleteConsumerGroupOffsetsOptions; +import org.apache.kafka.clients.admin.DeleteConsumerGroupOffsetsResult; +import org.apache.kafka.clients.admin.DeleteConsumerGroupsOptions; +import org.apache.kafka.clients.admin.DeleteConsumerGroupsResult; +import org.apache.kafka.clients.admin.DeleteRecordsOptions; +import org.apache.kafka.clients.admin.DeleteRecordsResult; +import org.apache.kafka.clients.admin.DeleteTopicsOptions; +import org.apache.kafka.clients.admin.DeleteTopicsResult; +import org.apache.kafka.clients.admin.DescribeAclsOptions; +import org.apache.kafka.clients.admin.DescribeAclsResult; +import org.apache.kafka.clients.admin.DescribeClientQuotasOptions; +import org.apache.kafka.clients.admin.DescribeClientQuotasResult; +import org.apache.kafka.clients.admin.DescribeClusterOptions; +import org.apache.kafka.clients.admin.DescribeClusterResult; +import org.apache.kafka.clients.admin.DescribeConfigsOptions; +import org.apache.kafka.clients.admin.DescribeConfigsResult; +import org.apache.kafka.clients.admin.DescribeConsumerGroupsOptions; +import org.apache.kafka.clients.admin.DescribeConsumerGroupsResult; +import org.apache.kafka.clients.admin.DescribeDelegationTokenOptions; +import org.apache.kafka.clients.admin.DescribeDelegationTokenResult; +import org.apache.kafka.clients.admin.DescribeFeaturesOptions; +import org.apache.kafka.clients.admin.DescribeFeaturesResult; +import org.apache.kafka.clients.admin.DescribeLogDirsOptions; +import org.apache.kafka.clients.admin.DescribeLogDirsResult; +import org.apache.kafka.clients.admin.DescribeMetadataQuorumOptions; +import org.apache.kafka.clients.admin.DescribeMetadataQuorumResult; +import org.apache.kafka.clients.admin.DescribeProducersOptions; +import org.apache.kafka.clients.admin.DescribeProducersResult; +import org.apache.kafka.clients.admin.DescribeReplicaLogDirsOptions; +import org.apache.kafka.clients.admin.DescribeReplicaLogDirsResult; +import org.apache.kafka.clients.admin.DescribeTopicsOptions; +import org.apache.kafka.clients.admin.DescribeTopicsResult; +import org.apache.kafka.clients.admin.DescribeTransactionsOptions; +import org.apache.kafka.clients.admin.DescribeTransactionsResult; +import org.apache.kafka.clients.admin.DescribeUserScramCredentialsOptions; +import org.apache.kafka.clients.admin.DescribeUserScramCredentialsResult; +import org.apache.kafka.clients.admin.ElectLeadersOptions; +import org.apache.kafka.clients.admin.ElectLeadersResult; +import org.apache.kafka.clients.admin.ExpireDelegationTokenOptions; +import org.apache.kafka.clients.admin.ExpireDelegationTokenResult; +import org.apache.kafka.clients.admin.FeatureUpdate; +import org.apache.kafka.clients.admin.FenceProducersOptions; +import org.apache.kafka.clients.admin.FenceProducersResult; +import org.apache.kafka.clients.admin.ListClientMetricsResourcesOptions; +import org.apache.kafka.clients.admin.ListClientMetricsResourcesResult; +import org.apache.kafka.clients.admin.ListConsumerGroupOffsetsOptions; +import org.apache.kafka.clients.admin.ListConsumerGroupOffsetsResult; +import org.apache.kafka.clients.admin.ListConsumerGroupOffsetsSpec; +import 
org.apache.kafka.clients.admin.ListConsumerGroupsOptions; +import org.apache.kafka.clients.admin.ListConsumerGroupsResult; +import org.apache.kafka.clients.admin.ListOffsetsOptions; +import org.apache.kafka.clients.admin.ListOffsetsResult; +import org.apache.kafka.clients.admin.ListPartitionReassignmentsOptions; +import org.apache.kafka.clients.admin.ListPartitionReassignmentsResult; +import org.apache.kafka.clients.admin.ListTopicsResult; +import org.apache.kafka.clients.admin.ListTransactionsOptions; +import org.apache.kafka.clients.admin.ListTransactionsResult; +import org.apache.kafka.clients.admin.NewPartitionReassignment; +import org.apache.kafka.clients.admin.NewPartitions; +import org.apache.kafka.clients.admin.NewTopic; +//import org.apache.kafka.clients.admin.NewTopic; +import org.apache.kafka.clients.admin.OffsetSpec; +import org.apache.kafka.clients.admin.RecordsToDelete; +import org.apache.kafka.clients.admin.RemoveMembersFromConsumerGroupOptions; +import org.apache.kafka.clients.admin.RemoveMembersFromConsumerGroupResult; +import org.apache.kafka.clients.admin.RenewDelegationTokenOptions; +import org.apache.kafka.clients.admin.RenewDelegationTokenResult; +import org.apache.kafka.clients.admin.UnregisterBrokerOptions; +import org.apache.kafka.clients.admin.UnregisterBrokerResult; +import org.apache.kafka.clients.admin.UpdateFeaturesOptions; +import org.apache.kafka.clients.admin.UpdateFeaturesResult; +import org.apache.kafka.clients.admin.UserScramCredentialAlteration; +import org.apache.kafka.clients.admin.CreateTopicsResult.TopicMetadataAndConfig; +/* +import org.apache.kafka.clients.admin.KafkaAdminClient.Call; +import org.apache.kafka.clients.admin.KafkaAdminClient.ControllerNodeProvider; +import org.apache.kafka.clients.admin.KafkaAdminClient.MetadataUpdateNodeIdProvider; +import org.apache.kafka.clients.admin.KafkaAdminClient.NodeProvider; +import org.apache.kafka.clients.admin.KafkaAdminClient.TimeoutProcessor; + */ import org.oracle.okafka.clients.CommonClientConfigs; import org.oracle.okafka.clients.KafkaClient; import org.oracle.okafka.clients.NetworkClient; -import org.oracle.okafka.clients.admin.internals.AdminMetadataManager; +import org.apache.kafka.clients.admin.internals.AdminMetadataManager; +import org.apache.kafka.clients.consumer.OffsetAndMetadata; import org.oracle.okafka.clients.producer.ProducerConfig; -import org.oracle.okafka.common.Cluster; -import org.oracle.okafka.common.KafkaException; -import org.oracle.okafka.common.KafkaFuture; -import org.oracle.okafka.common.Node; -import org.oracle.okafka.common.TopicPartition; -import org.oracle.okafka.common.TopicPartitionReplica; -import org.oracle.okafka.common.acl.AclBinding; -import org.oracle.okafka.common.acl.AclBindingFilter; -import org.oracle.okafka.common.annotation.InterfaceStability; -import org.oracle.okafka.common.config.ConfigResource; +import org.apache.kafka.common.Cluster; +import org.apache.kafka.common.ElectionType; +import org.apache.kafka.common.KafkaException; +import org.apache.kafka.common.KafkaFuture; +import org.apache.kafka.common.Metric; +import org.apache.kafka.common.MetricName; +import org.apache.kafka.common.Node; +import org.apache.kafka.common.TopicCollection; +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.TopicPartitionReplica; +import org.apache.kafka.common.Uuid; +import org.apache.kafka.common.acl.AclBinding; +import org.apache.kafka.common.acl.AclBindingFilter; +import org.apache.kafka.common.annotation.InterfaceStability; +import 
org.apache.kafka.common.config.ConfigResource; import org.oracle.okafka.common.config.SslConfigs; -import org.oracle.okafka.common.errors.ApiException; -import org.oracle.okafka.common.errors.AuthenticationException; +import org.apache.kafka.common.errors.ApiException; +import org.apache.kafka.common.errors.AuthenticationException; +import org.apache.kafka.common.errors.DisconnectException; import org.oracle.okafka.common.errors.FeatureNotSupportedException; -import org.oracle.okafka.common.errors.InvalidConfigurationException; import org.oracle.okafka.common.errors.InvalidLoginCredentialsException; -import org.oracle.okafka.common.errors.InvalidTopicException; -import org.oracle.okafka.common.errors.TimeoutException; -import org.oracle.okafka.common.errors.TopicExistsException; -import org.oracle.okafka.common.internals.KafkaFutureImpl; -import org.oracle.okafka.common.metrics.JmxReporter; -import org.oracle.okafka.common.metrics.MetricConfig; -import org.oracle.okafka.common.metrics.Metrics; -import org.oracle.okafka.common.metrics.MetricsReporter; -import org.oracle.okafka.common.metrics.Sensor; +import org.apache.kafka.common.errors.InvalidTopicException; +import org.apache.kafka.common.errors.RetriableException; +import org.apache.kafka.common.errors.TimeoutException; +import org.apache.kafka.common.errors.UnsupportedVersionException; +import org.apache.kafka.common.internals.KafkaFutureImpl; +import org.apache.kafka.common.metrics.JmxReporter; +import org.apache.kafka.common.metrics.MetricConfig; +import org.apache.kafka.common.metrics.Metrics; +import org.apache.kafka.common.metrics.MetricsReporter; +import org.apache.kafka.common.metrics.Sensor; +import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.quota.ClientQuotaAlteration; +import org.apache.kafka.common.quota.ClientQuotaFilter; import org.oracle.okafka.common.requests.AbstractRequest; -import org.oracle.okafka.common.requests.AbstractResponse; import org.oracle.okafka.common.requests.CreateTopicsRequest; import org.oracle.okafka.common.requests.CreateTopicsResponse; import org.oracle.okafka.common.requests.DeleteTopicsRequest; import org.oracle.okafka.common.requests.DeleteTopicsResponse; import org.oracle.okafka.common.requests.MetadataRequest; import org.oracle.okafka.common.requests.MetadataResponse; -import org.oracle.okafka.common.utils.AppInfoParser; -import org.oracle.okafka.common.utils.KafkaThread; -import org.oracle.okafka.common.utils.LogContext; +import org.oracle.okafka.common.requests.CreateTopicsRequest.TopicDetails; +import org.apache.kafka.common.utils.AppInfoParser; +import org.apache.kafka.common.utils.KafkaThread; +import org.apache.kafka.common.utils.LogContext; import org.oracle.okafka.common.utils.TNSParser; -import org.oracle.okafka.common.utils.Time; +import org.apache.kafka.common.utils.Time; import org.oracle.okafka.clients.admin.internals.AQKafkaAdmin; +import org.oracle.okafka.clients.consumer.ConsumerConfig; import org.slf4j.Logger; -import static org.oracle.okafka.common.utils.Utils.closeQuietly; +import static org.apache.kafka.common.utils.Utils.closeQuietly; import java.net.InetSocketAddress; import java.sql.SQLException; +import java.time.Duration; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -91,6 +213,8 @@ import java.util.LinkedList; import java.util.List; import java.util.Map; +import java.util.Optional; +import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; 
import java.util.concurrent.atomic.AtomicLong; @@ -101,8 +225,8 @@ * is created by invoking one of the {@code create()} methods in * {@code AdminClient}. Users should not refer to this class directly. * - * The API of this class is evolving, see {@link AdminClient} for details. - * Note: Topic name has to be in uppercase wherever used. + * The API of this class is evolving, see {@link AdminClient} for details. Note: + * Topic name has to be in uppercase wherever used. */ @InterfaceStability.Evolving public class KafkaAdminClient extends AdminClient { @@ -146,11 +270,16 @@ public class KafkaAdminClient extends AdminClient { */ private final Time time; + /** + * The timeout to use for a single request. + */ + private final int requestTimeoutMs; + /** * The cluster metadata manager used by the KafkaClient. */ private final AdminMetadataManager metadataManager; - + /** * The metrics for this KafkaAdminClient. */ @@ -192,8 +321,8 @@ public class KafkaAdminClient extends AdminClient { * * @param map The map to get or create the element from. * @param key The key. - * @param The key type. - * @param The value type. + * @param The key type. + * @param The value type. * @return The list value. */ static List getOrCreateListValue(Map> map, K key) { @@ -210,7 +339,7 @@ static List getOrCreateListValue(Map> map, K key) { * * @param futures The collection of KafkaFutureImpl objects. * @param exc The exception - * @param The KafkaFutureImpl result type. + * @param The KafkaFutureImpl result type. */ private static void completeAllExceptionally(Collection> futures, Throwable exc) { for (KafkaFutureImpl future : futures) { @@ -283,11 +412,11 @@ static KafkaAdminClient createInternal(AdminClientConfig config, TimeoutProcesso Time time = Time.SYSTEM; String clientId = generateClientId(config); LogContext logContext = createLogContext(clientId); - KafkaClient client = null; + KafkaClient client = null; try { AdminMetadataManager metadataManager = new AdminMetadataManager(logContext, config.getLong(AdminClientConfig.RETRY_BACKOFF_MS_CONFIG), - config.getLong(AdminClientConfig.METADATA_MAX_AGE_CONFIG)); + config.getLong(AdminClientConfig.METADATA_MAX_AGE_CONFIG), false); List reporters = config .getConfiguredInstances(AdminClientConfig.METRIC_REPORTER_CLASSES_CONFIG, MetricsReporter.class); Map metricTags = Collections.singletonMap("client-id", clientId); @@ -300,8 +429,10 @@ static KafkaAdminClient createInternal(AdminClientConfig config, TimeoutProcesso .tags(metricTags); reporters.add(new JmxReporter(JMX_PREFIX)); metrics = new Metrics(metricConfig, reporters, time); - AQKafkaAdmin admin= new AQKafkaAdmin(logContext, config, time); - client = new NetworkClient(admin, metadataManager.updater(), clientId, + + AQKafkaAdmin admin = new AQKafkaAdmin(logContext, config, metadataManager, time); + + client = new NetworkClient(admin, metadataManager, clientId, config.getLong(AdminClientConfig.RECONNECT_BACKOFF_MS_CONFIG), config.getLong(AdminClientConfig.RECONNECT_BACKOFF_MAX_MS_CONFIG), config.getInt(AdminClientConfig.SEND_BUFFER_CONFIG), @@ -325,7 +456,7 @@ static KafkaAdminClient createInternal(AdminClientConfig config, KafkaClient cli LogContext logContext = createLogContext(clientId); AdminMetadataManager metadataManager = new AdminMetadataManager(logContext, config.getLong(AdminClientConfig.RETRY_BACKOFF_MS_CONFIG), - config.getLong(AdminClientConfig.METADATA_MAX_AGE_CONFIG)); + config.getLong(AdminClientConfig.METADATA_MAX_AGE_CONFIG), false); return new KafkaAdminClient(config, clientId, time, 
metadataManager, metrics, client, null, logContext); } catch (Throwable exc) { closeQuietly(metrics, "Metrics"); @@ -338,44 +469,72 @@ static LogContext createLogContext(String clientId) { } private KafkaAdminClient(AdminClientConfig config, String clientId, Time time, AdminMetadataManager metadataManager, - Metrics metrics, KafkaClient client, TimeoutProcessorFactory timeoutProcessorFactory, - LogContext logContext) throws Exception { + Metrics metrics, KafkaClient client, TimeoutProcessorFactory timeoutProcessorFactory, LogContext logContext) + throws Exception { this.defaultTimeoutMs = config.getInt(AdminClientConfig.REQUEST_TIMEOUT_MS_CONFIG); this.clientId = clientId; this.log = logContext.logger(KafkaAdminClient.class); this.time = time; this.metadataManager = metadataManager; - + this.requestTimeoutMs = config.getInt(AdminClientConfig.REQUEST_TIMEOUT_MS_CONFIG); + List addresses = null; - String serviceName = null; - String instanceName = null; - System.setProperty("oracle.net.tns_admin", config.getString(AdminClientConfig.ORACLE_NET_TNS_ADMIN)); - if( config.getString( CommonClientConfigs.SECURITY_PROTOCOL_CONFIG).equalsIgnoreCase("PLAINTEXT")) - addresses = ClientUtils.parseAndValidateAddresses(config.getList(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG)); - else { - if( config.getString(SslConfigs.TNS_ALIAS) == null) - throw new InvalidLoginCredentialsException("Please provide valid connection string"); - TNSParser parser = new TNSParser(config); - parser.readFile(); - String connStr = parser.getConnectionString(config.getString(SslConfigs.TNS_ALIAS).toUpperCase()); - if (connStr == null) - throw new InvalidLoginCredentialsException("Please provide valid connection string"); - String host = parser.getProperty(connStr, "HOST"); - String portStr = parser.getProperty(connStr, "PORT"); - serviceName = parser.getProperty(connStr, "SERVICE_NAME"); - int port; - if( host == null || portStr == null || serviceName == null) - throw new InvalidLoginCredentialsException("Please provide valid connection string"); - try { - port = Integer.parseInt(portStr); - } catch(NumberFormatException nfe) { - throw new InvalidLoginCredentialsException("Please provide valid connection string"); - } - instanceName = parser.getProperty(connStr, "INSTANCE_NAME"); - addresses = new ArrayList<>(); - addresses.add(new InetSocketAddress(host, port)); - } - metadataManager.update(Cluster.bootstrap(addresses, config, serviceName, instanceName), time.milliseconds()); + String serviceName = null; + String instanceName = null; + System.setProperty("oracle.net.tns_admin", config.getString(ProducerConfig.ORACLE_NET_TNS_ADMIN)); + + if (config.getString(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG).trim().equalsIgnoreCase("PLAINTEXT")) { + + addresses = ClientUtils.parseAndValidateAddresses(config.getList(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG), + ClientDnsLookup.RESOLVE_CANONICAL_BOOTSTRAP_SERVERS_ONLY); + serviceName = config.getString(ConsumerConfig.ORACLE_SERVICE_NAME); + instanceName = config.getString(ConsumerConfig.ORACLE_INSTANCE_NAME); + } else { + if (config.getString(SslConfigs.TNS_ALIAS) == null) + throw new InvalidLoginCredentialsException("Please provide valid connection string"); + TNSParser parser = new TNSParser(config); + parser.readFile(); + String connStr = parser.getConnectionString(config.getString(SslConfigs.TNS_ALIAS).toUpperCase()); + if (connStr == null) + throw new InvalidLoginCredentialsException("Please provide valid connection string"); + String host = parser.getProperty(connStr, "HOST"); + 
String portStr = parser.getProperty(connStr, "PORT"); + serviceName = parser.getProperty(connStr, "SERVICE_NAME"); + int port; + if (host == null || portStr == null || serviceName == null) + throw new InvalidLoginCredentialsException("Please provide valid connection string"); + try { + port = Integer.parseInt(portStr); + } catch (NumberFormatException nfe) { + throw new InvalidLoginCredentialsException("Please provide valid connection string"); + } + instanceName = parser.getProperty(connStr, "INSTANCE_NAME"); + addresses = new ArrayList<>(); + addresses.add(new InetSocketAddress(host, port)); + } + + { // Changes for 2.8.1 :: Create Bootstrap Cluster and pass it to metadata.update + // We must have OKafka Node with Service Name and Instance Name placed in the + // bootstrap cluster. + // For cluster created here, isBootstrapConfigured is not set to TRUE because it + // is not public + + ArrayList bootStrapNodeList = new ArrayList(addresses.size()); + int id = -1; + for (InetSocketAddress inetAddr : addresses) { + org.oracle.okafka.common.Node bootStrapNode = new org.oracle.okafka.common.Node(id--, + inetAddr.getHostName(), inetAddr.getPort(), serviceName, instanceName); + bootStrapNodeList.add((Node) bootStrapNode); + } + Cluster bootStrapCluster = new Cluster(null, bootStrapNodeList, new ArrayList<>(0), Collections.emptySet(), + Collections.emptySet()); + + this.metadataManager.update(bootStrapCluster, time.milliseconds()); + } + + // metadataManager.update(Cluster.bootstrap(addresses, config, serviceName, + // instanceName), time.milliseconds()); + this.metrics = metrics; this.client = client; this.runnable = new AdminClientRunnable(); @@ -386,7 +545,7 @@ private KafkaAdminClient(AdminClientConfig config, String clientId, Time time, A this.maxRetries = config.getInt(AdminClientConfig.RETRIES_CONFIG); this.retryBackoffMs = config.getLong(AdminClientConfig.RETRY_BACKOFF_MS_CONFIG); config.logUnused(); - AppInfoParser.registerAppInfo(JMX_PREFIX, clientId, metrics); + AppInfoParser.registerAppInfo(JMX_PREFIX, clientId, metrics, time.milliseconds()); log.debug("Kafka admin client initialized"); thread.start(); } @@ -405,22 +564,22 @@ public void close(long duration, TimeUnit unit) { while (true) { if (hardShutdownTimeMs.compareAndSet(prev, newHardShutdownTimeMs)) { if (prev == INVALID_SHUTDOWN_TIME) { - log.debug("Initiating close operation."); + log.info("Initiating close operation."); } else { - log.debug("Moving hard shutdown time forward."); + log.info("Moving hard shutdown time forward."); } break; } prev = hardShutdownTimeMs.get(); if (prev < newHardShutdownTimeMs) { - log.debug("Hard shutdown time is already earlier than requested."); + log.info("Hard shutdown time is already earlier than requested."); newHardShutdownTimeMs = prev; break; } } if (log.isDebugEnabled()) { long deltaMs = Math.max(0, newHardShutdownTimeMs - time.milliseconds()); - log.debug("Waiting for the I/O thread to exit. Hard shutdown in {} ms.", deltaMs); + log.info("Waiting for the I/O thread to exit. Hard shutdown in {} ms.", deltaMs); } try { // Wait for the thread to be joined. @@ -529,23 +688,69 @@ protected Node curNode() { * Handle a failure. * * Depending on what the exception is and how many times we have already tried, - * we may choose to fail the Call, or retry it. + * we may choose to fail the Call, or retry it. It is important to print the + * stack traces here in some cases, since they are not necessarily preserved in + * ApiVersionException objects. * * @param now The current time in milliseconds. 
* @param throwable The failure exception. */ final void fail(long now, Throwable throwable) { + if (aborted) { + // If the call was aborted while in flight due to a timeout, deliver a + // TimeoutException. In this case, we do not get any more retries - the call has + // failed. We increment tries anyway in order to display an accurate log + // message. + tries++; + failWithTimeout(now, throwable); + return; + } + // If this is an UnsupportedVersionException that we can retry, do so. Note that + // a + // protocol downgrade will not count against the total number of retries we get + // for + // this RPC. That is why 'tries' is not incremented. + if ((throwable instanceof UnsupportedVersionException) + && handleUnsupportedVersionException((UnsupportedVersionException) throwable)) { + log.debug("{} attempting protocol downgrade and then retry.", this); + runnable.enqueue(this, now); + return; + } + tries++; + nextAllowedTryMs = now + retryBackoffMs; // If the call has timed out, fail. if (calcTimeoutMsRemainingAsInt(now, deadlineMs) < 0) { + failWithTimeout(now, throwable); + return; + } + // If the exception is not retriable, fail. + if (!(throwable instanceof RetriableException)) { if (log.isDebugEnabled()) { - log.debug("{} timed out at {} after {} attempt(s)", this, now, tries, + log.debug("{} failed with non-retriable exception after {} attempt(s)", this, tries, new Exception(prettyPrintException(throwable))); } handleFailure(throwable); return; } - handleFailure(throwable); + // If we are out of retries, fail. + if (tries > maxRetries) { + failWithTimeout(now, throwable); + return; + } + if (log.isDebugEnabled()) { + log.debug("{} failed: {}. Beginning retry #{}", this, prettyPrintException(throwable), tries); + } + runnable.enqueue(this, now); + } + + private void failWithTimeout(long now, Throwable cause) { + if (log.isDebugEnabled()) { + log.debug("{} timed out at {} after {} attempt(s)", this, now, tries, + new Exception(prettyPrintException(cause))); + } + handleFailure( + new TimeoutException(this + " timed out at " + now + " after " + tries + " attempt(s)", cause)); } /** @@ -555,6 +760,7 @@ final void fail(long now, Throwable throwable) { * * @return The AbstractRequest builder. */ + @SuppressWarnings("rawtypes") abstract AbstractRequest.Builder createRequest(int timeoutMs); /** @@ -563,19 +769,31 @@ final void fail(long now, Throwable throwable) { * @param abstractResponse The AbstractResponse. * */ - abstract void handleResponse(AbstractResponse abstractResponse); + abstract void handleResponse(org.apache.kafka.common.requests.AbstractResponse abstractResponse); /** * Handle a failure. This will only be called if the failure exception was not - * retryable, or if we hit a timeout. + * retriable, or if we hit a timeout. * * @param throwable The exception. */ abstract void handleFailure(Throwable throwable); + /** + * Handle an UnsupportedVersionException. + * + * @param exception The exception. + * + * @return True if the exception can be handled; false otherwise. 
+ */ + boolean handleUnsupportedVersionException(UnsupportedVersionException exception) { + return false; + } + @Override public String toString() { - return "Call(callName=" + callName + ", deadlineMs=" + deadlineMs + ")"; + return "Call(callName=" + callName + ", deadlineMs=" + deadlineMs + ", tries=" + tries + + ", nextAllowedTryMs=" + nextAllowedTryMs + ")"; } public boolean isInternal() { @@ -624,7 +842,7 @@ int handleTimeouts(Collection calls, String msg) { Call call = iter.next(); int remainingMs = calcTimeoutMsRemainingAsInt(now, call.deadlineMs); if (remainingMs < 0) { - call.fail(now, new TimeoutException(msg)); + call.fail(now, new TimeoutException(msg + " Call: " + call.callName)); iter.remove(); numTimedOut++; } else { @@ -633,24 +851,6 @@ int handleTimeouts(Collection calls, String msg) { } return numTimedOut; } - - /** - * Timeout all the calls. - * - * @param calls The collection of calls. - * - * @return The number of calls which were timed out. - */ - int timeoutAll(Collection calls, String msg) { - int numTimedOut = 0; - for (Iterator iter = calls.iterator(); iter.hasNext();) { - Call call = iter.next(); - call.fail(now, new TimeoutException(msg)); - iter.remove(); - numTimedOut++; - } - return numTimedOut; - } /** * Check whether a call should be timed out. The remaining milliseconds until @@ -685,6 +885,18 @@ private final class AdminClientRunnable implements Runnable { */ private final Map> callsToSend = new HashMap<>(); + /** + * Maps node ID strings to calls that have been sent. Only accessed from this + * thread. + */ + private final Map> callsInFlight = new HashMap<>(); + + /** + * Maps correlation IDs to calls that have been sent. Only accessed from this + * thread. + */ + private final Map correlationIdToCalls = new HashMap<>(); + /** * Pending calls. Protected by the object monitor. This will be null only if the * thread has shut down. @@ -716,21 +928,6 @@ private int timeoutCallsToSend(TimeoutProcessor processor) { log.debug("Timed out {} call(s) with assigned nodes.", numTimedOut); return numTimedOut; } - - /** - * Time out calls which have been assigned to nodes. - * - * @param processor The timeout processor. - */ - private int timeoutAllCallsToSend(TimeoutProcessor processor, String msg) { - int numTimedOut = 0; - for (List callList : callsToSend.values()) { - numTimedOut += processor.timeoutAll(callList, msg); - } - if (numTimedOut > 0) - log.debug("Timed out {} call(s) with assigned nodes.", numTimedOut); - return numTimedOut; - } /** * Drain all the calls from newCalls into pendingCalls. @@ -802,8 +999,8 @@ private boolean maybeDrainPendingCall(Call call, long now) { * @param now The current time in milliseconds. * @return The minimum timeout we need for poll(). */ - private void sendEligibleCalls(long now) { - + private long sendEligibleCalls(long now) { + long pollTimeout = Long.MAX_VALUE; for (Iterator>> iter = callsToSend.entrySet().iterator(); iter.hasNext();) { Map.Entry> entry = iter.next(); List calls = entry.getValue(); @@ -812,40 +1009,41 @@ private void sendEligibleCalls(long now) { continue; } Node node = entry.getKey(); - try { - if (!client.ready(node, now)) { - long nodeTimeout = client.pollDelayMs(node, now); - log.trace("Client is not ready to send to {}. 
Must delay {} ms", node, nodeTimeout); - continue; - } - } catch(InvalidLoginCredentialsException ilc) { - calls.remove(0).fail(now, new AuthenticationException(ilc.getMessage())); + if (!client.ready((org.oracle.okafka.common.Node) node, now)) { + long nodeTimeout = client.pollDelayMs((org.oracle.okafka.common.Node) node, now); + pollTimeout = Math.min(pollTimeout, nodeTimeout); + log.trace("Client is not ready to send to {}. Must delay {} ms", node, nodeTimeout); continue; } Call call = calls.remove(0); - int timeoutMs = calcTimeoutMsRemainingAsInt(now, call.deadlineMs); - if(timeoutMs< 0) { - call.fail(now, new TimeoutException("Timed out waiting to send the call.")); - continue; - } - - AbstractRequest.Builder requestBuilder = null; + int requestTimeoutMs = Math.min(KafkaAdminClient.this.requestTimeoutMs, + calcTimeoutMsRemainingAsInt(now, call.deadlineMs)); + AbstractRequest.Builder requestBuilder; try { - requestBuilder = call.createRequest(timeoutMs); + requestBuilder = call.createRequest(requestTimeoutMs); } catch (Throwable throwable) { call.fail(now, new KafkaException(String.format("Internal error sending %s to %s.", call.callName, node))); continue; } - ClientRequest clientRequest = client.newClientRequest(node, requestBuilder, now, true); - log.trace("Sending {} to {}. correlationId={}", requestBuilder, node, clientRequest.correlationId()); + + ClientRequest clientRequest = client.newClientRequest((org.oracle.okafka.common.Node) node, + requestBuilder, now, true, requestTimeoutMs, null); + log.info("Sending {} to {}. correlationId={}", requestBuilder, node, clientRequest.correlationId()); ClientResponse response = client.send(clientRequest, now); - log.trace("Received response for {} from {}. correlationId={}", requestBuilder, node, response.requestHeader().correlationId()); + + getOrCreateListValue(callsInFlight, node.idString()).add(call); + correlationIdToCalls.put(clientRequest.correlationId(), call); + + log.trace("Received response for {} from {}. correlationId={}", requestBuilder, node, + response.requestHeader().correlationId()); handleResponse(time.milliseconds(), call, response); + correlationIdToCalls.remove(clientRequest.correlationId()); + } + return pollTimeout; } - /** * Handle responses from the server. * @@ -855,19 +1053,149 @@ private void sendEligibleCalls(long now) { private void handleResponse(long now, Call call, ClientResponse response) { try { - if(response.wasDisconnected()) { - client.disconnected(response.destination(), now); - metadataManager.requestUpdate(); - - } - call.handleResponse(response.responseBody()); - } catch (Throwable t) { - if (log.isTraceEnabled()) - log.trace("{} handleResponse failed with {}", call, prettyPrintException(t)); - call.fail(now, t); + if (response.wasDisconnected()) { + client.disconnected((org.oracle.okafka.common.Node) (metadataManager + .nodeById(Integer.parseInt(response.destination()))), now); + metadataManager.requestUpdate(); + + } + call.handleResponse(response.responseBody()); + } catch (Throwable t) { + if (log.isTraceEnabled()) + log.trace("{} handleResponse failed with {}", call, prettyPrintException(t)); + call.fail(now, t); + } + } + + /** + * Time out expired calls that are in flight. + * + * Calls that are in flight may have been partially or completely sent over the + * wire. They may even be in the process of being processed by the remote + * server. At the moment, our only option to time them out is to close the + * entire connection. + * + * @param processor The timeout processor. 
+ */ + private void timeoutCallsInFlight(TimeoutProcessor processor) { + int numTimedOut = 0; + for (Map.Entry> entry : callsInFlight.entrySet()) { + List contexts = entry.getValue(); + if (contexts.isEmpty()) + continue; + String nodeId = entry.getKey(); + // We assume that the first element in the list is the earliest. So it should be + // the + // only one we need to check the timeout for. + Call call = contexts.get(0); + if (processor.callHasExpired(call)) { + if (call.aborted) { + log.warn("Aborted call {} is still in callsInFlight.", call); + } else { + log.debug("Closing connection to {} to time out {}", nodeId, call); + call.aborted = true; + client.disconnect( + (org.oracle.okafka.common.Node) metadataManager.nodeById(Integer.parseInt(nodeId))); + numTimedOut++; + // We don't remove anything from the callsInFlight data structure. Because the + // connection + // has been closed, the calls should be returned by the next client#poll(), + // and handled at that point. + } + } + } + if (numTimedOut > 0) + log.debug("Timed out {} call(s) in flight.", numTimedOut); + } + + /** + * Handle responses from the server. + * + * @param now The current time in milliseconds. + * @param responses The latest responses from KafkaClient. + **/ + private void handleResponses(long now, List responses) { + for (ClientResponse response : responses) { + int correlationId = response.requestHeader().correlationId(); + + Call call = correlationIdToCalls.get(correlationId); + if (call == null) { + // If the server returns information about a correlation ID we didn't use yet, + // an internal server error has occurred. Close the connection and log an error + // message. + log.error( + "Internal server error on {}: server returned information about unknown " + + "correlation ID {}, requestHeader = {}", + response.destination(), correlationId, response.requestHeader()); + client.disconnect((org.oracle.okafka.common.Node) metadataManager + .nodeById(Integer.parseInt(response.destination()))); + continue; } + + // Stop tracking this call. + correlationIdToCalls.remove(correlationId); + List calls = callsInFlight.get(response.destination()); + if ((calls == null) || (!calls.remove(call))) { + log.error("Internal server error on {}: ignoring call {} in correlationIdToCall " + + "that did not exist in callsInFlight", response.destination(), call); + continue; + } + + // Handle the result of the call. This may involve retrying the call, if we got + // a + // retriable exception. + if (response.versionMismatch() != null) { + call.fail(now, response.versionMismatch()); + } else if (response.wasDisconnected()) { + AuthenticationException authException = client + .authenticationException((org.oracle.okafka.common.Node) call.curNode()); + if (authException != null) { + call.fail(now, authException); + } else { + call.fail(now, + new DisconnectException(String.format( + "Cancelled %s request with correlation id %s due to node %s being disconnected", + call.callName, correlationId, response.destination()))); + } + metadataManager.requestUpdate(); + } else { + try { + call.handleResponse(response.responseBody()); + if (log.isTraceEnabled()) + log.trace("{} got response {}", call, response.responseBody()); + } catch (Throwable t) { + if (log.isTraceEnabled()) + log.trace("{} handleResponse failed with {}", call, prettyPrintException(t)); + call.fail(now, t); + } + } + } } - + + /** + * Unassign calls that have not yet been sent based on some predicate. 
For + * example, this is used to reassign the calls that have been assigned to a + * disconnected node. + * + * @param shouldUnassign Condition for reassignment. If the predicate is true, + * then the calls will be put back in the pendingCalls + * collection and they will be reassigned + */ + private void unassignUnsentCalls(Predicate shouldUnassign) { + for (Iterator>> iter = callsToSend.entrySet().iterator(); iter.hasNext();) { + Map.Entry> entry = iter.next(); + org.oracle.okafka.common.Node node = (org.oracle.okafka.common.Node) entry.getKey(); + List awaitingCalls = entry.getValue(); + + if (awaitingCalls.isEmpty()) { + iter.remove(); + } else if (shouldUnassign.test(node)) { + pendingCalls.addAll(awaitingCalls); + iter.remove(); + } + } + } + private boolean hasActiveExternalCalls(Collection calls) { for (Call call : calls) { if (!call.isInternal()) { @@ -876,7 +1204,7 @@ private boolean hasActiveExternalCalls(Collection calls) { } return false; } - + /** * Return true if there are currently active external calls. */ @@ -889,17 +1217,17 @@ private boolean hasActiveExternalCalls() { return true; } } - return false; + return hasActiveExternalCalls(correlationIdToCalls.values()); } - private boolean threadShouldExit(long now, long curHardShutdownTimeMs) { - if (!hasActiveExternalCalls()) { - log.trace("All work has been completed, and the I/O thread is now exiting."); + boolean activeExtrnalCalls = hasActiveExternalCalls(); + if (!activeExtrnalCalls) { + log.debug("All work has been completed, and the I/O thread is now exiting."); return true; } if (now >= curHardShutdownTimeMs) { - log.info("Forcing a hard I/O thread shutdown. Requests in progress will be aborted."); + log.debug("Forcing a hard I/O thread shutdown. Requests in progress will be aborted."); return true; } log.debug("Hard shutdown in {} ms.", curHardShutdownTimeMs - now); @@ -908,24 +1236,58 @@ private boolean threadShouldExit(long now, long curHardShutdownTimeMs) { @Override public void run() { - long now = time.milliseconds(); log.trace("Thread starting"); + try { + processRequests(); + } finally { + AppInfoParser.unregisterAppInfo(JMX_PREFIX, clientId, metrics); + + int numTimedOut = 0; + TimeoutProcessor timeoutProcessor = new TimeoutProcessor(Long.MAX_VALUE); + synchronized (this) { + numTimedOut += timeoutProcessor.handleTimeouts(newCalls, "The AdminClient thread has exited."); + newCalls = null; + } + numTimedOut += timeoutProcessor.handleTimeouts(pendingCalls, "The AdminClient thread has exited."); + numTimedOut += timeoutCallsToSend(timeoutProcessor); + numTimedOut += timeoutProcessor.handleTimeouts(correlationIdToCalls.values(), + "The AdminClient thread has exited."); + if (numTimedOut > 0) { + log.debug("Timed out {} remaining operation(s).", numTimedOut); + } + closeQuietly(client, "KafkaClient"); + closeQuietly(metrics, "Metrics"); + log.debug("Exiting AdminClientRunnable thread."); + } + } + + private void processRequests() { + long now = time.milliseconds(); while (true) { // Copy newCalls into pendingCalls. drainNewCalls(); // Check if the AdminClient thread should shut down. long curHardShutdownTimeMs = hardShutdownTimeMs.get(); - if ((curHardShutdownTimeMs != INVALID_SHUTDOWN_TIME) && threadShouldExit(now, curHardShutdownTimeMs)) - break; + if (curHardShutdownTimeMs != INVALID_SHUTDOWN_TIME) { + if (threadShouldExit(now, curHardShutdownTimeMs)) { + break; + } + } // Handle timeouts. 
TimeoutProcessor timeoutProcessor = timeoutProcessorFactory.create(now); timeoutPendingCalls(timeoutProcessor); timeoutCallsToSend(timeoutProcessor); - - maybeDrainPendingCalls(now); - + timeoutCallsInFlight(timeoutProcessor); + + long pollTimeout = Math.min(1200000, timeoutProcessor.nextTimeoutMs()); + if (curHardShutdownTimeMs != INVALID_SHUTDOWN_TIME) { + pollTimeout = Math.min(pollTimeout, curHardShutdownTimeMs - now); + } + + // Choose nodes for our pending calls. + pollTimeout = Math.min(pollTimeout, maybeDrainPendingCalls(now)); long metadataFetchDelayMs = metadataManager.metadataFetchDelayMs(now); if (metadataFetchDelayMs == 0) { metadataManager.transitionToUpdatePending(now); @@ -937,32 +1299,32 @@ public void run() { if (!maybeDrainPendingCall(metadataCall, now)) pendingCalls.add(metadataCall); } + pollTimeout = Math.min(pollTimeout, sendEligibleCalls(now)); + + if (metadataFetchDelayMs > 0) { + pollTimeout = Math.min(pollTimeout, metadataFetchDelayMs); + } - sendEligibleCalls(now); + // Ensure that we use a small poll timeout if there are pending calls which need + // to be sent + if (!pendingCalls.isEmpty()) + pollTimeout = Math.min(pollTimeout, retryBackoffMs); + + /* + * OKafka uses synchronous network I/O. No need to wait // Wait for network + * responses. log.trace("Entering KafkaClient#poll(timeout={})", pollTimeout); + * List responses = client.poll(pollTimeout, now); + * log.trace("KafkaClient#poll retrieved {} response(s)", responses.size()); + */ + // unassign calls to disconnected nodes unassignUnsentCalls(client::connectionFailed); - now = time.milliseconds(); - } - int numTimedOut = 0; - TimeoutProcessor timeoutProcessor = new TimeoutProcessor(Long.MAX_VALUE); - - numTimedOut += timeoutProcessor.timeoutAll(pendingCalls, "The AdminClient thread has exited."); - numTimedOut += timeoutAllCallsToSend(timeoutProcessor, "The AdminClient thread has exited."); - synchronized (this) { - numTimedOut += timeoutProcessor.timeoutAll(newCalls, "The AdminClient thread has exited."); - newCalls = null; - } - if (numTimedOut > 0) { - log.debug("Timed out {} remaining operation(s).", numTimedOut); - } - //closeQuietly(client, "KafkaClient"); - try { - client.close(); - } catch(Exception e) { - log.trace("Failed to close network client"); + + /* + * For OKafka Response is handled in sendEligibleCalls itself // Update the + * current time and handle the latest responses. now = time.milliseconds(); + * handleResponses(now, responses); + */ } - - closeQuietly(metrics, "Metrics"); - log.debug("Exiting AdminClientRunnable thread."); } /** @@ -976,6 +1338,11 @@ public void run() { * @param now The current time in milliseconds. */ void enqueue(Call call, long now) { + if (call.tries > maxRetries) { + log.debug("Max retries {} for {} reached", maxRetries, call); + call.fail(time.milliseconds(), new TimeoutException()); + return; + } if (log.isDebugEnabled()) { log.debug("Queueing {} with a timeout {} ms from now.", call, call.deadlineMs - now); } @@ -986,10 +1353,12 @@ void enqueue(Call call, long now) { accepted = true; } } - if (!accepted) { + if (accepted) { + client.wakeup(); // wake the thread if it is in poll() + } else { log.debug("The AdminClient thread has exited. 
Timing out {}.", call); - call.fail(Long.MAX_VALUE, new TimeoutException("The AdminClient thread has exited.")); - } + call.fail(Long.MAX_VALUE, new TimeoutException("The AdminClient thread has exited.")); + } } /** @@ -1008,26 +1377,30 @@ void call(Call call, long now) { enqueue(call, now); } } - + /** * Create a new metadata call. */ private Call makeMetadataCall(long now) { - return new Call(true, "fetchMetadata", calcDeadlineMs(now, defaultTimeoutMs), + return new Call(true, "fetchMetadata", calcDeadlineMs(now, requestTimeoutMs), new MetadataUpdateNodeIdProvider()) { @Override - public AbstractRequest.Builder createRequest(int timeoutMs) { + public MetadataRequest.Builder createRequest(int timeoutMs) { // Since this only requests node information, it's safe to pass true // for allowAutoTopicCreation (and it simplifies communication with // older brokers) - return new MetadataRequest.Builder(Collections.emptyList(), true); + /* + * return new MetadataRequest.Builder(new MetadataRequestData() + * .setTopics(Collections.emptyList()) .setAllowAutoTopicCreation(true)); + */ + return new MetadataRequest.Builder(Collections.emptyList(), true, Collections.emptyList()); } @Override - public void handleResponse(AbstractResponse abstractResponse) { + public void handleResponse(org.apache.kafka.common.requests.AbstractResponse abstractResponse) { MetadataResponse response = (MetadataResponse) abstractResponse; long now = time.milliseconds(); - metadataManager.update(response.cluster(null), now); + metadataManager.update(response.cluster(), now); // Unassign all unsent requests after a metadata refresh to allow for a new // destination to be selected from the new metadata @@ -1040,32 +1413,6 @@ public void handleFailure(Throwable e) { } }; } - - /** - * Unassign calls that have not yet been sent based on some predicate. For - * example, this is used to reassign the calls that have been assigned to a - * disconnected node. - * - * @param shouldUnassign Condition for reassignment. 
If the predicate is true, - * then the calls will be put back in the pendingCalls - * collection and they will be reassigned - */ - private void unassignUnsentCalls(Predicate shouldUnassign) { - for (Iterator>> iter = callsToSend.entrySet().iterator(); iter.hasNext();) { - Map.Entry> entry = iter.next(); - Node node = entry.getKey(); - List awaitingCalls = entry.getValue(); - - if (awaitingCalls.isEmpty()) { - iter.remove(); - } else if (shouldUnassign.test(node)) { - pendingCalls.addAll(awaitingCalls); - iter.remove(); - } - } - } - - } private static boolean topicNameIsUnrepresentable(String topicName) { @@ -1075,20 +1422,207 @@ private static boolean topicNameIsUnrepresentable(String topicName) { private static boolean groupIdIsUnrepresentable(String groupId) { return groupId == null; } + /* + * @Override public CreateTopicsResult createTopics(final Collection + * newTopics, final CreateTopicsOptions options) { final Map> topicFutures = new + * HashMap<>(newTopics.size()); final CreatableTopicCollection topics = new + * CreatableTopicCollection(); for (NewTopic newTopic : newTopics) { if + * (topicNameIsUnrepresentable(newTopic.name())) { + * KafkaFutureImpl future = new KafkaFutureImpl<>(); + * future.completeExceptionally(new + * InvalidTopicException("The given topic name '" + newTopic.name() + + * "' cannot be represented in a request.")); topicFutures.put(newTopic.name(), + * future); } else if (!topicFutures.containsKey(newTopic.name())) { + * topicFutures.put(newTopic.name(), new KafkaFutureImpl<>()); + * + * // Copied logic from convertToCreatableTopic of NewTopic CreatableTopic + * creatableTopic = new CreatableTopic(). setName(newTopic.name()). + * setNumPartitions(newTopic.numPartitions()). + * setReplicationFactor(newTopic.replicationFactor()); + * + * if (newTopic.configs() != null) { for (Entry entry : + * newTopic.configs().entrySet()) { creatableTopic.configs().add( new + * CreateableTopicConfig(). setName(entry.getKey()). + * setValue(entry.getValue())); } } + * + * topics.add(creatableTopic); } } if (!topics.isEmpty()) { final long now = + * time.milliseconds(); final long deadline = calcDeadlineMs(now, + * options.timeoutMs()); final Call call = getCreateTopicsCall(options, + * topicFutures, topics, Collections.emptyMap(), now, deadline); + * runnable.call(call, now); } return new + * org.oracle.okafka.clients.admin.CreateTopicsResult(new + * HashMap<>(topicFutures)); } + * + * private Call getCreateTopicsCall(final CreateTopicsOptions options, final + * Map> futures, final + * CreatableTopicCollection topics, final Map quotaExceededExceptions, final long now, + * final long deadline) { return new Call("createTopics", deadline, new + * ControllerNodeProvider()) { + * + * @Override public CreateTopicsRequest.Builder createRequest(int timeoutMs) { + * return new CreateTopicsRequest.Builder( new CreateTopicsRequestData() + * .setTopics(topics) .setTimeoutMs(timeoutMs) + * .setValidateOnly(options.shouldValidateOnly())); } + * + * @Override public void + * handleResponse(org.apache.kafka.common.requests.AbstractResponse + * abstractResponse) { // Check for controller change + * handleNotControllerError(abstractResponse); // Handle server responses for + * particular topics. 
final CreateTopicsResponse response = + * (CreateTopicsResponse) abstractResponse; final CreatableTopicCollection + * retryTopics = new CreatableTopicCollection(); final Map retryTopicQuotaExceededExceptions = new + * HashMap<>(); for (CreatableTopicResult result : response.data().topics()) { + * KafkaFutureImpl future = futures.get(result.name()); + * if (future == null) { log.warn("Server response mentioned unknown topic {}", + * result.name()); } else { ApiError error = new ApiError(result.errorCode(), + * result.errorMessage()); if (error.isFailure()) { if + * (error.is(Errors.THROTTLING_QUOTA_EXCEEDED)) { + * ThrottlingQuotaExceededException quotaExceededException = new + * ThrottlingQuotaExceededException( response.throttleTimeMs(), + * error.messageWithFallback()); if (options.shouldRetryOnQuotaViolation()) { + * retryTopics.add(topics.find(result.name()).duplicate()); + * retryTopicQuotaExceededExceptions.put(result.name(), quotaExceededException); + * } else { future.completeExceptionally(quotaExceededException); } } else { + * future.completeExceptionally(error.exception()); } } else { + * TopicMetadataAndConfig topicMetadataAndConfig; if + * (result.topicConfigErrorCode() != Errors.NONE.code()) { + * topicMetadataAndConfig = new TopicMetadataAndConfig( + * Errors.forCode(result.topicConfigErrorCode()).exception()); } else if + * (result.numPartitions() == CreateTopicsResult.UNKNOWN) { + * topicMetadataAndConfig = new TopicMetadataAndConfig(new + * UnsupportedVersionException( + * "Topic metadata and configs in CreateTopics response not supported")); } else + * { List configs = result.configs(); Config topicConfig + * = new Config(configs.stream() .map(this::configEntry) + * .collect(Collectors.toSet())); topicMetadataAndConfig = new + * TopicMetadataAndConfig(result.topicId(), result.numPartitions(), + * result.replicationFactor(), topicConfig); } + * future.complete(topicMetadataAndConfig); } } } // If there are topics to + * retry, retry them; complete unrealized futures otherwise. if + * (retryTopics.isEmpty()) { // The server should send back a response for every + * topic. But do a sanity check anyway. + * completeUnrealizedFutures(futures.entrySet().stream(), topic -> + * "The controller response did not contain a result for topic " + topic); } + * else { final long now = time.milliseconds(); final Call call = + * getCreateTopicsCall(options, futures, retryTopics, + * retryTopicQuotaExceededExceptions, now, deadline); runnable.call(call, now); + * } } + * + * private ConfigEntry configEntry(CreatableTopicConfigs config) { return new + * ConfigEntry( config.name(), config.value(), + * configSource(DescribeConfigsResponse.ConfigSource.forId(config.configSource() + * )), config.isSensitive(), config.readOnly(), Collections.emptyList(), null, + * null); } + * + * @Override void handleFailure(Throwable throwable) { // If there were any + * topics retries due to a quota exceeded exception, we propagate // the initial + * error back to the caller if the request timed out. 
+ * maybeCompleteQuotaExceededException(options.shouldRetryOnQuotaViolation(), + * throwable, futures, quotaExceededExceptions, (int) (time.milliseconds() - + * now)); // Fail all the other remaining futures + * completeAllExceptionally(futures.values(), throwable); } }; } + * + * @Override public DeleteTopicsResult deleteTopics(final Collection + * topicNames, final DeleteTopicsOptions options) { final Map> topicFutures = new HashMap<>(topicNames.size()); final + * List validTopicNames = new ArrayList<>(topicNames.size()); for + * (String topicName : topicNames) { if (topicNameIsUnrepresentable(topicName)) + * { KafkaFutureImpl future = new KafkaFutureImpl<>(); + * future.completeExceptionally(new + * InvalidTopicException("The given topic name '" + topicName + + * "' cannot be represented in a request.")); topicFutures.put(topicName, + * future); } else if (!topicFutures.containsKey(topicName)) { + * topicFutures.put(topicName, new KafkaFutureImpl<>()); + * validTopicNames.add(topicName); } } if (!validTopicNames.isEmpty()) { final + * long now = time.milliseconds(); final long deadline = calcDeadlineMs(now, + * options.timeoutMs()); final Call call = getDeleteTopicsCall(options, + * topicFutures, validTopicNames, Collections.emptyMap(), now, deadline); + * runnable.call(call, now); } return new DeleteTopicsResult(new + * HashMap<>(topicFutures)); } + * + * private Call getDeleteTopicsCall(final DeleteTopicsOptions options, final + * Map> futures, final List topics, final + * Map quotaExceededExceptions, final + * long now, final long deadline) { return new Call("deleteTopics", deadline, + * new ControllerNodeProvider()) { + * + * @Override DeleteTopicsRequest.Builder createRequest(int timeoutMs) { return + * new DeleteTopicsRequest.Builder( new DeleteTopicsRequestData() + * .setTopicNames(topics) .setTimeoutMs(timeoutMs)); } + * + * @Override void + * handleResponse(org.apache.kafka.common.requests.AbstractResponse + * abstractResponse) { // Check for controller change + * handleNotControllerError(abstractResponse); // Handle server responses for + * particular topics. final DeleteTopicsResponse response = + * (DeleteTopicsResponse) abstractResponse; final List retryTopics = new + * ArrayList<>(); final Map + * retryTopicQuotaExceededExceptions = new HashMap<>(); for + * (DeletableTopicResult result : response.data().responses()) { + * KafkaFutureImpl future = futures.get(result.name()); if (future == + * null) { log.warn("Server response mentioned unknown topic {}", + * result.name()); } else { ApiError error = new ApiError(result.errorCode(), + * result.errorMessage()); if (error.isFailure()) { if + * (error.is(Errors.THROTTLING_QUOTA_EXCEEDED)) { + * ThrottlingQuotaExceededException quotaExceededException = new + * ThrottlingQuotaExceededException( response.throttleTimeMs(), + * error.messageWithFallback()); if (options.shouldRetryOnQuotaViolation()) { + * retryTopics.add(result.name()); + * retryTopicQuotaExceededExceptions.put(result.name(), quotaExceededException); + * } else { future.completeExceptionally(quotaExceededException); } } else { + * future.completeExceptionally(error.exception()); } } else { + * future.complete(null); } } } // If there are topics to retry, retry them; + * complete unrealized futures otherwise. if (retryTopics.isEmpty()) { // The + * server should send back a response for every topic. But do a sanity check + * anyway. 
completeUnrealizedFutures(futures.entrySet().stream(), topic -> + * "The controller response did not contain a result for topic " + topic); } + * else { final long now = time.milliseconds(); final Call call = + * getDeleteTopicsCall(options, futures, retryTopics, + * retryTopicQuotaExceededExceptions, now, deadline); runnable.call(call, now); + * } } + * + * @Override void handleFailure(Throwable throwable) { // If there were any + * topics retries due to a quota exceeded exception, we propagate // the initial + * error back to the caller if the request timed out. + * maybeCompleteQuotaExceededException(options.shouldRetryOnQuotaViolation(), + * throwable, futures, quotaExceededExceptions, (int) (time.milliseconds() - + * now)); // Fail all the other remaining futures + * completeAllExceptionally(futures.values(), throwable); } }; } + * + */ @Override public CreateTopicsResult createTopics(final Collection newTopics, final CreateTopicsOptions options) { - final Map> topicFutures = new HashMap<>(newTopics.size()); + final Map> topicFutures = new HashMap<>(newTopics.size()); final Map topicsMap = new HashMap<>(newTopics.size()); for (NewTopic newTopic : newTopics) { if (topicNameIsUnrepresentable(newTopic.name())) { - KafkaFutureImpl future = new KafkaFutureImpl<>(); + KafkaFutureImpl future = new KafkaFutureImpl<>(); future.completeExceptionally(new InvalidTopicException( "The given topic name '" + newTopic.name() + "' cannot be represented in a request.")); topicFutures.put(newTopic.name(), future); } else if (!topicFutures.containsKey(newTopic.name())) { - topicFutures.put(newTopic.name(), new KafkaFutureImpl()); - topicsMap.put(newTopic.name(), newTopic.convertToTopicDetails()); + topicFutures.put(newTopic.name(), new KafkaFutureImpl()); + TopicDetails topicDetails = null; + + if (newTopic.replicasAssignments() != null) { + if (newTopic.configs() != null) { + topicDetails = new TopicDetails(newTopic.replicasAssignments(), newTopic.configs()); + } else { + topicDetails = new TopicDetails(newTopic.replicasAssignments()); + } + } else { + if (newTopic.configs() != null) { + topicDetails = new TopicDetails(newTopic.numPartitions(), newTopic.replicationFactor(), + newTopic.configs()); + } else { + topicDetails = new TopicDetails(newTopic.numPartitions(), newTopic.replicationFactor()); + } + } + topicsMap.put(newTopic.name(), topicDetails); } } final long now = time.milliseconds(); @@ -1100,17 +1634,18 @@ public AbstractRequest.Builder createRequest(int timeoutMs) { } @Override - public void handleResponse(AbstractResponse abstractResponse) { + public void handleResponse(org.apache.kafka.common.requests.AbstractResponse abstractResponse) { CreateTopicsResponse response = (CreateTopicsResponse) abstractResponse; + // Handle server responses for particular topics. for (Map.Entry entry : response.errors().entrySet()) { - KafkaFutureImpl future = topicFutures.get(entry.getKey()); + KafkaFutureImpl future = topicFutures.get(entry.getKey()); if (future == null) { log.warn("Server response mentioned unknown topic {}", entry.getKey()); } else { Exception exception = entry.getValue(); if (exception != null) { - future.completeExceptionally(exception); + future.completeExceptionally(exception); } else { future.complete(null); } @@ -1118,13 +1653,14 @@ public void handleResponse(AbstractResponse abstractResponse) { } // The server should send back a response for every topic. But do a sanity check // anyway. 
- for (Map.Entry> entry : topicFutures.entrySet()) { - KafkaFutureImpl future = entry.getValue(); + for (Map.Entry> entry : topicFutures.entrySet()) { + KafkaFutureImpl future = entry.getValue(); if (!future.isDone()) { - if(response.getResult() != null) { - future.completeExceptionally(response.getResult()); - } else future.completeExceptionally(new ApiException( - "The server response did not " + "contain a reference to node " + entry.getKey())); + if (response.getResult() != null) { + future.completeExceptionally(response.getResult()); + } else + future.completeExceptionally(new ApiException( + "The server response did not " + "contain a reference to node " + entry.getKey())); } } } @@ -1137,11 +1673,26 @@ void handleFailure(Throwable throwable) { if (!topicsMap.isEmpty()) { runnable.call(call, now); } - return new CreateTopicsResult(new HashMap>(topicFutures)); + org.apache.kafka.clients.admin.CreateTopicsResult createTopicResults = new org.oracle.okafka.clients.admin.CreateTopicsResult( + new HashMap>(topicFutures)); + return createTopicResults; } @Override public DeleteTopicsResult deleteTopics(Collection topicNames, DeleteTopicsOptions options) { + org.oracle.okafka.clients.admin.DeleteTopicsResult delResult = deleteTopics(topicNames, + new org.oracle.okafka.clients.admin.DeleteTopicsOptions()); + KafkaFuture ftr = delResult.all(); + try { + ftr.get(); + } catch (Exception e) { + log.error("Exception from deleteTopics", e); + } + return null; + } + + public org.oracle.okafka.clients.admin.DeleteTopicsResult deleteTopics(Collection topicNames, + org.oracle.okafka.clients.admin.DeleteTopicsOptions options) { final Map> topicFutures = new HashMap<>(topicNames.size()); final List validTopicNames = new ArrayList<>(topicNames.size()); for (String topicName : topicNames) { @@ -1164,7 +1715,7 @@ AbstractRequest.Builder createRequest(int timeoutMs) { } @Override - void handleResponse(AbstractResponse abstractResponse) { + void handleResponse(org.apache.kafka.common.requests.AbstractResponse abstractResponse) { DeleteTopicsResponse response = (DeleteTopicsResponse) abstractResponse; // Handle server responses for particular topics. 
for (Map.Entry entry : response.errors().entrySet()) { @@ -1185,10 +1736,11 @@ void handleResponse(AbstractResponse abstractResponse) { for (Map.Entry> entry : topicFutures.entrySet()) { KafkaFutureImpl future = entry.getValue(); if (!future.isDone()) { - if(response.getResult() != null) { - future.completeExceptionally(response.getResult()); - } else future.completeExceptionally(new ApiException( - "The server response did not " + "contain a reference to node " + entry.getKey())); + if (response.getResult() != null) { + future.completeExceptionally(response.getResult()); + } else + future.completeExceptionally(new ApiException( + "The server response did not " + "contain a reference to node " + entry.getKey())); } } } @@ -1201,11 +1753,25 @@ void handleFailure(Throwable throwable) { if (!validTopicNames.isEmpty()) { runnable.call(call, now); } - return new DeleteTopicsResult(new HashMap>(topicFutures)); + return new org.oracle.okafka.clients.admin.DeleteTopicsResult( + new HashMap>(topicFutures)); + } + + private void handleNotControllerError(org.apache.kafka.common.requests.AbstractResponse response) + throws ApiException { + if (response.errorCounts().containsKey(Errors.NOT_CONTROLLER)) { + handleNotControllerError(Errors.NOT_CONTROLLER); + } + } + + private void handleNotControllerError(Errors error) throws ApiException { + metadataManager.clearController(); + metadataManager.requestUpdate(); + throw error.exception(); } @Override - public ListTopicsResult listTopics(final ListTopicsOptions options) { + public ListTopicsResult listTopics(final org.apache.kafka.clients.admin.ListTopicsOptions options) { throw new FeatureNotSupportedException("This feature is not suported for this release."); } @@ -1321,4 +1887,162 @@ public DeleteConsumerGroupsResult deleteConsumerGroups(Collection groupI throw new FeatureNotSupportedException("This feature is not suported for this release."); } + + @Override + public void close(Duration timeout) { + // TODO Auto-generated method stub + + } + + @Override + public AlterConfigsResult incrementalAlterConfigs(Map> configs, + AlterConfigsOptions options) { + throw new FeatureNotSupportedException("This feature is not suported for this release."); + } + + @Override + public DeleteConsumerGroupOffsetsResult deleteConsumerGroupOffsets(String groupId, Set partitions, + DeleteConsumerGroupOffsetsOptions options) { + throw new FeatureNotSupportedException("This feature is not suported for this release."); + } + + @Override + public ElectLeadersResult electLeaders(ElectionType electionType, Set partitions, + ElectLeadersOptions options) { + throw new FeatureNotSupportedException("This feature is not suported for this release."); + } + + @Override + public AlterPartitionReassignmentsResult alterPartitionReassignments( + Map> reassignments, + AlterPartitionReassignmentsOptions options) { + throw new FeatureNotSupportedException("This feature is not suported for this release."); + } + + @Override + public ListPartitionReassignmentsResult listPartitionReassignments(Optional> partitions, + ListPartitionReassignmentsOptions options) { + throw new FeatureNotSupportedException("This feature is not suported for this release."); + } + + @Override + public RemoveMembersFromConsumerGroupResult removeMembersFromConsumerGroup(String groupId, + RemoveMembersFromConsumerGroupOptions options) { + throw new FeatureNotSupportedException("This feature is not suported for this release."); + } + + @Override + public AlterConsumerGroupOffsetsResult alterConsumerGroupOffsets(String 
groupId, + Map offsets, AlterConsumerGroupOffsetsOptions options) { + throw new FeatureNotSupportedException("This feature is not suported for this release."); + } + + @Override + public ListOffsetsResult listOffsets(Map topicPartitionOffsets, + ListOffsetsOptions options) { + throw new FeatureNotSupportedException("This feature is not suported for this release."); + } + + @Override + public DescribeClientQuotasResult describeClientQuotas(ClientQuotaFilter filter, + DescribeClientQuotasOptions options) { + throw new FeatureNotSupportedException("This feature is not suported for this release."); + } + + @Override + public AlterClientQuotasResult alterClientQuotas(Collection entries, + AlterClientQuotasOptions options) { + throw new FeatureNotSupportedException("This feature is not suported for this release."); + } + + @Override + public DescribeUserScramCredentialsResult describeUserScramCredentials(List users, + DescribeUserScramCredentialsOptions options) { + throw new FeatureNotSupportedException("This feature is not suported for this release."); + } + + @Override + public AlterUserScramCredentialsResult alterUserScramCredentials(List alterations, + AlterUserScramCredentialsOptions options) { + throw new FeatureNotSupportedException("This feature is not suported for this release."); + } + + @Override + public DescribeFeaturesResult describeFeatures(DescribeFeaturesOptions options) { + throw new FeatureNotSupportedException("This feature is not suported for this release."); + } + + @Override + public UpdateFeaturesResult updateFeatures(Map featureUpdates, + UpdateFeaturesOptions options) { + throw new FeatureNotSupportedException("This feature is not suported for this release."); + } + + @Override + public UnregisterBrokerResult unregisterBroker(int brokerId, UnregisterBrokerOptions options) { + throw new FeatureNotSupportedException("This feature is not suported for this release."); + } + + @Override + public Map metrics() { + throw new FeatureNotSupportedException("This feature is not suported for this release."); + } + + @Override + public DeleteTopicsResult deleteTopics(TopicCollection topics, DeleteTopicsOptions options) { + throw new FeatureNotSupportedException("This feature is not suported for this release."); + } + + @Override + public DescribeTopicsResult describeTopics(TopicCollection topics, DescribeTopicsOptions options) { + throw new FeatureNotSupportedException("This feature is not suported for this release."); + } + + @Override + public ListConsumerGroupOffsetsResult listConsumerGroupOffsets(Map groupSpecs, + ListConsumerGroupOffsetsOptions options) { + throw new FeatureNotSupportedException("This feature is not suported for this release."); + } + + @Override + public DescribeMetadataQuorumResult describeMetadataQuorum(DescribeMetadataQuorumOptions options) { + throw new FeatureNotSupportedException("This feature is not suported for this release."); + } + + @Override + public DescribeProducersResult describeProducers(Collection partitions, + DescribeProducersOptions options) { + throw new FeatureNotSupportedException("This feature is not suported for this release."); + } + + @Override + public DescribeTransactionsResult describeTransactions(Collection transactionalIds, + DescribeTransactionsOptions options) { + throw new FeatureNotSupportedException("This feature is not suported for this release."); + } + + @Override + public AbortTransactionResult abortTransaction(AbortTransactionSpec spec, AbortTransactionOptions options) { + throw new FeatureNotSupportedException("This 
feature is not suported for this release."); + } + + @Override + public ListTransactionsResult listTransactions(ListTransactionsOptions options) { + throw new FeatureNotSupportedException("This feature is not suported for this release."); + } + + @Override + public FenceProducersResult fenceProducers(Collection transactionalIds, FenceProducersOptions options) { + throw new FeatureNotSupportedException("This feature is not suported for this release."); + } + + @Override + public Uuid clientInstanceId(Duration timeout) { + throw new FeatureNotSupportedException("This feature is not suported for this release."); + } + + @Override + public ListClientMetricsResourcesResult listClientMetricsResources(ListClientMetricsResourcesOptions arg0) { + throw new FeatureNotSupportedException("This feature is not suported for this release."); + } } diff --git a/clients/src/main/java/org/oracle/okafka/clients/admin/ListConsumerGroupOffsetsOptions.java b/clients/src/main/java/org/oracle/okafka/clients/admin/ListConsumerGroupOffsetsOptions.java deleted file mode 100644 index 8823c3d..0000000 --- a/clients/src/main/java/org/oracle/okafka/clients/admin/ListConsumerGroupOffsetsOptions.java +++ /dev/null @@ -1,60 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.clients.admin; - -import java.util.List; - -import org.oracle.okafka.common.TopicPartition; -import org.oracle.okafka.common.annotation.InterfaceStability; - -/** - * Options for {@link AdminClient#listConsumerGroupOffsets(String)}. - *
<p>
- * The API of this class is evolving, see {@link AdminClient} for details. - */ -@InterfaceStability.Evolving -public class ListConsumerGroupOffsetsOptions extends AbstractOptions { - - private List topicPartitions = null; - - /** - * Set the topic partitions to list as part of the result. - * {@code null} includes all topic partitions. - * - * @param topicPartitions List of topic partitions to include - * @return This ListGroupOffsetsOptions - */ - public ListConsumerGroupOffsetsOptions topicPartitions(List topicPartitions) { - this.topicPartitions = topicPartitions; - return this; - } - - /** - * Returns a list of topic partitions to add as part of the result. - */ - public List topicPartitions() { - return topicPartitions; - } -} diff --git a/clients/src/main/java/org/oracle/okafka/clients/admin/ListConsumerGroupOffsetsResult.java b/clients/src/main/java/org/oracle/okafka/clients/admin/ListConsumerGroupOffsetsResult.java deleted file mode 100644 index 913527d..0000000 --- a/clients/src/main/java/org/oracle/okafka/clients/admin/ListConsumerGroupOffsetsResult.java +++ /dev/null @@ -1,55 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.clients.admin; - -import org.oracle.okafka.clients.consumer.OffsetAndMetadata; -import org.oracle.okafka.common.KafkaFuture; -import org.oracle.okafka.common.TopicPartition; -import org.oracle.okafka.common.annotation.InterfaceStability; - -import java.util.Map; - -/** - * The result of the {@link AdminClient#listConsumerGroupOffsets(String)} call. - *
<p>
- * The API of this class is evolving, see {@link AdminClient} for details. - */ -@InterfaceStability.Evolving -public class ListConsumerGroupOffsetsResult { - - final KafkaFuture> future; - - ListConsumerGroupOffsetsResult(KafkaFuture> future) { - this.future = future; - } - - /** - * Return a future which yields a map of topic partitions to OffsetAndMetadata objects. - */ - public KafkaFuture> partitionsToOffsetAndMetadata() { - return future; - } - -} diff --git a/clients/src/main/java/org/oracle/okafka/clients/admin/ListConsumerGroupsOptions.java b/clients/src/main/java/org/oracle/okafka/clients/admin/ListConsumerGroupsOptions.java deleted file mode 100644 index e17e734..0000000 --- a/clients/src/main/java/org/oracle/okafka/clients/admin/ListConsumerGroupsOptions.java +++ /dev/null @@ -1,36 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.clients.admin; - -import org.oracle.okafka.common.annotation.InterfaceStability; - -/** - * Options for {@link AdminClient#listConsumerGroups()}. - * - * The API of this class is evolving, see {@link AdminClient} for details. - */ -@InterfaceStability.Evolving -public class ListConsumerGroupsOptions extends AbstractOptions { -} diff --git a/clients/src/main/java/org/oracle/okafka/clients/admin/ListConsumerGroupsResult.java b/clients/src/main/java/org/oracle/okafka/clients/admin/ListConsumerGroupsResult.java deleted file mode 100644 index 86bec01..0000000 --- a/clients/src/main/java/org/oracle/okafka/clients/admin/ListConsumerGroupsResult.java +++ /dev/null @@ -1,41 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * 04/20/2020: This file is modified to support Kafka Java Client compatability to Oracle Transactional Event Queues. - * - */ - -package org.oracle.okafka.clients.admin; - -import org.oracle.okafka.common.annotation.InterfaceStability; - -/** - * The result of the {@link AdminClient#listConsumerGroups()} call. - *
<p>
- * The API of this class is evolving, see {@link AdminClient} for details. - */ -@InterfaceStability.Evolving -public class ListConsumerGroupsResult { -} diff --git a/clients/src/main/java/org/oracle/okafka/clients/admin/ListTopicsOptions.java b/clients/src/main/java/org/oracle/okafka/clients/admin/ListTopicsOptions.java deleted file mode 100644 index 0b45a3d..0000000 --- a/clients/src/main/java/org/oracle/okafka/clients/admin/ListTopicsOptions.java +++ /dev/null @@ -1,68 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.clients.admin; - -import org.oracle.okafka.common.annotation.InterfaceStability; - -/** - * Options for {@link AdminClient#listTopics()}. - * - * The API of this class is evolving, see {@link AdminClient} for details. - */ -@InterfaceStability.Evolving -public class ListTopicsOptions extends AbstractOptions { - - private boolean listInternal = false; - - /** - * Set the request timeout in milliseconds for this operation or {@code null} if the default request timeout for the - * AdminClient should be used. - * - */ - // This method is retained to keep binary compatibility with 0.11 - public ListTopicsOptions timeoutMs(Integer timeoutMs) { - this.timeoutMs = timeoutMs; - return this; - } - - /** - * Set whether we should list internal topics. - * - * @param listInternal Whether we should list internal topics. null means to use - * the default. - * @return This ListTopicsOptions object. - */ - public ListTopicsOptions listInternal(boolean listInternal) { - this.listInternal = listInternal; - return this; - } - - /** - * Return true if we should list internal topics. - */ - public boolean shouldListInternal() { - return listInternal; - } -} diff --git a/clients/src/main/java/org/oracle/okafka/clients/admin/ListTopicsResult.java b/clients/src/main/java/org/oracle/okafka/clients/admin/ListTopicsResult.java deleted file mode 100644 index 0815ae8..0000000 --- a/clients/src/main/java/org/oracle/okafka/clients/admin/ListTopicsResult.java +++ /dev/null @@ -1,42 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * 04/20/2020: This file is modified to support Kafka Java Client compatability to Oracle Transactional Event Queues. - * - */ - -package org.oracle.okafka.clients.admin; - -import org.oracle.okafka.common.annotation.InterfaceStability; - -/** - * The result of the {@link AdminClient#listTopics()} call. - * - * The API of this class is evolving, see {@link AdminClient} for details. - */ -@InterfaceStability.Evolving -public class ListTopicsResult { - -} diff --git a/clients/src/main/java/org/oracle/okafka/clients/admin/NewPartitions.java b/clients/src/main/java/org/oracle/okafka/clients/admin/NewPartitions.java deleted file mode 100644 index 88a97b1..0000000 --- a/clients/src/main/java/org/oracle/okafka/clients/admin/NewPartitions.java +++ /dev/null @@ -1,106 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.clients.admin; - -import java.util.List; -import java.util.Map; - -import org.oracle.okafka.common.annotation.InterfaceStability; - -/** - * Describes new partitions for a particular topic in a call to {@link AdminClient#createPartitions(Map)}. - * - * The API of this class is evolving, see {@link AdminClient} for details. - */ -@InterfaceStability.Evolving -public class NewPartitions { - - private int totalCount; - - private List> newAssignments; - - private NewPartitions(int totalCount, List> newAssignments) { - this.totalCount = totalCount; - this.newAssignments = newAssignments; - } - - /** - * Increase the partition count for a topic to the given {@code totalCount}. - * The assignment of new replicas to brokers will be decided by the broker. - * - * @param totalCount The total number of partitions after the operation succeeds. - */ - public static NewPartitions increaseTo(int totalCount) { - return new NewPartitions(totalCount, null); - } - - /** - *

-     * Increase the partition count for a topic to the given {@code totalCount}
-     * assigning the new partitions according to the given {@code newAssignments}.
-     * The length of the given {@code newAssignments} should equal {@code totalCount - oldCount}, since
-     * the assignment of existing partitions are not changed.
-     * Each inner list of {@code newAssignments} should have a length equal to
-     * the topic's replication factor.
-     * The first broker id in each inner list is the "preferred replica".
-     *
-     * <p>For example, suppose a topic currently has a replication factor of 2, and
-     * has 3 partitions. The number of partitions can be increased to 6 using a
-     * {@code NewPartition} constructed like this:
-     *
-     * <pre><code>
-     * NewPartitions.increaseTo(6, asList(asList(1, 2),
-     *                                    asList(2, 3),
-     *                                    asList(3, 1)))
-     * </code></pre>
-     *
-     * <p>In this example partition 3's preferred leader will be broker 1, partition 4's preferred leader will be
-     * broker 2 and partition 5's preferred leader will be broker 3.
- * - * @param totalCount The total number of partitions after the operation succeeds. - * @param newAssignments The replica assignments for the new partitions. - */ - public static NewPartitions increaseTo(int totalCount, List> newAssignments) { - return new NewPartitions(totalCount, newAssignments); - } - - /** - * The total number of partitions after the operation succeeds. - */ - public int totalCount() { - return totalCount; - } - - /** - * The replica assignments for the new partitions, or null if the assignment will be done by the controller. - */ - public List> assignments() { - return newAssignments; - } - - @Override - public String toString() { - return "(totalCount=" + totalCount() + ", newAssignments=" + assignments() + ")"; - } - -} diff --git a/clients/src/main/java/org/oracle/okafka/clients/admin/NewTopic.java b/clients/src/main/java/org/oracle/okafka/clients/admin/NewTopic.java deleted file mode 100644 index ca589cf..0000000 --- a/clients/src/main/java/org/oracle/okafka/clients/admin/NewTopic.java +++ /dev/null @@ -1,142 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.clients.admin; - -import java.util.Collection; -import java.util.Collections; -import java.util.List; -import java.util.Map; - -import org.oracle.okafka.common.requests.CreateTopicsRequest.TopicDetails; - -/** - * A new topic to be created via {@link AdminClient#createTopics(Collection)}. - * Note: Topic name has to be in uppercase wherever used. - */ -public class NewTopic { - private final String name; - private final int numPartitions; - private final short replicationFactor; - private final Map> replicasAssignments; - private Map configs = null; - - /** - * A new topic with the specified replication factor and number of partitions. Replication factor is not necessary for TEQ. Hence this value is ignored. - */ - public NewTopic(String name, int numPartitions, short replicationFactor) { - this.name = name; - this.numPartitions = numPartitions; - this.replicationFactor = replicationFactor; - this.replicasAssignments = null; - } - - /** - * A new topic with the specified replica assignment configuration. - * - * @param name the topic name. - * @param replicasAssignments ReplicaAssignments factor is not necessary for TEQ. Hence this value is ignored. 
- */ - public NewTopic(String name, Map> replicasAssignments) { - this.name = name; - this.numPartitions = -1; - this.replicationFactor = -1; - this.replicasAssignments = Collections.unmodifiableMap(replicasAssignments); - } - - /** - * The name of the topic to be created. - */ - public String name() { - return name; - } - - /** - * The number of partitions for the new topic or -1 if a replica assignment has been specified. - */ - public int numPartitions() { - return numPartitions; - } - - /** - * The replication factor for the new topic or -1 if a replica assignment has been specified. - */ - public short replicationFactor() { - return replicationFactor; - } - - /** - * A map from partition id to replica ids (i.e. broker ids) or null if the number of partitions and replication - * factor have been specified instead. - */ - public Map> replicasAssignments() { - return replicasAssignments; - } - - /** - * Set the configuration to use on the new topic. - * - * @param configs The configuration map. - * @return This NewTopic object. - */ - public NewTopic configs(Map configs) { - this.configs = configs; - return this; - } - - /** - * The configuration for the new topic or null if no configs ever specified. - */ - public Map configs() { - return configs; - } - - TopicDetails convertToTopicDetails() { - if (replicasAssignments != null) { - if (configs != null) { - return new TopicDetails(replicasAssignments, configs); - } else { - return new TopicDetails(replicasAssignments); - } - } else { - if (configs != null) { - return new TopicDetails(numPartitions, replicationFactor, configs); - } else { - return new TopicDetails(numPartitions, replicationFactor); - } - } - } - - @Override - public String toString() { - StringBuilder bld = new StringBuilder(); - bld.append("(name=").append(name). - append(", numPartitions=").append(numPartitions). - append(", replicationFactor=").append(replicationFactor). - append(", replicasAssignments=").append(replicasAssignments). - append(", configs=").append(configs). - append(")"); - return bld.toString(); - } -} diff --git a/clients/src/main/java/org/oracle/okafka/clients/admin/RecordsToDelete.java b/clients/src/main/java/org/oracle/okafka/clients/admin/RecordsToDelete.java deleted file mode 100644 index 4102586..0000000 --- a/clients/src/main/java/org/oracle/okafka/clients/admin/RecordsToDelete.java +++ /dev/null @@ -1,80 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.oracle.okafka.clients.admin; - -import java.util.Map; - -import org.oracle.okafka.common.annotation.InterfaceStability; - -/** - * Describe records to delete in a call to {@link AdminClient#deleteRecords(Map)} - * - * The API of this class is evolving, see {@link AdminClient} for details. - */ -@InterfaceStability.Evolving -public class RecordsToDelete { - - private long offset; - - private RecordsToDelete(long offset) { - this.offset = offset; - } - - /** - * Delete all the records before the given {@code offset} - * - * @param offset the offset before which all records will be deleted - */ - public static RecordsToDelete beforeOffset(long offset) { - return new RecordsToDelete(offset); - } - - /** - * The offset before which all records will be deleted - */ - public long beforeOffset() { - return offset; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - - RecordsToDelete that = (RecordsToDelete) o; - - return this.offset == that.offset; - } - - @Override - public int hashCode() { - return (int) offset; - } - - @Override - public String toString() { - return "(beforeOffset = " + offset + ")"; - } -} diff --git a/clients/src/main/java/org/oracle/okafka/clients/admin/RenewDelegationTokenOptions.java b/clients/src/main/java/org/oracle/okafka/clients/admin/RenewDelegationTokenOptions.java deleted file mode 100644 index f8a30e8..0000000 --- a/clients/src/main/java/org/oracle/okafka/clients/admin/RenewDelegationTokenOptions.java +++ /dev/null @@ -1,46 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.clients.admin; - -import org.oracle.okafka.common.annotation.InterfaceStability; - -/** - * Options for {@link AdminClient#renewDelegationToken(byte[], RenewDelegationTokenOptions)}. - * - * The API of this class is evolving, see {@link AdminClient} for details. 
- */ -@InterfaceStability.Evolving -public class RenewDelegationTokenOptions extends AbstractOptions { - private long renewTimePeriodMs = -1; - - public RenewDelegationTokenOptions renewTimePeriodMs(long renewTimePeriodMs) { - this.renewTimePeriodMs = renewTimePeriodMs; - return this; - } - - public long renewTimePeriodMs() { - return renewTimePeriodMs; - } -} diff --git a/clients/src/main/java/org/oracle/okafka/clients/admin/RenewDelegationTokenResult.java b/clients/src/main/java/org/oracle/okafka/clients/admin/RenewDelegationTokenResult.java deleted file mode 100644 index 9680427..0000000 --- a/clients/src/main/java/org/oracle/okafka/clients/admin/RenewDelegationTokenResult.java +++ /dev/null @@ -1,49 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.clients.admin; - -import org.oracle.okafka.common.KafkaFuture; -import org.oracle.okafka.common.annotation.InterfaceStability; - -/** - * The result of the {@link KafkaAdminClient#expireDelegationToken(byte[], ExpireDelegationTokenOptions)} call. - * - * The API of this class is evolving, see {@link AdminClient} for details. - */ -@InterfaceStability.Evolving -public class RenewDelegationTokenResult { - private final KafkaFuture expiryTimestamp; - - RenewDelegationTokenResult(KafkaFuture expiryTimestamp) { - this.expiryTimestamp = expiryTimestamp; - } - - /** - * Returns a future which yields expiry timestamp - */ - public KafkaFuture expiryTimestamp() { - return expiryTimestamp; - } -} diff --git a/clients/src/main/java/org/oracle/okafka/clients/admin/internals/AQKafkaAdmin.java b/clients/src/main/java/org/oracle/okafka/clients/admin/internals/AQKafkaAdmin.java index c46f0c7..357bb38 100644 --- a/clients/src/main/java/org/oracle/okafka/clients/admin/internals/AQKafkaAdmin.java +++ b/clients/src/main/java/org/oracle/okafka/clients/admin/internals/AQKafkaAdmin.java @@ -1,7 +1,7 @@ /* -** OKafka Java Client version 0.8. +** OKafka Java Client version 23.4. ** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. +** Copyright (c) 2019, 2024 Oracle and/or its affiliates. ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
*/ @@ -10,13 +10,17 @@ import java.sql.CallableStatement; import java.sql.Connection; import java.sql.SQLException; +import java.util.ArrayList; import java.util.HashMap; import java.util.HashSet; +import java.util.Iterator; +import java.util.List; import java.util.Map; import java.util.Set; -import org.oracle.okafka.clients.ClientRequest; -import org.oracle.okafka.clients.ClientResponse; +import org.apache.kafka.clients.ClientRequest; +import org.apache.kafka.clients.ClientResponse; +import org.apache.kafka.clients.admin.internals.AdminMetadataManager; import org.oracle.okafka.clients.admin.AdminClientConfig; import org.oracle.okafka.common.Node; import org.oracle.okafka.common.errors.ConnectionException; @@ -30,8 +34,8 @@ import org.oracle.okafka.common.requests.CreateTopicsRequest.TopicDetails; import org.oracle.okafka.common.utils.ConnectionUtils; import org.oracle.okafka.common.utils.CreateTopics; -import org.oracle.okafka.common.utils.LogContext; -import org.oracle.okafka.common.utils.Time; +import org.apache.kafka.common.utils.LogContext; +import org.apache.kafka.common.utils.Time; /** * AQ client for publishing requests to AQ and generating reponses. @@ -43,12 +47,15 @@ public class AQKafkaAdmin extends AQClient{ private final AdminClientConfig configs; private final Time time; private final Map connections; + private final AdminMetadataManager metadataManager; + private boolean forceMetadata = false; - public AQKafkaAdmin(LogContext logContext, AdminClientConfig configs, Time time) { + public AQKafkaAdmin(LogContext logContext, AdminClientConfig configs, AdminMetadataManager _metadataManager, Time time) { super(logContext.logger(AQKafkaAdmin.class), configs); this.configs = configs; this.time = time; - this.connections = new HashMap<>(); + this.connections = new HashMap(); + this.metadataManager = _metadataManager; } /** @@ -56,7 +63,7 @@ public AQKafkaAdmin(LogContext logContext, AdminClientConfig configs, Time time) */ @Override public ClientResponse send(ClientRequest request) { - return parseRequest(request, request.apiKey()); + return parseRequest(request, ApiKeys.convertToOracleApiKey(request.apiKey())); } @@ -84,15 +91,16 @@ private ClientResponse parseRequest( ClientRequest request, ApiKeys key) { */ private ClientResponse createTopics(ClientRequest request) { - Node node = request.destination(); + Node node = (org.oracle.okafka.common.Node) metadataManager.nodeById(Integer.parseInt(request.destination())); CreateTopicsRequest.Builder builder= (CreateTopicsRequest.Builder)request.requestBuilder(); Map topics = builder.build().topics(); Connection jdbcConn = connections.get(node); - Map result = new HashMap<>(); + Map result = new HashMap(); SQLException exception = null; try { result = CreateTopics.createTopics(jdbcConn, topics) ; } catch(SQLException sql) { + sql.printStackTrace(); exception = sql; log.trace("Unexcepted error occured with connection to node {}, closing the connection", request.destination()); log.trace("Failed to create topics {}", topics.keySet()); @@ -119,9 +127,9 @@ private ClientResponse createTopicsResponse(Map errors, Excep CreateTopicsResponse topicResponse = new CreateTopicsResponse(errors); if(result != null) topicResponse.setResult(result); - ClientResponse response = new ClientResponse(request.makeHeader(), + ClientResponse response = new ClientResponse(request.makeHeader((short)1), request.callback(), request.destination(), request.createdTimeMs(), - time.milliseconds(), disconnected, topicResponse); + time.milliseconds(), disconnected, null, 
null, topicResponse); return response; } @@ -132,7 +140,7 @@ private ClientResponse createTopicsResponse(Map errors, Excep * @return response for delete topics request. */ private ClientResponse deleteTopics(ClientRequest request) { - Node node = request.destination(); + Node node =(org.oracle.okafka.common.Node) metadataManager.nodeById(Integer.parseInt(request.destination())); DeleteTopicsRequest.Builder builder= (DeleteTopicsRequest.Builder)request.requestBuilder(); Set topics = builder.build().topics(); Connection jdbcConn = connections.get(node); @@ -185,16 +193,32 @@ private ClientResponse deleteTopicsResponse(Map errors, Ex DeleteTopicsResponse topicResponse = new DeleteTopicsResponse(errors); if(result != null) topicResponse.setResult(result); - ClientResponse response = new ClientResponse(request.makeHeader(), + ClientResponse response = new ClientResponse(request.makeHeader((short)1), request.callback(), request.destination(), request.createdTimeMs(), - time.milliseconds(), disconnected, topicResponse); + time.milliseconds(), disconnected, null, null, topicResponse); return response; } private ClientResponse getMetadata(ClientRequest request) { - ClientResponse response = getMetadataNow(request, connections.get(request.destination())); - if(response.wasDisconnected()) - connections.remove(request.destination()); + Node node =(org.oracle.okafka.common.Node) metadataManager.nodeById(Integer.parseInt(request.destination())); + if (node == null) + { + List nodeList = metadataManager.updater().fetchNodes(); + for(org.apache.kafka.common.Node nodeNow : nodeList) + { + if(nodeNow.id() == Integer.parseInt(request.destination())) + { + node = (org.oracle.okafka.common.Node)nodeNow; + } + } + + } + ClientResponse response = getMetadataNow(request, connections.get(node), node, forceMetadata); + if(response.wasDisconnected()) { + + connections.remove(node); + forceMetadata = true; + } return response; } @@ -207,11 +231,16 @@ private ClientResponse getMetadata(ClientRequest request) { */ private Connection getConnection(Node node) { try { - connections.put(node, ConnectionUtils.createJDBCConnection(node, configs)); - } catch(SQLException sqlException) { - if(sqlException.getErrorCode()== 1017) - throw new InvalidLoginCredentialsException(sqlException); - throw new ConnectionException(sqlException.getMessage()); + Connection newConn = ConnectionUtils.createJDBCConnection(node, configs); + connections.put(node, newConn); + } catch(SQLException excp) { + log.error("Exception while connecting to Oracle Database " + excp, excp); + + excp.printStackTrace(); + if(excp.getErrorCode()== 1017) + throw new InvalidLoginCredentialsException(excp); + + throw new ConnectionException(excp.getMessage()); } return connections.get(node); } @@ -226,9 +255,9 @@ public void connect(Node node) { */ @Override public void close() { - for(Map.Entry connection: connections.entrySet()) { - close(connection.getKey()); - } + List closeNodes = new ArrayList(); + closeNodes.addAll(connections.keySet()); + closeNodes.forEach(n->close(n)); } /** @@ -248,7 +277,4 @@ public void close(Node node) { } } - - - } diff --git a/clients/src/main/java/org/oracle/okafka/clients/admin/internals/AdminMetadataManager.java b/clients/src/main/java/org/oracle/okafka/clients/admin/internals/AdminMetadataManager.java deleted file mode 100644 index ee8b933..0000000 --- a/clients/src/main/java/org/oracle/okafka/clients/admin/internals/AdminMetadataManager.java +++ /dev/null @@ -1,268 +0,0 @@ -/* -** OKafka Java Client version 0.8. 
-** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.clients.admin.internals; - -import org.oracle.okafka.clients.MetadataUpdater; -import org.oracle.okafka.common.Cluster; -import org.oracle.okafka.common.Node; -import org.oracle.okafka.common.PartitionInfo; -import org.oracle.okafka.common.errors.AuthenticationException; -import org.oracle.okafka.common.requests.MetadataResponse; -import org.oracle.okafka.common.requests.RequestHeader; -import org.oracle.okafka.common.utils.LogContext; -import org.slf4j.Logger; - -import java.util.Collections; -import java.util.List; - -/** - * Manages the metadata for KafkaAdminClient. - * - * This class is not thread-safe. It is only accessed from the AdminClient - * service thread (which also uses the NetworkClient). - */ -public class AdminMetadataManager { - private Logger log; - - /** - * The minimum amount of time that we should wait between subsequent - * retries, when fetching metadata. - */ - private final long refreshBackoffMs; - - /** - * The minimum amount of time that we should wait before triggering an - * automatic metadata refresh. - */ - private final long metadataExpireMs; - - /** - * Used to update the NetworkClient metadata. - */ - private final AdminMetadataUpdater updater; - - /** - * The current metadata state. - */ - private State state = State.QUIESCENT; - - /** - * The time in wall-clock milliseconds when we last updated the metadata. - */ - private long lastMetadataUpdateMs = 0; - - /** - * The time in wall-clock milliseconds when we last attempted to fetch new - * metadata. - */ - private long lastMetadataFetchAttemptMs = 0; - - /** - * The current cluster information. - */ - private Cluster cluster = Cluster.empty(); - - /** - * If we got an authorization exception when we last attempted to fetch - * metadata, this is it; null, otherwise. 
- */ - private AuthenticationException authException = null; - - public class AdminMetadataUpdater implements MetadataUpdater { - @Override - public List fetchNodes() { - return cluster.nodes(); - } - - @Override - public boolean isUpdateDue(long now) { - return false; - } - - @Override - public long maybeUpdate(long now) { - return Long.MAX_VALUE; - } - - @Override - public void handleCompletedMetadataResponse(RequestHeader requestHeader, long now, MetadataResponse metadataResponse) { - // Do nothing - } - - @Override - public void handleDisconnection(String destination) { - // Do nothing - } - - @Override - public void handleAuthenticationFailure(AuthenticationException e) { - updateFailed(e); - } - - @Override - public void requestUpdate() { - AdminMetadataManager.this.requestUpdate(); - } - - @Override - public void close() { - } - } - - /** - * The current AdminMetadataManager state. - */ - enum State { - QUIESCENT, - UPDATE_REQUESTED, - UPDATE_PENDING - } - - public AdminMetadataManager(LogContext logContext, long refreshBackoffMs, long metadataExpireMs) { - this.log = logContext.logger(AdminMetadataManager.class); - this.refreshBackoffMs = refreshBackoffMs; - this.metadataExpireMs = metadataExpireMs; - this.updater = new AdminMetadataUpdater(); - } - - public AdminMetadataUpdater updater() { - return updater; - } - - public boolean isReady() { - if (authException != null) { - log.debug("Metadata is not usable: failed to get metadata.", authException); - throw authException; - } - if (cluster.nodes().isEmpty()) { - log.trace("Metadata is not ready: bootstrap nodes have not been " + - "initialized yet."); - return false; - } - if (cluster.isBootstrapConfigured()) { - log.trace("Metadata is not ready: we have not fetched metadata from " + - "the bootstrap nodes yet."); - return false; - } - log.trace("Metadata is ready to use."); - return true; - } - - public Node controller() { - return cluster.controller(); - } - - public Node nodeById(int nodeId) { - return cluster.nodeById(nodeId); - } - - public void requestUpdate() { - if (state == State.QUIESCENT) { - state = State.UPDATE_REQUESTED; - log.debug("Requesting metadata update."); - } - } - - public void clearController() { - if (cluster.controller() != null) { - log.trace("Clearing cached controller node {}.", cluster.controller()); - this.cluster = new Cluster(cluster.clusterResource().clusterId(), - cluster.nodes(), - Collections.emptySet(), - Collections.emptySet(), - Collections.emptySet(), - null); - } - } - - /** - * Determine if the AdminClient should fetch new metadata. - */ - public long metadataFetchDelayMs(long now) { - switch (state) { - case QUIESCENT: - // Calculate the time remaining until the next periodic update. - // We want to avoid making many metadata requests in a short amount of time, - // so there is a metadata refresh backoff period. - return Math.max(delayBeforeNextAttemptMs(now), delayBeforeNextExpireMs(now)); - case UPDATE_REQUESTED: - // Respect the backoff, even if an update has been requested - return delayBeforeNextAttemptMs(now); - default: - // An update is already pending, so we don't need to initiate another one. 
- return Long.MAX_VALUE; - } - } - - private long delayBeforeNextExpireMs(long now) { - long timeSinceUpdate = now - lastMetadataUpdateMs; - return Math.max(0, metadataExpireMs - timeSinceUpdate); - } - - private long delayBeforeNextAttemptMs(long now) { - long timeSinceAttempt = now - lastMetadataFetchAttemptMs; - return Math.max(0, refreshBackoffMs - timeSinceAttempt); - } - - /** - * Transition into the UPDATE_PENDING state. Updates lastMetadataFetchAttemptMs. - */ - public void transitionToUpdatePending(long now) { - this.state = State.UPDATE_PENDING; - this.lastMetadataFetchAttemptMs = now; - } - - public void updateFailed(Throwable exception) { - // We depend on pending calls to request another metadata update - this.state = State.QUIESCENT; - - if (exception instanceof AuthenticationException) { - log.warn("Metadata update failed due to authentication error", exception); - this.authException = (AuthenticationException) exception; - } else { - log.info("Metadata update failed", exception); - } - } - - /** - * Receive new metadata, and transition into the QUIESCENT state. - * Updates lastMetadataUpdateMs, cluster, and authException. - */ - public void update(Cluster cluster, long now) { - if (cluster.isBootstrapConfigured()) { - log.debug("Setting bootstrap cluster metadata {}.", cluster); - } else { - log.debug("Updating cluster metadata to {}", cluster); - this.lastMetadataUpdateMs = now; - } - - this.state = State.QUIESCENT; - this.authException = null; - - if (!cluster.nodes().isEmpty()) { - this.cluster = cluster; - } - } -} diff --git a/clients/src/main/java/org/oracle/okafka/clients/consumer/Consumer.java b/clients/src/main/java/org/oracle/okafka/clients/consumer/Consumer.java deleted file mode 100644 index 9ec1289..0000000 --- a/clients/src/main/java/org/oracle/okafka/clients/consumer/Consumer.java +++ /dev/null @@ -1,256 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.oracle.okafka.clients.consumer; - -import java.io.Closeable; -import java.time.Duration; -import java.util.Collection; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.TimeUnit; -import java.util.regex.Pattern; - -import org.oracle.okafka.common.Metric; -import org.oracle.okafka.common.MetricName; -import org.oracle.okafka.common.PartitionInfo; -import org.oracle.okafka.common.TopicPartition; - -/** - * @see KafkaConsumer - */ -public interface Consumer extends Closeable { - - /** - * @see KafkaConsumer#assignment() - */ - Set assignment(); - - /** - * @see KafkaConsumer#subscription() - */ - Set subscription(); - - /** - * @see KafkaConsumer#subscribe(Collection) - */ - void subscribe(Collection topics); - - /** - * @see KafkaConsumer#subscribe(Collection, ConsumerRebalanceListener) - */ - void subscribe(Collection topics, ConsumerRebalanceListener callback); - - /** - * @see KafkaConsumer#assign(Collection) - */ - void assign(Collection partitions); - - /** - * @see KafkaConsumer#subscribe(Pattern, ConsumerRebalanceListener) - */ - void subscribe(Pattern pattern, ConsumerRebalanceListener callback); - - /** - * @see KafkaConsumer#subscribe(Pattern) - */ - void subscribe(Pattern pattern); - - /** - * @see KafkaConsumer#unsubscribe() - */ - void unsubscribe(); - - /** - * @see KafkaConsumer#poll(long) - */ - @Deprecated - ConsumerRecords poll(long timeout); - - /** - * @see KafkaConsumer#poll(Duration) - */ - ConsumerRecords poll(Duration timeout); - - /** - * @see KafkaConsumer#commitSync() - */ - void commitSync(); - - /** - * @see KafkaConsumer#commitSync(Duration) - */ - void commitSync(Duration timeout); - - /** - * @see KafkaConsumer#commitSync(Map) - */ - void commitSync(Map offsets); - - /** - * @see KafkaConsumer#commitSync(Map, Duration) - */ - void commitSync(final Map offsets, final Duration timeout); - /** - * @see KafkaConsumer#commitAsync() - */ - void commitAsync(); - - /** - * @see KafkaConsumer#commitAsync(OffsetCommitCallback) - */ - void commitAsync(OffsetCommitCallback callback); - - /** - * @see KafkaConsumer#commitAsync(Map, OffsetCommitCallback) - */ - void commitAsync(Map offsets, OffsetCommitCallback callback); - - /** - * @see KafkaConsumer#seek(TopicPartition, long) - */ - void seek(TopicPartition partition, long offset); - - /** - * @see KafkaConsumer#seekToBeginning(Collection) - */ - void seekToBeginning(Collection partitions); - - /** - * @see KafkaConsumer#seekToEnd(Collection) - */ - void seekToEnd(Collection partitions); - - /** - * @see KafkaConsumer#position(TopicPartition) - */ - long position(TopicPartition partition); - - /** - * @see KafkaConsumer#position(TopicPartition, Duration) - */ - long position(TopicPartition partition, final Duration timeout); - - /** - * @see KafkaConsumer#committed(TopicPartition) - */ - OffsetAndMetadata committed(TopicPartition partition); - - /** - * @see KafkaConsumer#committed(TopicPartition, Duration) - */ - OffsetAndMetadata committed(TopicPartition partition, final Duration timeout); - - /** - * @see KafkaConsumer#metrics() - */ - Map metrics(); - - /** - * @see KafkaConsumer#partitionsFor(String) - */ - List partitionsFor(String topic); - - /** - * @see KafkaConsumer#partitionsFor(String, Duration) - */ - List partitionsFor(String topic, Duration timeout); - - /** - * @see KafkaConsumer#listTopics() - */ - Map> listTopics(); - - /** - * @see KafkaConsumer#listTopics(Duration) - */ - Map> listTopics(Duration timeout); - - /** - * @see 
KafkaConsumer#paused() - */ - Set paused(); - - /** - * @see KafkaConsumer#pause(Collection) - */ - void pause(Collection partitions); - - /** - * @see KafkaConsumer#resume(Collection) - */ - void resume(Collection partitions); - - /** - * @see KafkaConsumer#offsetsForTimes(Map) - */ - Map offsetsForTimes(Map timestampsToSearch); - - /** - * @see KafkaConsumer#offsetsForTimes(Map, Duration) - */ - Map offsetsForTimes(Map timestampsToSearch, Duration timeout); - - /** - * @see KafkaConsumer#beginningOffsets(Collection) - */ - Map beginningOffsets(Collection partitions); - - /** - * @see KafkaConsumer#beginningOffsets(Collection, Duration) - */ - Map beginningOffsets(Collection partitions, Duration timeout); - - /** - * @see KafkaConsumer#endOffsets(Collection) - */ - Map endOffsets(Collection partitions); - - /** - * @see KafkaConsumer#endOffsets(Collection, Duration) - */ - Map endOffsets(Collection partitions, Duration timeout); - - /** - * @see KafkaConsumer#close() - */ - void close(); - - /** - * @see KafkaConsumer#close(long, TimeUnit) - */ - @Deprecated - void close(long timeout, TimeUnit unit); - - /** - * @see KafkaConsumer#close(Duration) - */ - void close(Duration timeout); - - /** - * @see KafkaConsumer#wakeup() - */ - void wakeup(); - -} diff --git a/clients/src/main/java/org/oracle/okafka/clients/consumer/ConsumerConfig.java b/clients/src/main/java/org/oracle/okafka/clients/consumer/ConsumerConfig.java index 6937fe1..7845239 100644 --- a/clients/src/main/java/org/oracle/okafka/clients/consumer/ConsumerConfig.java +++ b/clients/src/main/java/org/oracle/okafka/clients/consumer/ConsumerConfig.java @@ -1,7 +1,7 @@ /* -** OKafka Java Client version 0.8. +** OKafka Java Client version 23.4. ** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. +** Copyright (c) 2019, 2024 Oracle and/or its affiliates. ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
*/ @@ -29,24 +29,31 @@ package org.oracle.okafka.clients.consumer; -import org.oracle.okafka.clients.CommonClientConfigs; -import org.oracle.okafka.common.config.AbstractConfig; -import org.oracle.okafka.common.config.ConfigDef; -import org.oracle.okafka.common.config.ConfigDef.Importance; -import org.oracle.okafka.common.config.ConfigDef.Type; -import org.oracle.okafka.common.metrics.Sensor; -import org.oracle.okafka.common.requests.IsolationLevel; -import org.oracle.okafka.common.serialization.Deserializer; - -import static org.oracle.okafka.common.config.ConfigDef.Range.atLeast; -import static org.oracle.okafka.common.config.ConfigDef.ValidString.in; +import org.apache.kafka.clients.ClientDnsLookup; +import org.apache.kafka.clients.CommonClientConfigs; +import org.apache.kafka.common.IsolationLevel; +import org.apache.kafka.common.config.AbstractConfig; +import org.apache.kafka.common.config.ConfigDef; +import org.apache.kafka.common.config.ConfigDef.Importance; +import org.apache.kafka.common.config.ConfigDef.Type; +import org.apache.kafka.common.config.SecurityConfig; +import org.apache.kafka.common.errors.InvalidConfigurationException; +import org.apache.kafka.common.metrics.Sensor; +import org.apache.kafka.common.requests.JoinGroupRequest; +import org.apache.kafka.common.serialization.Deserializer; +import org.oracle.okafka.common.config.SslConfigs; import java.util.Collections; import java.util.HashMap; import java.util.Locale; import java.util.Map; +import java.util.Optional; import java.util.Properties; import java.util.Set; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.apache.kafka.common.config.ConfigDef.Range.atLeast; +import static org.apache.kafka.common.config.ConfigDef.ValidString.in; /** * The consumer configuration keys @@ -60,16 +67,22 @@ public class ConsumerConfig extends AbstractConfig { */ //public static final String ORACLE_SID = CommonClientConfigs.ORACLE_SID; - public static final String ORACLE_SERVICE_NAME = CommonClientConfigs.ORACLE_SERVICE_NAME; - public static final String ORACLE_INSTANCE_NAME = CommonClientConfigs.ORACLE_INSTANCE_NAME; + public static final String ORACLE_SERVICE_NAME = org.oracle.okafka.clients.CommonClientConfigs.ORACLE_SERVICE_NAME; + public static final String ORACLE_INSTANCE_NAME = org.oracle.okafka.clients.CommonClientConfigs.ORACLE_INSTANCE_NAME; /** oracle.net.tns_admin */ - public static final String ORACLE_NET_TNS_ADMIN = CommonClientConfigs.ORACLE_NET_TNS_ADMIN; + public static final String ORACLE_NET_TNS_ADMIN = org.oracle.okafka.clients.CommonClientConfigs.ORACLE_NET_TNS_ADMIN; /** * group.id */ - public static final String GROUP_ID_CONFIG = "group.id"; - private static final String GROUP_ID_DOC = "A unique string that identifies the consumer group this consumer belongs to. 
This property is must if consumer wants to consume from subscribed topic."; + public static final String GROUP_ID_CONFIG = CommonClientConfigs.GROUP_ID_CONFIG; + private static final String GROUP_ID_DOC = CommonClientConfigs.GROUP_ID_DOC; + + /** + * group.instance.id + */ + public static final String GROUP_INSTANCE_ID_CONFIG = CommonClientConfigs.GROUP_INSTANCE_ID_CONFIG; + private static final String GROUP_INSTANCE_ID_DOC = CommonClientConfigs.GROUP_INSTANCE_ID_DOC; /** max.poll.records */ public static final String MAX_POLL_RECORDS_CONFIG = "max.poll.records"; @@ -108,6 +121,9 @@ public class ConsumerConfig extends AbstractConfig { */ public static final String BOOTSTRAP_SERVERS_CONFIG = CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG; + /** client.dns.lookup */ + public static final String CLIENT_DNS_LOOKUP_CONFIG = CommonClientConfigs.CLIENT_DNS_LOOKUP_CONFIG; + /** * enable.auto.commit */ @@ -124,7 +140,19 @@ public class ConsumerConfig extends AbstractConfig { * partition.assignment.strategy */ public static final String PARTITION_ASSIGNMENT_STRATEGY_CONFIG = "partition.assignment.strategy"; - private static final String PARTITION_ASSIGNMENT_STRATEGY_DOC = "The class name of the partition assignment strategy that the client will use to distribute partition ownership amongst consumer instances when group management is used. This property is not yet supported."; + private static final String PARTITION_ASSIGNMENT_STRATEGY_DOC = "A list of class names or class types, " + + "ordered by preference, of supported partition assignment strategies that the client will use to distribute " + + "partition ownership amongst consumer instances when group management is used. Available options are:" + + "
    " + + "
<li>org.apache.kafka.clients.consumer.RangeAssignor: The default assignor, which works on a per-topic basis.</li>" +
+ "<li>org.apache.kafka.clients.consumer.RoundRobinAssignor: Assigns partitions to consumers in a round-robin fashion.</li>" +
+ "<li>org.apache.kafka.clients.consumer.StickyAssignor: Guarantees an assignment that is " +
+ "maximally balanced while preserving as many existing partition assignments as possible.</li>" +
+ "<li>org.apache.kafka.clients.consumer.CooperativeStickyAssignor: Follows the same StickyAssignor " +
+ "logic, but allows for cooperative rebalancing.</li>" +
+ "</ul>" +
+ "<p>
Implementing the org.apache.kafka.clients.consumer.ConsumerPartitionAssignor " + + "interface allows you to plug in a custom assignment strategy."; /** * auto.offset.reset @@ -182,6 +210,11 @@ public class ConsumerConfig extends AbstractConfig { */ public static final String CLIENT_ID_CONFIG = CommonClientConfigs.CLIENT_ID_CONFIG; + /** + * client.rack + */ + public static final String CLIENT_RACK_CONFIG = CommonClientConfigs.CLIENT_RACK_CONFIG; + /** * reconnect.backoff.ms */ @@ -225,11 +258,17 @@ public class ConsumerConfig extends AbstractConfig { /** key.deserializer */ public static final String KEY_DESERIALIZER_CLASS_CONFIG = "key.deserializer"; - public static final String KEY_DESERIALIZER_CLASS_DOC = "Deserializer class for key that implements the org.oracle.okafka.common.serialization.Deserializer interface."; + public static final String KEY_DESERIALIZER_CLASS_DOC = "Deserializer class for key that implements the org.apache.kafka.common.serialization.Deserializer interface."; /** value.deserializer */ public static final String VALUE_DESERIALIZER_CLASS_CONFIG = "value.deserializer"; - public static final String VALUE_DESERIALIZER_CLASS_DOC = "Deserializer class for value that implements the org.oracle.okafka.common.serialization.Deserializer interface."; + public static final String VALUE_DESERIALIZER_CLASS_DOC = "Deserializer class for value that implements the org.apache.kafka.common.serialization.Deserializer interface."; + + /** socket.connection.setup.timeout.ms */ + public static final String SOCKET_CONNECTION_SETUP_TIMEOUT_MS_CONFIG = CommonClientConfigs.SOCKET_CONNECTION_SETUP_TIMEOUT_MS_CONFIG; + + /** socket.connection.setup.timeout.max.ms */ + public static final String SOCKET_CONNECTION_SETUP_TIMEOUT_MAX_MS_CONFIG = CommonClientConfigs.SOCKET_CONNECTION_SETUP_TIMEOUT_MAX_MS_CONFIG; /** connections.max.idle.ms */ public static final String CONNECTIONS_MAX_IDLE_MS_CONFIG = CommonClientConfigs.CONNECTIONS_MAX_IDLE_MS_CONFIG; @@ -245,7 +284,7 @@ public class ConsumerConfig extends AbstractConfig { /** interceptor.classes */ public static final String INTERCEPTOR_CLASSES_CONFIG = "interceptor.classes"; public static final String INTERCEPTOR_CLASSES_DOC = "A list of classes to use as interceptors. " - + "Implementing the org.oracle.okafka.clients.consumer.ConsumerInterceptor interface allows you to intercept (and possibly mutate) records " + + "Implementing the org.apache.kafka.clients.consumer.ConsumerInterceptor interface allows you to intercept (and possibly mutate) records " + "received by the consumer. By default, there are no interceptors."; @@ -266,6 +305,20 @@ public class ConsumerConfig extends AbstractConfig { */ static final String LEAVE_GROUP_ON_CLOSE_CONFIG = "internal.leave.group.on.close"; + /** + * internal.throw.on.fetch.stable.offset.unsupported + * Whether or not the consumer should throw when the new stable offset feature is supported. + * If set to true then the client shall crash upon hitting it. + * The purpose of this flag is to prevent unexpected broker downgrade which makes + * the offset fetch protection against pending commit invalid. The safest approach + * is to fail fast to avoid introducing correctness issue. + * + *

+ * Note: this is an internal configuration and could be changed in the future in a backward incompatible way + * + */ + static final String THROW_ON_FETCH_STABLE_OFFSET_UNSUPPORTED = "internal.throw.on.fetch.stable.offset.unsupported"; + /** isolation.level */ public static final String ISOLATION_LEVEL_CONFIG = "isolation.level"; public static final String ISOLATION_LEVEL_DOC = "

Controls how to read messages written transactionally. If set to read_committed, consumer.poll() will only return" + @@ -277,7 +330,23 @@ public class ConsumerConfig extends AbstractConfig { " return the LSO. This property is not yet supported."; public static final String DEFAULT_ISOLATION_LEVEL = IsolationLevel.READ_UNCOMMITTED.toString().toLowerCase(Locale.ROOT); - + + /** allow.auto.create.topics */ + public static final String ALLOW_AUTO_CREATE_TOPICS_CONFIG = "allow.auto.create.topics"; + private static final String ALLOW_AUTO_CREATE_TOPICS_DOC = "Allow automatic topic creation on the broker when" + + " subscribing to or assigning a topic. A topic being subscribed to will be automatically created only if the" + + " broker allows for it using `auto.create.topics.enable` broker configuration. This configuration must" + + " be set to `false` when using brokers older than 0.11.0"; + public static final boolean DEFAULT_ALLOW_AUTO_CREATE_TOPICS = true; + + /** + * security.providers + */ + public static final String SECURITY_PROVIDERS_CONFIG = SecurityConfig.SECURITY_PROVIDERS_CONFIG; + private static final String SECURITY_PROVIDERS_DOC = SecurityConfig.SECURITY_PROVIDERS_DOC; + + private static final AtomicInteger CONSUMER_CLIENT_ID_SEQUENCE = new AtomicInteger(1); + static { CONFIG = new ConfigDef().define(BOOTSTRAP_SERVERS_CONFIG, Type.LIST, @@ -285,7 +354,20 @@ public class ConsumerConfig extends AbstractConfig { new ConfigDef.NonNullValidator(), Importance.HIGH, CommonClientConfigs.BOOTSTRAP_SERVERS_DOC) - .define(GROUP_ID_CONFIG, Type.STRING, "", Importance.HIGH, GROUP_ID_DOC) + .define(CLIENT_DNS_LOOKUP_CONFIG, + Type.STRING, + ClientDnsLookup.USE_ALL_DNS_IPS.toString(), + in(ClientDnsLookup.USE_ALL_DNS_IPS.toString(), + ClientDnsLookup.USE_ALL_DNS_IPS.toString(), + ClientDnsLookup.RESOLVE_CANONICAL_BOOTSTRAP_SERVERS_ONLY.toString()), + Importance.MEDIUM, + CommonClientConfigs.CLIENT_DNS_LOOKUP_DOC) + .define(GROUP_ID_CONFIG, Type.STRING, null, Importance.HIGH, GROUP_ID_DOC) + .define(GROUP_INSTANCE_ID_CONFIG, + Type.STRING, + null, + Importance.MEDIUM, + GROUP_INSTANCE_ID_DOC) .define(SESSION_TIMEOUT_MS_CONFIG, Type.INT, 10000, @@ -296,12 +378,13 @@ public class ConsumerConfig extends AbstractConfig { 3000, Importance.HIGH, HEARTBEAT_INTERVAL_MS_DOC) - /*.define(PARTITION_ASSIGNMENT_STRATEGY_CONFIG, + .define(PARTITION_ASSIGNMENT_STRATEGY_CONFIG, Type.LIST, - Collections.singletonList(RangeAssignor.class), + //Collections.singletonList(RoundRobinAssignor.class), + Collections.singletonList(TxEQAssignor.class), new ConfigDef.NonNullValidator(), Importance.MEDIUM, - PARTITION_ASSIGNMENT_STRATEGY_DOC)*/ + PARTITION_ASSIGNMENT_STRATEGY_DOC) .define(METADATA_MAX_AGE_CONFIG, Type.LONG, 5 * 60 * 1000, @@ -324,6 +407,11 @@ public class ConsumerConfig extends AbstractConfig { "", Importance.LOW, CommonClientConfigs.CLIENT_ID_DOC) + .define(CLIENT_RACK_CONFIG, + Type.STRING, + "", + Importance.LOW, + CommonClientConfigs.CLIENT_RACK_DOC) .define(MAX_PARTITION_FETCH_BYTES_CONFIG, Type.INT, DEFAULT_MAX_PARTITION_FETCH_BYTES, @@ -333,13 +421,13 @@ public class ConsumerConfig extends AbstractConfig { .define(SEND_BUFFER_CONFIG, Type.INT, 128 * 1024, - atLeast(-1), + atLeast(CommonClientConfigs.SEND_BUFFER_LOWER_BOUND), Importance.MEDIUM, CommonClientConfigs.SEND_BUFFER_DOC) .define(RECEIVE_BUFFER_CONFIG, Type.INT, 64 * 1024, - atLeast(-1), + atLeast(CommonClientConfigs.RECEIVE_BUFFER_LOWER_BOUND), Importance.MEDIUM, CommonClientConfigs.RECEIVE_BUFFER_DOC) .define(FETCH_MIN_BYTES_CONFIG, @@ -404,7 
+492,7 @@ public class ConsumerConfig extends AbstractConfig { .define(METRICS_RECORDING_LEVEL_CONFIG, Type.STRING, Sensor.RecordingLevel.INFO.toString(), - in(Sensor.RecordingLevel.INFO.toString(), Sensor.RecordingLevel.DEBUG.toString()), + in(Sensor.RecordingLevel.INFO.toString(), Sensor.RecordingLevel.DEBUG.toString(), Sensor.RecordingLevel.TRACE.toString()), Importance.LOW, CommonClientConfigs.METRICS_RECORDING_LEVEL_DOC) .define(METRIC_REPORTER_CLASSES_CONFIG, @@ -432,7 +520,17 @@ public class ConsumerConfig extends AbstractConfig { 60 * 1000, atLeast(0), Importance.MEDIUM, - DEFAULT_API_TIMEOUT_MS_DOC) + CommonClientConfigs.DEFAULT_API_TIMEOUT_MS_DOC) + .define(SOCKET_CONNECTION_SETUP_TIMEOUT_MS_CONFIG, + Type.LONG, + CommonClientConfigs.DEFAULT_SOCKET_CONNECTION_SETUP_TIMEOUT_MS, + Importance.MEDIUM, + CommonClientConfigs.SOCKET_CONNECTION_SETUP_TIMEOUT_MS_DOC) + .define(SOCKET_CONNECTION_SETUP_TIMEOUT_MAX_MS_CONFIG, + Type.LONG, + CommonClientConfigs.DEFAULT_SOCKET_CONNECTION_SETUP_TIMEOUT_MAX_MS, + Importance.MEDIUM, + CommonClientConfigs.SOCKET_CONNECTION_SETUP_TIMEOUT_MAX_MS_DOC) /* default is set to be a bit lower than the server default (10 min), to avoid both client and server closing connection at same time */ .define(CONNECTIONS_MAX_IDLE_MS_CONFIG, Type.LONG, @@ -463,48 +561,89 @@ public class ConsumerConfig extends AbstractConfig { Importance.MEDIUM, EXCLUDE_INTERNAL_TOPICS_DOC) .defineInternal(LEAVE_GROUP_ON_CLOSE_CONFIG, - Type.BOOLEAN, - true, - Importance.LOW) + Type.BOOLEAN, + true, + Importance.LOW) + .defineInternal(THROW_ON_FETCH_STABLE_OFFSET_UNSUPPORTED, + Type.BOOLEAN, + false, + Importance.LOW) .define(ISOLATION_LEVEL_CONFIG, Type.STRING, DEFAULT_ISOLATION_LEVEL, in(IsolationLevel.READ_COMMITTED.toString().toLowerCase(Locale.ROOT), IsolationLevel.READ_UNCOMMITTED.toString().toLowerCase(Locale.ROOT)), Importance.MEDIUM, ISOLATION_LEVEL_DOC) + .define(ALLOW_AUTO_CREATE_TOPICS_CONFIG, + Type.BOOLEAN, + DEFAULT_ALLOW_AUTO_CREATE_TOPICS, + Importance.MEDIUM, + ALLOW_AUTO_CREATE_TOPICS_DOC) // security support + .define(SECURITY_PROVIDERS_CONFIG, + Type.STRING, + null, + Importance.LOW, + SECURITY_PROVIDERS_DOC) .define(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, Type.STRING, CommonClientConfigs.DEFAULT_SECURITY_PROTOCOL, Importance.MEDIUM, CommonClientConfigs.SECURITY_PROTOCOL_DOC) .withClientSslSupport() - .withClientSaslSupport() + //.withClientSaslSupport() + .define(SslConfigs.TNS_ALIAS, ConfigDef.Type.STRING, null, Importance.MEDIUM, SslConfigs.TNS_ALIAS_DOC) .define(ORACLE_SERVICE_NAME, Type.STRING, null, Importance.HIGH, - CommonClientConfigs.ORACLE_SERVICE_NAME_DOC) + org.oracle.okafka.clients.CommonClientConfigs.ORACLE_SERVICE_NAME_DOC) .define(ORACLE_INSTANCE_NAME, Type.STRING, null, Importance.HIGH, - CommonClientConfigs.ORACLE_INSTANCE_NAME_DOC) - .define(CommonClientConfigs.ORACLE_NET_TNS_ADMIN, + org.oracle.okafka.clients.CommonClientConfigs.ORACLE_INSTANCE_NAME_DOC) + .define(org.oracle.okafka.clients.CommonClientConfigs.ORACLE_NET_TNS_ADMIN, ConfigDef.Type.STRING, Importance.MEDIUM, - CommonClientConfigs.ORACLE_NET_TNS_ADMIN_DOC); + org.oracle.okafka.clients.CommonClientConfigs.ORACLE_NET_TNS_ADMIN_DOC); } @Override protected Map postProcessParsedConfig(final Map parsedValues) { - return CommonClientConfigs.postProcessReconnectBackoffConfigs(this, parsedValues); + Map refinedConfigs = CommonClientConfigs.postProcessReconnectBackoffConfigs(this, parsedValues); + maybeOverrideClientId(refinedConfigs); + return refinedConfigs; + } + + private void 
maybeOverrideClientId(Map configs) { + final String clientId = this.getString(CLIENT_ID_CONFIG); + if (clientId == null || clientId.isEmpty()) { + final String groupId = this.getString(GROUP_ID_CONFIG); + String groupInstanceId = this.getString(GROUP_INSTANCE_ID_CONFIG); + if (groupInstanceId != null) + JoinGroupRequest.validateGroupInstanceId(groupInstanceId); + + String groupInstanceIdPart = groupInstanceId != null ? groupInstanceId : CONSUMER_CLIENT_ID_SEQUENCE.getAndIncrement() + ""; + String generatedClientId = String.format("consumer-%s-%s", groupId, groupInstanceIdPart); + configs.put(CLIENT_ID_CONFIG, generatedClientId); + } } + /** + * @deprecated Since 2.7.0. This will be removed in a future major release. + */ + @Deprecated public static Map addDeserializerToConfig(Map configs, Deserializer keyDeserializer, Deserializer valueDeserializer) { + return appendDeserializerToConfig(configs, keyDeserializer, valueDeserializer); + } + + static Map appendDeserializerToConfig(Map configs, + Deserializer keyDeserializer, + Deserializer valueDeserializer) { Map newConfigs = new HashMap<>(configs); if (keyDeserializer != null) newConfigs.put(KEY_DESERIALIZER_CLASS_CONFIG, keyDeserializer.getClass()); @@ -513,6 +652,10 @@ public static Map addDeserializerToConfig(Map co return newConfigs; } + /** + * @deprecated Since 2.7.0. This will be removed in a future major release. + */ + @Deprecated public static Properties addDeserializerToConfig(Properties properties, Deserializer keyDeserializer, Deserializer valueDeserializer) { @@ -525,6 +668,19 @@ public static Properties addDeserializerToConfig(Properties properties, return newProperties; } + boolean maybeOverrideEnableAutoCommit() { + Optional groupId = Optional.ofNullable(getString(CommonClientConfigs.GROUP_ID_CONFIG)); + boolean enableAutoCommit = getBoolean(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG); + if (!groupId.isPresent()) { // overwrite in case of default group id where the config is not explicitly provided + if (!originals().containsKey(ENABLE_AUTO_COMMIT_CONFIG)) { + enableAutoCommit = false; + } else if (enableAutoCommit) { + throw new InvalidConfigurationException(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG + " cannot be set to true when default group id (null) is used."); + } + } + return enableAutoCommit; + } + public ConsumerConfig(Properties props) { super(CONFIG, props); } @@ -533,7 +689,7 @@ public ConsumerConfig(Map props) { super(CONFIG, props); } - ConsumerConfig(Map props, boolean doLog) { + protected ConsumerConfig(Map props, boolean doLog) { super(CONFIG, props, doLog); } @@ -541,8 +697,12 @@ public static Set configNames() { return CONFIG.names(); } + public static ConfigDef configDef() { + return new ConfigDef(CONFIG); + } + public static void main(String[] args) { - System.out.println(CONFIG.toHtmlTable()); + System.out.println(CONFIG.toHtml(4, config -> "consumerconfigs_" + config)); } } diff --git a/clients/src/main/java/org/oracle/okafka/clients/consumer/ConsumerInterceptor.java b/clients/src/main/java/org/oracle/okafka/clients/consumer/ConsumerInterceptor.java deleted file mode 100644 index 78f7038..0000000 --- a/clients/src/main/java/org/oracle/okafka/clients/consumer/ConsumerInterceptor.java +++ /dev/null @@ -1,89 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
-*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.oracle.okafka.clients.consumer; - - -import java.util.Map; - -import org.oracle.okafka.common.Configurable; -import org.oracle.okafka.common.TopicPartition; - -/** - * A plugin interface that allows you to intercept (and possibly mutate) records received by the consumer. A primary use-case - * is for third-party components to hook into the consumer applications for custom monitoring, logging, etc. - * - *

- * This class will get consumer config properties via configure() method, including clientId assigned
- * by KafkaConsumer if not specified in the consumer config. The interceptor implementation needs to be aware that it will be
- * sharing consumer config namespace with other interceptors and serializers, and ensure that there are no conflicts.
- * <p>
- * Exceptions thrown by ConsumerInterceptor methods will be caught, logged, but not propagated further. As a result, if
- * the user configures the interceptor with the wrong key and value type parameters, the consumer will not throw an exception,
- * just log the errors.
- * <p>
- * ConsumerInterceptor callbacks are called from the same thread that invokes
- * {@link org.oracle.okafka.clients.consumer.KafkaConsumer#poll(java.time.Duration)}.
- */
-public interface ConsumerInterceptor<K, V> extends Configurable {
-
-    /**
-     * This is called just before the records are returned by
-     * {@link org.oracle.okafka.clients.consumer.KafkaConsumer#poll(java.time.Duration)}
-     * <p>
-     * This method is allowed to modify consumer records, in which case the new records will be
-     * returned. There is no limitation on number of records that could be returned from this
-     * method. I.e., the interceptor can filter the records or generate new records.
-     * <p>
-     * Any exception thrown by this method will be caught by the caller, logged, but not propagated to the client.
-     *

- * Since the consumer may run multiple interceptors, a particular interceptor's onConsume() callback will be called - * in the order specified by {@link org.oracle.okafka.clients.consumer.ConsumerConfig#INTERCEPTOR_CLASSES_CONFIG}. - * The first interceptor in the list gets the consumed records, the following interceptor will be passed the records returned - * by the previous interceptor, and so on. Since interceptors are allowed to modify records, interceptors may potentially get - * the records already modified by other interceptors. However, building a pipeline of mutable interceptors that depend on the output - * of the previous interceptor is discouraged, because of potential side-effects caused by interceptors potentially failing - * to modify the record and throwing an exception. If one of the interceptors in the list throws an exception from onConsume(), - * the exception is caught, logged, and the next interceptor is called with the records returned by the last successful interceptor - * in the list, or otherwise the original consumed records. - * - * @param records records to be consumed by the client or records returned by the previous interceptors in the list. - * @return records that are either modified by the interceptor or same as records passed to this method. - */ - public ConsumerRecords onConsume(ConsumerRecords records); - - /** - * This is called when offsets get committed. - *

- * Any exception thrown by this method will be ignored by the caller. - * - * @param offsets A map of offsets by partition with associated metadata - */ - public void onCommit(Map offsets); - - /** - * This is called when interceptor is closed - */ - public void close(); -} diff --git a/clients/src/main/java/org/oracle/okafka/clients/consumer/ConsumerRebalanceListener.java b/clients/src/main/java/org/oracle/okafka/clients/consumer/ConsumerRebalanceListener.java deleted file mode 100644 index dac4397e..0000000 --- a/clients/src/main/java/org/oracle/okafka/clients/consumer/ConsumerRebalanceListener.java +++ /dev/null @@ -1,44 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.oracle.okafka.clients.consumer; - -import java.util.Collection; - -import org.oracle.okafka.common.TopicPartition; - -/** - * This feature is not yet supported. - */ -public interface ConsumerRebalanceListener { - - /** - * This method is not yet supported. - */ - void onPartitionsRevoked(Collection partitions); - - /** - * This method is not yet supported. - */ - void onPartitionsAssigned(Collection partitions); -} diff --git a/clients/src/main/java/org/oracle/okafka/clients/consumer/ConsumerRecord.java b/clients/src/main/java/org/oracle/okafka/clients/consumer/ConsumerRecord.java deleted file mode 100644 index b6e1fe1..0000000 --- a/clients/src/main/java/org/oracle/okafka/clients/consumer/ConsumerRecord.java +++ /dev/null @@ -1,238 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -/* - * 04/20/2020: This file is modified to support Kafka Java Client compatability to Oracle Transactional Event Queues. - * - */ - -package org.oracle.okafka.clients.consumer; - -import org.oracle.okafka.common.header.Headers; -import org.oracle.okafka.common.header.internals.RecordHeaders; -import org.oracle.okafka.common.record.DefaultRecord; -import org.oracle.okafka.common.record.RecordBatch; -import org.oracle.okafka.common.record.TimestampType; - -/** - * A key/value pair to be received from TEQ. This also consists of a topic name and - * a partition number from which the record is being received, an offset that points - * to the record in a TEQ partition, and a timestamp as marked by the corresponding ProducerRecord. - * In TEQ message id uniquely identifies a record, this msgid is converted to kafka equivalent offset. - */ -public class ConsumerRecord { - public static final long NO_TIMESTAMP = RecordBatch.NO_TIMESTAMP; - public static final int NULL_SIZE = -1; - public static final int NULL_CHECKSUM = -1; - - private final String topic; - private final int partition; - private final long offset; - private final long timestamp; - private final TimestampType timestampType; - private final int serializedKeySize; - private final int serializedValueSize; - private final Headers headers; - private final K key; - private final V value; - - private volatile Long checksum; - - /** - * Creates a record to be received from a specified topic and partition ( - * - * @param topic The topic this record is received from - * @param partition The partition of the topic this record is received from - * @param offset The offset of this record in the corresponding TEQ partition - * @param key The key of the record, if one exists (null is allowed) - * @param value The record contents - */ - public ConsumerRecord(String topic, - int partition, - long offset, - K key, - V value) { - this(topic, partition, offset, NO_TIMESTAMP, TimestampType.NO_TIMESTAMP_TYPE, - NULL_CHECKSUM, NULL_SIZE, NULL_SIZE, key, value); - } - - /** - * Creates a record to be received from a specified topic and partition - * - * @param topic The topic this record is received from - * @param partition The partition of the topic this record is received from - * @param offset The offset of this record in the corresponding TEQ partition - * @param timestamp The timestamp of the record. - * @param timestampType The timestamp type. LogAppendTime is the default and only supported type. - * @param checksum The checksum (CRC32) of the full record - * @param serializedKeySize The length of the serialized key - * @param serializedValueSize The length of the serialized value - * @param key The key of the record, if one exists (null is allowed) - * @param value The record contents - */ - public ConsumerRecord(String topic, - int partition, - long offset, - long timestamp, - TimestampType timestampType, - long checksum, - int serializedKeySize, - int serializedValueSize, - K key, - V value) { - this(topic, partition, offset, timestamp, timestampType, checksum, serializedKeySize, serializedValueSize, - key, value, new RecordHeaders()); - } - - /** - * Creates a record to be received from a specified topic and partition - * - * @param topic The topic this record is received from - * @param partition The partition of the topic this record is received from - * @param offset The offset of this record in the corresponding TEQ partition - * @param timestamp The timestamp of the record. - * @param timestampType The timestamp type. 
LogAppendTime is the default and only supported type. - * @param checksum The checksum (CRC32) of the full record - * @param serializedKeySize The length of the serialized key - * @param serializedValueSize The length of the serialized value - * @param key The key of the record, if one exists (null is allowed) - * @param value The record contents - * @param headers The headers of the record. - */ - public ConsumerRecord(String topic, - int partition, - long offset, - long timestamp, - TimestampType timestampType, - Long checksum, - int serializedKeySize, - int serializedValueSize, - K key, - V value, - Headers headers) { - if (topic == null) - throw new IllegalArgumentException("Topic cannot be null"); - this.topic = topic; - this.partition = partition; - this.offset = offset; - this.timestamp = timestamp; - this.timestampType = timestampType; - this.checksum = checksum; - this.serializedKeySize = serializedKeySize; - this.serializedValueSize = serializedValueSize; - this.key = key; - this.value = value; - this.headers = headers; - } - - /** - * The topic this record is received from - */ - public String topic() { - return this.topic; - } - - /** - * The partition from which this record is received - */ - public int partition() { - return this.partition; - } - - /** - * The headers - */ - public Headers headers() { - return headers; - } - - /** - * The key (or null if no key is specified) - */ - public K key() { - return key; - } - - /** - * The value - */ - public V value() { - return value; - } - - /** - * The position of this record in the corresponding TEQ partition. Returns msgid in the form of kafka equivalent offset. - */ - public long offset() { - return offset; - } - - /** - * The timestamp of this record - */ - public long timestamp() { - return timestamp; - } - - /** - * The timestamp type of this record - */ - public TimestampType timestampType() { - return timestampType; - } - - /** - * This method is not yet supported. - */ - @Deprecated - public long checksum() { - return -1; - } - - /** - * The size of the serialized, uncompressed key in bytes. If key is null, the returned size - * is -1. - */ - public int serializedKeySize() { - return this.serializedKeySize; - } - - /** - * The size of the serialized, uncompressed value in bytes. If value is null, the - * returned size is -1. - */ - public int serializedValueSize() { - return this.serializedValueSize; - } - - @Override - public String toString() { - return "ConsumerRecord(topic = " + topic() + ", partition = " + partition() + ", offset = " + offset() - + ", " + timestampType + " = " + timestamp - + ", serialized key size = " + serializedKeySize - + ", serialized value size = " + serializedValueSize - + ", headers = " + headers - + ", key = " + key + ", value = " + value + ")"; - } -} diff --git a/clients/src/main/java/org/oracle/okafka/clients/consumer/ConsumerRecords.java b/clients/src/main/java/org/oracle/okafka/clients/consumer/ConsumerRecords.java deleted file mode 100644 index 148b185..0000000 --- a/clients/src/main/java/org/oracle/okafka/clients/consumer/ConsumerRecords.java +++ /dev/null @@ -1,138 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.oracle.okafka.clients.consumer; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Set; - -import org.oracle.okafka.common.TopicPartition; -import org.oracle.okafka.common.utils.AbstractIterator; - -/** - * A container that holds the list {@link ConsumerRecord} per partition for a - * particular topic. There is one {@link ConsumerRecord} list for every topic - * partition returned by a {@link Consumer#poll(java.time.Duration)} operation. - */ -public class ConsumerRecords implements Iterable> { - - @SuppressWarnings("unchecked") - public static final ConsumerRecords EMPTY = new ConsumerRecords<>(Collections.EMPTY_MAP); - - private final Map>> records; - - public ConsumerRecords(Map>> records) { - this.records = records; - } - - /** - * Get just the records for the given partition - * - * @param partition The partition to get records for - */ - public List> records(TopicPartition partition) { - List> recs = this.records.get(partition); - if (recs == null) - return Collections.emptyList(); - else - return Collections.unmodifiableList(recs); - } - - /** - * Get just the records for the given topic - */ - public Iterable> records(String topic) { - if (topic == null) - throw new IllegalArgumentException("Topic must be non-null."); - List>> recs = new ArrayList<>(); - for (Map.Entry>> entry : records.entrySet()) { - if (entry.getKey().topic().equals(topic)) - recs.add(entry.getValue()); - } - return new ConcatenatedIterable<>(recs); - } - - /** - * Get the partitions which have records contained in this record set. 
- * @return the set of partitions with data in this record set (may be empty if no data was returned) - */ - public Set partitions() { - return Collections.unmodifiableSet(records.keySet()); - } - - @Override - public Iterator> iterator() { - return new ConcatenatedIterable<>(records.values()).iterator(); - } - - /** - * The number of records for all topics - */ - public int count() { - int count = 0; - for (List> recs: this.records.values()) - count += recs.size(); - return count; - } - - private static class ConcatenatedIterable implements Iterable> { - - private final Iterable>> iterables; - - public ConcatenatedIterable(Iterable>> iterables) { - this.iterables = iterables; - } - - @Override - public Iterator> iterator() { - return new AbstractIterator>() { - Iterator>> iters = iterables.iterator(); - Iterator> current; - - public ConsumerRecord makeNext() { - while (current == null || !current.hasNext()) { - if (iters.hasNext()) - current = iters.next().iterator(); - else - return allDone(); - } - return current.next(); - } - }; - } - } - - public boolean isEmpty() { - return records.isEmpty(); - } - - @SuppressWarnings("unchecked") - public static ConsumerRecords empty() { - return (ConsumerRecords) EMPTY; - } - -} diff --git a/clients/src/main/java/org/oracle/okafka/clients/consumer/KafkaConsumer.java b/clients/src/main/java/org/oracle/okafka/clients/consumer/KafkaConsumer.java index c023db0..e10c5c3 100644 --- a/clients/src/main/java/org/oracle/okafka/clients/consumer/KafkaConsumer.java +++ b/clients/src/main/java/org/oracle/okafka/clients/consumer/KafkaConsumer.java @@ -1,9 +1,9 @@ /* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ + ** OKafka Java Client version 23.4. + ** + ** Copyright (c) 2019, 2024 Oracle and/or its affiliates. + ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
+ */ /* * Licensed to the Apache Software Foundation (ASF) under one or more @@ -30,6 +30,8 @@ package org.oracle.okafka.clients.consumer; import java.net.InetSocketAddress; +import java.nio.ByteBuffer; +import java.sql.Connection; import java.time.Duration; import java.util.ArrayList; import java.util.Collection; @@ -40,876 +42,1389 @@ import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.Optional; +import java.util.OptionalLong; import java.util.Properties; import java.util.Set; +import java.util.StringTokenizer; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; import java.util.regex.Pattern; +import java.lang.instrument.Instrumentation; import javax.jms.JMSException; import oracle.jms.AQjmsBytesMessage; - -import org.oracle.okafka.clients.consumer.OffsetResetStrategy; -import org.oracle.okafka.clients.ClientUtils; +import oracle.jms.AQjmsDestination; + +//import org.oracle.okafka.clients.consumer.OffsetResetStrategy; +import org.oracle.okafka.clients.consumer.ConsumerConfig; +import org.apache.kafka.clients.ClientDnsLookup; +import org.apache.kafka.clients.ClientUtils; +//import org.oracle.okafka.clients.consumer.internals.PartitionAssignor; +import org.apache.kafka.clients.consumer.Consumer; +import org.apache.kafka.clients.consumer.ConsumerGroupMetadata; +import org.apache.kafka.clients.consumer.ConsumerInterceptor; +import org.apache.kafka.clients.consumer.ConsumerPartitionAssignor; +import org.apache.kafka.clients.consumer.ConsumerRebalanceListener; +import org.apache.kafka.clients.consumer.ConsumerRecord; +import org.apache.kafka.clients.consumer.ConsumerRecords; +import org.apache.kafka.clients.consumer.OffsetAndMetadata; +import org.apache.kafka.clients.consumer.OffsetAndTimestamp; +import org.apache.kafka.clients.consumer.OffsetCommitCallback; +import org.apache.kafka.clients.consumer.OffsetResetStrategy; import org.oracle.okafka.clients.CommonClientConfigs; import org.oracle.okafka.clients.Metadata; import org.oracle.okafka.clients.NetworkClient; -import org.oracle.okafka.clients.consumer.OffsetAndMetadata; -import org.oracle.okafka.clients.consumer.internals.ConsumerInterceptors; -import org.oracle.okafka.clients.consumer.internals.ConsumerMetrics; +import org.apache.kafka.clients.consumer.internals.ConsumerInterceptors; import org.oracle.okafka.clients.consumer.internals.ConsumerNetworkClient; +import org.oracle.okafka.clients.consumer.internals.FetchMetricsRegistry; +import org.oracle.okafka.clients.consumer.internals.OkafkaConsumerMetrics; import org.oracle.okafka.clients.consumer.internals.NoOpConsumerRebalanceListener; + import org.oracle.okafka.clients.consumer.internals.SubscriptionState; +import org.oracle.okafka.clients.consumer.internals.SubscriptionState.FetchPosition; +import org.apache.kafka.clients.Metadata.LeaderAndEpoch; import org.oracle.okafka.clients.consumer.internals.AQKafkaConsumer; -import org.oracle.okafka.common.Cluster; -import org.oracle.okafka.common.KafkaException; -import org.oracle.okafka.common.Metric; -import org.oracle.okafka.common.MetricName; -import org.oracle.okafka.common.PartitionInfo; -import org.oracle.okafka.common.TopicPartition; +import org.apache.kafka.common.Cluster; +import org.apache.kafka.common.KafkaException; +import org.apache.kafka.common.Metric; +import org.apache.kafka.common.MetricName; +import org.apache.kafka.common.Node; +import 
org.apache.kafka.common.PartitionInfo; +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.Uuid; import org.oracle.okafka.common.config.SslConfigs; -import org.oracle.okafka.common.errors.AuthenticationException; +import org.apache.kafka.common.errors.AuthenticationException; import org.oracle.okafka.common.errors.FeatureNotSupportedException; import org.oracle.okafka.common.errors.InvalidLoginCredentialsException; -import org.oracle.okafka.common.header.internals.RecordHeaders; -import org.oracle.okafka.common.internals.ClusterResourceListeners; -import org.oracle.okafka.common.metrics.JmxReporter; -import org.oracle.okafka.common.metrics.MetricConfig; -import org.oracle.okafka.common.metrics.Metrics; -import org.oracle.okafka.common.metrics.MetricsReporter; -import org.oracle.okafka.common.metrics.Sensor; -import org.oracle.okafka.common.record.TimestampType; +import org.oracle.okafka.common.network.AQClient; +import org.apache.kafka.common.header.internals.RecordHeaders; +import org.apache.kafka.common.internals.ClusterResourceListeners; +import org.apache.kafka.common.metrics.Gauge; +import org.apache.kafka.common.metrics.JmxReporter; +import org.apache.kafka.common.metrics.MetricConfig; +import org.apache.kafka.common.metrics.Metrics; +import org.apache.kafka.common.metrics.MetricsReporter; +import org.apache.kafka.common.metrics.Sensor; +import org.apache.kafka.common.metrics.stats.Avg; +import org.apache.kafka.common.metrics.stats.Max; +import org.apache.kafka.common.metrics.stats.Meter; +import org.apache.kafka.common.metrics.stats.Min; +import org.apache.kafka.common.metrics.stats.Value; +import org.apache.kafka.common.metrics.stats.WindowedCount; +import org.apache.kafka.common.record.TimestampType; import org.oracle.okafka.common.requests.IsolationLevel; -import org.oracle.okafka.common.serialization.Deserializer; -import org.oracle.okafka.common.utils.AppInfoParser; -import org.oracle.okafka.common.utils.LogContext; +import org.apache.kafka.common.serialization.Deserializer; +import org.apache.kafka.common.utils.AppInfoParser; +import org.apache.kafka.common.utils.LogContext; +import org.oracle.okafka.common.utils.ConnectionUtils; import org.oracle.okafka.common.utils.MessageIdConverter; +import org.oracle.okafka.common.utils.MessageIdConverter.OKafkaOffset; import org.oracle.okafka.common.utils.TNSParser; -import org.oracle.okafka.common.utils.Time; -import org.oracle.okafka.common.utils.Utils; +import org.apache.kafka.common.utils.Time; +import org.apache.kafka.common.utils.Timer; +import org.apache.kafka.common.utils.Utils; import org.slf4j.Logger; - /** - * Note: Topic name has to be in uppercase wherever used. - * A Java client that consumes records from a Transactional event queues. - * The consumer maintains a single jms session to any one of the available instance of oracle database to fetch data. - * Failure to close the consumer after use will leak this connection. - * + * A Java client that consumes records from a Transactional event + * queues(TxEventQ). + * + * The consumer maintains a single JDBC Connection to any one of the available + * instances of Oracle database to fetch consumer records. Failure to close the + * consumer after use will leak this connection. The client transparently + * handles the failure of Oracle database instances, and transparently adapts as + * topic partitions it fetches migrate within the Oracle database cluster. 
This + * client also allows groups of consumers to load balance consumption using + * consumer groups. + * *
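As a quick illustration of the close-or-leak behaviour described in the new Javadoc above, here is a minimal sketch that is not part of this patch. The connection settings (localhost:1521, freepdb1, a tns_admin directory of ".", group CG1, topic TXEQ) simply reuse the placeholder values from the Javadoc examples further down.

```java
// Minimal sketch (not from this patch): construct the consumer, poll once, and always
// close it so the single underlying JDBC connection is released.
import java.time.Duration;
import java.util.Arrays;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.oracle.okafka.clients.consumer.KafkaConsumer;

public class CloseConsumerSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:1521");   // illustrative listener address
        props.put("oracle.service.name", "freepdb1");        // illustrative service name
        props.put("oracle.net.tns_admin", ".");
        props.put("group.id", "CG1");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        try {
            consumer.subscribe(Arrays.asList("TXEQ"));
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
            for (ConsumerRecord<String, String> record : records)
                System.out.println(record.partition() + ":" + record.value());
        } finally {
            consumer.close();   // releases the JDBC connection held by this consumer
        }
    }
}
```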

Consumer Groups and Topic Subscriptions

* - * TEQ uses the concept of consumer groups to allow a pool of processes to divide the work of consuming and - * processing records. These processes can either be running on the same machine or they can be - * distributed over many machines to provide scalability and fault tolerance for processing. All consumer instances - * sharing the same {@code group.id} will be part of the same consumer group. - *

- * In consumer group there can be multiple consumer instances. Each instance internally holds single connection and session to TEQ. - * In 20c release or version 0.8 of OKafka.jar file, each consumer instance/session is assigned a single partition (or a single stream) of - * of subscribed topic. Client application has to start as many consumer instances as number of partitions of the topic. - * If consumer instance goes down , messages from assigned partition are not dequeued untill new/same consumer instance comes up. - * However other instances continue consuming fron their assigned partition whether consumer instance goes up or down i.e. there is no consumer rebalancing in any situation. - * If oracle db instance goes down, consumer instance tries connecting to same oracle db instance and consumes from newly assigned partition. - *

- * A Consumer can subscribe to single topic using {@link #subscribe(Collection) subscribe}. Consumer throws an exception if topic subscription collection size is greater than one. - * Also consumers can't subscribe using {@link #subscribe(Pattern) subscribe(Pattern)}, {@link #subscribe(Pattern, ConsumerRebalanceListener) subscribe(Pattern, ConsumerRebalanceListener)}, - * {@link #subscribe(Collection, ConsumerRebalanceListener) subscribe(Collection, ConsumerRebalanceListener)}. + * TxEventQ supports Apache Kafka's concept of consumer groups, which + * allows a pool of processes to divide the work of consuming and processing + * records, thus facilitating load balancing. These processes can either be + * running on the same machine or they can be distributed over many machines to + * provide scalability and fault tolerance for processing. All consumer + * instances sharing the same {@code group.id} will be part of the same consumer + * group. + * + * A Consumer can subscribe to a single topic using {@link #subscribe(Collection) + * subscribe} or {@link #subscribe(Collection, ConsumerRebalanceListener)}. + * The consumer throws a FeatureNotSupportedException if the topic subscription + * collection size is greater than one. Also, consumers can't subscribe using + * {@link #subscribe(Pattern) subscribe(Pattern)} or + * {@link #subscribe(Pattern, ConsumerRebalanceListener) subscribe(Pattern, + * ConsumerRebalanceListener)}, as pattern-based subscription is not supported + * for this release. Note: the topic name must be passed in upper case. * - *
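To make the subscription rules above concrete, the following sketch (not part of this patch) shows the supported single-topic call and the calls that are rejected in this release; the topic names TXEQ and TXEQ2 are illustrative.

```java
// Sketch of the documented subscription rules; 'consumer' is an already-constructed
// org.oracle.okafka.clients.consumer.KafkaConsumer instance.
import java.util.Arrays;
import java.util.regex.Pattern;

import org.apache.kafka.common.KafkaException;
import org.oracle.okafka.clients.consumer.KafkaConsumer;
import org.oracle.okafka.common.errors.FeatureNotSupportedException;

public class SubscribeSketch {
    public static void subscribeExamples(KafkaConsumer<String, String> consumer) {
        // Supported: a collection with exactly one topic, passed in upper case.
        consumer.subscribe(Arrays.asList("TXEQ"));

        // Not supported in this release: subscribing to more than one topic at a time.
        try {
            consumer.subscribe(Arrays.asList("TXEQ", "TXEQ2"));
        } catch (FeatureNotSupportedException e) {
            System.out.println("Multi-topic subscription rejected: " + e.getMessage());
        }

        // Not supported in this release: pattern-based subscription.
        try {
            consumer.subscribe(Pattern.compile("TX.*"));
        } catch (KafkaException e) {
            System.out.println("Pattern subscription rejected: " + e.getMessage());
        }
    }
}
```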

Offsets and Consumer Position

- * TEQ maintains a string message id for each record in a partition .This id is equivalent to kafka offset. This offset or - * msg id acts as a unique identifier of a record within that partition, and also denotes the position of the consumer - * in the partition. The position of consumer depends on {@link #commitSync() committed position}. This is the last offset that has been stored securely. Should the - * process starts or fail and restart , this is the offset that the consumer will recover to. The consumer can either automatically commit - * offsets periodically; or it can choose to control this committed position manually by calling one of the commit APIs - * (e.g. {@link #commitSync() commitSync} and {@link #commitAsync(OffsetCommitCallback) commitAsync}). + * Membership in a consumer group is maintained dynamically: if a process fails, + * the partitions assigned to it will be reassigned to other consumers in the + * same group. Similarly, if a new consumer joins the group, partitions will be + * moved from existing consumers to the new one. This is known as + * rebalancing the group. + * + * In addition, when group reassignment happens automatically, consumers can be + * notified through a {@link ConsumerRebalanceListener}, which allows them to + * finish necessary application-level logic such as state cleanup, manual offset + * commits, etc. + * + * For this release of OKafka, manually assignment of partition is not + * supported. Application will get FeatureNotSupportedException if + * {@link #assign(Collection)} method is invoked. + * + *

Detecting Consumer Failures

+ * + * After subscribing to a set of topics, the consumer will automatically join + * the group when {@link #poll(Duration)} is invoked. The poll API is designed + * to ensure consumer liveness. As long as you continue to call poll, the + * consumer will stay in the group and continue to receive messages from the + * partitions it was assigned. Under the covers, the OKafka consumer maintains + * a JDBC connection to the Oracle database. If the consumer crashes, its connection + * to the Oracle Database gets severed, the consumer is then considered + * dead, and its partitions are reassigned. + * + * The OKafka consumer does not send heartbeats to the Oracle database. For this + * release of OKafka, + *
  • max.poll.interval.ms is also not supported. + * + *
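The liveness rule above boils down to "keep calling poll". A hedged sketch of such a loop is shown below; it is not part of this patch, and the process() helper is a stand-in for application logic.

```java
// Sketch of a liveness-friendly poll loop, assuming the consumer is already
// constructed and subscribed as in the examples that follow.
import java.time.Duration;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;

public class PollLoopSketch {
    static volatile boolean running = true;

    public static void pollLoop(Consumer<String, String> consumer) {
        try {
            while (running) {
                // Keep calling poll(); as long as poll is invoked the consumer keeps its
                // JDBC connection active and retains its assigned partitions.
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
                for (ConsumerRecord<String, String> record : records)
                    process(record);
                consumer.commitSync();
            }
        } finally {
            consumer.close();
        }
    }

    private static void process(ConsumerRecord<String, String> record) {
        // Stand-in for application-specific processing.
        System.out.println(record.topic() + "-" + record.partition() + ": " + record.value());
    }
}
```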

    Offsets and Consumer Position

TxEventQ maintains an offset for each + * record of a partition in an internal format. This is equivalent to Apache + * Kafka's Consumer Record offset. This offset acts as a unique identifier of + * a record within that partition, and also denotes the position of the consumer + * in the partition. The position of the consumer depends on the {@link #commitSync() + * committed position}. This is the last offset that has been stored securely. + * Should the process start, or fail and restart, this is the offset that the + * consumer will recover to. The consumer can either automatically commit + * offsets periodically, or it can choose to control this committed position + * manually by calling one of the commit APIs (e.g. {@link #commitSync() + * commitSync} and {@link #commitAsync(OffsetCommitCallback) commitAsync}). * The examples below show how to use periodic or manual offset commits. * - *

    Automatic Offset Committing

    - * This example demonstrates a simple usage of oKafka's consumer api that relies on automatic offset committing. + *

    Automatic Offset Committing

    This example demonstrates a simple usage + * of oKafka's consumer api that relies on automatic offset committing. *

    + * *

      * {@code
      *     Properties props = new Properties();
    - *     props.put("oracle.service.name", "serviceid.regress.rdbms.dev.us.oracle.com");	    	     
    - *     props.put("oracle.instance.name", "instancename");
    - * 	   props.put("oracle.user.name", "username");
    - * 	   props.put("oracle.password", "pwd");
    - *	   props.put("bootstrap.servers", "IP:PORT");
    - *     props.put("group.id", "groupid");
    + *     props.put("bootstrap.servers", "localhost:1521");
    + *     props.put("oracle.service.name", "freepdb1");
    + *     props.put("oracle.net.tns_admin","."); 
    + *     props.put("group.id", "CG1");
      *     props.put("enable.auto.commit", "true");
      *     props.put("auto.commit.interval.ms", "10000");
    - *     props.put("key.deserializer",  "org.oracle.okafka.common.serialization.StringDeserializer");	      
    - *     props.put("value.deserializer",    "org.oracle.okafka.common.serialization.StringDeserializer");  	    p
    - *     rops.put("max.poll.records", 100);
    + *     props.put("key.deserializer",  "org.apache.kafka.common.serialization.StringDeserializer");	      
    + *     props.put("value.deserializer",    "org.apache.kafka.common.serialization.StringDeserializer");
    + *     props.put("max.poll.records", 100);
      *     KafkaConsumer consumer = null;
      *	   consumer = new KafkaConsumer(props);
    - *     consumer.subscribe(Arrays.asList(topic));
    + *     consumer.subscribe(Arrays.asList("TXEQ"));
      *     ConsumerRecords records = null; 
      *     try {
    - *		   records = consumer.poll(Duration.ofMillis(1000));
    - *	 	   for (ConsumerRecord record : records) {		 	  	   
    - *                   System.out.println("topic = , partition=  ,key= , value = \n"+ 		 	  	             
    - *                                 record.topic()+ "  "+record.partition()+ "  "+record.key()+"  "+ record.value());                  
    - *                  System.out.println(".......");
    - * 	 	    }
    - *	 	   consumer.commitSync();		 	  	    	 
    - *	     }catch(Exception ex) {
    - *	    	 ex.printStackTrace(); 
    - *       } finally {
    - *	    	 consumer.close();
    - *	     } 
    + *       records = consumer.poll(Duration.ofMillis(1000));
    + *       for (ConsumerRecord record : records) {
    + *         System.out.println("topic = , partition=  ,key= , value = \n"+ 		 	  	             
    + *           record.topic()+ "  "+record.partition()+ "  "+record.key()+"  "+ record.value());                  
    + *       }
    + *       consumer.commitSync();		 	  	    	 
    + *     }catch(Exception ex) {
    + *        ex.printStackTrace(); 
    + *     } finally {
    + *        consumer.close();
    + *     }
      *}
      * 
    * - * The connection to the cluster is bootstrapped by specifying a one broker to contact using the - * configuration {@code bootstrap.servers}. + * The connection to the Oracle Database cluster is bootstrapped by specifying a + * one Oracle Cluster node to contact using the configuration + * {@code bootstrap.servers}. *

    - * Setting {@code enable.auto.commit} means that offsets are committed automatically with a frequency controlled by - * the config {@code auto.commit.interval.ms}. + * Setting {@code enable.auto.commit} means that offsets are committed + * automatically with a frequency controlled by the config + * {@code auto.commit.interval.ms}. *

    - * In this example the consumer is subscribing to the topic foo as part of a group of consumers - * called groupid as configured with {@code group.id}. + * In this example the consumer is subscribing to the topic TXEQ as part + * of a group of consumers called CG1 as configured with + * {@code group.id}. *

    - * The deserializer settings specify how to turn bytes into objects. For example, by specifying string deserializers, we - * are saying that our record's key and value will just be simple strings. + * The deserializer settings specify how to turn bytes into objects. For + * example, by specifying string deserializers, we are saying that our record's + * key and value will just be simple strings. *
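Besides the config-string form shown above, deserializers can be passed directly to the constructor, in which case their configure() method is not invoked (see the constructor Javadoc later in this file). A small sketch, assuming the remaining connection properties are already in props:

```java
// Sketch: pass Deserializer instances directly instead of key/value deserializer
// class-name properties; configure() is then not called on them.
import java.util.Properties;

import org.apache.kafka.common.serialization.StringDeserializer;
import org.oracle.okafka.clients.consumer.KafkaConsumer;

public class DeserializerSketch {
    public static KafkaConsumer<String, String> create(Properties props) {
        // props is assumed to carry bootstrap.servers, oracle.service.name,
        // oracle.net.tns_admin, group.id, etc.; no deserializer entries are needed here.
        return new KafkaConsumer<>(props, new StringDeserializer(), new StringDeserializer());
    }
}
```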

    *

    Manual Offset Control

    *

    - * Instead of relying on the consumer to periodically commit consumed offsets, users can also control when records - * should be considered as consumed and hence commit their offsets. This is useful when the consumption of the messages - * is coupled with some processing logic and hence a message should not be considered as consumed until it is completed processing. + * Instead of relying on the consumer to periodically commit consumed offsets, + * users can also control when records should be considered as consumed and + * hence commit their offsets. This is useful when the consumption of the + * messages is coupled with some processing logic and hence a message should not + * be considered as consumed until it is completed processing. *

    + * *

    - *     Properties props = new Properties();
    - *     props.put("oracle.service.name", "serviceid.regress.rdbms.dev.us.oracle.com");	    	     
    - *     props.put("oracle.instance.name", "instancename");
    - *     props.put("oracle.user.name", "username");
    - *     props.put("oracle.password", "pwd");
    - *     props.put("bootstrap.servers", "IP:PORT");
    - *     props.put("group.id", "groupid");
    - *     props.put("enable.auto.commit", "true");
    - *     props.put("auto.commit.interval.ms", "10000");
    - *     props.put("key.deserializer",  "org.oracle.okafka.common.serialization.StringDeserializer");	      
    - *     props.put("value.deserializer",    "org.oracle.okafka.common.serialization.StringDeserializer");  	    
    - *     props.put("max.poll.records", 100);
    - *     KafkaConsumer consumer = null;
    - *     consumer = new KafkaConsumer(props);
    - *     consumer.subscribe(Arrays.asList(topic));
    - *     ConsumerRecords records = null; 
    - *     try {
    - *         final int minBatchSize = 200;
    - *         List> buffer = new ArrayList<>();
    - *         while (true) {
    - *            ConsumerRecords records = consumer.poll(100);
    - *            for (ConsumerRecord record : records) {
    - *                 buffer.add(record);
    - *             }
    - *            if (buffer.size() <= minBatchSize) {
    - *             insertIntoDb(buffer);
    - *             consumer.commitSync();
    - *             buffer.clear();
    - *            }
    - *         }		 	  	    	 
    - *	     }catch(Exception ex) {
    - *	    	 ex.printStackTrace(); 
    - *       } finally {
    - *	    	 consumer.close();
    - *	     } 
    - *
    + * {@code
    + * Properties props = new Properties();
    + * props.put("bootstrap.servers", "localhost:1521");
    + * props.put("oracle.service.name", "freepdb1");
    + * props.put("oracle.net.tns_admin", ".");
    + * props.put("group.id", "CG1");
    + * props.put("enable.auto.commit", "true");
    + * props.put("auto.commit.interval.ms", "10000");
    + * props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    + * props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    + * props.put("max.poll.records", 100);
    + * KafkaConsumer consumer = null;
    + * consumer = new KafkaConsumer(props);
    + * consumer.subscribe(Arrays.asList("TXEQ"));
    + * ConsumerRecords records = null;
    + * try {
    + * 	final int minBatchSize = 200;
    + * 	List> buffer = new ArrayList<>();
    + * 	while (true) {
    + * 		ConsumerRecords records = consumer.poll(100);
    + * 		for (ConsumerRecord record : records) {
    + * 			buffer.add(record);
    + * 		}
    + * 		if (buffer.size() <= minBatchSize) {
    + * 			insertIntoDb(buffer);
    + * 			consumer.commitSync();
    + * 			buffer.clear();
    + * 		}
    + * 	}
    + * } catch (Exception ex) {
    + * 	ex.printStackTrace();
    + * } finally {
    + * 	consumer.close();
    + * }
    + * }
      * 
    * - * In this example we will consume a batch of records and batch them up in memory. When we have enough records - * batched, we will insert them into a database. If our process fails before commitSync() then all consumed messages - * after previous commit are rolled back and considered not consumed. If process restarted it starts consuming from - * next of previous committed offset(msgid). + * In this example we will consume a batch of records and batch them up in + * memory. When we have enough records batched, we will insert them into a + * database. If our process fails before commitSync() then all consumed messages + * after previous commit are rolled back and considered not consumed. If process + * restarted it starts consuming from next of previous committed offset. In this + * way, OKafka provides "at-least-once" delivery guarantees, as each record will + * likely be delivered one time but in failure case could be duplicated. With + * OKafka, "exactly-once" delivery guarantees is possible with the use of + * {@link #getDBConnection()} method. Using {@link #getDBConnection()}, + * application can retrieve the Oracle database connection which was used to + * consume the records by the OKafka consumer. Application can use this database + * connection to store the processed records in database. After that when + * {@link #commitSync()} is invoked, the consumption and storage of record into + * database is committed atomically. Below example depicts that. + * + * * + * + *
    + * {@code
    + * Properties props = new Properties();
    + * props.put("bootstrap.servers", "localhost:1521");
    + * props.put("oracle.service.name", "freepdb1");
    + * props.put("oracle.net.tns_admin", ".");
    + * props.put("group.id", "CG1");
    + * props.put("enable.auto.commit", "true");
    + * props.put("auto.commit.interval.ms", "10000");
    + * props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    + * props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    + * props.put("max.poll.records", 100);
    + * KafkaConsumer consumer = null;
    + * consumer = new KafkaConsumer(props);
    + * consumer.subscribe(Arrays.asList("TXEQ"));
    + * ConsumerRecords records = null;
    + * try {
    + * 	List> buffer = new ArrayList<>();
    + * 	while (true) {
    + * 		ConsumerRecords records = consumer.poll(100);
    + * 		Connection conn = ((KafkaConsumer) consumer).getDBConnection();
    + * 		for (ConsumerRecord record : records) {
    + * 			buffer.add(record);
    + * 		}
    + * 		insertIntoDb(buffer, conn);
    + * 		consumer.commitSync();
    + * 		buffer.clear();
    + * 	}
    + * } catch (Exception ex) {
    + * 	ex.printStackTrace();
    + * } finally {
    + * 	consumer.close();
    + * }
    + * } 
    + * 
+ * + * For this release of OKafka, the {@link #commitSync(Map) commitSync(offsets)} + * method for manually committing specific offsets is not supported. + * *

    * *

    Multi-threaded Processing

    * *

    - * The okafka consumer is NOT thread-safe. All network I/O happens in the thread of the application - * making the call. It is the responsibility of the user to ensure that multi-threaded access - * is properly synchronized. Un-synchronized access will result in {@link ConcurrentModificationException}. + * The okafka consumer is NOT thread-safe. All network I/O happens in the thread + * of the application making the call. It is the responsibility of the user to + * ensure that multi-threaded access is properly synchronized. Un-synchronized + * access will result in {@link ConcurrentModificationException}. For this + * release of OKafka, {@link #wakeup()} is not supported. Invoking the api would + * not throw FeatureNotSupportedException. *
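Since the consumer is not thread-safe, one common pattern is to confine each consumer instance to a single thread. The sketch below illustrates that idea; it is not part of this patch, and the topic name and properties are assumed to be supplied by the caller.

```java
// Sketch: one consumer per thread, so no external synchronization is needed.
import java.time.Duration;
import java.util.Arrays;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.oracle.okafka.clients.consumer.KafkaConsumer;

public class SingleThreadedConsumer implements Runnable {
    private final Properties props;

    public SingleThreadedConsumer(Properties props) {
        this.props = props;   // connection and group settings supplied by the caller
    }

    @Override
    public void run() {
        // The consumer is created, used and closed entirely on this thread.
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        try {
            consumer.subscribe(Arrays.asList("TXEQ"));   // illustrative topic name
            while (!Thread.currentThread().isInterrupted()) {
                for (ConsumerRecord<String, String> record : consumer.poll(Duration.ofMillis(1000)))
                    System.out.println(record.value());
                consumer.commitSync();
            }
        } finally {
            consumer.close();
        }
    }
}
```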

    */ public class KafkaConsumer implements Consumer { - private static final long NO_CURRENT_THREAD = -1L; - private static final AtomicInteger CONSUMER_CLIENT_ID_SEQUENCE = new AtomicInteger(1); - private static final String JMX_PREFIX = "kafka.consumer"; - static final long DEFAULT_CLOSE_TIMEOUT_MS = 30 * 1000; - - final Metrics metrics; - - private final Logger log; - private final String clientId; - private final Deserializer keyDeserializer; - private final Deserializer valueDeserializer; - private final ConsumerInterceptors interceptors; - - private final Time time; - private final SubscriptionState subscriptions; - private final Metadata metadata; - private final ConsumerNetworkClient client; - private final long retryBackoffMs; - private final int requestTimeoutMs; - private final int defaultApiTimeoutMs; - private volatile boolean closed = false; - //private List assignors; - - // currentThread holds the threadId of the current thread accessing KafkaConsumer - // and is used to prevent multi-threaded access - private final AtomicLong currentThread = new AtomicLong(NO_CURRENT_THREAD); - // refcount is used to allow reentrant access by the thread who has acquired currentThread - private final AtomicInteger refcount = new AtomicInteger(0); - - /** - * A consumer is instantiated by providing a set of key-value pairs as configuration. Valid configuration strings - * are documented here. Values can be - * either strings or objects of the appropriate type (for example a numeric configuration would accept either the - * string "42" or the integer 42). - *

    - * Valid configuration strings are documented at {@link ConsumerConfig}. - *

    - * Note: after creating a {@code KafkaConsumer} you must always {@link #close()} it to avoid resource leaks. - * - * @param configs The consumer configs - */ - public KafkaConsumer(Map configs) { - this(configs, null, null); - } - - /** - * A consumer is instantiated by providing a set of key-value pairs as configuration, and a key and a value {@link Deserializer}. - *

    - * Valid configuration strings are documented at {@link ConsumerConfig}. - *

    - * Note: after creating a {@code KafkaConsumer} you must always {@link #close()} it to avoid resource leaks. - * - * @param configs The consumer configs - * @param keyDeserializer The deserializer for key that implements {@link Deserializer}. The configure() method - * won't be called in the consumer when the deserializer is passed in directly. - * @param valueDeserializer The deserializer for value that implements {@link Deserializer}. The configure() method - * won't be called in the consumer when the deserializer is passed in directly. - */ - public KafkaConsumer(Map configs, - Deserializer keyDeserializer, - Deserializer valueDeserializer) { - this(new ConsumerConfig(ConsumerConfig.addDeserializerToConfig(configs, keyDeserializer, valueDeserializer)), - keyDeserializer, - valueDeserializer); - } - - /** - * A consumer is instantiated by providing a {@link java.util.Properties} object as configuration. - *

    - * Valid configuration strings are documented at {@link ConsumerConfig}. - *

    - * Note: after creating a {@code KafkaConsumer} you must always {@link #close()} it to avoid resource leaks. - * - * @param properties The consumer configuration properties - */ - public KafkaConsumer(Properties properties) { - this(properties, null, null); - } - - /** - * A consumer is instantiated by providing a {@link java.util.Properties} object as configuration, and a - * key and a value {@link Deserializer}. - *

    - * Valid configuration strings are documented at {@link ConsumerConfig}. - *

    - * Note: after creating a {@code KafkaConsumer} you must always {@link #close()} it to avoid resource leaks. - * - * @param properties The consumer configuration properties - * @param keyDeserializer The deserializer for key that implements {@link Deserializer}. The configure() method - * won't be called in the consumer when the deserializer is passed in directly. - * @param valueDeserializer The deserializer for value that implements {@link Deserializer}. The configure() method - * won't be called in the consumer when the deserializer is passed in directly. - */ - public KafkaConsumer(Properties properties, - Deserializer keyDeserializer, - Deserializer valueDeserializer) { - this(new ConsumerConfig(ConsumerConfig.addDeserializerToConfig(properties, keyDeserializer, valueDeserializer)), - keyDeserializer, valueDeserializer); - } - - @SuppressWarnings("unchecked") - private KafkaConsumer(ConsumerConfig config, - Deserializer keyDeserializer, - Deserializer valueDeserializer) { - try { - String clientId = config.getString(ConsumerConfig.CLIENT_ID_CONFIG); - if (clientId.isEmpty()) - clientId = "consumer-" + CONSUMER_CLIENT_ID_SEQUENCE.getAndIncrement(); - this.clientId = clientId; - String groupId = config.getString(ConsumerConfig.GROUP_ID_CONFIG); - - LogContext logContext = new LogContext("[Consumer clientId=" + clientId + ", groupId=" + groupId + "] "); - this.log = logContext.logger(getClass()); - - log.debug("Initializing the Kafka consumer"); - this.requestTimeoutMs = config.getInt(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG); - this.defaultApiTimeoutMs = config.getInt(ConsumerConfig.DEFAULT_API_TIMEOUT_MS_CONFIG); - this.time = Time.SYSTEM; - - Map metricsTags = Collections.singletonMap("client-id", clientId); - MetricConfig metricConfig = new MetricConfig().samples(config.getInt(ConsumerConfig.METRICS_NUM_SAMPLES_CONFIG)) - .timeWindow(config.getLong(ConsumerConfig.METRICS_SAMPLE_WINDOW_MS_CONFIG), TimeUnit.MILLISECONDS) - .recordLevel(Sensor.RecordingLevel.forName(config.getString(ConsumerConfig.METRICS_RECORDING_LEVEL_CONFIG))) - .tags(metricsTags); - List reporters = config.getConfiguredInstances(ConsumerConfig.METRIC_REPORTER_CLASSES_CONFIG, - MetricsReporter.class); - reporters.add(new JmxReporter(JMX_PREFIX)); - this.metrics = new Metrics(metricConfig, reporters, time); - this.retryBackoffMs = config.getLong(ConsumerConfig.RETRY_BACKOFF_MS_CONFIG); - - // load interceptors and make sure they get clientId - Map userProvidedConfigs = config.originals(); - userProvidedConfigs.put(ConsumerConfig.CLIENT_ID_CONFIG, clientId); - List> interceptorList = (List) (new ConsumerConfig(userProvidedConfigs, false)).getConfiguredInstances(ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG, - ConsumerInterceptor.class); - this.interceptors = new ConsumerInterceptors<>(interceptorList); - if (keyDeserializer == null) { - this.keyDeserializer = config.getConfiguredInstance(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, - Deserializer.class); - this.keyDeserializer.configure(config.originals(), true); - } else { - config.ignore(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG); - this.keyDeserializer = keyDeserializer; - } - if (valueDeserializer == null) { - this.valueDeserializer = config.getConfiguredInstance(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, - Deserializer.class); - this.valueDeserializer.configure(config.originals(), false); - } else { - config.ignore(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG); - this.valueDeserializer = valueDeserializer; - } - ClusterResourceListeners 
clusterResourceListeners = configureClusterResourceListeners(keyDeserializer, valueDeserializer, reporters, interceptorList); - this.metadata = new Metadata(Long.MAX_VALUE, config.getLong(ConsumerConfig.METADATA_MAX_AGE_CONFIG), - true, false, clusterResourceListeners, config); - - List addresses = null; - String serviceName = null; - String instanceName = null; - System.setProperty("oracle.net.tns_admin", config.getString(ConsumerConfig.ORACLE_NET_TNS_ADMIN)); - if( config.getString( CommonClientConfigs.SECURITY_PROTOCOL_CONFIG).equalsIgnoreCase("PLAINTEXT")) - addresses = ClientUtils.parseAndValidateAddresses(config.getList(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG)); - else { - if( config.getString(SslConfigs.TNS_ALIAS) == null) - throw new InvalidLoginCredentialsException("Please provide valid connection string"); - TNSParser parser = new TNSParser(config); - parser.readFile(); - String connStr = parser.getConnectionString(config.getString(SslConfigs.TNS_ALIAS).toUpperCase()); - if (connStr == null) - throw new InvalidLoginCredentialsException("Please provide valid connection string"); - String host = parser.getProperty(connStr, "HOST"); - String portStr = parser.getProperty(connStr, "PORT"); - serviceName = parser.getProperty(connStr, "SERVICE_NAME"); - int port; - if( host == null || portStr == null || serviceName == null) - throw new InvalidLoginCredentialsException("Please provide valid connection string"); - try { - port = Integer.parseInt(portStr); - } catch(NumberFormatException nfe) { - throw new InvalidLoginCredentialsException("Please provide valid connection string"); - } - instanceName = parser.getProperty(connStr, "INSTANCE_NAME"); - addresses = new ArrayList<>(); - addresses.add(new InetSocketAddress(host, port)); - } - this.metadata.update(Cluster.bootstrap(addresses, config, serviceName, instanceName), Collections.emptySet(), time.milliseconds()); - String metricGrpPrefix = "consumer"; - ConsumerMetrics metricsRegistry = new ConsumerMetrics(metricsTags.keySet(), "consumer"); - - IsolationLevel isolationLevel = IsolationLevel.valueOf( - config.getString(ConsumerConfig.ISOLATION_LEVEL_CONFIG).toUpperCase(Locale.ROOT)); - OffsetResetStrategy offsetResetStrategy = OffsetResetStrategy.valueOf(config.getString(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG).toUpperCase(Locale.ROOT)); - this.subscriptions = new SubscriptionState(offsetResetStrategy); - int maxPollIntervalMs = config.getInt(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG); - int sessionTimeoutMs = config.getInt(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG); - NetworkClient networkClient = new NetworkClient(new AQKafkaConsumer(logContext, config, time), this.metadata, clientId, - config.getLong(ConsumerConfig.RECONNECT_BACKOFF_MS_CONFIG), - config.getLong(ConsumerConfig.RECONNECT_BACKOFF_MAX_MS_CONFIG), - config.getInt(ConsumerConfig.SEND_BUFFER_CONFIG), - config.getInt(ConsumerConfig.RECEIVE_BUFFER_CONFIG), (int) TimeUnit.HOURS.toMillis(1), time, + private static final long NO_CURRENT_THREAD = -1L; + private static final String CLIENT_ID_METRIC_TAG = "client-id"; + private static final AtomicInteger CONSUMER_CLIENT_ID_SEQUENCE = new AtomicInteger(1); + private static final String JMX_PREFIX = "kafka.consumer"; + static final long DEFAULT_CLOSE_TIMEOUT_MS = 30 * 1000; + + final Metrics metrics; + final OkafkaConsumerMetrics okcMetrics; + private final FetchManagerMetrics fetchManagerMetrics; + + private final Logger log; + private final String clientId; + private final Deserializer keyDeserializer; + private final Deserializer 
valueDeserializer; + private final ConsumerInterceptors interceptors; + + private final Time time; + private final SubscriptionState subscriptions; + private final Metadata metadata; + private final ConsumerNetworkClient client; + private final long retryBackoffMs; + private final int requestTimeoutMs; + private final int defaultApiTimeoutMs; + private volatile boolean closed = false; + // private List assignors; + private List assignors; + + // currentThread holds the threadId of the current thread accessing + // KafkaConsumer + // and is used to prevent multi-threaded access + private final AtomicLong currentThread = new AtomicLong(NO_CURRENT_THREAD); + // refcount is used to allow reentrant access by the thread who has acquired + // currentThread + private final AtomicInteger refcount = new AtomicInteger(0); + private final int DLENGTH_SIZE = 4; + private AQKafkaConsumer aqConsumer = null; + + private ConsumerGroupMetadata cgMetadata = null; + + /** + * A consumer is instantiated by providing a set of key-value pairs as + * configuration. Values can be either strings or objects of the appropriate + * type (for example a numeric configuration would accept either the string "42" + * or the integer 42). + *

    + * Valid configuration strings are documented at {@link ConsumerConfig}. + *

    + * Note: after creating a {@code KafkaConsumer} you must always {@link #close()} + * it to avoid resource leaks. + * + * @param configs The consumer configs + */ + public KafkaConsumer(Map configs) { + this(configs, null, null); + } + + /** + * A consumer is instantiated by providing a set of key-value pairs as + * configuration, and a key and a value {@link Deserializer}. + *

    + * Valid configuration strings are documented at {@link ConsumerConfig}. + *

    + * Note: after creating a {@code KafkaConsumer} you must always {@link #close()} + * it to avoid resource leaks. + * + * @param configs The consumer configs + * @param keyDeserializer The deserializer for key that implements + * {@link Deserializer}. The configure() method won't + * be called in the consumer when the deserializer is + * passed in directly. + * @param valueDeserializer The deserializer for value that implements + * {@link Deserializer}. The configure() method won't + * be called in the consumer when the deserializer is + * passed in directly. + */ + public KafkaConsumer(Map configs, Deserializer keyDeserializer, + Deserializer valueDeserializer) { + this(new ConsumerConfig(ConsumerConfig.addDeserializerToConfig(configs, keyDeserializer, valueDeserializer)), + keyDeserializer, valueDeserializer); + } + + /** + * A consumer is instantiated by providing a {@link java.util.Properties} object + * as configuration. + *

    + * Valid configuration strings are documented at {@link ConsumerConfig}. + *

    + * Note: after creating a {@code KafkaConsumer} you must always {@link #close()} + * it to avoid resource leaks. + * + * @param properties The consumer configuration properties + */ + public KafkaConsumer(Properties properties) { + this(properties, null, null); + } + + /** + * A consumer is instantiated by providing a {@link java.util.Properties} object + * as configuration, and a key and a value {@link Deserializer}. + *

    + * Valid configuration strings are documented at {@link ConsumerConfig}. + *

    + * Note: after creating a {@code KafkaConsumer} you must always {@link #close()} + * it to avoid resource leaks. + * + * @param properties The consumer configuration properties + * @param keyDeserializer The deserializer for key that implements + * {@link Deserializer}. The configure() method won't + * be called in the consumer when the deserializer is + * passed in directly. + * @param valueDeserializer The deserializer for value that implements + * {@link Deserializer}. The configure() method won't + * be called in the consumer when the deserializer is + * passed in directly. + */ + public KafkaConsumer(Properties properties, Deserializer keyDeserializer, Deserializer valueDeserializer) { + this(new ConsumerConfig(ConsumerConfig.addDeserializerToConfig(properties, keyDeserializer, valueDeserializer)), + keyDeserializer, valueDeserializer); + } + + @SuppressWarnings("unchecked") + private KafkaConsumer(ConsumerConfig config, Deserializer keyDeserializer, Deserializer valueDeserializer) { + try { + + // System.setProperty("oracle.jms.conservativeNavigation","1"); + + String clientId = config.getString(ConsumerConfig.CLIENT_ID_CONFIG); + if (clientId.isEmpty()) + clientId = "consumer-" + CONSUMER_CLIENT_ID_SEQUENCE.getAndIncrement(); + this.clientId = clientId; + + String groupId = config.getString(ConsumerConfig.GROUP_ID_CONFIG); + + LogContext logContext = new LogContext("[Consumer clientId=" + clientId + ", groupId=" + groupId + "] "); + this.log = logContext.logger(getClass()); + + log.debug("Initializing Kafka Consumer"); + + this.requestTimeoutMs = config.getInt(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG); + this.defaultApiTimeoutMs = config.getInt(ConsumerConfig.DEFAULT_API_TIMEOUT_MS_CONFIG); + this.time = Time.SYSTEM; + + Map metricsTags = Collections.singletonMap("client-id", clientId); + + MetricConfig metricConfig = new MetricConfig() + .samples(config.getInt(ConsumerConfig.METRICS_NUM_SAMPLES_CONFIG)) + .timeWindow(config.getLong(ConsumerConfig.METRICS_SAMPLE_WINDOW_MS_CONFIG), TimeUnit.MILLISECONDS) + .recordLevel(Sensor.RecordingLevel + .forName(config.getString(ConsumerConfig.METRICS_RECORDING_LEVEL_CONFIG))) + .tags(metricsTags); + + List reporters = config + .getConfiguredInstances(ConsumerConfig.METRIC_REPORTER_CLASSES_CONFIG, MetricsReporter.class); + + reporters.add(new JmxReporter(JMX_PREFIX)); + + this.metrics = new Metrics(metricConfig, reporters, time); + this.retryBackoffMs = config.getLong(ConsumerConfig.RETRY_BACKOFF_MS_CONFIG); + + // load interceptors and make sure they get clientId + Map userProvidedConfigs = config.originals(); + userProvidedConfigs.put(ConsumerConfig.CLIENT_ID_CONFIG, clientId); + List> interceptorList = (List) (new ConsumerConfig(userProvidedConfigs, false)) + .getConfiguredInstances(ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG, ConsumerInterceptor.class); + this.interceptors = new ConsumerInterceptors<>(interceptorList); + + if (keyDeserializer == null) { + this.keyDeserializer = config.getConfiguredInstance(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, + Deserializer.class); + this.keyDeserializer.configure(config.originals(), true); + } else { + config.ignore(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG); + this.keyDeserializer = keyDeserializer; + } + + if (valueDeserializer == null) { + this.valueDeserializer = config.getConfiguredInstance(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, + Deserializer.class); + this.valueDeserializer.configure(config.originals(), false); + } else { + 
config.ignore(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG); + this.valueDeserializer = valueDeserializer; + } + + ClusterResourceListeners clusterResourceListeners = configureClusterResourceListeners(keyDeserializer, + valueDeserializer, reporters, interceptorList); + this.metadata = new Metadata(retryBackoffMs, config.getLong(ConsumerConfig.METADATA_MAX_AGE_CONFIG), true, + false, clusterResourceListeners, config); + + List addresses = null; + String serviceName = config.getString(ConsumerConfig.ORACLE_SERVICE_NAME); + String instanceName = null; + + System.setProperty("oracle.net.tns_admin", config.getString(ConsumerConfig.ORACLE_NET_TNS_ADMIN)); + if (config.getString(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG).equalsIgnoreCase("PLAINTEXT")) { + // Changes for 2.8.1 + // addresses = + // ClientUtils.parseAndValidateAddresses(config.getList(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG)); + addresses = ClientUtils.parseAndValidateAddresses( + config.getList(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG), + ClientDnsLookup.RESOLVE_CANONICAL_BOOTSTRAP_SERVERS_ONLY); + serviceName = config.getString(ConsumerConfig.ORACLE_SERVICE_NAME); + instanceName = config.getString(ConsumerConfig.ORACLE_INSTANCE_NAME); + } else { + if (config.getString(SslConfigs.TNS_ALIAS) == null) + throw new InvalidLoginCredentialsException("Please provide valid connection string"); + TNSParser parser = new TNSParser(config); + parser.readFile(); + String connStr = parser.getConnectionString(config.getString(SslConfigs.TNS_ALIAS).toUpperCase()); + if (connStr == null) + throw new InvalidLoginCredentialsException("Please provide valid connection string"); + String host = parser.getProperty(connStr, "HOST"); + String portStr = parser.getProperty(connStr, "PORT"); + serviceName = parser.getProperty(connStr, "SERVICE_NAME"); + int port; + if (host == null || portStr == null || serviceName == null) + throw new InvalidLoginCredentialsException("Please provide valid connection string"); + try { + port = Integer.parseInt(portStr); + } catch (NumberFormatException nfe) { + throw new InvalidLoginCredentialsException("Please provide valid connection string"); + } + instanceName = parser.getProperty(connStr, "INSTANCE_NAME"); + addresses = new ArrayList<>(); + addresses.add(new InetSocketAddress(host, port)); + } + // this.metadata.update(Cluster.bootstrap(addresses, config, serviceName, + // instanceName), Collections.emptySet(), time.milliseconds()); + + { // Changes for 2.8.1 :: Create Bootstrap Cluster and pass it to metadata.update + // We must have OKafka Node with Service Name and Instance Name placed in the + // bootstrap cluster. 
+ // For cluster created here, metadata.update has isBootstrapConfigured passed as + // TRUE because the field is not public + + ArrayList bootStrapNodeList = new ArrayList(addresses.size()); + int id = -1; + ConnectionUtils.remDuplicateEntries(addresses); + for (InetSocketAddress inetAddr : addresses) { + org.oracle.okafka.common.Node bootStrapNode = new org.oracle.okafka.common.Node(id--, + inetAddr.getHostName(), inetAddr.getPort(), serviceName, instanceName); + bootStrapNodeList.add((Node) bootStrapNode); + } + Cluster bootStrapCluster = new Cluster(null, bootStrapNodeList, new ArrayList<>(0), + Collections.emptySet(), Collections.emptySet()); + log.debug("Setting up bootstrap cluster"); + this.metadata.update(bootStrapCluster, Collections.emptySet(), time.milliseconds(), true); + } + + String metricGrpPrefix = "consumer"; + FetchMetricsRegistry metricsRegistry = new FetchMetricsRegistry(Collections.singleton(CLIENT_ID_METRIC_TAG), + metricGrpPrefix); + this.fetchManagerMetrics = new FetchManagerMetrics(metrics, metricsRegistry); + + IsolationLevel isolationLevel = IsolationLevel + .valueOf(config.getString(ConsumerConfig.ISOLATION_LEVEL_CONFIG).toUpperCase(Locale.ROOT)); + OffsetResetStrategy offsetResetStrategy = OffsetResetStrategy + .valueOf(config.getString(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG).toUpperCase(Locale.ROOT)); + // Changes for 2.8.1 + // this.subscriptions = new SubscriptionState(offsetResetStrategy); + this.subscriptions = new SubscriptionState(logContext, offsetResetStrategy); + + int maxPollIntervalMs = config.getInt(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG); + int sessionTimeoutMs = config.getInt(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG); + + this.assignors = config.getConfiguredInstances(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, + ConsumerPartitionAssignor.class); + + /* + * this.assignors = config.getConfiguredInstances(ConsumerConfig. + * PARTITION_ASSIGNMENT_STRATEGY_CONFIG, ConsumerPartitionAssignor.class); + */ + + // AQKafkaConsumer to talk to Oracle Database + AQKafkaConsumer aqConsumer = new AQKafkaConsumer(logContext, config, time, this.metadata, this.metrics); + aqConsumer.setAssignors(assignors); + + // Network Client to wrap aqConsumer. Maintains metadata + NetworkClient networkClient = new NetworkClient(aqConsumer, this.metadata, clientId, + config.getLong(ConsumerConfig.RECONNECT_BACKOFF_MS_CONFIG), + config.getLong(ConsumerConfig.RECONNECT_BACKOFF_MAX_MS_CONFIG), + config.getInt(ConsumerConfig.SEND_BUFFER_CONFIG), + config.getInt(ConsumerConfig.RECEIVE_BUFFER_CONFIG), (int) TimeUnit.HOURS.toMillis(1), time, logContext); - this.client = new ConsumerNetworkClient( - logContext, - networkClient, - metadata, - subscriptions, - config.getBoolean(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG), - config.getInt(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG), - time, - retryBackoffMs, - maxPollIntervalMs, - this.requestTimeoutMs, - sessionTimeoutMs, - defaultApiTimeoutMs); - config.logUnused(); - AppInfoParser.registerAppInfo(JMX_PREFIX, clientId, metrics); - - log.debug("Kafka consumer initialized"); - } catch (Throwable t) { - // call close methods if internal objects are already constructed - // this is to prevent resource leak. - close(0, true); - // now propagate the exception - throw new KafkaException("Failed to construct kafka consumer", t); - } - } - - /** - * This method is not yet supported. 
- */ - public Set assignment() { - throw new FeatureNotSupportedException("This feature is not suported for this release."); - } - - /** - * This method is not yet supported. - */ - public Set subscription() { - throw new FeatureNotSupportedException("This feature is not suported for this release."); - } - - /** - * Subscribe to the given list of topics to get dynamically assigned partitions. However OKafka 0.8 supports - * only subscription to single topic and only one partition is assigned dynamically to consumer. Consumer fetches - * messages from this partition for its lifetime. If consumer goes down then messages from this partition remains - * unconsumed. Client has to start a new consumer to consume from this partition. - * - * - * Topic subscriptions are not incremental. This list will replace the current - * assignment (if there is one). - * . - * - * If the given list of topics is empty, it is treated the same as {@link #unsubscribe()}. - * This call has effect only when poll is invoked. - *

    - * okafka 0.8 doesn't support consumer group rebalance listener i.e. ConsumerRebalanceListener. - * - * @param topics The list of topics to subscribe to - * @param listener null if not null is ignored - * @throws IllegalArgumentException If topics is null or contains null or empty elements or size of topics is greater than one. - */ - @Override - public void subscribe(Collection topics, ConsumerRebalanceListener listener) { - - acquireAndEnsureOpen(); - try { - if (topics == null) { - throw new IllegalArgumentException("Topic collection to subscribe to cannot be null"); - } else if (topics.isEmpty()) { - // treat subscribing to empty topic list as the same as unsubscribing - this.unsubscribe(); - } else { - if(topics.size() > 1) - throw new IllegalArgumentException("Only one topic can be subscribed"); - for (String topic : topics) { - if (topic == null || topic.trim().isEmpty()) - throw new IllegalArgumentException("Topic collection to subscribe to cannot contain null or empty topic"); - } - log.debug("Subscribed to topic(s): {}", Utils.join(topics, ", ")); - this.subscriptions.subscribe(new HashSet<>(topics), listener); - //metadata.setTopics(subscriptions.groupSubscription()); - } - } finally { - release(); - } - } - - /** - * Subscribe to the given list of topics to get dynamically assigned partitions. However OKafka 0.8 supports - * only subscription to single topic and only one partition is assigned dynamically to consumer. Consumer fetches - * messages from this partition for its lifetime. If consumer goes down then messages from this partition remains - * unconsumed. Client has to start a new consumer to consume from this partition. - * - * - * Topic subscriptions are not incremental. This list will replace the current - * assignment (if there is one). - * . - * - * If the given list of topics is empty, it is treated the same as {@link #unsubscribe()}. - * This call has effect only when poll is invoked. - *

    - * This is a short-hand for {@link #subscribe(Collection, ConsumerRebalanceListener)}, which - * uses a no-op listener. okafka 0.8 doesn't support consumer group rebalance listener i.e. ConsumerRebalanceListener. - * - * @param topics The list of topics to subscribe to - * @throws IllegalArgumentException If topics is null or contains null or empty elements or size of topics is greater than one. - */ - @Override - public void subscribe(Collection topics) { - subscribe(topics, new NoOpConsumerRebalanceListener()); - } - - /** - * This method is not yet supported. - */ - @Override - public void subscribe(Pattern pattern, ConsumerRebalanceListener listener) { - throw new FeatureNotSupportedException("This feature is not suported for this release."); - } - - /** - * This method is not yet supported. - */ - @Override - public void subscribe(Pattern pattern) { - subscribe(pattern, new NoOpConsumerRebalanceListener()); - } - - /** - * Unsubscribe from topic currently subscribed with {@link #subscribe(Collection)}. - */ - public void unsubscribe() { - acquireAndEnsureOpen(); - try { - log.debug("Unsubscribed to all topics "); - this.subscriptions.unsubscribe(); - this.client.clearSubscription(); - this.client.unsubscribe(); - this.metadata.needMetadataForAllTopics(false); - } finally { - release(); - } - } - - /** - * This method is not yet supported. - */ - @Override - public void assign(Collection partitions) { - throw new FeatureNotSupportedException("This feature is not suported for this release."); - } - - /** - * Fetch data for the topic specified using {@link #subscribe(Collection)} APIs. It is an error to not have - * subscribed to any topic before polling for data Consumer maintains a single connection/session to any one of the oracle database instance. Each consumer(session )in a group is assigned a single unique partition of subscribed topic. - * Hence, Poll fetches data from its assigned partition till connection/session exists. If existing connection lost and connected to any instance of database then consumer(session) might be assigned with new partition of subscribed topic. - *

    - * On each poll consumer tries to fetch from last consumed message id(offset). If consumer goes down without commiting then all consumed messages are rolled back. - * and next consumer instance of same group who got this partition starts consuming from last committed msgid or from rolled back point. - * - * As of 0.8 okafka, there is no group balancing since each instance sticks with its partition. - * - * @param timeout The time, in milliseconds, spent waiting in poll. - * @return map of topic to records since the last fetch for the subscribed list of topic. - * - * @throws org.oracle.okafka.common.KafkaException for any other unrecoverable errors (e.g. errors deserializing key/value pairs, - * or any new error cases in future versions) - * @throws java.lang.IllegalArgumentException if the timeout value is negative - * @throws java.lang.IllegalStateException if the consumer is not subscribed to any topic. - * - */ - @Deprecated - @Override - public ConsumerRecords poll(final long timeout) { - return poll(timeout, false); - } - - /** - * Fetch data for the topic specified using {@link #subscribe(Collection)} APIs. It is an error to not have - * subscribed to any topic before polling for data. Each consumer(session )in a group is assigned a single unique partition of subscribed topic. - * Hence, Poll fetches data from its assigned partition till connection/session exists. If existing connection lost and connected to any instance of database then consumer(session) might be assigned with new partition of subscribed topic. - *

    - * On each poll, consumer will try to use the last consumed offset as the starting offset and fetch sequentially. - * - * @param timeout The time, in milliseconds, spent waiting in poll. - * @return map of topic to records since the last fetch for the subscribed list of topic. - * - * @throws org.oracle.okafka.common.KafkaException for any other unrecoverable errors (e.g. errors deserializing key/value pairs) - * @throws java.lang.IllegalArgumentException if the timeout value is negative - * @throws java.lang.IllegalStateException if the consumer is not subscribed to any topic. - * @throws java.lang.ArithmeticException if the timeout is greater than - * {@link Long#MAX_VALUE} milliseconds. - * - */ - @Override - public ConsumerRecords poll(final Duration timeout) { - return poll(timeout.toMillis(), true); - } - - private ConsumerRecords poll(final long timeoutMs, final boolean includeMetadataInTimeout) { - acquireAndEnsureOpen(); - try { - if (timeoutMs < 0) throw new IllegalArgumentException("Timeout must not be negative"); - - if (this.subscriptions.hasNoSubscription()) { - throw new IllegalStateException("Consumer is not subscribed to any topics or assigned any partitions"); - } - - // poll for new data until the timeout expires - long elapsedTime = 0L; - final long metadataEnd; - if(includeMetadataInTimeout) { - final long metadataStart = time.milliseconds(); - if (!updateMetadataAndSubscribeIfNeeded(remainingTimeAtLeastZero(timeoutMs, elapsedTime))) { - return ConsumerRecords.empty(); - } - metadataEnd = time.milliseconds(); - elapsedTime += metadataEnd - metadataStart; - - } else { - while(!updateMetadataAndSubscribeIfNeeded(Long.MAX_VALUE)) { - log.warn("Still waiting for metadata"); - } - - metadataEnd = time.milliseconds(); - } - - - final long syncStart = time.milliseconds(); - client.maybeAutoCommitOffsetsSync(time.milliseconds()); - final long syncEnd = time.milliseconds(); - elapsedTime += syncStart - syncEnd; - //final long fetchStart = time.milliseconds(); - final Map>> records = pollForFetches(remainingTimeAtLeastZero(timeoutMs, elapsedTime)); - - if (!records.isEmpty()) { - - return this.interceptors.onConsume(new ConsumerRecords<>(records)); - } - - //final long fetchEnd = time.milliseconds(); - //elapsedTime += fetchEnd - fetchStart; - return ConsumerRecords.empty(); - } catch(InvalidLoginCredentialsException exception) { - log.error(exception.getMessage()); - log.info("Closing the consumer"); - close(); - throw new AuthenticationException(exception.getMessage()); - } finally { - release(); - } - } - - private boolean updateMetadataAndSubscribeIfNeeded(long timeout) { - long elapsed = 0L; - long subscriptionStart = time.milliseconds(); - if( !client.mayBeTriggerSubcription(timeout)) { - return false; - } - - elapsed += time.milliseconds() - subscriptionStart; - if(elapsed <= timeout) { - Set partitions = subscriptions.partitionsNeedingReset(time.milliseconds()); - if (partitions.isEmpty()) - return true; - - final Map offsetResetTimestamps = new HashMap<>(); - for (final TopicPartition partition : partitions) { - Long timestamp = offsetResetStrategyTimestamp(partition); - if (timestamp != null) - offsetResetTimestamps.put(partition, timestamp); - } - - return client.resetOffsetsSync(offsetResetTimestamps, timeout - elapsed); - } - return true; - } - - private Map>> pollForFetches(final long timeoutMs) { - if(timeoutMs <= 0) return Collections.>>emptyMap(); - return createConsumerRecordsMap(client.poll(timeoutMs)); - } - - private Map>> createConsumerRecordsMap(List 
messages) { - if(messages.size() == 0 ) return Collections.>>emptyMap(); - - Map>> consumerRecords = new HashMap<>(); - ConsumerRecord record; - String topic = null; - int partition = -1; - for(AQjmsBytesMessage message : messages) { - try { - byte[] valueByteArray = message.getBytesData(); - byte[] keyByteArray = message.getJMSCorrelationIDAsBytes(); - topic = message.getStringProperty("topic"); - partition = message.getIntProperty("AQINTERNAL_PARTITION")/2; - K key = this.keyDeserializer.deserialize(message.getStringProperty("topic"), keyByteArray); - V value = this.valueDeserializer.deserialize(message.getStringProperty("topic"), valueByteArray); - - record = new ConsumerRecord<>(topic, partition, MessageIdConverter.getOffset(message.getJMSMessageID()), - message.getJMSTimestamp(), TimestampType.LOG_APPEND_TIME, null, valueByteArray.length == 0 ? ConsumerRecord.NULL_SIZE : valueByteArray.length, - valueByteArray.length == 0 ? ConsumerRecord.NULL_SIZE : valueByteArray.length, - key, value, new RecordHeaders()); - } catch(JMSException exception) { - record = new ConsumerRecord<>("", -1, -1, -1, TimestampType.NO_TIMESTAMP_TYPE, null, ConsumerRecord.NULL_SIZE, - ConsumerRecord.NULL_SIZE, null, null, new RecordHeaders()); - } - TopicPartition tp = new TopicPartition(topic, partition); - if(tp != null && partition != -1) { - OffsetAndMetadata offset = subscriptions.allConsumed().get(tp); - if(offset == null) - subscriptions.allConsumed().put(tp , new OffsetAndMetadata(record.offset())); - else { - if(offset.offset() < record.offset()) - subscriptions.allConsumed().put(tp , new OffsetAndMetadata(record.offset())); - } - } - - if(!consumerRecords.containsKey(tp)) - consumerRecords.put(tp, new ArrayList>()); - consumerRecords.get(tp).add(record); - - } - return consumerRecords; - } - - - private long remainingTimeAtLeastZero(final long timeoutMs, final long elapsedTime) { - return Math.max(0, timeoutMs - elapsedTime); - } - - /** - * Commits messages(offsets) consumed using {@link #poll(Duration) poll()} since last commit in this session. - * - * Commit on session is either successfull or rollback. Commit fails only in rare cases like shutdown. Commit failure results in rollback. - * If rollback occurs then consumed messages since last commit are considered not consumed. - * If process restarts after failure then it starts consuming from this position. - *

    - * This is a synchronous commit and will block until either the commit succeeds or rollback happens. - * Commit does not take any timeout into account for completion of call. - *

    - * - * @throws org.oracle.okafka.common.KafkaException for any other unrecoverable errors (i.e topic doesn't exist, session rolled back as db shutdown). - */ - @Override - public void commitSync() { - commitSync(Duration.ofMillis(defaultApiTimeoutMs)); - } - - /** - * Commits messages(offsets) consumed using {@link #poll(Duration) poll()} since last commit in this session. - * - * Commit on session is either successfull or rollback. Commit fails only in rare cases like shutdown. Commit failure results in rollback. - * If rollback occurs then consumed messages since last commit are considered not consumed. - * If process restarts after failure then it starts consuming from this position. - *

    - * This is a synchronous commit and will block until either the commit succeeds or rollback happens. - * Commit does not take any timeout into account for completion of call. This call is equivalent to commitSync(). - *

    - * - * @throws org.oracle.okafka.common.KafkaException for any other unrecoverable errors . - */ - @Override - public void commitSync(Duration timeout) { - acquireAndEnsureOpen(); - - try { - client.commitOffsetsSync(subscriptions.allConsumed(), timeout.toMillis()); - interceptors.onCommit(this.subscriptions.allConsumed()); - } catch (Exception exception) { - throw new KafkaException("failed to commit messages", exception); - } finally { - release(); - } - } - - /** - * This method not yet supported. - */ - @Override - public void commitSync(final Map offsets) { - throw new FeatureNotSupportedException("This feature is not suported for this release."); - } - - /** - * This method not yet supported. - */ - @Override - public void commitSync(final Map offsets, final Duration timeout) { - throw new FeatureNotSupportedException("This feature is not suported for this release."); - } - - /** - * Commit mesages consumed using {@link #poll(Duration)} for the subscribed topic and assigned partition in this session since last commit. - * This call is equivalent to {@link #commitAsync(OffsetCommitCallback)} with null callback. - */ - @Override - public void commitAsync() { - commitAsync(null); - } - - /** - * Commits messages(offsets) consumed using {@link #poll(Duration) )} since last commit in this session. - * - * Commit on session is either successfull or rollback. Commit fails only in rare cases like shutdown. Commit failure results in rollback. - * If rollback occurs then consumed messages since last commit are considered not consumed. - * If process restarts after failure then it starts consuming from this rollback position. - *

    - * Internally this is an synchronous call and blocks until either commit is successful or rolled back. Any errors encountered are either passed to the callback - * (if provided) or discarded. - *

    - * @param callback Callback to invoke when the commit completes - */ - @Override - public void commitAsync(OffsetCommitCallback callback) { - try { - client.commitOffsetsSync(subscriptions.allConsumed(), defaultApiTimeoutMs); - if(callback!= null) - callback.onComplete(this.subscriptions.allConsumed(), null); - } catch( Exception exception) { - if(callback !=null) - callback.onComplete(this.subscriptions.allConsumed(), new KafkaException("failed to commit the current consumed offsets", exception)); - } - } - - /** - * This method is not yet supported. - */ - @Override - public void commitAsync(final Map offsets, OffsetCommitCallback callback) { - throw new FeatureNotSupportedException("This feature is not suported for this release."); - } - - /** - * Overrides the fetch offset that the consumer will use on the next + + // ConsumerNetworkClient uses network client to perform all consumer operations + // i.e. poll/subscribe/joingroup/sync/commit/seek + this.client = new ConsumerNetworkClient(groupId, logContext, networkClient, metadata, subscriptions, + this.assignors, config.getBoolean(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG), + config.getInt(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG), time, retryBackoffMs, + maxPollIntervalMs, this.requestTimeoutMs, sessionTimeoutMs, defaultApiTimeoutMs, aqConsumer, + metrics); + + this.okcMetrics = new OkafkaConsumerMetrics(metrics, metricGrpPrefix); + + cgMetadata = new ConsumerGroupMetadata(groupId); + + config.logUnused(); + AppInfoParser.registerAppInfo(JMX_PREFIX, clientId, metrics, time.milliseconds()); + + log.debug("Kafka consumer initialized"); + } catch (Throwable t) { + // call close methods if internal objects are already constructed + // this is to prevent resource leak. + close(0, true); + // now propagate the exception + throw new KafkaException("Failed to construct kafka consumer", t); + } + } + + /** + * Get the Oracle database connection used to consume records from Oracle + * Transactional Event Queue. + */ + public Connection getDBConnection() throws KafkaException { + return ((ConsumerNetworkClient) client).getDBConnection(); + } + + /** + * Get the set of partitions currently assigned to this consumer using topic + * subscription. (which may be none if the assignment hasn't happened yet, or + * the partitions are in the process of getting reassigned). + * + */ + public Set assignment() { + return subscriptions.assignedPartitions(); + } + + /** + * Get the current subscription. Will return the same topics used in the most + * recent call to {@link #subscribe(Collection, ConsumerRebalanceListener)} , or + * an empty set if no such call has been made. + */ + public Set subscription() { + return subscriptions.subscription(); + } + + /** + * Subscribe to the given list of topics to get partitions assigned dynamically. + * However OKafka 23.4.0.0 supports subscription to only a single topic. + * Partitions are assigned dynamically to consumer based on partition.assignment.strategy. + *

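As a side note on the `getDBConnection()` accessor added earlier in this hunk: it exposes the consumer's own JDBC connection, so an application can issue DML on the same database session that the poll/commit cycle uses. The sketch below assumes the returned type is `java.sql.Connection`, that the session exists once `poll()` has run, and that DML on this connection commits together with the offsets in `commitSync()`; the table name is hypothetical. Treat all of these as assumptions to verify, not behavior guaranteed by this diff.

```java
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.time.Duration;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.oracle.okafka.clients.consumer.KafkaConsumer;

public class TransactionalProcessing {
    // Assumption: DML issued on the connection returned by getDBConnection() shares the
    // consumer's database session, so it is committed or rolled back together with the
    // consumed offsets when commitSync() runs.
    static void pollAndStore(KafkaConsumer<String, String> consumer) throws Exception {
        ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));
        Connection conn = consumer.getDBConnection(); // obtained after poll so the session exists
        try (PreparedStatement ps = conn.prepareStatement(
                "insert into processed_events(msg_key, msg_value) values (?, ?)")) { // hypothetical table
            for (ConsumerRecord<String, String> rec : records) {
                ps.setString(1, rec.key());
                ps.setString(2, rec.value());
                ps.executeUpdate();
            }
        }
        consumer.commitSync();
    }
}
```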
    + * This method takes an object of {@link org.apache.kafka.clients.consumer.ConsumerRebalanceListener ConsumerRebalanceListener}. + * Its {@link ConsumerRebalanceListener#onPartitionsAssigned(Collection) onPartitionsAssigned} method will be invoked when + * partitions are assigned to this consumer. Similarly {@link ConsumerRebalanceListener#onPartitionsRevoked(Collection) onPartitionsRevoked} + * will be invoked when partitions are revoked. + *

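A short sketch of the single-topic subscription with the rebalance listener described above. The topic name and the listener bodies are illustrative only; the listener type comes from `org.apache.kafka.clients.consumer` as referenced in the javadoc.

```java
import java.util.Collection;
import java.util.Collections;

import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
import org.apache.kafka.common.TopicPartition;
import org.oracle.okafka.clients.consumer.KafkaConsumer;

public class SubscribeExample {
    static void subscribeSingleTopic(KafkaConsumer<String, String> consumer) {
        // Only one topic may be subscribed at a time; passing more than one topic
        // results in IllegalArgumentException, per the javadoc above.
        consumer.subscribe(Collections.singletonList("TXEQ"), new ConsumerRebalanceListener() {
            @Override
            public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
                System.out.println("Assigned: " + partitions);
            }

            @Override
            public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
                System.out.println("Revoked: " + partitions);
            }
        });
    }
}
```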
    + * Topic subscriptions are not incremental. This list will replace the + * current assignment (if there is one). . + * + * If the given list of topics is empty, it is treated the same as + * {@link #unsubscribe()}. This call has effect only when poll is invoked. + *

    + * + * @param topics The list of topics to subscribe to + * @param listener null if not null is ignored + * @throws IllegalArgumentException If topics is null or contains null or empty + * elements or size of topics is greater than + * one. + */ + @Override + public void subscribe(Collection topics, ConsumerRebalanceListener listener) { + acquireAndEnsureOpen(); + try { + if (topics == null) { + throw new IllegalArgumentException("Topic collection to subscribe to cannot be null"); + + } else if (topics.isEmpty()) { + // treat subscribing to empty topic list as the same as unsubscribing + this.unsubscribe(); + } else { + if (topics.size() > 1) + throw new IllegalArgumentException("Only one topic can be subscribed"); + + for (String topic : topics) { + if (topic == null || topic.trim().isEmpty()) + throw new IllegalArgumentException( + "Topic collection to subscribe to cannot contain null or empty topic"); + } + // Only one topic can be subscribed, unsubcribe to previous topics before + // subscribing to new topic + Set Alltopics = subscriptions.metadataTopics(); + if (Alltopics.size() > 0) { + this.unsubscribe(); + } + + log.debug("Subscribed to topic(s): {}", Utils.join(topics, ", ")); + Set subscribedTopicSet = new HashSet<>(topics); + this.subscriptions.subscribe(subscribedTopicSet, listener); + // metadata.setTopics(subscriptions.groupSubscription()); + // Change for 2.8.1 groupSubscription() is not present any more + metadata.setTopics(subscribedTopicSet); + + } + } finally { + release(); + } + } + + /** + * Subscribe to the given list of topics to get partitions assigned dynamically. + * However OKafka 23.4.0.0 supports subscription to only a single topic. + * Partitions are assigned dynamically to consumer based on partition.assignment.strategy. + * + * Topic subscriptions are not incremental. This list will replace the + * current assignment (if there is one). . + * + * If the given list of topics is empty, it is treated the same as + * {@link #unsubscribe()}. This call has effect only when poll is invoked. + *

    + * This is a short-hand for + * {@link #subscribe(Collection, ConsumerRebalanceListener)}, which uses a no-op + * listener be default. + * + * @param topics The list of topics to subscribe to + * @throws IllegalArgumentException If topics is null or contains null or empty + * elements or size of topics is greater than + * one. + */ + @Override + public void subscribe(Collection topics) { + subscribe(topics, new NoOpConsumerRebalanceListener()); + } + + /** + * This method is not yet supported. + */ + @Override + public void subscribe(Pattern pattern, ConsumerRebalanceListener listener) { + throw new FeatureNotSupportedException("This feature is not suported for this release."); + } + + /** + * This method is not yet supported. + */ + @Override + public void subscribe(Pattern pattern) { + subscribe(pattern, new NoOpConsumerRebalanceListener()); + } + + /** + * Unsubscribe from topic currently subscribed with + * {@link #subscribe(Collection)}. + */ + public void unsubscribe() { + acquireAndEnsureOpen(); + try { + log.debug("Unsubscribed to all topics "); + this.subscriptions.unsubscribe(); + this.client.clearSubscription(); + this.client.unsubscribe(); + this.metadata.needMetadataForAllTopics(false); + } finally { + release(); + } + } + + /** + * This method is not yet supported. + */ + @Override + public void assign(Collection partitions) { + throw new FeatureNotSupportedException("This feature is not suported for this release."); + } + + /** + * Fetch data for the topic specified using {@link #subscribe(Collection)} APIs. + * It is an error to not have subscribed to any topic before polling for data + * Consumer maintains a single connection/session to any one of the oracle + * database instances. Each consumer in a consumer group is dynamically assigned partition(s) + * of subscribed topic. + *

    + * On each poll, consumer consumes messages from the last fetch position(offset). + * If consumer is closed or application crashes without committing the consumed records then + * all uncommitted consumed messages are made available again. Next consumer who consumed from these partitions will start + * consuming records from the last committed offset. + * + * This method returns immediately if there are records available. Otherwise, it will await the passed timeout. + * If the timeout expires, an empty record set will be returned. + * Note that this method may block beyond the timeout in order to complete partition rebalancing. + * @see "Section 'Consumer Groups and Topic Subscriptions' in org.oracle.okafka.clients.consumer.KafkaConsumer documentation to understand 'rebalancing'" + * + * @param timeout The time, in milliseconds, spent waiting in poll. + * @return map of topic to records since the last fetch for the subscribed list + * of topic. + * + * @throws org.oracle.okafka.common.KafkaException for any other unrecoverable + * errors (e.g. errors + * deserializing key/value + * pairs, or any new error cases + * in future versions) + * @throws java.lang.IllegalArgumentException if the timeout value is + * negative + * @throws java.lang.IllegalStateException if the consumer is not + * subscribed to any topic. + * + */ + @Deprecated + @Override + public ConsumerRecords poll(final long timeout) { + if (timeout < 0) + throw new IllegalArgumentException("Timeout must not be negative"); + + return poll(time.timer(timeout), false); + } + + /** + * Fetch data for the topic specified using {@link #subscribe(Collection)} APIs. + * It is an error to not have subscribed to any topic before polling for data. + * Each consumer(session )in a group is assigned a single unique partition of + * subscribed topic. Hence, Poll fetches data from its assigned partition till + * connection/session exists. If existing connection lost and connected to any + * instance of database then consumer(session) might be assigned with new + * partition of subscribed topic. + *

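For context, a typical poll loop against this API looks like the following sketch; the timeout, the processing body, and the stop flag are placeholders.

```java
import java.time.Duration;
import java.util.concurrent.atomic.AtomicBoolean;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.oracle.okafka.clients.consumer.KafkaConsumer;

public class PollLoop {
    static void run(KafkaConsumer<String, String> consumer, AtomicBoolean stopped) {
        while (!stopped.get()) {
            // Returns as soon as records are available, otherwise waits up to the timeout.
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));
            for (ConsumerRecord<String, String> rec : records) {
                System.out.printf("topic=%s partition=%d offset=%d key=%s value=%s%n",
                        rec.topic(), rec.partition(), rec.offset(), rec.key(), rec.value());
            }
            if (!records.isEmpty()) {
                consumer.commitSync(); // commit everything consumed since the last commit
            }
        }
    }
}
```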
    + * On each poll, consumer will try to use the last consumed offset as the + * starting offset and fetch sequentially. + * + * @param timeout The time, in milliseconds, spent waiting in poll. + * @return map of topic to records since the last fetch for the subscribed list + * of topic. + * + * @throws org.oracle.okafka.common.KafkaException for any other unrecoverable + * errors (e.g. errors + * deserializing key/value + * pairs) + * @throws java.lang.IllegalArgumentException if the timeout value is + * negative + * @throws java.lang.IllegalStateException if the consumer is not + * subscribed to any topic. + * @throws java.lang.ArithmeticException if the timeout is greater + * than {@link Long#MAX_VALUE} + * milliseconds. + * + */ + @Override + public ConsumerRecords poll(final Duration timeout) { + if (timeout.toMillis() < 0) + throw new IllegalArgumentException("Timeout must not be negative"); + + return poll(time.timer(timeout), true); + } + + private ConsumerRecords poll(final Timer timer, final boolean includeMetadataInTimeout) { + acquireAndEnsureOpen(); + + try { + this.okcMetrics.recordPollStart(timer.currentTimeMs()); + // if (this.subscriptions.hasNoSubscription()) { + // Changes for 2.8.1 use hasNoSubscriptionOrUserAssignment instead + // hsNoSubscription + if (this.subscriptions.hasNoSubscriptionOrUserAssignment()) { + throw new IllegalStateException("Consumer is not subscribed to any topics or assigned any partitions"); + } + do { + // poll for new data until the timeout expires + long elapsedTime = 0L; + final long metadataEnd; + if (includeMetadataInTimeout) { + final long metadataStart = time.milliseconds(); + if (!updateMetadataAndSubscribeIfNeeded(timer.remainingMs())) { + return ConsumerRecords.empty(); + } + + timer.update(time.milliseconds()); + metadataEnd = time.milliseconds(); + elapsedTime += metadataEnd - metadataStart; + + } else { + while (!updateMetadataAndSubscribeIfNeeded(Long.MAX_VALUE)) { + log.warn("Still waiting for metadata"); + } + metadataEnd = time.milliseconds(); + timer.update(time.milliseconds()); + } + + final long syncStart = time.milliseconds(); + client.maybeAutoCommitOffsetsSync(time.milliseconds()); + final long syncEnd = time.milliseconds(); + elapsedTime += syncStart - syncEnd; + // final long fetchStart = time.milliseconds(); + final Map>> records = pollForFetches(timer.remainingMs()); + + if (!records.isEmpty()) { + + return this.interceptors.onConsume(new ConsumerRecords<>(records)); + } + + } while (timer.notExpired()); + // final long fetchEnd = time.milliseconds(); + // elapsedTime += fetchEnd - fetchStart; + return ConsumerRecords.empty(); + } catch (InvalidLoginCredentialsException exception) { + log.error("Exception from poll: " + exception.getMessage(), exception); + log.info("Closing the consumer due to exception : " + exception.getMessage()); + close(); + throw new AuthenticationException(exception.getMessage()); + } finally { + release(); + this.okcMetrics.recordPollEnd(timer.currentTimeMs()); + + } + } + + private boolean updateMetadataAndSubscribeIfNeeded(long timeout) { + long elapsed = 0L; + long subscriptionStart = time.milliseconds(); + client.maybeUpdateMetadata(timeout); + elapsed += time.milliseconds() - subscriptionStart; + if (!client.mayBeTriggerSubcription(timeout - elapsed)) { + return false; + } + elapsed += time.milliseconds() - subscriptionStart; + if (elapsed <= timeout) { + Set partitions = subscriptions.partitionsNeedingReset(time.milliseconds()); + if (partitions.isEmpty()) + return true; + + final Map 
offsetResetTimestamps = new HashMap<>(); + for (final TopicPartition partition : partitions) { + Long seekPos = offsetResetStrategyTimestamp(partition); + if (seekPos != null) + offsetResetTimestamps.put(partition, seekPos); + } + + return client.resetOffsetsSync(offsetResetTimestamps, timeout - elapsed); + + } + return true; + } + + private Map>> pollForFetches(final long timeoutMs) { + + if (timeoutMs <= 0) + return Collections.>>emptyMap(); + + return createConsumerRecordsMap(client.poll(timeoutMs, fetchManagerMetrics)); + } + /* + * private Map>> + * createConsumerRecordsMap(List messages, boolean obsolete) + * { + * + * if(messages.size() == 0 ) { return Collections.>>emptyMap(); } + * + * Map>> consumerRecords = new + * HashMap<>(); ConsumerRecord record; String topic = null; int partition + * = -1; for(AQjmsBytesMessage message : messages) { try { + * + * byte[] valueByteArray = message.getBytesData(); byte[] keyByteArray = + * message.getJMSCorrelationIDAsBytes(); + * + * //topic = message.getStringProperty("topic"); topic = + * ((AQjmsDestination)message.getJMSDestination()).getTopicName(); try { + * partition = message.getIntProperty(AQClient.PARTITION_PROPERTY)/2; } + * catch(Exception e) { try { partition = + * (int)message.getLongProperty(AQClient.PARTITION_PROPERTY)/2; }catch(Exception + * e1) { + * + * } } K key = this.keyDeserializer.deserialize(topic, keyByteArray); V value = + * this.valueDeserializer.deserialize(topic, valueByteArray); OKafkaOffset + * okOffset = MessageIdConverter.getOKafkaOffset(message.getJMSMessageID(), + * true, true); record = new ConsumerRecord<>(topic, partition, + * okOffset.getOffset(), message.getJMSTimestamp(), + * TimestampType.LOG_APPEND_TIME, null, valueByteArray.length == 0 ? + * ConsumerRecord.NULL_SIZE : valueByteArray.length, valueByteArray.length == 0 + * ? 
ConsumerRecord.NULL_SIZE : valueByteArray.length, key, value, new + * RecordHeaders()); } catch(JMSException exception) { + * log.error("JMS Exception while writing response " + exception, exception); + * record = new ConsumerRecord<>("", -1, -1, -1, + * TimestampType.NO_TIMESTAMP_TYPE, null, ConsumerRecord.NULL_SIZE, + * ConsumerRecord.NULL_SIZE, null, null, new RecordHeaders()); } catch(Exception + * e) { record = new ConsumerRecord<>("", -1, -1, -1, + * TimestampType.NO_TIMESTAMP_TYPE, null, ConsumerRecord.NULL_SIZE, + * ConsumerRecord.NULL_SIZE, null, null, new RecordHeaders()); + * log.error("Exception while writing response " + e,e); } TopicPartition tp = + * new TopicPartition(topic, partition); if(tp != null && partition != -1) { + * //Changes for 2.8.1 try { subscriptions.position(tp, new + * FetchPosition(record.offset(), Optional.empty(), new + * LeaderAndEpoch(Optional.empty(), Optional.empty()))); } + * catch(IllegalStateException isE) { if(metadata.getDBMajorVersion() < 23) { // + * Partition assigned by TEQ Server not through JoinGroup/Sync + * subscriptions.assignFromSubscribed( Collections.singleton(tp)); + * subscriptions.seek(tp,0); subscriptions.completeValidation(tp); + * subscriptions.position(tp, new FetchPosition(record.offset(), + * Optional.empty(), new LeaderAndEpoch(Optional.empty(), Optional.empty()))); } + * subscriptions.position(tp, new FetchPosition(record.offset(), + * Optional.empty(), new LeaderAndEpoch(Optional.empty(), Optional.empty()))); } + * catch(Exception e) { log.error("Exception while setting fetch position " + e + * , e); e.printStackTrace(); } / * OffsetAndMetadata offset = + * subscriptions.allConsumed().get(tp); if(offset == null) + * subscriptions.allConsumed().put(tp , new OffsetAndMetadata(record.offset())); + * else { if(offset.offset() < record.offset()) + * subscriptions.allConsumed().put(tp , new OffsetAndMetadata(record.offset())); + * } / } + * + * if(!consumerRecords.containsKey(tp)) consumerRecords.put(tp, new + * ArrayList>()); consumerRecords.get(tp).add(record); + * + * } return consumerRecords; } + */ + + private Map>> createConsumerRecordsMap(List messages) { + + if (messages.size() == 0) { + return Collections.>>emptyMap(); + } + + Map>> consumerRecords = new HashMap<>(); + Map headersSize = new HashMap(); + ConsumerRecord record; + String topic = null; + int partition = -1; + int messageVersion = 1; + byte[] keyArray = null; + byte[] valueArray = null; + int keyLen = 0; + int valueLen = 0; + int hSize = 0; + + for (AQjmsBytesMessage message : messages) { + keyArray = null; + valueArray = null; + keyLen = 0; + valueLen = 0; + + try { + RecordHeaders rcH = new RecordHeaders(); + try { + messageVersion = message.getIntProperty(AQClient.MESSAGE_VERSION); + } catch (Exception e) { + messageVersion = 1; + } + + /* + * Received Byte Payload in below format: | KEY LENGTH (4 Bytes Fixed) | KEY | | + * VALUE LENGTH (4 BYTES FIXED) | VALUE | | HEADER NAME LENGTH(4 BYTES FIXED) | + * HEADER NAME | | HEADER VALUE LENGTH (4 BYTES FIXED) | HEADER VALUE | | HEADER + * NAME LENGTH(4 BYTES FIXED) | HEADER NAME | | HEADER VALUE LENGTH (4 BYTES + * FIXED) | HEADER VALUE | + * + * For records with null key , KEY LENGTH is set to 0. For records with null + * value, VALUE LENGTH is set to 0. 
Number of headers are set in property + * "AQINTERNAL_HEADERCOUNT" + * + */ + if (messageVersion == 2) { + byte[] payloadArray = message.getBytesData(); + byte[] bLength = new byte[DLENGTH_SIZE]; + + // Read Key First + ByteBuffer pBuffer = ByteBuffer.wrap(payloadArray); + pBuffer.get(bLength, 0, DLENGTH_SIZE); + keyLen = ConnectionUtils.convertToInt(bLength); + keyArray = new byte[keyLen]; + pBuffer.get(keyArray, 0, keyLen); + + // Get Actual Payload + pBuffer.get(bLength, 0, DLENGTH_SIZE); + valueLen = ConnectionUtils.convertToInt(bLength); + + valueArray = new byte[valueLen]; + pBuffer.get(valueArray, 0, valueLen); + + int hCount = 0; + try { + hCount = message.getIntProperty(AQClient.HEADERCOUNT_PROPERTY); + } catch (Exception e) { + hCount = 0; + } + int hKeyLen = 0; + int hValueLen = 0; + + for (int i = 0; i < hCount; i++) { + pBuffer.get(bLength, 0, DLENGTH_SIZE); + hKeyLen = ConnectionUtils.convertToInt(bLength); + if (hKeyLen > 0) { + byte[] hKeyArray = new byte[hKeyLen]; + hSize += hKeyArray.length; + pBuffer.get(hKeyArray, 0, hKeyLen); + String hKey = new String(hKeyArray); + pBuffer.get(bLength, 0, DLENGTH_SIZE); + hValueLen = ConnectionUtils.convertToInt(bLength); + byte[] hValueArray = new byte[hValueLen]; + hSize += hValueArray.length; + pBuffer.get(hValueArray, 0, hValueLen); + rcH.add(hKey, hValueArray); + } + + } + } else { + keyArray = message.getJMSCorrelationIDAsBytes(); + valueArray = message.getBytesData(); + } + + topic = ((AQjmsDestination) message.getJMSDestination()).getTopicName(); + try { + partition = message.getIntProperty(AQClient.PARTITION_PROPERTY) / 2; + } catch (Exception e) { + try { + partition = (int) message.getLongProperty(AQClient.PARTITION_PROPERTY) / 2; + } catch (Exception e1) { + + } + } + K key = this.keyDeserializer.deserialize(topic, keyArray); + V value = this.valueDeserializer.deserialize(topic, valueArray); + OKafkaOffset okOffset = MessageIdConverter.getOKafkaOffset(message.getJMSMessageID(), true, true); + + record = new ConsumerRecord<>(topic, partition, okOffset.getOffset(), message.getJMSTimestamp(), + TimestampType.LOG_APPEND_TIME, null, keyLen == 0 ? ConsumerRecord.NULL_SIZE : keyLen, + valueLen == 0 ? 
ConsumerRecord.NULL_SIZE : valueLen, key, value, rcH); + + } catch (JMSException exception) { + log.error("JMS Exception while creting ConsumerRecord " + exception, exception); + record = new ConsumerRecord<>("", -1, -1, -1, TimestampType.NO_TIMESTAMP_TYPE, null, + ConsumerRecord.NULL_SIZE, ConsumerRecord.NULL_SIZE, null, null, new RecordHeaders()); + } catch (Exception e) { + record = new ConsumerRecord<>("", -1, -1, -1, TimestampType.NO_TIMESTAMP_TYPE, null, + ConsumerRecord.NULL_SIZE, ConsumerRecord.NULL_SIZE, null, null, new RecordHeaders()); + + log.error("Exception while creting ConsumerRecord " + e, e); + } + + TopicPartition tp = new TopicPartition(topic, partition); + if (tp != null && partition != -1) { + // Changes for 2.8.1 + try { + subscriptions.position(tp, new FetchPosition(record.offset(), Optional.empty(), + new LeaderAndEpoch(Optional.empty(), Optional.empty()))); + } catch (IllegalStateException isE) { + if (metadata.getDBMajorVersion() < 23) { + // Partition assigned by TEQ Server not through JoinGroup/Sync + subscriptions.assignFromSubscribed(Collections.singleton(tp)); + subscriptions.seek(tp, 0); + subscriptions.completeValidation(tp); + subscriptions.position(tp, new FetchPosition(record.offset(), Optional.empty(), + new LeaderAndEpoch(Optional.empty(), Optional.empty()))); + } + subscriptions.position(tp, new FetchPosition(record.offset(), Optional.empty(), + new LeaderAndEpoch(Optional.empty(), Optional.empty()))); + } catch (Exception e) { + log.error("Exception while setting fetch position " + e, e); + } + /* + * OffsetAndMetadata offset = subscriptions.allConsumed().get(tp); if(offset == + * null) subscriptions.allConsumed().put(tp , new + * OffsetAndMetadata(record.offset())); else { if(offset.offset() < + * record.offset()) subscriptions.allConsumed().put(tp , new + * OffsetAndMetadata(record.offset())); } + */ + } + + if (!consumerRecords.containsKey(tp)) + consumerRecords.put(tp, new ArrayList>()); + consumerRecords.get(tp).add(record); + + if (!headersSize.containsKey(tp)) + headersSize.put(tp, hSize); + headersSize.put(tp, headersSize.get(tp) + hSize); + + } + + Set topicPartitions = new HashSet(consumerRecords.keySet()); + FetchResponseMetricAggregator fetchResponseMetricAggregator = new FetchResponseMetricAggregator( + fetchManagerMetrics, topicPartitions); + + for (Map.Entry>> entry : consumerRecords.entrySet()) { + int bytes = 0; + List> recordList = entry.getValue(); + int recordCount = recordList.size(); + + for (int i = 0; i < recordCount; i++) { + bytes += recordList.get(i).serializedKeySize(); + bytes += recordList.get(i).serializedValueSize(); + bytes += hSize; + } + fetchResponseMetricAggregator.record(entry.getKey(), bytes, recordCount); + } + return consumerRecords; + } + + private long remainingTimeAtLeastZero(final long timeoutMs, final long elapsedTime) { + return Math.max(0, timeoutMs - elapsedTime); + } + + /** + * Commits messages(offsets) consumed using {@link #poll(Duration) poll()} since + * last commit in this session. + * + * Commit on session is either successfull or rollback. Commit fails only in + * rare cases like shutdown. Commit failure results in rollback. If rollback + * occurs then consumed messages since last commit are considered not consumed. + * If process restarts after failure then it starts consuming from this + * position. + *

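The version-2 record payload layout described in the `createConsumerRecordsMap` comment earlier in this hunk (4-byte key length and key, 4-byte value length and value, then a length-prefixed name/value pair per header) can be illustrated with a small standalone decoder. This is a sketch only: it reads the 4-byte lengths with `ByteBuffer.getInt()`, whereas the actual code delegates to `ConnectionUtils.convertToInt()`, so the byte order is an assumption, and the header count normally comes from the `AQINTERNAL_HEADERCOUNT` message property rather than from the buffer itself.

```java
import java.nio.ByteBuffer;
import java.util.AbstractMap.SimpleEntry;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;

public class PayloadV2Decoder {
    public static class Decoded {
        public byte[] key;    // zero-length array when KEY LENGTH was 0 (null key)
        public byte[] value;  // zero-length array when VALUE LENGTH was 0 (null value)
        public List<Map.Entry<String, byte[]>> headers = new ArrayList<>();
    }

    // headerCount would normally be taken from the AQINTERNAL_HEADERCOUNT JMS property.
    public static Decoded decode(byte[] payload, int headerCount) {
        ByteBuffer buf = ByteBuffer.wrap(payload);
        Decoded d = new Decoded();

        d.key = new byte[buf.getInt()];   // | KEY LENGTH (4 bytes) | KEY |
        buf.get(d.key);

        d.value = new byte[buf.getInt()]; // | VALUE LENGTH (4 bytes) | VALUE |
        buf.get(d.value);

        for (int i = 0; i < headerCount; i++) {
            byte[] name = new byte[buf.getInt()];  // | HEADER NAME LENGTH | HEADER NAME |
            buf.get(name);
            byte[] val = new byte[buf.getInt()];   // | HEADER VALUE LENGTH | HEADER VALUE |
            buf.get(val);
            d.headers.add(new SimpleEntry<>(new String(name), val));
        }
        return d;
    }
}
```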
    + * This is a synchronous commit and will block until either the commit succeeds + * or rollback happens. Commit does not take any timeout into account for + * completion of call. + *

    + * + * @throws org.oracle.okafka.common.KafkaException for any other unrecoverable + * errors (i.e topic doesn't + * exist, session rolled back as + * db shutdown). + */ + @Override + public void commitSync() { + commitSync(Duration.ofMillis(defaultApiTimeoutMs)); + } + + /** + * Commits messages(offsets) consumed using {@link #poll(Duration) poll()} since + * last commit in this session. + * + * Commit on session is either successfull or rollback. Commit fails only in + * rare cases like shutdown. Commit failure results in rollback. If rollback + * occurs then consumed messages since last commit are considered not consumed. + * If process restarts after failure then it starts consuming from this + * position. + *

    + * This is a synchronous commit and will block until either the commit succeeds + * or rollback happens. Commit does not take any timeout into account for + * completion of call. This call is equivalent to commitSync(). + *

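A small usage sketch for this bounded variant. The excerpt does not pin down whether the wrapped exception comes from the `org.oracle.okafka` or the `org.apache.kafka` package, so the sketch catches broadly; the timeout value is a placeholder.

```java
import java.time.Duration;

import org.oracle.okafka.clients.consumer.KafkaConsumer;

public class CommitWithTimeout {
    static void commitOrLog(KafkaConsumer<String, String> consumer) {
        try {
            consumer.commitSync(Duration.ofSeconds(30));
        } catch (Exception e) {
            // A failed commit means the session rolled back; records consumed since the
            // last successful commit will be redelivered on a later poll.
            System.err.println("Commit failed, records will be reprocessed: " + e.getMessage());
        }
    }
}
```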
    + * + * @throws org.oracle.okafka.common.KafkaException for any other unrecoverable + * errors . + */ + @Override + public void commitSync(Duration timeout) { + acquireAndEnsureOpen(); + long commitStart = time.nanoseconds(); + + try { + client.commitOffsetsSync(subscriptions.allConsumed(), timeout.toMillis()); + interceptors.onCommit(this.subscriptions.allConsumed()); + } catch (Exception exception) { + throw new KafkaException("failed to commit messages", exception); + } finally { + release(); + } + } + + /** + * This method not yet supported. + */ + @Override + public void commitSync(final Map offsets) { + throw new FeatureNotSupportedException("This feature is not suported for this release."); + } + + /** + * This method not yet supported. + */ + @Override + public void commitSync(final Map offsets, final Duration timeout) { + throw new FeatureNotSupportedException("This feature is not suported for this release."); + } + + /** + * Commit mesages consumed using {@link #poll(Duration)} for the subscribed + * topic and assigned partition in this session since last commit. This call is + * equivalent to {@link #commitAsync(OffsetCommitCallback)} with null callback. + */ + @Override + public void commitAsync() { + commitAsync(null); + } + + /** + * Commits messages(offsets) consumed using {@link #poll(Duration) )} since last + * commit in this session. + * + * Commit on session is either successfull or rollback. Commit fails only in + * rare cases like shutdown. Commit failure results in rollback. If rollback + * occurs then consumed messages since last commit are considered not consumed. + * If process restarts after failure then it starts consuming from this rollback + * position. + *

    + * Internally this is an synchronous call and blocks until either commit is + * successful or rolled back. Any errors encountered are either passed to the + * callback (if provided) or discarded. + *

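Since this overload is synchronous under the hood and reports the outcome only through the callback, a typical invocation looks like the sketch below. The `org.apache.kafka.clients.consumer.OffsetCommitCallback` type is assumed here; the callback bodies are illustrative.

```java
import org.apache.kafka.clients.consumer.OffsetCommitCallback;
import org.oracle.okafka.clients.consumer.KafkaConsumer;

public class CommitCallbackExample {
    static void commitWithCallback(KafkaConsumer<String, String> consumer) {
        OffsetCommitCallback callback = (offsets, exception) -> {
            if (exception != null) {
                System.err.println("Commit failed: " + exception.getMessage());
            } else {
                System.out.println("Committed offsets: " + offsets);
            }
        };
        consumer.commitAsync(callback);
    }
}
```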
    + * + * @param callback Callback to invoke when the commit completes + */ + @Override + public void commitAsync(OffsetCommitCallback callback) { + try { + client.commitOffsetsSync(subscriptions.allConsumed(), defaultApiTimeoutMs); + if (callback != null) + callback.onComplete(this.subscriptions.allConsumed(), null); + } catch (Exception exception) { + if (callback != null) + callback.onComplete(this.subscriptions.allConsumed(), + new KafkaException("failed to commit the current consumed offsets", exception)); + } + } + + /** + * This method is not yet supported. + */ + @Override + public void commitAsync(final Map offsets, OffsetCommitCallback callback) { + throw new FeatureNotSupportedException("This feature is not suported for this release."); + } + + /** + * Overrides the fetch offset that the consumer will use on the next * {@link #poll(Duration) poll(timeout)}. If this API is invoked for the same * partition more than once, the latest offset will be used on the next poll(). - * Seeking to already consumed offset/message , - * in current or previous sessions, doesn't reconsume the message. + * Seeking to already consumed offset/message , in current or previous sessions, + * doesn't reconsume the message. * * @throws IllegalArgumentException if the provided offset is negative - */ - // @throws IllegalStateException if the provided TopicPartition is not - // assigned to this consumer - @Override - public void seek(TopicPartition partition, long offset) { - if (offset < 0) + */ + // @throws IllegalStateException if the provided TopicPartition is not + // assigned to this consumer + @Override + public void seek(TopicPartition partition, long offset) { + if (offset < 0) throw new IllegalArgumentException("seek offset must not be a negative number"); acquireAndEnsureOpen(); try { log.debug("Seeking to offset {} for partition {}", offset, partition); - this.subscriptions.seek(partition, offset); + Map offsetResetTimestamps = new HashMap<>(); + offsetResetTimestamps.put(partition, offset); + client.resetOffsetsSync(offsetResetTimestamps, offset); + } finally { release(); } - } - - /** - * Seek to the first available offset for each of the given partitions. This function - * evaluates lazily, seeking to the first offset in all partitions only when - * {@link #poll(Duration)} is called. - * Seeking to already consumed offset/message , - * in current or previous sessions, doesn't reconsume the message. + } + + /** + * Seek to the first available offset for each of the given partitions. This + * function evaluates lazily, seeking to the first offset in all partitions only + * when {@link #poll(Duration)} is called. Seeking to already consumed + * offset/message , in current or previous sessions, doesn't reconsume the + * message. * * @throws IllegalArgumentException if {@code partitions} is {@code null} - */ - // @throws IllegalStateException if the provided TopicPartition is not - // assigned to this consumer + */ + // @throws IllegalStateException if the provided TopicPartition is not + // assigned to this consumer - @Override - public void seekToBeginning(Collection partitions) { - if (partitions == null) + @Override + public void seekToBeginning(Collection partitions) { + if (partitions == null) throw new IllegalArgumentException("Partitions collection cannot be null"); acquireAndEnsureOpen(); @@ -923,23 +1438,22 @@ public void seekToBeginning(Collection partitions) { } finally { release(); } - } + } - /** - * Seek to the last offset for each of the given partitions. 
This function + /** + * Seek to the last offset for each of the given partitions. This function * evaluates lazily, seeking to the final offset in all partitions only when - * {@link #poll(Duration)} is called. - * Seeking to already consumed offset/message , - * in current or previous sessions, doesn't reconsume the message. + * {@link #poll(Duration)} is called. Seeking to already consumed offset/message + * , in current or previous sessions, doesn't reconsume the message. * * @throws IllegalArgumentException if {@code partitions} is {@code null} - */ - // @throws IllegalStateException if the provided TopicPartition is not - // assigned to this consumer + */ + // @throws IllegalStateException if the provided TopicPartition is not + // assigned to this consumer - @Override - public void seekToEnd(Collection partitions) { - if (partitions == null) + @Override + public void seekToEnd(Collection partitions) { + if (partitions == null) throw new IllegalArgumentException("Partitions collection cannot be null"); acquireAndEnsureOpen(); @@ -953,285 +1467,556 @@ public void seekToEnd(Collection partitions) { } finally { release(); } - } - - /** - * This method is not yet supported. - */ - @Override - public long position(TopicPartition partition) { - throw new FeatureNotSupportedException("This feature is not suported for this release."); - } - - /** - * This method is not yet supported. - */ - @Override - public long position(TopicPartition partition, final Duration timeout) { - throw new FeatureNotSupportedException("This feature is not suported for this release."); - } - - /** - * This method is not yet supported. - */ - @Override - public OffsetAndMetadata committed(TopicPartition partition) { - throw new FeatureNotSupportedException("This feature is not suported for this release."); - } - - /** - * This method is not yet supported. - */ - @Override - public OffsetAndMetadata committed(TopicPartition partition, final Duration timeout) { - throw new FeatureNotSupportedException("This feature is not suported for this release."); - } - - /** - * This method is not yet supported. - */ - @Override - public Map metrics() { - throw new FeatureNotSupportedException("This feature is not suported for this release."); - } - - /** - * This method is not yet supported. - */ - @Override - public List partitionsFor(String topic) { - throw new FeatureNotSupportedException("This feature is not suported for this release."); - } - - /** - * This method is not yet supported. - */ - @Override - public List partitionsFor(String topic, Duration timeout) { - throw new FeatureNotSupportedException("This feature is not suported for this release."); - } - - /** - * This method is not yet supported. - */ - @Override - public Map> listTopics() { - throw new FeatureNotSupportedException("This feature is not suported for this release."); - } - - /** - * This method is not yet supported. - */ - @Override - public Map> listTopics(Duration timeout) { - throw new FeatureNotSupportedException("This feature is not suported for this release."); - } - - /** - * This method is not yet supported. - */ - @Override - public void pause(Collection partitions) { - throw new FeatureNotSupportedException("This feature is not suported for this release."); - } - - /** - * This method is not yet supported. - */ - @Override - public void resume(Collection partitions) { - throw new FeatureNotSupportedException("This feature is not suported for this release."); - } - - /** - * This method is not yet supported. 
- */ - @Override - public Set paused() { - throw new FeatureNotSupportedException("This feature is not suported for this release."); - } - - /** - * This method is not yet supported. - */ - @Override - public Map offsetsForTimes(Map timestampsToSearch) { - throw new FeatureNotSupportedException("This feature is not suported for this release."); - } - - /** - * This method is not yet supported. - */ - @Override - public Map offsetsForTimes(Map timestampsToSearch, Duration timeout) { - throw new FeatureNotSupportedException("This feature is not suported for this release."); - } - - /** - * This method is not yet supported. - */ - @Override - public Map beginningOffsets(Collection partitions) { - throw new FeatureNotSupportedException("This feature is not suported for this release."); - } - - /** - * This method is not yet supported. - */ - @Override - public Map beginningOffsets(Collection partitions, Duration timeout) { - throw new FeatureNotSupportedException("This feature is not suported for this release."); - } - - /** - * This method is not yet supported. - */ - @Override - public Map endOffsets(Collection partitions) { - throw new FeatureNotSupportedException("This feature is not suported for this release."); - } - - /** - * This method is not yet supported. - */ - @Override - public Map endOffsets(Collection partitions, Duration timeout) { - throw new FeatureNotSupportedException("This feature is not suported for this release."); - } - - /** - * Tries to close the consumer cleanly. - * If auto-commit is enabled, this will commit the current offsets . Close doen't take timeout into consideration. - * - * @throws org.oracle.okafka.common.KafkaException for any other error during close - */ - @Override - public void close() { - close(Duration.ofMillis(DEFAULT_CLOSE_TIMEOUT_MS)); - } - - /** - * Tries to close the consumer cleanly. - * If auto-commit is enabled, this will commit the current offsets . Close doen't take timeout into consideration. - * - * @param timeout Not used - * @param timeUnit Not used - * @throws IllegalArgumentException If the {@code timeout} is negative. - * @throws org.oracle.okafka.common.KafkaException for any other error during close - * - */ - @Deprecated - @Override - public void close(long timeout, TimeUnit timeUnit) { - close(Duration.ofMillis(timeUnit.toMillis(timeout))); - } - - /** - * Tries to close the consumer cleanly. - * If auto-commit is enabled, this will commit the current offsets . Close doen't take timeout into consideration. - * - * @param timeout not used - * - * @throws IllegalArgumentException If the {@code timeout} is negative. - * @throws org.oracle.okafka.common.KafkaException for any other error during close - */ - @Override - public void close(Duration timeout) { - if (timeout.toMillis() < 0) - throw new IllegalArgumentException("The timeout cannot be negative."); - acquire(); - try { - if (!closed) { - closed = true; - close(timeout.toMillis(), false); - } - } finally { - release(); - } - } - - /** - * This method is not yet supported. - */ - @Override - public void wakeup() { - throw new FeatureNotSupportedException("This feature is not suported for this release."); - } - - private ClusterResourceListeners configureClusterResourceListeners(Deserializer keyDeserializer, Deserializer valueDeserializer, List... 
candidateLists) { - ClusterResourceListeners clusterResourceListeners = new ClusterResourceListeners(); - for (List candidateList: candidateLists) - clusterResourceListeners.maybeAddAll(candidateList); - - clusterResourceListeners.maybeAdd(keyDeserializer); - clusterResourceListeners.maybeAdd(valueDeserializer); - return clusterResourceListeners; - } - - private void close(long timeoutMs, boolean swallowException) { - log.trace("Closing the Kafka consumer"); - AtomicReference firstException = new AtomicReference<>(); - try { - if (client != null) - client.close(Math.min(timeoutMs, requestTimeoutMs)); - } catch (Throwable t) { - firstException.compareAndSet(null, t); - log.error("Failed to close OKafka consumer ", t); - } - ClientUtils.closeQuietly(interceptors, "consumer interceptors", firstException); - ClientUtils.closeQuietly(metrics, "consumer metrics", firstException); - ClientUtils.closeQuietly(keyDeserializer, "consumer key deserializer", firstException); - ClientUtils.closeQuietly(valueDeserializer, "consumer value deserializer", firstException); - AppInfoParser.unregisterAppInfo(JMX_PREFIX, clientId, metrics); - log.debug("Kafka consumer has been closed"); - Throwable exception = firstException.get(); - if (exception != null && !swallowException) { - throw new KafkaException("Failed to close kafka consumer", exception); - } - } - - private Long offsetResetStrategyTimestamp(final TopicPartition partition) { - OffsetResetStrategy strategy = subscriptions.resetStrategy(partition); - if (strategy == OffsetResetStrategy.EARLIEST) - return -2L; - else if (strategy == OffsetResetStrategy.LATEST) - return -1L; - else if (strategy == OffsetResetStrategy.TO_OFFSET) - return subscriptions.position(partition); - else - return null; - } - - /** - * Acquire the light lock and ensure that the consumer hasn't been closed. - * @throws IllegalStateException If the consumer has been closed - */ - private void acquireAndEnsureOpen() { - acquire(); - if (this.closed) { - release(); - throw new IllegalStateException("This consumer has already been closed."); - } - } - - /** - * Acquire the light lock protecting this consumer from multi-threaded access. Instead of blocking - * when the lock is not available, however, we just throw an exception (since multi-threaded usage is not - * supported). - * @throws ConcurrentModificationException if another thread already has the lock - */ - private void acquire() { - long threadId = Thread.currentThread().getId(); - if (threadId != currentThread.get() && !currentThread.compareAndSet(NO_CURRENT_THREAD, threadId)) - throw new ConcurrentModificationException("KafkaConsumer is not safe for multi-threaded access"); - refcount.incrementAndGet(); - } - - /** - * Release the light lock protecting the consumer from multi-threaded access. - */ - private void release() { - if (refcount.decrementAndGet() == 0) - currentThread.set(NO_CURRENT_THREAD); - } + } + + /** + * This method is not yet supported. + */ + @Override + public long position(TopicPartition partition) { + throw new FeatureNotSupportedException("This feature is not suported for this release."); + } + + /** + * This method is not yet supported. + */ + @Override + public long position(TopicPartition partition, final Duration timeout) { + throw new FeatureNotSupportedException("This feature is not suported for this release."); + } + + /** + * This method is not yet supported. 
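+	 * Invoking it throws {@code FeatureNotSupportedException}.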
+ */ + @Override + public OffsetAndMetadata committed(TopicPartition partition) { + throw new FeatureNotSupportedException("This feature is not suported for this release."); + } + + /** + * This method is not yet supported. + */ + @Override + public OffsetAndMetadata committed(TopicPartition partition, final Duration timeout) { + throw new FeatureNotSupportedException("This feature is not suported for this release."); + } + + /** + * This method is not yet supported. + */ + @Override + public Map metrics() { + return Collections.unmodifiableMap(this.metrics.metrics()); +// throw new FeatureNotSupportedException("This feature is not suported for this release."); + + } + + /** + * This method is not yet supported. + */ + @Override + public List partitionsFor(String topic) { + throw new FeatureNotSupportedException("This feature is not suported for this release."); + } + + /** + * This method is not yet supported. + */ + @Override + public List partitionsFor(String topic, Duration timeout) { + throw new FeatureNotSupportedException("This feature is not suported for this release."); + } + + /** + * This method is not yet supported. + */ + @Override + public Map> listTopics() { + throw new FeatureNotSupportedException("This feature is not suported for this release."); + } + + /** + * This method is not yet supported. + */ + @Override + public Map> listTopics(Duration timeout) { + throw new FeatureNotSupportedException("This feature is not suported for this release."); + } + + /** + * This method is not yet supported. + */ + @Override + public void pause(Collection partitions) { + throw new FeatureNotSupportedException("This feature is not suported for this release."); + } + + /** + * This method is not yet supported. + */ + @Override + public void resume(Collection partitions) { + throw new FeatureNotSupportedException("This feature is not suported for this release."); + } + + /** + * This method is not yet supported. + */ + @Override + public Set paused() { + throw new FeatureNotSupportedException("This feature is not suported for this release."); + } + + /** + * This method is not yet supported. + */ + @Override + public Map offsetsForTimes(Map timestampsToSearch) { + throw new FeatureNotSupportedException("This feature is not suported for this release."); + } + + /** + * This method is not yet supported. + */ + @Override + public Map offsetsForTimes(Map timestampsToSearch, + Duration timeout) { + throw new FeatureNotSupportedException("This feature is not suported for this release."); + } + + /** + * This method is not yet supported. + */ + @Override + public Map beginningOffsets(Collection partitions) { + throw new FeatureNotSupportedException("This feature is not suported for this release."); + } + + /** + * This method is not yet supported. + */ + @Override + public Map beginningOffsets(Collection partitions, Duration timeout) { + throw new FeatureNotSupportedException("This feature is not suported for this release."); + } + + /** + * This method is not yet supported. + */ + @Override + public Map endOffsets(Collection partitions) { + throw new FeatureNotSupportedException("This feature is not suported for this release."); + } + + /** + * This method is not yet supported. + */ + @Override + public Map endOffsets(Collection partitions, Duration timeout) { + throw new FeatureNotSupportedException("This feature is not suported for this release."); + } + + /** + * Tries to close the consumer cleanly. If auto-commit is enabled, this will + * commit the current offsets . 
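+	 * <p>
+	 * A minimal usage sketch (the property values and topic name are placeholders,
+	 * and the properties shown are not the complete set a deployment needs):
+	 * <pre>{@code
+	 * Properties props = new Properties();
+	 * props.put("bootstrap.servers", "host:1521");
+	 * props.put("group.id", "MY_GROUP");
+	 * props.put("enable.auto.commit", "true");
+	 * KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
+	 * try {
+	 *     consumer.subscribe(Collections.singletonList("TOPIC_1"));
+	 *     ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
+	 *     // process records here
+	 * } finally {
+	 *     consumer.close(); // commits current offsets when auto-commit is enabled
+	 * }
+	 * }</pre>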
Close doen't take timeout into consideration. + * + * @throws org.apache.kafka.common.KafkaException for any other error during close + */ + @Override + public void close() { + close(Duration.ofMillis(DEFAULT_CLOSE_TIMEOUT_MS)); + } + + /** + * Tries to close the consumer cleanly. If auto-commit is enabled, this will + * commit the current offsets . Close doen't take timeout into consideration. + * + * @param timeout Not used + * @param timeUnit Not used + * @throws IllegalArgumentException If the {@code timeout} is + * negative. + * @throws org.oracle.okafka.common.KafkaException for any other error during + * close + * + */ + @Deprecated + public void close(long timeout, TimeUnit timeUnit) { + close(Duration.ofMillis(timeUnit.toMillis(timeout))); + } + + /** + * Tries to close the consumer cleanly. If auto-commit is enabled, this will + * commit the current offsets . Close doen't take timeout into consideration. + * + * @param timeout not used + * + * @throws IllegalArgumentException If the {@code timeout} is + * negative. + * @throws org.oracle.okafka.common.KafkaException for any other error during + * close + */ + @Override + public void close(Duration timeout) { + if (timeout.toMillis() < 0) + throw new IllegalArgumentException("The timeout cannot be negative."); + acquire(); + try { + if (!closed) { + closed = true; + close(timeout.toMillis(), false); + } + } finally { + release(); + } + } + + /** + * This method is not yet supported. + */ + @Override + public void wakeup() { + throw new FeatureNotSupportedException("This feature is not suported for this release."); + } + + private ClusterResourceListeners configureClusterResourceListeners(Deserializer keyDeserializer, + Deserializer valueDeserializer, List... candidateLists) { + ClusterResourceListeners clusterResourceListeners = new ClusterResourceListeners(); + for (List candidateList : candidateLists) + clusterResourceListeners.maybeAddAll(candidateList); + + clusterResourceListeners.maybeAdd(keyDeserializer); + clusterResourceListeners.maybeAdd(valueDeserializer); + return clusterResourceListeners; + } + + private void close(long timeoutMs, boolean swallowException) { + log.trace("Closing the Kafka consumer"); + AtomicReference firstException = new AtomicReference<>(); + try { + if (client != null) + client.close(Math.min(timeoutMs, requestTimeoutMs)); + } catch (Throwable t) { + firstException.compareAndSet(null, t); + log.error("Failed to close OKafka consumer ", t); + } + /* + * ClientUtils.closeQuietly(interceptors, "consumer interceptors", + * firstException); ClientUtils.closeQuietly(metrics, "consumer metrics", + * firstException); ClientUtils.closeQuietly(keyDeserializer, + * "consumer key deserializer", firstException); + * ClientUtils.closeQuietly(valueDeserializer, "consumer value deserializer", + * firstException); + */ + // Change for 2.8.1 :: closeQuietly moved to Utils instead of CLientUtils + Utils.closeQuietly(interceptors, "consumer interceptors", firstException); + Utils.closeQuietly(metrics, "consumer metrics", firstException); + Utils.closeQuietly(keyDeserializer, "consumer key deserializer", firstException); + Utils.closeQuietly(valueDeserializer, "consumer value deserializer", firstException); + + AppInfoParser.unregisterAppInfo(JMX_PREFIX, clientId, metrics); + log.debug("Kafka consumer has been closed"); + Throwable exception = firstException.get(); + if (exception != null && !swallowException) { + throw new KafkaException("Failed to close kafka consumer", exception); + } + } + + private Long 
offsetResetStrategyTimestamp(final TopicPartition partition) { + OffsetResetStrategy strategy = subscriptions.resetStrategy(partition); + if (strategy == OffsetResetStrategy.EARLIEST) + return -2L; + else if (strategy == OffsetResetStrategy.LATEST) + return -1L; + /* + * Changes for 2.8.1 : Not sure from where TO_OFFSET was introduced. else if + * (strategy == OffsetResetStrategy.TO_OFFSET) return + * subscriptions.position(partition); + */ + else + return null; + } + + /** + * Acquire the light lock and ensure that the consumer hasn't been closed. + * + * @throws IllegalStateException If the consumer has been closed + */ + private void acquireAndEnsureOpen() { + acquire(); + if (this.closed) { + release(); + throw new IllegalStateException("This consumer has already been closed."); + } + } + + /** + * Acquire the light lock protecting this consumer from multi-threaded access. + * Instead of blocking when the lock is not available, however, we just throw an + * exception (since multi-threaded usage is not supported). + * + * @throws ConcurrentModificationException if another thread already has the + * lock + */ + private void acquire() { + long threadId = Thread.currentThread().getId(); + if (threadId != currentThread.get() && !currentThread.compareAndSet(NO_CURRENT_THREAD, threadId)) + throw new ConcurrentModificationException("KafkaConsumer is not safe for multi-threaded access"); + refcount.incrementAndGet(); + } + + /** + * Release the light lock protecting the consumer from multi-threaded access. + */ + private void release() { + if (refcount.decrementAndGet() == 0) + currentThread.set(NO_CURRENT_THREAD); + } + + @Override + public void seek(TopicPartition partition, OffsetAndMetadata offsetAndMetadata) { + // TODO Auto-generated method stub + + } + + @Override + public Map committed(Set partitions) { + // TODO Auto-generated method stub + return null; + } + + @Override + public Map committed(Set partitions, Duration timeout) { + // TODO Auto-generated method stub + return null; + } + + @Override + public ConsumerGroupMetadata groupMetadata() { + // TODO Auto-generated method stub + return cgMetadata; + } + + @Override + public void enforceRebalance() { + throw new FeatureNotSupportedException("This feature is not suported for this release."); + + } + + @Override + public void enforceRebalance(String reason) { + throw new FeatureNotSupportedException("This feature is not suported for this release."); + + } + + @Override + public Uuid clientInstanceId(Duration timeout) { + throw new FeatureNotSupportedException("This feature is not suported for this release."); + } + + @Override + public OptionalLong currentLag(TopicPartition topicPartition) { + throw new FeatureNotSupportedException("This feature is not suported for this release."); + } + + private static class FetchResponseMetricAggregator { + private final FetchManagerMetrics sensors; + private final Set unrecordedPartitions; + + private final FetchMetrics fetchMetrics = new FetchMetrics(); + private final Map topicFetchMetrics = new HashMap<>(); + + private FetchResponseMetricAggregator(FetchManagerMetrics sensors, Set partitions) { + this.sensors = sensors; + this.unrecordedPartitions = partitions; + } + + /** + * After each partition is parsed, we update the current metric totals with the + * total bytes and number of records parsed. After all partitions have reported, + * we write the metric. 
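+	 * <p>
+	 * Illustrative call sequence (a sketch only; {@code tp0}, {@code tp1} and the
+	 * byte/record counts are placeholder values, and the aggregator is normally
+	 * driven by the fetch path rather than constructed by hand):
+	 * <pre>{@code
+	 * Set<TopicPartition> fetched = new HashSet<>(Arrays.asList(tp0, tp1));
+	 * FetchResponseMetricAggregator aggregator =
+	 *     new FetchResponseMetricAggregator(fetchManagerMetrics, fetched);
+	 * aggregator.record(tp0, 4096, 10); // totals updated, sensors not yet written
+	 * aggregator.record(tp1, 2048, 5);  // last partition reported: sensors recorded
+	 * }</pre>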
+ */ + public void record(TopicPartition partition, int bytes, int records) { + this.unrecordedPartitions.remove(partition); + this.fetchMetrics.increment(bytes, records); + + // collect and aggregate per-topic metrics + String topic = partition.topic(); + FetchMetrics topicFetchMetric = this.topicFetchMetrics.get(topic); + if (topicFetchMetric == null) { + topicFetchMetric = new FetchMetrics(); + this.topicFetchMetrics.put(topic, topicFetchMetric); + } + topicFetchMetric.increment(bytes, records); + + if (this.unrecordedPartitions.isEmpty()) { + // once all expected partitions from the fetch have reported in, record the + // metrics + this.sensors.bytesFetched.record(this.fetchMetrics.fetchBytes); + this.sensors.recordsFetched.record(this.fetchMetrics.fetchRecords); + + // also record per-topic metrics + for (Map.Entry entry : this.topicFetchMetrics.entrySet()) { + FetchMetrics metric = entry.getValue(); + this.sensors.recordTopicFetchMetrics(entry.getKey(), metric.fetchBytes, metric.fetchRecords); + } + } + } + + private static class FetchMetrics { + private int fetchBytes; + private int fetchRecords; + + protected void increment(int bytes, int records) { + this.fetchBytes += bytes; + this.fetchRecords += records; + } + } + } + + public static class FetchManagerMetrics { + private final Metrics metrics; + private FetchMetricsRegistry metricsRegistry; + private final Sensor bytesFetched; + private final Sensor recordsFetched; + private final Sensor fetchLatency; + + private int assignmentId = 0; + private Set assignedPartitions = Collections.emptySet(); + + private FetchManagerMetrics(Metrics metrics, FetchMetricsRegistry metricsRegistry) { + this.metrics = metrics; + this.metricsRegistry = metricsRegistry; + + this.bytesFetched = metrics.sensor("bytes-fetched"); + this.bytesFetched.add(metrics.metricInstance(metricsRegistry.fetchSizeAvg), new Avg()); + this.bytesFetched.add(metrics.metricInstance(metricsRegistry.fetchSizeMax), new Max()); + this.bytesFetched.add(new Meter(metrics.metricInstance(metricsRegistry.bytesConsumedRate), + metrics.metricInstance(metricsRegistry.bytesConsumedTotal))); + + this.recordsFetched = metrics.sensor("records-fetched"); + this.recordsFetched.add(metrics.metricInstance(metricsRegistry.recordsPerRequestAvg), new Avg()); + this.recordsFetched.add(new Meter(metrics.metricInstance(metricsRegistry.recordsConsumedRate), + metrics.metricInstance(metricsRegistry.recordsConsumedTotal))); + + this.fetchLatency = metrics.sensor("fetch-latency"); + this.fetchLatency.add(metrics.metricInstance(metricsRegistry.fetchLatencyAvg), new Avg()); + this.fetchLatency.add(metrics.metricInstance(metricsRegistry.fetchLatencyMax), new Max()); + this.fetchLatency + .add(new Meter(new WindowedCount(), metrics.metricInstance(metricsRegistry.fetchRequestRate), + metrics.metricInstance(metricsRegistry.fetchRequestTotal))); + /* + * this.recordsFetchLag = metrics.sensor("records-lag"); + * this.recordsFetchLag.add(metrics.metricInstance(metricsRegistry.recordsLagMax + * ), new Max()); + * + * this.recordsFetchLead = metrics.sensor("records-lead"); + * this.recordsFetchLead.add(metrics.metricInstance(metricsRegistry. + * recordsLeadMin), new Min()); + */ + } + + private void recordTopicFetchMetrics(String topic, int bytes, int records) { + // record bytes fetched + String name = "topic." 
+ topic + ".bytes-fetched"; + Sensor bytesFetched = this.metrics.getSensor(name); + if (bytesFetched == null) { + Map metricTags = Collections.singletonMap("topic", topic.replace('.', '_')); + + bytesFetched = this.metrics.sensor(name); + bytesFetched.add(this.metrics.metricInstance(metricsRegistry.topicFetchSizeAvg, metricTags), new Avg()); + bytesFetched.add(this.metrics.metricInstance(metricsRegistry.topicFetchSizeMax, metricTags), new Max()); + bytesFetched + .add(new Meter(this.metrics.metricInstance(metricsRegistry.topicBytesConsumedRate, metricTags), + this.metrics.metricInstance(metricsRegistry.topicBytesConsumedTotal, metricTags))); + } + bytesFetched.record(bytes); + + // record records fetched + name = "topic." + topic + ".records-fetched"; + Sensor recordsFetched = this.metrics.getSensor(name); + if (recordsFetched == null) { + Map metricTags = new HashMap<>(1); + metricTags.put("topic", topic.replace('.', '_')); + + recordsFetched = this.metrics.sensor(name); + recordsFetched.add(this.metrics.metricInstance(metricsRegistry.topicRecordsPerRequestAvg, metricTags), + new Avg()); + recordsFetched.add( + new Meter(this.metrics.metricInstance(metricsRegistry.topicRecordsConsumedRate, metricTags), + this.metrics.metricInstance(metricsRegistry.topicRecordsConsumedTotal, metricTags))); + } + recordsFetched.record(records); + } + + public void recordFetchLatency(double value) { + fetchLatency.record(value); + } + /* + * private void maybeUpdateAssignment(SubscriptionState subscription) { int + * newAssignmentId = subscription.assignmentId(); if (this.assignmentId != + * newAssignmentId) { Set newAssignedPartitions = + * subscription.assignedPartitions(); for (TopicPartition tp : + * this.assignedPartitions) { if (!newAssignedPartitions.contains(tp)) { + * metrics.removeSensor(partitionLagMetricName(tp)); + * metrics.removeSensor(partitionLeadMetricName(tp)); + * metrics.removeMetric(partitionPreferredReadReplicaMetricName(tp)); } } + * + * for (TopicPartition tp : newAssignedPartitions) { if + * (!this.assignedPartitions.contains(tp)) { MetricName metricName = + * partitionPreferredReadReplicaMetricName(tp); if (metrics.metric(metricName) + * == null) { metrics.addMetric( metricName, (Gauge) (config, now) -> + * subscription.preferredReadReplica(tp, 0L).orElse(-1) ); } } } + * + * this.assignedPartitions = newAssignedPartitions; this.assignmentId = + * newAssignmentId; } } + * + * private void recordPartitionLead(TopicPartition tp, long lead) { + * this.recordsFetchLead.record(lead); + * + * String name = partitionLeadMetricName(tp); Sensor recordsLead = + * this.metrics.getSensor(name); if (recordsLead == null) { Map + * metricTags = topicPartitionTags(tp); + * + * recordsLead = this.metrics.sensor(name); + * + * recordsLead.add(this.metrics.metricInstance(metricsRegistry. + * partitionRecordsLead, metricTags), new Value()); + * recordsLead.add(this.metrics.metricInstance(metricsRegistry. + * partitionRecordsLeadMin, metricTags), new Min()); + * recordsLead.add(this.metrics.metricInstance(metricsRegistry. + * partitionRecordsLeadAvg, metricTags), new Avg()); } recordsLead.record(lead); + * } + * + * private void recordPartitionLag(TopicPartition tp, long lag) { + * this.recordsFetchLag.record(lag); + * + * String name = partitionLagMetricName(tp); Sensor recordsLag = + * this.metrics.getSensor(name); if (recordsLag == null) { Map + * metricTags = topicPartitionTags(tp); recordsLag = this.metrics.sensor(name); + * + * recordsLag.add(this.metrics.metricInstance(metricsRegistry. 
+ * partitionRecordsLag, metricTags), new Value()); + * recordsLag.add(this.metrics.metricInstance(metricsRegistry. + * partitionRecordsLagMax, metricTags), new Max()); + * recordsLag.add(this.metrics.metricInstance(metricsRegistry. + * partitionRecordsLagAvg, metricTags), new Avg()); } recordsLag.record(lag); } + * + * private static String partitionLagMetricName(TopicPartition tp) { return tp + + * ".records-lag"; } + * + * private static String partitionLeadMetricName(TopicPartition tp) { return tp + * + ".records-lead"; } + * + * private MetricName partitionPreferredReadReplicaMetricName(TopicPartition tp) + * { Map metricTags = topicPartitionTags(tp); return + * this.metrics.metricInstance(metricsRegistry.partitionPreferredReadReplica, + * metricTags); } + * + * + * private Map topicPartitionTags(TopicPartition tp) { + * Map metricTags = new HashMap<>(2); metricTags.put("topic", + * tp.topic().replace('.', '_')); metricTags.put("partition", + * String.valueOf(tp.partition())); return metricTags; } + */ + } } diff --git a/clients/src/main/java/org/oracle/okafka/clients/consumer/OffsetAndMetadata.java b/clients/src/main/java/org/oracle/okafka/clients/consumer/OffsetAndMetadata.java deleted file mode 100644 index e48affd..0000000 --- a/clients/src/main/java/org/oracle/okafka/clients/consumer/OffsetAndMetadata.java +++ /dev/null @@ -1,94 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.clients.consumer; - -import java.io.Serializable; - -/** - * This feature is not yet supported. - */ -public class OffsetAndMetadata implements Serializable { - private final long offset; - private final String metadata; - public static final long INVALID_OFFSET = -1L; - public static final String NO_METADATA = ""; - /** - * Construct a new OffsetAndMetadata object for committing through {@link KafkaConsumer}. - * @param offset The offset to be committed - * @param metadata Non-null metadata - */ - public OffsetAndMetadata(long offset, String metadata) { - this.offset = offset; - // The server converts null metadata to an empty string. So we store it as an empty string as well on the client - // to be consistent. - if (metadata == null) - this.metadata = NO_METADATA; - else - this.metadata = metadata; - } - - /** - * Construct a new OffsetAndMetadata object for committing through {@link KafkaConsumer}. The metadata - * associated with the commit will be empty. 
- * @param offset The offset to be committed - */ - public OffsetAndMetadata(long offset) { - this(offset, ""); - } - - public long offset() { - return offset; - } - - public String metadata() { - return metadata; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - - OffsetAndMetadata that = (OffsetAndMetadata) o; - - if (offset != that.offset) return false; - return metadata.equals(that.metadata); - } - - @Override - public int hashCode() { - int result = (int) (offset ^ (offset >>> 32)); - result = 31 * result + metadata.hashCode(); - return result; - } - - @Override - public String toString() { - return "OffsetAndMetadata{" + - "offset=" + offset + - ", metadata='" + metadata + '\'' + - '}'; - } -} \ No newline at end of file diff --git a/clients/src/main/java/org/oracle/okafka/clients/consumer/OffsetAndTimestamp.java b/clients/src/main/java/org/oracle/okafka/clients/consumer/OffsetAndTimestamp.java deleted file mode 100644 index 397db19..0000000 --- a/clients/src/main/java/org/oracle/okafka/clients/consumer/OffsetAndTimestamp.java +++ /dev/null @@ -1,67 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.oracle.okafka.clients.consumer; - -import org.oracle.okafka.common.utils.Utils; - -/** - * A container class for offset and timestamp. 
- */ -public final class OffsetAndTimestamp { - private final long timestamp; - private final long offset; - - public OffsetAndTimestamp(long offset, long timestamp) { - this.offset = offset; - assert this.offset >= 0; - this.timestamp = timestamp; - assert this.timestamp >= 0; - } - - public long timestamp() { - return timestamp; - } - - public long offset() { - return offset; - } - - @Override - public String toString() { - return "(timestamp=" + timestamp + ", offset=" + offset + ")"; - } - - @Override - public int hashCode() { - return 31 * Utils.longHashcode(timestamp) + Utils.longHashcode(offset); - } - - @Override - public boolean equals(Object o) { - if (o == null || !(o instanceof OffsetAndTimestamp)) - return false; - OffsetAndTimestamp other = (OffsetAndTimestamp) o; - return this.timestamp == other.timestamp() && this.offset == other.offset(); - } -} diff --git a/clients/src/main/java/org/oracle/okafka/clients/consumer/OffsetCommitCallback.java b/clients/src/main/java/org/oracle/okafka/clients/consumer/OffsetCommitCallback.java deleted file mode 100644 index 328c85b..0000000 --- a/clients/src/main/java/org/oracle/okafka/clients/consumer/OffsetCommitCallback.java +++ /dev/null @@ -1,47 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.oracle.okafka.clients.consumer; - -import java.util.Collection; -import java.util.Map; - -import org.oracle.okafka.common.TopicPartition; - -/** - * A callback interface that the user can implement to trigger custom actions when a commit request completes. The callback - * may be executed in any thread calling {@link Consumer#poll(java.time.Duration) poll()}. - */ -public interface OffsetCommitCallback { - - /** - * A callback method the user can implement to provide asynchronous handling of commit request completion. - * This method will be called when the commit request sent to the server has been acknowledged. - * - * @param offsets A map of the offsets and associated metadata that this callback applies to - * @param exception The exception thrown during processing of the request, or null if the commit completed successfully - * @throws org.oracle.okafka.common.KafkaException for any other unrecoverable errors (e.g. if offset metadata - * is too large or if the committed offset is invalid). 
- */ - void onComplete(Map offsets, Exception exception); -} diff --git a/clients/src/main/java/org/oracle/okafka/clients/consumer/OffsetResetStrategy.java b/clients/src/main/java/org/oracle/okafka/clients/consumer/OffsetResetStrategy.java deleted file mode 100644 index d3cc027..0000000 --- a/clients/src/main/java/org/oracle/okafka/clients/consumer/OffsetResetStrategy.java +++ /dev/null @@ -1,39 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * 04/20/2020: This file is modified to support Kafka Java Client compatability to Oracle Transactional Event Queues. - * - */ - - -package org.oracle.okafka.clients.consumer; - -/** - * This feature is not yet supported - * - */ -public enum OffsetResetStrategy { - LATEST, EARLIEST, TO_OFFSET, NONE -} diff --git a/clients/src/main/java/org/oracle/okafka/clients/consumer/TxEQAssignor.java b/clients/src/main/java/org/oracle/okafka/clients/consumer/TxEQAssignor.java new file mode 100644 index 0000000..257e216 --- /dev/null +++ b/clients/src/main/java/org/oracle/okafka/clients/consumer/TxEQAssignor.java @@ -0,0 +1,586 @@ +/* +** OKafka Java Client version 23.4. +** +** Copyright (c) 2019, 2024 Oracle and/or its affiliates. +** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +*/ + +package org.oracle.okafka.clients.consumer; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.SortedSet; +import java.util.TreeSet; + +import org.apache.kafka.clients.consumer.internals.AbstractPartitionAssignor; +//import org.oracle.okafka.clients.consumer.internals.AbstractPartitionAssignor; +import org.apache.kafka.common.TopicPartition; +import org.oracle.okafka.common.internals.PartitionData; +import org.oracle.okafka.common.internals.SessionData; + +/** + * With Oracle Transactional Event Queue (TxEQ) and in Oracle RAC environment, different partitions of a Topic can be owned + * by different RAC instances. To get maximum throughput while consuming messages from these partitions, + * best case is to consume messages from local partitions. That means, if a session 1 is connected to Oracle RAC instance 1, + * then it is preferable to assign partitions owned by instance 1 to session 1. + * If session 1 is assigned a topic-partition owned by instance 2, then consuming messages from these 'Remote' partition + * involves additional overhead and slows down the performance. 
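+ * <p>
+ * For illustration (the topology below is hypothetical): on a 2-node RAC where a
+ * topic's partitions 0-2 are owned by instance 1 and partitions 3-5 by instance 2,
+ * with one consumer session connected to each instance, the preferred assignment is
+ * partitions 0-2 to the session on instance 1 and partitions 3-5 to the session on
+ * instance 2, so that every dequeue is served from locally owned partitions.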
+ * + * {@link TxEQAssignor#assign(org.apache.kafka.common.Cluster, org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupSubscription)} + * method tries to distribute partitions so that, preference is given to Local assignment. + * This follows below logic for assigning partitions. + * + * 1. Equal and fair distribution of partitions among alive consumer sessions + * 2. 1st Preference for Local Assignment + * 3. 2nd Preference for Sticky Assignment + * + */ + +public class TxEQAssignor extends AbstractPartitionAssignor { + + Map> instPListMap ; + Map> partitionMemberMap; + + public void setInstPListMap(Map> _instPListMap) + { + instPListMap = _instPListMap; + } + + public void setPartitionMemberMap(Map> _partitionMemberMap) + { + partitionMemberMap = _partitionMemberMap; + } + + + + + + // Not Invoked. To be removed + public Map> assign(Map partitionsPerTopic, + Map subscriptions, int oldVersion) + { + Map> assignment = new HashMap<>(); + Map partitionSessionMap = new HashMap(); + Map> instSessionDataMap = new HashMap>(); + + //System.out.println("TxEQAssignor 1:Creating assignment map for each member "); + for (String memberId : subscriptions.keySet()) + { + //System.out.println("TxEQAssignor 2:MemberID in Assignment Map " + memberId); + assignment.put(memberId, new ArrayList()); + + } + + /* create PartitionId to Session(TMember) mapping */ + for(String topicNow : partitionsPerTopic.keySet()) + { + ArrayList sDataForTopic = partitionMemberMap.get(topicNow); + for(SessionData sDataNow : sDataForTopic) + { + ArrayList instSDataList = instSessionDataMap.get(sDataNow.getInstanceId()); + if(instSDataList == null) + { + instSDataList = new ArrayList(); + instSessionDataMap.put(sDataNow.getInstanceId(),instSDataList); + } + instSDataList.add(sDataNow); + + List previousAssignment = sDataNow.getPreviousPartitions(); + for(PartitionData pDataNow : previousAssignment) + { + partitionSessionMap.put(pDataNow.getTopicPartition(),sDataNow); + + } + } + } + + /* + * Logic to Assign Partitions: + * FixedPartiton = Fixed number of partitions assigned to all sessions + * FloatingPartition: Some sessions can be assigned one additional partition. Number of such additional partitions is floatingPartitions + * + * Pass 1: FIXED + LOCAL + STICKY + * Sessions to be assigned Fixed number of partitions only. All assignment will be Local and Sticky. + * Pass 2: FIXED + LOCAL + * Sessions to be assigned Fixed number of partitions only. All assignment will be Local and new assignment ( non sticky). + * Pass 3: FLOATING + LOCAL + STICKY + * Some Sessions to be assigned Floating partitions. All assignment will be Local and Sticky. + * Pass 4: FLOATING + LOCAL + * Some Sessions to be assigned Floating partitions. All assignment will be Local and new assignment. + * Pass 5: REMOTE + STICKY + * Some Sessions to be assigned Fixed/Floating partitions. All assignment will be REMOTE and STICKY + * Pass 6: REMOTE + NEW + * Some Sessions to be assigned Fixed/Floating partitions. All assignment will be Remote and new assignment. + * + * Example: + * 2 Node RAC. 7 Partitions for a TOPIC T1. Partitions distributed 4 | 3 . i.e. Instance 1 owns 4 and instance 2 owns 3 partitions. + * Current 2 sessions connected to instance 1. 
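+	 * Sketch of one possible outcome for this example (assuming no previous,
+	 * sticky assignment): FixedPartition = 7 / 2 = 3 and there is 1 floating
+	 * partition. Both sessions are connected to instance 1, which owns 4
+	 * partitions, so one session can be assigned all 4 instance-1 partitions
+	 * locally (3 fixed + 1 floating) while the other session is assigned the 3
+	 * partitions owned by instance 2 remotely.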
+ * + * + * */ + + + for(String topicNow : partitionsPerTopic.keySet()) + { + Map instPCntMap = new HashMap(); + //Calculate how many partitions to be owned by each member + ArrayList memberList = partitionMemberMap.get(topicNow); + + int membersCnt = memberList.size(); + //System.out.println("TxEQAssignor: Total members to be assigned partitions are " + membersCnt); + int partitionCnt = partitionsPerTopic.get(topicNow); + int fixedPartitions = partitionCnt/membersCnt; + int floatingPartitions = partitionCnt%membersCnt; + + for(SessionData tMem : memberList) + { + tMem.pendingCnt = fixedPartitions; + tMem.oneMore = false; + } + + //Assigning Session to the partition from partition Map + + for(Integer instNow : instPListMap.keySet()) + { + ArrayList pInstList = instPListMap.get(instNow); + ArrayList removeList = new ArrayList(); + /* Local Sticky Assignment */ + for(Integer pNow : pInstList) + { + //System.out.println("TxEQAssignor: Assigning Partition Now from Instance " + pNow + " , " + instNow); + TopicPartition tpNow = new TopicPartition(topicNow, pNow); + SessionData tMemNow = partitionSessionMap.get(tpNow); + if(tMemNow != null) + { + // If Locally assigned and previously owned and pending count has not exhausted + if(tMemNow.getInstanceId() == instNow && ( (tMemNow.pendingCnt > 0) || ( tMemNow.pendingCnt == 0 && !tMemNow.oneMore && floatingPartitions > 0 ) )) + { + PartitionData teqP = new PartitionData(topicNow, tMemNow.getQueueId(), pNow, tMemNow.getSubscriberName(), tMemNow.getSubscriberId(),tMemNow.getInstanceId(), true); + tMemNow.addAssignedPartitions(teqP); + if(tMemNow.pendingCnt <= 0) + { + tMemNow.oneMore = true; + ArrayList instSDataList = instSessionDataMap.get(tMemNow.getInstanceId()); + instSDataList.remove(tMemNow); + } + else + tMemNow.pendingCnt--; + + /*System.out.println("TxEQAssignor: Partition " + pNow +" Assigned to (" + tMemNow.getSessionId() + ", " + tMemNow.getInstanceId()+"). Pending " + +" Partition Count for this member " + tMemNow.pendingCnt + " OnMore Assigned ? 
" + tMemNow.oneMore);*/ + + removeList.add(pNow); + } + } + } + pInstList.removeAll(removeList); + removeList.clear(); + + /* Local New Assignment */ + for(Integer pNow : pInstList) + { + + } + } + + + } + return assignment; + } + + @Override + public Map> assign(Map partitionsPerTopic, + Map subscriptions ) { + + Map> assignment = new HashMap<>(); + + + //System.out.println("TxEQAssignor: TxEQ Assign 1:Creating assignment map for each member "); + for (String memberId : subscriptions.keySet()) + { + //System.out.println("TxEQAssignor 2:MemberID in Assignment Map " + memberId); + assignment.put(memberId, new ArrayList()); + } + + Map> instSessionDataMap = new HashMap>(); + Map partitionSessionMap = new HashMap(); + + for(String topicNow : partitionsPerTopic.keySet()) + { + instSessionDataMap.clear(); + partitionSessionMap.clear(); + + //Calculate how many partitions to be owned by each member + ArrayList memberList = partitionMemberMap.get(topicNow); + int membersCnt = memberList.size(); + + //Actual Partitions created + int totalPartitionsCreated = 0; + for(int instNow : instPListMap.keySet()) + { + ArrayList pList = instPListMap.get(instNow); + if(pList != null) + { + totalPartitionsCreated += pList.size(); + } + } + + //System.out.println("TxEQAssignor 2.1: Total members to be assigned partitions are " + membersCnt); + //int partitionCnt = partitionsPerTopic.get(topicNow); + + //Consider total partitions actually created instead of partitions which are supposed to be created + // TxEQ creates partitions as and when message is published into the topic partitions. + int partitionCnt = totalPartitionsCreated; + + int partForAll = partitionCnt/membersCnt; + int partForSome = partitionCnt%membersCnt; + //System.out.println("TxEQAssignor 2.2: Partitions for All " + partForAll + " Partitions for Some " + partForSome); + // All sessions will minimally get partitions. + for(SessionData tMem : memberList) + { + tMem.pendingCnt = partForAll; + } + + /* + * Some sessions will get 1 additional partition. + * Below logic decides which sessions should be assigned 1 more partition. + * Ideally, pick sessions which can be assigned partition locally. + */ + if(partForSome > 0 ) + { + int maxInst = 0; // MAX Instance id where a partition is owned or a session is connected + + // Prepare map to see how sessions are spread across instances. 
+ for(SessionData sDataNow : memberList) + { + int instNow = sDataNow.getInstanceId(); + if(instNow > maxInst) + maxInst = instNow; + + ArrayList instSDataList = instSessionDataMap.get(instNow); + if(instSDataList == null) + { + instSDataList = new ArrayList(); + instSessionDataMap.put(instNow,instSDataList); + } + instSDataList.add(sDataNow); + //System.out.println("TxEQAssignor: Session count for inst " + instNow + " = " + instSDataList.size()); + + /* List previousAssignment = sDataNow.getPreviousPartitions(); + for(PartitionData pDataNow : previousAssignment) + { + partitionSessionMap.put(pDataNow.getTopicPartition(),sDataNow); + }*/ + } + for(int instNow : instPListMap.keySet()) + { + if(instNow > maxInst) { + maxInst = instNow; + } + } + + int extraPerInst[] = new int[maxInst]; + int extraPerInstMax[] = new int[maxInst]; + for(int i = 0 ; i instSessionDataList = instSessionDataMap.get(instNow); + //Sessions connected to this instance + int sessionCnt = instSessionDataList!=null?instSessionDataList.size():0; + // Maximum number of extra partitions that can be assigned to this instance + extraPerInstMax[instNow-1] = sessionCnt; + + // Number of extra partition that should be assigned locally at this instance + int moreForThisInst = pCnt - (sessionCnt * partForAll); + + //Minimum extra partitions must be assigned to this instance + int extraNow = (sessionCnt == 0 || pCnt == 0 )? 0 : Math.min(sessionCnt, moreForThisInst< 0 ? 0:moreForThisInst); + extraPerInst[instNow-1] = Math.min( partForSome, extraNow); + + //System.out.println("TxEQAssignor: Extra for Inst " + instNow + "= [" + extraPerInst[instNow-1] + "," + extraPerInstMax[instNow-1]+"]"); + partForSome = partForSome - extraPerInst[instNow-1]; + extraPerInstMax[instNow-1] = extraPerInstMax[instNow-1] - extraPerInst[instNow-1]; + + if(partForSome <= 0 ) + break; + } + + //Rest of the partitions can be distributed among any sessions as they will be assigned remotely + for(int i = 0; (i < maxInst && partForSome > 0) ; i++ ) + { + if(extraPerInstMax[i] > 0) + { + int extra = Math.min(extraPerInstMax[i], partForSome); + extraPerInst[i] += extra; + partForSome = partForSome - extraPerInst[i]; + extraPerInstMax[i] -= extra; + } + } + /*System.out.println("TxEQAssignor: Actual Extra per Partition "); + for(int i =0 ; i 0) + { + tMem.pendingCnt++; + extraPerInst[instNow-1]--; + } + //System.out.println("TxEQAssignor: Session " + tMem.name + " To be assigned " + tMem.pendingCnt); + } + } + + // Logic to actual assignment of partition to session based on pendingCnt + Iterator assigner = memberList.iterator(); + //System.out.println("TxEQAssignor: TxEQ Assign 3:Local Assignment"); + // Phase 1: Sticky-Local Assignment + while(assigner.hasNext()) + { + SessionData tMem = assigner.next(); + //System.out.println("TxEQAssignor: Member " + tMem.name + " Pending partitions " + tMem.pendingCnt); + if(tMem.pendingCnt <=0 ) + continue; + + ArrayList localPartitionList = instPListMap.get(tMem.getInstanceId()); + List previousPartitions = tMem.getPreviousPartitions(); + if(previousPartitions != null && previousPartitions.size() > 0) + { + for(PartitionData partNow : previousPartitions) + { + if(tMem.pendingCnt <= 0 ) + break; + + //Confirm that partition belongs to the local instance + //Check all partitions because due to instance shutdown a remote partition may have moved to local instance + int pIndex = localPartitionList.indexOf(partNow.getTopicPartition().partition()); + if(pIndex!= -1) + { + tMem.pendingCnt--; + tMem.addAssignedPartitions(partNow); 
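+							// Drop the partition from the instance's local list so the later
+							// (new-local and remote) phases cannot assign it a second time.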
+ localPartitionList.remove(pIndex); + //System.out.println("Partitoin " +partNow.getTopicPartition().partition() + " assigned to " + tMem.name +" Pending " + tMem.pendingCnt); + } + } + previousPartitions.removeAll(tMem.getAssignedPartitions()); + } + } + //System.out.println("TxEQAssignor 4:Local New Assignment"); + assigner = memberList.iterator(); + //Phase 2: New Local Assignment + while(assigner.hasNext()) + { + SessionData tMem = assigner.next(); + //System.out.println("TxEQAssignor: Member " + tMem.name + " Pending partitions " + tMem.pendingCnt); + if(tMem.pendingCnt <=0 ) + continue; + + ArrayList localPartitionList = instPListMap.get(tMem.getInstanceId()); + + if(localPartitionList == null || localPartitionList.size() == 0) + break; + + ArrayList assignedNow = new ArrayList(); + + for(Integer pNow : localPartitionList) + { + if(tMem.pendingCnt <= 0) + break; + + PartitionData teqP = new PartitionData(topicNow, tMem.getQueueId(), pNow, tMem.getSubscriberName(), tMem.getSubscriberId(),tMem.getInstanceId(), true); + tMem.addAssignedPartitions(teqP); + tMem.pendingCnt--; + assignedNow.add(pNow); + //System.out.println("TxEQAssignor: Partitoin " +pNow + " assigned to " + tMem.name +" Pending " + tMem.pendingCnt); + } + localPartitionList.removeAll(assignedNow); + assignedNow.clear(); + } + + assigner = memberList.iterator(); + //Phase 3: Sticky Remote Assignment + //System.out.println("TxEQAssignor: TxEQ Assign 5:Sticky Remote Assignment"); + while(assigner.hasNext()) + { + SessionData tMem = assigner.next(); + //System.out.println("TxEQAssignor: Member " + tMem.name + " Pending partitions " + tMem.pendingCnt); + + if(tMem.pendingCnt <= 0 ) + continue; + + List previousPartitions = tMem.getPreviousPartitions(); + if(previousPartitions != null && previousPartitions.size() > 0) + { + ArrayList removeList = new ArrayList(); + for(PartitionData pNow : previousPartitions) + { + if(tMem.pendingCnt <= 0 ) + break; + int pInstLookup = pNow.getOwnerInstanceId(); + ArrayList remotePartitionList = instPListMap.get(pInstLookup); + + //Search if Partition is still available in the expected partition list + int pIndex = remotePartitionList.indexOf(pNow.getTopicPartition().partition()); + if(pIndex == -1) + { + //lookup in other instances if partition is shifted due to instance shutdown + for(Integer instnow : instPListMap.keySet()) + { + if( instnow.intValue() == pInstLookup) + continue; + + remotePartitionList = instPListMap.get(instnow.intValue()); + pIndex = remotePartitionList.indexOf(pNow.getTopicPartition().partition()); + if(pIndex != -1) + { + pInstLookup = instnow.intValue(); + break; + } + } + } + //Partition is available in any of the instance partition list + if(pIndex != -1) + { + pNow.setOwnerInstanceId(pInstLookup); + pNow.setLocal(false); + tMem.pendingCnt--; + tMem.addAssignedPartitions(pNow); + remotePartitionList.remove(pIndex); + removeList.add(pNow); + } + } + previousPartitions.removeAll(removeList); + } + } + + assigner = memberList.iterator(); + //Phase 4: New Remote Assignment + //System.out.println("TxEQAssignor: TxEQ Assign 6:Remote New Assignment"); + while(assigner.hasNext()) + { + SessionData tMem = assigner.next(); + //System.out.println("TxEQAssignor: Member " + tMem.name + " Pending partitions " + tMem.pendingCnt); + if(tMem.pendingCnt <=0 ) + { + continue; + } + ArrayList remotePartitionList = null; + + for(Integer instnow : instPListMap.keySet()) + { + if(tMem.pendingCnt <=0 ) + break; + int instLookup = instnow.intValue(); + //Local Instance list is already 
parsed + if(instLookup == tMem.getInstanceId()) + continue; + + ArrayList removePList = new ArrayList(); + remotePartitionList = instPListMap.get(instLookup); + for(Integer pNow : remotePartitionList) + { + if(tMem.pendingCnt <=0 ) + break; + + PartitionData teqP = new PartitionData(topicNow, tMem.getQueueId(), pNow.intValue(), tMem.getSubscriberName(), tMem.getSubscriberId(), instLookup, false); + tMem.addAssignedPartitions(teqP); + tMem.pendingCnt--; + removePList.add(pNow); + } + remotePartitionList.removeAll(removePList); + removePList.clear(); + } + } + + assigner = memberList.iterator(); + //Create assignments now + //System.out.println("TxEQAssignor: Final Partition Map "); + while(assigner.hasNext()) + { + SessionData tMem = assigner.next(); + //System.out.println("TxEQAssignor: Session: " + tMem.name ); + List previousPartitions = tMem.getPreviousPartitions(); + List assignedToMe = assignment.get(tMem.name); + List assignedPartitionList = tMem.getAssignedPartitions(); + // If no partitions is assigned, set -1 as assigned partition + if(assignedPartitionList == null || assignedPartitionList.size() == 0) + { + PartitionData teqP = new PartitionData(topicNow, tMem.getQueueId(), -1, tMem.getSubscriberName(), tMem.getSubscriberId(), tMem.getInstanceId(), true); + assignedPartitionList.add(teqP); + assignedToMe.add(teqP.getTopicPartition()); + //System.out.println("TxEQAssignor: " + teqP.getTopicPartition().partition()); + previousPartitions.clear(); + continue; + } + for(PartitionData teqPNow : assignedPartitionList) + { + assignedToMe.add(teqPNow.getTopicPartition()); + //System.out.println("TxEQAssignor: " + teqPNow.getTopicPartition().partition()); + } + previousPartitions.clear(); + previousPartitions.addAll(tMem.getAssignedPartitions()); + } + } + return assignment; + } + + /* if(clusterNow == null) + { + //clusterNow = updateMetaData(); + } + List partByTopic = clusterNow.availablePartitionsForTopic(topicNow); + Map> instPListMap = new HashMap>(); + + //Prepare Map of Partitions per Instance + for(PartitionInfo pInfoNow : partByTopic) + { + Node leaderNode = pInfoNow.leader(); + Integer pForThisNode = instPCntMap.get(leaderNode.id()); + if(pForThisNode == null) { + instPCntMap.put(leaderNode.id(),0); + pForThisNode = 0; + ArrayList pInstInfoListNow = new ArrayList(); + instPListMap.put(leaderNode.id(), pInstInfoListNow); + } + instPCntMap.put(leaderNode.id(), pForThisNode+1); + instPListMap.get(leaderNode.id()).add(pInfoNow); + } + */ + + public List allPartitionsSorted(Map partitionsPerTopic, + Map subscriptions) { + SortedSet topics = new TreeSet<>(); + for (Subscription subscription : subscriptions.values()) + topics.addAll(subscription.topics()); + + List allPartitions = new ArrayList<>(); + for (String topic : topics) { + Integer numPartitionsForTopic = partitionsPerTopic.get(topic); + if (numPartitionsForTopic != null) + allPartitions.addAll(AbstractPartitionAssignor.partitions(topic, numPartitionsForTopic)); + } + return allPartitions; + } + + @Override + public String name() { + return "TxEQAssignor"; + } + +} diff --git a/clients/src/main/java/org/oracle/okafka/clients/consumer/internals/AQKafkaConsumer.java b/clients/src/main/java/org/oracle/okafka/clients/consumer/internals/AQKafkaConsumer.java index 31b5a14..307c14f 100644 --- a/clients/src/main/java/org/oracle/okafka/clients/consumer/internals/AQKafkaConsumer.java +++ b/clients/src/main/java/org/oracle/okafka/clients/consumer/internals/AQKafkaConsumer.java @@ -1,58 +1,88 @@ /* -** OKafka Java Client version 0.8. 
+** OKafka Java Client version 23.4. ** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. +** Copyright (c) 2019, 2024 Oracle and/or its affiliates. ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. */ package org.oracle.okafka.clients.consumer.internals; +import java.math.BigDecimal; +import java.sql.Array; import java.sql.CallableStatement; import java.sql.Connection; import java.sql.PreparedStatement; import java.sql.ResultSet; - +import java.sql.SQLException; +import java.sql.Statement; import java.sql.Types; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Set; import javax.jms.JMSException; import javax.jms.Message; +import javax.jms.Session; import javax.jms.Topic; import javax.jms.TopicConnection; import javax.jms.TopicSession; import javax.jms.TopicSubscriber; +import oracle.jdbc.OracleData; +import oracle.jdbc.OracleTypes; import oracle.jms.AQjmsBytesMessage; import oracle.jms.AQjmsConnection; import oracle.jms.AQjmsConsumer; import oracle.jms.AQjmsSession; -import org.oracle.okafka.clients.ClientRequest; -import org.oracle.okafka.clients.ClientResponse; +import org.apache.kafka.clients.ClientRequest; +import org.apache.kafka.clients.ClientResponse; +import org.oracle.okafka.clients.CommonClientConfigs; +import org.oracle.okafka.clients.Metadata; +import org.oracle.okafka.clients.NetworkClient; import org.oracle.okafka.clients.consumer.ConsumerConfig; -import org.oracle.okafka.clients.consumer.OffsetAndMetadata; +import org.apache.kafka.clients.consumer.ConsumerPartitionAssignor; +import org.apache.kafka.clients.consumer.OffsetAndMetadata; +import org.oracle.okafka.clients.consumer.TxEQAssignor; import org.oracle.okafka.common.Node; -import org.oracle.okafka.common.TopicPartition; +import org.apache.kafka.common.Cluster; +import org.apache.kafka.common.KafkaException; +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.metrics.Metrics; + +import org.oracle.okafka.common.internals.PartitionData; +import org.oracle.okafka.common.internals.QPATInfo; +import org.oracle.okafka.common.internals.QPATInfoList; +import org.oracle.okafka.common.internals.QPIMInfo; +import org.oracle.okafka.common.internals.QPIMInfoList; +import org.oracle.okafka.common.internals.SessionData; import org.oracle.okafka.common.network.AQClient; +import org.oracle.okafka.common.network.SelectorMetrics; import org.oracle.okafka.common.protocol.ApiKeys; import org.oracle.okafka.common.requests.CommitRequest; import org.oracle.okafka.common.requests.CommitResponse; +import org.oracle.okafka.common.requests.ConnectMeRequest; +import org.oracle.okafka.common.requests.ConnectMeResponse; import org.oracle.okafka.common.requests.FetchRequest; import org.oracle.okafka.common.requests.FetchResponse; +import org.oracle.okafka.common.requests.JoinGroupRequest; +import org.oracle.okafka.common.requests.JoinGroupResponse; +import org.oracle.okafka.common.requests.MetadataResponse; import org.oracle.okafka.common.requests.OffsetResetRequest; import org.oracle.okafka.common.requests.OffsetResetResponse; import org.oracle.okafka.common.requests.SubscribeRequest; import org.oracle.okafka.common.requests.SubscribeResponse; +import org.oracle.okafka.common.requests.SyncGroupRequest; +import org.oracle.okafka.common.requests.SyncGroupResponse; import org.oracle.okafka.common.requests.UnsubscribeResponse; import 
org.oracle.okafka.common.utils.ConnectionUtils; -import org.oracle.okafka.common.utils.LogContext; +import org.apache.kafka.common.utils.LogContext; import org.oracle.okafka.common.utils.MessageIdConverter; -import org.oracle.okafka.common.utils.Time; -import org.oracle.okafka.common.utils.Utils; +import org.oracle.okafka.common.utils.MessageIdConverter.OKafkaOffset; +import org.apache.kafka.common.utils.Time; /** * This class consumes messages from AQ @@ -64,16 +94,39 @@ public final class AQKafkaConsumer extends AQClient{ private final ConsumerConfig configs; private final Time time; private String msgIdFormat = "00"; - public AQKafkaConsumer(LogContext logContext, ConsumerConfig configs, Time time) + private List assignors; + private final SelectorMetrics selectorMetrics; + private Metadata metadata; + + private boolean skipConnectMe = false; + private boolean externalConn = false; + + public AQKafkaConsumer(LogContext logContext, ConsumerConfig configs, Time time, Metadata metadata,Metrics metrics) + { super(logContext.logger(AQKafkaConsumer.class), configs); + System.setProperty("oracle.jms.conservativeNavigation","1"); this.configs = configs; this.topicConsumersMap = new HashMap(); this.time =time; + this.metadata = metadata; + this.selectorMetrics = new SelectorMetrics(metrics, "Selector", Collections.emptyMap(),true); + this.selectorMetrics.recordConnectionCount(topicConsumersMap);; + + } + + public void setAssignors(List _assignores ) + { + assignors = _assignores; } public ClientResponse send(ClientRequest request) { - return parseRequest(request, request.apiKey()); + this.selectorMetrics.requestCompletedSend(request.destination()); + ClientResponse cr = parseRequest(request, ApiKeys.convertToOracleApiKey(request.apiKey())); + if(cr!=null) { + this.selectorMetrics.recordCompletedReceive(cr.destination(),cr.requestLatencyMs()); + } + return cr; } /** @@ -83,18 +136,28 @@ public ClientResponse send(ClientRequest request) { * @return response for given request */ private ClientResponse parseRequest( ClientRequest request, ApiKeys key) { - if(key == ApiKeys.FETCH) + switch(key) + { + case FETCH: return receive(request); - if(key == ApiKeys.COMMIT) - return commit(request); - if(key == ApiKeys.SUBSCRIBE) + case COMMIT: + return commit(request); + case SUBSCRIBE: return subscribe(request); - if(key == ApiKeys.OFFSETRESET) - return seek(request); - if(key == ApiKeys.UNSUBSCRIBE) + case UNSUBSCRIBE: return unsubscribe(request); + case OFFSETRESET: + return seek(request); + case JOIN_GROUP: + return joinGroup(request); + case SYNC_GROUP: + return syncGroup(request); + case METADATA: + return getMetadata(request); + case CONNECT_ME: + return connectMe(request); + } return null; - } /** @@ -105,11 +168,12 @@ private ClientResponse parseRequest( ClientRequest request, ApiKeys key) { * @return */ public ClientResponse receive(ClientRequest request) { - Node node =request.destination(); + Node node =metadata.getNodeById(Integer.parseInt(request.destination())); FetchRequest.Builder builder = (FetchRequest.Builder)request.requestBuilder(); FetchRequest fetchRequest = builder.build(); String topic = fetchRequest.topic(); long timeoutMs = fetchRequest.pollTimeout(); + boolean disconnected = false; try { if(!topicConsumersMap.containsKey(node) ) { topicConsumersMap.put(node, new TopicConsumers(node)); @@ -117,45 +181,82 @@ public ClientResponse receive(ClientRequest request) { Message[] messages = null; TopicConsumers consumers = topicConsumersMap.get(node); TopicSubscriber subscriber = 
consumers.getTopicSubscriber(topic); - messages = ((AQjmsConsumer)subscriber).bulkReceive(configs.getInt(ConsumerConfig.MAX_POLL_RECORDS_CONFIG), timeoutMs); + log.debug("Invoking bulkReceive"); + int maxRecords = configs.getInt(ConsumerConfig.MAX_POLL_RECORDS_CONFIG); + if(maxRecords == 1) + { + Message msg = subscriber.receive(timeoutMs); + if(msg != null) + { + messages = new Message[1]; + messages[0] = msg; + } + } + else + { + messages = ((AQjmsConsumer)subscriber).bulkReceive(configs.getInt(ConsumerConfig.MAX_POLL_RECORDS_CONFIG), timeoutMs); + } + log.debug("After bulkreceive. #Message = " + (messages!=null?messages.length:"0") ); if(messages == null) - return createFetchResponse(request, topic, Collections.emptyList(), false); + return createFetchResponse(request, topic, Collections.emptyList(), false, null); List msgs = new ArrayList<>(); - for(int i=0; i < messages.length;i++) { if(messages[i] instanceof AQjmsBytesMessage) - msgs.add((AQjmsBytesMessage)messages[i]); + { + msgs.add((AQjmsBytesMessage)messages[i]); + } else { + log.debug("Received other than AQjmsBytesMessage"); try { - int partition = messages[i].getIntProperty("partition"); - - long offset = MessageIdConverter.getOffset(messages[i].getJMSMessageID()); + int partition = messages[i].getIntProperty(AQClient.PARTITION_PROPERTY); + OKafkaOffset okOffset = MessageIdConverter.getOKafkaOffset(messages[i].getJMSMessageID(), true, true); + long offset = okOffset.getOffset(); log.error("Message is not an instance of AQjmsBytesMessage: Topic {} partition {} offset{}",topic, partition, offset ); - } catch(JMSException exception) { + } catch(Exception exception) { //do nothing - } - } } - - return createFetchResponse(request, topic, msgs, false); + + return createFetchResponse(request, topic, msgs, false, null); } catch(JMSException exception) { - if(!exception.getErrorCode().equals("120")) { + log.debug("Exception in bulkReceive " + exception.getMessage(),exception ); + Exception linkedException = exception.getLinkedException(); + int errorCode = 0; + if(linkedException != null && linkedException instanceof SQLException ) + { + errorCode = ((SQLException)linkedException).getErrorCode(); + } + + if(errorCode != 24003) + { + try { + errorCode = Integer.parseInt(exception.getErrorCode()); + }catch(Exception parseEx) { + //Do Nothing. 
Keep original ErrorCode + } + } + log.debug("Dequeue Error Code = " + errorCode); + //If not Rebalancing Error and not Transient error then + if(!(errorCode == 24003 || errorCode == 120)) { + log.warn("Exception from bulkReceive " + exception.getMessage(), exception); close(node); + disconnected = true; log.error("failed to receive messages from topic: {}", topic); } - return createFetchResponse(request, topic, Collections.emptyList(), true); + return createFetchResponse(request, topic, Collections.emptyList(), disconnected, exception); } catch(Exception ex) { + log.error("Exception from bulkReceive " + ex, ex ); close(node); - return createFetchResponse(request, topic, Collections.emptyList(), true); + disconnected = true; + return createFetchResponse(request, topic, Collections.emptyList(), true, ex); } } - private ClientResponse createFetchResponse(ClientRequest request, String topic, List messages, boolean disconnected) { - return new ClientResponse(request.makeHeader(), request.callback(), request.destination(), - request.createdTimeMs(), time.milliseconds(), disconnected, - new FetchResponse(topic, messages)); + private ClientResponse createFetchResponse(ClientRequest request, String topic, List messages, boolean disconnected, Exception exception) { + return new ClientResponse(request.makeHeader((short)1), request.callback(), request.destination(), + request.createdTimeMs(), time.milliseconds(), disconnected, null,null, + new FetchResponse(topic, messages, exception)); } /** @@ -168,32 +269,45 @@ public ClientResponse commit(ClientRequest request) { Map offsets = commitRequest.offsets(); Map result = new HashMap<>(); boolean error = false; + log.debug("Commit Nodes. " + nodes.size()); for(Map.Entry> node : nodes.entrySet()) { - if(node.getValue().size() > 0) { TopicConsumers consumers = topicConsumersMap.get(node.getKey()); try { - - consumers.getSession().commit(); + log.debug("Committing now for node " + node.toString()); + TopicSession jmsSession =consumers.getSession(); + if(jmsSession != null) + { + log.debug("Committing now for node " + node.toString()); + jmsSession.commit(); + log.debug("Commit done"); + }else { + log.info("No valid session to commit for node " + node); + } result.put(node.getKey(), null); } catch(JMSException exception) { error = true; result.put(node.getKey(), exception); } + catch(Exception e) + { + log.error("Exception from commit " + e, e); + } + } + else { + log.info("Not Committing on Node " + node); } } return createCommitResponse(request, nodes, offsets, result, error); - - } private ClientResponse createCommitResponse(ClientRequest request, Map> nodes, Map offsets, Map result, boolean error) { - return new ClientResponse(request.makeHeader(), request.callback(), request.destination(), - request.createdTimeMs(), time.milliseconds(), false, + return new ClientResponse(request.makeHeader((short)1), request.callback(), request.destination(), + request.createdTimeMs(), time.milliseconds(), false, null,null, new CommitResponse(result, nodes, offsets, error)); } @@ -204,7 +318,7 @@ private String getMsgIdFormat(Connection con, String topic ) PreparedStatement msgFrmtStmt = null; ResultSet rs = null; try{ - String enqoteTopic = Utils.enquote(topic); + String enqoteTopic = ConnectionUtils.enquote(topic); String msgFrmtTxt = "select msgid from "+enqoteTopic+" where rownum = ?"; msgFrmtStmt = con.prepareStatement(msgFrmtTxt); msgFrmtStmt.setInt(1,1); @@ -235,7 +349,7 @@ private class SeekInput { int seekType; String seekMsgId; public SeekInput() { - priority = 
1; + priority = -1; } } @@ -269,7 +383,7 @@ public ClientResponse seek(ClientRequest request) { try { OffsetResetRequest.Builder builder = (OffsetResetRequest.Builder)request.requestBuilder(); OffsetResetRequest offsetResetRequest = builder.build(); - Node node = request.destination(); + Node node = metadata.getNodeById(Integer.parseInt(request.destination())); Map offsetResetTimestamps = offsetResetRequest.offsetResetTimestamps(); Map> offsetResetTimeStampByTopic = new HashMap>() ; for(Map.Entry offsetResetTimestamp : offsetResetTimestamps.entrySet()) { @@ -285,6 +399,7 @@ public ClientResponse seek(ClientRequest request) { SeekInput[] seekInputs = null; String[] inArgs = new String[5]; + int indx =0; for(Map.Entry> offsetResetTimestampOfTopic : offsetResetTimeStampByTopic.entrySet()) { String topic = offsetResetTimestampOfTopic.getKey(); inArgs[0] = "Topic: " + topic + " "; @@ -295,7 +410,7 @@ public ClientResponse seek(ClientRequest request) { int inputSize = offsetResetTimestampOfTopic.getValue().entrySet().size(); seekInputs = new SeekInput[inputSize]; - int indx =0; + for(Map.Entry offsets : offsetResetTimestampOfTopic.getValue().entrySet()) { seekInputs[indx] = new SeekInput(); try { @@ -314,10 +429,11 @@ else if( offsets.getValue() == -1L) { seekInputs[indx].seekType = SeekInput.SEEK_MSGID; // Seek to MessageId inArgs[2]= "Seek Type: " + seekInputs[indx].seekType; inArgs[3] ="Seek to Offset: " + offsets.getValue(); - seekInputs[indx].seekMsgId = MessageIdConverter.getMsgId(tp, offsets.getValue(), msgIdFormat); - inArgs[4] = "Seek To MsgId: "+seekInputs[indx].seekMsgId ; + seekInputs[indx].seekMsgId = MessageIdConverter.getMsgId(tp, offsets.getValue(), msgIdFormat, 0); + inArgs[4] = "Seek To MsgId: "+seekInputs[indx].seekMsgId ; validateMsgId(seekInputs[indx].seekMsgId); } + indx++; }catch(IllegalArgumentException e ) { String errorMsg = ""; for(int i =0; i offsets : offsetResetTimestampOfTopic.getValue().entrySet()) { + responses.put(offsets.getKey(), null); + } + } catch(Exception e) { for(Map.Entry offsets : offsetResetTimestampOfTopic.getValue().entrySet()) { responses.put(offsets.getKey(), e); } - }finally { + }finally { if(seekStmt != null) { try { seekStmt.close(); @@ -377,11 +506,11 @@ else if( offsets.getValue() == -1L) { }// While Topics } catch(Exception e) { - return new ClientResponse(request.makeHeader(), request.callback(), request.destination(), - request.createdTimeMs(), time.milliseconds(), true, new OffsetResetResponse(responses, e)); + return new ClientResponse(request.makeHeader((short)1), request.callback(), request.destination(), + request.createdTimeMs(), time.milliseconds(), true, null,null, new OffsetResetResponse(responses, e)); } - return new ClientResponse(request.makeHeader(), request.callback(), request.destination(), - request.createdTimeMs(), time.milliseconds(), false, new OffsetResetResponse(responses, null)); + return new ClientResponse(request.makeHeader((short)1), request.callback(), request.destination(), + request.createdTimeMs(), time.milliseconds(), false, null,null, new OffsetResetResponse(responses, null)); } private ClientResponse unsubscribe(ClientRequest request) { @@ -397,31 +526,725 @@ private ClientResponse unsubscribe(ClientRequest request) { response.put(topicSubscriber.getKey(), jms); } } - try { ((AQjmsSession)topicConsumersByNode.getValue().getSession()).close(); + ((AQjmsConnection)topicConsumersByNode.getValue().getConnection()).close(); topicConsumersByNode.getValue().setSession(null); - } catch(JMSException jms) { - 
//log.error("Failed to close session: {} associated with connection: {} and node: {} ", consumers.getSession(), consumers.getConnection(), node ); + topicConsumersByNode.getValue().setConnection(null); + // ToDo: Delete User_queue_partition_assignment_table entry for this Consumer Session from Database + // Execute DBMS_TEQK.AQ$_REMOVE_SESSION() + + } + + catch(JMSException jms) { + //log.error("Failed to close session: {} associated with connection: {} and node: {} ", consumers.getSession(), topicConsumersMap.getConnection(), node ); } } - return new ClientResponse(request.makeHeader(), request.callback(), request.destination(), - request.createdTimeMs(), time.milliseconds(), false, new UnsubscribeResponse(response)); + + topicConsumersMap.clear(); + return new ClientResponse(request.makeHeader((short)1), request.callback(), request.destination(), + request.createdTimeMs(), time.milliseconds(), true,null,null, new UnsubscribeResponse(response)); + } + + private ClientResponse getMetadata(ClientRequest request) { + Connection conn = null; + Node node = null; + Cluster cluster = null; + //Cluster used for this metadata is still a bootstrap cluster and does not have all necessary information + //Pick any random node from the bootstrap nodes and send metadata request. + if(metadata.isBootstrap()) + { + cluster = metadata.fetch(); + List clusterNodes = NetworkClient.convertToOracleNodes(cluster.nodes()); + // Check if we have a node where connection already exists + Set nodesWithConn = topicConsumersMap.keySet(); + for(Node nodeNow: clusterNodes) + { + for(Node connectedNode : nodesWithConn) + { + if(connectedNode.equals(nodeNow)) + { + //Found a node with a connection to database. + node = nodeNow; + break; + } + } + } + if(node == null) + { + //No node with connection yet. Pick the first bootstrap node. + node = clusterNodes.get(0); + log.debug("No Connected Node Found. Picked first of bootstrap nodes.: " + node); + } + } + else + { + node = (org.oracle.okafka.common.Node)metadata.getNodeById(Integer.parseInt(request.destination())); + } + try { + TopicConsumers tConsumer = topicConsumersMap.get(node); + if(tConsumer == null) + throw new NullPointerException("TConsumer for Node "+ node); + + TopicSession tSession = tConsumer.getSession(); + if(tSession == null) + throw new NullPointerException ("TSesion for TConsumer for node" + node); + + conn = ((AQjmsSession)topicConsumersMap.get(node).getSession()).getDBConnection(); + } catch(JMSException jmsExcp) { + try { + log.trace("Unexcepted error occured with connection to node {}, closing the connection", request.destination()); + topicConsumersMap.get(node).getConnection().close(); + log.trace("Connection with node {} is closed", request.destination()); + } catch(JMSException jmsEx) { + log.trace("Failed to close connection with node {}", request.destination()); + } + } + + ClientResponse response = getMetadataNow(request, conn, node, metadata.updateRequested()); + + MetadataResponse metadataresponse = (MetadataResponse)response.responseBody(); + + org.apache.kafka.common.Cluster updatedCluster = metadataresponse.cluster(); + + for(String topic: updatedCluster.topics()) { + try { + super.fetchQueueParameters(topic, conn, metadata.topicParaMap); + } catch (SQLException e) { + log.error("Exception while fetching TEQ parameters and updating metadata " + e.getMessage()); + } + } + + + if(response.wasDisconnected()) { + topicConsumersMap.remove(node); + metadata.requestUpdate(); + } + return response; + } + + /** Sends a join_group request to TEQ. 
JOIN_GROUP call returns a list of sessions that are part of rebalancing and their previous assignment. + * @param request join group request + * @return + */ + private ClientResponse joinGroup(ClientRequest request) { + log.debug("Sending AQ Join Group Request"); + JoinGroupRequest.Builder builder = (JoinGroupRequest.Builder)request.requestBuilder(); + JoinGroupRequest joinRequest= builder.build(); + SessionData sessionData = joinRequest.getSessionData(); + CallableStatement joinStmt = null; + int sessionId = -1; + int instId = -1; + int joinGroupVersion = sessionData.getVersion(); + + try { + Node node = metadata.getNodeById(Integer.parseInt(request.destination())); + log.debug("Destination Node : " + node.toString()); + TopicConsumers consumers = topicConsumersMap.get(node); + Connection con = ((AQjmsSession)consumers.getSession()).getDBConnection(); + + final String qpimLstType = "SYS.AQ$_QPIM_INFO_LIST"; + final String qpatLstType = "SYS.AQ$_QPAT_INFO_LIST"; + log.debug("Assigned partition Size " + sessionData.getAssignedPartitions().size()); + QPATInfo[] a = new QPATInfo[sessionData.getAssignedPartitions().size()]; + int ind = 0; + for(PartitionData pData: sessionData.getAssignedPartitions()) { + QPATInfo qpat = new QPATInfo(); + //qpat.setSchema(sessionData.getSchema() != null ? ConnectionUtils.enquote(sessionData.getSchema().toUpperCase()) : null); + qpat.setSchema(sessionData.getSchema() != null ? (sessionData.getSchema().toUpperCase()) : null); + //qpat.setQueueName(ConnectionUtils.enquote(pData.getTopicPartition().topic().toUpperCase())); + qpat.setQueueName((pData.getTopicPartition().topic().toUpperCase())); + qpat.setQueueId(pData.getQueueId()); + String subscriberNameIn =pData.getSubName() == null ? configs.getString(ConsumerConfig.GROUP_ID_CONFIG).toUpperCase(): pData.getSubName().toUpperCase(); + /*if(subscriberNameIn != null) { + subscriberNameIn = ConnectionUtils.enquote(subscriberNameIn); + }*/ + qpat.setSubscriberName(subscriberNameIn); + qpat.setSubscriberId(pData.getSubId()); + qpat.setGroupLeader(sessionData.getLeader()); + qpat.setPartitionId(pData.getTopicPartition().partition() == -1 ? -1 : pData.getTopicPartition().partition() *2); + //System.out.println("Setting partition for this qpat to " + qpat.getPartitionId()); + qpat.setFlags(-1); + qpat.setVersion(sessionData.getVersion()); + qpat.setInstId(sessionData.getInstanceId()); + qpat.setSessionId(sessionData.getSessionId()); + qpat.setAuditId(sessionData.getAuditId()); + qpat.setTimeStamp(new java.sql.Time(System.currentTimeMillis())); + //qpat.setTimeStamp(new java.sql.Time(sessionData.createTime.getTime())); + a[ind] = qpat; + ind++; + } + + QPATInfoList qpatl = new QPATInfoList(); + qpatl.setArray(a); + joinStmt = con.prepareCall("{call DBMS_TEQK.AQ$_JOIN_GROUP(?, ?, ?, ? 
)}"); + joinStmt.setObject(1, qpatl, OracleTypes.ARRAY); + joinStmt.setInt(4, joinGroupVersion); + joinStmt.registerOutParameter(1, OracleTypes.ARRAY, qpatLstType); + joinStmt.registerOutParameter(2, OracleTypes.ARRAY, qpimLstType); + joinStmt.registerOutParameter(3, Types.INTEGER); + joinStmt.registerOutParameter(4, Types.INTEGER); + log.debug("Executing DBMS_TEQK.AQ$_JOIN_GROUP"); + joinStmt.execute(); + + QPATInfo[] qpatInfo = ((QPATInfoList)qpatl.create(joinStmt.getObject(1), 2002)).getArray(); + QPIMInfoList qpiml = new QPIMInfoList(); + OracleData odata = ((QPIMInfoList)qpiml.create(joinStmt.getObject(2), 2002)); + QPIMInfo[] qpimInfo = null; + if(odata != null) { + qpimInfo = ((QPIMInfoList)odata).getArray(); + } + + log.debug("Return from DBMS_TEQK.AQ$_JOIN_GROUP. QPATINFO Size " +qpatInfo.length ); + for(int i = 0; i < qpatInfo.length; i++) + { + + log.debug("QPAT[" +i +"]:(Inst,Session,GroupLeader,Partition,Flag,Version#) = ("+ + qpatInfo[i].getInstId()+","+qpatInfo[i].getSessionId()+"," + + qpatInfo[i].getGroupLeader()+","+qpatInfo[i].getPartitionId()+"," + + qpatInfo[i].getFlags()+","+qpatInfo[i].getVersion()); + } + + sessionId = getSessionId(con); + instId = getInstId(con); + return createJoinGroupResponse(request, sessionId, instId, qpatInfo, qpimInfo, joinStmt.getInt(4), null, false); + } catch(Exception exception) { + boolean disconnected = false; + log.error("Exception while executing JoinGroup " + exception.getMessage() , exception); + if(exception instanceof SQLException ) + { + SQLException sqlExcp = (SQLException)exception; + int errorCode = sqlExcp.getErrorCode(); + log.error("SQL Error:ORA-" + errorCode); + if(errorCode == 28 || errorCode == 17410) { + disconnected = true; + } + } + return createJoinGroupResponse(request, sessionId, instId, null, null, -1, exception, disconnected); + } + finally { + try { + if(joinStmt != null) + joinStmt.close(); + } catch(Exception ex) { + //do nothing + } + } + } + + private int getSessionId(Connection con) throws SQLException { + + Statement st = null; + ResultSet rs = null; + + try { + String sessionIdStr = ((oracle.jdbc.internal.OracleConnection)con).getServerSessionInfo().getProperty("AUTH_SESSION_ID"); + return Integer.parseInt(sessionIdStr); + }catch(Exception e) + { + // Failed to get session id from connection object. Execute query to find session id now + } + try { + + st = con.createStatement(); + rs = st.executeQuery("select sys_context('USERENV', 'SID') from dual"); + if(rs.next() ) { + return rs.getInt(1); + } + }catch(SQLException sqlException) { + //do nothing + } finally { + try { + if(rs != null) + rs.close(); + }catch(SQLException exception) { + + } + try { + if(st != null) + st.close(); + }catch(SQLException exception) { + + } + } + + throw new SQLException("Error in fetching Session Id"); + + } + + private int getInstId(Connection con) throws SQLException { + Statement st = null; + ResultSet rs = null; + try { + String instIdStr = ((oracle.jdbc.internal.OracleConnection)con).getServerSessionInfo().getProperty("AUTH_INSTANCE_NO"); + return Integer.parseInt(instIdStr); + }catch(Exception e) + { + //Failed to get instance number from connection object. 
Do Query now + } + try { + st = con.createStatement(); + rs = st.executeQuery("select sys_context('USERENV', 'INSTANCE') from dual"); + if(rs.next() ) { + return rs.getInt(1); + } + }catch(SQLException sqlException) { + //do nothing + } finally { + try { + if(rs != null) + rs.close(); + }catch(SQLException exception) { + + } + try { + if(st != null) + st.close(); + }catch(SQLException exception) { + + } + + } + + throw new SQLException("Error in fetching Instance Id"); + + } + + public int getSubcriberCount(Node node, String topic) throws SQLException { + int count =0; + PreparedStatement Stmt = null; + ResultSet rs = null; + Connection con; + try { + con = ((AQjmsSession)topicConsumersMap.get(node).getSession()).getDBConnection(); + String query = "select count(*) from user_durable_subs where name = :1 and queue_name = :2"; + Stmt = con.prepareStatement(query); + Stmt.setString(1, configs.getString(ConsumerConfig.GROUP_ID_CONFIG)); + Stmt.setString(2, topic); + rs = Stmt.executeQuery(); + + if(rs.next()) { + count = rs.getInt(1); + return count; + } + }catch(SQLException sqlException) { + //do nothing + } catch (JMSException e) { + //do nothing + } finally { + try { + if(rs != null) + rs.close(); + }catch(SQLException exception) { + + } + try { + if(Stmt != null) + Stmt.close(); + }catch(SQLException exception) { + + } + + } + + throw new SQLException("Error in getting the subscriber count"); + } + + public String getoffsetStartegy() { + return configs.getString(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG); } + + /* Returns a list of sessions that are part of rebalancing and their previous assignment */ + private ClientResponse createJoinGroupResponse(ClientRequest request, int sessionId, int instId, QPATInfo[] qpatInfo, QPIMInfo[] qpimInfo, int version, Exception exception, boolean disconnected) { + + Map memberPartitionMap = new HashMap(); + List partitions = new ArrayList<>(); + int leader = 0; + int length = 0; + if(qpatInfo != null) + length = qpatInfo.length; + + log.debug("Creating Join Group Response. QPAT Length: " +length); + try { + if(disconnected) + { + throw exception; + } + + if(qpatInfo != null) { + //Check if This session is Leader or not + for(int ind = 0; ind < length; ind++) { + if(qpatInfo[ind].getSessionId() == sessionId && qpatInfo[ind].getInstId() == instId ) { + if(qpatInfo[ind].getGroupLeader() == 1) + leader = 1; + break; + } + } + log.debug("Leader of the group? 
" + leader); + if(leader == 1) + { + //Set Partition ownership map + Map> instPListMap = new HashMap>(); + Map partitionInstMap = new HashMap(); + + String topic = qpatInfo[0]!=null?qpatInfo[0].getQueueName():null; + + if(qpimInfo != null && qpimInfo.length > 0) + { + log.debug("Partitions Created for topic " + +qpatInfo[0].getSchema()+"."+qpatInfo[0].getQueueName()+ " = " +qpimInfo.length); + for(QPIMInfo qpimNow : qpimInfo) + { + if(topic == null) + topic = qpimNow.getQueueName(); + + int instNow = qpimNow.getOwnerInstId(); + ArrayList pInstListNow = instPListMap.get(instNow); + if(pInstListNow == null) + { + pInstListNow = new ArrayList(); + instPListMap.put(instNow, pInstListNow); + } + pInstListNow.add(qpimNow.getPartitionId()/2); + partitionInstMap.put(qpimNow.getPartitionId()/2, instNow); + } + } + else + { + log.info("No partition yet created for Topic "+ + qpatInfo[0].getSchema()+"."+qpatInfo[0].getQueueName()); + } + ArrayList membersList = new ArrayList(); + for(QPATInfo qpatNow: qpatInfo) + { + + if(qpatNow.name == null ) + { + qpatNow.name = qpatNow.getInstId() +"_"+ qpatNow.getSessionId(); + } + + try { + //System.out.println("TxEQAssignor:Printing QPat " + qpatNow.name); + //System.out.println("TxEQAssignor:"+ qpatNow.toString()); + }catch(Exception e) + { + log.error("Exception while printing qpat " + qpatNow.name + " exception: " + e.getMessage()); + } + + String name = qpatNow.name; + SessionData teqSession = memberPartitionMap.get(name); + List teqPList = null; + if(teqSession == null) + { + teqSession = new SessionData( qpatNow.getSessionId(), qpatNow.getInstId(), + qpatNow.getSchema(),qpatNow.getQueueName(), qpatNow.getQueueId(), qpatNow.getSubscriberName(), + qpatNow.getSubscriberId(), null, // qpatNow.getTimeStamp()== null?new java.util.Date():new java.util.Date(qpatNow.getTimeStamp().getTime()), + qpatNow.getGroupLeader(), qpatNow.getVersion(), qpatNow.getAuditId()); + //System.out.println("createJoinGroupResponse 1: qpat With queue Id and subscriber ID " + qpatNow.getQueueId() + " " + qpatNow.getSubscriberId()); + log.debug("Member Added " + teqSession); + membersList.add(teqSession); + memberPartitionMap.put(name, teqSession); + } + if(qpatNow.getPartitionId() >= 0) + { + teqPList = teqSession.getPreviousPartitions(); + int pIdNow = qpatNow.getPartitionId()/2; + int ownerPid = partitionInstMap.get(pIdNow); + //System.out.println("createJoinGroupResponse 2: Partition " + pIdNow + " Added to " +qpatNow.name); + teqPList.add(new PartitionData(qpatNow.getQueueName(), qpatNow.getQueueId(), pIdNow, qpatNow.getSubscriberName(), qpatNow.getSubscriberId(), ownerPid, ownerPid == qpatNow.getInstId()? true:false)); + } + } + log.debug("Invoking Assignors"); + for(ConsumerPartitionAssignor assignor : assignors) + { + if(assignor instanceof TxEQAssignor) + { + log.debug("Using TEQ Assignor. "); + TxEQAssignor txEQAssignor = (TxEQAssignor) assignor; + txEQAssignor.setInstPListMap(instPListMap); + Map> topicMemberMap = new HashMap>(); + topicMemberMap.put(topic,membersList); + log.debug("Setting topicMembership Map. 
Member List Size " + membersList.size() +" Map Size " + topicMemberMap.size()); + txEQAssignor.setPartitionMemberMap(topicMemberMap); + } + } + } + if(qpimInfo != null) + { + for(int ind = 0; ind < qpimInfo.length; ind++) { + partitions.add(new PartitionData(qpimInfo[ind].getQueueName(), qpatInfo[0].getQueueId(), qpimInfo[ind].getPartitionId()/2, qpatInfo[0].getSubscriberName(), qpatInfo[0].getSubscriberId(), qpimInfo[ind].getOwnerInstId() , false)); + } + } + } + } catch(Exception excp) { + if(excp instanceof SQLException) + { + SQLException sqlEx = (SQLException)excp; + log.error("Exception in creating Join Group response " + sqlEx.getMessage(), sqlEx); + } + memberPartitionMap.clear(); + partitions.clear(); + leader = -1; + } + JoinGroupResponse jgResponse = new JoinGroupResponse(memberPartitionMap, partitions, leader, version, exception); + log.debug("Join Group Response Created"); + return new ClientResponse(request.makeHeader((short)1), request.callback(), request.destination(), + request.createdTimeMs(), time.milliseconds(), disconnected, null,null, jgResponse); + } + /** + * Sends a SYNC request to TEQ. Leader session performs assignment to all sessions which are part of consumer group(or participating in rebalancing) from subscribed topic using partition assignor and this assignment is sent to TEQ in sync group request. + * Follower sends an empty request. + * @param request + * @return + */ + private ClientResponse syncGroup(ClientRequest request) { + SyncGroupRequest.Builder builder = (SyncGroupRequest.Builder)request.requestBuilder(); + SyncGroupRequest syncRequest= builder.build(); + List sData = syncRequest.getSessionData(); + CallableStatement syncStmt = null; + Connection con = null; + //System.out.println("SyncGroup 1: Sending Sync Group Now"); + try { + Node node = metadata.getNodeById(Integer.parseInt(request.destination())); + TopicConsumers consumers = topicConsumersMap.get(node); + con = ((AQjmsSession)consumers.getSession()).getDBConnection(); + + final String typeList = "SYS.AQ$_QPAT_INFO_LIST"; + int size = 0; + + for(SessionData data : sData) { + size += data.getAssignedPartitions().size(); + } + log.debug("Before Sync, Assigned Partition List size "+ size); + QPATInfo[] a = new QPATInfo[size]; + + int ind = 0; + for(SessionData sessionData : sData) { + for(PartitionData pData: sessionData.getAssignedPartitions()) { + + QPATInfo qpat = new QPATInfo(); + //qpat.setSchema(sessionData.getSchema() != null ? ConnectionUtils.enquote(sessionData.getSchema().toUpperCase()) : null); + qpat.setSchema(sessionData.getSchema() != null ? (sessionData.getSchema().toUpperCase()) : null); + + //qpat.setQueueName(ConnectionUtils.enquote(pData.getTopicPartition().topic().toUpperCase())); + qpat.setQueueName((pData.getTopicPartition().topic().toUpperCase())); + qpat.setQueueId(sessionData.getQueueId()); + String subscriberNameIn =pData.getSubName() == null ? 
configs.getString(ConsumerConfig.GROUP_ID_CONFIG).toUpperCase(): pData.getSubName().toUpperCase(); + /*if(subscriberNameIn != null) { + subscriberNameIn = ConnectionUtils.enquote(subscriberNameIn); + }*/ + qpat.setSubscriberName(subscriberNameIn); + qpat.setSubscriberId(sessionData.getSubscriberId()); + qpat.setGroupLeader(sessionData.getLeader()); + int pId = pData.getTopicPartition().partition(); + // If partitions assigned is -1 then keep it as it is, else multiply with 2 as for TEQ partitions are even numbered + if(pId > 0 ) + pId *= 2; + + qpat.setPartitionId(pId); + qpat.setFlags(2); // DBMS_TEQK.ASSIGNED + qpat.setVersion(sessionData.getVersion()); + qpat.setInstId(sessionData.getInstanceId()); + qpat.setSessionId(sessionData.getSessionId()); + qpat.setAuditId(sessionData.getAuditId()); + + qpat.setTimeStamp(new java.sql.Time(System.currentTimeMillis())); + a[ind] = qpat; + ind++; + } + } + + QPATInfoList qpatl = new QPATInfoList(); + if(a.length > 0) + qpatl.setArray(a); + syncStmt = con.prepareCall("{call DBMS_TEQK.AQ$_SYNC(?, ?)}"); + syncStmt.setObject(1, qpatl, OracleTypes.ARRAY); + syncStmt.setInt(2, syncRequest.getVersion()); + syncStmt.registerOutParameter(1, OracleTypes.ARRAY, typeList); + syncStmt.registerOutParameter(2, Types.INTEGER); + //System.out.println("SyncGroup 8: Executing SYNC Procedure now"); + syncStmt.execute(); + //System.out.println("SyncGroup 9: Retrieved Response. creating qpatInfo array now"); + QPATInfo[] qpatInfo = ((QPATInfoList)qpatl.create(syncStmt.getObject(1), 2002)).getArray(); + + log.debug("Return from DBMS_TEQK.AQ$_SYNC. QPATINFO Size " +qpatInfo.length ); + for(int i = 0; i < qpatInfo.length; i++) + { + + log.debug("QPAT[" +i +"]:(Inst,Session,GroupLeader,Partition,Flag,Version#) = ("+ + qpatInfo[i].getInstId()+","+qpatInfo[i].getSessionId()+"," + + qpatInfo[i].getGroupLeader()+","+qpatInfo[i].getPartitionId()+"," + + qpatInfo[i].getFlags()+","+qpatInfo[i].getVersion()); + } + + //System.out.println("SyncGroup 10 : Sync Response Received. Assigned Partition count " + qpatInfo.length); + return createSyncResponse(request, qpatInfo, syncStmt.getInt(2), null, false); + } catch(Exception exception) { + boolean disconnected = false; + log.error("Exception in syncGroup " + exception.getMessage(), exception); + if(exception instanceof SQLException) + { + SQLException sqlExcp = (SQLException) exception; + int sqlErrorCode = sqlExcp.getErrorCode(); + if(sqlErrorCode == 28 || sqlErrorCode == 17410) + disconnected = true; + } + return createSyncResponse(request, null, -1, exception, disconnected); + } finally { + try { + if(syncStmt != null) + syncStmt.close(); + }catch(SQLException sqlException) { + //do nothing + } + } + } + + /* Returns a list of partitions assigned to this session */ + private ClientResponse createSyncResponse(ClientRequest request, QPATInfo[] qpatInfo, int version, Exception exception, boolean disconnected) { + SessionData data = null; + try { + if(exception == null) { + //System.out.println("Processing Sync Response. 
Printing Assigned Paritions"); + if(qpatInfo.length > 0) { + //System.out.println("Response for session : "+ qpatInfo[0].getInstId()+"_"+qpatInfo[0].getSessionId()); + + data = new SessionData(qpatInfo[0].getSessionId(), qpatInfo[0].getInstId(), qpatInfo[0].getSchema(), qpatInfo[0].getQueueName(), + qpatInfo[0].getQueueId(), qpatInfo[0].getSubscriberName(), qpatInfo[0].getSubscriberId(), new java.util.Date(), + /*new java.sql.Date(qpatInfo[0].getTimeStamp().getTime()), */ qpatInfo[0].getGroupLeader(), qpatInfo[0].getVersion(), qpatInfo[0].getAuditId()); + } + + for(int ind = 0; ind < qpatInfo.length; ind++) { + int pId = qpatInfo[ind].getPartitionId(); + if(pId >0) + pId = pId/2; + + //System.out.println("TxEQAssignor:Assigned Partition : " + pId); + data.addAssignedPartitions(new PartitionData(qpatInfo[ind].getQueueName(), qpatInfo[ind].getQueueId(), pId, + qpatInfo[ind].getSubscriberName(), qpatInfo[ind].getSubscriberId(), qpatInfo[ind].getInstId(), data.getInstanceId()==qpatInfo[ind].getInstId()?true:false )); + } + } + } catch(Exception ex) { + log.error("Exception from createSyncResponse " +ex, ex); + ex.printStackTrace(); + exception = ex; + } + + return new ClientResponse(request.makeHeader((short)1), request.callback(), request.destination(), + request.createdTimeMs(), time.milliseconds(), disconnected, null,null, new SyncGroupResponse(data, version, exception)); + } + + public ClientResponse connectMe(ClientRequest request) + { + ConnectMeRequest.Builder builder = (ConnectMeRequest.Builder)request.requestBuilder(); + ConnectMeRequest connectMeRequest= builder.build(); + Node nodeNow = metadata.getNodeById(Integer.parseInt(request.destination())); + TopicConsumers consumers = topicConsumersMap.get(nodeNow); + ConnectMeResponse connMeResponse = null; + + if(consumers != null) + { + try { + Connection conn = ((AQjmsSession)consumers.getSession()).getDBConnection(); + connMeResponse = connectMe(connectMeRequest, conn); + } catch(Exception e) + { + log.error("Exception while executing DBMS_TEQK.AQ$_connect_me " + e.getMessage(), e); + } + } + + if(connMeResponse == null) + { + connMeResponse = new ConnectMeResponse(); + connMeResponse.setInstId(0); + } + + return new ClientResponse(request.makeHeader((short)1), request.callback(), request.destination(), + request.createdTimeMs(), time.milliseconds(), false, null,null, connMeResponse ); + } + + + private ConnectMeResponse connectMe(ConnectMeRequest connMeRequest, Connection conn) + { + int instId = 0; + String url = ""; + int flags = 0; + ConnectMeResponse connMeResponse = new ConnectMeResponse(); + connMeResponse.setInstId(0); + + String connectProc = " call DBMS_TEQK.AQ$_CONNECT_ME( schema => :1 , queue_name => :2 , subscriber_name => :3 , inst_id => :4, url => :5, flags => :6, p_list => :7) "; + try (CallableStatement connectMeStmt = conn.prepareCall(connectProc)) { + + String schemaName = connMeRequest.getSchemaName(); + if(schemaName == null) + { + try { + schemaName = conn.getMetaData().getUserName(); + }catch(Exception e) + { + schemaName = ""; // Oracle DB Server will pick the current schema + } + } + connectMeStmt.setString(1, schemaName); + connectMeStmt.setString(2, connMeRequest.getToipcName()); + connectMeStmt.setString(3, connMeRequest.getGroupId()); + connectMeStmt.registerOutParameter(4, java.sql.Types.NUMERIC); + connectMeStmt.registerOutParameter(5, java.sql.Types.VARCHAR); + connectMeStmt.registerOutParameter(6, java.sql.Types.NUMERIC); + connectMeStmt.registerOutParameter(7, OracleTypes.ARRAY, 
"DBMS_TEQK.PARTITION_LIST"); + connectMeStmt.execute(); + + instId = connectMeStmt.getInt(4); + url = connectMeStmt.getString(5); + flags = connectMeStmt.getInt(6); + Array pArray = connectMeStmt.getArray(7); + BigDecimal[] partitionArr = (BigDecimal[])pArray.getArray(); + + connMeResponse.setInstId(instId); + connMeResponse.setUrl(url); + connMeResponse.setFlags(flags); + connMeResponse.setPartitionList(partitionArr); + log.info("Preferred Broker: " + instId+ " URL " + url); + + } catch(Exception connMeEx) + { + log.error("Exception while executing DBMS_TEQK.AQ$_CONNECTME " + connMeEx, connMeEx); + } + + ArrayList nodeList = connMeResponse.processUrl(); + String security = configs.getString(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG); + boolean plainText = security.equalsIgnoreCase("PLAINTEXT")?true:false; + if(nodeList != null) + { + for(Node nodeNow: nodeList) + { + if( (plainText && nodeNow.protocol().equalsIgnoreCase("TCP")) || + (!plainText && nodeNow.protocol().equalsIgnoreCase("TCPS"))) + { + connMeResponse.setPreferredNode(nodeNow); + break; + } + } + } + + return connMeResponse; + } + public void connect(Node node) throws JMSException{ if(!topicConsumersMap.containsKey(node)) { TopicConsumers nodeConsumers = null; try { nodeConsumers = new TopicConsumers(node); topicConsumersMap.put(node, nodeConsumers); + this.selectorMetrics.maybeRegisterConnectionMetrics(node); + this.selectorMetrics.connectionCreated.record(); } catch(JMSException e) { + log.error("Exception while creating Topic consumer " + e, e ); close(node, nodeConsumers); throw e; } - - } } @@ -462,16 +1285,21 @@ private void close(Node node, TopicConsumers consumers) { } } try { - ((AQjmsSession)consumers.getSession()).close(); + if ((AQjmsSession)consumers.getSession() != null) { + ((AQjmsSession)consumers.getSession()).close(); + } } catch(JMSException jms) { log.error("Failed to close session: {} associated with connection: {} and node: {} ", consumers.getSession(), consumers.getConnection(), node ); } try { - ((AQjmsConnection)consumers.getConnection()).close(); + if (consumers.getConnection() != null) { + ((AQjmsConnection)consumers.getConnection()).close(); + } + this.selectorMetrics.connectionClosed.record(); } catch(JMSException jms) { log.error("Failed to close connection: {} associated with node: {} ", consumers.getConnection(), node ); } - topicConsumersMap.remove(node); + //topicConsumersMap.remove(node); } /** * Creates topic connection to given node if connection doesn't exist. @@ -489,10 +1317,10 @@ private void createTopicConnection(Node node, int mode) throws JMSException { consumers = new TopicConsumers(node, mode); topicConsumersMap.put(node, consumers); } - } public ClientResponse subscribe(ClientRequest request) { + for(Map.Entry topicConsumersByNode: topicConsumersMap.entrySet()) { for(Map.Entry topicSubscriber : topicConsumersByNode.getValue().getTopicSubscriberMap().entrySet()) { @@ -503,10 +1331,17 @@ public ClientResponse subscribe(ClientRequest request) { //do nothing } } - + // ToDo:Check if we need this or not. Ideally when consumer is closed, not committed messages should be rolled back. 
try { - ((AQjmsSession)topicConsumersByNode.getValue().getSession()).close(); - topicConsumersByNode.getValue().setSession(null); + AQjmsSession sess = ((AQjmsSession)topicConsumersByNode.getValue().getSession()); + if(sess.children() > 0) + { + // ToDo: Maintain list of topic subscribed with this topicConsumerMap and remove only those which are not needed + log.info("Remove possible old subscribers from this KafkaConsumer"); + sess.close(); + topicConsumersByNode.getValue().setSession(null); + } + } catch(JMSException jms) { //log.error("Failed to close session: {} associated with connection: {} and node: {} ", consumers.getSession(), consumers.getConnection(), node ); } @@ -514,30 +1349,117 @@ public ClientResponse subscribe(ClientRequest request) { SubscribeRequest.Builder builder = (SubscribeRequest.Builder)request.requestBuilder(); SubscribeRequest commitRequest = builder.build(); String topic = commitRequest.getTopic(); - Node node = request.destination(); + Node node = metadata.getNodeById(Integer.parseInt(request.destination())); try { if(!topicConsumersMap.containsKey(node) ) { topicConsumersMap.put(node, new TopicConsumers(node)); } TopicConsumers consumers = topicConsumersMap.get(node); consumers.getTopicSubscriber(topic); - } catch(JMSException exception) { + metadata.setDBVersion(consumers.getDBVersion()); + } catch(JMSException exception) { + log.error("Exception during Subscribe request " + exception, exception); + log.info("Exception during Subscribe request. " + exception); + log.info("Closing connection to node. " + node); close(node); return createSubscribeResponse(request, topic, exception, false); } - return createSubscribeResponse(request, topic, null, false); - } private ClientResponse createSubscribeResponse(ClientRequest request, String topic, JMSException exception, boolean disconnected) { - return new ClientResponse(request.makeHeader(), request.callback(), request.destination(), - request.createdTimeMs(), time.milliseconds(), disconnected, + return new ClientResponse(request.makeHeader((short)1), request.callback(), request.destination(), + request.createdTimeMs(), time.milliseconds(), disconnected, null,null, new SubscribeResponse(topic, exception)); } + + public Connection getDBConnection(Node n) throws KafkaException + { + Connection dbConn = null; + try { + //Poll is not invoked yet + if(topicConsumersMap == null || topicConsumersMap.isEmpty()) + { + log.trace("Poll is not invoked yet. Creating database connection for the first time "); + TopicConsumers consumerToNode = new TopicConsumers((org.oracle.okafka.common.Node)n, Session.SESSION_TRANSACTED ); + topicConsumersMap.put((org.oracle.okafka.common.Node)n, consumerToNode); + } + + for(Node nodeNow: topicConsumersMap.keySet()) + { + if(nodeNow.id() == n.id()) + { + TopicConsumers tConsumer = topicConsumersMap.get(nodeNow); + dbConn = tConsumer.getDBConnection(); + log.debug("Returning Connection for node " + nodeNow.toString() ); + if(dbConn != null && !dbConn.isClosed()) + { + return dbConn; + } + else + { + log.debug("Connection to Node " + nodeNow + " is " + ((dbConn==null)?"null.":"closed.")); + } + } + } + }catch(Exception e) + { + throw new KafkaException("Failed to fetch Oracle Database Connection for this consumer", e); + } + + return dbConn; + } + + public Connection getDBConnection() throws KafkaException + { + Connection dbConn = null; + try { + //Poll is not invoked yet + if(topicConsumersMap == null || topicConsumersMap.isEmpty()) + { + log.trace("Poll is not invoked yet. 
Creating database connection for the first time "); + List bootStrapNodes = metadata.fetch().nodes(); + for(org.apache.kafka.common.Node bNode : bootStrapNodes) + { + TopicConsumers bootStrapConsumer = new TopicConsumers((org.oracle.okafka.common.Node)bNode, Session.SESSION_TRANSACTED ); + topicConsumersMap.put((org.oracle.okafka.common.Node)bNode, bootStrapConsumer); + break; + } + // A connection was created before poll is invoked. This connection is passed on to the application and may be used for transactionala activity. + // OKafkaConsumer must use this connection going forward. Hence it should not look for optimal database instance to consume records. Hence avoid connectMe call. + skipConnectMe = true; + } + for(Node n: topicConsumersMap.keySet()) + { + TopicConsumers tConsumer = topicConsumersMap.get(n); + dbConn = tConsumer.getDBConnection(); + if(dbConn != null && !dbConn.isClosed()) + return dbConn; + } + }catch(Exception e) + { + throw new KafkaException("Failed to fetch Oracle Database Connection for this consumer", e); + } + + return dbConn; + } + + public boolean skipConnectMe() + { + return this.skipConnectMe; + } + public void setSkipConnectMe(boolean skipConnectMe) + { + this.skipConnectMe = skipConnectMe; + } + + public boolean isExternalConn() + { + return this.externalConn; + } /**This class is used to create and manage connection to database instance. * Also creates, manages session associated with each connection and topic consumers associated with each session */ @@ -545,14 +1467,51 @@ private final class TopicConsumers { private TopicConnection conn = null; private TopicSession sess = null; private Map topicSubscribers = null; - private final Node node ; + private final Node node; + private String dbVersion; public TopicConsumers(Node node) throws JMSException { this(node, TopicSession.AUTO_ACKNOWLEDGE); } public TopicConsumers(Node node,int mode) throws JMSException { this.node = node; conn = createTopicConnection(node); + sess = createTopicSession(mode); + try { + Connection oConn = ((AQjmsSession)sess).getDBConnection(); + int instId = Integer.parseInt(((oracle.jdbc.internal.OracleConnection)oConn).getServerSessionInfo().getProperty("AUTH_INSTANCE_NO")); + String serviceName = ((oracle.jdbc.internal.OracleConnection)oConn).getServerSessionInfo().getProperty("SERVICE_NAME"); + String instanceName = ((oracle.jdbc.internal.OracleConnection)oConn).getServerSessionInfo().getProperty("INSTANCE_NAME"); + String user = oConn.getMetaData().getUserName(); + try { + String sessionId = ((oracle.jdbc.internal.OracleConnection)oConn).getServerSessionInfo().getProperty("AUTH_SESSION_ID"); + String serialNum = ((oracle.jdbc.internal.OracleConnection)oConn).getServerSessionInfo().getProperty("AUTH_SERIAL_NUM"); + String serverPid = ((oracle.jdbc.internal.OracleConnection)oConn).getServerSessionInfo().getProperty("AUTH_SERVER_PID"); + + log.info("Database Consumer Session Info: "+ sessionId +","+serialNum+". 
Process Id " + serverPid +" Instance Name "+ instanceName); + + try { + this.dbVersion = ConnectionUtils.getDBVersion(oConn); + }catch(Exception e) + { + log.error("Exception whle fetching DB Version " + e); + } + + }catch(Exception e) + { + log.error("Exception wnile getting database session information " + e); + } + + node.setId(instId); + node.setService(serviceName); + node.setInstanceName(instanceName); + node.setUser(user); + node.updateHashCode(); + }catch(Exception e) + { + log.error("Exception while getting instance id from conneciton " + e, e); + } + topicSubscribers = new HashMap<>(); } /** @@ -563,7 +1522,7 @@ public TopicConsumers(Node node,int mode) throws JMSException { */ public TopicConnection createTopicConnection(Node node) throws JMSException { if(conn == null) - conn = ConnectionUtils.createTopicConnection(node, configs); + conn = ConnectionUtils.createTopicConnection(node, configs, log); return conn; } @@ -594,7 +1553,7 @@ public TopicSession createTopicSession(int mode) throws JMSException { */ private TopicSubscriber createTopicSubscriber(String topic) throws JMSException { refresh(node); - Topic dest = ((AQjmsSession)sess).getTopic(ConnectionUtils.getUsername(configs), topic); + Topic dest = ((AQjmsSession)sess).getTopic((node!=null&&node.user()!=null)?node.user():ConnectionUtils.getUsername(configs), topic); TopicSubscriber subscriber = sess.createDurableSubscriber(dest, configs.getString(ConsumerConfig.GROUP_ID_CONFIG)); topicSubscribers.put(topic, subscriber); return subscriber; @@ -612,6 +1571,14 @@ public TopicSession getSession() { return sess; } + public Connection getDBConnection() throws JMSException + { + if(sess == null) + return null; + else + return ((AQjmsSession)sess).getDBConnection(); + } + public Map getTopicSubscriberMap() { return topicSubscribers; } @@ -628,5 +1595,11 @@ public void remove(String topic) { topicSubscribers.remove(topic); } + public String getDBVersion() + { + return dbVersion; + } + } + } diff --git a/clients/src/main/java/org/oracle/okafka/clients/consumer/internals/ConsumerInterceptors.java b/clients/src/main/java/org/oracle/okafka/clients/consumer/internals/ConsumerInterceptors.java deleted file mode 100644 index 7164b74..0000000 --- a/clients/src/main/java/org/oracle/okafka/clients/consumer/internals/ConsumerInterceptors.java +++ /dev/null @@ -1,110 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
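The getDBConnection() accessors and commit plumbing added to AQKafkaConsumer above are what let an application dequeue records and run its own JDBC work inside the same local database transaction. The following is a minimal sketch of that usage, assuming the public org.oracle.okafka.clients.consumer.KafkaConsumer exposes getDBConnection() on top of these internals; the listener address, service name, topic name and table name are placeholders, not values taken from this patch.

```java
// Hedged sketch, not part of this patch: consume records and insert into a
// placeholder table within the same database transaction, then commit once.
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.oracle.okafka.clients.consumer.KafkaConsumer;

public class TransactionalConsumeSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:1521");        // database listener host:port (placeholder)
        props.put("oracle.service.name", "FREEPDB1");             // placeholder service name
        props.put("oracle.net.tns_admin", "/path/to/ojdbc_dir");  // directory containing ojdbc.properties
        props.put("security.protocol", "PLAINTEXT");
        props.put("group.id", "MY_CONSUMER_GROUP");
        props.put("enable.auto.commit", "false");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("TXEQ"));   // placeholder topic
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(10_000));

            // JDBC connection backing the dequeue; DML issued here joins the same
            // local transaction as the records fetched above. Do not close it.
            Connection conn = consumer.getDBConnection();
            try (PreparedStatement ps = conn.prepareStatement(
                    "insert into processed_events(payload) values (?)")) {   // placeholder table
                for (ConsumerRecord<String, String> rec : records) {
                    ps.setString(1, rec.value());
                    ps.executeUpdate();
                }
            }
            // commitSync() maps to a database commit on that connection, so the
            // dequeues and the inserts are made durable together.
            consumer.commitSync();
        }
    }
}
```

Because the offset commit is a database commit on the same connection, a failure before commitSync() rolls back both the dequeued records and the inserts.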
- */ -package org.oracle.okafka.clients.consumer.internals; - - -import org.oracle.okafka.clients.consumer.ConsumerInterceptor; -import org.oracle.okafka.clients.consumer.ConsumerRecords; -import org.oracle.okafka.clients.consumer.OffsetAndMetadata; -import org.oracle.okafka.common.TopicPartition; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.Closeable; -import java.util.List; -import java.util.Map; - -/** - * A container that holds the list {@link org.oracle.okafka.clients.consumer.ConsumerInterceptor} - * and wraps calls to the chain of custom interceptors. - */ -public class ConsumerInterceptors implements Closeable { - private static final Logger log = LoggerFactory.getLogger(ConsumerInterceptors.class); - private final List> interceptors; - - public ConsumerInterceptors(List> interceptors) { - this.interceptors = interceptors; - } - - /** - * This is called when the records are about to be returned to the user. - *
    - * This method calls {@link ConsumerInterceptor#onConsume(ConsumerRecords)} for each - * interceptor. Records returned from each interceptor get passed to onConsume() of the next interceptor - * in the chain of interceptors. - *
    - * This method does not throw exceptions. If any of the interceptors in the chain throws an exception, - * it gets caught and logged, and next interceptor in the chain is called with 'records' returned by the - * previous successful interceptor onConsume call. - * - * @param records records to be consumed by the client. - * @return records that are either modified by interceptors or same as records passed to this method. - */ - public ConsumerRecords onConsume(ConsumerRecords records) { - ConsumerRecords interceptRecords = records; - for (ConsumerInterceptor interceptor : this.interceptors) { - try { - interceptRecords = interceptor.onConsume(interceptRecords); - } catch (Exception e) { - // do not propagate interceptor exception, log and continue calling other interceptors - log.warn("Error executing interceptor onConsume callback", e); - } - } - return interceptRecords; - } - - /** - * This is called when commit request returns successfully from the broker. - *
    - * This method calls {@link ConsumerInterceptor#onCommit(Map)} method for each interceptor. - *
    - * This method does not throw exceptions. Exceptions thrown by any of the interceptors in the chain are logged, but not propagated. - * - * @param offsets A map of offsets by partition with associated metadata - */ - public void onCommit(Map offsets) { - for (ConsumerInterceptor interceptor : this.interceptors) { - try { - interceptor.onCommit(offsets); - } catch (Exception e) { - // do not propagate interceptor exception, just log - log.warn("Error executing interceptor onCommit callback", e); - } - } - } - - /** - * Closes every interceptor in a container. - */ - @Override - public void close() { - for (ConsumerInterceptor interceptor : this.interceptors) { - try { - interceptor.close(); - } catch (Exception e) { - log.error("Failed to close consumer interceptor ", e); - } - } - } -} diff --git a/clients/src/main/java/org/oracle/okafka/clients/consumer/internals/ConsumerMetrics.java b/clients/src/main/java/org/oracle/okafka/clients/consumer/internals/ConsumerMetrics.java deleted file mode 100644 index 8141f7a..0000000 --- a/clients/src/main/java/org/oracle/okafka/clients/consumer/internals/ConsumerMetrics.java +++ /dev/null @@ -1,57 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.oracle.okafka.clients.consumer.internals; - -import java.util.ArrayList; -import java.util.HashSet; -import java.util.List; -import java.util.Set; - -import org.oracle.okafka.common.MetricNameTemplate; -import org.oracle.okafka.common.metrics.Metrics; - -public class ConsumerMetrics { - - public FetcherMetricsRegistry fetcherMetrics; - - public ConsumerMetrics(Set metricsTags, String metricGrpPrefix) { - this.fetcherMetrics = new FetcherMetricsRegistry(metricsTags, metricGrpPrefix); - } - - public ConsumerMetrics(String metricGroupPrefix) { - this(new HashSet(), metricGroupPrefix); - } - - private List getAllTemplates() { - List l = new ArrayList<>(this.fetcherMetrics.getAllTemplates()); - return l; - } - - public static void main(String[] args) { - Set tags = new HashSet<>(); - tags.add("client-id"); - ConsumerMetrics metrics = new ConsumerMetrics(tags, "consumer"); - System.out.println(Metrics.toHtmlTable("kafka.consumer", metrics.getAllTemplates())); - } -} diff --git a/clients/src/main/java/org/oracle/okafka/clients/consumer/internals/ConsumerNetworkClient.java b/clients/src/main/java/org/oracle/okafka/clients/consumer/internals/ConsumerNetworkClient.java index 344275b..12ba54a 100644 --- a/clients/src/main/java/org/oracle/okafka/clients/consumer/internals/ConsumerNetworkClient.java +++ b/clients/src/main/java/org/oracle/okafka/clients/consumer/internals/ConsumerNetworkClient.java @@ -1,9 +1,9 @@ /* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ + ** OKafka Java Client version 23.4. + ** + ** Copyright (c) 2019, 2024 Oracle and/or its affiliates. + ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
+ */ /* * Licensed to the Apache Software Foundation (ASF) under one or more @@ -29,152 +29,664 @@ package org.oracle.okafka.clients.consumer.internals; +import java.sql.Connection; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.Set; +import java.util.concurrent.TimeUnit; import javax.jms.JMSException; -import org.oracle.okafka.clients.ClientRequest; -import org.oracle.okafka.clients.ClientResponse; +import org.apache.kafka.clients.consumer.ConsumerGroupMetadata; +import org.apache.kafka.clients.consumer.ConsumerPartitionAssignor; +import org.apache.kafka.clients.consumer.ConsumerRebalanceListener; +import org.apache.kafka.common.errors.InterruptException; +import org.apache.kafka.common.errors.InvalidTopicException; +import org.apache.kafka.common.metrics.Measurable; +import org.apache.kafka.common.metrics.Metrics; +import org.apache.kafka.common.metrics.Sensor; +import org.apache.kafka.common.metrics.stats.Avg; +import org.apache.kafka.common.metrics.stats.CumulativeCount; +import org.apache.kafka.common.metrics.stats.CumulativeSum; +import org.apache.kafka.common.metrics.stats.Max; +import org.apache.kafka.common.metrics.stats.Meter; +import org.apache.kafka.common.metrics.stats.Rate; +import org.apache.kafka.common.metrics.stats.WindowedCount; +import org.apache.kafka.clients.ClientRequest; +import org.apache.kafka.clients.ClientResponse; import org.oracle.okafka.clients.KafkaClient; import org.oracle.okafka.clients.Metadata; -import org.oracle.okafka.clients.RequestCompletionHandler; -import org.oracle.okafka.clients.consumer.OffsetAndMetadata; -import org.oracle.okafka.common.Cluster; -import org.oracle.okafka.common.KafkaException; +import org.oracle.okafka.clients.NetworkClient; +import org.oracle.okafka.clients.consumer.KafkaConsumer.FetchManagerMetrics; +import org.oracle.okafka.clients.consumer.internals.SubscriptionState.FetchPosition; +import org.apache.kafka.clients.RequestCompletionHandler; +import org.apache.kafka.clients.consumer.OffsetAndMetadata; +import org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Assignment; +import org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupAssignment; +import org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription; +//import org.apache.kafka.clients.consumer.internals.SubscriptionState; +//import org.oracle.okafka.common.Cluster; +import org.apache.kafka.common.Cluster; +import org.apache.kafka.common.KafkaException; import org.oracle.okafka.common.Node; -import org.oracle.okafka.common.TopicPartition; +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.config.ConfigException; +import org.oracle.okafka.common.internals.PartitionData; +import org.oracle.okafka.common.internals.SessionData; import org.oracle.okafka.common.requests.CommitRequest; import org.oracle.okafka.common.requests.CommitResponse; +import org.oracle.okafka.common.requests.ConnectMeRequest; +import org.oracle.okafka.common.requests.ConnectMeResponse; import org.oracle.okafka.common.requests.FetchRequest; import org.oracle.okafka.common.requests.FetchResponse; +import org.oracle.okafka.common.requests.JoinGroupRequest; +import org.oracle.okafka.common.requests.JoinGroupResponse; import org.oracle.okafka.common.requests.OffsetResetRequest; import org.oracle.okafka.common.requests.OffsetResetResponse; import 
org.oracle.okafka.common.requests.SubscribeRequest; import org.oracle.okafka.common.requests.SubscribeResponse; +import org.oracle.okafka.common.requests.SyncGroupRequest; +import org.oracle.okafka.common.requests.SyncGroupResponse; import org.oracle.okafka.common.requests.UnsubscribeRequest; import org.oracle.okafka.common.requests.UnsubscribeResponse; -import org.oracle.okafka.common.utils.LogContext; -import org.oracle.okafka.common.utils.Time; +import org.apache.kafka.common.utils.Time; import org.slf4j.Logger; - import oracle.jms.AQjmsBytesMessage; public class ConsumerNetworkClient { private static final int MAX_POLL_TIMEOUT_MS = 5000; private final Logger log; private final KafkaClient client; - private final Metadata metadata; - private final Time time; - private final boolean autoCommitEnabled; - private final int autoCommitIntervalMs; - private long nextAutoCommitDeadline; - private final long retryBackoffMs; - private final int maxPollTimeoutMs; - private final int requestTimeoutMs; - private final int sesssionTimeoutMs; - private final long defaultApiTimeoutMs; - private final SubscriptionState subscriptions; - private Set subcriptionSnapshot; - private final List messages = new ArrayList<>(); - public ConsumerNetworkClient(LogContext logContext, + private final Metadata metadata; + private final Time time; + private final boolean autoCommitEnabled; + private final int autoCommitIntervalMs; + private long nextAutoCommitDeadline; + private final long retryBackoffMs; + private final int maxPollTimeoutMs; + private final int requestTimeoutMs; + private final int sesssionTimeoutMs; + private final long defaultApiTimeoutMs; + private final SubscriptionState subscriptions; + private Set subscriptionSnapshot; + private boolean rejoin = false; + private boolean needsJoinPrepare = true; + private SessionData sessionData = null; + private final List assignors; + private final List messages = new ArrayList<>(); + private Node currentSession = null; + String consumerGroupId; + private final AQKafkaConsumer aqConsumer; + private final ConsumerCoordinatorMetrics sensors; + private long lastRebalanceStartMs = -1L; + private long lastRebalanceEndMs = -1L; + + + public ConsumerNetworkClient( + String groupId, + org.apache.kafka.common.utils.LogContext logContext, KafkaClient client, - Metadata metadata, - SubscriptionState subscriptions, - boolean autoCommitEnabled, - int autoCommitIntervalMs, - Time time, - long retryBackoffMs, - int requestTimeoutMs, - int maxPollTimeoutMs, - int sessionTimeoutMs, - long defaultApiTimeoutMs) { + Metadata metadata, + SubscriptionState subscriptions, + List assignors, + boolean autoCommitEnabled, + int autoCommitIntervalMs, + Time time, + long retryBackoffMs, + int requestTimeoutMs, + int maxPollTimeoutMs, + int sessionTimeoutMs, + long defaultApiTimeoutMs, + AQKafkaConsumer aqConsumer, + Metrics metrics) { + this.consumerGroupId = groupId; this.log = logContext.logger(ConsumerNetworkClient.class); - this.client = client; - this.metadata = metadata; - this.subscriptions = subscriptions; - this.autoCommitEnabled = autoCommitEnabled; - this.autoCommitIntervalMs = autoCommitIntervalMs; - this.time = time; - this.retryBackoffMs = retryBackoffMs; - this.maxPollTimeoutMs = Math.min(maxPollTimeoutMs, MAX_POLL_TIMEOUT_MS); - this.requestTimeoutMs = requestTimeoutMs; - this.sesssionTimeoutMs = sessionTimeoutMs; - //Snapshot of subscription. Useful for ensuring if all topics are subscribed. 
- this.subcriptionSnapshot = new HashSet<>(); - this.defaultApiTimeoutMs = defaultApiTimeoutMs; - - if (autoCommitEnabled) - this.nextAutoCommitDeadline = time.milliseconds() + autoCommitIntervalMs; + this.client = client; + this.metadata = metadata; + this.subscriptions = subscriptions; + this.assignors = assignors; + this.autoCommitEnabled = autoCommitEnabled; + this.autoCommitIntervalMs = autoCommitIntervalMs; + this.time = time; + this.retryBackoffMs = retryBackoffMs; + this.maxPollTimeoutMs = Math.min(maxPollTimeoutMs, MAX_POLL_TIMEOUT_MS); + this.requestTimeoutMs = requestTimeoutMs; + this.sesssionTimeoutMs = sessionTimeoutMs; + //Snapshot of subscription. Useful for ensuring if all topics are subscribed. + this.subscriptionSnapshot = new HashSet<>(); + this.defaultApiTimeoutMs = defaultApiTimeoutMs; + this.aqConsumer = aqConsumer; + this.sensors = new ConsumerCoordinatorMetrics(metrics, "consumer"); + if (autoCommitEnabled) + this.nextAutoCommitDeadline = time.milliseconds() + autoCommitIntervalMs; } + public Connection getDBConnection() + { + if(currentSession != null) + { + log.debug("Poll has been invoked. Return connection used to fetch the records"); + Connection conn = aqConsumer.getDBConnection(currentSession); + if(conn == null) + { + ((NetworkClient)client).initiateConnect(currentSession,0); + } + } + else if(this.metadata.isBootstrap()) + { + log.debug("No connection setup yet. Establishing connection to bootstrap node"); + List bootStrapNodeList = metadata.fetch().nodes(); + + //Always Connect to Node 0. + Node bootStrapNode = (org.oracle.okafka.common.Node)bootStrapNodeList.get(0); + log.debug("Setting up connection to bootstrap node" +bootStrapNode.toString()); + ((NetworkClient)client).initiateConnect(bootStrapNode,0); + log.debug("Bootstrap node updated after connection " +bootStrapNode.toString()); + + //Assigning currentSession to BootStrapNode. All further operation will go through the same session + currentSession = bootStrapNode; + metadata.setLeader(currentSession); + // A connection was created before poll is invoked. This connection is passed on to the application and may be used for transactionala activity. + // OKafkaConsumer must use this connection going forward. Hence it should not look for optimal database instance to consume records. Hence avoid connectMe call. + aqConsumer.setSkipConnectMe(true); + } + + else if(metadata.fetch().controller() != null) + { + log.debug("Fail-safe. Metadata has been updated but poll and connectMe are not invoked."); + currentSession = (Node)metadata.fetch().controller(); + + Connection conn = aqConsumer.getDBConnection(currentSession); + if(conn == null) + { + ((NetworkClient)client).initiateConnect(currentSession,0); + } + if(metadata.getLeader() == null) + { + metadata.setLeader(currentSession); + } + aqConsumer.setSkipConnectMe(true); + } + else { + log.warn("Unexpected state. Failed to fetch database connection."); + return null; + } + + return aqConsumer.getDBConnection(currentSession); + } + /** * Poll from subscribed topics. * Each node polls messages from a list of topic partitions for those it is a leader. * @param timeoutMs poll messages for all subscribed topics. + * @param fetchManagerMetrics for recording fetch Requests * @return messages consumed. 
*/ - public List poll(final long timeoutMs) { - this.messages.clear(); - Map pollMap = getPollableMap(); - long now = time.milliseconds(); - RequestCompletionHandler callback = new RequestCompletionHandler() { - public void onComplete(ClientResponse response) { - //do nothing; - } - }; - for(Map.Entry poll : pollMap.entrySet()) { - Node node = poll.getKey(); - if(!this.client.ready(node, now)) { - log.info("Failed to consume messages from node: {}", node); - } else { - ClientRequest request = createFetchRequest(node, poll.getValue(), callback, requestTimeoutMs < timeoutMs ? requestTimeoutMs : (int)timeoutMs); - ClientResponse response = client.send(request, now); - handleResponse(response); - break; - } - - } - return this.messages; + public List poll(final long timeoutMs,FetchManagerMetrics fetchManagerMetrics) { + boolean retry = false; + long pollStartTime = System.currentTimeMillis(); + long timeSpent = 0; + do { + retry = false; + this.messages.clear(); + Map pollMap = getPollableMap(); + long now = time.milliseconds(); + RequestCompletionHandler callback = new RequestCompletionHandler() { + public void onComplete(ClientResponse response) { + //do nothing; + } + }; + log.debug("Polling for topics #" + pollMap.entrySet().size()); + for(Map.Entry poll : pollMap.entrySet()) { + Node node = poll.getKey(); + log.debug("Fetch Records for topic " + poll.getValue() + " from host " + node ); + String topic = poll.getValue(); + if(metadata.topicParaMap.get(topic).getStickyDeq() != 2) { + String errMsg = "Topic " + topic + " is not an Oracle kafka topic, Please drop and re-create topic" + +" using Admin.createTopics() or dbms_aqadm.create_database_kafka_topic procedure"; + throw new InvalidTopicException(errMsg); + } + if(!this.client.ready(node, now)) { + log.debug("Failed to consume messages from node: {}", node); + //ToDo: Retry poll to get new connection to same or different node. + if(currentSession != null && currentSession == node) + { + currentSession = null; + } + } else { + ClientRequest request = createFetchRequest(node, poll.getValue(), callback, requestTimeoutMs < timeoutMs ? requestTimeoutMs : (int)timeoutMs); + ClientResponse response = client.send(request, now); + fetchManagerMetrics.recordFetchLatency(response.requestLatencyMs()); + handleFetchResponse(response, timeoutMs); + if (response.wasDisconnected()) + retry = true; + + break; + } + } + timeSpent = System.currentTimeMillis() - pollStartTime; + }while(retry && timeSpent < timeoutMs ); + return this.messages; } + /** * * @return map of . Every node is leader for its corresponding topic. */ private Map getPollableMap() { try { - return Collections.singletonMap(metadata.fetch().leader(), this.subcriptionSnapshot.iterator().next()); + if(currentSession == null) { + List nodeList = NetworkClient.convertToOracleNodes(metadata.fetch().nodes()); + + //Use only one node if skipConnectMe is set + if(nodeList.size() == 1 || aqConsumer.skipConnectMe()) + { + currentSession = nodeList.get(0); + log.debug("Leader Node " + currentSession); + metadata.setLeader(currentSession); + return Collections.singletonMap(currentSession, this.subscriptionSnapshot.iterator().next()); + } + + //If more than 1 node available then, Pick a READY Node first. 
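For reference, the fetch path above is what a plain application-side poll loop exercises. Below is a minimal sketch, assuming the 23.4 client exposes the standard `org.apache.kafka.clients.consumer.Consumer` contract through `org.oracle.okafka.clients.consumer.KafkaConsumer` (as the `examples` directory suggests); the topic name `TXEQ`, the group id, and the connection properties are placeholders, not part of this patch.

```java
// Poll-loop sketch against the OKafka consumer. Connection properties
// (security.protocol, service name, etc.) are assumed to be filled in as per
// the connection configuration section; names used here are illustrative.
import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.oracle.okafka.clients.consumer.KafkaConsumer;

public class PollLoopSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        // Database connection properties go here (see connection configuration).
        props.put("group.id", "MY_CONSUMER_GROUP");   // illustrative group name
        props.put("enable.auto.commit", "false");
        props.put("key.deserializer",
                  "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer",
                  "org.apache.kafka.common.serialization.StringDeserializer");

        try (Consumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("TXEQ"));
            for (int i = 0; i < 10; i++) {            // bounded loop for the sketch
                // Each poll() drives the internal fetch request shown above; all
                // fetches for this consumer travel over one database session.
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));
                for (ConsumerRecord<String, String> r : records)
                    System.out.printf("partition=%d offset=%d key=%s%n",
                                      r.partition(), r.offset(), r.key());
                consumer.commitSync();                // commit on the same session
            }
        }
    }
}
```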
+ //Use a READY Node, invoke DBMS_TEQK.AQ$_CONNECTME() and use the node returned from there + for(Node nodeNow: nodeList) + { + // Passing now=0 so that this check do not fail just because Metadata update is due + if(client.isReady(nodeNow,0)) + { + Node preferedNode = getPreferredNode(nodeNow, nodeNow.user(), subscriptionSnapshot.iterator().next() , consumerGroupId); + if(preferedNode == null) + { + currentSession = nodeNow; + } + else + { + int preferredInst = preferedNode.id(); + for(Node currentNode : nodeList) + { + if(currentNode.id() == preferredInst) + { + currentSession = currentNode; + break; + } + } + if(currentSession == null) + { + // This should not happen. MetaData.fetch.nodes() should get one node for all the instances. + currentSession = preferedNode; // No connection yet to this node. Connection will get setup eventually + nodeList.add(preferedNode); + } + // Closing other connections + for(Node closeNode : nodeList) + { + if(closeNode != currentSession) + { + //If DB connection exist for this node then close it + if(client.isReady(closeNode,0)) + { + log.debug("Closing exta node: " + closeNode); + client.close(closeNode); + } + } + } + } + break; + } + } + // If there is no READY node, then pick a node randomly + if(currentSession == null) { + Cluster cluster = metadata.fetch(); + if(cluster.controller() != null) + currentSession = (Node)cluster.controller(); + else + currentSession = nodeList.get(0); + + log.debug("There is no ready node available :using " + currentSession); + } + else { + //Node oldLeader = clusterLeaderMap.get(cluster.clusterResource().clusterId()); + log.debug("Leader for this metadata set to " + currentSession); + metadata.setLeader(currentSession); + //cluster.setLeader(currentSession); + } + } + return Collections.singletonMap(currentSession, this.subscriptionSnapshot.iterator().next()); } catch(java.util.NoSuchElementException exception) { //do nothing } return Collections.emptyMap(); } + + private ClientRequest createFetchRequest(Node destination, String topic, RequestCompletionHandler callback, int requestTimeoutMs) { return this.client.newClientRequest(destination, new FetchRequest.Builder(topic, requestTimeoutMs) , time.milliseconds(), true, requestTimeoutMs, callback); } - - private void handleResponse(ClientResponse response) { - if(response.wasDisconnected()) { - client.disconnected(response.destination(), time.milliseconds()); - //metadata.requestUpdate(); - } + + private void handleFetchResponse(ClientResponse response, long timeoutMs) { FetchResponse fetchResponse = (FetchResponse)response.responseBody(); messages.addAll(fetchResponse.getMessages()); + if(response.wasDisconnected()) { + client.disconnected(metadata.getNodeById(Integer.parseInt(response.destination())), time.milliseconds()); + rejoin = true; + currentSession = null; + if(sessionData != null) + { + log.info("Invalidating database session " + sessionData.name +". 
New one will get created."); + sessionData.invalidSessionData(); + } + return; + } + joinGroupifNeeded(response, timeoutMs); + } + + private void joinGroupifNeeded(ClientResponse response, long timeoutMs) { + try { + FetchResponse fResponse = (FetchResponse)response.responseBody(); + Exception exception = fResponse.getException(); + long elapsed = response.requestLatencyMs(); + long prevTime = time.milliseconds(); + long current; + + while(elapsed < timeoutMs && rejoinNeeded(exception)) { + log.debug("JoinGroup Is Needed"); + if (needsJoinPrepare) { + log.debug("Revoking"); + onJoinPrepare(); + needsJoinPrepare = false; + } + if (lastRebalanceStartMs == -1L) + lastRebalanceStartMs = time.milliseconds(); + log.debug("Sending Join Group Request to database via node " + response.destination()); + sendJoinGroupRequest(metadata.getNodeById(Integer.parseInt(response.destination()))); + log.debug("Join Group Response received"); + exception = null; + current = time.milliseconds(); + elapsed = elapsed + (current - prevTime); + prevTime = current; + } + } catch(Exception e) + { + log.error(e.getMessage(), e); + throw e; + } + } + + private boolean rejoinNeeded(Exception exception ) { + if (exception != null && exception instanceof JMSException) { + if( ((JMSException)exception).getLinkedException().getMessage().startsWith("ORA-24003") ) { + log.debug("Join Group is needed"); + return true; + } + } + + return rejoin; + } + + private void onJoinPrepare() { + maybeAutoCommitOffsetsSync(time.milliseconds()); + + // execute the user's callback before rebalance + ConsumerRebalanceListener listener = subscriptions.rebalanceListener(); + log.debug("Revoking previously assigned partitions {}", subscriptions.assignedPartitions()); + try { + Set revoked = new HashSet<>(subscriptions.assignedPartitions()); + listener.onPartitionsRevoked(revoked); + } catch (InterruptException e) { + throw e; + } catch (Exception e) { + log.error("User provided listener {} failed on partition revocation", listener.getClass().getName(), e); + } + // Changes for 2.8.1 : SubscriptionState.java copied from org.apache.kafka* to org.oracle.okafka* + subscriptions.resetGroupSubscription(); + } + private void sendJoinGroupRequest(Node node) { + log.debug("Sending JoinGroup"); + SessionData sessionData = this.sessionData; + if(sessionData == null || sessionData.isInvalid()) { + // First join group request + String topic = subscriptionSnapshot.iterator().next(); + sessionData = new SessionData(-1, -1, node.user(), topic,-1, null, -1,null, -1, -1, -1); + sessionData.addAssignedPartitions(new PartitionData(topic, -1, -1, + null, -1, -1, false)); + } + long now = time.milliseconds(); + ClientRequest request = this.client.newClientRequest(node, new JoinGroupRequest.Builder(sessionData), now, true); + log.debug("Sending JoinGroup Request"); + ClientResponse response = this.client.send(request, now); // Invokes AQKafkaConsumer.joinGroup + log.debug("Got JoinGroup Response, Handling Join Group Response"); + handleJoinGroupResponse(response); + log.debug("Handled JoinGroup Response"); + } + + private void handleJoinGroupResponse(ClientResponse response) { + JoinGroupResponse jResponse = (JoinGroupResponse)response.responseBody(); + + if(response.wasDisconnected()) { + log.info("Join Group failed as connection to database was severed."); + client.disconnected(metadata.getNodeById(Integer.parseInt(response.destination())), time.milliseconds()); + rejoin = true; + currentSession = null; + if(sessionData != null) + { + log.info("Invalidating 
database session " + sessionData.name +". New one will get created."); + sessionData.invalidSessionData(); + } + return; + } + sensors.joinSensor.record(response.requestLatencyMs()); + + //Map> sData = jResponse.getSessionData(); + int leader = jResponse.leader(); + if(leader == 1) { + log.debug("Invoking onJoinLeader "); + onJoinLeader(metadata.getNodeById(Integer.parseInt(response.destination())), jResponse); + } else { + log.debug("Invoking onJoinFollower "); + onJoinFollower(metadata.getNodeById(Integer.parseInt(response.destination())), jResponse); + } + + } + + private void onJoinFollower(Node node, JoinGroupResponse jResponse) { + List sData = new ArrayList<>(); + String topic = subscriptionSnapshot.iterator().next(); + SessionData sessionData = new SessionData(-1, -1, node.user(), topic, -1, null, -1, null, -1, -1, -1); + sessionData.addAssignedPartitions(new PartitionData(topic, -1, -1, + null, -1, -1, false)); + sData.add(sessionData); + sendSyncGroupRequest(node, sData, jResponse.version()); + } + + private void onJoinLeader(Node node, JoinGroupResponse jResponse) { + Map sData = jResponse.getSessionData(); + List partitions = jResponse.partitions(); + ConsumerPartitionAssignor assignor = lookUpAssignor(); + if (assignor == null) + throw new IllegalStateException("Coordinator selected invalid assignment protocol."); + + Set allSubscribedTopics = new HashSet<>(); + Map subscriptions = new HashMap<>(); + + String prevSession = null; + for(Map.Entry sessionEntry: sData.entrySet()) { + String sessionName = sessionEntry.getKey(); + if(prevSession == null || !prevSession.equals(sessionName)) + { + + List subTopics = new ArrayList<>(); + subTopics.add(sessionEntry.getValue().getSubscribedTopics()); + subscriptions.put(sessionName, new Subscription(subTopics, null)); + allSubscribedTopics.addAll(subTopics); + } + prevSession = sessionName; + } + + //Changes for 2.8.1 :; GroupSubscribe was changed to metadataTopics() + this.subscriptions.groupSubscribe(allSubscribedTopics); + metadata.setTopics(this.subscriptions.metadataTopics()); + + ConsumerPartitionAssignor.GroupSubscription gSub = new ConsumerPartitionAssignor.GroupSubscription(subscriptions); + + GroupAssignment gAssignment = assignor.assign(metadata.fetch(),gSub); + Map assignment = gAssignment.groupAssignment(); + + log.debug("Invoking geAssignment"); + List fAssignment = getAssignment(assignment, sData, partitions, jResponse.version()); + sendSyncGroupRequest(node, fAssignment, jResponse.version()); + } + + private void sendSyncGroupRequest(Node node, List sessionData, int version) { + long now = time.milliseconds(); + ClientRequest request = this.client.newClientRequest(node, new SyncGroupRequest.Builder(sessionData, version), now, true); + ClientResponse response = this.client.send(request, now); + handleSyncGroupResponse(response); + } + private void handleSyncGroupResponse(ClientResponse response) { + SyncGroupResponse syncResponse = (SyncGroupResponse)response.responseBody(); + Exception exception = syncResponse.getException(); + + if(response.wasDisconnected()) { + log.info("Sync Group failed as connection to database was severed."); + client.disconnected(metadata.getNodeById(Integer.parseInt(response.destination())), time.milliseconds()); + rejoin = true; + currentSession = null; + if(sessionData != null) + { + log.info("Invalidating database session " + sessionData.name +". 
New one will get created."); + sessionData.invalidSessionData(); + } + sensors.failedRebalanceSensor.record(); + return; + } + + if(exception == null) { + sensors.syncSensor.record(response.requestLatencyMs()); + onJoinComplete(syncResponse.getSessionData()); + rejoin = false; + needsJoinPrepare = true; + this.sessionData = syncResponse.getSessionData(); + } + + } + + protected void onJoinComplete(SessionData sessionData) { + log.debug("OnJoinComplete Invoked"); + List assignment = new ArrayList<>(); + for(PartitionData pData : sessionData.getAssignedPartitions()) { + log.debug("Assigned PartitionData " + pData.toString()); + assignment.add(pData.getTopicPartition()); + } + subscriptions.assignFromSubscribed(assignment); + //Changes for 2.8.1 + // Seek to current offset per say + assignment.stream().forEach(tp-> + { + subscriptions.seek(tp,0); + subscriptions.completeValidation(tp); + }); + + ConsumerPartitionAssignor assignor = lookUpAssignor(); + // give the assignor a chance to update internal state based on the received + // assignment + // Changes for 2.8.1: See if GroupInstanceId can be fetched from Local Config + ConsumerGroupMetadata cgMetaData = new ConsumerGroupMetadata(sessionData.getSubscriberName(), sessionData.getVersion(), sessionData.name, Optional.of(sessionData.name)); + assignor.onAssignment(new ConsumerPartitionAssignor.Assignment(assignment, null), cgMetaData); + + // reschedule the auto commit starting from now + this.nextAutoCommitDeadline = time.milliseconds() + autoCommitIntervalMs; + + // execute the user's callback after rebalance + ConsumerRebalanceListener listener = subscriptions.rebalanceListener(); + log.debug("Setting newly assigned partitions {}", subscriptions.assignedPartitions()); + try { + Set assigned = new HashSet<>(subscriptions.assignedPartitions()); + listener.onPartitionsAssigned(assigned); + lastRebalanceEndMs = time.milliseconds(); + sensors.successfulRebalanceSensor.record(lastRebalanceEndMs - lastRebalanceStartMs); + lastRebalanceStartMs = -1L; + } catch (InterruptException e) { + sensors.failedRebalanceSensor.record(); + throw e; + } catch (Exception e) { + sensors.failedRebalanceSensor.record(); + log.error("User provided listener {} failed on partition assignment", listener.getClass().getName(), e); + } } + private List getAssignment(Map assignment, Map sData, List partitions, int version) { + log.debug("Getting new assignment"); + List fAssignment = new ArrayList<>(); + + Map pDataBytp = new HashMap<>(); + Map pDataBytp2 = new HashMap<>(); + + for(SessionData data : sData.values()) { + for(PartitionData pData : data.getAssignedPartitions()) { + pDataBytp.put(pData.getTopicPartition(), pData); + } + } + + for(PartitionData pData : partitions) { + pDataBytp2.put(pData.getTopicPartition(), pData); + } + + for(Map.Entry assignmentEntry : assignment.entrySet()) { + String sessionName = assignmentEntry.getKey(); + SessionData prevData = sData.get(sessionName); + + SessionData data = new SessionData(prevData.getSessionId(), prevData.getInstanceId(), prevData.getSchema(),prevData.getSubscribedTopics(), prevData.getQueueId(), + prevData.getSubscriberName(), prevData.getSubscriberId(), prevData.createTime, prevData.getLeader(), version, prevData.getAuditId()); + for(TopicPartition tp : assignmentEntry.getValue().partitions()) { + + if(pDataBytp.get(tp) == null) + data.addAssignedPartitions(pDataBytp2.get(tp)); + else + data.addAssignedPartitions(pDataBytp.get(tp)); + } + + fAssignment.add(data); + + } + return fAssignment; + } +/* + * DummyCluster 
no longer being used + private Cluster getDummyCluster(List partitions) { + + Cluster cluster = metadata.fetch(); + List pInfo = new ArrayList<>(); + for(PartitionData pData : partitions) { + pInfo.add(new PartitionInfo(pData.getTopicPartition().topic(), pData.getTopicPartition().partition(), cluster.nodes().get(0), null, null)); + } + return new Cluster("dummy", cluster.nodes(), pInfo, + cluster.unauthorizedTopics(), cluster.internalTopics(), cluster.getConfigs()); + + }*/ + + private ConsumerPartitionAssignor lookUpAssignor() { + if(this.assignors.size() == 0) + return null; + return this.assignors.get(0); + } + /** * Subscribe to topic if not done * @return true if subscription is successsful else false. + * @throws Exception */ - public boolean mayBeTriggerSubcription(long timeout) { - if(!subscriptions.subscription().equals(subcriptionSnapshot)) { + + public boolean mayBeTriggerSubcription(long timeout) { + + if(!subscriptions.subscription().equals(subscriptionSnapshot)) { + boolean noSubExist = false; + rejoin = true; String topic = getSubscribableTopics(); long now = time.milliseconds(); Node node = client.leastLoadedNode(now); @@ -182,34 +694,82 @@ public boolean mayBeTriggerSubcription(long timeout) { log.error("Failed to subscribe to topic: {}", topic); return false; } - ClientRequest request = this.client.newClientRequest(node, new SubscribeRequest.Builder(topic), now, true, requestTimeoutMs < timeout ? requestTimeoutMs: (int)timeout, null); - ClientResponse response = this.client.send(request, now); - - return handleSubscribeResponse(response); + try { + if(aqConsumer.getSubcriberCount(node, topic) < 1) { + noSubExist = true; + } + + ClientRequest request = this.client.newClientRequest(node, new SubscribeRequest.Builder(topic), now, true, requestTimeoutMs < timeout ? requestTimeoutMs: (int)timeout, null); + ClientResponse response = this.client.send(request, now); + if(handleSubscribeResponse(response)) { + + if(noSubExist && aqConsumer.getoffsetStartegy().equalsIgnoreCase("earliest")) { + TopicPartition tp = new TopicPartition(topic, -1); + Map offsetResetTimestamps = new HashMap(); + offsetResetTimestamps.put(tp, -2L); + + return resetOffsetsSync(offsetResetTimestamps, timeout); + } + + else if(noSubExist && aqConsumer.getoffsetStartegy().equalsIgnoreCase("none")) { + throw new ConfigException("No previous offset found for the consumer group"); + } + } + else { + return false; + } + + } + catch(ConfigException exception) { + log.error("Exception while subscribing to the topic" + exception.getMessage(),exception); + log.info("Closing the consumer due to exception : " + exception.getMessage()); + throw new ConfigException("No previous offset found for the consumer group"); + } + catch(Exception e){ + log.error("Exception while subscribing to the topic" + e.getMessage(),e); + } + } return true; - + } - + + public void maybeUpdateMetadata(long timeout) { + Cluster cluster = metadata.fetch(); + long curr = time.milliseconds(); + if(cluster.isBootstrapConfigured() || metadata.timeToNextUpdate(curr) == 0 ? 
true : false) { + int lastVersion = metadata.version(); + long elapsed = 0; + long prev; + + while ((elapsed <= timeout) && (metadata.version() <= lastVersion) && !metadata.isClosed()) { + prev = time.milliseconds(); + client.maybeUpdateMetadata(curr); + curr = time.milliseconds(); + elapsed = elapsed + (prev - curr); + } + log.debug("Metadata updated:Current Metadata Version " + metadata.version()); + + } + } + private boolean handleSubscribeResponse(ClientResponse response) { if(response.wasDisconnected()) { - client.disconnected(response.destination(), time.milliseconds()); - metadata.requestUpdate(); - - } + client.disconnected(metadata.getNodeById(Integer.parseInt(response.destination())), time.milliseconds()); + } SubscribeResponse subscribeResponse = (SubscribeResponse)response.responseBody(); JMSException exception = subscribeResponse.getException(); if(exception != null) { log.error("failed to subscribe to topic {}", subscribeResponse.getTopic()); return false; }else { - this.subcriptionSnapshot.add(subscribeResponse.getTopic()); + this.subscriptionSnapshot.add(subscribeResponse.getTopic()); } return true; - + } - + /** * Updates subscription snapshot and returns subscribed topic. * @return subscribed topic @@ -218,39 +778,53 @@ private String getSubscribableTopics() { //this.subcriptionSnapshot = new HashSet<>(subscriptions.subscription()); return getSubscribedTopic(); } + /** - * return subscribed topic. + * return subscribed topic. */ private String getSubscribedTopic() { HashSet subscribableTopics = new HashSet<>(); for(String topic : subscriptions.subscription()) { - if(!this.subcriptionSnapshot.contains(topic)) { + if(!this.subscriptionSnapshot.contains(topic)) { subscribableTopics.add(topic); - this.subcriptionSnapshot.clear(); + this.subscriptionSnapshot.clear(); } } return subscribableTopics.iterator().next(); } - + public boolean commitOffsetsSync(Map offsets, long timeout) throws Exception{ - log.debug("Sending synchronous commit of offsets: {} request", offsets); - - long elapsed = 0; + try { + log.debug("Sending synchronous commit of offsets: {} request", offsets); + //long elapsed = 0; ClientRequest request; - ClientResponse response; - request = this.client.newClientRequest(null, new CommitRequest.Builder(getCommitableNodes(offsets), offsets), time.milliseconds(), true); + ClientResponse response; + //Changes for 2.8.1:: Send leader Node explicitly here + //request = this.client.newClientRequest(null, new CommitRequest.Builder(getCommitableNodes(offsets), offsets), time.milliseconds(), true); + Map> commitableNodes = getCommitableNodes(offsets); + if(commitableNodes == null || commitableNodes.size() == 0) + { + log.debug("No offsets to commit. 
Return"); + return true; + } + request = this.client.newClientRequest(metadata.getLeader(), new CommitRequest.Builder(commitableNodes, offsets), time.milliseconds(), true); response = this.client.send(request, time.milliseconds()); handleCommitResponse(response); if(((CommitResponse)response.responseBody()).error()) { - + throw ((CommitResponse)response.responseBody()).getResult() - .entrySet().iterator().next().getValue(); + .entrySet().iterator().next().getValue(); } - + }catch(Exception e) + { + log.error("Exception while committing messages " + e,e); + throw e; + } + return true; } - + private void handleCommitResponse(ClientResponse response) { CommitResponse commitResponse = (CommitResponse)response.responseBody(); Map> nodes = commitResponse.getNodes(); @@ -258,6 +832,7 @@ private void handleCommitResponse(ClientResponse response) { Map result = commitResponse.getResult(); for(Map.Entry nodeResult : result.entrySet()) { if(nodeResult.getValue() == null) { + this.sensors.commitSensor.record(response.requestLatencyMs()); for(TopicPartition tp : nodes.get(nodeResult.getKey())) { log.debug("Commited to topic partiton: {} with offset: {} ", tp, offsets.get(tp)); offsets.remove(tp); @@ -267,32 +842,51 @@ private void handleCommitResponse(ClientResponse response) { for(TopicPartition tp : nodes.get(nodeResult.getKey())) { log.error("Failed to commit to topic partiton: {} with offset: {} ", tp, offsets.get(tp)); } - + } } } - + /** * Returns nodes that have sessions ready for commit. * @param offsets Recently consumed offset for each partition since last commit. * @return map of node , list of partitions(node is leader for its corresponding partition list) that are ready for commit. */ private Map> getCommitableNodes(Map offsets) { - Map> nodes = new HashMap<>(); + Map> nodeTPMap = new HashMap<>(); Cluster cluster = metadata.fetch(); + + Node leaderNode = metadata.getLeader(); + if(leaderNode== null) + { + //Find a ready node + List kafkaNodeList = cluster.nodes(); + for(org.apache.kafka.common.Node node :kafkaNodeList ) + { + if(client.isReady((org.oracle.okafka.common.Node)node, 0)) + { + leaderNode = (org.oracle.okafka.common.Node)node; + log.info("Leader Node not present. Picked first ready node: " + leaderNode); + break; + } + } + } + log.debug("Sending Commit request to leader Node " + leaderNode); + for(Map.Entry metadata : offsets.entrySet()) { - if(!client.ready(cluster.leader(), time.milliseconds())) { + if(!client.ready(leaderNode, time.milliseconds())) { + log.info("Failed to send commit as Leader node is not ready to send commit: " + leaderNode); log.error("Failed to commit to topic partiton: {} with offset: {} ", metadata.getKey(), metadata.getValue()); } else { - if(nodes.get(cluster.leader()) == null) { - nodes.put(cluster.leader(), new ArrayList()); - } - nodes.get(cluster.leader()).add(metadata.getKey()); + List nodeTPList= nodeTPMap.get(leaderNode); + if(nodeTPList == null) { + nodeTPList = new ArrayList(); + nodeTPMap.put(leaderNode, nodeTPList ); + } + nodeTPList.add(metadata.getKey()); } - } - return nodes; - + return nodeTPMap; } public boolean resetOffsetsSync(Map offsetResetTimestamps, long timeout) { long now = time.milliseconds(); @@ -300,100 +894,120 @@ public boolean resetOffsetsSync(Map offsetResetTimestamps if( node == null || !client.ready(node, now) ) return false; ClientResponse response = client.send(client.newClientRequest(node, new OffsetResetRequest.Builder(offsetResetTimestamps, 0), now, true, requestTimeoutMs < timeout ? 
requestTimeoutMs: (int)timeout, null), now); - return handleOffsetResetResponse(response, offsetResetTimestamps); + return handleOffsetResetResponse(response, offsetResetTimestamps); } - + public boolean handleOffsetResetResponse(ClientResponse response, Map offsetResetTimestamps) { - OffsetResetResponse offsetResetResponse = (OffsetResetResponse)response.responseBody(); - Map result = offsetResetResponse.offsetResetResponse(); - Set failed = new HashSet<>(); - for(Map.Entry tpResult : result.entrySet()) { - Long offsetLong = offsetResetTimestamps.get(tpResult.getKey()); - String offset ; - - if( offsetLong == -2L ) - offset = "TO_EARLIEST" ; - else if (offsetLong == -1L) - offset = "TO_LATEST"; - else offset = Long.toString(offsetLong); - if( tpResult.getValue() == null) { - subscriptions.requestOffsetReset(tpResult.getKey(), null); - log.trace("seek to offset {} for topicpartition {} is successful", offset, tpResult.getKey()); - } - else { - if(tpResult.getValue() instanceof java.sql.SQLException && ((java.sql.SQLException)tpResult.getValue()).getErrorCode() == 25323) - subscriptions.requestOffsetReset(tpResult.getKey(), null); - else failed.add(tpResult.getKey()); - - log.warn("Failed to update seek for topicpartition {} to offset {}", tpResult.getKey(), offset); - } - - } - subscriptions.resetFailed(failed, time.milliseconds() + retryBackoffMs); + OffsetResetResponse offsetResetResponse = (OffsetResetResponse)response.responseBody(); + Map result = offsetResetResponse.offsetResetResponse(); + Set failed = new HashSet<>(); + for(Map.Entry tpResult : result.entrySet()) { + Long offsetLong = offsetResetTimestamps.get(tpResult.getKey()); + String offset ; + + if( offsetLong == -2L ) + offset = "TO_EARLIEST" ; + else if (offsetLong == -1L) + offset = "TO_LATEST"; + else offset = Long.toString(offsetLong); + if( tpResult.getValue() == null && tpResult.getKey().partition()!=-1) { + subscriptions.requestOffsetReset(tpResult.getKey(), null); + subscriptions.seekValidated(tpResult.getKey(), new FetchPosition(0)); + subscriptions.completeValidation(tpResult.getKey()); + log.trace("seek to offset {} for topicpartition {} is successful", offset, tpResult.getKey()); + } + else if(tpResult.getValue()!=null){ + if(tpResult.getValue() instanceof java.sql.SQLException && ((java.sql.SQLException)tpResult.getValue()).getErrorCode() == 25323) + subscriptions.requestOffsetReset(tpResult.getKey(), null); + else failed.add(tpResult.getKey()); + log.warn("Failed to update seek for topicpartition {} to offset {}", tpResult.getKey(), offset); + } + + } + //Changes for 2.8.1. Copied SubscriptionState.java from org.apache.kafka.clients.consumer.internals to + // org.oracle.okafka.clients.consumer.internals for this requestFailed method. + subscriptions.requestFailed(failed, time.milliseconds() + retryBackoffMs); return true; } + /** * Synchronously commit last consumed offsets if auto commit is enabled. */ public void maybeAutoCommitOffsetsSync(long now) { - if (autoCommitEnabled && now >= nextAutoCommitDeadline) { - this.nextAutoCommitDeadline = now + autoCommitIntervalMs; - doCommitOffsetsSync(); - } - } - + if (autoCommitEnabled && now >= nextAutoCommitDeadline) { + this.nextAutoCommitDeadline = now + autoCommitIntervalMs; + doCommitOffsetsSync(); + } + } + public void clearSubscription() { //doCommitOffsetsSync(); - this.subcriptionSnapshot.clear(); + this.subscriptionSnapshot.clear(); } - + /** * Synchronously commit offsets. 
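The application-side call that eventually lands in `commitOffsetsSync()` above is the standard `commitSync(Map)` API. A short sketch; the topic, partition, and offset arithmetic are illustrative only.

```java
// Synchronous commit of explicit per-partition offsets. The client batches
// these per leader node and issues a single CommitRequest, as shown above.
import java.util.Collections;
import java.util.Map;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

final class ExplicitCommitSketch {
    private ExplicitCommitSketch() { }

    // Commit everything up to and including lastProcessedOffset for one partition.
    static void commitUpTo(Consumer<?, ?> consumer, String topic, int partition,
                           long lastProcessedOffset) {
        TopicPartition tp = new TopicPartition(topic, partition);
        Map<TopicPartition, OffsetAndMetadata> offsets =
                Collections.singletonMap(tp, new OffsetAndMetadata(lastProcessedOffset + 1));
        consumer.commitSync(offsets);
    }
}
```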
*/ private void doCommitOffsetsSync() { Map allConsumedOffsets = subscriptions.allConsumed(); - - try { - commitOffsetsSync(allConsumedOffsets, 0); - - } catch(Exception exception) { - //nothing to do - } finally { - nextAutoCommitDeadline = time.milliseconds() + autoCommitIntervalMs; - } + + try { + commitOffsetsSync(allConsumedOffsets, 0); + + } catch(Exception exception) { + //nothing to do + } finally { + nextAutoCommitDeadline = time.milliseconds() + autoCommitIntervalMs; + } } - + public void unsubscribe() { - ClientRequest request = this.client.newClientRequest(null, new UnsubscribeRequest.Builder(), time.milliseconds(), true); + + if(currentSession!=null) { + ClientRequest request = this.client.newClientRequest(currentSession, new UnsubscribeRequest.Builder(), time.milliseconds(), true); ClientResponse response = this.client.send(request, time.milliseconds()); handleUnsubscribeResponse(response); - + } + } - private void handleUnsubscribeResponse(ClientResponse response) { + + if(response.wasDisconnected()) { + log.debug("handleUnsubscribeResponse : node in disconnected state\n"); + client.disconnected(metadata.getNodeById(Integer.parseInt(response.destination())), time.milliseconds()); + rejoin = true; + currentSession = null; + if(sessionData != null) + { + log.debug("handleUnsubscribeResponse : Invalidating database session " + sessionData.name +". New one will get created.\n"); + sessionData.invalidSessionData(); + } + return; + } UnsubscribeResponse unsubResponse = (UnsubscribeResponse)response.responseBody(); for(Map.Entry responseByTopic: unsubResponse.response().entrySet()) { - if(responseByTopic.getValue() == null) - log.trace("Failed to unsubscribe from topic: with exception: ", responseByTopic.getKey(), responseByTopic.getValue()); + if(responseByTopic.getValue() != null) + log.info("Failed to unsubscribe from topic: with exception: ", responseByTopic.getKey(), responseByTopic.getValue()); else - log.trace("Unsubscribed from topic: ", responseByTopic.getKey()); + log.info("Unsubscribed from topic: ", responseByTopic.getKey()); } + } - + /** - * Return the time to the next needed invocation of {@link #poll(long)}. - * @param now current time in milliseconds - * @return the maximum time in milliseconds the caller should wait before the next invocation of poll() - */ - public long timeToNextPoll(long now, long timeoutMs) { - if (!autoCommitEnabled) - return timeoutMs; - - if (now > nextAutoCommitDeadline) - return 0; - - return Math.min(nextAutoCommitDeadline - now, timeoutMs); - } + * Return the time to the next needed invocation of {@link #poll(long)}. + * @param now current time in milliseconds + * @return the maximum time in milliseconds the caller should wait before the next invocation of poll() + */ + public long timeToNextPoll(long now, long timeoutMs) { + if (!autoCommitEnabled) + return timeoutMs; + + if (now > nextAutoCommitDeadline) + return 0; + + return Math.min(nextAutoCommitDeadline - now, timeoutMs); + } /** * Closes the AQKafkaConsumer. 
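The `unsubscribe()` and `close()` handling above suggests a shutdown pattern along the following lines. This is a sketch under the assumption that the client honours the standard `unsubscribe()` and `close(Duration)` contract; whether a final commit happens on close depends on `enable.auto.commit`, exactly as in the code above.

```java
// Shutdown sketch: stop polling, unsubscribe (UnsubscribeRequest above), then
// close, which attempts a final commit when auto-commit is enabled.
import java.time.Duration;
import org.apache.kafka.clients.consumer.Consumer;

final class ShutdownSketch {
    private static volatile boolean running = true;
    private ShutdownSketch() { }

    static void runUntilShutdown(Consumer<String, String> consumer) {
        Runtime.getRuntime().addShutdownHook(new Thread(() -> running = false));
        try {
            while (running) {
                consumer.poll(Duration.ofSeconds(1));   // process records here
            }
        } finally {
            consumer.unsubscribe();                     // drops the TEQ subscription state
            consumer.close(Duration.ofSeconds(10));     // final auto-commit (if enabled) + session cleanup
        }
    }
}
```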
@@ -405,14 +1019,142 @@ public void close(long timeoutMs) throws Exception { if(autoCommitEnabled) { Map allConsumedOffsets = subscriptions.allConsumed(); try { - commitOffsetsSync(subscriptions.allConsumed(), timeoutMs); - } catch (Exception exception) { - autoCommitException= new KafkaException("failed to commit consumed messages", exception); - } + commitOffsetsSync(allConsumedOffsets, timeoutMs); + } catch (Exception exception) { + autoCommitException= new KafkaException("failed to commit consumed messages", exception); + } } this.client.close(); if(autoCommitException != null) throw autoCommitException; } + + private Node getPreferredNode(Node currentNode, String schema, String topic, String groupId) + { + long now = time.milliseconds(); + ClientRequest request = this.client.newClientRequest(currentNode, new ConnectMeRequest.Builder(schema,topic,groupId), now, true); + log.debug("Sending ConnectMe Request"); + ClientResponse response = this.client.send(request, now); // Invokes DBMS_TEQK.AQ$_CONNECT_ME + log.debug("Got ConnectMe response"); + ConnectMeResponse connMeResponse = (ConnectMeResponse)response.responseBody(); + Node preferredNode = connMeResponse.getPreferredNode(); + log.debug("ConnectMe: PreferredNode " +preferredNode ); + return preferredNode; + } + + private class ConsumerCoordinatorMetrics { + public final String metricGrpName; + + private final Sensor commitSensor; + public final Sensor joinSensor; + public final Sensor syncSensor; + public final Sensor successfulRebalanceSensor; + public final Sensor failedRebalanceSensor; + + + public ConsumerCoordinatorMetrics(Metrics metrics, String metricGrpPrefix) { + this.metricGrpName = metricGrpPrefix + "-coordinator-metrics"; + + this.commitSensor = metrics.sensor("commit-latency"); + this.commitSensor.add(metrics.metricName("commit-latency-avg", + this.metricGrpName, + "The average time taken for a commit request"), new Avg()); + this.commitSensor.add(metrics.metricName("commit-latency-max", + this.metricGrpName, + "The max time taken for a commit request"), new Max()); + this.commitSensor.add(createMeter(metrics, metricGrpName, "commit", "commit calls")); + + this.joinSensor = metrics.sensor("join-latency"); + this.joinSensor.add(metrics.metricName("join-time-avg", + this.metricGrpName, + "The average time taken for a group rejoin"), new Avg()); + this.joinSensor.add(metrics.metricName("join-time-max", + this.metricGrpName, + "The max time taken for a group rejoin"), new Max()); + this.joinSensor.add(createMeter(metrics, metricGrpName, "join", "group joins")); + + this.syncSensor = metrics.sensor("sync-latency"); + this.syncSensor.add(metrics.metricName("sync-time-avg", + this.metricGrpName, + "The average time taken for a group sync"), new Avg()); + this.syncSensor.add(metrics.metricName("sync-time-max", + this.metricGrpName, + "The max time taken for a group sync"), new Max()); + this.syncSensor.add(createMeter(metrics, metricGrpName, "sync", "group syncs")); + + this.successfulRebalanceSensor = metrics.sensor("rebalance-latency"); + this.successfulRebalanceSensor.add(metrics.metricName("rebalance-latency-avg", + this.metricGrpName, + "The average time taken for a group to complete a successful rebalance, which may be composed of " + + "several failed re-trials until it succeeded"), new Avg()); + this.successfulRebalanceSensor.add(metrics.metricName("rebalance-latency-max", + this.metricGrpName, + "The max time taken for a group to complete a successful rebalance, which may be composed of " + + "several failed re-trials 
until it succeeded"), new Max()); + this.successfulRebalanceSensor.add(metrics.metricName("rebalance-latency-total", + this.metricGrpName, + "The total number of milliseconds this consumer has spent in successful rebalances since creation"), + new CumulativeSum()); + this.successfulRebalanceSensor.add( + metrics.metricName("rebalance-total", + this.metricGrpName, + "The total number of successful rebalance events, each event is composed of " + + "several failed re-trials until it succeeded"), + new CumulativeCount() + ); + this.successfulRebalanceSensor.add( + metrics.metricName( + "rebalance-rate-per-hour", + this.metricGrpName, + "The number of successful rebalance events per hour, each event is composed of " + + "several failed re-trials until it succeeded"), + new Rate(TimeUnit.HOURS, new WindowedCount()) + ); + + this.failedRebalanceSensor = metrics.sensor("failed-rebalance"); + this.failedRebalanceSensor.add( + metrics.metricName("failed-rebalance-total", + this.metricGrpName, + "The total number of failed rebalance events"), + new CumulativeCount() + ); + this.failedRebalanceSensor.add( + metrics.metricName( + "failed-rebalance-rate-per-hour", + this.metricGrpName, + "The number of failed rebalance events per hour"), + new Rate(TimeUnit.HOURS, new WindowedCount()) + ); + + + + Measurable lastRebalance = (config, now) -> { + if (lastRebalanceEndMs == -1L) + // if no rebalance is ever triggered, we just return -1. + return -1d; + else + return TimeUnit.SECONDS.convert(now - lastRebalanceEndMs, TimeUnit.MILLISECONDS); + }; + metrics.addMetric(metrics.metricName("last-rebalance-seconds-ago", + this.metricGrpName, + "The number of seconds since the last successful rebalance event"), + lastRebalance); + + Measurable numParts = (config, now) -> subscriptions.numAssignedPartitions(); + metrics.addMetric(metrics.metricName("assigned-partitions", + this.metricGrpName, + "The number of partitions currently assigned to this consumer"), numParts); + + } + + public Meter createMeter(Metrics metrics, String groupName, String baseName, String descriptiveName) { + return new Meter(new WindowedCount(), + metrics.metricName(baseName + "-rate", groupName, + String.format("The number of %s per second", descriptiveName)), + metrics.metricName(baseName + "-total", groupName, + String.format("The total number of %s", descriptiveName))); + } + } + } diff --git a/clients/src/main/java/org/oracle/okafka/clients/consumer/internals/FetcherMetricsRegistry.java b/clients/src/main/java/org/oracle/okafka/clients/consumer/internals/FetchMetricsRegistry.java similarity index 86% rename from clients/src/main/java/org/oracle/okafka/clients/consumer/internals/FetcherMetricsRegistry.java rename to clients/src/main/java/org/oracle/okafka/clients/consumer/internals/FetchMetricsRegistry.java index 251599b..5963aa7 100644 --- a/clients/src/main/java/org/oracle/okafka/clients/consumer/internals/FetcherMetricsRegistry.java +++ b/clients/src/main/java/org/oracle/okafka/clients/consumer/internals/FetchMetricsRegistry.java @@ -1,26 +1,10 @@ /* -** OKafka Java Client version 0.8. +** OKafka Java Client version 23.4. ** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. +** Copyright (c) 2019, 2024 Oracle and/or its affiliates. ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. */ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ package org.oracle.okafka.clients.consumer.internals; import java.util.Arrays; @@ -29,9 +13,9 @@ import java.util.List; import java.util.Set; -import org.oracle.okafka.common.MetricNameTemplate; +import org.apache.kafka.common.MetricNameTemplate; -public class FetcherMetricsRegistry { +public class FetchMetricsRegistry { public MetricNameTemplate fetchSizeAvg; public MetricNameTemplate fetchSizeMax; @@ -61,16 +45,17 @@ public class FetcherMetricsRegistry { public MetricNameTemplate partitionRecordsLead; public MetricNameTemplate partitionRecordsLeadMin; public MetricNameTemplate partitionRecordsLeadAvg; + public MetricNameTemplate partitionPreferredReadReplica; - public FetcherMetricsRegistry() { + public FetchMetricsRegistry() { this(new HashSet(), ""); } - public FetcherMetricsRegistry(String metricGrpPrefix) { + public FetchMetricsRegistry(String metricGrpPrefix) { this(new HashSet(), metricGrpPrefix); } - public FetcherMetricsRegistry(Set tags, String metricGrpPrefix) { + public FetchMetricsRegistry(Set tags, String metricGrpPrefix) { /***** Client level *****/ String groupName = metricGrpPrefix + "-fetch-manager-metrics"; @@ -146,7 +131,9 @@ public FetcherMetricsRegistry(Set tags, String metricGrpPrefix) { "The min lead of the partition", partitionTags); this.partitionRecordsLeadAvg = new MetricNameTemplate("records-lead-avg", groupName, "The average lead of the partition", partitionTags); - + this.partitionPreferredReadReplica = new MetricNameTemplate( + "preferred-read-replica", "consumer-fetch-manager-metrics", + "The current read replica for the partition, or -1 if reading from leader", partitionTags); } public List getAllTemplates() { @@ -178,8 +165,9 @@ public List getAllTemplates() { partitionRecordsLagMax, partitionRecordsLead, partitionRecordsLeadMin, - partitionRecordsLeadAvg + partitionRecordsLeadAvg, + partitionPreferredReadReplica ); } -} +} \ No newline at end of file diff --git a/clients/src/main/java/org/oracle/okafka/clients/consumer/internals/NoOpConsumerRebalanceListener.java b/clients/src/main/java/org/oracle/okafka/clients/consumer/internals/NoOpConsumerRebalanceListener.java index be2ede0..47533d5 100644 --- a/clients/src/main/java/org/oracle/okafka/clients/consumer/internals/NoOpConsumerRebalanceListener.java +++ b/clients/src/main/java/org/oracle/okafka/clients/consumer/internals/NoOpConsumerRebalanceListener.java @@ -1,36 +1,19 @@ /* -** OKafka Java Client version 0.8. +** OKafka Java Client version 23.4. ** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. +** Copyright (c) 2019, 2024 Oracle and/or its affiliates. ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. */ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ package org.oracle.okafka.clients.consumer.internals; -import org.oracle.okafka.clients.consumer.ConsumerRebalanceListener; -import org.oracle.okafka.common.TopicPartition; - +import org.apache.kafka.clients.consumer.ConsumerRebalanceListener; +import org.apache.kafka.common.TopicPartition; import java.util.Collection; -public class NoOpConsumerRebalanceListener implements ConsumerRebalanceListener { - - @Override +public class NoOpConsumerRebalanceListener implements ConsumerRebalanceListener{ + + @Override public void onPartitionsAssigned(Collection partitions) {} @Override diff --git a/clients/src/main/java/org/oracle/okafka/clients/consumer/internals/OkafkaConsumerMetrics.java b/clients/src/main/java/org/oracle/okafka/clients/consumer/internals/OkafkaConsumerMetrics.java new file mode 100644 index 0000000..3354c82 --- /dev/null +++ b/clients/src/main/java/org/oracle/okafka/clients/consumer/internals/OkafkaConsumerMetrics.java @@ -0,0 +1,96 @@ +/* +** OKafka Java Client version 23.4. +** +** Copyright (c) 2019, 2024 Oracle and/or its affiliates. +** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
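`NoOpConsumerRebalanceListener` above is the fallback used when no listener is supplied; the join/sync group handling earlier in this patch invokes the user's listener on revocation (`onJoinPrepare`) and assignment (`onJoinComplete`). A sketch of a user-supplied listener, using only the standard `ConsumerRebalanceListener` callbacks:

```java
// Logging rebalance listener passed to subscribe(); callbacks are fired from the
// join/sync group handling shown earlier in this patch.
import java.util.Collection;
import java.util.Collections;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
import org.apache.kafka.common.TopicPartition;

final class LoggingRebalanceListener implements ConsumerRebalanceListener {
    @Override
    public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
        // Offsets for these partitions were already committed if auto-commit is on.
        System.out.println("Revoked: " + partitions);
    }

    @Override
    public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
        System.out.println("Assigned: " + partitions);
    }

    static void subscribeWithListener(Consumer<String, String> consumer, String topic) {
        consumer.subscribe(Collections.singletonList(topic), new LoggingRebalanceListener());
    }
}
```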
+*/ + +package org.oracle.okafka.clients.consumer.internals; + +import org.apache.kafka.common.MetricName; +import org.apache.kafka.common.metrics.Measurable; +import org.apache.kafka.common.metrics.Metrics; +import org.apache.kafka.common.metrics.Sensor; +import org.apache.kafka.common.metrics.stats.Avg; +import org.apache.kafka.common.metrics.stats.CumulativeSum; +import org.apache.kafka.common.metrics.stats.Max; + +import java.util.concurrent.TimeUnit; + +public class OkafkaConsumerMetrics implements AutoCloseable { + private final MetricName lastPollMetricName; + private final Sensor timeBetweenPollSensor; + private final Sensor pollIdleSensor; + private final Sensor commitSyncSensor; + private final Metrics metrics; + private long lastPollMs; + private long pollStartMs; + private long timeSinceLastPollMs; + + public OkafkaConsumerMetrics(Metrics metrics, String metricGrpPrefix) { + this.metrics = metrics; + String metricGroupName = metricGrpPrefix + "-metrics"; + Measurable lastPoll = (mConfig, now) -> { + if (lastPollMs == 0L) + return -1d; + else + return TimeUnit.SECONDS.convert(now - lastPollMs, TimeUnit.MILLISECONDS); + }; + this.lastPollMetricName = metrics.metricName("last-poll-seconds-ago", + metricGroupName, "The number of seconds since the last poll() invocation."); + metrics.addMetric(lastPollMetricName, lastPoll); + + this.timeBetweenPollSensor = metrics.sensor("time-between-poll"); + this.timeBetweenPollSensor.add(metrics.metricName("time-between-poll-avg", + metricGroupName, + "The average delay between invocations of poll() in milliseconds."), + new Avg()); + this.timeBetweenPollSensor.add(metrics.metricName("time-between-poll-max", + metricGroupName, + "The max delay between invocations of poll() in milliseconds."), + new Max()); + + this.pollIdleSensor = metrics.sensor("poll-idle-ratio-avg"); + this.pollIdleSensor.add(metrics.metricName("poll-idle-ratio-avg", + metricGroupName, + "The average fraction of time the consumer's poll() is idle as opposed to waiting for the user code to process records."), + new Avg()); + + this.commitSyncSensor = metrics.sensor("commit-sync-time-ns-total"); + this.commitSyncSensor.add( + metrics.metricName( + "commit-sync-time-ns-total", + metricGroupName, + "The total time the consumer has spent in commitSync in nanoseconds" + ), + new CumulativeSum() + ); + + } + + public void recordPollStart(long pollStartMs) { + this.pollStartMs = pollStartMs; + this.timeSinceLastPollMs = lastPollMs != 0L ? 
pollStartMs - lastPollMs : 0; + this.timeBetweenPollSensor.record(timeSinceLastPollMs); + this.lastPollMs = pollStartMs; + } + + public void recordPollEnd(long pollEndMs) { + long pollTimeMs = pollEndMs - pollStartMs; + double pollIdleRatio = pollTimeMs * 1.0 / (pollTimeMs + timeSinceLastPollMs); + this.pollIdleSensor.record(pollIdleRatio); + } + + public void recordCommitSync(long duration) { + this.commitSyncSensor.record(duration); + } + + + @Override + public void close() { + metrics.removeMetric(lastPollMetricName); + metrics.removeSensor(timeBetweenPollSensor.name()); + metrics.removeSensor(pollIdleSensor.name()); + metrics.removeSensor(commitSyncSensor.name()); + } +} diff --git a/clients/src/main/java/org/oracle/okafka/clients/consumer/internals/SubscriptionState.java b/clients/src/main/java/org/oracle/okafka/clients/consumer/internals/SubscriptionState.java index 60bdc79..ecef791 100644 --- a/clients/src/main/java/org/oracle/okafka/clients/consumer/internals/SubscriptionState.java +++ b/clients/src/main/java/org/oracle/okafka/clients/consumer/internals/SubscriptionState.java @@ -1,9 +1,9 @@ /* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ + ** OKafka Java Client version 23.4. + ** + ** Copyright (c) 2019, 2024 Oracle and/or its affiliates. + ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. + */ /* * Licensed to the Apache Software Foundation (ASF) under one or more @@ -21,38 +21,52 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - -/* - * 04/20/2020: This file is modified to support Kafka Java Client compatability to Oracle Transactional Event Queues. 
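The sensors registered in `OkafkaConsumerMetrics` (and in `ConsumerCoordinatorMetrics` earlier) surface through the consumer's `metrics()` map. A sketch that prints them follows; the group names assume the `consumer` prefix used in this code (`consumer-metrics`, `consumer-coordinator-metrics`) and may differ if that prefix changes.

```java
// Dump the OKafka consumer metrics registered above, e.g. last-poll-seconds-ago,
// commit-sync-time-ns-total, rebalance-total. Group names are assumptions based
// on the "consumer" prefix in this patch.
import java.util.Map;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.common.Metric;
import org.apache.kafka.common.MetricName;

final class MetricsDumpSketch {
    private MetricsDumpSketch() { }

    static void dump(Consumer<?, ?> consumer) {
        Map<MetricName, ? extends Metric> metrics = consumer.metrics();
        metrics.forEach((name, metric) -> {
            if (name.group().equals("consumer-metrics")
                    || name.group().equals("consumer-coordinator-metrics")) {
                System.out.printf("%s/%s = %s%n",
                        name.group(), name.name(), metric.metricValue());
            }
        });
    }
}
```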
- * - */ - package org.oracle.okafka.clients.consumer.internals; -import org.oracle.okafka.clients.consumer.ConsumerRebalanceListener; -import org.oracle.okafka.clients.consumer.OffsetAndMetadata; -import org.oracle.okafka.clients.consumer.OffsetResetStrategy; -import org.oracle.okafka.common.TopicPartition; -import org.oracle.okafka.common.internals.PartitionStates; -import org.oracle.okafka.common.requests.IsolationLevel; +import org.apache.kafka.clients.ApiVersions; +import org.apache.kafka.clients.Metadata; +import org.apache.kafka.clients.NodeApiVersions; +import org.apache.kafka.clients.consumer.ConsumerRebalanceListener; +import org.apache.kafka.clients.consumer.NoOffsetForPartitionException; +import org.apache.kafka.clients.consumer.OffsetAndMetadata; +import org.apache.kafka.clients.consumer.OffsetResetStrategy; +import org.apache.kafka.common.IsolationLevel; +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.internals.PartitionStates; +import org.apache.kafka.common.message.ApiVersionsResponseData.ApiVersion; +import org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.EpochEndOffset; +import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.requests.OffsetsForLeaderEpochRequest; +import org.apache.kafka.common.utils.LogContext; +import org.slf4j.Logger; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; +import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.Objects; +import java.util.Optional; import java.util.Set; +import java.util.function.LongSupplier; +import java.util.function.Predicate; import java.util.regex.Pattern; +///import static org.apache.kafka.clients.consumer.internals.Fetcher.hasUsableOffsetForLeaderEpochVersion; +import static org.apache.kafka.common.requests.OffsetsForLeaderEpochResponse.UNDEFINED_EPOCH; +import static org.apache.kafka.common.requests.OffsetsForLeaderEpochResponse.UNDEFINED_EPOCH_OFFSET; + /** * A class for tracking the topics, partitions, and offsets for the consumer. A partition * is "assigned" either directly with {@link #assignFromUser(Set)} (manual assignment) * or with {@link #assignFromSubscribed(Collection)} (automatic assignment from subscription). * * Once assigned, the partition is not considered "fetchable" until its initial position has - * been set with {@link #seek(TopicPartition, long)}. Fetchable partitions track a fetch + * been set with {@link #seekValidated(TopicPartition, FetchPosition)}. Fetchable partitions track a fetch * position which is used to set the offset of the next fetch, and a consumed position * which is the last offset that has been returned to the user. You can suspend fetching * from a partition through {@link #pause(TopicPartition)} without affecting the fetched/consumed @@ -61,11 +75,15 @@ * * Note that pause state as well as fetch/consumed positions are not preserved when partition * assignment is changed whether directly by the user or through a group rebalance. + * + * Thread Safety: this class is thread-safe. 
*/ public class SubscriptionState { private static final String SUBSCRIPTION_EXCEPTION_MESSAGE = "Subscription to topics, partitions and pattern are mutually exclusive"; + private final Logger log; + private enum SubscriptionType { NONE, AUTO_TOPICS, AUTO_PATTERN, USER_ASSIGNED } @@ -79,8 +97,10 @@ private enum SubscriptionType { /* the list of topics the user has requested */ private Set subscription; - /* the list of topics the group has subscribed to (set only for the leader on join group completion) */ - private final Set groupSubscription; + /* The list of topics the group has subscribed to. This may include some topics which are not part + * of `subscription` for the leader of a group since it is responsible for detecting metadata changes + * which require a group rebalance. */ + private Set groupSubscription; /* the partitions that are currently assigned, note that the order of partition matters (see FetchBuilder for more details) */ private final PartitionStates assignment; @@ -88,23 +108,55 @@ private enum SubscriptionType { /* Default offset reset strategy */ private final OffsetResetStrategy defaultResetStrategy; - /* Listeners provide a hook for internal state cleanup (e.g. metrics) on assignment changes */ - private final List listeners = new ArrayList<>(); - - /* Tracks last consumed offset for each topic partition */ - private final Map allConsumed; - /* User-provided listener to be invoked when assignment changes */ private ConsumerRebalanceListener rebalanceListener; - public SubscriptionState(OffsetResetStrategy defaultResetStrategy) { + private int assignmentId = 0; + + @Override + public synchronized String toString() { + return "SubscriptionState{" + + "type=" + subscriptionType + + ", subscribedPattern=" + subscribedPattern + + ", subscription=" + String.join(",", subscription) + + ", groupSubscription=" + String.join(",", groupSubscription) + + ", defaultResetStrategy=" + defaultResetStrategy + + ", assignment=" + assignment.partitionStateValues() + " (id=" + assignmentId + ")}"; + } + + public synchronized String prettyString() { + switch (subscriptionType) { + case NONE: + return "None"; + case AUTO_TOPICS: + return "Subscribe(" + String.join(",", subscription) + ")"; + case AUTO_PATTERN: + return "Subscribe(" + subscribedPattern + ")"; + case USER_ASSIGNED: + return "Assign(" + assignedPartitions() + " , id=" + assignmentId + ")"; + default: + throw new IllegalStateException("Unrecognized subscription type: " + subscriptionType); + } + } + + public SubscriptionState(LogContext logContext, OffsetResetStrategy defaultResetStrategy) { + this.log = logContext.logger(this.getClass()); this.defaultResetStrategy = defaultResetStrategy; - this.subscription = Collections.emptySet(); + this.subscription = new HashSet<>(); this.assignment = new PartitionStates<>(); this.groupSubscription = new HashSet<>(); this.subscribedPattern = null; this.subscriptionType = SubscriptionType.NONE; - this.allConsumed = new HashMap<>(); + } + + /** + * Monotonically increasing id which is incremented after every assignment change. This can + * be used to check when an assignment has changed. 
+ * + * @return The current assignment Id + */ + synchronized int assignmentId() { + return assignmentId; } /** @@ -120,47 +172,53 @@ else if (this.subscriptionType != type) throw new IllegalStateException(SUBSCRIPTION_EXCEPTION_MESSAGE); } - public void subscribe(Set topics, ConsumerRebalanceListener listener) { - //if (listener == null) - // throw new IllegalArgumentException("RebalanceListener cannot be null"); - + public synchronized boolean subscribe(Set topics, ConsumerRebalanceListener listener) { + registerRebalanceListener(listener); setSubscriptionType(SubscriptionType.AUTO_TOPICS); + return changeSubscription(topics); + } - this.rebalanceListener = listener; - - changeSubscription(topics); + public synchronized void subscribe(Pattern pattern, ConsumerRebalanceListener listener) { + registerRebalanceListener(listener); + setSubscriptionType(SubscriptionType.AUTO_PATTERN); + this.subscribedPattern = pattern; } - public void subscribeFromPattern(Set topics) { + public synchronized boolean subscribeFromPattern(Set topics) { if (subscriptionType != SubscriptionType.AUTO_PATTERN) throw new IllegalArgumentException("Attempt to subscribe from pattern while subscription type set to " + subscriptionType); - changeSubscription(topics); + return changeSubscription(topics); } - private void changeSubscription(Set topicsToSubscribe) { - if (!this.subscription.equals(topicsToSubscribe)) { - this.subscription = topicsToSubscribe; - } + private boolean changeSubscription(Set topicsToSubscribe) { + if (subscription.equals(topicsToSubscribe)) + return false; + + subscription = topicsToSubscribe; + return true; } /** - * Add topics to the current group subscription. This is used by the group leader to ensure + * Set the current group subscription. This is used by the group leader to ensure * that it receives metadata updates for all topics that the group is interested in. - * @param topics The topics to add to the group subscription + * + * @param topics All topics from the group subscription + * @return true if the group subscription contains topics which are not part of the local subscription */ - public void groupSubscribe(Collection topics) { - if (this.subscriptionType == SubscriptionType.USER_ASSIGNED) + synchronized boolean groupSubscribe(Collection topics) { + if (!hasAutoAssignedPartitions()) throw new IllegalStateException(SUBSCRIPTION_EXCEPTION_MESSAGE); - this.groupSubscription.addAll(topics); + groupSubscription = new HashSet<>(topics); + return !subscription.containsAll(groupSubscription); } /** * Reset the group's subscription to only contain topics subscribed by this consumer. */ - public void resetGroupSubscription() { - this.groupSubscription.retainAll(subscription); + synchronized void resetGroupSubscription() { + groupSubscription = Collections.emptySet(); } /** @@ -168,324 +226,728 @@ public void resetGroupSubscription() { * note this is different from {@link #assignFromSubscribed(Collection)} * whose input partitions are provided from the subscribed topics. 
*/ - public void assignFromUser(Set partitions) { + public synchronized boolean assignFromUser(Set partitions) { setSubscriptionType(SubscriptionType.USER_ASSIGNED); - if (!this.assignment.partitionSet().equals(partitions)) { - fireOnAssignment(partitions); + if (this.assignment.partitionSet().equals(partitions)) + return false; + + assignmentId++; + + // update the subscribed topics + Set manualSubscribedTopics = new HashSet<>(); + Map partitionToState = new HashMap<>(); + for (TopicPartition partition : partitions) { + TopicPartitionState state = assignment.stateValue(partition); + if (state == null) + state = new TopicPartitionState(); + partitionToState.put(partition, state); + + manualSubscribedTopics.add(partition.topic()); + } - Map partitionToState = new HashMap<>(); - for (TopicPartition partition : partitions) { - TopicPartitionState state = assignment.stateValue(partition); - if (state == null) - state = new TopicPartitionState(); - partitionToState.put(partition, state); + this.assignment.set(partitionToState); + return changeSubscription(manualSubscribedTopics); + } + + /** + * @return true if assignments matches subscription, otherwise false + */ + public synchronized boolean checkAssignmentMatchedSubscription(Collection assignments) { + for (TopicPartition topicPartition : assignments) { + if (this.subscribedPattern != null) { + if (!this.subscribedPattern.matcher(topicPartition.topic()).matches()) { + log.info("Assigned partition {} for non-subscribed topic regex pattern; subscription pattern is {}", + topicPartition, + this.subscribedPattern); + + return false; + } + } else { + if (!this.subscription.contains(topicPartition.topic())) { + log.info("Assigned partition {} for non-subscribed topic; subscription is {}", topicPartition, this.subscription); + + return false; + } } - this.assignment.set(partitionToState); } + + return true; } /** - * Change the assignment to the specified partitions returned from the coordinator, - * note this is different from {@link #assignFromUser(Set)} which directly set the assignment from user inputs + * Change the assignment to the specified partitions returned from the coordinator, note this is + * different from {@link #assignFromUser(Set)} which directly set the assignment from user inputs. 
*/ - public void assignFromSubscribed(Collection assignments) { - if (!this.partitionsAutoAssigned()) + public synchronized void assignFromSubscribed(Collection assignments) { + if (!this.hasAutoAssignedPartitions()) throw new IllegalArgumentException("Attempt to dynamically assign partitions while manual assignment in use"); - Map assignedPartitionStates = partitionToStateMap(assignments); - fireOnAssignment(assignedPartitionStates.keySet()); - - if (this.subscribedPattern != null) { - for (TopicPartition tp : assignments) { - if (!this.subscribedPattern.matcher(tp.topic()).matches()) - throw new IllegalArgumentException("Assigned partition " + tp + " for non-subscribed topic regex pattern; subscription pattern is " + this.subscribedPattern); - } - } else { - for (TopicPartition tp : assignments) - if (!this.subscription.contains(tp.topic())) - throw new IllegalArgumentException("Assigned partition " + tp + " for non-subscribed topic; subscription is " + this.subscription); + Map assignedPartitionStates = new HashMap<>(assignments.size()); + for (TopicPartition tp : assignments) { + TopicPartitionState state = this.assignment.stateValue(tp); + if (state == null) + state = new TopicPartitionState(); + assignedPartitionStates.put(tp, state); } + assignmentId++; this.assignment.set(assignedPartitionStates); } - public boolean hasPatternSubscription() { + private void registerRebalanceListener(ConsumerRebalanceListener listener) { + if (listener == null) + throw new IllegalArgumentException("RebalanceListener cannot be null"); + this.rebalanceListener = listener; + } + + /** + * Check whether pattern subscription is in use. + * + */ + synchronized boolean hasPatternSubscription() { return this.subscriptionType == SubscriptionType.AUTO_PATTERN; } - public boolean hasNoSubscription() { + public synchronized boolean hasNoSubscriptionOrUserAssignment() { return this.subscriptionType == SubscriptionType.NONE; } - public void unsubscribe() { + public synchronized void unsubscribe() { this.subscription = Collections.emptySet(); + this.groupSubscription = Collections.emptySet(); + this.assignment.clear(); + this.subscribedPattern = null; this.subscriptionType = SubscriptionType.NONE; - fireOnAssignment(Collections.emptySet()); + this.assignmentId++; } - public Pattern subscribedPattern() { - return this.subscribedPattern; + /** + * Check whether a topic matches a subscribed pattern. + * + * @return true if pattern subscription is in use and the topic matches the subscribed pattern, false otherwise + */ + synchronized boolean matchesSubscribedPattern(String topic) { + Pattern pattern = this.subscribedPattern; + if (hasPatternSubscription() && pattern != null) + return pattern.matcher(topic).matches(); + return false; } - public Set subscription() { - return this.subscription; + public synchronized Set subscription() { + if (hasAutoAssignedPartitions()) + return this.subscription; + return Collections.emptySet(); } - public Set pausedPartitions() { - HashSet paused = new HashSet<>(); - for (PartitionStates.PartitionState state : assignment.partitionStates()) { - if (state.value().paused) { - paused.add(state.topicPartition()); - } - } - return paused; + public synchronized Set pausedPartitions() { + return collectPartitions(TopicPartitionState::isPaused); } /** - * Get the subscription for the group. For the leader, this will include the union of the - * subscriptions of all group members. For followers, it is just that member's subscription. 
- * This is used when querying topic metadata to detect the metadata changes which would + * Get the subscription topics for which metadata is required. For the leader, this will include + * the union of the subscriptions of all group members. For followers, it is just that member's + * subscription. This is used when querying topic metadata to detect the metadata changes which would * require rebalancing. The leader fetches metadata for all topics in the group so that it * can do the partition assignment (which requires at least partition counts for all topics * to be assigned). + * * @return The union of all subscribed topics in the group if this member is the leader * of the current generation; otherwise it returns the same set as {@link #subscription()} */ - public Set groupSubscription() { - return this.groupSubscription; + public synchronized Set metadataTopics() { + if (groupSubscription.isEmpty()) + return subscription; + else if (groupSubscription.containsAll(subscription)) + return groupSubscription; + else { + // When subscription changes `groupSubscription` may be outdated, ensure that + // new subscription topics are returned. + Set topics = new HashSet<>(groupSubscription); + topics.addAll(subscription); + return topics; + } + } + + synchronized boolean needsMetadata(String topic) { + return subscription.contains(topic) || groupSubscription.contains(topic); } private TopicPartitionState assignedState(TopicPartition tp) { TopicPartitionState state = this.assignment.stateValue(tp); if (state == null) - this.assignment.add(new TopicPartition(tp.topic(), tp.partition()), new TopicPartitionState()); + throw new IllegalStateException("No current assignment for partition " + tp); + return state; + } + + private TopicPartitionState assignedStateOrNull(TopicPartition tp) { return this.assignment.stateValue(tp); } + public synchronized void seekValidated(TopicPartition tp, FetchPosition position) { + assignedState(tp).seekValidated(position); + } + public void seek(TopicPartition tp, long offset) { - assignedState(tp).seek(offset); + seekValidated(tp, new FetchPosition(offset)); } - public Set assignedPartitions() { - return this.assignment.partitionSet(); + public void seekUnvalidated(TopicPartition tp, FetchPosition position) { + assignedState(tp).seekUnvalidated(position); } - public List fetchablePartitions() { - List fetchable = new ArrayList<>(assignment.size()); - for (PartitionStates.PartitionState state : assignment.partitionStates()) { - if (state.value().isFetchable()) - fetchable.add(state.topicPartition()); + synchronized void maybeSeekUnvalidated(TopicPartition tp, FetchPosition position, OffsetResetStrategy requestedResetStrategy) { + TopicPartitionState state = assignedStateOrNull(tp); + if (state == null) { + log.debug("Skipping reset of partition {} since it is no longer assigned", tp); + } else if (!state.awaitingReset()) { + log.debug("Skipping reset of partition {} since reset is no longer needed", tp); + } else if (requestedResetStrategy != state.resetStrategy) { + log.debug("Skipping reset of partition {} since an alternative reset has been requested", tp); + } else { + log.info("Resetting offset for partition {} to position {}.", tp, position); + state.seekUnvalidated(position); } - return fetchable; } - public boolean partitionsAutoAssigned() { + /** + * @return a modifiable copy of the currently assigned partitions + */ + public synchronized Set assignedPartitions() { + return new HashSet<>(this.assignment.partitionSet()); + } + + /** + * @return a modifiable 
copy of the currently assigned partitions as a list + */ + public synchronized List assignedPartitionsList() { + return new ArrayList<>(this.assignment.partitionSet()); + } + + /** + * Provides the number of assigned partitions in a thread safe manner. + * @return the number of assigned partitions. + */ + synchronized int numAssignedPartitions() { + return this.assignment.size(); + } + + // Visible for testing + public synchronized List fetchablePartitions(Predicate isAvailable) { + // Since this is in the hot-path for fetching, we do this instead of using java.util.stream API + List result = new ArrayList<>(); + assignment.forEach((topicPartition, topicPartitionState) -> { + // Cheap check is first to avoid evaluating the predicate if possible + if (topicPartitionState.isFetchable() && isAvailable.test(topicPartition)) { + result.add(topicPartition); + } + }); + return result; + } + + public synchronized boolean hasAutoAssignedPartitions() { return this.subscriptionType == SubscriptionType.AUTO_TOPICS || this.subscriptionType == SubscriptionType.AUTO_PATTERN; } - public void position(TopicPartition tp, long offset) { - assignedState(tp).position(offset); + public synchronized void position(TopicPartition tp, FetchPosition position) { + assignedState(tp).position(position); + } + + /** + * Enter the offset validation state if the leader for this partition is known to support a usable version of the + * OffsetsForLeaderEpoch API. If the leader node does not support the API, simply complete the offset validation. + * + * @param apiVersions supported API versions + * @param tp topic partition to validate + * @param leaderAndEpoch leader epoch of the topic partition + * @return true if we enter the offset validation state + */ + public synchronized boolean maybeValidatePositionForCurrentLeader(ApiVersions apiVersions, + TopicPartition tp, + Metadata.LeaderAndEpoch leaderAndEpoch) { + if (leaderAndEpoch.leader.isPresent()) { + NodeApiVersions nodeApiVersions = apiVersions.get(leaderAndEpoch.leader.get().idString()); + if (nodeApiVersions == null || hasUsableOffsetForLeaderEpochVersion(nodeApiVersions)) { + return assignedState(tp).maybeValidatePosition(leaderAndEpoch); + } else { + // If the broker does not support a newer version of OffsetsForLeaderEpoch, we skip validation + assignedState(tp).updatePositionLeaderNoValidation(leaderAndEpoch); + return false; + } + } else { + return assignedState(tp).maybeValidatePosition(leaderAndEpoch); + } + } + // CHANGES for 2.8.1 . Moved SubscriptionState.java from org.apache.kafka.* package to org.oracle.okafka.* package + // Copied hasUsableOffsetForLeaderEpochVersion from org.apache.kafka.clients.consumer.internals.Fetcher.hasUsableOffsetForLeaderEpochVersion to here + + static boolean hasUsableOffsetForLeaderEpochVersion(NodeApiVersions nodeApiVersions) { + ApiVersion apiVersion = nodeApiVersions.apiVersion(ApiKeys.OFFSET_FOR_LEADER_EPOCH); + if (apiVersion == null) + return false; + + return OffsetsForLeaderEpochRequest.supportsTopicPermission(apiVersion.maxVersion()); + } + + /** + * Attempt to complete validation with the end offset returned from the OffsetForLeaderEpoch request. + * @return Log truncation details if detected and no reset policy is defined. 
+ */ + public synchronized Optional maybeCompleteValidation(TopicPartition tp, + FetchPosition requestPosition, + EpochEndOffset epochEndOffset) { + TopicPartitionState state = assignedStateOrNull(tp); + if (state == null) { + log.debug("Skipping completed validation for partition {} which is not currently assigned.", tp); + } else if (!state.awaitingValidation()) { + log.debug("Skipping completed validation for partition {} which is no longer expecting validation.", tp); + } else { + SubscriptionState.FetchPosition currentPosition = state.position; + if (!currentPosition.equals(requestPosition)) { + log.debug("Skipping completed validation for partition {} since the current position {} " + + "no longer matches the position {} when the request was sent", + tp, currentPosition, requestPosition); + } else if (epochEndOffset.endOffset() == UNDEFINED_EPOCH_OFFSET || + epochEndOffset.leaderEpoch() == UNDEFINED_EPOCH) { + if (hasDefaultOffsetResetPolicy()) { + log.info("Truncation detected for partition {} at offset {}, resetting offset", + tp, currentPosition); + requestOffsetReset(tp); + } else { + log.warn("Truncation detected for partition {} at offset {}, but no reset policy is set", + tp, currentPosition); + return Optional.of(new LogTruncation(tp, requestPosition, Optional.empty())); + } + } else if (epochEndOffset.endOffset() < currentPosition.offset) { + if (hasDefaultOffsetResetPolicy()) { + SubscriptionState.FetchPosition newPosition = new SubscriptionState.FetchPosition( + epochEndOffset.endOffset(), Optional.of(epochEndOffset.leaderEpoch()), + currentPosition.currentLeader); + log.info("Truncation detected for partition {} at offset {}, resetting offset to " + + "the first offset known to diverge {}", tp, currentPosition, newPosition); + state.seekValidated(newPosition); + } else { + OffsetAndMetadata divergentOffset = new OffsetAndMetadata(epochEndOffset.endOffset(), + Optional.of(epochEndOffset.leaderEpoch()), null); + log.warn("Truncation detected for partition {} at offset {} (the end offset from the " + + "broker is {}), but no reset policy is set", tp, currentPosition, divergentOffset); + return Optional.of(new LogTruncation(tp, requestPosition, Optional.of(divergentOffset))); + } + } else { + state.completeValidation(); + } + } + + return Optional.empty(); + } + + public synchronized boolean awaitingValidation(TopicPartition tp) { + return assignedState(tp).awaitingValidation(); + } + + public synchronized void completeValidation(TopicPartition tp) { + assignedState(tp).completeValidation(); + } + + public synchronized FetchPosition validPosition(TopicPartition tp) { + return assignedState(tp).validPosition(); } - public Long position(TopicPartition tp) { + public synchronized FetchPosition position(TopicPartition tp) { return assignedState(tp).position; } - public Long partitionLag(TopicPartition tp, IsolationLevel isolationLevel) { + synchronized Long partitionLag(TopicPartition tp, IsolationLevel isolationLevel) { TopicPartitionState topicPartitionState = assignedState(tp); if (isolationLevel == IsolationLevel.READ_COMMITTED) - return topicPartitionState.lastStableOffset == null ? null : topicPartitionState.lastStableOffset - topicPartitionState.position; + return topicPartitionState.lastStableOffset == null ? null : topicPartitionState.lastStableOffset - topicPartitionState.position.offset; else - return topicPartitionState.highWatermark == null ? null : topicPartitionState.highWatermark - topicPartitionState.position; + return topicPartitionState.highWatermark == null ? 
null : topicPartitionState.highWatermark - topicPartitionState.position.offset; } - public Long partitionLead(TopicPartition tp) { + synchronized Long partitionLead(TopicPartition tp) { TopicPartitionState topicPartitionState = assignedState(tp); - return topicPartitionState.logStartOffset == null ? null : topicPartitionState.position - topicPartitionState.logStartOffset; + return topicPartitionState.logStartOffset == null ? null : topicPartitionState.position.offset - topicPartitionState.logStartOffset; + } + + synchronized void updateHighWatermark(TopicPartition tp, long highWatermark) { + assignedState(tp).highWatermark(highWatermark); + } + + synchronized void updateLogStartOffset(TopicPartition tp, long logStartOffset) { + assignedState(tp).logStartOffset(logStartOffset); + } + + synchronized void updateLastStableOffset(TopicPartition tp, long lastStableOffset) { + assignedState(tp).lastStableOffset(lastStableOffset); } - public void updateHighWatermark(TopicPartition tp, long highWatermark) { - assignedState(tp).highWatermark = highWatermark; + /** + * Set the preferred read replica with a lease timeout. After this time, the replica will no longer be valid and + * {@link #preferredReadReplica(TopicPartition, long)} will return an empty result. + * + * @param tp The topic partition + * @param preferredReadReplicaId The preferred read replica + * @param timeMs The time at which this preferred replica is no longer valid + */ + public synchronized void updatePreferredReadReplica(TopicPartition tp, int preferredReadReplicaId, LongSupplier timeMs) { + assignedState(tp).updatePreferredReadReplica(preferredReadReplicaId, timeMs); } - public void updateLogStartOffset(TopicPartition tp, long logStartOffset) { - assignedState(tp).logStartOffset = logStartOffset; + /** + * Get the preferred read replica + * + * @param tp The topic partition + * @param timeMs The current time + * @return Returns the current preferred read replica, if it has been set and if it has not expired. + */ + public synchronized Optional preferredReadReplica(TopicPartition tp, long timeMs) { + final TopicPartitionState topicPartitionState = assignedStateOrNull(tp); + if (topicPartitionState == null) { + return Optional.empty(); + } else { + return topicPartitionState.preferredReadReplica(timeMs); + } } - public void updateLastStableOffset(TopicPartition tp, long lastStableOffset) { - assignedState(tp).lastStableOffset = lastStableOffset; + /** + * Unset the preferred read replica. This causes the fetcher to go back to the leader for fetches. + * + * @param tp The topic partition + * @return true if the preferred read replica was set, false otherwise. 
+ */ + public synchronized Optional clearPreferredReadReplica(TopicPartition tp) { + return assignedState(tp).clearPreferredReadReplica(); } - public Map allConsumed() { + public synchronized Map allConsumed() { + Map allConsumed = new HashMap<>(); + assignment.forEach((topicPartition, partitionState) -> { + if (partitionState.hasValidPosition()) + allConsumed.put(topicPartition, new OffsetAndMetadata(partitionState.position.offset, + partitionState.position.offsetEpoch, "")); + }); return allConsumed; } - public void requestOffsetReset(TopicPartition partition, OffsetResetStrategy offsetResetStrategy) { + public synchronized void requestOffsetReset(TopicPartition partition, OffsetResetStrategy offsetResetStrategy) { assignedState(partition).reset(offsetResetStrategy); } + public synchronized void requestOffsetReset(Collection partitions, OffsetResetStrategy offsetResetStrategy) { + partitions.forEach(tp -> { + log.info("Seeking to {} offset of partition {}", offsetResetStrategy, tp); + assignedState(tp).reset(offsetResetStrategy); + }); + } + public void requestOffsetReset(TopicPartition partition) { requestOffsetReset(partition, defaultResetStrategy); } - public void setResetPending(Set partitions, long nextAllowResetTimeMs) { + synchronized void setNextAllowedRetry(Set partitions, long nextAllowResetTimeMs) { for (TopicPartition partition : partitions) { - assignedState(partition).setResetPending(nextAllowResetTimeMs); + assignedState(partition).setNextAllowedRetry(nextAllowResetTimeMs); } } - public boolean hasDefaultOffsetResetPolicy() { + boolean hasDefaultOffsetResetPolicy() { return defaultResetStrategy != OffsetResetStrategy.NONE; } - public boolean isOffsetResetNeeded(TopicPartition partition) { + public synchronized boolean isOffsetResetNeeded(TopicPartition partition) { return assignedState(partition).awaitingReset(); } - public OffsetResetStrategy resetStrategy(TopicPartition partition) { - return assignedState(partition).resetStrategy; + public synchronized OffsetResetStrategy resetStrategy(TopicPartition partition) { + return assignedState(partition).resetStrategy(); } - public boolean hasAllFetchPositions() { - for (PartitionStates.PartitionState state : assignment.partitionStates()) { - if (!state.value().hasValidPosition()) + public synchronized boolean hasAllFetchPositions() { + // Since this is in the hot-path for fetching, we do this instead of using java.util.stream API + Iterator it = assignment.stateIterator(); + while (it.hasNext()) { + if (!it.next().hasValidPosition()) { return false; + } } return true; } - public Set missingFetchPositions() { - Set missing = new HashSet<>(); - for (PartitionStates.PartitionState state : assignment.partitionStates()) { - if (state.value().isMissingPosition()) - missing.add(state.topicPartition()); - } - return missing; + public synchronized Set initializingPartitions() { + return collectPartitions(state -> state.fetchState.equals(FetchStates.INITIALIZING)); } - public Set partitionsNeedingReset(long nowMs) { - Set partitions = new HashSet<>(); - for (PartitionStates.PartitionState state : assignment.partitionStates()) { - TopicPartitionState partitionState = state.value(); - if (partitionState.awaitingReset() && partitionState.isResetAllowed(nowMs)) - partitions.add(state.topicPartition()); - } - return partitions; + private Set collectPartitions(Predicate filter) { + Set result = new HashSet<>(); + assignment.forEach((topicPartition, topicPartitionState) -> { + if (filter.test(topicPartitionState)) { + 
result.add(topicPartition); + } + }); + return result; } - public boolean isAssigned(TopicPartition tp) { - return assignment.contains(tp); + + public synchronized void resetInitializingPositions() { + final Set partitionsWithNoOffsets = new HashSet<>(); + assignment.forEach((tp, partitionState) -> { + if (partitionState.fetchState.equals(FetchStates.INITIALIZING)) { + if (defaultResetStrategy == OffsetResetStrategy.NONE) + partitionsWithNoOffsets.add(tp); + else + requestOffsetReset(tp); + } + }); + + if (!partitionsWithNoOffsets.isEmpty()) + throw new NoOffsetForPartitionException(partitionsWithNoOffsets); } - public boolean isPaused(TopicPartition tp) { - return isAssigned(tp) && assignedState(tp).paused; + public synchronized Set partitionsNeedingReset(long nowMs) { + return collectPartitions(state -> state.awaitingReset() && !state.awaitingRetryBackoff(nowMs)); } - public boolean isFetchable(TopicPartition tp) { - return isAssigned(tp) && assignedState(tp).isFetchable(); + public synchronized Set partitionsNeedingValidation(long nowMs) { + return collectPartitions(state -> state.awaitingValidation() && !state.awaitingRetryBackoff(nowMs)); } - public boolean hasValidPosition(TopicPartition tp) { - return isAssigned(tp) && assignedState(tp).hasValidPosition(); + public synchronized boolean isAssigned(TopicPartition tp) { + return assignment.contains(tp); } - public void pause(TopicPartition tp) { - assignedState(tp).pause(); + public synchronized boolean isPaused(TopicPartition tp) { + TopicPartitionState assignedOrNull = assignedStateOrNull(tp); + return assignedOrNull != null && assignedOrNull.isPaused(); } - public void resume(TopicPartition tp) { - assignedState(tp).resume(); + synchronized boolean isFetchable(TopicPartition tp) { + TopicPartitionState assignedOrNull = assignedStateOrNull(tp); + return assignedOrNull != null && assignedOrNull.isFetchable(); } - public void resetFailed(Set partitions, long nextRetryTimeMs) { - for (TopicPartition partition : partitions) - assignedState(partition).resetFailed(nextRetryTimeMs); + public synchronized boolean hasValidPosition(TopicPartition tp) { + TopicPartitionState assignedOrNull = assignedStateOrNull(tp); + return assignedOrNull != null && assignedOrNull.hasValidPosition(); } - public void movePartitionToEnd(TopicPartition tp) { - assignment.moveToEnd(tp); + public synchronized void pause(TopicPartition tp) { + assignedState(tp).pause(); } - public ConsumerRebalanceListener rebalanceListener() { - return rebalanceListener; + public synchronized void resume(TopicPartition tp) { + assignedState(tp).resume(); } - public void addListener(Listener listener) { - listeners.add(listener); + synchronized void requestFailed(Set partitions, long nextRetryTimeMs) { + for (TopicPartition partition : partitions) { + // by the time the request failed, the assignment may no longer + // contain this partition any more, in which case we would just ignore. 
+ final TopicPartitionState state = assignedStateOrNull(partition); + if (state != null) + state.requestFailed(nextRetryTimeMs); + } } - public void fireOnAssignment(Set assignment) { - for (Listener listener : listeners) - listener.onAssignment(assignment); + synchronized void movePartitionToEnd(TopicPartition tp) { + assignment.moveToEnd(tp); } - private static Map partitionToStateMap(Collection assignments) { - Map map = new HashMap<>(assignments.size()); - for (TopicPartition tp : assignments) - map.put(tp, new TopicPartitionState()); - return map; + public synchronized ConsumerRebalanceListener rebalanceListener() { + return rebalanceListener; } private static class TopicPartitionState { - private Long position; // last consumed position + + private FetchState fetchState; + private FetchPosition position; // last consumed position + private Long highWatermark; // the high watermark from last fetch private Long logStartOffset; // the log start offset private Long lastStableOffset; private boolean paused; // whether this partition has been paused by the user private OffsetResetStrategy resetStrategy; // the strategy to use if the offset needs resetting - private Long nextAllowedRetryTimeMs; + private Long nextRetryTimeMs; + private Integer preferredReadReplica; + private Long preferredReadReplicaExpireTimeMs; TopicPartitionState() { this.paused = false; + this.fetchState = FetchStates.INITIALIZING; this.position = null; this.highWatermark = null; this.logStartOffset = null; this.lastStableOffset = null; this.resetStrategy = null; - this.nextAllowedRetryTimeMs = null; + this.nextRetryTimeMs = null; + this.preferredReadReplica = null; + } + + private void transitionState(FetchState newState, Runnable runIfTransitioned) { + FetchState nextState = this.fetchState.transitionTo(newState); + if (nextState.equals(newState)) { + this.fetchState = nextState; + runIfTransitioned.run(); + if (this.position == null && nextState.requiresPosition()) { + throw new IllegalStateException("Transitioned subscription state to " + nextState + ", but position is null"); + } else if (!nextState.requiresPosition()) { + this.position = null; + } + } + } + + private Optional preferredReadReplica(long timeMs) { + if (preferredReadReplicaExpireTimeMs != null && timeMs > preferredReadReplicaExpireTimeMs) { + preferredReadReplica = null; + return Optional.empty(); + } else { + return Optional.ofNullable(preferredReadReplica); + } + } + + private void updatePreferredReadReplica(int preferredReadReplica, LongSupplier timeMs) { + if (this.preferredReadReplica == null || preferredReadReplica != this.preferredReadReplica) { + this.preferredReadReplica = preferredReadReplica; + this.preferredReadReplicaExpireTimeMs = timeMs.getAsLong(); + } + } + + private Optional clearPreferredReadReplica() { + if (preferredReadReplica != null) { + int removedReplicaId = this.preferredReadReplica; + this.preferredReadReplica = null; + this.preferredReadReplicaExpireTimeMs = null; + return Optional.of(removedReplicaId); + } else { + return Optional.empty(); + } } private void reset(OffsetResetStrategy strategy) { - this.resetStrategy = strategy; - this.position = null; - this.nextAllowedRetryTimeMs = null; + transitionState(FetchStates.AWAIT_RESET, () -> { + this.resetStrategy = strategy; + this.nextRetryTimeMs = null; + }); + } + + /** + * Check if the position exists and needs to be validated. If so, enter the AWAIT_VALIDATION state. This method + * also will update the position with the current leader and epoch. 
+ * + * @param currentLeaderAndEpoch leader and epoch to compare the offset with + * @return true if the position is now awaiting validation + */ + private boolean maybeValidatePosition(Metadata.LeaderAndEpoch currentLeaderAndEpoch) { + if (this.fetchState.equals(FetchStates.AWAIT_RESET)) { + return false; + } + + if (!currentLeaderAndEpoch.leader.isPresent()) { + return false; + } + + if (position != null && !position.currentLeader.equals(currentLeaderAndEpoch)) { + FetchPosition newPosition = new FetchPosition(position.offset, position.offsetEpoch, currentLeaderAndEpoch); + validatePosition(newPosition); + preferredReadReplica = null; + } + return this.fetchState.equals(FetchStates.AWAIT_VALIDATION); + } + + /** + * For older versions of the API, we cannot perform offset validation so we simply transition directly to FETCHING + */ + private void updatePositionLeaderNoValidation(Metadata.LeaderAndEpoch currentLeaderAndEpoch) { + if (position != null) { + transitionState(FetchStates.FETCHING, () -> { + this.position = new FetchPosition(position.offset, position.offsetEpoch, currentLeaderAndEpoch); + this.nextRetryTimeMs = null; + }); + } + } + + private void validatePosition(FetchPosition position) { + if (position.offsetEpoch.isPresent() && position.currentLeader.epoch.isPresent()) { + transitionState(FetchStates.AWAIT_VALIDATION, () -> { + this.position = position; + this.nextRetryTimeMs = null; + }); + } else { + // If we have no epoch information for the current position, then we can skip validation + transitionState(FetchStates.FETCHING, () -> { + this.position = position; + this.nextRetryTimeMs = null; + }); + } + } + + /** + * Clear the awaiting validation state and enter fetching. + */ + private void completeValidation() { + //if (hasPosition()) + { + transitionState(FetchStates.FETCHING, () -> this.nextRetryTimeMs = null); + } + } + + private boolean awaitingValidation() { + return fetchState.equals(FetchStates.AWAIT_VALIDATION); } - private boolean isResetAllowed(long nowMs) { - return nextAllowedRetryTimeMs == null || nowMs >= nextAllowedRetryTimeMs; + private boolean awaitingRetryBackoff(long nowMs) { + return nextRetryTimeMs != null && nowMs < nextRetryTimeMs; } private boolean awaitingReset() { - return resetStrategy != null; + return fetchState.equals(FetchStates.AWAIT_RESET); } - private void setResetPending(long nextAllowedRetryTimeMs) { - this.nextAllowedRetryTimeMs = nextAllowedRetryTimeMs; + private void setNextAllowedRetry(long nextAllowedRetryTimeMs) { + this.nextRetryTimeMs = nextAllowedRetryTimeMs; } - private void resetFailed(long nextAllowedRetryTimeMs) { - this.nextAllowedRetryTimeMs = nextAllowedRetryTimeMs; + private void requestFailed(long nextAllowedRetryTimeMs) { + this.nextRetryTimeMs = nextAllowedRetryTimeMs; } private boolean hasValidPosition() { + return fetchState.hasValidPosition(); + } + + private boolean hasPosition() { return position != null; } - private boolean isMissingPosition() { - return !hasValidPosition() && !awaitingReset(); + private boolean isPaused() { + return paused; + } + + private void seekValidated(FetchPosition position) { + transitionState(FetchStates.FETCHING, () -> { + this.position = position; + this.resetStrategy = null; + this.nextRetryTimeMs = null; + }); } - private void seek(long offset) { - this.position = offset; - this.resetStrategy = OffsetResetStrategy.TO_OFFSET; - this.nextAllowedRetryTimeMs = null; + private void seekUnvalidated(FetchPosition fetchPosition) { + seekValidated(fetchPosition); + 
validatePosition(fetchPosition); } - private void position(long offset) { + private void position(FetchPosition position) { if (!hasValidPosition()) throw new IllegalStateException("Cannot set a new position without a valid current position"); - this.position = offset; + this.position = position; + } + + private FetchPosition validPosition() { + if (hasValidPosition()) { + return position; + } else { + return null; + } } private void pause() { @@ -500,16 +962,208 @@ private boolean isFetchable() { return !paused && hasValidPosition(); } + private void highWatermark(Long highWatermark) { + this.highWatermark = highWatermark; + } + + private void logStartOffset(Long logStartOffset) { + this.logStartOffset = logStartOffset; + } + + private void lastStableOffset(Long lastStableOffset) { + this.lastStableOffset = lastStableOffset; + } + + private OffsetResetStrategy resetStrategy() { + return resetStrategy; + } } - public interface Listener { + /** + * The fetch state of a partition. This class is used to determine valid state transitions and expose the some of + * the behavior of the current fetch state. Actual state variables are stored in the {@link TopicPartitionState}. + */ + interface FetchState { + default FetchState transitionTo(FetchState newState) { + if (validTransitions().contains(newState)) { + return newState; + } else { + return this; + } + } + /** - * Fired after a new assignment is received (after a group rebalance or when the user manually changes the - * assignment). - * - * @param assignment The topic partitions assigned to the consumer + * Return the valid states which this state can transition to + */ + Collection validTransitions(); + + /** + * Test if this state requires a position to be set + */ + boolean requiresPosition(); + + /** + * Test if this state is considered to have a valid position which can be used for fetching */ - void onAssignment(Set assignment); + boolean hasValidPosition(); + } + + /** + * An enumeration of all the possible fetch states. The state transitions are encoded in the values returned by + * {@link FetchState#validTransitions}. + */ + enum FetchStates implements FetchState { + INITIALIZING() { + @Override + public Collection validTransitions() { + return Arrays.asList(FetchStates.FETCHING, FetchStates.AWAIT_RESET, FetchStates.AWAIT_VALIDATION); + } + + @Override + public boolean requiresPosition() { + return false; + } + + @Override + public boolean hasValidPosition() { + return false; + } + }, + + FETCHING() { + @Override + public Collection validTransitions() { + return Arrays.asList(FetchStates.FETCHING, FetchStates.AWAIT_RESET, FetchStates.AWAIT_VALIDATION); + } + + @Override + public boolean requiresPosition() { + return true; + } + + @Override + public boolean hasValidPosition() { + return true; + } + }, + + AWAIT_RESET() { + @Override + public Collection validTransitions() { + return Arrays.asList(FetchStates.FETCHING, FetchStates.AWAIT_RESET); + } + + @Override + public boolean requiresPosition() { + return false; + } + + @Override + public boolean hasValidPosition() { + return false; + } + }, + + AWAIT_VALIDATION() { + @Override + public Collection validTransitions() { + return Arrays.asList(FetchStates.FETCHING, FetchStates.AWAIT_RESET, FetchStates.AWAIT_VALIDATION); + } + + @Override + public boolean requiresPosition() { + return true; + } + + @Override + public boolean hasValidPosition() { + return false; + } + } + } + + /** + * Represents the position of a partition subscription. 
+ * + * This includes the offset and epoch from the last record in + * the batch from a FetchResponse. It also includes the leader epoch at the time the batch was consumed. + */ + public static class FetchPosition { + public final long offset; + final Optional offsetEpoch; + final Metadata.LeaderAndEpoch currentLeader; + + FetchPosition(long offset) { + this(offset, Optional.empty(), Metadata.LeaderAndEpoch.noLeaderOrEpoch()); + } + + public FetchPosition(long offset, Optional offsetEpoch, Metadata.LeaderAndEpoch currentLeader) { + this.offset = offset; + this.offsetEpoch = Objects.requireNonNull(offsetEpoch); + this.currentLeader = Objects.requireNonNull(currentLeader); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + FetchPosition that = (FetchPosition) o; + return offset == that.offset && + offsetEpoch.equals(that.offsetEpoch) && + currentLeader.equals(that.currentLeader); + } + + @Override + public int hashCode() { + return Objects.hash(offset, offsetEpoch, currentLeader); + } + + @Override + public String toString() { + return "FetchPosition{" + + "offset=" + offset + + ", offsetEpoch=" + offsetEpoch + + ", currentLeader=" + currentLeader + + '}'; + } } + public static class LogTruncation { + public final TopicPartition topicPartition; + public final FetchPosition fetchPosition; + public final Optional divergentOffsetOpt; + + public LogTruncation(TopicPartition topicPartition, + FetchPosition fetchPosition, + Optional divergentOffsetOpt) { + this.topicPartition = topicPartition; + this.fetchPosition = fetchPosition; + this.divergentOffsetOpt = divergentOffsetOpt; + } + + @Override + public String toString() { + StringBuilder bldr = new StringBuilder() + .append("(partition=") + .append(topicPartition) + .append(", fetchOffset=") + .append(fetchPosition.offset) + .append(", fetchEpoch=") + .append(fetchPosition.offsetEpoch); + + if (divergentOffsetOpt.isPresent()) { + OffsetAndMetadata divergentOffset = divergentOffsetOpt.get(); + bldr.append(", divergentOffset=") + .append(divergentOffset.offset()) + .append(", divergentEpoch=") + .append(divergentOffset.leaderEpoch()); + } else { + bldr.append(", divergentOffset=unknown") + .append(", divergentEpoch=unknown"); + } + + return bldr.append(")").toString(); + + } + } } diff --git a/clients/src/main/java/org/oracle/okafka/clients/producer/BufferExhaustedException.java b/clients/src/main/java/org/oracle/okafka/clients/producer/BufferExhaustedException.java deleted file mode 100644 index 8d54955..0000000 --- a/clients/src/main/java/org/oracle/okafka/clients/producer/BufferExhaustedException.java +++ /dev/null @@ -1,41 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.clients.producer; - -import org.oracle.okafka.common.KafkaException; - -/** - * This exception is thrown if the producer is in non-blocking mode and the rate of data production exceeds the rate at - * which data can be sent for long enough for the allocated buffer to be exhausted. - */ -public class BufferExhaustedException extends KafkaException { - - private static final long serialVersionUID = 1L; - - public BufferExhaustedException(String message) { - super(message); - } - -} diff --git a/clients/src/main/java/org/oracle/okafka/clients/producer/Callback.java b/clients/src/main/java/org/oracle/okafka/clients/producer/Callback.java deleted file mode 100644 index ff9b52e..0000000 --- a/clients/src/main/java/org/oracle/okafka/clients/producer/Callback.java +++ /dev/null @@ -1,44 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.oracle.okafka.clients.producer; - -/** - * A callback interface that the user can implement to allow code to execute when the request is complete. This callback - * will generally execute in the background I/O thread so it should be fast. - */ -public interface Callback { - - /** - * A callback method the user can implement to provide synchronous handling of request completion. This method will - * be called when the record sent to the server has been acknowledged. Exactly one of the arguments will be - * non-null. - * @param metadata The metadata for the record that was sent (i.e. the partition and offset). Null if an error - * occurred. - * @param exception The exception thrown during processing of this record. Null if no error occurred. - * Possible thrown exceptions include: - * org.oracle.okafka.common.errors.TimeoutException thrown when a batch is expired. - * javax.jms.JMSException thrown for any other exception. 
- */ - public void onCompletion(RecordMetadata metadata, Exception exception); -} diff --git a/clients/src/main/java/org/oracle/okafka/clients/producer/KafkaProducer.java b/clients/src/main/java/org/oracle/okafka/clients/producer/KafkaProducer.java index 77fdf33..0aae174 100644 --- a/clients/src/main/java/org/oracle/okafka/clients/producer/KafkaProducer.java +++ b/clients/src/main/java/org/oracle/okafka/clients/producer/KafkaProducer.java @@ -1,9 +1,9 @@ /* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ + ** OKafka Java Client version 23.4. + ** + ** Copyright (c) 2019, 2024 Oracle and/or its affiliates. + ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. + */ /* * Licensed to the Apache Software Foundation (ASF) under one or more @@ -21,942 +21,2514 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - -/* - * 04/20/2020: This file is modified to support Kafka Java Client compatability to Oracle Transactional Event Queues. - * - */ - package org.oracle.okafka.clients.producer; -import static org.oracle.okafka.common.serialization.ExtendedSerializer.Wrapper.ensureExtended; - -import java.net.InetSocketAddress; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicReference; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.Future; -import java.util.concurrent.TimeUnit; -import java.util.Collections; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.Properties; - -import org.oracle.okafka.clients.producer.Callback; -import org.oracle.okafka.clients.producer.ProducerRecord; -import org.oracle.okafka.clients.ClientUtils; -import org.oracle.okafka.clients.CommonClientConfigs; +import org.apache.kafka.clients.ApiVersions; +import org.apache.kafka.clients.ClientDnsLookup; +import org.apache.kafka.clients.ClientRequest; +import org.apache.kafka.clients.ClientResponse; +import org.apache.kafka.clients.ClientUtils; +import org.apache.kafka.clients.CommonClientConfigs; import org.oracle.okafka.clients.KafkaClient; -import org.oracle.okafka.clients.Metadata; import org.oracle.okafka.clients.NetworkClient; -import org.oracle.okafka.clients.admin.AdminClientConfig; -import org.oracle.okafka.clients.consumer.OffsetAndMetadata; -import org.oracle.okafka.clients.producer.ProducerConfig; -import org.oracle.okafka.clients.producer.internals.AQKafkaProducer; -import org.oracle.okafka.clients.producer.internals.ProducerInterceptors; -import org.oracle.okafka.clients.producer.internals.ProducerMetrics; +import org.apache.kafka.clients.admin.AdminClientConfig; +import org.apache.kafka.clients.consumer.ConsumerGroupMetadata; +import org.apache.kafka.clients.consumer.KafkaConsumer; +import org.apache.kafka.clients.consumer.OffsetAndMetadata; +import org.apache.kafka.clients.consumer.OffsetCommitCallback; +import org.apache.kafka.clients.producer.Callback; +import org.apache.kafka.clients.producer.Partitioner; +import org.apache.kafka.clients.producer.Producer; +import org.apache.kafka.clients.producer.ProducerInterceptor; +import org.apache.kafka.clients.producer.ProducerRecord; +import org.apache.kafka.clients.producer.RecordMetadata; +import org.apache.kafka.clients.producer.internals.*; import 
org.oracle.okafka.clients.producer.internals.RecordAccumulator; +import org.apache.kafka.clients.producer.internals.TransactionManager; +import org.apache.kafka.common.Cluster; +import org.apache.kafka.common.KafkaException; +import org.apache.kafka.common.Metric; +import org.apache.kafka.common.MetricName; +import org.apache.kafka.common.Node; +import org.apache.kafka.common.PartitionInfo; +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.Uuid; +import org.apache.kafka.common.config.ConfigException; +import org.apache.kafka.common.errors.ApiException; +import org.apache.kafka.common.errors.AuthenticationException; +import org.apache.kafka.common.errors.AuthorizationException; +import org.apache.kafka.common.errors.DisconnectException; +import org.apache.kafka.common.errors.InterruptException; +import org.apache.kafka.common.errors.InvalidTopicException; +import org.apache.kafka.common.errors.ProducerFencedException; +import org.apache.kafka.common.errors.RecordTooLargeException; +import org.apache.kafka.common.errors.SerializationException; +import org.apache.kafka.common.errors.TimeoutException; +import org.apache.kafka.common.header.Header; +import org.apache.kafka.common.header.Headers; +import org.apache.kafka.common.header.internals.RecordHeaders; +import org.apache.kafka.common.internals.ClusterResourceListeners; +import org.apache.kafka.common.metrics.JmxReporter; +import org.apache.kafka.common.metrics.KafkaMetricsContext; +import org.apache.kafka.common.metrics.MetricConfig; +import org.apache.kafka.common.metrics.Metrics; +import org.apache.kafka.common.metrics.MetricsContext; +import org.apache.kafka.common.metrics.MetricsReporter; +import org.apache.kafka.common.metrics.Sensor; +import org.apache.kafka.common.record.AbstractRecords; +import org.apache.kafka.common.record.CompressionType; +import org.apache.kafka.common.record.RecordBatch; +import org.apache.kafka.common.requests.JoinGroupRequest; +import org.apache.kafka.common.serialization.Serializer; +import org.apache.kafka.common.utils.AppInfoParser; +import org.apache.kafka.common.utils.KafkaThread; +import org.apache.kafka.common.utils.LogContext; +import org.apache.kafka.common.utils.Time; +import org.apache.kafka.common.utils.Utils; +import org.oracle.okafka.clients.Metadata; +import org.oracle.okafka.clients.producer.internals.AQKafkaProducer; +import org.oracle.okafka.clients.producer.internals.OracleTransactionManager; +import org.oracle.okafka.clients.producer.internals.OkafkaProducerMetrics; import org.oracle.okafka.clients.producer.internals.SenderThread; -import org.oracle.okafka.common.Cluster; -import org.oracle.okafka.common.KafkaException; -import org.oracle.okafka.common.Metric; -import org.oracle.okafka.common.MetricName; -import org.oracle.okafka.common.PartitionInfo; -import org.oracle.okafka.common.TopicPartition; -import org.oracle.okafka.common.config.ConfigException; import org.oracle.okafka.common.config.SslConfigs; -import org.oracle.okafka.common.errors.ApiException; import org.oracle.okafka.common.errors.FeatureNotSupportedException; -import org.oracle.okafka.common.errors.InterruptException; import org.oracle.okafka.common.errors.InvalidLoginCredentialsException; -import org.oracle.okafka.common.errors.ProducerFencedException; -import org.oracle.okafka.common.errors.RecordTooLargeException; -import org.oracle.okafka.common.errors.SerializationException; -import org.oracle.okafka.common.errors.TimeoutException; -import org.oracle.okafka.common.header.Header; -import 
org.oracle.okafka.common.header.Headers; -import org.oracle.okafka.common.header.internals.RecordHeaders; -import org.oracle.okafka.common.internals.ClusterResourceListeners; -import org.oracle.okafka.common.metrics.JmxReporter; -import org.oracle.okafka.common.metrics.MetricConfig; -import org.oracle.okafka.common.metrics.Metrics; -import org.oracle.okafka.common.metrics.MetricsReporter; -import org.oracle.okafka.common.metrics.Sensor; -import org.oracle.okafka.common.record.AbstractRecords; -import org.oracle.okafka.common.record.CompressionType; -import org.oracle.okafka.common.record.RecordBatch; -import org.oracle.okafka.common.serialization.ExtendedSerializer; -import org.oracle.okafka.common.serialization.Serializer; -import org.oracle.okafka.common.utils.AppInfoParser; -import org.oracle.okafka.common.utils.KafkaThread; -import org.oracle.okafka.common.utils.LogContext; -import org.oracle.okafka.common.utils.Time; +import org.oracle.okafka.common.requests.MetadataResponse; +import org.oracle.okafka.common.utils.ConnectionUtils; import org.oracle.okafka.common.utils.TNSParser; import org.slf4j.Logger; +import java.net.InetSocketAddress; +import java.sql.Connection; +import java.sql.SQLException; +import java.time.Duration; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Properties; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; /** - * Note: Topic name has to be in uppercase wherever used. - * A Java client that publishes records into the Transactional Event Queues. + * An OKafka client that publishes records to the Oracle's Transactional Event + * Queue (TxEQ) messaging broker. + *

    + * The producer is thread safe.
    + *
    - * The producer internally stores these records in batches in a buffer pool. And IO thread running in the background sends these batches synchronously one at a time.
    - *
    - * The producer is thread safe i.e multiple threads can use same producer instance to publish records.
    - *
    - * Here is a simple example of using the producer to send records with strings containing sequential numbers as the key/value
    - * pairs.
    + * Here is a simple example of using the producer to send records with strings
    + * containing sequential numbers as the key/value pairs.
    + *
      * {@code
      * Properties props = new Properties();
    - * props.put("oracle.instance.name", "instancename");
    - * props.put("oracle.service.name", "serviceid.regress.rdbms.dev.us.oracle.com");	    
    - * props.put("oracle.user.name", "username");
    - * props.put("oracle.password", "pwd");
    - * props.put("bootstrap.servers", "localhost:9092");
    - * props.put("batch.size", 16384);
    + * props.put("bootstrap.servers", "localhost:1521");
    + * props.put("oracle.service.name", "freepdb1");
    + * props.put("oracle.net.tns_admin", ".");
      * props.put("linger.ms", 1);
    - * props.put("buffer.memory", 33554432);
    - * props.put("retries", 0);
    - * props.put("key.serializer", "org.oracle.okafka.common.serialization.StringSerializer");
    - * props.put("value.serializer", "org.oracle.okafka.common.serialization.StringSerializer");
    + * props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    + * props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
      *
      * Producer producer = new KafkaProducer<>(props);
      * for (int i = 0; i < 100; i++)
    - *     producer.send(new ProducerRecord("my-topic", Integer.toString(i), Integer.toString(i)));
    + * 	producer.send(new ProducerRecord("my-topic", Integer.toString(i), Integer.toString(i)));
    + *
      * producer.close();
      * }
    *

    - * The producer consists of a pool of buffer space that holds records that haven't yet been transmitted to the server - * as well as a background I/O thread that is responsible for turning these records into requests and transmitting them - * to the cluster. Failure to close the producer after use will leak these resources. + * This producer connects to Oracle database instance running on local host on + * port 1521. This is specified by property bootstrap.servers It + * connects to service named 'freepdb1' specified using property + * oracle.service.name It reads authentication and other JDBC + * driver parameters to connect to Oracle database from the directory specified + * via property oracle.net.tns_admin + *

    + *

    + * The producer consists of a pool of buffer space that holds records that + * haven't yet been transmitted to the server as well as a background I/O thread + * that is responsible for turning these records into requests and transmitting + * them to the Oracle database. Failure to close the producer after use will + * leak these resources. + *
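    + *
    + * As an illustration, the producer can be guarded with try/finally so that
    + * close() always runs and the buffers and background I/O thread are
    + * released (topic, key and value below are placeholders):
    + *
    + * {@code
    + * Producer producer = new KafkaProducer<>(props);
    + * try {
    + *     producer.send(new ProducerRecord<>("my-topic", "key", "value"));
    + * } finally {
    + *     producer.close();
    + * }
    + * }
    + *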

    + * The {@link #send(ProducerRecord) send()} method is asynchronous. When called + * it adds the record to a buffer of pending record sends and immediately + * returns. This allows the producer to batch together individual records for + * efficiency. + *
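    + *
    + * A minimal sketch of making a send effectively synchronous by blocking on the
    + * returned Future (reusing the producer and topic from the snippet
    + * above; the cast is needed because that example uses a raw Producer):
    + *
    + * {@code
    + * try {
    + *     RecordMetadata metadata = (RecordMetadata) producer.send(
    + *             new ProducerRecord<>("my-topic", "key", "value")).get();
    + *     System.out.println("Stored in partition " + metadata.partition());
    + * } catch (ExecutionException | InterruptedException e) {
    + *     // the send failed or the wait was interrupted
    + * }
    + * }
    + *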

    + * If the request fails, the producer can automatically retry. The + * retries setting defaults to Integer.MAX_VALUE. + *

    + * The producer maintains buffers of unsent records for each partition. These + * buffers are of a size specified by the batch.size config. Making + * this larger can result in more batching, but requires more memory (since we + * will generally have one of these buffers for each active partition). + *

    + * By default a buffer is available to send immediately even if there is + * additional unused space in the buffer. However if you want to reduce the + * number of requests you can set linger.ms to something greater + * than 0. This will instruct the producer to wait up to that number of + * milliseconds before sending a request in hope that more records will arrive + * to fill up the same batch. This is analogous to Nagle's algorithm in TCP. For + * example, in the code snippet above, likely all 100 records would be sent in a + * single request since we set our linger time to 1 millisecond. However this + * setting would add 1 millisecond of latency to our request waiting for more + * records to arrive if we didn't fill up the buffer. Note that records that + * arrive close together in time will generally batch together even with + * linger.ms=0 so under heavy load batching will occur regardless + * of the linger configuration; however setting this to something larger than 0 + * can lead to fewer, more efficient requests when not under maximal load at the + * cost of a small amount of latency. + *

    + * The buffer.memory controls the total amount of memory available + * to the producer for buffering. If records are sent faster than they can be + * transmitted to the server then this buffer space will be exhausted. When the + * buffer space is exhausted additional send calls will block. The threshold for + * time to block is determined by max.block.ms after which it + * throws a TimeoutException. + *
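    + *
    + * The batching and buffering settings discussed above can be combined as shown
    + * below; the values are purely illustrative, not recommendations:
    + *
    + * {@code
    + * props.put("batch.size", 16384);       // bytes per per-partition batch
    + * props.put("linger.ms", 5);            // wait up to 5 ms for more records
    + * props.put("buffer.memory", 33554432); // 32 MB of total buffer space
    + * props.put("max.block.ms", 60000);     // block send() at most 60 s when the buffer is full
    + * }
    + *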

    + * The key.serializer and value.serializer instruct + * how to turn the key and value objects the user provides with their + * ProducerRecord into bytes. You can use the included + * {@link org.apache.kafka.common.serialization.ByteArraySerializer} or + * {@link org.apache.kafka.common.serialization.StringSerializer} for simple + * string or byte types. *
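    + *
    + * Where the bundled serializers are not enough, a custom
    + * {@link org.apache.kafka.common.serialization.Serializer} can be plugged in
    + * through key.serializer / value.serializer. The class below
    + * is a hypothetical sketch:
    + *
    + * {@code
    + * public class UpperCaseStringSerializer implements Serializer<String> {
    + *     public byte[] serialize(String topic, String data) {
    + *         // encode the upper-cased string as UTF-8 bytes; pass nulls through
    + *         return data == null ? null : data.toUpperCase().getBytes(java.nio.charset.StandardCharsets.UTF_8);
    + *     }
    + * }
    + * }
    + *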

    - * The {@link #send(ProducerRecord) send()} method is asynchronous. When called it adds the record to a buffer of pending record sends - * and immediately returns. This allows the producer to batch together individual records for efficiency. + * From OKafka 23.4, the KafkaProducer supports two additional modes: the + * idempotent producer and the transactional producer. The idempotent producer + * strengthens OKafka's delivery semantics from at least once to exactly once + * delivery. In particular producer retries will no longer introduce duplicates. + * The transactional producer allows an application to send messages to multiple + * partitions (and topics!) atomically. + *

    *

    - * The acks config controls the criteria under which requests are considered complete. The "all" setting - * we have specified will result in blocking on the full commit of the record, the slowest but most durable setting. - * OKafka supports only default setting "all" + * To enable idempotence, the enable.idempotence configuration must + * be set to true. If set, the retries config will default to + * Integer.MAX_VALUE. There are no API changes for the idempotent + * producer, so existing applications will not need to be modified to take + * advantage of this feature. + *

    *

    - * The retries config resends the request if request fails with retriable excpetion. + * To take advantage of the idempotent producer, it is imperative to avoid + * application level re-sends since these cannot be de-duplicated. As such, if + * an application enables idempotence, it is recommended to leave the + * retries config unset, as it will be defaulted to + * Integer.MAX_VALUE. Additionally, if a + * {@link #send(ProducerRecord)} returns an error even with infinite retries + * (for instance if the message expires in the buffer before being sent), then + * it is recommended to shut down the producer and check the contents of the + * last produced message to ensure that it is not duplicated. Finally, the + * producer can only guarantee idempotence for messages sent within a single + * session. *

    - * The producer maintains buffers of unsent records for each partition. These buffers are of a size specified by - * the batch.size config. Making this larger can result in more batching, but requires more memory (since we will - * generally have one of these buffers for each active partition). *

    - * By default a buffer is available to send immediately even if there is additional unused space in the buffer. However if you
    - * want to reduce the number of requests you can set linger.ms to something greater than 0. This will
    - * instruct the producer to wait up to that number of milliseconds before sending a request in hope that more records will
    - * arrive to fill up the same batch. This is analogous to Nagle's algorithm in TCP. For example, in the code snippet above,
    - * likely all 100 records would be sent in a single request since we set our linger time to 1 millisecond. However this setting
    - * would add 1 millisecond of latency to our request waiting for more records to arrive if we didn't fill up the buffer. Note that
    - * records that arrive close together in time will generally batch together even with linger.ms=0 so under heavy load
    - * batching will occur regardless of the linger configuration; however setting this to something larger than 0 can lead to fewer, more
    - * efficient requests when not under maximal load at the cost of a small amount of latency.
    + * To use the transactional producer and the attendant APIs, the application
    + * must set the oracle.transactional.producer configuration property to
    + * true. The transactional producer is not thread safe, so the
    + * application must manage concurrent access to it. The transactional producer
    + * does not benefit from batching: each message is sent to the Oracle
    + * Transactional Event Queue broker in a separate request.
    + *

    *

    - * The buffer.memory controls the total amount of memory available to the producer for buffering. If records
    - * are sent faster than they can be transmitted to the server then this buffer space will be exhausted. When the buffer space is
    - * exhausted additional send calls will block. The threshold for time to block is determined by max.block.ms after which it throws
    - * a TimeoutException.
    + * The transactional producer can use {@link #getDBConnection()} to fetch the
    + * database connection that is used to send records to Oracle's Transactional
    + * Event Queue broker. {@link #commitTransaction()} atomically commits the DML
    + * operation(s) and send operation(s) performed within the current transaction.
    + * {@link #abortTransaction()} atomically rolls back the DML operation(s) and
    + * the producer records sent within the current transaction.
    + *
    + *

    *

    - * The key.serializer and value.serializer instruct how to turn the key and value objects the user provides with - * their ProducerRecord into bytes. You can use the included {@link org.oracle.okafka.common.serialization.ByteArraySerializer} or - * {@link org.oracle.okafka.common.serialization.StringSerializer} for simple string or byte types. + * All the new transactional APIs are blocking and will throw exceptions on + * failure. The example below illustrates how the new APIs are meant to be used. + * It is similar to the example above, except that all 100 messages are part of + * a single transaction. + *

    *

    - * The producer doesn't support idempotency and transactional behaviour yet. + * + *

    + * {@code
    + * Properties props = new Properties();
    + * props.put("bootstrap.servers", "localhost:1521");
    + * props.put("oracle.service.name", "freepdb1");
    + * props.put("oracle.net.tns_admin",".");
    + * props.put("oracle.transactional.producer", "true");
    + * Producer producer = new KafkaProducer<>(props, new StringSerializer(), new StringSerializer());
    + *
    + * producer.initTransactions();
    + *
    + * try {
    + *     producer.beginTransaction();
    + *     Connection dbConn = ((KafkaProducer )producer).getDBConnection();
    + *     for (int i = 0; i < 100; i++) {
    + *     	   ProducerRecord pRecord = new ProducerRecord<>("my-topic", Integer.toString(i), Integer.toString(i));
    + *     	   processRecord(dbConn, pRecord);
    + *         producer.send(pRecord);
    + *     }
    + *     producer.commitTransaction();
    + * } catch (DisconnectException dcE) {
    + *  // Producer is disconnected from Oracle Transactional Event Queue broker.
    + *     producer.close();
    + * } 
    + * catch (KafkaException e) {
    + *     // For all exceptions, just abort the transaction and try again.
    + *     producer.abortTransaction();
    + * }
    + * producer.close();
    + * } 
    + *

    *

    - * */ + * As is hinted at in the example, there can be only one open transaction per + * producer. All messages sent between the {@link #beginTransaction()} and + * {@link #commitTransaction()} calls will be part of a single transaction. When + * the transactional.id is specified, all messages sent by the + * producer must be part of a transaction. + *

    + *

    + * The transactional producer uses exceptions to communicate error states. In
    + * particular, it is not required to specify callbacks for
    + * producer.send() or to call .get() on the returned
    + * Future. A KafkaException would be thrown if any of the
    + * producer.send() or transactional calls hit a recoverable error
    + * during a transaction. A DisconnectException would be thrown if the
    + * producer.commitTransaction() call hit an irrecoverable error
    + * during commit; at that point the producer cannot confirm whether the
    + * operations performed within the transaction were successfully committed or
    + * not. See the {@link #send(ProducerRecord)} documentation for more details
    + * about detecting errors from a transactional send.
    + *

    + * + *

    + * By calling producer.abortTransaction() upon receiving a + * KafkaException we can ensure that any successful writes are + * marked as aborted, hence keeping the transactional guarantees. + *

    + *

    + * An OKafka transactional producer can also be created by passing a pre-created
    + * Oracle database connection through
    + * {@link #KafkaProducer(Properties, Connection)} or similar overloaded
    + * constructors. The application must set the
    + * oracle.transactional.producer property to true here as well.
    + * A transactional producer created this way can be used for a
    + * 'consume-transform-produce' workflow, as the example below depicts. A
    + * consumer is created, a database connection is retrieved from the
    + * KafkaConsumer and passed to create a KafkaProducer. The consumer consumes
    + * records from "my-topic1". While processing the records, a transaction is
    + * started and within this transaction the processed records are sent to topic
    + * "my-topic2". When the KafkaProducer commits the transaction, both the
    + * consumed and produced records are committed. When the KafkaProducer aborts
    + * the transaction, all consumed and produced records are rolled back. Since
    + * the producer and consumer use the same database connection, all their
    + * operations are committed or aborted atomically.
    + *

    + *

    + * + *

    + * {@code
    + * Properties commonProps = new Properties();
    + * Properties cProps = new Properties();
    + * Properties pProps = new Properties();
    + *
    + * commonProps.put("bootstrap.servers", "localhost:1521");
    + * commonProps.put("oracle.service.name", "freepdb1");
    + * commonProps.put("oracle.net.tns_admin", ".");
    + * 
    + * //Create Consumer 
    + * cProps.putAll(commonProps);
    + * cProps.put("group.id", "S1");
    + * cProps.put("enable.auto.commit", "false");
    + * Consumer consumer = null;
    + * Producer producer = null;
    + * 
    + * try {
    + * 	consumer = new KafkaConsumer(cProps);
    + * 	consumer.subscribe(Arrays.asList("my-topic1"));
    + * 	Connection conn = ((KafkaConsumer) consumer).getDBConnection();
    + * 	// Create Producer
    + * 	pProps.put("oracle.transactional.producer", "true");
    + * 	producer = new KafkaProducer(pProps, conn);
    + * 
    + * 	while (true) {
    + * 		ConsumerRecords records = consumer.poll(Duration.ofMillis(10000));
    + * 		if (records != null && records.count() > 0) {
    + * 			producer.beginTransaction();
    + * 			try {
    + * 				for (ConsumerRecord consumerRecord : records) {
    + * 					ProducerRecord pRecord = transform(consumerRecord, "my-topic2");
    + * 					producer.send(pRecord);
    + * 				}
    + * 				// Commit all consumed and produced records
    + * 				producer.commitTransaction();
    + * 			} catch (DisconnectException dcE) {
    + * 				producer.close();
    + * 				throw dcE;
    + * 			} catch (KafkaException e) {
    + * 				// Re-process all the consumed record
    + * 				producer.abortTransaction();
    + * 			}
    + * 		}
    + * 	}
    + * } finally {
    + * 	producer.close();
    + * 	consumer.close();
    + * }
    + * }
    + * 
    + *

    + * + */ public class KafkaProducer implements Producer { - private final Logger log; - private static final AtomicInteger PROD_CLIENT_ID_SEQUENCE = new AtomicInteger(1); - private final String clientId; - private static final String JMX_PREFIX = "kafka.producer"; - public static final String NETWORK_THREAD_PREFIX = "kafka-producer-network-thread"; - private final ProducerConfig prodConfigs; - final Metrics metrics; - private final ExtendedSerializer keySerializer; - private final ExtendedSerializer valueSerializer; - - private final RecordAccumulator recordAccumulator; - /** - * The network client to use. - */ - private final KafkaClient client; - private final SenderThread sender; - private final KafkaThread ioThread; - private final Metadata metadata; - private final ProducerInterceptors interceptors; - private final CompressionType compressionType; - private final Partitioner partitioner; - private final long totalMemorySize; - private final long maxBlockTimeMs; - private final Time time; - private final int maxRequestSize; - private final int requestTimeoutMs; - /** - * A producer is instantiated by providing a set of key-value pairs as configuration. Valid configuration strings - * are documented here. Values can be - * either strings or Objects of the appropriate type (for example a numeric configuration would accept either the - * string "42" or the integer 42). - *

    - * Note: after creating a {@code KafkaProducer} you must always {@link #close()} it to avoid resource leaks. - * @param configs The producer configs - * - */ - public KafkaProducer(final Map configs) { - this(configs, null, null); - } - - /** - * A producer is instantiated by providing a set of key-value pairs as configuration, a key and a value {@link Serializer}. - * Valid configuration strings are documented here. - * Values can be either strings or Objects of the appropriate type (for example a numeric configuration would accept - * either the string "42" or the integer 42). - *

    - * Note: after creating a {@code KafkaProducer} you must always {@link #close()} it to avoid resource leaks. - * @param configs The producer configs - * @param keySerializer The serializer for key that implements {@link Serializer}. The configure() method won't be - * called in the producer when the serializer is passed in directly. - * @param valueSerializer The serializer for value that implements {@link Serializer}. The configure() method won't - * be called in the producer when the serializer is passed in directly. - */ - public KafkaProducer(Map configs, Serializer keySerializer, Serializer valueSerializer) { - this(new ProducerConfig(ProducerConfig.addSerializerToConfig(configs, keySerializer, valueSerializer)), keySerializer, valueSerializer, null, null); - } - - /** - * A producer is instantiated by providing a set of key-value pairs as configuration. Valid configuration strings - * are documented here. - *

    - * Note: after creating a {@code KafkaProducer} you must always {@link #close()} it to avoid resource leaks. - * @param properties The producer configs - */ - public KafkaProducer(Properties properties) { - this(properties, null, null); - } - - /** - * A producer is instantiated by providing a set of key-value pairs as configuration, a key and a value {@link Serializer}. - * Valid configuration strings are documented here. - *

    - * Note: after creating a {@code KafkaProducer} you must always {@link #close()} it to avoid resource leaks. - * @param properties The producer configs - * @param keySerializer The serializer for key that implements {@link Serializer}. The configure() method won't be - * called in the producer when the serializer is passed in directly. - * @param valueSerializer The serializer for value that implements {@link Serializer}. The configure() method won't - * be called in the producer when the serializer is passed in directly. - */ - public KafkaProducer(Properties properties, Serializer keySerializer, Serializer valueSerializer) { - - this(new ProducerConfig(ProducerConfig.addSerializerToConfig(properties, keySerializer, valueSerializer)), keySerializer, valueSerializer, null, null); - } - - - @SuppressWarnings("unchecked") - // visible for testing - KafkaProducer(ProducerConfig prodConfigs, Serializer keySerializer, Serializer valueSerializer, Metadata metadata, KafkaClient kafkaClient) { - Map userProvidedConfigs = prodConfigs.originals(); - this.time = Time.SYSTEM; + + private final Logger log; + private static final String JMX_PREFIX = "kafka.producer"; + public static final String NETWORK_THREAD_PREFIX = "kafka-producer-network-thread"; + public static final String PRODUCER_METRIC_GROUP_NAME = "producer-metrics"; + + private final String clientId; + // Visible for testing + final Metrics metrics; + private final Partitioner partitioner; + private final int maxRequestSize; + private final long totalMemorySize; + private final Metadata metadata; + private final RecordAccumulator accumulator; + private final SenderThread sender; + private final Thread ioThread; + private final CompressionType compressionType; + private final Sensor errors; + private final Time time; + private final Serializer keySerializer; + private final Serializer valueSerializer; + private final ProducerConfig producerConfig; + private final long maxBlockTimeMs; + private final ProducerInterceptors interceptors; + private final ApiVersions apiVersions; + private final TransactionManager transactionManager; + private final KafkaClient client; + private boolean transactionalProducer = false; + private AQKafkaProducer aqProducer = null; + private boolean transactionInitDone = false; + private OracleTransactionManager oracleTransctionManager; + // Visible for testing + private final OkafkaProducerMetrics okpMetrics; + + /** + * A producer is instantiated by providing a set of key-value pairs as + * configuration. Valid configuration strings are documented here. Values + * can be either strings or Objects of the appropriate type (for example a + * numeric configuration would accept either the string "42" or the integer 42). + *

    + * Note: after creating a {@code KafkaProducer} you must always {@link #close()} + * it to avoid resource leaks. + * + * @param configs The producer configs + * + */ + public KafkaProducer(final Map configs) { + this(configs, null, null); + } + + /** + * A producer is instantiated by providing a set of key-value pairs as + * configuration. And a valid Connection object to Oracle Database. Valid + * configuration strings are documented here. + * Values can be either strings or Objects of the appropriate type (for example + * a numeric configuration would accept either the string "42" or the integer + * 42). + *

    + * Note: after creating a {@code KafkaProducer} you must always {@link #close()} + * it to avoid resource leaks. + * + * @param configs The producer configs + * @param conn Connection to Oracle Database + * + */ + public KafkaProducer(final Map configs, Connection conn) { + this(configs, null, null, conn); + } + + /** + * A producer is instantiated by providing a set of key-value pairs as + * configuration, a key and a value {@link Serializer}. Valid configuration + * strings are documented here. + * Values can be either strings or Objects of the appropriate type (for example + * a numeric configuration would accept either the string "42" or the integer + * 42). + *

    + * Note: after creating a {@code KafkaProducer} you must always {@link #close()} + * it to avoid resource leaks. + * + * @param configs The producer configs + * @param keySerializer The serializer for key that implements + * {@link Serializer}. The configure() method won't be + * called in the producer when the serializer is passed + * in directly. + * @param valueSerializer The serializer for value that implements + * {@link Serializer}. The configure() method won't be + * called in the producer when the serializer is passed + * in directly. + */ + public KafkaProducer(Map configs, Serializer keySerializer, Serializer valueSerializer) { + this(new ProducerConfig(ProducerConfig.appendSerializerToConfig(configs, keySerializer, valueSerializer)), + keySerializer, valueSerializer, null, null, null, Time.SYSTEM, null); + } + + /** + * A producer is instantiated by providing a set of key-value pairs as + * configuration, a key and a value {@link Serializer} and Connection to Oracle + * Database, versioned 23c and above. Valid configuration strings are documented + * here. + * Values can be either strings or Objects of the appropriate type (for example + * a numeric configuration would accept either the string "42" or the integer + * 42). + *

    + * Note: after creating a {@code KafkaProducer} you must always {@link #close()} + * it to avoid resource leaks. + * + * @param configs The producer configs + * @param keySerializer The serializer for key that implements + * {@link Serializer}. The configure() method won't be + * called in the producer when the serializer is passed + * in directly. + * @param valueSerializer The serializer for value that implements + * {@link Serializer}. The configure() method won't be + * called in the producer when the serializer is passed + * in directly. + * @param conn Connection to Oracle Database + * + */ + public KafkaProducer(Map configs, Serializer keySerializer, Serializer valueSerializer, + Connection conn) { + this(new ProducerConfig(ProducerConfig.appendSerializerToConfig(configs, keySerializer, valueSerializer)), + keySerializer, valueSerializer, null, null, null, Time.SYSTEM, conn); + } + + /** + * A producer is instantiated by providing a set of key-value pairs as + * configuration. Valid configuration strings are documented here. + *

    + * Note: after creating a {@code KafkaProducer} you must always {@link #close()} + * it to avoid resource leaks. + * + * @param properties The producer configs + */ + public KafkaProducer(Properties properties) { + this(properties, null, null); + + } + + /** + * A producer is instantiated by providing a set of key-value pairs as + * configuration. Valid configuration strings are documented here. + *

    + * Note: after creating a {@code KafkaProducer} you must always {@link #close()} + * it to avoid resource leaks. + * + * @param properties The producer configs + * @param conn Connection to Oracle Database + */ + + public KafkaProducer(Properties properties, Connection conn) { + this(properties, null, null, conn); + } + + /** + * A producer is instantiated by providing a set of key-value pairs as + * configuration, a key and a value {@link Serializer}. Valid configuration + * strings are documented here. + *

    + * Note: after creating a {@code KafkaProducer} you must always {@link #close()} + * it to avoid resource leaks. + * + * @param properties The producer configs + * @param keySerializer The serializer for key that implements + * {@link Serializer}. The configure() method won't be + * called in the producer when the serializer is passed + * in directly. + * @param valueSerializer The serializer for value that implements + * {@link Serializer}. The configure() method won't be + * called in the producer when the serializer is passed + * in directly. + */ + public KafkaProducer(Properties properties, Serializer keySerializer, Serializer valueSerializer) { + this(Utils.propsToMap(properties), keySerializer, valueSerializer); + } + + /** + * A producer is instantiated by providing a set of key-value pairs as + * configuration, a key and a value {@link Serializer} and Connection to Oracle + * Database. Valid configuration strings are documented here. + *

    + * Note: after creating a {@code KafkaProducer} you must always {@link #close()} + * it to avoid resource leaks. + * + * @param properties The producer configs + * @param keySerializer The serializer for key that implements + * {@link Serializer}. The configure() method won't be + * called in the producer when the serializer is passed + * in directly. + * @param valueSerializer The serializer for value that implements + * {@link Serializer}. The configure() method won't be + * called in the producer when the serializer is passed + * in directly. + * @param conn Connection to Oracle Database + */ + public KafkaProducer(Properties properties, Serializer keySerializer, Serializer valueSerializer, + Connection conn) { + this(Utils.propsToMap(properties), keySerializer, valueSerializer, conn); + } + + // visible for testing + @SuppressWarnings("unchecked") + KafkaProducer(ProducerConfig config, Serializer keySerializer, Serializer valueSerializer, Metadata metadata, + KafkaClient kafkaClient, ProducerInterceptors interceptors, Time time, Connection conn) { + try { + this.producerConfig = config; + this.time = time; + + String transactionalId = config.getString(ProducerConfig.TRANSACTIONAL_ID_CONFIG); + this.clientId = config.getString(ProducerConfig.CLIENT_ID_CONFIG); + + LogContext logContext; + if (transactionalId == null) + logContext = new LogContext(String.format("[Producer clientId=%s] ", clientId)); + else + logContext = new LogContext( + String.format("[Producer clientId=%s, transactionalId=%s] ", clientId, transactionalId)); + + log = logContext.logger(KafkaProducer.class); + log.trace("Starting the Kafka producer"); + + try { + transactionalProducer = config.getBoolean(ProducerConfig.ORACLE_TRANSACTIONAL_PRODUCER); + oracleTransctionManager = new OracleTransactionManager(logContext); + } catch (Exception e) { + transactionalProducer = false; + } + log.debug("Transactioal Producer set to " + transactionalProducer); + + Map metricTags = Collections.singletonMap("client-id", clientId); + MetricConfig metricConfig = new MetricConfig() + .samples(config.getInt(ProducerConfig.METRICS_NUM_SAMPLES_CONFIG)) + .timeWindow(config.getLong(ProducerConfig.METRICS_SAMPLE_WINDOW_MS_CONFIG), TimeUnit.MILLISECONDS) + .recordLevel(Sensor.RecordingLevel + .forName(config.getString(ProducerConfig.METRICS_RECORDING_LEVEL_CONFIG))) + .tags(metricTags); + + List reporters = config.getConfiguredInstances( + ProducerConfig.METRIC_REPORTER_CLASSES_CONFIG, MetricsReporter.class, + Collections.singletonMap(ProducerConfig.CLIENT_ID_CONFIG, clientId)); + + JmxReporter jmxReporter = new JmxReporter(); + jmxReporter + .configure(config.originals(Collections.singletonMap(ProducerConfig.CLIENT_ID_CONFIG, clientId))); + reporters.add(jmxReporter); + + MetricsContext metricsContext = new KafkaMetricsContext(JMX_PREFIX, + config.originalsWithPrefix(CommonClientConfigs.METRICS_CONTEXT_PREFIX)); + + this.metrics = new Metrics(metricConfig, reporters, time, metricsContext); + this.okpMetrics = new OkafkaProducerMetrics(metrics); + this.partitioner = config.getConfiguredInstance(ProducerConfig.PARTITIONER_CLASS_CONFIG, Partitioner.class, + Collections.singletonMap(ProducerConfig.CLIENT_ID_CONFIG, clientId)); + + long retryBackoffMs = config.getLong(ProducerConfig.RETRY_BACKOFF_MS_CONFIG); + if (keySerializer == null) { + this.keySerializer = config.getConfiguredInstance(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, + Serializer.class); + this.keySerializer.configure( + 
config.originals(Collections.singletonMap(ProducerConfig.CLIENT_ID_CONFIG, clientId)), true); + } else { + config.ignore(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG); + this.keySerializer = keySerializer; + } + if (valueSerializer == null) { + this.valueSerializer = config.getConfiguredInstance(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, + Serializer.class); + this.valueSerializer.configure( + config.originals(Collections.singletonMap(ProducerConfig.CLIENT_ID_CONFIG, clientId)), false); + } else { + config.ignore(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG); + this.valueSerializer = valueSerializer; + } + + List> interceptorList = (List) config.getConfiguredInstances( + ProducerConfig.INTERCEPTOR_CLASSES_CONFIG, ProducerInterceptor.class, + Collections.singletonMap(ProducerConfig.CLIENT_ID_CONFIG, clientId)); + if (interceptors != null) + this.interceptors = interceptors; + else + this.interceptors = new ProducerInterceptors<>(interceptorList); + + ClusterResourceListeners clusterResourceListeners = configureClusterResourceListeners(keySerializer, + valueSerializer, interceptorList, reporters); + + this.maxRequestSize = config.getInt(ProducerConfig.MAX_REQUEST_SIZE_CONFIG); + this.totalMemorySize = config.getLong(ProducerConfig.BUFFER_MEMORY_CONFIG); + this.compressionType = CompressionType.forName(config.getString(ProducerConfig.COMPRESSION_TYPE_CONFIG)); + + this.maxBlockTimeMs = config.getLong(ProducerConfig.MAX_BLOCK_MS_CONFIG); + int deliveryTimeoutMs = configureDeliveryTimeout(config, log); + + this.apiVersions = new ApiVersions(); + this.transactionManager = configureTransactionState(config, logContext); + + if (transactionalProducer) { + this.accumulator = null; + } else { + this.accumulator = new RecordAccumulator(logContext, config.getInt(ProducerConfig.BATCH_SIZE_CONFIG), + this.compressionType, lingerMs(config), retryBackoffMs, deliveryTimeoutMs, metrics, + PRODUCER_METRIC_GROUP_NAME, time, apiVersions, transactionManager, + new BufferPool(this.totalMemorySize, config.getInt(ProducerConfig.BATCH_SIZE_CONFIG), metrics, + time, PRODUCER_METRIC_GROUP_NAME)); + } + + /* + * List addresses = ClientUtils.parseAndValidateAddresses( + * config.getList(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG), + * config.getString(ProducerConfig.CLIENT_DNS_LOOKUP_CONFIG)); if (metadata != + * null) { this.metadata = metadata; } else { this.metadata = new + * ProducerMetadata(retryBackoffMs, + * config.getLong(ProducerConfig.METADATA_MAX_AGE_CONFIG), + * config.getLong(ProducerConfig.METADATA_MAX_IDLE_CONFIG), logContext, + * clusterResourceListeners, Time.SYSTEM); this.metadata.bootstrap(addresses); } + */ + + List addresses = null; + String serviceName = null; + String instanceName = null; + System.setProperty("oracle.net.tns_admin", config.getString(ProducerConfig.ORACLE_NET_TNS_ADMIN)); + + if (config.getString(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG).trim().equalsIgnoreCase("PLAINTEXT")) { + + addresses = ClientUtils.parseAndValidateAddresses( + config.getList(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG), + ClientDnsLookup.RESOLVE_CANONICAL_BOOTSTRAP_SERVERS_ONLY); + serviceName = config.getString(ProducerConfig.ORACLE_SERVICE_NAME); + instanceName = config.getString(ProducerConfig.ORACLE_INSTANCE_NAME); + } else { + if (config.getString(SslConfigs.TNS_ALIAS) == null) + throw new InvalidLoginCredentialsException("Please provide valid connection string"); + TNSParser parser = new TNSParser(config); + parser.readFile(); + String connStr = 
parser.getConnectionString(config.getString(SslConfigs.TNS_ALIAS).toUpperCase()); + if (connStr == null) + throw new InvalidLoginCredentialsException("Please provide valid connection string"); + String host = parser.getProperty(connStr, "HOST"); + String portStr = parser.getProperty(connStr, "PORT"); + serviceName = parser.getProperty(connStr, "SERVICE_NAME"); + int port; + if (host == null || portStr == null || serviceName == null) + throw new InvalidLoginCredentialsException("Please provide valid connection string"); + try { + port = Integer.parseInt(portStr); + } catch (NumberFormatException nfe) { + throw new InvalidLoginCredentialsException("Please provide valid connection string"); + } + instanceName = parser.getProperty(connStr, "INSTANCE_NAME"); + addresses = new ArrayList<>(); + addresses.add(new InetSocketAddress(host, port)); + } + if (metadata != null) { + this.metadata = metadata; + } else { + this.metadata = new Metadata(retryBackoffMs, config.getLong(ProducerConfig.METADATA_MAX_AGE_CONFIG), + true, true, clusterResourceListeners, config); + + /* + * this.metadata.update(Cluster.bootstrap(addresses, prodConfigs, serviceName, + * instanceName), Collections.emptySet(), time.milliseconds()); + */ + + { // Changes for 2.8.1 :: Create Bootstrap Cluster and pass it to metadata.update + // We must have OKafka Node with Service Name and Instance Name placed in the + // bootstrap cluster. + // For cluster created here, isBootstrapConfigured is not set to TRUE because it + // is not public + + ArrayList bootStrapNodeList = new ArrayList(addresses.size()); + int id = -1; + ConnectionUtils.remDuplicateEntries(addresses); + for (InetSocketAddress inetAddr : addresses) { + org.oracle.okafka.common.Node bootStrapNode = new org.oracle.okafka.common.Node(id--, + inetAddr.getHostName(), inetAddr.getPort(), serviceName, instanceName); + bootStrapNodeList.add((Node) bootStrapNode); + } + Cluster bootStrapCluster = new Cluster(null, bootStrapNodeList, new ArrayList<>(0), + Collections.emptySet(), Collections.emptySet()); + this.metadata.update(bootStrapCluster, Collections.emptySet(), time.milliseconds(), true); + } + } + + this.errors = this.metrics.sensor("errors"); + if (kafkaClient != null) { + client = kafkaClient; + } else { + aqProducer = new AQKafkaProducer(logContext, config, time, this.metadata, this.metrics, + this.oracleTransctionManager); + client = new NetworkClient(aqProducer, this.metadata, clientId, + config.getLong(AdminClientConfig.RECONNECT_BACKOFF_MS_CONFIG), + config.getLong(AdminClientConfig.RECONNECT_BACKOFF_MAX_MS_CONFIG), + config.getInt(AdminClientConfig.SEND_BUFFER_CONFIG), + config.getInt(AdminClientConfig.RECEIVE_BUFFER_CONFIG), (int) TimeUnit.HOURS.toMillis(1), time, + logContext); + } + if (transactionalProducer) { + this.sender = null; + this.ioThread = null; + log.debug("Setting externally supplied database conneciton "); + aqProducer.setExternalDbConnection(conn); + } else { + this.sender = newSender(logContext, kafkaClient, this.metadata); + String ioThreadName = NETWORK_THREAD_PREFIX + " | " + clientId; + this.ioThread = new KafkaThread(ioThreadName, this.sender, true); + this.ioThread.start(); + } + config.logUnused(); + + AppInfoParser.registerAppInfo(JMX_PREFIX, clientId, metrics, time.milliseconds()); + log.debug("Kafka producer started"); + } catch (Throwable t) { + // call close methods if internal objects are already constructed this is to + // prevent resource leak. 
see KAFKA-2121 + close(Duration.ofMillis(0), true); + // now propagate the exception + throw new KafkaException("Failed to construct kafka producer", t); + } + } + + // visible for testing + SenderThread newSender(LogContext logContext, KafkaClient kafkaClient, Metadata metadata) { + int maxInflightRequests = configureInflightRequests(producerConfig); + // int requestTimeoutMs = + // producerConfig.getInt(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG); + // ChannelBuilder channelBuilder = + // ClientUtils.createChannelBuilder(producerConfig, time, logContext); + // ProducerMetrics metricsRegistry = new ProducerMetrics(this.metrics); + // Sensor throttleTimeSensor = + // Sender.throttleTimeSensor(metricsRegistry.senderMetrics); + + /* + * KafkaClient client = kafkaClient != null ? kafkaClient : new NetworkClient( + * new + * Selector(producerConfig.getLong(ProducerConfig.CONNECTIONS_MAX_IDLE_MS_CONFIG + * ), this.metrics, time, "producer", channelBuilder, logContext), metadata, + * clientId, maxInflightRequests, + * producerConfig.getLong(ProducerConfig.RECONNECT_BACKOFF_MS_CONFIG), + * producerConfig.getLong(ProducerConfig.RECONNECT_BACKOFF_MAX_MS_CONFIG), + * producerConfig.getInt(ProducerConfig.SEND_BUFFER_CONFIG), + * producerConfig.getInt(ProducerConfig.RECEIVE_BUFFER_CONFIG), + * requestTimeoutMs, producerConfig.getLong(ProducerConfig. + * SOCKET_CONNECTION_SETUP_TIMEOUT_MS_CONFIG), + * producerConfig.getLong(ProducerConfig. + * SOCKET_CONNECTION_SETUP_TIMEOUT_MAX_MS_CONFIG), + * ClientDnsLookup.forConfig(producerConfig.getString(ProducerConfig. + * CLIENT_DNS_LOOKUP_CONFIG)), time, true, apiVersions, throttleTimeSensor, + * logContext); + */ + + short acks = configureAcks(producerConfig, log); + + /* + * return new Sender(logContext, client, metadata, this.accumulator, + * maxInflightRequests == 1, + * producerConfig.getInt(ProducerConfig.MAX_REQUEST_SIZE_CONFIG), acks, + * producerConfig.getInt(ProducerConfig.RETRIES_CONFIG), + * metricsRegistry.senderMetrics, time, requestTimeoutMs, + * producerConfig.getLong(ProducerConfig.RETRY_BACKOFF_MS_CONFIG), + * this.transactionManager, apiVersions); + */ + int retries = configureRetries(producerConfig, producerConfig.idempotenceEnabled(), log); + + ProducerMetrics metricsRegistry = new ProducerMetrics(this.metrics); + + return new SenderThread(logContext, this.clientId, client, this.metadata, this.accumulator, + maxInflightRequests == 1, this.producerConfig, acks, retries, metricsRegistry.senderMetrics, + Time.SYSTEM); + } + + private static int configureRetries(ProducerConfig config, boolean idempotenceEnabled, Logger log) { + boolean userConfiguredRetries = false; + if (config.originals().containsKey(ProducerConfig.RETRIES_CONFIG)) { + userConfiguredRetries = true; + } + if (idempotenceEnabled && !userConfiguredRetries) { + // We recommend setting infinite retries when the idempotent producer is + // enabled, so it makes sense to make + // this the default. 
+ log.info("Overriding the default retries config to the recommended value of {} since the idempotent " + + "producer is enabled.", Integer.MAX_VALUE); + return Integer.MAX_VALUE; + } + if (idempotenceEnabled && config.getInt(ProducerConfig.RETRIES_CONFIG) == 0) { + throw new ConfigException( + "Must set " + ProducerConfig.RETRIES_CONFIG + " to non-zero when using the idempotent producer."); + } + return config.getInt(ProducerConfig.RETRIES_CONFIG); + } + + private static int lingerMs(ProducerConfig config) { + return (int) Math.min(config.getLong(ProducerConfig.LINGER_MS_CONFIG), Integer.MAX_VALUE); + } + + private static int configureDeliveryTimeout(ProducerConfig config, Logger log) { + int deliveryTimeoutMs = config.getInt(ProducerConfig.DELIVERY_TIMEOUT_MS_CONFIG); + int lingerMs = lingerMs(config); + int requestTimeoutMs = config.getInt(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG); + int lingerAndRequestTimeoutMs = (int) Math.min((long) lingerMs + requestTimeoutMs, Integer.MAX_VALUE); + + if (deliveryTimeoutMs < lingerAndRequestTimeoutMs) { + if (config.originals().containsKey(ProducerConfig.DELIVERY_TIMEOUT_MS_CONFIG)) { + // throw an exception if the user explicitly set an inconsistent value + throw new ConfigException( + ProducerConfig.DELIVERY_TIMEOUT_MS_CONFIG + " should be equal to or larger than " + + ProducerConfig.LINGER_MS_CONFIG + " + " + ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG); + } else { + // override deliveryTimeoutMs default value to lingerMs + requestTimeoutMs for + // backward compatibility + deliveryTimeoutMs = lingerAndRequestTimeoutMs; + log.warn("{} should be equal to or larger than {} + {}. Setting it to {}.", + ProducerConfig.DELIVERY_TIMEOUT_MS_CONFIG, ProducerConfig.LINGER_MS_CONFIG, + ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG, deliveryTimeoutMs); + } + } + return deliveryTimeoutMs; + } + + private TransactionManager configureTransactionState(ProducerConfig config, LogContext logContext) { + + TransactionManager transactionManager = null; + + // OKafka does not support Kafka Transactions + + /* + * final boolean userConfiguredIdempotence = + * config.originals().containsKey(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG); + * final boolean userConfiguredTransactions = + * config.originals().containsKey(ProducerConfig.TRANSACTIONAL_ID_CONFIG); if + * (userConfiguredTransactions && !userConfiguredIdempotence) + * log.info("Overriding the default {} to true since {} is specified.", + * ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, + * ProducerConfig.TRANSACTIONAL_ID_CONFIG); + * + * if (config.idempotenceEnabled()) { final String transactionalId = + * config.getString(ProducerConfig.TRANSACTIONAL_ID_CONFIG); final int + * transactionTimeoutMs = + * config.getInt(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG); final long + * retryBackoffMs = config.getLong(ProducerConfig.RETRY_BACKOFF_MS_CONFIG); + * final boolean autoDowngradeTxnCommit = + * config.getBoolean(ProducerConfig.AUTO_DOWNGRADE_TXN_COMMIT); + * transactionManager = new TransactionManager( logContext, transactionalId, + * transactionTimeoutMs, retryBackoffMs, apiVersions, autoDowngradeTxnCommit); + * + * if (transactionManager.isTransactional()) + * log.info("Instantiated a transactional producer."); else + * log.info("Instantiated an idempotent producer."); } + */ + return transactionManager; + } + + private static int configureInflightRequests(ProducerConfig config) { + if (config.idempotenceEnabled() && 5 < config.getInt(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION)) { + throw new ConfigException("Must set 
" + ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION + + " to at most 5" + " to use the idempotent producer."); + } + return config.getInt(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION); + } + + private static short configureAcks(ProducerConfig config, Logger log) { + boolean userConfiguredAcks = config.originals().containsKey(ProducerConfig.ACKS_CONFIG); + short acks = Short.parseShort(config.getString(ProducerConfig.ACKS_CONFIG)); + + if (config.idempotenceEnabled()) { + if (!userConfiguredAcks) + log.info("Overriding the default {} to all since idempotence is enabled.", ProducerConfig.ACKS_CONFIG); + else if (acks != -1) + throw new ConfigException( + "Must set " + ProducerConfig.ACKS_CONFIG + " to all in order to use the idempotent " + + "producer. Otherwise we cannot guarantee idempotence."); + } + return acks; + } + + /** + * Needs to be called before any other methods when the transactional.id is set + * in the configuration. + * + * This method does the following: 1. Ensures any transactions initiated by + * previous instances of the producer with the same transactional.id are + * completed. If the previous instance had failed with a transaction in + * progress, it will be aborted. If the last transaction had begun completion, + * but not yet finished, this method awaits its completion. 2. Gets the internal + * producer id and epoch, used in all future transactional messages issued by + * the producer. + * + * Note that this method will raise {@link TimeoutException} if the + * transactional state cannot be initialized before expiration of + * {@code max.block.ms}. Additionally, it will raise {@link InterruptException} + * if interrupted. It is safe to retry in either case, but once the + * transactional state has been successfully initialized, this method should no + * longer be used. + * + * @throws IllegalStateException if no + * transactional.id + * has been + * configured + * @throws org.apache.kafka.common.errors.UnsupportedVersionException fatal + * error + * indicating + * the broker + * does not + * support + * transactions + * (i.e. if + * its + * version is + * lower than + * 0.11.0.0) + * @throws org.apache.kafka.common.errors.AuthorizationException fatal + * error + * indicating + * that the + * configured + * transactional.id + * is not + * authorized. + * See the + * exception + * for more + * details + * @throws KafkaException if the + * producer + * has + * encountered + * a previous + * fatal + * error or + * for any + * other + * unexpected + * error + * @throws TimeoutException if the + * time taken + * for + * initialize + * the + * transaction + * has + * surpassed + * max.block.ms. + * @throws InterruptException if the + * thread is + * interrupted + * while + * blocked + */ + public void initTransactions() { + // throwIfNoTransactionManager(); + throwIfProducerClosed(); + transactionInitDone = true; + oracleTransctionManager.initTxn(); + /* + * TransactionalRequestResult result = + * transactionManager.initializeTransactions(); sender.wakeup(); + * result.await(maxBlockTimeMs, TimeUnit.MILLISECONDS); + */ + } + + /** + * Should be called before the start of each new transaction. Note that prior to + * the first invocation of this method, you must invoke + * {@link #initTransactions()} exactly one time. 
+ * + * @throws IllegalStateException if no + * transactional.id + * has been + * configured + * or if + * {@link #initTransactions()} + * has not + * yet been + * invoked + * @throws ProducerFencedException if + * another + * producer + * with the + * same + * transactional.id + * is + * active + * @throws org.apache.kafka.common.errors.InvalidProducerEpochException if the + * producer + * has + * attempted + * to + * produce + * with an + * old + * epoch to + * the + * partition + * leader. + * See the + * exception + * for more + * details + * @throws org.apache.kafka.common.errors.UnsupportedVersionException fatal + * error + * indicating + * the + * broker + * does not + * support + * transactions + * (i.e. if + * its + * version + * is lower + * than + * 0.11.0.0) + * @throws org.apache.kafka.common.errors.AuthorizationException fatal + * error + * indicating + * that the + * configured + * transactional.id + * is not + * authorized. + * See the + * exception + * for more + * details + * @throws KafkaException if the + * producer + * has + * encountered + * a + * previous + * fatal + * error or + * for any + * other + * unexpected + * error + */ + public void beginTransaction() throws ProducerFencedException { + // throwIfNoTransactionManager(); + throwIfProducerClosed(); + long nowNanos = time.nanoseconds(); + if (oracleTransctionManager.getDBConnection() == null) { + Connection conn = getDBConnection(true); + oracleTransctionManager.setDBConnection(conn); + } + oracleTransctionManager.beginTransaction(); + okpMetrics.recordBeginTxn(time.nanoseconds() - nowNanos); + } + + /** + * This method is not supported for this release of OKafka. It will throw + * FeatureNotSupportedException if invoked. + * + * Sends a list of specified offsets to the consumer group coordinator, and also + * marks those offsets as part of the current transaction. These offsets will be + * considered committed only if the transaction is committed successfully. The + * committed offset should be the next message your application will consume, + * i.e. lastProcessedMessageOffset + 1. + *

    + * This method should be used when you need to batch consumed and produced + * messages together, typically in a consume-transform-produce pattern. Thus, + * the specified {@code consumerGroupId} should be the same as config parameter + * {@code group.id} of the used {@link KafkaConsumer consumer}. Note, that the + * consumer should have {@code enable.auto.commit=false} and should also not + * commit offsets manually (via {@link KafkaConsumer#commitSync(Map) sync} or + * {@link KafkaConsumer#commitAsync(Map, OffsetCommitCallback) async} commits). + * + * @throws IllegalStateException if + * no + * transactional.id + * has + * been + * configured, + * no + * transaction + * has + * been + * started + * @throws ProducerFencedException fatal + * error + * indicating + * another + * producer + * with + * the + * same + * transactional.id + * is + * active + * @throws org.apache.kafka.common.errors.UnsupportedVersionException fatal + * error + * indicating + * the + * broker + * does + * not + * support + * transactions + * (i.e. + * if + * its + * version + * is + * lower + * than + * 0.11.0.0) + * @throws org.apache.kafka.common.errors.UnsupportedForMessageFormatException fatal + * error + * indicating + * the + * message + * format + * used + * for + * the + * offsets + * topic + * on + * the + * broker + * does + * not + * support + * transactions + * @throws org.apache.kafka.common.errors.AuthorizationException fatal + * error + * indicating + * that + * the + * configured + * transactional.id + * is + * not + * authorized, + * or + * the + * consumer + * group + * id + * is + * not + * authorized. + * @throws org.apache.kafka.common.errors.InvalidProducerEpochException if + * the + * producer + * has + * attempted + * to + * produce + * with + * an + * old + * epoch + * to + * the + * partition + * leader. + * See + * the + * exception + * for + * more + * details + * @throws KafkaException if + * the + * producer + * has + * encountered + * a + * previous + * fatal + * or + * abortable + * error, + * or + * for + * any + * other + * unexpected + * error + */ + public void sendOffsetsToTransaction(Map offsets, String consumerGroupId) + throws ProducerFencedException { + throw new FeatureNotSupportedException( + "Sending offset to transaction feature is not suported for this release."); + // sendOffsetsToTransaction(offsets, new + // ConsumerGroupMetadata(consumerGroupId)); + } + + /** + * This method is not supported for this release of OKafka. It will throw + * FeatureNotSupportedException if invoked. + * + * Sends a list of specified offsets to the consumer group coordinator, and also + * marks those offsets as part of the current transaction. These offsets will be + * considered committed only if the transaction is committed successfully. The + * committed offset should be the next message your application will consume, + * i.e. lastProcessedMessageOffset + 1. + *

    + * This method should be used when you need to batch consumed and produced + * messages together, typically in a consume-transform-produce pattern. Thus, + * the specified {@code groupMetadata} should be extracted from the used + * {@link KafkaConsumer consumer} via {@link KafkaConsumer#groupMetadata()} to + * leverage consumer group metadata for stronger fencing than + * {@link #sendOffsetsToTransaction(Map, String)} which only sends with consumer + * group id. + * + *

    + * Note, that the consumer should have {@code enable.auto.commit=false} and + * should also not commit offsets manually (via + * {@link KafkaConsumer#commitSync(Map) sync} or + * {@link KafkaConsumer#commitAsync(Map, OffsetCommitCallback) async} commits). + * This method will raise {@link TimeoutException} if the producer cannot send + * offsets before expiration of {@code max.block.ms}. Additionally, it will + * raise {@link InterruptException} if interrupted. + * + * @throws IllegalStateException if + * no + * transactional.id + * has + * been + * configured + * or + * no + * transaction + * has + * been + * started. + * @throws ProducerFencedException fatal + * error + * indicating + * another + * producer + * with + * the + * same + * transactional.id + * is + * active + * @throws org.apache.kafka.common.errors.UnsupportedVersionException fatal + * error + * indicating + * the + * broker + * does + * not + * support + * transactions + * (i.e. + * if + * its + * version + * is + * lower + * than + * 0.11.0.0) + * or + * the + * broker + * doesn't + * support + * latest + * version + * of + * transactional + * API + * with + * consumer + * group + * metadata + * (i.e. + * if + * its + * version + * is + * lower + * than + * 2.5.0). + * @throws org.apache.kafka.common.errors.UnsupportedForMessageFormatException fatal + * error + * indicating + * the + * message + * format + * used + * for + * the + * offsets + * topic + * on + * the + * broker + * does + * not + * support + * transactions + * @throws org.apache.kafka.common.errors.AuthorizationException fatal + * error + * indicating + * that + * the + * configured + * transactional.id + * is + * not + * authorized, + * or + * the + * consumer + * group + * id + * is + * not + * authorized. + * @throws org.apache.kafka.clients.consumer.CommitFailedException if + * the + * commit + * failed + * and + * cannot + * be + * retried + * (e.g. + * if + * the + * consumer + * has + * been + * kicked + * out + * of + * the + * group). + * Users + * should + * handle + * this + * by + * aborting + * the + * transaction. + * @throws org.apache.kafka.common.errors.FencedInstanceIdException if + * this + * producer + * instance + * gets + * fenced + * by + * broker + * due + * to + * a + * mis-configured + * consumer + * instance + * id + * within + * group + * metadata. + * @throws org.apache.kafka.common.errors.InvalidProducerEpochException if + * the + * producer + * has + * attempted + * to + * produce + * with + * an + * old + * epoch + * to + * the + * partition + * leader. + * See + * the + * exception + * for + * more + * details + * @throws KafkaException if + * the + * producer + * has + * encountered + * a + * previous + * fatal + * or + * abortable + * error, + * or + * for + * any + * other + * unexpected + * error + * @throws TimeoutException if + * the + * time + * taken + * for + * sending + * offsets + * has + * surpassed + * max.block.ms. 
+ * @throws InterruptException if + * the + * thread + * is + * interrupted + * while + * blocked + */ + public void sendOffsetsToTransaction(Map offsets, + ConsumerGroupMetadata groupMetadata) throws ProducerFencedException { + + throw new FeatureNotSupportedException( + "Sending offset to transaction feature is not suported for this release."); + /* + * throwIfInvalidGroupMetadata(groupMetadata); throwIfNoTransactionManager(); + * throwIfProducerClosed(); TransactionalRequestResult result = + * transactionManager.sendOffsetsToTransaction(offsets, groupMetadata); + * sender.wakeup(); result.await(maxBlockTimeMs, TimeUnit.MILLISECONDS);+ + */ + } + + /** + * Commits the ongoing transaction. This method will flush any unsent records + * before actually committing the transaction. + * + * Further, if any of the {@link #send(ProducerRecord)} calls which were part of + * the transaction hit irrecoverable errors, this method will throw the last + * received exception immediately and the transaction will not be committed. So + * all {@link #send(ProducerRecord)} calls in a transaction must succeed in + * order for this method to succeed. + * + * Note that this method will raise {@link TimeoutException} if the transaction + * cannot be committed before expiration of {@code max.block.ms}. Additionally + * {@link InterruptException} if interrupted. It is safe to retry in either + * case, but it is not possible to attempt a different operation (such as + * abortTransaction) since the commit may already be in the progress of + * completing. If not retrying, the only option is to close the producer. + * + * @throws IllegalStateException if no + * transactional.id + * has been + * configured + * or no + * transaction + * has been + * started + * @throws ProducerFencedException fatal + * error + * indicating + * another + * producer + * with the + * same + * transactional.id + * is + * active + * @throws org.apache.kafka.common.errors.UnsupportedVersionException fatal + * error + * indicating + * the + * broker + * does not + * support + * transactions + * (i.e. if + * its + * version + * is lower + * than + * 0.11.0.0) + * @throws org.apache.kafka.common.errors.AuthorizationException fatal + * error + * indicating + * that the + * configured + * transactional.id + * is not + * authorized. + * See the + * exception + * for more + * details + * @throws org.apache.kafka.common.errors.InvalidProducerEpochException if the + * producer + * has + * attempted + * to + * produce + * with an + * old + * epoch to + * the + * partition + * leader. + * See the + * exception + * for more + * details + * @throws KafkaException if the + * producer + * has + * encountered + * a + * previous + * fatal or + * abortable + * error, + * or for + * any + * other + * unexpected + * error + * @throws TimeoutException if the + * time + * taken + * for + * committing + * the + * transaction + * has + * surpassed + * max.block.ms. + * @throws InterruptException if the + * thread + * is + * interrupted + * while + * blocked + */ + public void commitTransaction() throws DisconnectException, KafkaException { + + if (!transactionalProducer) { + throw new KafkaException("KafkaProducer is not an Oracle Transactional Producer." 
+ + "Please set oracle.transactional.producer property to true."); + } + throwIfProducerClosed(); + try { + long nowNanos = time.nanoseconds(); + oracleTransctionManager.commitTransaction(); + okpMetrics.recordCommitTxn(time.nanoseconds() - nowNanos); + } catch (DisconnectException dE) { + throw dE; + } catch (KafkaException kE) { + throw kE; + } catch (Exception e) { + KafkaException okafkaE = new KafkaException("Exception while committing transaction:" + e.getMessage(), e); + throw okafkaE; + } + + /* + * TransactionalRequestResult result = transactionManager.beginCommit(); + * sender.wakeup(); result.await(maxBlockTimeMs, TimeUnit.MILLISECONDS); + */ + } + + /** + * Aborts the ongoing transaction. Any unflushed produce messages will be + * aborted when this call is made. This call will throw an exception immediately + * if any prior {@link #send(ProducerRecord)} calls failed with a + * {@link ProducerFencedException} or an instance of + * {@link org.apache.kafka.common.errors.AuthorizationException}. + * + * Note that this method will raise {@link TimeoutException} if the transaction + * cannot be aborted before expiration of {@code max.block.ms}. Additionally, it + * will raise {@link InterruptException} if interrupted. It is safe to retry in + * either case, but it is not possible to attempt a different operation (such as + * commitTransaction) since the abort may already be in the progress of + * completing. If not retrying, the only option is to close the producer. + * + * @throws IllegalStateException if no + * transactional.id + * has been + * configured + * or no + * transaction + * has been + * started + * @throws ProducerFencedException fatal + * error + * indicating + * another + * producer + * with the + * same + * transactional.id + * is + * active + * @throws org.apache.kafka.common.errors.InvalidProducerEpochException if the + * producer + * has + * attempted + * to + * produce + * with an + * old + * epoch to + * the + * partition + * leader. + * See the + * exception + * for more + * details + * @throws org.apache.kafka.common.errors.UnsupportedVersionException fatal + * error + * indicating + * the + * broker + * does not + * support + * transactions + * (i.e. if + * its + * version + * is lower + * than + * 0.11.0.0) + * @throws org.apache.kafka.common.errors.AuthorizationException fatal + * error + * indicating + * that the + * configured + * transactional.id + * is not + * authorized. + * See the + * exception + * for more + * details + * @throws KafkaException if the + * producer + * has + * encountered + * a + * previous + * fatal + * error or + * for any + * other + * unexpected + * error + * @throws TimeoutException if the + * time + * taken + * for + * aborting + * the + * transaction + * has + * surpassed + * max.block.ms. + * @throws InterruptException if the + * thread + * is + * interrupted + * while + * blocked + */ + public void abortTransaction() throws ProducerFencedException { + // throwIfNoTransactionManager(); + throwIfProducerClosed(); + log.info("Aborting incomplete transaction"); + if (!transactionalProducer) { + throw new KafkaException("KafkaProducer is not an Oracle Transactional Producer." 
+ + "Please set oracle.transactional.producer property to true."); + } + try { + long nowNanos = time.nanoseconds(); + oracleTransctionManager.abortTransaction(); + okpMetrics.recordAbortTxn(time.nanoseconds() - nowNanos); + } catch (Exception e) { + KafkaException okafkaE = new KafkaException("Exception while aborting transaction:" + e.getMessage(), e); + throw okafkaE; + } + } + + /** + * Asynchronously send a record to a topic. Equivalent to + * send(record, null). See {@link #send(ProducerRecord, Callback)} + * for details. + */ + @Override + public Future send(ProducerRecord record) { + return send(record, null); + } + + /** + * Asynchronously send a record to a topic and invoke the provided callback when + * the send has been acknowledged. + *

    + * The send is asynchronous and this method will return immediately once the + * record has been stored in the buffer of records waiting to be sent. This + * allows sending many records in parallel without blocking to wait for the + * response after each one. + *

    + * The result of the send is a {@link RecordMetadata} specifying the partition + * the record was sent to, the offset it was assigned and the timestamp of the + * record. If {@link org.apache.kafka.common.record.TimestampType#CREATE_TIME + * CreateTime} is used by the topic, the timestamp will be the user provided + * timestamp or the record send time if the user did not specify a timestamp for + * the record. If + * {@link org.apache.kafka.common.record.TimestampType#LOG_APPEND_TIME + * LogAppendTime} is used for the topic, the timestamp will be the Kafka broker + * local time when the message is appended. + *

    + * Since the send call is asynchronous it returns a + * {@link java.util.concurrent.Future Future} for the {@link RecordMetadata} + * that will be assigned to this record. Invoking + * {@link java.util.concurrent.Future#get() get()} on this future will block + * until the associated request completes and then return the metadata for the + * record or throw any exception that occurred while sending the record. + *

    + * If you want to simulate a simple blocking call you can call the + * get() method immediately: + * + *

    +	 * {@code
    +	 * byte[] key = "key".getBytes();
    +	 * byte[] value = "value".getBytes();
    +	 * ProducerRecord record = new ProducerRecord("my-topic", key, value)
    +	 * producer.send(record).get();
    +	 * }
    + *

    + * Fully non-blocking usage can make use of the {@link Callback} parameter to + * provide a callback that will be invoked when the request is complete. + * + *

    +	 * {@code
    +	 * ProducerRecord record = new ProducerRecord("the-topic", key, value);
    +	 * producer.send(myRecord, new Callback() {
    +	 * 	public void onCompletion(RecordMetadata metadata, Exception e) {
    +	 * 		if (e != null) {
    +	 * 			e.printStackTrace();
    +	 * 		} else {
    +	 * 			System.out.println("The offset of the record we just sent is: " + metadata.offset());
    +	 * 		}
    +	 * 	}
    +	 * });
    +	 * }
    +	 * 
    + * + * Callbacks for records being sent to the same partition are guaranteed to + * execute in order. That is, in the following example callback1 is + * guaranteed to execute before callback2: + * + *
    +	 * {@code
    +	 * producer.send(new ProducerRecord(topic, partition, key1, value1), callback1);
    +	 * producer.send(new ProducerRecord(topic, partition, key2, value2), callback2);
    +	 * }
    +	 * 
    + *

    + * When used as part of a transaction, it is not necessary to define a callback + * or check the result of the future in order to detect errors from + * send. If any of the send calls failed with an irrecoverable + * error, the final {@link #commitTransaction()} call will fail and throw the + * exception from the last failed send. When this happens, your application + * should call {@link #abortTransaction()} to reset the state and continue to + * send data. + *

    + *

+ * Some transactional send errors cannot be resolved with a call to
+ * {@link #abortTransaction()}. In particular, if a transactional send finishes
+ * with a {@link ProducerFencedException}, a
+ * {@link org.apache.kafka.common.errors.OutOfOrderSequenceException}, a
+ * {@link org.apache.kafka.common.errors.UnsupportedVersionException}, or an
+ * {@link org.apache.kafka.common.errors.AuthorizationException}, then the only
+ * option left is to call {@link #close()}. Fatal errors cause the producer to
+ * enter a defunct state in which future API calls will continue to raise the
+ * same underlying error wrapped in a new {@link KafkaException}.
+ *

    + *

    + * It is a similar picture when idempotence is enabled, but no + * transactional.id has been configured. In this case, + * {@link org.apache.kafka.common.errors.UnsupportedVersionException} and + * {@link org.apache.kafka.common.errors.AuthorizationException} are considered + * fatal errors. However, {@link ProducerFencedException} does not need to be + * handled. Additionally, it is possible to continue sending after receiving an + * {@link org.apache.kafka.common.errors.OutOfOrderSequenceException}, but doing + * so can result in out of order delivery of pending messages. To ensure proper + * ordering, you should close the producer and create a new instance. + *

    + *

    + * If the message format of the destination topic is not upgraded to 0.11.0.0, + * idempotent and transactional produce requests will fail with an + * {@link org.apache.kafka.common.errors.UnsupportedForMessageFormatException} + * error. If this is encountered during a transaction, it is possible to abort + * and continue. But note that future sends to the same topic will continue + * receiving the same exception until the topic is upgraded. + *

    + *
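To make the abort-versus-close distinction above concrete, here is a minimal, illustrative sketch of a transactional OKafka produce flow. It assumes the producer is created with `oracle.transactional.producer=true` (required by `commitTransaction()`/`abortTransaction()` in this change) and that `beginTransaction()` is implemented earlier in this class, as the Javadoc references suggest; the connection properties and the topic name `TXEQ` are placeholders, not part of this change.

```java
// Illustrative sketch only. Everything except "oracle.transactional.producer",
// "oracle.net.tns_admin", "bootstrap.servers" and "security.protocol" is a
// placeholder or assumption.
import java.util.Properties;

import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.errors.AuthorizationException;
import org.apache.kafka.common.errors.OutOfOrderSequenceException;
import org.apache.kafka.common.errors.ProducerFencedException;
import org.oracle.okafka.clients.producer.KafkaProducer;

public class TransactionalProduceSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:1521");         // placeholder database listener
        props.put("oracle.net.tns_admin", "/path/to/tns_admin");   // placeholder wallet/tnsnames directory
        props.put("security.protocol", "PLAINTEXT");
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("oracle.transactional.producer", "true");        // enables commitTransaction()/abortTransaction()

        KafkaProducer<String, String> producer = new KafkaProducer<>(props);
        try {
            producer.beginTransaction();                           // assumed implemented earlier in this class
            producer.send(new ProducerRecord<>("TXEQ", "key", "value"));
            // The producer's own database connection (getDBConnection()) can be used here
            // for DML that must commit atomically with the message.
            producer.commitTransaction();
        } catch (ProducerFencedException | OutOfOrderSequenceException | AuthorizationException fatal) {
            // Fatal errors: the producer is defunct; the only remaining option is close().
            producer.close();
        } catch (KafkaException abortable) {
            // Abortable errors: roll the transaction back; the producer can be reused.
            producer.abortTransaction();
        }
    }
}
```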

    + * Note that callbacks will generally execute in the I/O thread of the producer + * and so should be reasonably fast or they will delay the sending of messages + * from other threads. If you want to execute blocking or computationally + * expensive callbacks it is recommended to use your own + * {@link java.util.concurrent.Executor} in the callback body to parallelize + * processing. + * + * @param record The record to send + * @param callback A user-supplied callback to execute when the record has been + * acknowledged by the server (null indicates no callback) + * + * @throws AuthenticationException if authentication fails. See the exception + * for more details + * @throws AuthorizationException fatal error indicating that the producer is + * not allowed to write + * @throws IllegalStateException if a transactional.id has been configured and + * no transaction has been started, or when send + * is invoked after producer has been closed. + * @throws InterruptException If the thread is interrupted while blocked + * @throws SerializationException If the key or value are not valid objects + * given the configured serializers + * @throws KafkaException If a Kafka related error occurs that does not + * belong to the public API exceptions. + */ + @Override + public Future send(ProducerRecord record, Callback callback) { + // intercept the record, which can be potentially modified; this method does not + // throw exceptions + ProducerRecord interceptedRecord = this.interceptors.onSend(record); + return doSend(interceptedRecord, callback); + } + + // Verify that this producer instance has not been closed. This method throws + // IllegalStateException if the producer + // has already been closed. + private void throwIfProducerClosed() { + if (transactionalProducer) { + if (aqProducer == null || aqProducer.isClosed()) + throw new IllegalStateException("Cannot perform operation after producer has been closed"); + } else if (sender == null || !sender.isRunning()) + throw new IllegalStateException("Cannot perform operation after producer has been closed"); + } + + /** + * Implementation of asynchronously send a record to a topic. + */ + private Future doSend(ProducerRecord record, Callback callback) { + TopicPartition tp = null; try { - this.prodConfigs = prodConfigs; - String clientId= prodConfigs.getString(ProducerConfig.CLIENT_ID_CONFIG); - if (clientId.length() <= 0) - clientId = "producer-" + PROD_CLIENT_ID_SEQUENCE.getAndIncrement(); - this.clientId = clientId; - String transactionalId = null; // userProvidedConfigs.containsKey(ProducerConfig.TRANSACTIONAL_ID_CONFIG) ? 
- //(String) userProvidedConfigs.get(ProducerConfig.TRANSACTIONAL_ID_CONFIG) : null; - LogContext logContext; - if (transactionalId == null) - logContext = new LogContext(String.format("[Producer clientId=%s] ", clientId)); - else - logContext = new LogContext(String.format("[Producer clientId=%s, transactionalId=%s] ", clientId, transactionalId)); - log = logContext.logger(KafkaProducer.class); - log.trace("Starting the Kafka producer"); - - Map metricTags = Collections.singletonMap("client-id", clientId); - MetricConfig metricConfig = new MetricConfig().samples(prodConfigs.getInt(ProducerConfig.METRICS_NUM_SAMPLES_CONFIG)) - .timeWindow(prodConfigs.getLong(ProducerConfig.METRICS_SAMPLE_WINDOW_MS_CONFIG), TimeUnit.MILLISECONDS) - .recordLevel(Sensor.RecordingLevel.forName(prodConfigs.getString(ProducerConfig.METRICS_RECORDING_LEVEL_CONFIG))) - .tags(metricTags); - List reporters = prodConfigs.getConfiguredInstances(ProducerConfig.METRIC_REPORTER_CLASSES_CONFIG, - MetricsReporter.class); - reporters.add(new JmxReporter(JMX_PREFIX)); - this.metrics = new Metrics(metricConfig, reporters, time); - ProducerMetrics metricsRegistry = new ProducerMetrics(this.metrics); - - if (keySerializer == null) { - this.keySerializer = ensureExtended(prodConfigs.getConfiguredInstance(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, - Serializer.class)); - this.keySerializer.configure(prodConfigs.originals(), true); - } else { - prodConfigs.ignore(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG); - this.keySerializer = ensureExtended(keySerializer); - } - if (valueSerializer == null) { - this.valueSerializer = ensureExtended(prodConfigs.getConfiguredInstance(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, - Serializer.class)); - this.valueSerializer.configure(prodConfigs.originals(), false); - } else { - prodConfigs.ignore(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG); - this.valueSerializer = ensureExtended(valueSerializer); - } - - long retryBackoffMs = prodConfigs.getLong(ProducerConfig.RETRY_BACKOFF_MS_CONFIG); - // load interceptors and make sure they get clientId - userProvidedConfigs.put(ProducerConfig.CLIENT_ID_CONFIG, clientId); - List> interceptorList = (List) (new ProducerConfig(userProvidedConfigs, false)).getConfiguredInstances(ProducerConfig.INTERCEPTOR_CLASSES_CONFIG, - ProducerInterceptor.class); - this.interceptors = new ProducerInterceptors<>(interceptorList); - ClusterResourceListeners clusterResourceListeners = configureClusterResourceListeners(keySerializer, valueSerializer, interceptorList, reporters); - List addresses = null; - String serviceName = null; - String instanceName = null; - System.setProperty("oracle.net.tns_admin", prodConfigs.getString(ProducerConfig.ORACLE_NET_TNS_ADMIN)); - - if( prodConfigs.getString( CommonClientConfigs.SECURITY_PROTOCOL_CONFIG).trim().equalsIgnoreCase("PLAINTEXT")) - addresses = ClientUtils.parseAndValidateAddresses(prodConfigs.getList(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG)); - else { - if( prodConfigs.getString(SslConfigs.TNS_ALIAS) == null) - throw new InvalidLoginCredentialsException("Please provide valid connection string"); - TNSParser parser = new TNSParser(prodConfigs); - parser.readFile(); - String connStr = parser.getConnectionString(prodConfigs.getString(SslConfigs.TNS_ALIAS).toUpperCase()); - if (connStr == null) - throw new InvalidLoginCredentialsException("Please provide valid connection string"); - String host = parser.getProperty(connStr, "HOST"); - String portStr = parser.getProperty(connStr, "PORT"); - serviceName = parser.getProperty(connStr, 
"SERVICE_NAME"); - int port; - if( host == null || portStr == null || serviceName == null) - throw new InvalidLoginCredentialsException("Please provide valid connection string"); - try { - port = Integer.parseInt(portStr); - } catch(NumberFormatException nfe) { - throw new InvalidLoginCredentialsException("Please provide valid connection string"); - } - instanceName = parser.getProperty(connStr, "INSTANCE_NAME"); - addresses = new ArrayList<>(); - addresses.add(new InetSocketAddress(host, port)); - } - if (metadata != null) { - this.metadata = metadata; - } else { - this.metadata = new Metadata(retryBackoffMs, prodConfigs.getLong(ProducerConfig.METADATA_MAX_AGE_CONFIG), - true, true, clusterResourceListeners, prodConfigs); - this.metadata.update(Cluster.bootstrap(addresses, prodConfigs, serviceName, instanceName), Collections.emptySet(), time.milliseconds()); - } - - this.compressionType = CompressionType.forName(prodConfigs.getString(ProducerConfig.COMPRESSION_TYPE_CONFIG)); - this.partitioner = prodConfigs.getConfiguredInstance(ProducerConfig.PARTITIONER_CLASS_CONFIG, Partitioner.class); - this.maxRequestSize = prodConfigs.getInt(ProducerConfig.MAX_REQUEST_SIZE_CONFIG); - this.requestTimeoutMs = Integer.MAX_VALUE; //prodConfigs.getInt(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG); - this.totalMemorySize = prodConfigs.getLong(ProducerConfig.BUFFER_MEMORY_CONFIG); - this.maxBlockTimeMs = prodConfigs.getLong(ProducerConfig.MAX_BLOCK_MS_CONFIG); - int retries = configureRetries(prodConfigs, false, log); - int maxInflightRequests = 1; //configureInflightRequests(prodConfigs, false); - short acks = configureAcks(prodConfigs, false, log); - - this.recordAccumulator = new RecordAccumulator(logContext, - prodConfigs.getInt(ProducerConfig.BATCH_SIZE_CONFIG), - this.totalMemorySize, - this.compressionType, - prodConfigs.getLong(ProducerConfig.LINGER_MS_CONFIG), - retryBackoffMs, - null, - time); - client = kafkaClient != null ? kafkaClient : new NetworkClient(new AQKafkaProducer(logContext, prodConfigs, time), this.metadata, clientId, - prodConfigs.getLong(AdminClientConfig.RECONNECT_BACKOFF_MS_CONFIG), - prodConfigs.getLong(AdminClientConfig.RECONNECT_BACKOFF_MAX_MS_CONFIG), - prodConfigs.getInt(AdminClientConfig.SEND_BUFFER_CONFIG), - prodConfigs.getInt(AdminClientConfig.RECEIVE_BUFFER_CONFIG), (int) TimeUnit.HOURS.toMillis(1), time, - logContext); - this.sender = new SenderThread(logContext, - this.clientId, - client, - this.metadata, - this.recordAccumulator, - false, - prodConfigs.getInt(ProducerConfig.MAX_REQUEST_SIZE_CONFIG), - acks, - retries, - null, - Time.SYSTEM, - this.requestTimeoutMs, - prodConfigs.getLong(ProducerConfig.RETRY_BACKOFF_MS_CONFIG)); - String ioThreadName = NETWORK_THREAD_PREFIX + " | " + clientId; - this.ioThread = new KafkaThread(ioThreadName, this.sender, false); - this.ioThread.start(); - AppInfoParser.registerAppInfo(JMX_PREFIX, this.clientId, null); - } - catch(Throwable t) - { - - // call close methods if internal objects are already constructed this is to prevent resource leak. 
see KAFKA-2121 - close(0, TimeUnit.MILLISECONDS, true); - // now propagate the exception - throw new KafkaException("Failed to construct kafka producer", t); - } - - } - - /** - * This api is not yet supported - */ + if (!transactionalProducer) { + throwIfProducerClosed(); + } + // first make sure the metadata for the topic is available + long nowMs = time.milliseconds(); + ClusterAndWaitTime clusterAndWaitTime; + try { + clusterAndWaitTime = waitOnMetadata(record.topic(), record.partition(), nowMs, maxBlockTimeMs); + } catch (KafkaException e) { + if (metadata.isClosed()) + throw new KafkaException("Producer closed while metadata fetch was in progress", e); + throw e; + } + nowMs += clusterAndWaitTime.waitedOnMetadataMs; + long remainingWaitMs = Math.max(0, maxBlockTimeMs - clusterAndWaitTime.waitedOnMetadataMs); + Cluster cluster = clusterAndWaitTime.cluster; + + byte[] serializedKey; + try { + serializedKey = keySerializer.serialize(record.topic(), record.headers(), record.key()); + } catch (ClassCastException cce) { + throw new SerializationException("Can't convert key of class " + record.key().getClass().getName() + + " to class " + producerConfig.getClass(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG).getName() + + " specified in key.serializer", cce); + } + + byte[] serializedValue; + try { + serializedValue = valueSerializer.serialize(record.topic(), record.headers(), record.value()); + + } catch (ClassCastException cce) { + throw new SerializationException("Can't convert value of class " + record.value().getClass().getName() + + " to class " + producerConfig.getClass(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG).getName() + + " specified in value.serializer", cce); + } + int partition = partition(record, serializedKey, serializedValue, cluster); + tp = new TopicPartition(record.topic(), partition); + + setReadOnly(record.headers()); + Header[] headers = record.headers().toArray(); + + int serializedSize = AbstractRecords.estimateSizeInBytesUpperBound(apiVersions.maxUsableProduceMagic(), + compressionType, serializedKey, serializedValue, headers); + + ensureValidRecordSize(serializedSize); + + long timestamp = record.timestamp() == null ? nowMs : record.timestamp(); + if (log.isTraceEnabled()) { + log.trace("Attempting to append record {} with callback {} to topic {} partition {}", record, callback, + record.topic(), partition); + } + // producer callback will make sure to call both 'callback' and interceptor + // callback + Callback interceptCallback = new InterceptorCallback<>(callback, this.interceptors, tp); + + if (transactionalProducer) { + return aqProducer.transactionalSend(tp, serializedKey, serializedValue, headers, interceptCallback); + } else { + RecordAccumulator.RecordAppendResult result = accumulator.append(tp, timestamp, serializedKey, + serializedValue, headers, interceptCallback, remainingWaitMs, true, nowMs); + + if (result.abortForNewBatch) { + int prevPartition = partition; + partitioner.onNewBatch(record.topic(), cluster, prevPartition); + partition = partition(record, serializedKey, serializedValue, cluster); + tp = new TopicPartition(record.topic(), partition); + if (log.isTraceEnabled()) { + log.trace( + "Retrying append due to new batch creation for topic {} partition {}. 
The old partition was {}", + record.topic(), partition, prevPartition); + } + // producer callback will make sure to call both 'callback' and interceptor + // callback + interceptCallback = new InterceptorCallback<>(callback, this.interceptors, tp); + result = accumulator.append(tp, timestamp, serializedKey, serializedValue, headers, + interceptCallback, remainingWaitMs, false, nowMs); + } + + if (result.batchIsFull || result.newBatchCreated) { + log.trace("Waking up the sender since topic {} partition {} is either full or getting a new batch", + record.topic(), partition); + this.sender.wakeup(); + } + return result.future; + } + + // handling exceptions and record the errors; + // for API exceptions return them in the future, + // for other exceptions throw directly + } catch (ApiException e) { + log.debug("Exception occurred during message send:", e); + if (callback != null) + callback.onCompletion(null, e); + this.errors.record(); + this.interceptors.onSendError(record, tp, e); + return new FutureFailure(e); + } catch (InterruptedException e) { + this.errors.record(); + this.interceptors.onSendError(record, tp, e); + throw new InterruptException(e); + } catch (KafkaException e) { + this.errors.record(); + this.interceptors.onSendError(record, tp, e); + throw e; + } catch (Exception e) { + // we notify interceptor about all exceptions, since onSend is called before + // anything else in this method + this.interceptors.onSendError(record, tp, e); + throw e; + } + } + + private void setReadOnly(Headers headers) { + if (headers instanceof RecordHeaders) { + ((RecordHeaders) headers).setReadOnly(); + } + } + + /** + * Wait for cluster metadata including partitions for the given topic to be + * available. + * + * @param topic The topic we want metadata for + * @param partition A specific partition expected to exist in metadata, or null + * if there's no preference + * @param nowMs The current time in ms + * @param maxWaitMs The maximum time in ms for waiting on the metadata + * @return The cluster containing topic metadata and the amount of time we + * waited in ms + * @throws TimeoutException if metadata could not be refreshed within + * {@code max.block.ms} + * @throws KafkaException for all Kafka-related exceptions, including the case + * where this method is called after producer close + */ + private ClusterAndWaitTime waitOnMetadata(String topic, Integer partition, long nowMs, long maxWaitMs) + throws InterruptedException { + // add topic to metadata topic list if it is not there already and reset expiry + Cluster cluster = metadata.fetch(); + + if (cluster.invalidTopics().contains(topic)) + throw new InvalidTopicException(topic); + + metadata.add(topic, nowMs); + Integer partitionsCount = cluster.partitionCountForTopic(topic); + // Return cached metadata if we have it, and if the record's partition is either + // undefined + // or within the known partition range + if (partitionsCount != null && (partition == null || partition < partitionsCount)) { + return new ClusterAndWaitTime(cluster, 0); + } + + long remainingWaitMs = maxWaitMs; + long elapsed = 0; + if (transactionalProducer) { + org.oracle.okafka.common.requests.AbstractRequest.Builder metadataRequest = null; + List topicList = new ArrayList<>(metadata.topics()); + metadataRequest = new org.oracle.okafka.common.requests.MetadataRequest.Builder(topicList, + metadata.allowAutoTopicCreation(), topicList); + + Node requestNode = metadata.getLeader() != null ? 
metadata.getLeader() : metadata.fetch().nodes().get(0); + + // ToDo: Check if it is right to use the first node always. We may have a valid + // connection already with node 2. + + ClientRequest clientRequest = client.newClientRequest((org.oracle.okafka.common.Node) requestNode, + metadataRequest, time.milliseconds(), true); + + // ToDo: Check if node needs to be send instead of null + Connection conn = getDBConnection(true); + log.debug("Fetch Metadata using connection " + conn); + ClientResponse response = aqProducer.getMetadataNow(clientRequest, conn, + (org.oracle.okafka.common.Node) requestNode, true); + MetadataResponse mResponse = (MetadataResponse) response.responseBody(); + if (response.wasDisconnected()) { + // ToDo: handle disconnect + } + for (String topicM : metadata.topics()) { + try { + aqProducer.fetchQueueParameters(topicM, conn, metadata.topicParaMap); + } catch (SQLException e) { + log.error("Exception while fetching TEQ parameters and updating metadata " + e.getMessage()); + } + } + elapsed = time.milliseconds() - nowMs; + Cluster newCluster = mResponse.cluster(); + metadata.update(newCluster, null, time.milliseconds(), false); + return new ClusterAndWaitTime(newCluster, elapsed); + } + + // Issue metadata requests until we have metadata for the topic and the + // requested partition, + // or until maxWaitTimeMs is exceeded. This is necessary in case the metadata + // is stale and the number of partitions for this topic has increased in the + // meantime. + long nowNanos = time.nanoseconds(); + do { + if (partition != null) { + log.trace("Requesting metadata update for partition {} of topic {}.", partition, topic); + } else { + log.trace("Requesting metadata update for topic {}.", topic); + } + // metadata.add(topic, nowMs + elapsed); + // int version = metadata.requestUpdateForTopic(topic); + int version = metadata.requestUpdate(); + sender.wakeup(); + try { + metadata.awaitUpdate(version, remainingWaitMs); + } catch (TimeoutException ex) { + // Rethrow with original maxWaitMs to prevent logging exception with + // remainingWaitMs + throw new TimeoutException( + String.format("Topic %s not present in metadata after %d ms.", topic, maxWaitMs)); + } + cluster = metadata.fetch(); + elapsed = time.milliseconds() - nowMs; + if (elapsed >= maxWaitMs) { + throw new TimeoutException(partitionsCount == null + ? 
String.format("Topic %s not present in metadata after %d ms.", topic, maxWaitMs) + : String.format( + "Partition %d of topic %s with partition count %d is not present in metadata after %d ms.", + partition, topic, partitionsCount, maxWaitMs)); + } + // metadata.maybeThrowExceptionForTopic(topic); + remainingWaitMs = maxWaitMs - elapsed; + partitionsCount = cluster.partitionCountForTopic(topic); + } while (partitionsCount == null || (partition != null && partition >= partitionsCount)); + okpMetrics.recordMetadataWait(time.nanoseconds() - nowNanos); + return new ClusterAndWaitTime(cluster, elapsed); + } + + /** + * Validate that the record size isn't too large + */ + private void ensureValidRecordSize(int size) { + if (size > maxRequestSize) + throw new RecordTooLargeException("The message is " + size + " bytes when serialized which is larger than " + + maxRequestSize + ", which is the value of the " + ProducerConfig.MAX_REQUEST_SIZE_CONFIG + + " configuration."); + if (size > totalMemorySize) + throw new RecordTooLargeException("The message is " + size + + " bytes when serialized which is larger than the total memory buffer you have configured with the " + + ProducerConfig.BUFFER_MEMORY_CONFIG + " configuration."); + } + + /** + * Invoking this method makes all buffered records immediately available to send + * (even if linger.ms is greater than 0) and blocks on the + * completion of the requests associated with these records. The post-condition + * of flush() is that any previously sent record will have + * completed (e.g. Future.isDone() == true). A request is + * considered completed when it is successfully acknowledged according to the + * acks configuration you have specified or else it results in an + * error. + *

    + * Other threads can continue sending records while one thread is blocked + * waiting for a flush call to complete, however no guarantee is made about the + * completion of records sent after the flush call begins. + *

    + * This method can be useful when consuming from some input system and producing + * into Kafka. The flush() call gives a convenient way to ensure + * all previously sent messages have actually completed. + *

    + * This example shows how to consume from one Kafka topic and produce to another + * Kafka topic: + * + *

    +	 * {@code
    +	 * for(ConsumerRecord record: consumer.poll(100))
    +	 *     producer.send(new ProducerRecord("my-topic", record.key(), record.value());
    +	 * producer.flush();
    +	 * consumer.commitSync();
    +	 * }
    +	 * 
    + * + * Note that the above example may drop records if the produce request fails. If + * we want to ensure that this does not occur we need to set + * retries=<large_number> in our config. + *

    + *

    + * Applications don't need to call this method for transactional producers, + * since the {@link #commitTransaction()} will flush all buffered records before + * performing the commit. This ensures that all the + * {@link #send(ProducerRecord)} calls made since the previous + * {@link #beginTransaction()} are completed before the commit. + *
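As a hedged illustration of this point, the sketch below contrasts the two modes. `Producer` is the standard `org.apache.kafka.clients.producer.Producer` interface this class appears to implement, and the topic name `payments` is a placeholder; the transactional path assumes `oracle.transactional.producer=true` and a `beginTransaction()` implementation as referenced above.

```java
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;

public class FlushSketch {
    // Non-transactional path: flush() blocks until all buffered sends complete.
    static void sendBatchAndFlush(Producer<String, String> producer) {
        for (int i = 0; i < 100; i++) {
            producer.send(new ProducerRecord<>("payments", Integer.toString(i), "value-" + i));
        }
        producer.flush(); // every record above is now acknowledged or failed
    }

    // Transactional path: no explicit flush(); commitTransaction() flushes any
    // unsent records before committing.
    static void sendBatchTransactionally(Producer<String, String> producer) {
        producer.beginTransaction();
        for (int i = 0; i < 100; i++) {
            producer.send(new ProducerRecord<>("payments", Integer.toString(i), "value-" + i));
        }
        producer.commitTransaction();
    }
}
```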

    + * + * @throws InterruptException If the thread is interrupted while blocked + */ + @Override + public void flush() { + log.trace("Flushing accumulated records in producer."); + long start = time.nanoseconds(); + this.accumulator.beginFlush(); + this.sender.wakeup(); + try { + this.accumulator.awaitFlushCompletion(); + } catch (InterruptedException e) { + throw new InterruptException("Flush interrupted.", e); + } finally { + okpMetrics.recordFlush(time.nanoseconds() - start); + } + } + + /** + * Get the partition metadata for the given topic. This can be used for custom + * partitioning. + * + * @throws AuthenticationException if authentication fails. See the exception + * for more details + * @throws AuthorizationException if not authorized to the specified topic. See + * the exception for more details + * @throws InterruptException if the thread is interrupted while blocked + * @throws TimeoutException if metadata could not be refreshed within + * {@code max.block.ms} + * @throws KafkaException for all Kafka-related exceptions, including + * the case where this method is called after + * producer close + */ + @Override + public List partitionsFor(String topic) { + Objects.requireNonNull(topic, "topic cannot be null"); + try { + return waitOnMetadata(topic, null, time.milliseconds(), maxBlockTimeMs).cluster.partitionsForTopic(topic); + } catch (InterruptedException e) { + throw new InterruptException(e); + } + } + + /** + * Get the full set of internal metrics maintained by the producer. + */ + @Override + public Map metrics() { + return Collections.unmodifiableMap(this.metrics.metrics()); + } + + /** + * Close this producer. This method blocks until all previously sent requests + * complete. This method is equivalent to + * close(Long.MAX_VALUE, TimeUnit.MILLISECONDS). + *

    + * If close() is called from {@link Callback}, a warning message will be + * logged and close(0, TimeUnit.MILLISECONDS) will be called instead. We do this + * because the sender thread would otherwise try to join itself and block + * forever. + *

+ *
+ * @throws InterruptException If the thread is interrupted while blocked.
+ * @throws KafkaException If an unexpected error occurs while trying to close
+ *         the client, this error should be treated as fatal and indicates that
+ *         the client is no longer functional.
+ */
+ @Override
+ public void close() {
+     close(Duration.ofMillis(Long.MAX_VALUE));
+ }
+
+ /**
+  * This method waits up to timeout for the producer to complete the
+  * sending of all incomplete requests.
+  *

    + * If the producer is unable to complete all requests before the timeout + * expires, this method will fail any unsent and unacknowledged records + * immediately. It will also abort the ongoing transaction if it's not already + * completing. + *
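For example, a bounded shutdown might look like the following minimal sketch (the 10-second bound is arbitrary, and `Producer` is the standard interface this class appears to implement):

```java
import java.time.Duration;
import org.apache.kafka.clients.producer.Producer;

public class CloseSketch {
    // Give in-flight sends up to 10 seconds to finish; anything still pending
    // afterwards is failed, and an ongoing transaction (if any) is aborted.
    static void shutdown(Producer<String, String> producer) {
        producer.close(Duration.ofSeconds(10));
    }
}
```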

    + * If invoked from within a {@link Callback} this method will not block and will + * be equivalent to close(Duration.ofMillis(0)). This is done since + * no further sending will happen while blocking the I/O thread of the producer. + * + * @param timeout The maximum time to wait for producer to complete any pending + * requests. The value should be non-negative. Specifying a + * timeout of zero means do not wait for pending send requests to + * complete. + * @throws InterruptException If the thread is interrupted while blocked. + * @throws KafkaException If a unexpected error occurs while trying to + * close the client, this error should be + * treated as fatal and indicate the client is + * no longer functionable. + * @throws IllegalArgumentException If the timeout is negative. + * + */ + @Override + public void close(Duration timeout) { + close(timeout, false); + } + + private void close(Duration timeout, boolean swallowException) { + long timeoutMs = timeout.toMillis(); + if (timeoutMs < 0) + throw new IllegalArgumentException("The timeout cannot be negative."); + + log.info("Closing the Kafka producer with timeoutMillis = {} ms.", timeoutMs); + + // this will keep track of the first encountered exception + AtomicReference firstException = new AtomicReference<>(); + + if (transactionalProducer) { + if (aqProducer != null) + aqProducer.close(); + } + boolean invokedFromCallback = false; + if (this.ioThread != null) + invokedFromCallback = Thread.currentThread() == this.ioThread; + + if (timeoutMs > 0) { + if (invokedFromCallback) { + log.warn( + "Overriding close timeout {} ms to 0 ms in order to prevent useless blocking due to self-join. " + + "This means you have incorrectly invoked close with a non-zero timeout from the producer call-back.", + timeoutMs); + } else { + // Try to close gracefully. + if (this.sender != null) + this.sender.initiateClose(); + if (this.ioThread != null) { + try { + this.ioThread.join(timeoutMs); + } catch (InterruptedException t) { + firstException.compareAndSet(null, new InterruptException(t)); + log.error("Interrupted while joining ioThread", t); + } + } + } + } + + if (this.sender != null && this.ioThread != null && this.ioThread.isAlive()) { + log.info("Proceeding to force close the producer since pending requests could not be completed " + + "within timeout {} ms.", timeoutMs); + this.sender.forceClose(); + // Only join the sender thread when not calling from callback. + if (!invokedFromCallback) { + try { + this.ioThread.join(); + } catch (InterruptedException e) { + firstException.compareAndSet(null, new InterruptException(e)); + } + } + } + + Utils.closeQuietly(interceptors, "producer interceptors", firstException); + Utils.closeQuietly(metrics, "producer metrics", firstException); + Utils.closeQuietly(keySerializer, "producer keySerializer", firstException); + Utils.closeQuietly(valueSerializer, "producer valueSerializer", firstException); + Utils.closeQuietly(partitioner, "producer partitioner", firstException); + AppInfoParser.unregisterAppInfo(JMX_PREFIX, clientId, metrics); + Throwable exception = firstException.get(); + if (exception != null && !swallowException) { + if (exception instanceof InterruptException) { + throw (InterruptException) exception; + } + throw new KafkaException("Failed to close kafka producer", exception); + } + log.debug("Kafka producer has been closed"); + } + + private ClusterResourceListeners configureClusterResourceListeners(Serializer keySerializer, + Serializer valueSerializer, List... 
candidateLists) { + ClusterResourceListeners clusterResourceListeners = new ClusterResourceListeners(); + for (List candidateList : candidateLists) + clusterResourceListeners.maybeAddAll(candidateList); + + clusterResourceListeners.maybeAdd(keySerializer); + clusterResourceListeners.maybeAdd(valueSerializer); + return clusterResourceListeners; + } + + /** + * computes partition for given record. if the record has partition returns the + * value otherwise calls configured partitioner class to compute the partition. + */ + private int partition(ProducerRecord record, byte[] serializedKey, byte[] serializedValue, Cluster cluster) { + Integer partition = record.partition(); + return partition != null ? partition + : partitioner.partition(record.topic(), record.key(), serializedKey, record.value(), serializedValue, + cluster); + } + + private void throwIfInvalidGroupMetadata(ConsumerGroupMetadata groupMetadata) { + if (groupMetadata == null) { + throw new IllegalArgumentException("Consumer group metadata could not be null"); + } else if (groupMetadata.generationId() > 0 + && JoinGroupRequest.UNKNOWN_MEMBER_ID.equals(groupMetadata.memberId())) { + throw new IllegalArgumentException( + "Passed in group metadata " + groupMetadata + " has generationId > 0 but member.id "); + } + } + + private void throwIfNoTransactionManager() { + if (transactionManager == null) + throw new IllegalStateException("Cannot use transactional methods without enabling transactions " + + "by setting the " + ProducerConfig.TRANSACTIONAL_ID_CONFIG + " configuration property"); + } + + // Visible for testing + String getClientId() { + return clientId; + } + + private static class ClusterAndWaitTime { + final Cluster cluster; + final long waitedOnMetadataMs; + + ClusterAndWaitTime(Cluster cluster, long waitedOnMetadataMs) { + this.cluster = cluster; + this.waitedOnMetadataMs = waitedOnMetadataMs; + } + } + + private static class FutureFailure implements Future { + + private final ExecutionException exception; + + public FutureFailure(Exception exception) { + this.exception = new ExecutionException(exception); + } + + @Override + public boolean cancel(boolean interrupt) { + return false; + } + @Override - public void initTransactions() { - throw new FeatureNotSupportedException("This feature is not suported for this release."); - } - - /** - * This api is not yet supported - */ - @Override - public void beginTransaction() throws ProducerFencedException { - throw new FeatureNotSupportedException("This feature is not suported for this release."); - } - - /** - * This api is not yet supported - */ - @Override - public void sendOffsetsToTransaction(Map offsets, - String consumerGroupId) throws ProducerFencedException { - throw new FeatureNotSupportedException("This feature is not suported for this release."); - - } - - /** - * This api is not yet supported - */ - @Override - public void commitTransaction() throws ProducerFencedException { - throw new FeatureNotSupportedException("This feature is not suported for this release."); - } - - /** - * This api is not yet supported - */ - @Override - public void abortTransaction() throws ProducerFencedException { - throw new FeatureNotSupportedException("This feature is not suported for this release."); - } - - - /** - * Asynchronously send a record to a topic. Equivalent to send(record, null). - * See {@link #send(ProducerRecord, Callback)} for details. 
- */ - @Override - public Future send(ProducerRecord record) { - - return send(record, null); - } - - /** - * Asynchronously send a record to a topic and invoke the provided callback when the send has been acknowledged. - *

    - * The send is asynchronous and this method will return immediately once the record has been stored in the buffer of - * records waiting to be sent. If buffer memory is full then send call blocks for a maximum of time max.block.ms .This allows sending many records in parallel without blocking to wait for the - * response after each one. - *

    - * The result of the send is a {@link RecordMetadata} specifying the partition the record was sent to, the offset - * it was assigned and the timestamp of the record. If - * {@link org.oracle.okafka.common.record.TimestampType#CREATE_TIME CreateTime} is used by the topic, the timestamp - * will be the user provided timestamp or the record send time if the user did not specify a timestamp for the - * record. If {@link org.oracle.okafka.common.record.TimestampType#LOG_APPEND_TIME LogAppendTime} is used for the - * topic, the timestamp will be the TEQ local time when the message is appended. OKafka currently supports only - * LogAppendTime. - *

    - * Send call returns a {@link java.util.concurrent.Future Future} for the - * {@link RecordMetadata} that will be assigned to this record. Invoking {@link java.util.concurrent.Future#get() - * get()} on this future will block until the associated request completes and then return the metadata for the record - * or throw any exception that occurred while sending the record. - *

    - * If you want to simulate a simple blocking call you can call the get() method immediately: - * - *

    -	     * {@code
    -	     * byte[] key = "key".getBytes();
    -	     * byte[] value = "value".getBytes();
    -	     * ProducerRecord record = new ProducerRecord("my-topic", key, value)
    -	     * producer.send(record).get();
    -	     * }
    - *

    - * Fully non-blocking usage can make use of the {@link Callback} parameter to provide a callback that - * will be invoked when the request is complete. - * - *

    -	     * {@code
    -	     * ProducerRecord record = new ProducerRecord("the-topic", key, value);
    -	     * producer.send(myRecord,
    -	     *               new Callback() {
    -	     *                   public void onCompletion(RecordMetadata metadata, Exception e) {
    -	     *                       if(e != null) {
    -	     *                          e.printStackTrace();
    -	     *                       } else {
    -	     *                          System.out.println("The offset of the record we just sent is: " + metadata.offset());
    -	     *                       }
    -	     *                   }
    -	     *               });
    -	     * }
    -	     * 
    - * - * Callbacks for records being sent to the same partition are guaranteed to execute in order. That is, in the - * following example callback1 is guaranteed to execute before callback2: - * - *
    -	     * {@code
    -	     * producer.send(new ProducerRecord(topic, partition, key1, value1), callback1);
    -	     * producer.send(new ProducerRecord(topic, partition, key2, value2), callback2);
    -	     * }
    -	     * 
    - * Note that callbacks will generally execute in the I/O thread of the producer and so should be reasonably fast or - * they will delay the sending of messages from other threads. If you want to execute blocking or computationally - * expensive callbacks it is recommended to use your own {@link java.util.concurrent.Executor} in the callback body - * to parallelize processing. - * - * @param record The record to send - * @param callback A user-supplied callback to execute when the record has been acknowledged by the server (null - * indicates no callback) - * - * @throws InterruptException If the thread is interrupted while blocked - * @throws SerializationException If the key or value are not valid objects given the configured serializers - * @throws TimeoutException If the time taken for fetching metadata or allocating memory for the record has surpassed max.block.ms. - * @throws KafkaException If a Kafka related error occurs that does not belong to the public API exceptions. - */ - @Override - public Future send(ProducerRecord record, Callback callback) { - // intercept the record, which can be potentially modified; this method does not throw exceptions - ProducerRecord interceptedRecord = this.interceptors.onSend(record); - return doSend(interceptedRecord, callback); - } - - - /** - * Implementation of asynchronously send a record to a topic. - */ - private Future doSend(ProducerRecord record, Callback callback) { - TopicPartition tp = null; - try { - throwIfProducerClosed(); - - // first make sure the metadata for the topic is available - ClusterAndWaitTime clusterAndWaitTime; - try { - clusterAndWaitTime = waitOnMetadata(record.topic(), record.partition(), maxBlockTimeMs); - } catch (KafkaException e) { - if (metadata.isClosed()) - throw new KafkaException("Producer closed while send in progress", e); - throw e; - } - long remainingWaitMs = Math.max(0, maxBlockTimeMs - clusterAndWaitTime.waitedOnMetadataMs); - byte[] serializedKey; - try { - serializedKey = keySerializer.serialize(record.topic(),record.headers(), record.key()); - } - catch(ClassCastException cce) { - throw new SerializationException("Can't convert key of class " + record.key().getClass().getName() + " to class " - + prodConfigs.getClass(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG).getName() + " specified in key.serializer", cce); - } - - byte[] serializedValue; - try { - serializedValue = valueSerializer.serialize(record.topic(), record.headers(), record.value()); - } - catch(ClassCastException cce) { - throw new SerializationException("Can't convert value of class " + record.value().getClass().getName() + " to class " - + prodConfigs.getClass(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG).getName() + " specified in value.serializer", cce); - } - int partition = partition(record, serializedKey, serializedValue, metadata.fetch()); - - tp = new TopicPartition(record.topic(), partition); - setReadOnly(record.headers()); - Header[] headers = record.headers().toArray(); - - int serializedSize = AbstractRecords.estimateSizeInBytesUpperBound(RecordBatch.CURRENT_MAGIC_VALUE, compressionType, - serializedKey, serializedValue, headers); - ensureValidRecordSize(serializedSize); - long timestamp = record.timestamp() == null ? 
time.milliseconds() : record.timestamp(); - log.trace("Sending record {} with callback {} to topic {} partition {}", record, callback, record.topic(), partition); - Callback interceptCallback = new InterceptorCallback<>(callback, this.interceptors, tp); - RecordAccumulator.RecordAppendResult result = recordAccumulator.append(tp, timestamp, serializedKey, - serializedValue, headers, interceptCallback ,remainingWaitMs); - return result.future; - } catch (ApiException e) { - log.debug("Exception occurred during message send:", e); - if (callback != null) - callback.onCompletion(null, e); - //this.errors.record(); - this.interceptors.onSendError(record, tp, e); - return new FutureFailure(e); - } catch (InterruptedException e) { - //this.errors.record(); - this.interceptors.onSendError(record, tp, e); - throw new InterruptException(e); - } catch (BufferExhaustedException e) { - //this.errors.record(); - //this.metrics.sensor("buffer-exhausted-records").record(); - this.interceptors.onSendError(record, tp, e); - throw e; - } catch (KafkaException e) { - //this.errors.record(); - this.interceptors.onSendError(record, tp, e); - throw e; - } catch (Exception e) { - // we notify interceptor about all exceptions, since onSend is called before anything else in this method - this.interceptors.onSendError(record, tp, e); - throw e; - } - } - - /** - * This api is not yet supported - */ - @Override - public void flush() { - throw new FeatureNotSupportedException("This feature is not suported for this release."); - } - - /** - * Get the partition metadata for the given topic. This can be used for custom partitioning. - * @throws InterruptException if the thread is interrupted while blocked - * @throws TimeoutException if metadata could not be refreshed within {@code max.block.ms} - * @throws KafkaException for all Kafka-related exceptions, including the case where this method is called after producer close - */ - @Override - public List partitionsFor(String topic) { - Objects.requireNonNull(topic, "topic cannot be null"); - try { - return waitOnMetadata(topic, null, maxBlockTimeMs).cluster.partitionsForTopic(topic); - } catch (InterruptedException e) { - throw new InterruptException(e); - } - } - - /** - * This api is not yet supported - */ - @Override - public Map metrics() { - throw new FeatureNotSupportedException("This feature is not suported for this release."); - } - - - /** - * Close this producer. This method blocks until all previously sent requests complete. - * This method is equivalent to close(Long.MAX_VALUE, TimeUnit.MILLISECONDS). - *

    - * If close() is called from {@link Callback}, a warning message will be logged and close(0, TimeUnit.MILLISECONDS) - * will be called instead. We do this because the sender thread would otherwise try to join itself and - * block forever. - *

    - * - * @throws InterruptException If the thread is interrupted while blocked - */ - @Override - public void close() { - close(Long.MAX_VALUE, TimeUnit.MILLISECONDS); - } - - /** - * This method waits up to timeout for the producer to complete the sending of all incomplete requests. - *

    - * If the producer is unable to complete all requests before the timeout expires, this method will fail - * any unsent and unacknowledged records immediately. - *

    - * If invoked from within a {@link Callback} this method will not block and will be equivalent to - * close(0, TimeUnit.MILLISECONDS). This is done since no further sending will happen while - * blocking the I/O thread of the producer. - * - * @param timeout The maximum time to wait for producer to complete any pending requests. The value should be - * non-negative. Specifying a timeout of zero means do not wait for pending send requests to complete. - * @param timeUnit The time unit for the timeout - * @throws InterruptException If the thread is interrupted while blocked - * @throws IllegalArgumentException If the timeout is negative. - */ - @Override - public void close(long timeout, TimeUnit timeUnit) { - close(timeout, timeUnit, false); - } - - private void close(long timeout, TimeUnit timeUnit, boolean swallowException) { - if (timeout < 0) - throw new IllegalArgumentException("The timeout cannot be negative."); - - log.info("Closing the Kafka producer with timeoutMillis = {} ms.", timeUnit.toMillis(timeout)); - // this will keep track of the first encountered exception - AtomicReference firstException = new AtomicReference<>(); - boolean invokedFromCallback = Thread.currentThread() == this.ioThread; - if (timeout > 0) { - if (invokedFromCallback) { - log.warn("Overriding close timeout {} ms to 0 ms in order to prevent useless blocking due to self-join. " + - "This means you have incorrectly invoked close with a non-zero timeout from the producer call-back.", timeout); - } else { - // Try to close gracefully. - if (this.sender != null) - this.sender.initiateClose(); - if (this.ioThread != null) { - try { - this.ioThread.join(timeUnit.toMillis(timeout)); - } catch (InterruptedException t) { - firstException.compareAndSet(null, new InterruptException(t)); - log.error("Interrupted while joining ioThread", t); - } - } - } - } - - if (this.sender != null && this.ioThread != null && this.ioThread.isAlive()) { - log.info("Proceeding to force close the producer since pending requests could not be completed " + - "within timeout {} ms.", timeout); - this.sender.forceClose(); - // Only join the sender thread when not calling from callback. - if (!invokedFromCallback) { - try { - this.ioThread.join(); - } catch (InterruptedException e) { - firstException.compareAndSet(null, new InterruptException(e)); - } - } - } - - ClientUtils.closeQuietly(interceptors, "producer interceptors", firstException); - ClientUtils.closeQuietly(metrics, "producer metrics", firstException); - ClientUtils.closeQuietly(keySerializer, "producer keySerializer", firstException); - ClientUtils.closeQuietly(valueSerializer, "producer valueSerializer", firstException); - ClientUtils.closeQuietly(partitioner, "producer partitioner", firstException); - AppInfoParser.unregisterAppInfo(JMX_PREFIX, clientId, null); - log.debug("Kafka producer has been closed"); - Throwable exception = firstException.get(); - if (exception != null && !swallowException) { - if (exception instanceof InterruptException) { - throw (InterruptException) exception; - } - throw new KafkaException("Failed to close kafka producer", exception); - } - } - private void throwIfProducerClosed() { - if (ioThread == null || !ioThread.isAlive()) - throw new IllegalStateException("Cannot perform operation after producer has been closed"); - } - - /** - * Wait for cluster metadata including partitions for the given topic to be available. 
- * @param topic The topic we want metadata for - * @param partition A specific partition expected to exist in metadata, or null if there's no preference - * @param maxWaitMs The maximum time in ms for waiting on the metadata - * @return The cluster containing topic metadata and the amount of time we waited in ms - * @throws KafkaException for all Kafka-related exceptions, including the case where this method is called after producer close - */ - private ClusterAndWaitTime waitOnMetadata(String topic, Integer partition, long maxWaitMs) throws InterruptedException { - // add topic to metadata topic list if it is not there already and reset expiry - metadata.add(topic);Cluster cluster = null;try { - cluster = metadata.fetch();} catch(Exception e) { - throw new InterruptedException(); - } - Integer partitionsCount = cluster.partitionCountForTopic(topic); - - // Return cached metadata if we have it, and if the record's partition is either undefined - // or within the known partition range - if (partitionsCount != null && (partition == null || partition < partitionsCount)) - return new ClusterAndWaitTime(cluster, 0); - - long begin = time.milliseconds(); - long remainingWaitMs = maxWaitMs; - long elapsed; - // Issue metadata requests until we have metadata for the topic or maxWaitTimeMs is exceeded. - // In case we already have cached metadata for the topic, but the requested partition is greater - // than expected, issue an update request only once. This is necessary in case the metadata - // is stale and the number of partitions for this topic has increased in the meantime. - do { - log.trace("Requesting metadata update for topic {}.", topic); - metadata.add(topic); - int version = metadata.requestUpdate(); - try { - metadata.awaitUpdate(version, remainingWaitMs); - } catch (TimeoutException ex) { - // Rethrow with original maxWaitMs to prevent logging exception with remainingWaitMs - throw new TimeoutException("Failed to update metadata after " + maxWaitMs + " ms."); - } - cluster = metadata.fetch(); - elapsed = time.milliseconds() - begin; - if (elapsed >= maxWaitMs) - throw new TimeoutException("Failed to update metadata after " + maxWaitMs + " ms."); - remainingWaitMs = maxWaitMs - elapsed; - partitionsCount = cluster.partitionCountForTopic(topic); - - } while (partitionsCount == null); - - if (partition != null && partition >= partitionsCount) { - throw new KafkaException( - String.format("Invalid partition given with record: %d is not in the range [0...%d).", partition, partitionsCount)); - } - - return new ClusterAndWaitTime(cluster, elapsed); - } - - - /** - * computes partition for given record. - * if the record has partition returns the value otherwise - * calls configured partitioner class to compute the partition. - */ - private int partition(ProducerRecord record, byte[] serializedKey, byte[] serializedValue, Cluster cluster) { - Integer partition = record.partition(); - return partition != null ? 
- partition : - partitioner.partition( - record.topic(), record.key(), serializedKey, record.value(), serializedValue, cluster); - } - - private void setReadOnly(Headers headers) { - if (headers instanceof RecordHeaders) { - ((RecordHeaders) headers).setReadOnly(); - } - } - - /** - * Validate that the record size isn't too large - */ - private void ensureValidRecordSize(int size) { - if (size > this.totalMemorySize) - throw new RecordTooLargeException("The message is " + size + - " bytes when serialized which is larger than the total memory buffer you have configured with the " + - ProducerConfig.BUFFER_MEMORY_CONFIG + - " configuration."); - } - - private static int parseAcks(String acksString) { - try { - return acksString.trim().equalsIgnoreCase("all") ? -1 : Integer.parseInt(acksString.trim()); - } catch (NumberFormatException e) { - throw new ConfigException("Invalid configuration value for 'acks': " + acksString); - } - } - - private ClusterResourceListeners configureClusterResourceListeners(Serializer keySerializer, Serializer valueSerializer, List... candidateLists) { - ClusterResourceListeners clusterResourceListeners = new ClusterResourceListeners(); - for (List candidateList: candidateLists) - clusterResourceListeners.maybeAddAll(candidateList); - - clusterResourceListeners.maybeAdd(keySerializer); - clusterResourceListeners.maybeAdd(valueSerializer); - return clusterResourceListeners; - } - - private static int configureRetries(ProducerConfig config, boolean idempotenceEnabled, Logger log) { - boolean userConfiguredRetries = false; - if (config.originals().containsKey(ProducerConfig.RETRIES_CONFIG)) { - userConfiguredRetries = true; - } - if (idempotenceEnabled && !userConfiguredRetries) { - // We recommend setting infinite retries when the idempotent producer is enabled, so it makes sense to make - // this the default. - log.info("Overriding the default retries config to the recommended value of {} since the idempotent " + - "producer is enabled.", Integer.MAX_VALUE); - return Integer.MAX_VALUE; - } - if (idempotenceEnabled && config.getInt(ProducerConfig.RETRIES_CONFIG) == 0) { - throw new ConfigException("Must set " + ProducerConfig.RETRIES_CONFIG + " to non-zero when using the idempotent producer."); - } - return config.getInt(ProducerConfig.RETRIES_CONFIG); - } - - private static int configureInflightRequests(ProducerConfig config, boolean idempotenceEnabled) { - if (idempotenceEnabled && 5 < config.getInt(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION)) { - throw new ConfigException("Must set " + ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION + " to at most 5" + - " to use the idempotent producer."); - } - return config.getInt(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION); - } - - - private static short configureAcks(ProducerConfig config, boolean idempotenceEnabled, Logger log) { - boolean userConfiguredAcks = false; - short acks = (short) parseAcks(config.getString(ProducerConfig.ACKS_CONFIG)); - if (config.originals().containsKey(ProducerConfig.ACKS_CONFIG)) { - userConfiguredAcks = true; - } - - if (idempotenceEnabled && !userConfiguredAcks) { - log.info("Overriding the default {} to all since idempotence is enabled.", ProducerConfig.ACKS_CONFIG); - return -1; - } - - if (idempotenceEnabled && acks != -1) { - throw new ConfigException("Must set " + ProducerConfig.ACKS_CONFIG + " to all in order to use the idempotent " + - "producer. 
Otherwise we cannot guarantee idempotence."); - } - return acks; - } - - - private static class ClusterAndWaitTime { - final Cluster cluster; - final long waitedOnMetadataMs; - ClusterAndWaitTime(Cluster cluster, long waitedOnMetadataMs) { - this.cluster = cluster; - this.waitedOnMetadataMs = waitedOnMetadataMs; - } - } - - private static class FutureFailure implements Future { - - private final ExecutionException exception; - - public FutureFailure(Exception exception) { - this.exception = new ExecutionException(exception); - } - - @Override - public boolean cancel(boolean interrupt) { - return false; - } - - @Override - public RecordMetadata get() throws ExecutionException { - throw this.exception; - } - - @Override - public RecordMetadata get(long timeout, TimeUnit unit) throws ExecutionException { - throw this.exception; - } - - @Override - public boolean isCancelled() { - return false; - } - - @Override - public boolean isDone() { - return true; - } - - } - - /** - * A callback called when producer request is complete. It in turn calls user-supplied callback (if given) and - * notifies producer interceptors about the request completion. - */ - private static class InterceptorCallback implements Callback { - private final Callback userCallback; - private final ProducerInterceptors interceptors; - private final TopicPartition tp; - - private InterceptorCallback(Callback userCallback, ProducerInterceptors interceptors, TopicPartition tp) { - this.userCallback = userCallback; - this.interceptors = interceptors; - this.tp = tp; - } - - public void onCompletion(RecordMetadata metadata, Exception exception) { - metadata = metadata != null ? metadata : new RecordMetadata(tp, -1, -1, RecordBatch.NO_TIMESTAMP, Long.valueOf(-1L), -1, -1); - this.interceptors.onAcknowledgement(metadata, exception); - if (this.userCallback != null) - this.userCallback.onCompletion(metadata, exception); - } - } + public RecordMetadata get() throws ExecutionException { + throw this.exception; + } + + @Override + public RecordMetadata get(long timeout, TimeUnit unit) throws ExecutionException { + throw this.exception; + } + + @Override + public boolean isCancelled() { + return false; + } + + @Override + public boolean isDone() { + return true; + } + + } + + /** + * A callback called when producer request is complete. It in turn calls + * user-supplied callback (if given) and notifies producer interceptors about + * the request completion. + */ + private static class InterceptorCallback implements Callback { + private final Callback userCallback; + private final ProducerInterceptors interceptors; + private final TopicPartition tp; + + private InterceptorCallback(Callback userCallback, ProducerInterceptors interceptors, TopicPartition tp) { + this.userCallback = userCallback; + this.interceptors = interceptors; + this.tp = tp; + } + + public void onCompletion(RecordMetadata metadata, Exception exception) { + metadata = metadata != null ? metadata + : new RecordMetadata(tp, -1, -1, RecordBatch.NO_TIMESTAMP, -1L, -1, -1); + this.interceptors.onAcknowledgement(metadata, exception); + if (this.userCallback != null) + this.userCallback.onCompletion(metadata, exception); + } + } + + private Connection getDBConnection(boolean force) throws KafkaException { + if (!transactionalProducer) { + throw new KafkaException("KafkaProducer is not an Oracle Transactional Producer." 
+ + "Please set oracle.transactional.producer property to true."); + } + try { + return aqProducer.getDBConnection(force); + } catch (Exception e) { + throw new KafkaException("Failed to fetch Oracle Database Connection for this producer", e); + } + } + + /** + * This method returns the database connection used by this KafkaProducer. + * + * @throws KafkaException if oracle.transactional.producer property + * is not set to true or KafkaProducer fails to create a + * database connection. + */ + public Connection getDBConnection() throws KafkaException { + return getDBConnection(true); + } + @Override + public Uuid clientInstanceId(Duration timeout) { + throw new FeatureNotSupportedException("This feature is not suported for this release."); + } } diff --git a/clients/src/main/java/org/oracle/okafka/clients/producer/Partitioner.java b/clients/src/main/java/org/oracle/okafka/clients/producer/Partitioner.java deleted file mode 100644 index dde0a7d..0000000 --- a/clients/src/main/java/org/oracle/okafka/clients/producer/Partitioner.java +++ /dev/null @@ -1,54 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.oracle.okafka.clients.producer; - -import java.io.Closeable; - -import org.oracle.okafka.common.Cluster; -import org.oracle.okafka.common.Configurable; - -/** - * Partitioner Interface - */ - -public interface Partitioner extends Configurable, Closeable { - - /** - * Compute the partition for the given record. - * - * @param topic The topic name - * @param key The key to partition on (or null if no key) - * @param keyBytes The serialized key to partition on( or null if no key) - * @param value The value to partition on or null - * @param valueBytes The serialized value to partition on or null - * @param cluster The current cluster metadata - */ - public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster); - - /** - * This is called when partitioner is closed. - */ - public void close(); - -} diff --git a/clients/src/main/java/org/oracle/okafka/clients/producer/Producer.java b/clients/src/main/java/org/oracle/okafka/clients/producer/Producer.java deleted file mode 100644 index a43e764..0000000 --- a/clients/src/main/java/org/oracle/okafka/clients/producer/Producer.java +++ /dev/null @@ -1,107 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
-*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.clients.producer; - -import org.oracle.okafka.clients.consumer.OffsetAndMetadata; -import org.oracle.okafka.common.Metric; -import org.oracle.okafka.common.MetricName; -import org.oracle.okafka.common.PartitionInfo; -import org.oracle.okafka.common.TopicPartition; -import org.oracle.okafka.common.errors.ProducerFencedException; - -import java.io.Closeable; -import java.util.List; -import java.util.Map; -import java.util.concurrent.Future; -import java.util.concurrent.TimeUnit; - -/** - * The interface for the {@link KafkaProducer} - * @see KafkaProducer - */ -public interface Producer extends Closeable { - - /** - * See {@link KafkaProducer#initTransactions()} - */ - void initTransactions(); - - /** - * See {@link KafkaProducer#beginTransaction()} - */ - void beginTransaction() throws ProducerFencedException; - - /** - * See {@link KafkaProducer#sendOffsetsToTransaction(Map, String)} - */ - void sendOffsetsToTransaction(Map offsets, - String consumerGroupId) throws ProducerFencedException; - - /** - * See {@link KafkaProducer#commitTransaction()} - */ - void commitTransaction() throws ProducerFencedException; - - /** - * See {@link KafkaProducer#abortTransaction()} - */ - void abortTransaction() throws ProducerFencedException; - - /** - * See {@link KafkaProducer#send(ProducerRecord)} - */ - Future send(ProducerRecord record); - - /** - * See {@link KafkaProducer#send(ProducerRecord, Callback)} - */ - Future send(ProducerRecord record, Callback callback); - - /** - * See {@link KafkaProducer#flush()} - */ - void flush(); - - /** - * See {@link KafkaProducer#partitionsFor(String)} - */ - List partitionsFor(String topic); - - /** - * See {@link KafkaProducer#metrics()} - */ - Map metrics(); - - /** - * See {@link KafkaProducer#close()} - */ - void close(); - - /** - * See {@link KafkaProducer#close(long, TimeUnit)} - */ - void close(long timeout, TimeUnit unit); - -} diff --git a/clients/src/main/java/org/oracle/okafka/clients/producer/ProducerConfig.java b/clients/src/main/java/org/oracle/okafka/clients/producer/ProducerConfig.java index 46c3a24..34da58a 100644 --- a/clients/src/main/java/org/oracle/okafka/clients/producer/ProducerConfig.java +++ b/clients/src/main/java/org/oracle/okafka/clients/producer/ProducerConfig.java @@ -1,9 +1,9 @@ /* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ + ** OKafka Java Client version 23.4. + ** + ** Copyright (c) 2019, 2024 Oracle and/or its affiliates. + ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
+ */ /* * Licensed to the Apache Software Foundation (ASF) under one or more @@ -29,9 +29,9 @@ package org.oracle.okafka.clients.producer; -import static org.oracle.okafka.common.config.ConfigDef.Range.atLeast; -import static org.oracle.okafka.common.config.ConfigDef.Range.between; -import static org.oracle.okafka.common.config.ConfigDef.ValidString.in; +import static org.apache.kafka.common.config.ConfigDef.Range.atLeast; +import static org.apache.kafka.common.config.ConfigDef.Range.between; +import static org.apache.kafka.common.config.ConfigDef.ValidString.in; import java.util.Collections; import java.util.HashMap; @@ -40,350 +40,452 @@ import java.util.Set; import org.oracle.okafka.clients.CommonClientConfigs; -import org.oracle.okafka.clients.producer.internals.DefaultPartitioner; -import org.oracle.okafka.common.config.AbstractConfig; -import org.oracle.okafka.common.config.ConfigDef; +import org.apache.kafka.clients.ClientDnsLookup; +import org.apache.kafka.common.config.AbstractConfig; +import org.apache.kafka.common.config.ConfigDef; +import org.apache.kafka.common.config.ConfigException; import org.oracle.okafka.common.config.SslConfigs; -import org.oracle.okafka.common.config.ConfigDef.Importance; -import org.oracle.okafka.common.config.ConfigDef.Type; -import org.oracle.okafka.common.metrics.Sensor; -import org.oracle.okafka.common.serialization.Serializer; +import org.apache.kafka.common.config.ConfigDef.Importance; +import org.apache.kafka.common.config.ConfigDef.Type; +import org.apache.kafka.common.metrics.Sensor; +import org.apache.kafka.common.serialization.Serializer; public class ProducerConfig extends AbstractConfig { private static final ConfigDef CONFIG; - + /** oracle.instance.name*/ public static final String ORACLE_INSTANCE_NAME = CommonClientConfigs.ORACLE_INSTANCE_NAME; /** oracle.service.name*/ public static final String ORACLE_SERVICE_NAME = CommonClientConfigs.ORACLE_SERVICE_NAME; /** oracle.net.tns_admin */ public static final String ORACLE_NET_TNS_ADMIN = CommonClientConfigs.ORACLE_NET_TNS_ADMIN; + + public static final String ORACLE_TRANSACTIONAL_PRODUCER = "oracle.transactional.producer"; + public static final String ORACLE_TRANSACTIONAL_PRODUCER_DOC = "Create a transactional producer which can be used to " + + "retrieve the database connection being used to publish the ProducerRecord." + + "Application can use getDBConnection() api to fetch the database connection." + + "A transactional producer will not accumulate the records in the batch " + + "but will immediately send it to Oracle Transactional Event Queue(TxEQ) server to publish into the topic."; /** bootstrap.servers*/ - public static final String BOOTSTRAP_SERVERS_CONFIG = CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG; - - /** metadata.max.age.ms*/ - public static final String METADATA_MAX_AGE_CONFIG = CommonClientConfigs.METADATA_MAX_AGE_CONFIG; - private static final String METADATA_MAX_AGE_DOC = CommonClientConfigs.METADATA_MAX_AGE_DOC; - - /** batch.size */ - public static final String BATCH_SIZE_CONFIG = "batch.size"; - private static final String BATCH_SIZE_DOC = "The producer will attempt to batch records together into fewer requests whenever multiple records are being sent" - + " to the same partition. This helps performance on both the client and the server. This configuration controls the " - + "default batch size in bytes. " - + "
<p>"
- + "No attempt will be made to batch records larger than this size. "
- + "<p>"
- + "Each request sent to TEQ will contain only one batch, each message in batch corresponds to same partition. "
- + "<p>
    " - + "A small batch size will make batching less common and may reduce throughput (a batch size of zero will disable " - + "batching entirely). A very large batch size may use memory a bit more wastefully as we will always allocate a " - + "buffer of the specified batch size in anticipation of additional records."; - - /** acks */ - public static final String ACKS_CONFIG = "acks"; - private static final String ACKS_DOC = "TEQ supports only acks=all since all instances of a oracle databse run on same disk. Hence there is no concept of replicating messages."; - - /** linger.ms */ - public static final String LINGER_MS_CONFIG = "linger.ms"; - private static final String LINGER_MS_DOC = "The producer groups together any records that arrive in between request transmissions into a single batched request. " - + "Normally this occurs only under load when records arrive faster than they can be sent out. However in some circumstances the client may want to " - + "reduce the number of requests even under moderate load. This setting accomplishes this by adding a small amount " - + "of artificial delay—that is, rather than immediately sending out a record the producer will wait for up to " - + "the given delay to allow other records to be sent so that the sends can be batched together. This can be thought " - + "of as analogous to Nagle's algorithm in TCP. This setting gives the upper bound on the delay for batching: once " - + "we get " + BATCH_SIZE_CONFIG + " worth of records for a partition it will be sent immediately regardless of this " - + "setting, however if we have fewer than this many bytes accumulated for this partition we will 'linger' for the " - + "specified time waiting for more records to show up. This setting defaults to 0 (i.e. no delay). Setting " + LINGER_MS_CONFIG + "=5, " - + "would have the effect of reducing the number of requests sent but would add up to 5ms of latency to records sent in the absence of load."; - - /** client.id */ - public static final String CLIENT_ID_CONFIG = CommonClientConfigs.CLIENT_ID_CONFIG; - - /** send.buffer.bytes (This property is not yet supported)*/ - public static final String SEND_BUFFER_CONFIG = CommonClientConfigs.SEND_BUFFER_CONFIG; - - /** receive.buffer.bytes (This property is not yet supported)*/ - public static final String RECEIVE_BUFFER_CONFIG = CommonClientConfigs.RECEIVE_BUFFER_CONFIG; - - /** max.request.size */ - public static final String MAX_REQUEST_SIZE_CONFIG = "max.request.size"; - private static final String MAX_REQUEST_SIZE_DOC = "The maximum size of a request in bytes. Each request to TEQ contains a batch of records from same and only one partition." - + "For this reason, this property doesn't have any effect and is effectively equivalent to batch.size."; - - /** reconnect.backoff.ms */ - public static final String RECONNECT_BACKOFF_MS_CONFIG = CommonClientConfigs.RECONNECT_BACKOFF_MS_CONFIG; - - /** reconnect.backoff.max.ms */ - public static final String RECONNECT_BACKOFF_MAX_MS_CONFIG = CommonClientConfigs.RECONNECT_BACKOFF_MAX_MS_CONFIG; - - /** max.block.ms */ - public static final String MAX_BLOCK_MS_CONFIG = "max.block.ms"; - private static final String MAX_BLOCK_MS_DOC = "The configuration controls how long KafkaProducer.send() and KafkaProducer.partitionsFor() will block." - + "These methods can be blocked either because the buffer is full or metadata unavailable." 
- + "Blocking in the user-supplied serializers or partitioner will not be counted against this timeout."; - - /** buffer.memory */ - public static final String BUFFER_MEMORY_CONFIG = "buffer.memory"; - private static final String BUFFER_MEMORY_DOC = "The total bytes of memory the producer can use to buffer records waiting to be sent to the server. If records are " - + "sent faster than they can be delivered to the server the producer will block for " + MAX_BLOCK_MS_CONFIG + " after which it will throw an exception." - + "
<p>
    " - + "This setting should correspond roughly to the total memory the producer will use, but is not a hard bound since " - + "not all memory the producer uses is used for buffering."; - - /** retry.backoff.ms */ - public static final String RETRY_BACKOFF_MS_CONFIG = CommonClientConfigs.RETRY_BACKOFF_MS_CONFIG; - - /** compression.type (This property is not yet supported)*/ - public static final String COMPRESSION_TYPE_CONFIG = "compression.type"; - private static final String COMPRESSION_TYPE_DOC = "The compression type for all data generated by the producer. The default is none (i.e. no compression). Valid " - + " values are none, gzip, snappy, or lz4. " - + "Compression is of full batches of data, so the efficacy of batching will also impact the compression ratio (more batching means better compression). This property is ot yet supported."; - - /** metrics.sample.window.ms (This property is not yet supported)*/ - public static final String METRICS_SAMPLE_WINDOW_MS_CONFIG = CommonClientConfigs.METRICS_SAMPLE_WINDOW_MS_CONFIG; - - /** metrics.num.samples (This property is not yet supported)*/ - public static final String METRICS_NUM_SAMPLES_CONFIG = CommonClientConfigs.METRICS_NUM_SAMPLES_CONFIG; - - /** - * metrics.log.level (This property is not yet supported) - */ - public static final String METRICS_RECORDING_LEVEL_CONFIG = CommonClientConfigs.METRICS_RECORDING_LEVEL_CONFIG; - - /** metric.reporters (This property is not yet supported)*/ - public static final String METRIC_REPORTER_CLASSES_CONFIG = CommonClientConfigs.METRIC_REPORTER_CLASSES_CONFIG; - - /** max.in.flight.requests.per.connection (This property is not yet supported)*/ - public static final String MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION = "max.in.flight.requests.per.connection"; - private static final String MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION_DOC = "The maximum number of unacknowledged requests the client will send on a single connection before blocking." - + "This property is not supportable beacuse client sends a request and waits for response."; - - /** retries */ - public static final String RETRIES_CONFIG = CommonClientConfigs.RETRIES_CONFIG; - private static final String RETRIES_DOC = "Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error." 
- + " Note that this retry is no different than if the client resent the record upon receiving the error."; - - - /** key.serializer */ - public static final String KEY_SERIALIZER_CLASS_CONFIG = "key.serializer"; - public static final String KEY_SERIALIZER_CLASS_DOC = "Serializer class for key that implements the org.oracle.okafka.common.serialization.Serializer interface."; - - /** value.serializer */ - public static final String VALUE_SERIALIZER_CLASS_CONFIG = "value.serializer"; - public static final String VALUE_SERIALIZER_CLASS_DOC = "Serializer class for value that implements the org.oracle.okafka.common.serialization.Serializer interface."; - - /** connections.max.idle.ms (This property is not yet supported)*/ - public static final String CONNECTIONS_MAX_IDLE_MS_CONFIG = CommonClientConfigs.CONNECTIONS_MAX_IDLE_MS_CONFIG; - - /** partitioner.class */ - public static final String PARTITIONER_CLASS_CONFIG = "partitioner.class"; - private static final String PARTITIONER_CLASS_DOC = "Partitioner class that implements the org.oracle.okafka.clients.producer.Partitioner interface."; - - /** request.timeout.ms (This property is not yet supported)*/ - public static final String REQUEST_TIMEOUT_MS_CONFIG = CommonClientConfigs.REQUEST_TIMEOUT_MS_CONFIG; - private static final String REQUEST_TIMEOUT_MS_DOC = CommonClientConfigs.REQUEST_TIMEOUT_MS_DOC; - - - /** interceptor.classes */ - public static final String INTERCEPTOR_CLASSES_CONFIG = "interceptor.classes"; - public static final String INTERCEPTOR_CLASSES_DOC = "A list of classes to use as interceptors. " - + "Implementing the org.oracle.okafka.clients.producer.ProducerInterceptor interface allows you to intercept (and possibly mutate) the records " - + "received by the producer before they are published to the Kafka cluster. By default, there are no interceptors."; - - /** enable.idempotence (This property is not yet supported)*/ - public static final String ENABLE_IDEMPOTENCE_CONFIG = "enable.idempotence"; - public static final String ENABLE_IDEMPOTENCE_DOC = "When set to 'true', the producer will ensure that exactly one copy of each message is written in the stream. If 'false', producer " - + "retries due to broker failures, etc., may write duplicates of the retried message in the stream. " - + "Note that enabling idempotence requires " + MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION + " to be less than or equal to 5, " - + "" + RETRIES_CONFIG + " to be greater than 0 and " + ACKS_CONFIG + " must be 'all'. If these values " - + "are not explicitly set by the user, suitable values will be chosen. If incompatible values are set, " - + "a ConfigException will be thrown. This property is not yet supported."; - - /** transaction.timeout.ms (This property is not yet supported) */ - public static final String TRANSACTION_TIMEOUT_CONFIG = "transaction.timeout.ms"; - public static final String TRANSACTION_TIMEOUT_DOC = "The maximum amount of time in ms that the transaction coordinator will wait for a transaction status update from the producer before proactively aborting the ongoing transaction." + - "If this value is larger than the transaction.max.timeout.ms setting in the broker, the request will fail with a `InvalidTransactionTimeout` error. This property is not yet supported."; - - /** transactional.id (This property is not yet supported) */ - public static final String TRANSACTIONAL_ID_CONFIG = "transactional.id"; - public static final String TRANSACTIONAL_ID_DOC = "The TransactionalId to use for transactional delivery. 
This enables reliability semantics which span multiple producer sessions since it allows the client to guarantee that transactions using the same TransactionalId have been completed prior to starting any new transactions. If no TransactionalId is provided, then the producer is limited to idempotent delivery. " + - "Note that enable.idempotence must be enabled if a TransactionalId is configured. " + - "The default is null, which means transactions cannot be used. " + - "Note that transactions requires a cluster of at least three brokers by default what is the recommended setting for production; for development you can change this, by adjusting broker setting `transaction.state.log.replication.factor`. This property is not yet supported."; - - - - static { - CONFIG = new ConfigDef().define(BOOTSTRAP_SERVERS_CONFIG, Type.LIST, Collections.emptyList(), new ConfigDef.NonNullValidator(), Importance.HIGH, CommonClientConfigs.BOOTSTRAP_SERVERS_DOC) - .define(BUFFER_MEMORY_CONFIG, Type.LONG, 32 * 1024 * 1024L, atLeast(0L), Importance.HIGH, BUFFER_MEMORY_DOC) - .define(RETRIES_CONFIG, Type.INT, 0, between(0, Integer.MAX_VALUE), Importance.HIGH, RETRIES_DOC) - .define(ACKS_CONFIG, - Type.STRING, - "1", - in("all", "-1", "0", "1"), - Importance.HIGH, - ACKS_DOC) - .define(COMPRESSION_TYPE_CONFIG, Type.STRING, "none", Importance.HIGH, COMPRESSION_TYPE_DOC) - .define(BATCH_SIZE_CONFIG, Type.INT, 16384, atLeast(0), Importance.MEDIUM, BATCH_SIZE_DOC) - .define(LINGER_MS_CONFIG, Type.LONG, 0, atLeast(0L), Importance.MEDIUM, LINGER_MS_DOC) - .define(CLIENT_ID_CONFIG, Type.STRING, "", Importance.MEDIUM, CommonClientConfigs.CLIENT_ID_DOC) - .define(SEND_BUFFER_CONFIG, Type.INT, 128 * 1024, atLeast(-1), Importance.MEDIUM, CommonClientConfigs.SEND_BUFFER_DOC) - .define(RECEIVE_BUFFER_CONFIG, Type.INT, 32 * 1024, atLeast(-1), Importance.MEDIUM, CommonClientConfigs.RECEIVE_BUFFER_DOC) - .define(MAX_REQUEST_SIZE_CONFIG, - Type.INT, - 1 * 1024 * 1024, - atLeast(0), - Importance.MEDIUM, - MAX_REQUEST_SIZE_DOC) - .define(RECONNECT_BACKOFF_MS_CONFIG, Type.LONG, 50L, atLeast(0L), Importance.LOW, CommonClientConfigs.RECONNECT_BACKOFF_MS_DOC) - .define(RECONNECT_BACKOFF_MAX_MS_CONFIG, Type.LONG, 1000L, atLeast(0L), Importance.LOW, CommonClientConfigs.RECONNECT_BACKOFF_MAX_MS_DOC) - .define(RETRY_BACKOFF_MS_CONFIG, Type.LONG, 100L, atLeast(0L), Importance.LOW, CommonClientConfigs.RETRY_BACKOFF_MS_DOC) - .define(MAX_BLOCK_MS_CONFIG, - Type.LONG, - 60 * 1000, - atLeast(0), - Importance.MEDIUM, - MAX_BLOCK_MS_DOC) - .define(REQUEST_TIMEOUT_MS_CONFIG, - Type.INT, - 30 * 1000, - atLeast(0), - Importance.MEDIUM, - REQUEST_TIMEOUT_MS_DOC) - .define(METADATA_MAX_AGE_CONFIG, Type.LONG, 5 * 60 * 1000, atLeast(0), Importance.LOW, METADATA_MAX_AGE_DOC) - .define(METRICS_SAMPLE_WINDOW_MS_CONFIG, - Type.LONG, - 30000, - atLeast(0), - Importance.LOW, - CommonClientConfigs.METRICS_SAMPLE_WINDOW_MS_DOC) - .define(METRICS_NUM_SAMPLES_CONFIG, Type.INT, 2, atLeast(1), Importance.LOW, CommonClientConfigs.METRICS_NUM_SAMPLES_DOC) - .define(METRICS_RECORDING_LEVEL_CONFIG, - Type.STRING, - Sensor.RecordingLevel.INFO.toString(), - in(Sensor.RecordingLevel.INFO.toString(), Sensor.RecordingLevel.DEBUG.toString()), - Importance.LOW, - CommonClientConfigs.METRICS_RECORDING_LEVEL_DOC) - .define(METRIC_REPORTER_CLASSES_CONFIG, - Type.LIST, - Collections.emptyList(), - new ConfigDef.NonNullValidator(), - Importance.LOW, - CommonClientConfigs.METRIC_REPORTER_CLASSES_DOC) - .define(MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, - Type.INT, - 5, - atLeast(1), - 
Importance.LOW, - MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION_DOC) - .define(KEY_SERIALIZER_CLASS_CONFIG, - Type.CLASS, - Importance.HIGH, - KEY_SERIALIZER_CLASS_DOC) - .define(VALUE_SERIALIZER_CLASS_CONFIG, - Type.CLASS, - Importance.HIGH, - VALUE_SERIALIZER_CLASS_DOC) - /* default is set to be a bit lower than the server default (10 min), to avoid both client and server closing connection at same time */ - .define(CONNECTIONS_MAX_IDLE_MS_CONFIG, - Type.LONG, - 9 * 60 * 1000, - Importance.MEDIUM, - CommonClientConfigs.CONNECTIONS_MAX_IDLE_MS_DOC) - - .define(INTERCEPTOR_CLASSES_CONFIG, - Type.LIST, - Collections.emptyList(), - new ConfigDef.NonNullValidator(), - Importance.LOW, - INTERCEPTOR_CLASSES_DOC) - .define(PARTITIONER_CLASS_CONFIG, - Type.CLASS, - DefaultPartitioner.class, - Importance.MEDIUM, PARTITIONER_CLASS_DOC) - .define(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, - Type.STRING, - CommonClientConfigs.DEFAULT_SECURITY_PROTOCOL, - Importance.MEDIUM, - CommonClientConfigs.SECURITY_PROTOCOL_DOC) - .withClientSslSupport() - .define(ENABLE_IDEMPOTENCE_CONFIG, - Type.BOOLEAN, - false, - Importance.LOW, - ENABLE_IDEMPOTENCE_DOC) - .define(TRANSACTION_TIMEOUT_CONFIG, - Type.INT, - 60000, - Importance.LOW, - TRANSACTION_TIMEOUT_DOC) - .define(TRANSACTIONAL_ID_CONFIG, - Type.STRING, - null, - new ConfigDef.NonEmptyString(), - Importance.LOW, - TRANSACTIONAL_ID_DOC) - .define(ORACLE_SERVICE_NAME, - Type.STRING, - null, - Importance.HIGH, - CommonClientConfigs.ORACLE_SERVICE_NAME_DOC) - .define(ORACLE_INSTANCE_NAME, - Type.STRING, - null, - Importance.HIGH, - CommonClientConfigs.ORACLE_INSTANCE_NAME_DOC) - .define(CommonClientConfigs.ORACLE_NET_TNS_ADMIN, - ConfigDef.Type.STRING, - Importance.MEDIUM, - CommonClientConfigs.ORACLE_NET_TNS_ADMIN_DOC); - - - } - - - public static Map addSerializerToConfig(Map configs, - Serializer keySerializer, Serializer valueSerializer) { - Map newConfigs = new HashMap<>(configs); - if (keySerializer != null) - newConfigs.put(KEY_SERIALIZER_CLASS_CONFIG, keySerializer.getClass()); - if (valueSerializer != null) - newConfigs.put(VALUE_SERIALIZER_CLASS_CONFIG, valueSerializer.getClass()); - return newConfigs; - } - - public static Properties addSerializerToConfig(Properties properties, - Serializer keySerializer, - Serializer valueSerializer) { - Properties newProperties = new Properties(); - newProperties.putAll(properties); - if (keySerializer != null) - newProperties.put(KEY_SERIALIZER_CLASS_CONFIG, keySerializer.getClass().getName()); - if (valueSerializer != null) - newProperties.put(VALUE_SERIALIZER_CLASS_CONFIG, valueSerializer.getClass().getName()); - return newProperties; - } - - public ProducerConfig(Properties props) { - super(CONFIG, props); - } - - public ProducerConfig(Map props) { - super(CONFIG, props); - } - - ProducerConfig(Map props, boolean doLog) { - super(CONFIG, props, doLog); - } - - public static Set configNames() { - return CONFIG.names(); - } - - public static void main(String[] args) { - System.out.println(CONFIG.toHtmlTable()); - } -} \ No newline at end of file + public static final String BOOTSTRAP_SERVERS_CONFIG = CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG; + + /** client.dns.lookup */ + public static final String CLIENT_DNS_LOOKUP_CONFIG = CommonClientConfigs.CLIENT_DNS_LOOKUP_CONFIG; + + /** socket.connection.setup.timeout.ms */ + public static final String SOCKET_CONNECTION_SETUP_TIMEOUT_MS_CONFIG = CommonClientConfigs.SOCKET_CONNECTION_SETUP_TIMEOUT_MS_CONFIG; + + /** socket.connection.setup.timeout.max.ms */ + public static 
final String SOCKET_CONNECTION_SETUP_TIMEOUT_MAX_MS_CONFIG = CommonClientConfigs.SOCKET_CONNECTION_SETUP_TIMEOUT_MAX_MS_CONFIG; + + + /** metadata.max.age.ms*/ + public static final String METADATA_MAX_AGE_CONFIG = CommonClientConfigs.METADATA_MAX_AGE_CONFIG; + private static final String METADATA_MAX_AGE_DOC = CommonClientConfigs.METADATA_MAX_AGE_DOC; + + /** batch.size */ + public static final String BATCH_SIZE_CONFIG = "batch.size"; + private static final String BATCH_SIZE_DOC = "The producer will attempt to batch records together into fewer requests whenever multiple records are being sent" + + " to the same partition. This helps performance on both the client and the server. This configuration controls the " + + "default batch size in bytes. " + + "
<p>"
+ + "No attempt will be made to batch records larger than this size. "
+ + "<p>"
+ + "Each request sent to TEQ will contain only one batch, each message in batch corresponds to same partition. "
+ + "<p>
    " + + "A small batch size will make batching less common and may reduce throughput (a batch size of zero will disable " + + "batching entirely). A very large batch size may use memory a bit more wastefully as we will always allocate a " + + "buffer of the specified batch size in anticipation of additional records."; + + /** acks */ + public static final String ACKS_CONFIG = "acks"; + private static final String ACKS_DOC = "TEQ supports only acks=all since all instances of a oracle databse run on same disk. Hence there is no concept of replicating messages."; + + /** linger.ms */ + public static final String LINGER_MS_CONFIG = "linger.ms"; + private static final String LINGER_MS_DOC = "The producer groups together any records that arrive in between request transmissions into a single batched request. " + + "Normally this occurs only under load when records arrive faster than they can be sent out. However in some circumstances the client may want to " + + "reduce the number of requests even under moderate load. This setting accomplishes this by adding a small amount " + + "of artificial delay—that is, rather than immediately sending out a record the producer will wait for up to " + + "the given delay to allow other records to be sent so that the sends can be batched together. This can be thought " + + "of as analogous to Nagle's algorithm in TCP. This setting gives the upper bound on the delay for batching: once " + + "we get " + BATCH_SIZE_CONFIG + " worth of records for a partition it will be sent immediately regardless of this " + + "setting, however if we have fewer than this many bytes accumulated for this partition we will 'linger' for the " + + "specified time waiting for more records to show up. This setting defaults to 0 (i.e. no delay). Setting " + LINGER_MS_CONFIG + "=5, " + + "would have the effect of reducing the number of requests sent but would add up to 5ms of latency to records sent in the absence of load."; + + /** client.id */ + public static final String CLIENT_ID_CONFIG = CommonClientConfigs.CLIENT_ID_CONFIG; + + /** send.buffer.bytes (This property is not yet supported)*/ + public static final String SEND_BUFFER_CONFIG = CommonClientConfigs.SEND_BUFFER_CONFIG; + + /** receive.buffer.bytes (This property is not yet supported)*/ + public static final String RECEIVE_BUFFER_CONFIG = CommonClientConfigs.RECEIVE_BUFFER_CONFIG; + + /** max.request.size */ + public static final String MAX_REQUEST_SIZE_CONFIG = "max.request.size"; + private static final String MAX_REQUEST_SIZE_DOC = "The maximum size of a request in bytes. Each request to TEQ contains a batch of records from same and only one partition." + + "For this reason, this property doesn't have any effect and is effectively equivalent to batch.size."; + + /** reconnect.backoff.ms */ + public static final String RECONNECT_BACKOFF_MS_CONFIG = CommonClientConfigs.RECONNECT_BACKOFF_MS_CONFIG; + + /** reconnect.backoff.max.ms */ + public static final String RECONNECT_BACKOFF_MAX_MS_CONFIG = CommonClientConfigs.RECONNECT_BACKOFF_MAX_MS_CONFIG; + + /** max.block.ms */ + public static final String MAX_BLOCK_MS_CONFIG = "max.block.ms"; + private static final String MAX_BLOCK_MS_DOC = "The configuration controls how long KafkaProducer.send() and KafkaProducer.partitionsFor() will block." + + "These methods can be blocked either because the buffer is full or metadata unavailable." 
+ + "Blocking in the user-supplied serializers or partitioner will not be counted against this timeout."; + + /** metadata.max.idle.ms */ + public static final String METADATA_MAX_IDLE_CONFIG = "metadata.max.idle.ms"; + private static final String METADATA_MAX_IDLE_DOC = + "Controls how long the producer will cache metadata for a topic that's idle. If the elapsed " + + "time since a topic was last produced to exceeds the metadata idle duration, then the topic's " + + "metadata is forgotten and the next access to it will force a metadata fetch request."; + + + /** buffer.memory */ + public static final String BUFFER_MEMORY_CONFIG = "buffer.memory"; + private static final String BUFFER_MEMORY_DOC = "The total bytes of memory the producer can use to buffer records waiting to be sent to the server. If records are " + + "sent faster than they can be delivered to the server the producer will block for " + MAX_BLOCK_MS_CONFIG + " after which it will throw an exception." + + "
<p>
    " + + "This setting should correspond roughly to the total memory the producer will use, but is not a hard bound since " + + "not all memory the producer uses is used for buffering."; + + /** retry.backoff.ms */ + public static final String RETRY_BACKOFF_MS_CONFIG = CommonClientConfigs.RETRY_BACKOFF_MS_CONFIG; + + /** compression.type (This property is not yet supported)*/ + public static final String COMPRESSION_TYPE_CONFIG = "compression.type"; + private static final String COMPRESSION_TYPE_DOC = "The compression type for all data generated by the producer. The default is none (i.e. no compression). Valid " + + " values are none, gzip, snappy, or lz4. " + + "Compression is of full batches of data, so the efficacy of batching will also impact the compression ratio (more batching means better compression). This property is ot yet supported."; + + /** metrics.sample.window.ms (This property is not yet supported)*/ + public static final String METRICS_SAMPLE_WINDOW_MS_CONFIG = CommonClientConfigs.METRICS_SAMPLE_WINDOW_MS_CONFIG; + + /** metrics.num.samples (This property is not yet supported)*/ + public static final String METRICS_NUM_SAMPLES_CONFIG = CommonClientConfigs.METRICS_NUM_SAMPLES_CONFIG; + + /** + * metrics.log.level (This property is not yet supported) + */ + public static final String METRICS_RECORDING_LEVEL_CONFIG = CommonClientConfigs.METRICS_RECORDING_LEVEL_CONFIG; + + /** metric.reporters (This property is not yet supported)*/ + public static final String METRIC_REPORTER_CLASSES_CONFIG = CommonClientConfigs.METRIC_REPORTER_CLASSES_CONFIG; + + /** max.in.flight.requests.per.connection (This property is not yet supported)*/ + public static final String MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION = "max.in.flight.requests.per.connection"; + private static final String MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION_DOC = "The maximum number of unacknowledged requests the client will send on a single connection before blocking." + + "This property is not supportable beacuse client sends a request and waits for response."; + + /** retries */ + public static final String RETRIES_CONFIG = CommonClientConfigs.RETRIES_CONFIG; + private static final String RETRIES_DOC = "Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error." 
+ + " Note that this retry is no different than if the client resent the record upon receiving the error."; + + + /** key.serializer */ + public static final String KEY_SERIALIZER_CLASS_CONFIG = "key.serializer"; + public static final String KEY_SERIALIZER_CLASS_DOC = "Serializer class for key that implements the org.oracle.okafka.common.serialization.Serializer interface."; + + /** value.serializer */ + public static final String VALUE_SERIALIZER_CLASS_CONFIG = "value.serializer"; + public static final String VALUE_SERIALIZER_CLASS_DOC = "Serializer class for value that implements the org.oracle.okafka.common.serialization.Serializer interface."; + + /** connections.max.idle.ms (This property is not yet supported)*/ + public static final String CONNECTIONS_MAX_IDLE_MS_CONFIG = CommonClientConfigs.CONNECTIONS_MAX_IDLE_MS_CONFIG; + + /** partitioner.class */ + public static final String PARTITIONER_CLASS_CONFIG = "partitioner.class"; + private static final String PARTITIONER_CLASS_DOC = "Partitioner class that implements the org.oracle.okafka.clients.producer.Partitioner interface."; + + /** request.timeout.ms (This property is not yet supported)*/ + public static final String REQUEST_TIMEOUT_MS_CONFIG = CommonClientConfigs.REQUEST_TIMEOUT_MS_CONFIG; + private static final String REQUEST_TIMEOUT_MS_DOC = CommonClientConfigs.REQUEST_TIMEOUT_MS_DOC; + + + /** interceptor.classes */ + public static final String INTERCEPTOR_CLASSES_CONFIG = "interceptor.classes"; + public static final String INTERCEPTOR_CLASSES_DOC = "A list of classes to use as interceptors. " + + "Implementing the org.oracle.okafka.clients.producer.ProducerInterceptor interface allows you to intercept (and possibly mutate) the records " + + "received by the producer before they are published to the Kafka cluster. By default, there are no interceptors."; + + /** enable.idempotence (This property is not yet supported)*/ + public static final String ENABLE_IDEMPOTENCE_CONFIG = "enable.idempotence"; + public static final String ENABLE_IDEMPOTENCE_DOC = "When set to 'true', the producer will ensure that exactly one copy of each message is written in the stream. If 'false', producer " + + "retries due to broker failures, etc., may write duplicates of the retried message in the stream. " + + "Note that enabling idempotence requires " + MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION + " to be less than or equal to 5, " + + "" + RETRIES_CONFIG + " to be greater than 0 and " + ACKS_CONFIG + " must be 'all'. If these values " + + "are not explicitly set by the user, suitable values will be chosen. If incompatible values are set, " + + "a ConfigException will be thrown. This property is not yet supported."; + + /** transaction.timeout.ms (This property is not yet supported) */ + public static final String TRANSACTION_TIMEOUT_CONFIG = "transaction.timeout.ms"; + public static final String TRANSACTION_TIMEOUT_DOC = "The maximum amount of time in ms that the transaction coordinator will wait for a transaction status update from the producer before proactively aborting the ongoing transaction." + + "If this value is larger than the transaction.max.timeout.ms setting in the broker, the request will fail with a `InvalidTransactionTimeout` error. This property is not yet supported."; + + /** transactional.id (This property is not yet supported) */ + public static final String TRANSACTIONAL_ID_CONFIG = "transactional.id"; + public static final String TRANSACTIONAL_ID_DOC = "The TransactionalId to use for transactional delivery. 
This enables reliability semantics which span multiple producer sessions since it allows the client to guarantee that transactions using the same TransactionalId have been completed prior to starting any new transactions. If no TransactionalId is provided, then the producer is limited to idempotent delivery. " + + "Note that enable.idempotence must be enabled if a TransactionalId is configured. " + + "The default is null, which means transactions cannot be used. " + + "Note that transactions requires a cluster of at least three brokers by default what is the recommended setting for production; for development you can change this, by adjusting broker setting `transaction.state.log.replication.factor`. This property is not yet supported."; + + /** delivery.timeout.ms */ + public static final String DELIVERY_TIMEOUT_MS_CONFIG = "delivery.timeout.ms"; + private static final String DELIVERY_TIMEOUT_MS_DOC = "An upper bound on the time to report success or failure " + + "after a call to send() returns. This limits the total time that a record will be delayed " + + "prior to sending, the time to await acknowledgement from the broker (if expected), and the time allowed " + + "for retriable send failures. The producer may report failure to send a record earlier than this config if " + + "either an unrecoverable error is encountered, the retries have been exhausted, " + + "or the record is added to a batch which reached an earlier delivery expiration deadline. " + + "The value of this config should be greater than or equal to the sum of " + REQUEST_TIMEOUT_MS_CONFIG + " " + + "and " + LINGER_MS_CONFIG + "."; + + /** + * internal.auto.downgrade.txn.commit + * Whether or not the producer should automatically downgrade the transactional commit request when the new group metadata + * feature is not supported by the broker. + *
<p>
+ * The purpose of this flag is to make Kafka Streams capable of working with old brokers when applying this new API.
+ * Non Kafka Streams users who are building their own EOS applications should be careful using this config, as there is a risk of violating EOS semantics when turning on this flag.
+ *
+ * <p>
    + * Note: this is an internal configuration and could be changed in the future in a backward incompatible way + * + */ + static final String AUTO_DOWNGRADE_TXN_COMMIT = "internal.auto.downgrade.txn.commit"; + + + static { + CONFIG = new ConfigDef().define(BOOTSTRAP_SERVERS_CONFIG, Type.LIST, Collections.emptyList(), new ConfigDef.NonNullValidator(), Importance.HIGH, CommonClientConfigs.BOOTSTRAP_SERVERS_DOC) + .define(CLIENT_DNS_LOOKUP_CONFIG, + Type.STRING, + ClientDnsLookup.USE_ALL_DNS_IPS.toString(), + in(ClientDnsLookup.USE_ALL_DNS_IPS.toString(), + ClientDnsLookup.USE_ALL_DNS_IPS.toString(), + ClientDnsLookup.RESOLVE_CANONICAL_BOOTSTRAP_SERVERS_ONLY.toString()), + Importance.MEDIUM, + CommonClientConfigs.CLIENT_DNS_LOOKUP_DOC) + .define(BUFFER_MEMORY_CONFIG, Type.LONG, 32 * 1024 * 1024L, atLeast(0L), Importance.HIGH, BUFFER_MEMORY_DOC) + .define(RETRIES_CONFIG, Type.INT, 0, between(0, Integer.MAX_VALUE), Importance.HIGH, RETRIES_DOC) + .define(ACKS_CONFIG, + Type.STRING, + "1", + in("all", "-1", "0", "1"), + Importance.HIGH, + ACKS_DOC) + .define(COMPRESSION_TYPE_CONFIG, Type.STRING, "none", Importance.HIGH, COMPRESSION_TYPE_DOC) + .define(BATCH_SIZE_CONFIG, Type.INT, 16384, atLeast(0), Importance.MEDIUM, BATCH_SIZE_DOC) + .define(LINGER_MS_CONFIG, Type.LONG, 0, atLeast(0L), Importance.MEDIUM, LINGER_MS_DOC) + .define(CLIENT_ID_CONFIG, Type.STRING, "", Importance.MEDIUM, CommonClientConfigs.CLIENT_ID_DOC) + .define(SEND_BUFFER_CONFIG, Type.INT, 128 * 1024, atLeast(-1), Importance.MEDIUM, CommonClientConfigs.SEND_BUFFER_DOC) + .define(RECEIVE_BUFFER_CONFIG, Type.INT, 32 * 1024, atLeast(-1), Importance.MEDIUM, CommonClientConfigs.RECEIVE_BUFFER_DOC) + .define(MAX_REQUEST_SIZE_CONFIG, + Type.INT, + 1 * 1024 * 1024, + atLeast(0), + Importance.MEDIUM, + MAX_REQUEST_SIZE_DOC) + .define(RECONNECT_BACKOFF_MS_CONFIG, Type.LONG, 50L, atLeast(0L), Importance.LOW, CommonClientConfigs.RECONNECT_BACKOFF_MS_DOC) + .define(RECONNECT_BACKOFF_MAX_MS_CONFIG, Type.LONG, 1000L, atLeast(0L), Importance.LOW, CommonClientConfigs.RECONNECT_BACKOFF_MAX_MS_DOC) + .define(RETRY_BACKOFF_MS_CONFIG, Type.LONG, 100L, atLeast(0L), Importance.LOW, CommonClientConfigs.RETRY_BACKOFF_MS_DOC) + .define(MAX_BLOCK_MS_CONFIG, + Type.LONG, + 60 * 1000, + atLeast(0), + Importance.MEDIUM, + MAX_BLOCK_MS_DOC) + .define(REQUEST_TIMEOUT_MS_CONFIG, + Type.INT, + 30 * 1000, + atLeast(0), + Importance.MEDIUM, + REQUEST_TIMEOUT_MS_DOC) + .define(METADATA_MAX_AGE_CONFIG, Type.LONG, 5 * 60 * 1000, atLeast(0), Importance.LOW, METADATA_MAX_AGE_DOC) + .define(METRICS_SAMPLE_WINDOW_MS_CONFIG, + Type.LONG, + 30000, + atLeast(0), + Importance.LOW, + CommonClientConfigs.METRICS_SAMPLE_WINDOW_MS_DOC) + .define(METADATA_MAX_IDLE_CONFIG, + Type.LONG, + 5 * 60 * 1000, + atLeast(5000), + Importance.LOW, + METADATA_MAX_IDLE_DOC) + .define(METRICS_NUM_SAMPLES_CONFIG, Type.INT, 2, atLeast(1), Importance.LOW, CommonClientConfigs.METRICS_NUM_SAMPLES_DOC) + .define(METRICS_RECORDING_LEVEL_CONFIG, + Type.STRING, + Sensor.RecordingLevel.INFO.toString(), + in(Sensor.RecordingLevel.INFO.toString(), Sensor.RecordingLevel.DEBUG.toString()), + Importance.LOW, + CommonClientConfigs.METRICS_RECORDING_LEVEL_DOC) + .define(METRIC_REPORTER_CLASSES_CONFIG, + Type.LIST, + Collections.emptyList(), + new ConfigDef.NonNullValidator(), + Importance.LOW, + CommonClientConfigs.METRIC_REPORTER_CLASSES_DOC) + .define(MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, + Type.INT, + 5, + atLeast(1), + Importance.LOW, + MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION_DOC) + 
.define(KEY_SERIALIZER_CLASS_CONFIG, + Type.CLASS, + Importance.HIGH, + KEY_SERIALIZER_CLASS_DOC) + .define(VALUE_SERIALIZER_CLASS_CONFIG, + Type.CLASS, + Importance.HIGH, + VALUE_SERIALIZER_CLASS_DOC) + .define(SOCKET_CONNECTION_SETUP_TIMEOUT_MS_CONFIG, + Type.LONG, + CommonClientConfigs.DEFAULT_SOCKET_CONNECTION_SETUP_TIMEOUT_MS, + Importance.MEDIUM, + CommonClientConfigs.SOCKET_CONNECTION_SETUP_TIMEOUT_MS_DOC) + .define(SOCKET_CONNECTION_SETUP_TIMEOUT_MAX_MS_CONFIG, + Type.LONG, + CommonClientConfigs.DEFAULT_SOCKET_CONNECTION_SETUP_TIMEOUT_MAX_MS, + Importance.MEDIUM, + CommonClientConfigs.SOCKET_CONNECTION_SETUP_TIMEOUT_MAX_MS_DOC) + /* default is set to be a bit lower than the server default (10 min), to avoid both client and server closing connection at same time */ + .define(CONNECTIONS_MAX_IDLE_MS_CONFIG, + Type.LONG, + 9 * 60 * 1000, + Importance.MEDIUM, + CommonClientConfigs.CONNECTIONS_MAX_IDLE_MS_DOC) + .define(DELIVERY_TIMEOUT_MS_CONFIG, Type.INT, 120 * 1000, atLeast(0), Importance.MEDIUM, DELIVERY_TIMEOUT_MS_DOC) + .define(INTERCEPTOR_CLASSES_CONFIG, + Type.LIST, + Collections.emptyList(), + new ConfigDef.NonNullValidator(), + Importance.LOW, + INTERCEPTOR_CLASSES_DOC) + .define(PARTITIONER_CLASS_CONFIG, + Type.CLASS, + org.apache.kafka.clients.producer.internals.DefaultPartitioner.class, + Importance.MEDIUM, PARTITIONER_CLASS_DOC) + .define(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, + Type.STRING, + CommonClientConfigs.DEFAULT_SECURITY_PROTOCOL, + Importance.MEDIUM, + CommonClientConfigs.SECURITY_PROTOCOL_DOC) + .withClientSslSupport() + //.withClientSaslSupport() + .define(SslConfigs.TNS_ALIAS, ConfigDef.Type.STRING, null, Importance.MEDIUM, SslConfigs.TNS_ALIAS_DOC) + .define(ENABLE_IDEMPOTENCE_CONFIG, + Type.BOOLEAN, + false, + Importance.LOW, + ENABLE_IDEMPOTENCE_DOC) + .define(TRANSACTION_TIMEOUT_CONFIG, + Type.INT, + 60000, + Importance.LOW, + TRANSACTION_TIMEOUT_DOC) + .define(TRANSACTIONAL_ID_CONFIG, + Type.STRING, + null, + new ConfigDef.NonEmptyString(), + Importance.LOW, + TRANSACTIONAL_ID_DOC) + .define(ORACLE_SERVICE_NAME, + Type.STRING, + null, + Importance.HIGH, + CommonClientConfigs.ORACLE_SERVICE_NAME_DOC) + .define(ORACLE_INSTANCE_NAME, + Type.STRING, + null, + Importance.HIGH, + CommonClientConfigs.ORACLE_INSTANCE_NAME_DOC) + .define(CommonClientConfigs.ORACLE_NET_TNS_ADMIN, + ConfigDef.Type.STRING, + Importance.MEDIUM, + CommonClientConfigs.ORACLE_NET_TNS_ADMIN_DOC) + .define(CommonClientConfigs.ORACLE_TRANSACTIONAL_PRODUCER, + ConfigDef.Type.BOOLEAN, + false, + Importance.LOW, + CommonClientConfigs.ORACLE_SERVICE_NAME_DOC); + } + + + public static Map addSerializerToConfig(Map configs, + Serializer keySerializer, Serializer valueSerializer) { + Map newConfigs = new HashMap<>(configs); + if (keySerializer != null) + newConfigs.put(KEY_SERIALIZER_CLASS_CONFIG, keySerializer.getClass()); + if (valueSerializer != null) + newConfigs.put(VALUE_SERIALIZER_CLASS_CONFIG, valueSerializer.getClass()); + return newConfigs; + } + + public static Properties addSerializerToConfig(Properties properties, + Serializer keySerializer, + Serializer valueSerializer) { + Properties newProperties = new Properties(); + newProperties.putAll(properties); + if (keySerializer != null) + newProperties.put(KEY_SERIALIZER_CLASS_CONFIG, keySerializer.getClass().getName()); + if (valueSerializer != null) + newProperties.put(VALUE_SERIALIZER_CLASS_CONFIG, valueSerializer.getClass().getName()); + return newProperties; + } + + public ProducerConfig(Properties props) { + super(CONFIG, 
props); + } + + public ProducerConfig(Map props) { + super(CONFIG, props); + } + + ProducerConfig(Map props, boolean doLog) { + super(CONFIG, props, doLog); + } + + public static Set configNames() { + return CONFIG.names(); + } + + public static void main(String[] args) { + System.out.println(CONFIG.toHtmlTable()); + } + + static Map appendSerializerToConfig(Map configs, + Serializer keySerializer, + Serializer valueSerializer) { + Map newConfigs = new HashMap<>(configs); + if (keySerializer != null) + newConfigs.put(KEY_SERIALIZER_CLASS_CONFIG, keySerializer.getClass()); + if (valueSerializer != null) + newConfigs.put(VALUE_SERIALIZER_CLASS_CONFIG, valueSerializer.getClass()); + return newConfigs; + } + + + boolean idempotenceEnabled() { + boolean userConfiguredIdempotence = this.originals().containsKey(ENABLE_IDEMPOTENCE_CONFIG); + boolean userConfiguredTransactions = this.originals().containsKey(TRANSACTIONAL_ID_CONFIG); + boolean idempotenceEnabled = userConfiguredIdempotence && this.getBoolean(ENABLE_IDEMPOTENCE_CONFIG); + + if (!idempotenceEnabled && userConfiguredIdempotence && userConfiguredTransactions) + throw new ConfigException("Cannot set a " + ProducerConfig.TRANSACTIONAL_ID_CONFIG + " without also enabling idempotence."); + return userConfiguredTransactions || idempotenceEnabled; + } +} diff --git a/clients/src/main/java/org/oracle/okafka/clients/producer/ProducerInterceptor.java b/clients/src/main/java/org/oracle/okafka/clients/producer/ProducerInterceptor.java deleted file mode 100644 index 62bf5d2..0000000 --- a/clients/src/main/java/org/oracle/okafka/clients/producer/ProducerInterceptor.java +++ /dev/null @@ -1,102 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.oracle.okafka.clients.producer; - -import org.oracle.okafka.common.Configurable; - -/** - * A plugin interface that allows you to intercept (and possibly mutate) the records received by the producer before - * they are published to the Transactional Event Queues. - *
<p>
    - * This class will get producer config properties via configure() method, including clientId assigned - * by KafkaProducer if not specified in the producer config. The interceptor implementation needs to be aware that it will be - * sharing producer config namespace with other interceptors and serializers, and ensure that there are no conflicts. - *
<p>
    - * Exceptions thrown by ProducerInterceptor methods will be caught, logged, but not propagated further. As a result, if - * the user configures the interceptor with the wrong key and value type parameters, the producer will not throw an exception, - * just log the errors. - *
<p>
    - * ProducerInterceptor callbacks may be called from multiple threads. Interceptor implementation must ensure thread-safety, if needed. - *
<p>
    - * Implement {@link org.oracle.okafka.common.ClusterResourceListener} to receive cluster metadata once it's available. Please see the class documentation for ClusterResourceListener for more information. - */ -public interface ProducerInterceptor extends Configurable { - /** - * This is called from {@link org.oracle.okafka.clients.producer.KafkaProducer#send(ProducerRecord)} and - * {@link org.oracle.okafka.clients.producer.KafkaProducer#send(ProducerRecord, Callback)} methods, before key and value - * get serialized and partition is assigned (if partition is not specified in ProducerRecord). - *
<p>
    - * This method is allowed to modify the record, in which case, the new record will be returned. The implication of modifying - * key/value is that partition assignment (if not specified in ProducerRecord) will be done based on modified key/value, - * not key/value from the client. Consequently, key and value transformation done in onSend() needs to be consistent: - * same key and value should mutate to the same (modified) key and value. Otherwise, log compaction would not work - * as expected. - *
<p>
    - * Similarly, it is up to interceptor implementation to ensure that correct topic/partition is returned in ProducerRecord. - * Most often, it should be the same topic/partition from 'record'. - *
<p>
    - * Any exception thrown by this method will be caught by the caller and logged, but not propagated further. - *
<p>
    - * Since the producer may run multiple interceptors, a particular interceptor's onSend() callback will be called in the order - * specified by {@link org.oracle.okafka.clients.producer.ProducerConfig#INTERCEPTOR_CLASSES_CONFIG}. The first interceptor - * in the list gets the record passed from the client, the following interceptor will be passed the record returned by the - * previous interceptor, and so on. Since interceptors are allowed to modify records, interceptors may potentially get - * the record already modified by other interceptors. However, building a pipeline of mutable interceptors that depend on the output - * of the previous interceptor is discouraged, because of potential side-effects caused by interceptors potentially failing to - * modify the record and throwing an exception. If one of the interceptors in the list throws an exception from onSend(), the exception - * is caught, logged, and the next interceptor is called with the record returned by the last successful interceptor in the list, - * or otherwise the client. - * - * @param record the record from client or the record returned by the previous interceptor in the chain of interceptors. - * @return producer record to send to topic/partition - */ - public ProducerRecord onSend(ProducerRecord record); - - /** - * This method is called when the record sent to the server has been acknowledged, or when sending the record fails before - * it gets sent to the server. - *

    - * This method is generally called just before the user callback is called, and in additional cases when KafkaProducer.send() - * throws an exception. - *

    - * Any exception thrown by this method will be ignored by the caller. - *
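As an illustrative sketch only (the class name is hypothetical and the imports assume the standard `org.apache.kafka` producer interfaces that this changeset migrates to elsewhere), an interceptor honoring the contract described here might look like this:

```java
// Hypothetical example: an auditing interceptor that tags outgoing values.
import java.util.Map;

import org.apache.kafka.clients.producer.ProducerInterceptor;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

public class AuditInterceptor implements ProducerInterceptor<String, String> {

    @Override
    public void configure(Map<String, ?> configs) {
        // Shares the producer config namespace with serializers and other interceptors.
    }

    @Override
    public ProducerRecord<String, String> onSend(ProducerRecord<String, String> record) {
        // Runs before serialization and partition assignment; may return a modified record.
        return new ProducerRecord<>(record.topic(), record.partition(),
                record.key(), "audited:" + record.value());
    }

    @Override
    public void onAcknowledgement(RecordMetadata metadata, Exception exception) {
        // Runs on the producer I/O path; keep it fast. Exceptions thrown here are ignored.
        if (exception != null) {
            System.err.println("send failed: " + exception.getMessage());
        }
    }

    @Override
    public void close() {
        // Release any resources acquired in configure().
    }
}
```

Such a class would be registered through the `interceptor.classes` producer property (`ProducerConfig.INTERCEPTOR_CLASSES_CONFIG`).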

    - * This method will generally execute in the background I/O thread, so the implementation should be reasonably fast. - * Otherwise, sending of messages from other threads could be delayed. - * - * @param metadata The metadata for the record that was sent (i.e. the partition and offset). - * If an error occurred, metadata will contain only valid topic and maybe - * partition. If partition is not given in ProducerRecord and an error occurs - * before partition gets assigned, then partition will be set to RecordMetadata.NO_PARTITION. - * The metadata may be null if the client passed null record to - * {@link org.oracle.okafka.clients.producer.KafkaProducer#send(ProducerRecord)}. - * @param exception The exception thrown during processing of this record. Null if no error occurred. - */ - public void onAcknowledgement(RecordMetadata metadata, Exception exception); - - /** - * This is called when interceptor is closed - */ - public void close(); -} diff --git a/clients/src/main/java/org/oracle/okafka/clients/producer/ProducerRecord.java b/clients/src/main/java/org/oracle/okafka/clients/producer/ProducerRecord.java deleted file mode 100644 index a144bb6..0000000 --- a/clients/src/main/java/org/oracle/okafka/clients/producer/ProducerRecord.java +++ /dev/null @@ -1,236 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.clients.producer; - -import org.oracle.okafka.common.header.Header; -import org.oracle.okafka.common.header.Headers; -import org.oracle.okafka.common.header.internals.RecordHeaders; - -/** - * A key/value pair to be sent to TEQ. This consists of a topic name to which the record is being sent, an optional - * partition number, and an optional key and value. - *

    - * If a valid partition number is specified that partition will be used when sending the record. If no partition is - * specified but a key is present a partition will be chosen using a hash of the key. If neither key nor partition is - * present a partition will be assigned in a round-robin fashion. - *
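A quick sketch of those three cases (topic name, keys, and values below are placeholders, not taken from the sources):

```java
// Partition selection, assuming a topic named "MY_TOPIC" with several partitions.
ProducerRecord<String, String> explicit =
        new ProducerRecord<>("MY_TOPIC", 3, "key-1", "value-1"); // partition 3 used as given

ProducerRecord<String, String> keyed =
        new ProducerRecord<>("MY_TOPIC", "key-1", "value-1");    // partition derived from hash of "key-1"

ProducerRecord<String, String> unkeyed =
        new ProducerRecord<>("MY_TOPIC", "value-1");             // partition assigned round-robin
```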

    - * The record also has an associated timestamp. Currently TEQ supports only - * {@link org.oracle.okafka.common.record.TimestampType#LOG_APPEND_TIME LogAppendTime}. Timestamp provided by the user at - * creation time is ignored. - *

- * By default Topic is configured with {@link org.oracle.okafka.common.record.TimestampType#LOG_APPEND_TIME LogAppendTime},
- *

    - * The timestamp that has actually been used will be returned to user in - * {@link RecordMetadata} - */ -public class ProducerRecord { - - private final String topic; - private final Integer partition; - private final Headers headers; - private final K key; - private final V value; - private final Long timestamp; - - /** - * Creates a record with a specified timestamp to be sent to a specified topic and partition - * - * @param topic The topic the record will be appended to - * @param partition The partition to which the record should be sent - * @param timestamp The timestamp of the record, in milliseconds since epoch. Timestamp provided by the user at - * creation time is ignored. - * @param key The key that will be included in the record - * @param value The record contents - * @param headers the headers that will be included in the record. Headers feature is not yet supportable. - */ - public ProducerRecord(String topic, Integer partition, Long timestamp, K key, V value, Iterable

    headers) { - if (topic == null) - throw new IllegalArgumentException("Topic cannot be null."); - if (timestamp != null && timestamp < 0) - throw new IllegalArgumentException( - String.format("Invalid timestamp: %d. Timestamp should always be non-negative or null.", timestamp)); - if (partition != null && partition < 0) - throw new IllegalArgumentException( - String.format("Invalid partition: %d. Partition number should always be non-negative or null.", partition)); - this.topic = topic; - this.partition = partition; - this.key = key; - this.value = value; - this.timestamp = timestamp; - this.headers = new RecordHeaders(headers); - } - - /** - * Creates a record with a specified timestamp to be sent to a specified topic and partition - * - * @param topic The topic the record will be appended to - * @param partition The partition to which the record should be sent - * @param timestamp The timestamp of the record, in milliseconds since epoch. Timestamp provided by the user at - * creation time is ignored. - * @param key The key that will be included in the record - * @param value The record contents - */ - public ProducerRecord(String topic, Integer partition, Long timestamp, K key, V value) { - this(topic, partition, timestamp, key, value, null); - } - - /** - * Creates a record to be sent to a specified topic and partition - * - * @param topic The topic the record will be appended to - * @param partition The partition to which the record should be sent - * @param key The key that will be included in the record - * @param value The record contents - * @param headers The headers that will be included in the record. Headers feature is not yet supportable. - */ - public ProducerRecord(String topic, Integer partition, K key, V value, Iterable
    headers) { - this(topic, partition, null, key, value, headers); - } - - /** - * Creates a record to be sent to a specified topic and partition - * - * @param topic The topic the record will be appended to - * @param partition The partition to which the record should be sent - * @param key The key that will be included in the record - * @param value The record contents - */ - public ProducerRecord(String topic, Integer partition, K key, V value) { - this(topic, partition, null, key, value, null); - } - - /** - * Create a record to be sent to Kafka - * - * @param topic The topic the record will be appended to - * @param key The key that will be included in the record - * @param value The record contents - */ - public ProducerRecord(String topic, K key, V value) { - this(topic, null, null, key, value, null); - } - - /** - * Create a record with no key - * - * @param topic The topic this record should be sent to - * @param value The record contents - */ - public ProducerRecord(String topic, V value) { - this(topic, null, null, null, value, null); - } - - /** - * @return The topic this record is being sent to - */ - public String topic() { - return topic; - } - - /** - * This method is not yet supported - * @return The headers (null) - */ - public Headers headers() { - return headers; - } - - /** - * @return The key (or null if no key is specified) - */ - public K key() { - return key; - } - - /** - * @return The value - */ - public V value() { - return value; - } - - /** - * @return The timestamp, which is in milliseconds since epoch. - */ - public Long timestamp() { - return timestamp; - } - - /** - * @return The partition to which the record will be sent (or null if no partition was specified) - */ - public Integer partition() { - return partition; - } - - @Override - public String toString() { - String headers = this.headers == null ? "null" : this.headers.toString(); - String key = this.key == null ? "null" : this.key.toString(); - String value = this.value == null ? "null" : this.value.toString(); - String timestamp = this.timestamp == null ? "null" : this.timestamp.toString(); - return "ProducerRecord(topic=" + topic + ", partition=" + partition + ", headers=" + headers + ", key=" + key + ", value=" + value + - ", timestamp=" + timestamp + ")"; - } - - @Override - public boolean equals(Object o) { - if (this == o) - return true; - else if (!(o instanceof ProducerRecord)) - return false; - - ProducerRecord that = (ProducerRecord) o; - - if (key != null ? !key.equals(that.key) : that.key != null) - return false; - else if (partition != null ? !partition.equals(that.partition) : that.partition != null) - return false; - else if (topic != null ? !topic.equals(that.topic) : that.topic != null) - return false; - else if (headers != null ? !headers.equals(that.headers) : that.headers != null) - return false; - else if (value != null ? !value.equals(that.value) : that.value != null) - return false; - else if (timestamp != null ? !timestamp.equals(that.timestamp) : that.timestamp != null) - return false; - - return true; - } - - @Override - public int hashCode() { - int result = topic != null ? topic.hashCode() : 0; - result = 31 * result + (partition != null ? partition.hashCode() : 0); - result = 31 * result + (headers != null ? headers.hashCode() : 0); - result = 31 * result + (key != null ? key.hashCode() : 0); - result = 31 * result + (value != null ? value.hashCode() : 0); - result = 31 * result + (timestamp != null ? 
timestamp.hashCode() : 0); - return result; - } -} - - diff --git a/clients/src/main/java/org/oracle/okafka/clients/producer/RecordMetadata.java b/clients/src/main/java/org/oracle/okafka/clients/producer/RecordMetadata.java deleted file mode 100644 index 4ec9ef8..0000000 --- a/clients/src/main/java/org/oracle/okafka/clients/producer/RecordMetadata.java +++ /dev/null @@ -1,149 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * 04/20/2020: This file is modified to support Kafka Java Client compatability to Oracle Transactional Event Queues. - * - */ - -package org.oracle.okafka.clients.producer; - -import org.oracle.okafka.common.TopicPartition; -import org.oracle.okafka.common.record.DefaultRecord; -import org.oracle.okafka.common.record.RecordBatch; - -/** - * The metadata for a record that has been acknowledged by the server - */ -public final class RecordMetadata { - - /** - * Partition value for record without partition assigned - */ - public static final int UNKNOWN_PARTITION = -1; - - private final long offset; - // The timestamp of the message. - // If LogAppendTime is used for the topic, the timestamp will be the timestamp returned by the broker. - // If CreateTime is used for the topic, the timestamp is the timestamp in the corresponding ProducerRecord if the - // user provided one. Otherwise, it will be the producer local time when the producer record was handed to the - // producer. - private final long timestamp; - private final int serializedKeySize; - private final int serializedValueSize; - private final TopicPartition topicPartition; - - private volatile Long checksum; - - public RecordMetadata(TopicPartition topicPartition, long baseOffset, long relativeOffset, long timestamp, - Long checksum, int serializedKeySize, int serializedValueSize) { - // ignore the relativeOffset if the base offset is -1, - // since this indicates the offset is unknown - this.offset = baseOffset == -1 ? -1 :((baseOffset << 16) + (relativeOffset)); - this.timestamp = timestamp; - this.checksum = checksum; - this.serializedKeySize = serializedKeySize; - this.serializedValueSize = serializedValueSize; - this.topicPartition = topicPartition; - } - - /** - * Indicates whether the record metadata includes the offset. - * @return true if the offset is included in the metadata, false otherwise. - */ - public boolean hasOffset() { - return this.offset != -1L; - } - - /** - * The offset of the record in the topic/partition. - * @return the offset of the record, or -1 if {{@link #hasOffset()}} returns false. 
- */ - public long offset() { - return this.offset; - } - - /** - * Indicates whether the record metadata includes the timestamp. - * @return true if a valid timestamp exists, false otherwise. - */ - public boolean hasTimestamp() { - return this.timestamp != RecordBatch.NO_TIMESTAMP; - } - - /** - * The timestamp of the record in the topic/partition. - * This method returns LogAppendTime of the record. CreateTime of the record is not yet supported. - * @return the timestamp of the record, or -1 if the {{@link #hasTimestamp()}} returns false. - */ - public long timestamp() { - return this.timestamp; - } - - /** - * This method is not yet supported. - */ - @Deprecated - public long checksum() { - return -1; - } - - /** - * The size of the serialized, uncompressed key in bytes. If key is null, the returned size - * is -1. - */ - public int serializedKeySize() { - return this.serializedKeySize; - } - - /** - * The size of the serialized, uncompressed value in bytes. If value is null, the returned - * size is -1. - */ - public int serializedValueSize() { - return this.serializedValueSize; - } - - /** - * The topic the record was appended to - */ - public String topic() { - return this.topicPartition.topic(); - } - - /** - * The partition the record was sent to - */ - public int partition() { - return this.topicPartition.partition(); - } - - /** - * @return a string representing RecordMetadata object - */ - @Override - public String toString() { - return topicPartition.toString() + "@" + offset; - } -} diff --git a/clients/src/main/java/org/oracle/okafka/clients/producer/internals/AQKafkaProducer.java b/clients/src/main/java/org/oracle/okafka/clients/producer/internals/AQKafkaProducer.java index 6dbe703..f26f188 100644 --- a/clients/src/main/java/org/oracle/okafka/clients/producer/internals/AQKafkaProducer.java +++ b/clients/src/main/java/org/oracle/okafka/clients/producer/internals/AQKafkaProducer.java @@ -1,20 +1,30 @@ /* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ + ** OKafka Java Client version 23.4. + ** + ** Copyright (c) 2019, 2024 Oracle and/or its affiliates. + ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
+ */ package org.oracle.okafka.clients.producer.internals; + import java.nio.ByteBuffer; import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.Set; +import java.util.concurrent.Future; +import javax.jms.BytesMessage; +import javax.jms.DeliveryMode; import javax.jms.JMSException; import javax.jms.Topic; import javax.jms.TopicConnection; @@ -22,65 +32,384 @@ import javax.jms.TopicPublisher; import oracle.jms.AQjmsBytesMessage; +import oracle.jms.AQjmsConnection; +import oracle.jms.AQjmsConstants; import oracle.jms.AQjmsException; import oracle.jms.AQjmsProducer; import oracle.jms.AQjmsSession; -import org.oracle.okafka.clients.ClientRequest; -import org.oracle.okafka.clients.ClientResponse; +import org.apache.kafka.clients.ClientRequest; +import org.apache.kafka.clients.ClientResponse; +import org.apache.kafka.clients.producer.Callback; +import org.apache.kafka.clients.producer.RecordMetadata; +import org.oracle.okafka.clients.Metadata; +import org.oracle.okafka.clients.NetworkClient; +import org.oracle.okafka.clients.TopicTeqParameters; import org.oracle.okafka.clients.producer.ProducerConfig; +import org.oracle.okafka.clients.producer.internals.OracleTransactionManager.TransactionState; import org.oracle.okafka.common.Node; -import org.oracle.okafka.common.TopicPartition; -import org.oracle.okafka.common.errors.NotLeaderForPartitionException; -import org.oracle.okafka.common.header.Header; +import org.apache.kafka.common.Cluster; +import org.apache.kafka.common.KafkaException; +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.errors.InvalidTopicException; +import org.apache.kafka.common.errors.NotLeaderForPartitionException; +import org.apache.kafka.common.header.Header; +import org.apache.kafka.common.metrics.Metrics; +//import org.apache.kafka.common.network.Selector.SelectorMetrics; import org.oracle.okafka.common.network.AQClient; +import org.oracle.okafka.common.network.SelectorMetrics; import org.oracle.okafka.common.protocol.ApiKeys; -import org.oracle.okafka.common.record.MemoryRecords; -import org.oracle.okafka.common.record.MutableRecordBatch; -import org.oracle.okafka.common.record.Record; +import org.oracle.okafka.common.requests.MetadataResponse; +import org.oracle.okafka.common.requests.ProduceRequest; +import org.oracle.okafka.common.requests.ProduceResponse; +import org.apache.kafka.common.record.MemoryRecords; +import org.apache.kafka.common.record.MutableRecordBatch; +import org.apache.kafka.common.record.Record; import org.oracle.okafka.common.utils.ConnectionUtils; -import org.oracle.okafka.common.utils.LogContext; -import org.oracle.okafka.common.utils.Time; +import org.oracle.okafka.common.utils.MessageIdConverter; +import org.oracle.okafka.common.utils.MessageIdConverter.OKafkaOffset; +import org.apache.kafka.common.utils.LogContext; +import org.apache.kafka.common.utils.Time; /** * This class sends messages to AQ */ public final class AQKafkaProducer extends AQClient { - + //Holds TopicPublishers of each node. 
Each TopicPublisher can contain a connection to corresponding node, session associated with that connection and topic publishers associated with that session private final Map topicPublishersMap; private final ProducerConfig configs; private final Time time; - public AQKafkaProducer(LogContext logContext, ProducerConfig configs, Time time) + private Metadata metadata; + private final Metrics metrics; + private final SelectorMetrics selectorMetrics; + private final int DLENGTH_SIZE = 4; + private boolean transactionalProducer = false; + private boolean idempotentProducer = false; + private int connectMode = AQjmsSession.AUTO_ACKNOWLEDGE; + private Connection dbConn = null; + private Connection externalDbConn = null; + private AQKafkaProducerStatus status = AQKafkaProducerStatus.PRE_INIT; + private OracleTransactionManager oTxm = null; + + // Flags for testing only. Do not set any of these to true + static boolean forceRollback = false; + static boolean forceRetry = false; + static boolean forceDisconnect = false; + static boolean stopReconnect = false; + + + /* To Test + * 1. forceRollback to cause messages to be rolled back. Message Id won't exist in the table. + * 1.1 Retry here with same connection and confirm that message id is not found and on retry we are able to publish the messages in the same sendTOAQ call + * 1.2 Retry here after disconnecting the current connection. This emulates session crashing and failing to produce. Verify that we are able to connect on next attempt and reproduce + * 1.3 Disconnect the connection and do not re-attempt here. This emulates instance crashing for a while. Verify that the batch gets re-enqueued and we are able to publish it again. + * + * 2. forceRollback is false hence messages are persistently stored in the table. + * 2.1 Retry here with same connection and confirm that message id is found and on retry we are not publishing messages again. + * 2.2 Retry here after disconnecting the current connection. This emulates session crashing after successful produce. + * Verify that we are able to connect on next attempt and confirm that messages are reproduced + * 2.3 Disconnect the connection and do not re-attempt here. This emulates instance crashing for a while after successful enqueue. + * Verify that the batch gets re-enqueued and on retry we confirm that the message exists in the system and they are not produced again. 
+ */ + + private enum AQKafkaProducerStatus + { + PRE_INIT, + INIT, + OPEN, + CLOSE + } + + private HashMap currentOffsetMap = null; + // private final SelectorMetrics sensors; + + public AQKafkaProducer(LogContext logContext, ProducerConfig configs, Time time, Metadata _metadata, Metrics metrics, OracleTransactionManager txM) { super(logContext.logger(AQKafkaProducer.class), configs); this.configs = configs; this.time = time; this.topicPublishersMap = new HashMap(); + this.metadata = _metadata; + this.oTxm = txM; + this.metrics=metrics; + this.selectorMetrics = new SelectorMetrics(this.metrics, "Selector", Collections.emptyMap(),true); + this.selectorMetrics.recordConnectionCount(topicPublishersMap);; + + try + { + transactionalProducer = configs.getBoolean(ProducerConfig.ORACLE_TRANSACTIONAL_PRODUCER); + }catch(Exception e) { + transactionalProducer = false; + } + + try { + idempotentProducer = configs.getBoolean(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG); + if(idempotentProducer) + { + connectMode = AQjmsSession.SESSION_TRANSACTED; + } + }catch(Exception e) + { + idempotentProducer = false; + } + + if(transactionalProducer) + { + connectMode = AQjmsSession.SESSION_TRANSACTED; + currentOffsetMap = new HashMap(); + } + status = AQKafkaProducerStatus.INIT; } + synchronized public void setExternalDbConnection (Connection conn) + { + log.debug("Setting externally supplied db connection " + conn); + if(oTxm.getTransactionState() == TransactionState.BEGIN) + { + throw new KafkaException("A transaction with another oracle connection already active."); + } + oTxm.setDBConnection(externalDbConn); + this.externalDbConn = conn; + } + + private void addToMap(Node node,TopicPublishers nodePublishers ) throws JMSException + { + topicPublishersMap.put(node, nodePublishers); + log.info("Connected nodes: "+topicPublishersMap.keySet()); + status = AQKafkaProducerStatus.OPEN; + selectorMetrics.maybeRegisterConnectionMetrics(node); + selectorMetrics.connectionCreated.record(); + log.debug("CONNECTED NODES: "+topicPublishersMap.keySet()); + } + + private void connect(Node node, Connection con) throws JMSException { + TopicPublishers nodePublishers = null; + try { + log.debug("Creating new Topic connection for node " + node); + nodePublishers = new TopicPublishers(node, con); + addToMap(node, nodePublishers); + }catch(JMSException e) { + close(node, nodePublishers); + throw e; + } + } + + public void connect(Node node) throws JMSException { TopicPublishers nodePublishers = null; try { - nodePublishers = new TopicPublishers(node); - topicPublishersMap.put(node, nodePublishers); + log.debug("Creating new connection for node " + node); + nodePublishers = new TopicPublishers(node, connectMode); + addToMap(node, nodePublishers); }catch(JMSException e) { close(node, nodePublishers); throw e; } } - + public boolean isChannelReady(Node node) { if(topicPublishersMap.containsKey(node)) { return true; } return false; } - + + /* If Database Connection was externally created then pass it. + * If no external connection provided, then look for internally created database connection. + * If internal database connection is not created then create one and cache it. Use it for all publish calls. + */ + public Connection getDBConnection(boolean force) throws JMSException + { + if(externalDbConn != null) + { + log.debug("Returning externally supplied db connection. " + externalDbConn); + return externalDbConn; + } + + if(dbConn != null) + { + log.debug("Returning already created db connection. 
" + dbConn); + return dbConn; + } + + //Database connection is not established yet. If not forced to do so, return null + if(!force) + { + log.debug(" Database connection not established yet. Not forced to create one. Returning null."); + return null; + } + + try { + if(topicPublishersMap == null || topicPublishersMap.isEmpty()) + { + org.apache.kafka.common.Node bootStrapNode = metadata.fetch().nodes().get(0); + log.debug("Transactional producer trying to connect to BootstrapNode. " + bootStrapNode ); + //ToDo: NetworkClient.initConnection() should be invoked ideally. + this.connect((Node)bootStrapNode); + } + Cluster clusterNow = metadata.fetch(); + Node leaderNode = metadata.getLeader(); + log.debug("Leader node is " + leaderNode); + + Node controllerNode = (Node)clusterNow.controller(); + log.debug("Controller Node " + controllerNode); + + if(controllerNode == null) + { + boolean isBootStrap = metadata.fetch().isBootstrapConfigured(); + if(isBootStrap) + controllerNode = (Node)metadata.fetch().controller(); + else + controllerNode = (Node)metadata.fetch().nodes().get(0); + + metadata.setLeader(controllerNode); + log.debug("getDBConnection: Controller Node and LeaderNode set to " + controllerNode); + } + TopicPublishers topicPbs = topicPublishersMap.get(controllerNode); + dbConn = ((AQjmsSession)topicPbs.sess).getDBConnection(); + return dbConn; + } + catch(Exception e) + { + log.info("Faiiled to create database connection for transactional producer. Exception: " + e ); + throw e; + } + } + + public Future transactionalSend(TopicPartition tp, byte[] serializedKey, byte[] serializedValue, + Header[] headers, Callback interceptCallback) + { + ProduceRequestResult produceResult = null; + FutureRecordMetadata frm = null; + RuntimeException publishException = null; + OKafkaOffset thisOffset = null; + + log.debug("Message for TopicPartition " + tp); + try { + if(topicPublishersMap == null || topicPublishersMap.isEmpty()) + { + org.apache.kafka.common.Node bootStrapNode = null; + if(externalDbConn != null) + { + log.debug(""); + int instId = ConnectionUtils.getInstanceId(externalDbConn); + bootStrapNode = metadata.fetch().nodeById(instId); + if(bootStrapNode == null ) + { + //if(metadata.isBootstrap()) + { + bootStrapNode = metadata.fetch().nodes().get(0); + ((Node)bootStrapNode).setId(instId); + log.debug("External DB + BOotStrap Node " + bootStrapNode); + } + log.warn("Connection setup to instance "+ instId + ". 
Which is not found in current cluster"); + } + //else + { + log.debug("Created publisher using externally supplied database connection to instance " + instId); + this.connect((Node)bootStrapNode, externalDbConn); + } + } + + if(bootStrapNode == null) { + bootStrapNode = metadata.fetch().nodes().get(0); + log.debug("Creating using bootstrapnode " + bootStrapNode ); + this.connect((Node)bootStrapNode); + } + metadata.setLeader((Node)bootStrapNode); + } + + Cluster clusterNow = metadata.fetch(); + Node controllerNode = (Node)clusterNow.controller(); + log.debug("Controller " + controllerNode); + Node leaderNode = metadata.getLeader(); + log.debug("Leader Node " + leaderNode); + + TopicPublishers topicPbs = null; + if(leaderNode != null) + topicPbs = topicPublishersMap.get(leaderNode); + else + topicPbs = topicPublishersMap.get(controllerNode); + + log.debug("Available Topic Publishers " + topicPbs); + if(topicPbs == null && topicPublishersMap.size() > 0) // Get first connected node + { + for(Map.Entry connectedPubEntry : topicPublishersMap.entrySet()) + { + metadata.setLeader(connectedPubEntry.getKey()); + topicPbs = connectedPubEntry.getValue(); + break; + } + } + TopicPublisher tps = topicPbs.getTopicPublisher(tp.topic()); + Connection conn = ((AQjmsSession)topicPbs.getSession()).getDBConnection(); + + oTxm.setDBConnection(conn); + + TopicTeqParameters topicTeqParam = metadata.topicParaMap.get(tp.topic()); + if(topicTeqParam == null) + { + try { + super.fetchQueueParameters(tp.topic(), conn, metadata.topicParaMap); + } catch (SQLException e) { + log.error("Exception while fetching TEQ parameters and updating metadata " + e.getMessage()); + } + } + int msgVersion = topicTeqParam.getMsgVersion(); + + BytesMessage byteMessage = createBytesMessage(topicPbs.sess, tp, + ByteBuffer.wrap(serializedKey), ByteBuffer.wrap(serializedValue), headers, msgVersion); + + try { + tps.publish(byteMessage, DeliveryMode.PERSISTENT, 0, AQjmsConstants.EXPIRATION_NEVER); + }catch(JMSException e) + { + log.error("Exception while producing transactionl message " + e.getMessage()); + publishException = new RuntimeException(e); + } + + if(publishException == null) + { + OKafkaOffset prevOffset = currentOffsetMap.get(tp); + thisOffset = MessageIdConverter.computeOffset(prevOffset, byteMessage.getJMSMessageID()); + currentOffsetMap.remove(tp); + currentOffsetMap.put(tp, thisOffset); + } + else { + //Create Empty or Invalid Offset + thisOffset= MessageIdConverter.getOKafkaOffset("",false,false); + } + + produceResult = new ProduceRequestResult(tp); + + produceResult.set(thisOffset.subPartitionId(), (publishException==null)?byteMessage.getJMSTimestamp():-1, + Collections.singletonList(thisOffset), publishException); + + frm = new FutureRecordMetadata(produceResult, 0, System.currentTimeMillis(), + serializedKey.length, serializedValue.length,time ); + + produceResult.done(); + this.oTxm.addRecordToTransaction(frm); + + }catch(Exception e) + { + log.error("Error while publishing records within a transaction." 
+ e.getMessage(), e); + produceResult = new ProduceRequestResult(tp); + produceResult.set(-1L, -1L, null, new RuntimeException(e)); + frm = new FutureRecordMetadata(produceResult, -1l, System.currentTimeMillis(), + serializedKey.length, serializedValue.length,time ); + produceResult.done(); + } + return frm; + } + public ClientResponse send(ClientRequest request) { - return parseRequest(request, request.apiKey()); + ClientResponse cr = parseRequest(request, ApiKeys.convertToOracleApiKey(request.apiKey())); + selectorMetrics.recordCompletedReceive(cr.destination(), cr.requestLatencyMs()); + return cr; } - + /** * Determines the type of request and calls appropriate method for handling request * @param request request to be sent @@ -91,11 +420,11 @@ private ClientResponse parseRequest( ClientRequest request, ApiKeys key) { if(key == ApiKeys.PRODUCE) return publish(request); if(key == ApiKeys.METADATA) - return getMetadata(request); + return getMetadata(request); return null; - + } - + /** *Unwraps memory records of a producer batch into records. *Then translates each record into AQjmsBytesMessage and sends them to database instance as AqjmsBytesMessage array. @@ -104,67 +433,333 @@ private ClientResponse parseRequest( ClientRequest request, ApiKeys key) { private ClientResponse publish(ClientRequest request) { ProduceRequest.Builder builder = (ProduceRequest.Builder)request.requestBuilder(); ProduceRequest produceRequest = builder.build(); - Node node = request.destination(); + Node node = metadata.getNodeById(Integer.parseInt(request.destination())); TopicPartition topicPartition = produceRequest.getTopicpartition(); MemoryRecords memoryRecords = produceRequest.getMemoryRecords(); - TopicPublishers nodePublishers; + TopicPublishers nodePublishers = null; AQjmsBytesMessage[] msgs =null; ProduceResponse.PartitionResponse partitionResponse = null; + + //TopicPublishers allPublishers = null; + TopicPublisher publisher = null; + int retryCnt = 2; + AQjmsBytesMessage byteMessage = null; + TopicTeqParameters topicTeqParam = metadata.topicParaMap.get(topicPartition.topic()); + long batchSize=memoryRecords.sizeInBytes(); + int msgVersion = topicTeqParam.getMsgVersion(); + + boolean checkForCommit = false; boolean disconnected = false; - try { - nodePublishers = topicPublishersMap.get(node); - TopicSession session = nodePublishers.getSession(); - final List messages = new ArrayList<>(); - Iterator mutableRecordBatchIterator = memoryRecords.batchIterator(); - while(mutableRecordBatchIterator.hasNext()) { - Iterator recordIterator = mutableRecordBatchIterator.next().iterator(); - while(recordIterator.hasNext()) { - Record record = recordIterator.next(); - messages.add(createBytesMessage(session, topicPartition, record.key(), record.value(), record.headers())); + boolean notALeader = false; + Exception pException = null; + + log.debug("Publish request for node " + node); + + try { + if(topicTeqParam.getKeyBased() != 2) { + String errMsg = "Topic " + topicPartition.topic() + " is not an Oracle kafka topic, Please drop and re-create topic" + +" using Admin.createTopics() or dbms_aqadm.create_database_kafka_topic procedure"; + throw new InvalidTopicException(errMsg); + } + } + catch(InvalidTopicException e) { + log.error("Cannot send messages to topic " + topicPartition.topic() + ". 
Not a kafka topic"); + partitionResponse = createResponses(topicPartition, e, msgs); + // selectorMetrics.recordCompletedReceive(request.destination(), batchSize, System.currentTimeMillis()); + return createClientResponse(request, topicPartition, partitionResponse, disconnected); + } + + do + { + disconnected = false; + checkForCommit = false; + notALeader = false; + pException = null; + retryCnt--; + + try { + nodePublishers = topicPublishersMap.get(node); + if(nodePublishers == null) + { + throw new NullPointerException("No publishers created for node " + node); + } + log.debug("Found a publisher " + nodePublishers +" for node " + node); + TopicSession session = nodePublishers.getSession(); + + if(idempotentProducer) + { + String checkMsgId = null; + try { + if(produceRequest.checkForDups()) + { + Connection dbConn = ((AQjmsSession)session).getDBConnection(); + List retryMsgIds = produceRequest.retryMsgList(); + + if(retryMsgIds != null && retryMsgIds.size() > 0) + { + checkMsgId = retryMsgIds.get(0).getMsgId().substring(3); + log.debug("Duplicate Check for parition " + topicPartition + "for msgId " + checkMsgId); + } + + boolean msgIdExist = checkIfMsgIdExist(dbConn, topicPartition.topic(), checkMsgId); + if(msgIdExist) + { + log.info("Message Id " +checkMsgId +" exists for topic partition "+ topicPartition+". Records were succesfully produced."); + partitionResponse = createResponses(topicPartition, null, null); + partitionResponse.setCheckDuplicate(false); + partitionResponse.setOffsets(retryMsgIds); + return createClientResponse(request, topicPartition, partitionResponse, false); + } + else + { + log.info("Message Id " + checkMsgId +" exists for topic partition "+ topicPartition +" does not exist. Retrying to publish"); + } + } + + }catch(Exception e) + { + log.error("Exception while checking for duplicates for topic partition " + topicPartition +" message id " + checkMsgId +" Exception : "+ e ,e); + checkForCommit = produceRequest.checkForDups(); + throw e; + } + } + + final List messages = new ArrayList<>(); + Iterator mutableRecordBatchIterator = memoryRecords.batchIterator(); + while(mutableRecordBatchIterator.hasNext()) { + Iterator recordIterator = mutableRecordBatchIterator.next().iterator(); + while(recordIterator.hasNext()) { + Record record = recordIterator.next(); + byteMessage = createBytesMessage(session, topicPartition, record.key(), record.value(), record.headers(), msgVersion); + messages.add(byteMessage); + } + } + + publisher = nodePublishers.getTopicPublisher(topicPartition.topic()); + msgs = messages.toArray(new AQjmsBytesMessage[0]); + + log.trace("sending messages to topic : {} with partition: {}, number of messages: {}", topicPartition.topic(), topicPartition.partition(), msgs.length); + + sendToAQ(msgs, publisher); + if(idempotentProducer) + { + try { + //Session must be a transacted session. + log.trace("Idempotent Producer. 
Committing with node " + node); + + if(forceRollback) { + nodePublishers.sess.rollback(); + forceRetry = true; + forceRollback = false; + } + else + { + nodePublishers.sess.commit(); + } + + if(forceDisconnect) { + nodePublishers.sess.close(); + forceRetry = true; + forceDisconnect = false; + } + + if(forceRetry) + { + forceRetry = false; + throw new KafkaException("Dummy Exception"); + } + }catch(Exception e) + { + log.error("Exception while committing records " + e.getMessage()); + checkForCommit = true; + throw e; + } } - + selectorMetrics.recordCompletedSend(request.destination(),batchSize, System.currentTimeMillis()); + log.trace("Messages sent successfully to topic : {} with partition: {}, number of messages: {}", topicPartition.topic(), topicPartition.partition(), msgs.length); + retryCnt = 0; } - TopicPublisher publisher = null; - TopicPublishers publishers = topicPublishersMap.get(node); - publisher = publishers.getTopicPublisher(topicPartition.topic()); - msgs = messages.toArray(new AQjmsBytesMessage[0]); - log.trace("sending messages to topic : {} with partition: {}, number of messages: {}", topicPartition.topic(), topicPartition.partition(), msgs.length); - sendToAQ(msgs, publisher); - log.trace("Messages sent successfully to topic : {} with partition: {}, number of messages: {}", topicPartition.topic(), topicPartition.partition(), msgs.length); - } catch(Exception exception) { - partitionResponse = createResponses(topicPartition, new NotLeaderForPartitionException(exception), msgs); - if(exception instanceof AQjmsException) { - if (((AQjmsException)exception).getErrorNumber() == 25348 ) { - partitionResponse = createResponses(topicPartition, new NotLeaderForPartitionException(exception), msgs); - } - } else { - log.trace("Unexcepted error occured with connection to node {}, closing the connection", node); - try { - TopicConnection conn = topicPublishersMap.get(node).getConnection(); - topicPublishersMap.remove(node); - conn.close(); - disconnected = true; - log.trace("Connection with node {} is closed", request.destination()); - } catch(JMSException jmsException) { - log.trace("Failed to close connection with node {}", node); - } - - log.error("failed to send messages to topic : {} with partition: {}", topicPartition.topic(), topicPartition.partition()); - partitionResponse = createResponses(topicPartition, exception, msgs); - } - } - if( partitionResponse == null) - partitionResponse = createResponses(topicPartition, null, msgs); + catch(Exception e) { + + pException = e; + + if(!checkForCommit) { + log.error("Exception while sending records for topic partition " + topicPartition + " no node " + node , e); + } + else { + log.error("Exception while committing records for topic partition " + topicPartition + " no node " + node , e); + } + + if ( e instanceof JMSException) { + log.info(" Encountered JMS Exception:" + e.getMessage() ); + // This exception is thrown from sever when AQ tries to publish into a partition which is not owned by the connected node + if( (e instanceof AQjmsException ) && ((AQjmsException)e).getErrorNumber() == 25348 ) + { + notALeader = true; + retryCnt = 0; + break; + } + } + if(nodePublishers != null) + { + boolean connected = nodePublishers.isConnected(); + log.info("KafkaProducer is connected to the broker? " + connected); + // Database connection used to publish the records is terminated. 
+ if(!connected ) + { + try { + nodePublishers.close(); + if( !stopReconnect && retryCnt > 0) + { + log.info("Reconnecting to node " + node); + boolean reCreate = nodePublishers.reCreate(); + if(!reCreate) { + log.info("Failed to reconnect to " + node +" . Failing this batch for " + topicPartition); + disconnected = true; + } + }else { + disconnected = true; + log.info("Failed to reconnect to " + node +" . Failing this batch for " + topicPartition); + } + stopReconnect = false; + + }catch(Exception reConnException) + { + log.error("Exception while reconnecting to node " + node , reConnException); + disconnected = true; + retryCnt = 0; + try { + // Close again just to be sure that we are not leaking connections. + nodePublishers.close(); + }catch(Exception ignoreExcp) {} + } + } + if(checkForCommit) + { + if(!disconnected ) { + //Re-connected to the same database instance after one retry + try { + log.debug("Connection to node is fine. Checking if previous publish was successfull or not."); + nodePublishers = topicPublishersMap.get(node); + java.sql.Connection conn = ((AQjmsSession)nodePublishers.sess).getDBConnection(); + String msgId = msgs[0].getJMSMessageID().substring(3); + boolean msgIdExists = checkIfMsgIdExist(conn, topicPartition.topic(), msgId); + checkForCommit = false; + if(msgIdExists) { + //successfully produced the message + log.debug("Message Id " + msgId+" already present in for " + topicPartition + ". No need to retry."); + retryCnt = 0; + pException = null; + } + else { + /* DO Nothing. + * Producer is successfully connected to the database node. + * It will retry one more time to produce. + */ + } + } + catch(Exception msgIdExcp) + { + log.info("Exception while checking if message id exists or not " + msgIdExcp); + log.info("Batch will be processed again after checking for duplicates."); + checkForCommit = true; + retryCnt=0; + } + } + else { + log.info("Node " + node + " is not reachable. 
Batch will be reprocessed after checking for duplicates."); + retryCnt=0; + } + } + } + } + }while(retryCnt > 0); + if(pException != null) + { + if(notALeader) + { + log.info("Node "+ node +" is not a Leader for partition " + topicPartition ); + partitionResponse = createResponses(topicPartition, new NotLeaderForPartitionException(pException), msgs); + this.metadata.requestUpdate(); + } + if(disconnected) + { + TopicPublishers tpRemoved = topicPublishersMap.remove(node); + log.trace("Connection with node {} is closed", request.destination()); + String exceptionMsg = "Database instance not reachable: " + node; + org.apache.kafka.common.errors.DisconnectException disconnExcp = new org.apache.kafka.common.errors.DisconnectException(exceptionMsg,pException); + partitionResponse = createResponses(topicPartition, disconnExcp, msgs); + } + } + else + { + partitionResponse = createResponses(topicPartition, null, msgs); + } + partitionResponse.setCheckDuplicate(checkForCommit); return createClientResponse(request, topicPartition, partitionResponse, disconnected); + } + + private void dumpTopicPublishers() + { + if(this.topicPublishersMap == null) + log.info("TopicPublisherMap is null"); + else + log.info("TopicPublisherMap size " +topicPublishersMap.size() ); + + for(Node n: topicPublishersMap.keySet()) { + log.info("Publihsers for Node " + n); + log.info(topicPublishersMap.get(n).toString()); + } } - + + private boolean checkIfMsgIdExist(Connection con,String topicName, String msgId) + { + boolean msgIdExists = false; + String qry =" Select count(*) from " +ConnectionUtils.enquote(topicName) + " where msgid = '" + msgId+"'"; + log.debug("Executing " + qry); + ResultSet rs = null; + try (Statement stmt = con.prepareCall(qry);) { + stmt.execute(qry); + rs = stmt.getResultSet(); + if(rs.next()) + { + int msgCnt = rs.getInt(1); + + if(msgCnt == 0) + { + msgIdExists = false; + } + else + msgIdExists = true; + } + else { + msgIdExists = false; + } + rs.close(); + rs = null; + + }catch(Exception e) + { + log.info("Exception while checking if msgId Exists or not. " + e,e); + if(rs!=null) + { + try { + rs.close(); + }catch(Exception ignoreE) {} + } + } + log.debug("Message Id "+ msgId +" Exists?: " + msgIdExists); + return msgIdExists; + } + + private ClientResponse createClientResponse(ClientRequest request, TopicPartition topicPartition, ProduceResponse.PartitionResponse partitionResponse, boolean disconnected) { - return new ClientResponse(request.makeHeader(), request.callback(), request.destination(), - request.createdTimeMs(), time.milliseconds(), disconnected, - new ProduceResponse(topicPartition, partitionResponse)); + return new ClientResponse(request.makeHeader((short)1), request.callback(), request.destination(), + request.createdTimeMs(), time.milliseconds(), disconnected, null,null, + new ProduceResponse(topicPartition, partitionResponse)); } - + /** * Bulk send messages to AQ. 
* @param messages array of AQjmsBytesmessage to be sent @@ -172,78 +767,310 @@ private ClientResponse createClientResponse(ClientRequest request, TopicPartitio * @throws JMSException throws JMSException */ private void sendToAQ(AQjmsBytesMessage[] messages, TopicPublisher publisher) throws JMSException { - //Sends messages in bulk using topic publisher - ((AQjmsProducer)publisher).bulkSend(publisher.getTopic(), messages); + //Sends messages in bulk using topic publisher; + log.info("In BulkSend: #messages = " + messages.length); + ((AQjmsProducer)publisher).bulkSend(publisher.getTopic(), messages); } - + /** * Creates AQjmsBytesMessage from ByteBuffer's key, value and headers */ - private AQjmsBytesMessage createBytesMessage(TopicSession session, TopicPartition topicPartition, ByteBuffer key, ByteBuffer value, Header[] headers) throws JMSException { + private AQjmsBytesMessage createBytesMessage(TopicSession session, TopicPartition topicPartition, + ByteBuffer key, ByteBuffer value, Header[] headers, int messageVersion) throws JMSException { + AQjmsBytesMessage msg=null; - msg = (AQjmsBytesMessage)(session.createBytesMessage()); - byte[] keyByteArray = new byte[key.limit()]; - key.get(keyByteArray); - byte[] payload = new byte[value.limit()]; - value.get(payload); - msg.writeBytes(payload); - msg.setJMSCorrelationID(new String(keyByteArray)); - payload = null; - msg.setStringProperty("topic", topicPartition.topic()); - msg.setStringProperty("AQINTERNAL_PARTITION", Integer.toString(topicPartition.partition()*2)); + if(messageVersion == 2) { + msg = createBytesMessageV2(session,topicPartition,key, value, headers); + } + else { + msg = createBytesMessageV1(session,topicPartition,key, value, headers); + } return msg; } - + + /** + * + * Creates AQjmsBytesMessage from ByteBuffer's key, value and headers in V1 version + * In V1 version, Key is stored as correlation ID. + */ + private AQjmsBytesMessage createBytesMessageV1(TopicSession session, TopicPartition topicPartition, + ByteBuffer key, ByteBuffer value, Header[] headers) throws JMSException { + + AQjmsBytesMessage msg = (AQjmsBytesMessage)(session.createBytesMessage()); + + if(key!=null) { + byte[] keyByteArray = new byte[key.limit()]; + key.get(keyByteArray); + msg.setJMSCorrelationID(new String(keyByteArray)); + } + + byte[] payload = new byte[value.limit()]; + value.get(payload); + msg.writeBytes(payload); + payload = null; + msg.setStringProperty("topic", topicPartition.topic()); + msg.setStringProperty(AQClient.PARTITION_PROPERTY, Integer.toString(topicPartition.partition()*2)); + msg.setIntProperty(MESSAGE_VERSION, 1); + + return msg; + } + + /* + * Creates AQjmsBytesMessage from ByteBuffer's key, value and headers in V2 version + * In V2 version, Key is stored as part of the message payload as described below. + * + * Construct Byte Payload in below format: + * | KEY LENGTH (4 Bytes Fixed) | KEY | + * | VALUE LENGTH (4 BYTES FIXED) | VALUE | + * | HEADER NAME LENGTH(4 BYTES FIXED) | HEADER NAME | + * | HEADER VALUE LENGTH (4 BYTES FIXED) | HEADER VALUE | + * | HEADER NAME LENGTH(4 BYTES FIXED) | HEADER NAME | + * | HEADER VALUE LENGTH (4 BYTES FIXED) | HEADER VALUE | + * + * For records with null key , KEY LENGTH is set to 0. + * For records with null value, VALUE LENGTH is set to 0. 
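+ * For illustration only (values below are hypothetical and assume the 4-byte lengths are written big-endian):
+ * a record with key "k1", value "val" and one header ("h" -> 0x01) would be laid out as
+ * | 00 00 00 02 | 'k' '1' | 00 00 00 03 | 'v' 'a' 'l' | 00 00 00 01 | 'h' | 00 00 00 01 | 0x01 |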
+ * Number of headers are set in property "AQINTERNAL_HEADERCOUNT" + * + * */ + private AQjmsBytesMessage createBytesMessageV2(TopicSession session, TopicPartition topicPartition, + ByteBuffer key, ByteBuffer value, Header[] headers) throws JMSException { + + AQjmsBytesMessage msg=null; + int keyLen = 0; + int valueLen =0; + + int hKeysLen[] = null; + int hValuesLen[] = null; + + byte[] keyByteArray = null; + byte[] valueByteArray = null; + + + if(headers != null) + { + hKeysLen = new int[headers.length]; + hValuesLen = new int[headers.length]; + } + + msg = (AQjmsBytesMessage)(session.createBytesMessage()); + + int totalSize = 0; + if(key != null) { + + keyByteArray = new byte[key.limit()]; + key.get(keyByteArray); + keyLen = keyByteArray.length; + } + + totalSize += (keyLen + DLENGTH_SIZE ); + + if(value != null) { + valueByteArray = new byte[value.limit()]; + value.get(valueByteArray); + valueLen = valueByteArray.length; + + } + totalSize += (valueLen + DLENGTH_SIZE); + + if(headers != null) { + int hIndex = 0; + for(Header h:headers) + { + int hKeyLen = h.key().getBytes().length; + totalSize += (hKeyLen + DLENGTH_SIZE); + hKeysLen[hIndex] = hKeyLen; + int hValueLength = h.value().length; + totalSize += (hValueLength +DLENGTH_SIZE); + hValuesLen[hIndex++] = hValueLength; + } + } + ByteBuffer pBuffer = ByteBuffer.allocate(totalSize); + + //If Key is null Put Length = 0 + pBuffer.put(ConnectionUtils.convertTo4Byte(keyLen)); + if(keyLen > 0) { + pBuffer.put(keyByteArray); + msg.setJMSCorrelationID(new String(keyByteArray)); + } + //If Value is null then put length = 0 + pBuffer.put(ConnectionUtils.convertTo4Byte(valueLen)); + if(valueLen > 0) + { + pBuffer.put(valueByteArray); + } + + if(headers != null) + { + int hIndex = 0; + for(Header h : headers) + { + pBuffer.put(ConnectionUtils.convertTo4Byte(hKeysLen[hIndex])); + pBuffer.put(h.key().getBytes()); + pBuffer.put(ConnectionUtils.convertTo4Byte(hValuesLen[hIndex++])); + pBuffer.put(h.value()); + } + } + + pBuffer.rewind(); + byte[] payload = new byte[pBuffer.limit()]; + pBuffer.get(payload); + msg.writeBytes(payload); + payload = null; + msg.setStringProperty(PARTITION_PROPERTY, Integer.toString(topicPartition.partition()*2)); + if(headers !=null) + { + msg.setIntProperty(HEADERCOUNT_PROPERTY, headers.length); + } + + msg.setIntProperty(MESSAGE_VERSION, 2); + + return msg; + } + + /** * Creates response for records in a producer batch from each corresponding AQjmsBytesMessage data updated after send is done. 
*/ - private ProduceResponse.PartitionResponse createResponses(TopicPartition tp, Exception exception, AQjmsBytesMessage[] msgs) { + private ProduceResponse.PartitionResponse createResponses(TopicPartition tp, RuntimeException exception, AQjmsBytesMessage[] msgs) { int iter=0; //Map responses = new HashMap<>(); ProduceResponse.PartitionResponse response =new ProduceResponse.PartitionResponse(exception); - if(exception == null) { + + //if(exception == null) + if(msgs!= null) + { response.msgIds = new ArrayList<>(); - response.logAppendTime = new ArrayList<>(); + //response.logAppendTime = new ArrayList<>(); String msgId = null; long timeStamp = -1; + long subPartitionId = -1; + OKafkaOffset prevOKafkaOffset = null; + while(iter clusterNodes = NetworkClient.convertToOracleNodes(cluster.nodes()); + // Check if we have a node where connection already exists + Set nodesWithConn = topicPublishersMap.keySet(); + for(Node nodeNow: clusterNodes) + { + for(Node connectedNode : nodesWithConn) + { + if(connectedNode.equals(nodeNow)) + { + //Found a node with a connection to database. + node = nodeNow; + break; + } + } + } + if(node == null) + { + //No node with connection yet. Pick the first bootstrap node. + node = clusterNodes.get(0); + log.info("No Connected Node Found. Picked first of bootstrap nodes.: " + node); + } + } + else + { + node = (org.oracle.okafka.common.Node)metadata.getNodeById(Integer.parseInt(request.destination())); + } try { - conn = ((AQjmsSession)topicPublishersMap.get(request.destination()).getSession()).getDBConnection(); + TopicPublishers tpNode = topicPublishersMap.get(node); + if(tpNode != null) + { + conn = ((AQjmsSession)topicPublishersMap.get(node).getSession()).getDBConnection(); + }else { + // Designated node does not have a connection. Find alternative. + for(TopicPublishers tPublishers: topicPublishersMap.values()) + { + if(tPublishers.isConnected()) + { + conn = ((AQjmsSession)tPublishers.getSession()).getDBConnection(); + } + } + + if(conn == null) + { + log.info("Sender not connected to any node. 
Re-connecting."); + List clusterNodes = NetworkClient.convertToOracleNodes(metadata.fetch().nodes()); + for(Node n : clusterNodes) + { + try { + this.connect(n); + log.info("Attempting to connect to " + n); + conn = ((AQjmsSession)topicPublishersMap.get(n).getSession()).getDBConnection(); + log.info("Connected to node " + n); + node = n; + break; + }catch(Exception e) + { + log.info(" Node {} not rechable", n); + } + } + } + /*if(conn == null) + metadata.requestUpdate(); */ + } } catch(JMSException jms) { try { log.trace("Unexcepted error occured with connection to node {}, closing the connection", request.destination()); - topicPublishersMap.get(request.destination()).getConnection().close(); + topicPublishersMap.get(metadata.getNodeById(Integer.parseInt(request.destination()))).getConnection().close(); log.trace("Connection with node {} is closed", request.destination()); } catch(JMSException jmsEx) { log.trace("Failed to close connection with node {}", request.destination()); } } - - ClientResponse response = getMetadataNow(request, conn); - if(response.wasDisconnected()) - topicPublishersMap.remove(request.destination()); + + + ClientResponse response = getMetadataNow(request, conn, node, metadata.updateRequested()); + + MetadataResponse metadataresponse = (MetadataResponse)response.responseBody(); + + org.apache.kafka.common.Cluster updatedCluster = metadataresponse.cluster(); + + for(String topic: updatedCluster.topics()) { + try { + super.fetchQueueParameters(topic, conn, metadata.topicParaMap); + } catch (SQLException e) { + log.error("Exception while fetching TEQ parameters and updating metadata " + e.getMessage()); + } + } + + + if(response.wasDisconnected()) { + topicPublishersMap.remove(metadata.getNodeById(Integer.parseInt(request.destination()))); + metadata.requestUpdate(); + } return response; } - + /** * Closes AQKafkaProducer */ @@ -252,18 +1079,29 @@ public void close() { close(nodePublishers.getKey(), nodePublishers.getValue()); } topicPublishersMap.clear(); + status = AQKafkaProducerStatus.CLOSE; } - + + //Close publishers for this node only public void close(Node node) { - + + TopicPublishers tpNode = topicPublishersMap.get(node); + close(node, tpNode); + } + + public boolean isClosed() + { + if(status == AQKafkaProducerStatus.CLOSE) + return true; + return false; } - /** * Closes all connections, session associated with each connection and all topic publishers associated with session. 
*/ private void close(Node node, TopicPublishers publishers) { if( node == null || publishers == null) return ; + for(Map.Entry topicPublisher : publishers.getTopicPublisherMap().entrySet()) { try { topicPublisher.getValue().close(); @@ -278,6 +1116,7 @@ private void close(Node node, TopicPublishers publishers) { } try { publishers.getConnection().close(); + this.selectorMetrics.connectionClosed.record(); } catch(JMSException jms) { log.error("failed to close connection {} associated with node {} ",publishers.getConnection(), node ); } @@ -287,16 +1126,127 @@ private void close(Node node, TopicPublishers publishers) { * Also creates, manages session associated with each connection and topic publishers associated with each session */ private final class TopicPublishers { + private Connection externalConn; + private Node node; private TopicConnection conn; private TopicSession sess; private Map topicPublishers = null; + private int sessionAckMode = javax.jms.Session.AUTO_ACKNOWLEDGE; + + private boolean isAlive = false; + PreparedStatement pingStmt = null; + private final String PING_QUERY = "SELECT banner FROM v$version where 1<>1"; + + private String connInfo = ""; + + public TopicPublishers(Node node, Connection externalConn) throws JMSException { + this.node = node; + this.externalConn = externalConn; + sessionAckMode = javax.jms.Session.SESSION_TRANSACTED; + createPublishers(false); + topicPublishers = new HashMap<>(); + log.debug("ExternalConnection " + externalConn); + } + public TopicPublishers(Node node) throws JMSException { this(node, TopicSession.AUTO_ACKNOWLEDGE); } - public TopicPublishers(Node node,int mode) throws JMSException { - conn = createTopicConnection(node); - sess = createTopicSession(mode); - topicPublishers = new HashMap<>(); + public TopicPublishers(Node _node,int mode) throws JMSException { + this.node = _node; + this.sessionAckMode = mode; + + try { + createPublishers(false); + topicPublishers = new HashMap<>(); + /* + Connection oConn = ((AQjmsSession)sess).getDBConnection(); + int instId = Integer.parseInt(((oracle.jdbc.internal.OracleConnection)oConn).getServerSessionInfo().getProperty("AUTH_INSTANCE_NO")); + String serviceName = ((oracle.jdbc.internal.OracleConnection)oConn).getServerSessionInfo().getProperty("SERVICE_NAME"); + String instanceName = ((oracle.jdbc.internal.OracleConnection)oConn).getServerSessionInfo().getProperty("INSTANCE_NAME"); + String user = oConn.getMetaData().getUserName(); + + try { + String sessionId = ((oracle.jdbc.internal.OracleConnection)oConn).getServerSessionInfo().getProperty("AUTH_SESSION_ID"); + String serialNum = ((oracle.jdbc.internal.OracleConnection)oConn).getServerSessionInfo().getProperty("AUTH_SERIAL_NUM"); + String serverPid = ((oracle.jdbc.internal.OracleConnection)oConn).getServerSessionInfo().getProperty("AUTH_SERVER_PID"); + connInfo = "Session_Info:"+ sessionId +","+serialNum+". Process Id:" + serverPid +". Instance Name:"+instanceName; + log.info("Database Producer "+connInfo); + }catch(Exception ignoreE) + { + } + + node.setId(instId); + node.setService(serviceName); + node.setInstanceName(instanceName); + node.setUser(user); + node.updateHashCode(); + pingStmt = oConn.prepareStatement(PING_QUERY); + pingStmt.setQueryTimeout(1); + isAlive = true;*/ + }catch(Exception e) + { + log.error("Exception while getting instance id from conneciton " + e, e); + throw e; + } + + } + + public String toString() + { + String tpDesc =connInfo +". 
Acknowledge_mode:"+ sessionAckMode +"."; + if(topicPublishers!= null && topicPublishers.size() > 0) + { + String topicInfo ="Topics:["; + boolean first = true; + for(String topic : topicPublishers.keySet()) + { + if(!first) + topicInfo+=","; + + topicInfo += topic; + + first = false; + } + topicInfo+="]."; + tpDesc += topicInfo; + } + return tpDesc; + } + + private boolean createPublishers(boolean reCreate) throws JMSException { + try { + conn = createTopicConnection(); + sess = createTopicSession(sessionAckMode); + Connection oConn = ((AQjmsSession)sess).getDBConnection(); + + int instId = Integer.parseInt(((oracle.jdbc.internal.OracleConnection)oConn).getServerSessionInfo().getProperty("AUTH_INSTANCE_NO")); + String serviceName = ((oracle.jdbc.internal.OracleConnection)oConn).getServerSessionInfo().getProperty("SERVICE_NAME"); + String instanceName = ((oracle.jdbc.internal.OracleConnection)oConn).getServerSessionInfo().getProperty("INSTANCE_NAME"); + String user = oConn.getMetaData().getUserName(); + String sessionId = ((oracle.jdbc.internal.OracleConnection)oConn).getServerSessionInfo().getProperty("AUTH_SESSION_ID"); + String serialNum = ((oracle.jdbc.internal.OracleConnection)oConn).getServerSessionInfo().getProperty("AUTH_SERIAL_NUM"); + String serverPid = ((oracle.jdbc.internal.OracleConnection)oConn).getServerSessionInfo().getProperty("AUTH_SERVER_PID"); + connInfo = "Session_Info:"+ sessionId +","+serialNum+". Process Id:" + serverPid +". Instance Name:"+instanceName; + log.info("Database Producer "+connInfo); + + if(reCreate) + { + node.setId(instId); + node.setService(serviceName); + node.setInstanceName(instanceName); + node.setUser(user); + node.updateHashCode(); + } + pingStmt = oConn.prepareStatement(PING_QUERY); + pingStmt.setQueryTimeout(1); + isAlive = true; + }catch(Exception setupException) + { + JMSException crPublisherException = new JMSException(setupException.getMessage()); + crPublisherException.setLinkedException(setupException); + throw crPublisherException; + } + return true; } /** * Creates topic connection to node @@ -304,15 +1254,25 @@ public TopicPublishers(Node node,int mode) throws JMSException { * @return established topic connection * @throws JMSException */ - public TopicConnection createTopicConnection(Node node) throws JMSException { - conn = ConnectionUtils.createTopicConnection(node, configs); + public TopicConnection createTopicConnection() throws Exception { + if(externalConn != null && !externalConn.isClosed()) + { + log.debug("Using External Connection to setup TopicConnection"); + conn = ConnectionUtils.createTopicConnection(externalConn, configs, log); + } + else + { + conn = ConnectionUtils.createTopicConnection(node, configs, log); + } return conn; } - + public TopicPublisher getTopicPublisher(String topic) throws JMSException { TopicPublisher publisher = topicPublishers.get(topic); - if(publisher == null) + if(publisher == null) { publisher = createTopicPublisher(topic); + topicPublishers.put(topic, publisher); + } return publisher; } @@ -323,35 +1283,135 @@ public TopicPublisher getTopicPublisher(String topic) throws JMSException { * @throws JMSException */ public TopicSession createTopicSession(int mode) throws JMSException { - if(sess != null) + if(sess != null && ((AQjmsSession)sess).isOpen()) return sess; - sess= ConnectionUtils.createTopicSession(conn, mode, false); - conn.start(); - return sess; - + + boolean transactedSession = false; + if(mode == AQjmsSession.SESSION_TRANSACTED) + { + transactedSession = true; + mode = 
AQjmsSession.AUTO_ACKNOWLEDGE; + } + + sess= ConnectionUtils.createTopicSession(conn, mode, transactedSession); + conn.start(); + return sess; + } - + /** * Creates topic publisher for given topic */ private TopicPublisher createTopicPublisher(String topic) throws JMSException { - Topic dest = ((AQjmsSession)sess).getTopic(ConnectionUtils.getUsername(configs), topic); + Topic dest = ((AQjmsSession)sess).getTopic((node!=null&&node.user()!=null)?node.user():ConnectionUtils.getUsername(configs), topic); TopicPublisher publisher = sess.createPublisher(dest); - topicPublishers.put(topic, publisher); return publisher; - + } public TopicConnection getConnection() { return conn; } - + public TopicSession getSession() { return sess; } - + public Map getTopicPublisherMap() { return topicPublishers; } - - } + + public boolean isConnected() + { + if(isAlive) + { + try + { + pingStmt.executeQuery(); + }catch(Exception e) + { + log.error("Publishers to node {} Failed to connect.", node.toString()); + isAlive = false; + } + } + return isAlive; + } + + public boolean reCreate() throws JMSException + { + log.debug("Recreating TopicPublisher " + this.toString()); + close(); + boolean reCreateSucc = createPublishers(true); + if(!reCreateSucc) { + log.debug("Recreation failed"); + return false; + }else { + log.debug("Successfully recreated " + this.toString() ); + } + try { + Map topicPublishersNew = new HashMap(); + for(String topic: topicPublishers.keySet()) + { + try { + TopicPublisher tpNew = createTopicPublisher(topic); + topicPublishersNew.put(topic, tpNew); + } catch(Exception e) + { + log.error("Exception "+ e +" while re-creating publishers for topic " +topic + " for node" + node ); + } + } + topicPublishers.clear(); + topicPublishers = topicPublishersNew; + }catch(Exception e) + { + log.error("Exception "+ e +" while re-creating publishers for topic for node" + node ); + } + isAlive = true; + + return isAlive; + } + + public void close() + { + try { + if(pingStmt != null ) + { + if( !pingStmt.isClosed()) + pingStmt.close(); + + pingStmt = null; + } + }catch(Exception e) { + log.error("Error while closing ping statement for " + node); + pingStmt = null; + } + try { + if(sess != null) + { + if(((AQjmsSession)sess).isOpen()) + sess.close(); + + sess = null; + } + }catch(Exception e) + { + log.error("Error while closing session for " + node); + sess = null; + } + try { + if(conn != null) + { + if( ((AQjmsConnection)conn).isOpen()) + conn.close(); + + conn = null; + } + + }catch(Exception e) + { + log.error("Error while closing connection for " + node); + } + isAlive = false; + } + } +} diff --git a/clients/src/main/java/org/oracle/okafka/clients/producer/internals/BufferPool.java b/clients/src/main/java/org/oracle/okafka/clients/producer/internals/BufferPool.java deleted file mode 100644 index f2b2701..0000000 --- a/clients/src/main/java/org/oracle/okafka/clients/producer/internals/BufferPool.java +++ /dev/null @@ -1,304 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.clients.producer.internals; - -import java.nio.ByteBuffer; -import java.util.ArrayDeque; -import java.util.Deque; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.locks.Condition; -import java.util.concurrent.locks.ReentrantLock; - -import org.oracle.okafka.common.errors.TimeoutException; -import org.oracle.okafka.common.utils.Time; - - -/** - * A pool of ByteBuffers kept under a given memory limit. This class is fairly specific to the needs of the producer. In - * particular it has the following properties: - *
      - *
    1. There is a special "poolable size" and buffers of this size are kept in a free list and recycled - *
    2. It is fair. That is all memory is given to the longest waiting thread until it has sufficient memory. This - * prevents starvation or deadlock when a thread asks for a large chunk of memory and needs to block until multiple - * buffers are deallocated. - *
    - */ -public class BufferPool { - - static final String WAIT_TIME_SENSOR_NAME = "bufferpool-wait-time"; - - private final long totalMemory; - private final int poolableSize; - private final ReentrantLock lock; - private final Deque free; - private final Deque waiters; - /** Total available memory is the sum of nonPooledAvailableMemory and the number of byte buffers in free * poolableSize. */ - private long nonPooledAvailableMemory; - private final Time time; - - /** - * Create a new buffer pool - * - * @param memory The maximum amount of memory that this buffer pool can allocate - * @param poolableSize The buffer size to cache in the free list rather than deallocating - * @param time time instance - */ - public BufferPool(long memory, int poolableSize, Time time) { - this.poolableSize = poolableSize; - this.lock = new ReentrantLock(); - this.free = new ArrayDeque<>(); - this.waiters = new ArrayDeque<>(); - this.totalMemory = memory; - this.nonPooledAvailableMemory = memory; - this.time = time; - } - - /** - * Allocate a buffer of the given size. This method blocks if there is not enough memory and the buffer pool - * is configured with blocking mode. - * - * @param size The buffer size to allocate in bytes - * @param maxTimeToBlockMs The maximum time in milliseconds to block for buffer memory to be available - * @return The buffer - * @throws InterruptedException If the thread is interrupted while blocked - * @throws IllegalArgumentException if size is larger than the total memory controlled by the pool (and hence we would block - * forever) - */ - public ByteBuffer allocate(int size, long maxTimeToBlockMs) throws InterruptedException { - if (size > this.totalMemory) - throw new IllegalArgumentException("Attempt to allocate " + size - + " bytes, but there is a hard limit of " - + this.totalMemory - + " on memory allocations."); - - ByteBuffer buffer = null; - this.lock.lock(); - try { - // check if we have a free buffer of the right size pooled - if (size == poolableSize && !this.free.isEmpty()) - return this.free.pollFirst(); - - // now check if the request is immediately satisfiable with the - // memory on hand or if we need to block - int freeListSize = freeSize() * this.poolableSize; - if (this.nonPooledAvailableMemory + freeListSize >= size) { - // we have enough unallocated or pooled memory to immediately - // satisfy the request, but need to allocate the buffer - freeUp(size); - this.nonPooledAvailableMemory -= size; - } else { - // we are out of memory and will have to block - int accumulated = 0; - Condition moreMemory = this.lock.newCondition(); - try { - long remainingTimeToBlockNs = TimeUnit.MILLISECONDS.toNanos(maxTimeToBlockMs); - this.waiters.addLast(moreMemory); - // loop over and over until we have a buffer or have reserved - // enough memory to allocate one - while (accumulated < size) { - long startWaitNs = time.nanoseconds(); - long timeNs; - boolean waitingTimeElapsed; - try { - waitingTimeElapsed = !moreMemory.await(remainingTimeToBlockNs, TimeUnit.NANOSECONDS); - } finally { - long endWaitNs = time.nanoseconds(); - timeNs = Math.max(0L, endWaitNs - startWaitNs); - } - - if (waitingTimeElapsed) { - throw new TimeoutException("Failed to allocate memory within the configured max blocking time " + maxTimeToBlockMs + " ms."); - } - - remainingTimeToBlockNs -= timeNs; - - // check if we can satisfy this request from the free list, - // otherwise allocate memory - if (accumulated == 0 && size == this.poolableSize && !this.free.isEmpty()) { - // just grab a buffer from 
the free list - buffer = this.free.pollFirst(); - accumulated = size; - } else { - // we'll need to allocate memory, but we may only get - // part of what we need on this iteration - freeUp(size - accumulated); - int got = (int) Math.min(size - accumulated, this.nonPooledAvailableMemory); - this.nonPooledAvailableMemory -= got; - accumulated += got; - } - } - // Don't reclaim memory on throwable since nothing was thrown - accumulated = 0; - } finally { - // When this loop was not able to successfully terminate don't loose available memory - this.nonPooledAvailableMemory += accumulated; - this.waiters.remove(moreMemory); - } - } - } finally { - // signal any additional waiters if there is more memory left - // over for them - try { - if (!(this.nonPooledAvailableMemory == 0 && this.free.isEmpty()) && !this.waiters.isEmpty()) - this.waiters.peekFirst().signal(); - } finally { - // Another finally... otherwise find bugs complains - lock.unlock(); - } - } - - if (buffer == null) - return safeAllocateByteBuffer(size); - else - return buffer; - } - - /** - * Allocate a buffer. If buffer allocation fails (e.g. because of OOM) then return the size count back to - * available memory and signal the next waiter if it exists. - */ - private ByteBuffer safeAllocateByteBuffer(int size) { - boolean error = true; - try { - ByteBuffer buffer = allocateByteBuffer(size); - error = false; - return buffer; - } finally { - if (error) { - this.lock.lock(); - try { - this.nonPooledAvailableMemory += size; - if (!this.waiters.isEmpty()) - this.waiters.peekFirst().signal(); - } finally { - this.lock.unlock(); - } - } - } - } - - // Protected for testing. - protected ByteBuffer allocateByteBuffer(int size) { - return ByteBuffer.allocate(size); - } - - /** - * Attempt to ensure we have at least the requested number of bytes of memory for allocation by deallocating pooled - * buffers (if needed) - */ - private void freeUp(int size) { - while (!this.free.isEmpty() && this.nonPooledAvailableMemory < size) - this.nonPooledAvailableMemory += this.free.pollLast().capacity(); - } - - /** - * Return buffers to the pool. If they are of the poolable size add them to the free list, otherwise just mark the - * memory as free. - * - * @param buffer The buffer to return - * @param size The size of the buffer to mark as deallocated, note that this may be smaller than buffer.capacity - * since the buffer may re-allocate itself during in-place compression - */ - public void deallocate(ByteBuffer buffer, int size) { - lock.lock(); - try { - if (size == this.poolableSize && size == buffer.capacity()) { - buffer.clear(); - this.free.add(buffer); - } else { - this.nonPooledAvailableMemory += size; - } - Condition moreMem = this.waiters.peekFirst(); - if (moreMem != null) - moreMem.signal(); - } finally { - lock.unlock(); - } - } - - public void deallocate(ByteBuffer buffer) { - deallocate(buffer, buffer.capacity()); - } - - /** - * the total free memory both unallocated and in the free list - */ - public long availableMemory() { - lock.lock(); - try { - return this.nonPooledAvailableMemory + freeSize() * (long) this.poolableSize; - } finally { - lock.unlock(); - } - } - - // Protected for testing. 
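The BufferPool removed here (okafka now relies on the upstream Apache Kafka producer internals) recycles buffers of the configured "poolable size" on a free list and treats everything else as plain heap memory. A deliberately simplified, non-blocking sketch of that free-list rule; the fair, blocking allocation path of the real class is omitted:

```java
import java.nio.ByteBuffer;
import java.util.ArrayDeque;
import java.util.Deque;

// Simplified illustration only: buffers of the "poolable size" are recycled on a
// free list, everything else is accounted against the remaining memory budget.
final class SimpleBufferPool {
    private final int poolableSize;
    private long freeMemory;
    private final Deque<ByteBuffer> free = new ArrayDeque<>();

    SimpleBufferPool(long totalMemory, int poolableSize) {
        this.poolableSize = poolableSize;
        this.freeMemory = totalMemory;
    }

    synchronized ByteBuffer allocate(int size) {
        if (size == poolableSize && !free.isEmpty())
            return free.pollFirst();                 // reuse a pooled buffer
        if (size > freeMemory)                       // the real pool blocks fairly here instead
            throw new IllegalStateException("pool exhausted");
        freeMemory -= size;
        return ByteBuffer.allocate(size);
    }

    synchronized void deallocate(ByteBuffer buffer) {
        if (buffer.capacity() == poolableSize) {
            buffer.clear();
            free.add(buffer);                        // back onto the free list
        } else {
            freeMemory += buffer.capacity();         // just return the bytes
        }
    }
}
```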
- protected int freeSize() { - return this.free.size(); - } - - /** - * Get the unallocated memory (not in the free list or in use) - */ - public long unallocatedMemory() { - lock.lock(); - try { - return this.nonPooledAvailableMemory; - } finally { - lock.unlock(); - } - } - - /** - * The number of threads blocked waiting on memory - */ - public int queued() { - lock.lock(); - try { - return this.waiters.size(); - } finally { - lock.unlock(); - } - } - - /** - * The buffer size that will be retained in the free list after use - */ - public int poolableSize() { - return this.poolableSize; - } - - /** - * The total memory managed by this pool - */ - public long totalMemory() { - return this.totalMemory; - } - - // package-private method used only for testing - Deque waiters() { - return this.waiters; - } -} diff --git a/clients/src/main/java/org/oracle/okafka/clients/producer/internals/DefaultPartitioner.java b/clients/src/main/java/org/oracle/okafka/clients/producer/internals/DefaultPartitioner.java deleted file mode 100644 index 4585a34..0000000 --- a/clients/src/main/java/org/oracle/okafka/clients/producer/internals/DefaultPartitioner.java +++ /dev/null @@ -1,95 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.clients.producer.internals; - -import java.util.List; -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.ThreadLocalRandom; -import java.util.concurrent.atomic.AtomicInteger; - -import org.oracle.okafka.common.PartitionInfo; -import org.oracle.okafka.clients.producer.Partitioner; -import org.oracle.okafka.common.Cluster; -import org.oracle.okafka.common.utils.Utils; - -/** - * The default partitioning strategy: - *
      - *
    • If a partition is specified in the record, use it - *
    • If no partition is specified but a key is present choose a partition based on a hash of the key - *
    • If no partition or key is present choose a partition in a round-robin fashion - */ -public class DefaultPartitioner implements Partitioner { - - private final ConcurrentMap topicCounterMap = new ConcurrentHashMap<>(); - - public void configure(Map configs) {} - - /** - * Compute the partition for the given record. - * - * @param topic The topic name - * @param key The key to partition on (or null if no key) - * @param keyBytes serialized key to partition on (or null if no key) - * @param value The value to partition on or null - * @param valueBytes serialized value to partition on or null - * @param cluster The current cluster metadata - */ - public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) { - int numPartitions = cluster.partitionCountForTopic(topic); - - if (keyBytes == null) { - int nextValue = nextValue(topic); - List availablePartitions = cluster.availablePartitionsForTopic(topic); - if (availablePartitions.size() > 0) { - int part = Utils.toPositive(nextValue) % availablePartitions.size(); - return availablePartitions.get(part).partition(); - } else { - // no partitions are available, give a non-available partition - return Utils.toPositive(nextValue) % numPartitions; - } - }else { - // hash the keyBytes to choose a partition - return Utils.toPositive(Utils.murmur2(keyBytes)) % numPartitions; - } - } - - private int nextValue(String topic) { - AtomicInteger counter = topicCounterMap.get(topic); - if (null == counter) { - counter = new AtomicInteger(ThreadLocalRandom.current().nextInt()); - AtomicInteger currentCounter = topicCounterMap.putIfAbsent(topic, counter); - if (currentCounter != null) { - counter = currentCounter; - } - } - return counter.getAndIncrement(); - } - - public void close() {} - -} diff --git a/clients/src/main/java/org/oracle/okafka/clients/producer/internals/FutureRecordMetadata.java b/clients/src/main/java/org/oracle/okafka/clients/producer/internals/FutureRecordMetadata.java index 22fa9bd..fdaef41 100644 --- a/clients/src/main/java/org/oracle/okafka/clients/producer/internals/FutureRecordMetadata.java +++ b/clients/src/main/java/org/oracle/okafka/clients/producer/internals/FutureRecordMetadata.java @@ -1,7 +1,7 @@ /* -** OKafka Java Client version 0.8. +** OKafka Java Client version 23.4. ** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. +** Copyright (c) 2019, 2024 Oracle and/or its affiliates. ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
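The removed DefaultPartitioner follows the classic three-step strategy listed above: honour an explicit partition, otherwise hash the key, otherwise round-robin. A compact sketch of just that decision, where `hash` stands in for murmur2 and `counter` for the per-topic round-robin counter:

```java
import java.util.concurrent.atomic.AtomicInteger;

final class PartitionChoice {
    static int choose(Integer explicitPartition, byte[] keyBytes, int numPartitions,
                      AtomicInteger counter) {
        if (explicitPartition != null)
            return explicitPartition;                              // 1. honour the record's partition
        if (keyBytes != null)
            return toPositive(hash(keyBytes)) % numPartitions;     // 2. hash the key
        // 3. round-robin (the real class also prefers currently available partitions here)
        return toPositive(counter.getAndIncrement()) % numPartitions;
    }

    private static int toPositive(int n) { return n & 0x7fffffff; }

    private static int hash(byte[] bytes) {
        int h = 17;
        for (byte b : bytes) h = 31 * h + b;                       // placeholder for murmur2
        return h;
    }
}
```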
*/ @@ -34,121 +34,140 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; -import org.oracle.okafka.clients.producer.RecordMetadata; +import org.apache.kafka.clients.producer.RecordMetadata; +import org.apache.kafka.common.utils.Time; import org.oracle.okafka.common.utils.MessageIdConverter; +import org.oracle.okafka.common.utils.MessageIdConverter.OKafkaOffset; /** * The future result of a record send */ public final class FutureRecordMetadata implements Future { - private final ProduceRequestResult result; - private final int relativeOffset; - private final long createTimestamp; - private final Long checksum; - private final int serializedKeySize; - private final int serializedValueSize; - private volatile FutureRecordMetadata nextRecordMetadata = null; - - public FutureRecordMetadata(ProduceRequestResult result, int relativeOffset, long createTimestamp, - Long checksum, int serializedKeySize, int serializedValueSize) { - this.result = result; - this.relativeOffset = relativeOffset; - this.createTimestamp = createTimestamp; - this.checksum = checksum; - this.serializedKeySize = serializedKeySize; - this.serializedValueSize = serializedValueSize; - } - - @Override - public boolean cancel(boolean interrupt) { - return false; - } - - @Override - public boolean isCancelled() { - return false; - } - - @Override - public RecordMetadata get() throws InterruptedException, ExecutionException { - this.result.await(); - if (nextRecordMetadata != null) - return nextRecordMetadata.get(); - return valueOrError(); - } - - @Override - public RecordMetadata get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException { - // Handle overflow. - long now = System.currentTimeMillis(); - long deadline = Long.MAX_VALUE - timeout < now ? Long.MAX_VALUE : now + timeout; - boolean occurred = this.result.await(timeout, unit); - if (nextRecordMetadata != null) - return nextRecordMetadata.get(deadline - System.currentTimeMillis(), TimeUnit.MILLISECONDS); - if (!occurred) - throw new TimeoutException("Timeout after waiting for " + TimeUnit.MILLISECONDS.convert(timeout, unit) + " ms."); - return valueOrError(); - } - - /** - * This method is used when we have to split a large batch in smaller ones. A chained metadata will allow the - * future that has already returned to the users to wait on the newly created split batches even after the - * old big batch has been deemed as done. 
- */ - void chain(FutureRecordMetadata futureRecordMetadata) { - if (nextRecordMetadata == null) - nextRecordMetadata = futureRecordMetadata; - else - nextRecordMetadata.chain(futureRecordMetadata); - } - - RecordMetadata valueOrError() throws ExecutionException { - if (this.result.error() != null) - throw new ExecutionException(this.result.error()); - else - return value(); - } - - Long checksumOrNull() { - return this.checksum; - } - - /** - * Converts TEQ message id into kafka offset - * @return record metadata - */ - RecordMetadata value() { - if (nextRecordMetadata != null) - return nextRecordMetadata.value(); - long baseOffset = -1; - long relOffset = -1; - if(this.result.msgIds() != null) { - try { - - String msgId = this.result.msgIds().get(relativeOffset); - long offset = MessageIdConverter.getOffset(msgId); - - baseOffset = offset >>> 16; - relOffset = offset & 65535; - } catch(RuntimeException exception) { - baseOffset = -1; - relOffset = -1; - } - } - return new RecordMetadata(result.topicPartition(), baseOffset, relOffset, - timestamp(), this.checksum, this.serializedKeySize, this.serializedValueSize); - } - - private long timestamp() { - return result.hasLogAppendTime() ? result.logAppendTime().get(relativeOffset) : createTimestamp; - } - - @Override - public boolean isDone() { - if (nextRecordMetadata != null) - return nextRecordMetadata.isDone(); - return this.result.completed(); - } + private final ProduceRequestResult result; + // Removed final here. relativeOffset will be decided after the publish only + private final long relativeOffset; + private final long createTimestamp; + private final int serializedKeySize; + private final int serializedValueSize; + private final Time time; + private volatile FutureRecordMetadata nextRecordMetadata = null; + + public FutureRecordMetadata(ProduceRequestResult result, long relativeOffset, long createTimestamp, + int serializedKeySize, int serializedValueSize, Time _time) { + this.result = result; + this.relativeOffset = relativeOffset; + this.createTimestamp = createTimestamp; + this.serializedKeySize = serializedKeySize; + this.serializedValueSize = serializedValueSize; + time = _time; + } + + public ProduceRequestResult requestResult() { + return this.result; + } + + @Override + public boolean cancel(boolean interrupt) { + return false; + } + + @Override + public boolean isCancelled() { + return false; + } + + @Override + public RecordMetadata get() throws InterruptedException, ExecutionException { + this.result.await(); + if (nextRecordMetadata != null) { + return nextRecordMetadata.get(); + } + return valueOrError(); + } + + @Override + public RecordMetadata get(long timeout, TimeUnit unit) + throws InterruptedException, ExecutionException, TimeoutException { + // Handle overflow. + long now = time.milliseconds(); + long deadline = Long.MAX_VALUE - timeout < now ? Long.MAX_VALUE : now + timeout; + boolean occurred = this.result.await(timeout, unit); + if (nextRecordMetadata != null) + return nextRecordMetadata.get(deadline - System.currentTimeMillis(), TimeUnit.MILLISECONDS); + if (!occurred) + throw new TimeoutException( + "Timeout after waiting for " + TimeUnit.MILLISECONDS.convert(timeout, unit) + " ms."); + return valueOrError(); + } + + /** + * This method is used when we have to split a large batch in smaller ones. A + * chained metadata will allow the future that has already returned to the users + * to wait on the newly created split batches even after the old big batch has + * been deemed as done. 
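Both the old and the reworked FutureRecordMetadata rely on this chaining trick: once a batch is split, the future already handed to the caller forwards get()/isDone() to the tail of the chain, so the caller still unblocks when the split batches finish. A stripped-down illustration of that delegation (not the okafka class itself):

```java
import java.util.concurrent.CountDownLatch;

// Once chained, every call is forwarded to the newest link, so a caller blocked
// on the original future completes when the replacement (split) batch finishes.
final class ChainedResult {
    private final CountDownLatch done = new CountDownLatch(1);
    private volatile ChainedResult next;
    private volatile String value;

    void chain(ChainedResult replacement) {
        if (next == null) next = replacement;
        else next.chain(replacement);              // always append at the tail
    }

    void complete(String v) { value = v; done.countDown(); }

    String get() throws InterruptedException {
        done.await();                              // wait for this link...
        return next != null ? next.get() : value;  // ...then defer to the chain tail
    }

    boolean isDone() {
        return next != null ? next.isDone() : done.getCount() == 0;
    }
}
```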
+ */ + void chain(FutureRecordMetadata futureRecordMetadata) { + if (nextRecordMetadata == null) + nextRecordMetadata = futureRecordMetadata; + else + nextRecordMetadata.chain(futureRecordMetadata); + } + + RecordMetadata valueOrError() throws ExecutionException { + if (this.result.error((int) relativeOffset) != null) + throw new ExecutionException(this.result.error((int) relativeOffset)); + else { + return value(); + } + } + + /** + * Converts TEQ message id into kafka offset + * + * @return record metadata + */ + RecordMetadata value() { + if (nextRecordMetadata != null) + return nextRecordMetadata.value(); + long baseOffset = -1; + long relOffset = -1; + if (this.result.msgIds() != null) { + try { + // Changes for 2.8.1 + if (this.result.msgIds().size() > relativeOffset) { + OKafkaOffset okOffset = this.result.msgIds().get((int) relativeOffset); + // OKafkaOffset okOffset = MessageIdConverter.getOKafkaOffset(msgId, true, + // true); + baseOffset = okOffset.subPartitionId(); + relOffset = okOffset.sequenceNo(); + } + } catch (RuntimeException exception) { + baseOffset = -1; + relOffset = -1; + } + } + + RecordMetadata rm = new RecordMetadata(result.topicPartition(), baseOffset, (int) relOffset, timestamp(), + this.serializedKeySize, this.serializedValueSize); + return rm; + } + + RuntimeException error() { + return result.error((int) relativeOffset); + } + + private long timestamp() { + // return result.hasLogAppendTime() ? + // result.logAppendTime().get((int)relativeOffset) : createTimestamp; + return result.hasLogAppendTime() ? result.logAppendTime() : createTimestamp; + } + + @Override + public boolean isDone() { + if (nextRecordMetadata != null) + return nextRecordMetadata.isDone(); + return this.result.completed(); + } } diff --git a/clients/src/main/java/org/oracle/okafka/clients/producer/internals/IncompleteBatches.java b/clients/src/main/java/org/oracle/okafka/clients/producer/internals/IncompleteBatches.java index 3bd3e1d..44e8278 100644 --- a/clients/src/main/java/org/oracle/okafka/clients/producer/internals/IncompleteBatches.java +++ b/clients/src/main/java/org/oracle/okafka/clients/producer/internals/IncompleteBatches.java @@ -1,9 +1,9 @@ /* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ + ** OKafka Java Client version 23.4. + ** + ** Copyright (c) 2019, 2024 Oracle and/or its affiliates. + ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. + */ /* * Licensed to the Apache Software Foundation (ASF) under one or more diff --git a/clients/src/main/java/org/oracle/okafka/clients/producer/internals/OkafkaProducerMetrics.java b/clients/src/main/java/org/oracle/okafka/clients/producer/internals/OkafkaProducerMetrics.java new file mode 100644 index 0000000..82fad64 --- /dev/null +++ b/clients/src/main/java/org/oracle/okafka/clients/producer/internals/OkafkaProducerMetrics.java @@ -0,0 +1,123 @@ +/* +** OKafka Java Client version 23.4. +** +** Copyright (c) 2019, 2024 Oracle and/or its affiliates. +** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
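The reworked value() above no longer unpacks a single long: the OKafkaOffset parsed from the TEQ message id supplies the base offset (subPartitionId) and the relative offset (sequenceNo) directly. A small sketch contrasting the old packed encoding with the new mapping, using plain longs as stand-ins:

```java
// Old scheme (removed above): one packed long, high bits = base offset, low 16 bits = relative.
// New scheme: the OKafkaOffset from the TEQ message id carries both values directly.
final class OffsetMapping {
    static long[] fromPackedOffset(long packed) {                        // pre-23.4 behaviour
        return new long[] { packed >>> 16, packed & 65535 };
    }

    static long[] fromTeqOffset(long subPartitionId, long sequenceNo) {  // 23.4 behaviour
        return new long[] { subPartitionId, sequenceNo };
    }

    public static void main(String[] args) {
        long packed = (2L << 16) | 17;                                   // base 2, relative 17
        long[] old = fromPackedOffset(packed);
        System.out.println("base=" + old[0] + " relative=" + old[1]);    // base=2 relative=17
    }
}
```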
+*/ + +package org.oracle.okafka.clients.producer.internals; + +import java.util.Map; + +import org.apache.kafka.common.MetricName; +import org.apache.kafka.common.metrics.Metrics; +import org.apache.kafka.common.metrics.Sensor; +import org.apache.kafka.common.metrics.stats.CumulativeSum; + +public class OkafkaProducerMetrics implements AutoCloseable{ + public static final String GROUP = "producer-metrics"; + private static final String FLUSH = "flush"; + private static final String TXN_INIT = "txn-init"; + private static final String TXN_BEGIN = "txn-begin"; + private static final String TXN_SEND_OFFSETS = "txn-send-offsets"; + private static final String TXN_COMMIT = "txn-commit"; + private static final String TXN_ABORT = "txn-abort"; + private static final String TOTAL_TIME_SUFFIX = "-time-ns-total"; + private static final String METADATA_WAIT = "metadata-wait"; + private final Map tags; + private final Metrics metrics; + private final Sensor initTimeSensor; + private final Sensor beginTxnTimeSensor; + private final Sensor flushTimeSensor; + private final Sensor sendOffsetsSensor; + private final Sensor commitTxnSensor; + private final Sensor abortTxnSensor; + private final Sensor metadataWaitSensor; + + public OkafkaProducerMetrics(Metrics metrics) { + this.metrics = metrics; + tags = this.metrics.config().tags(); + flushTimeSensor = newLatencySensor( + FLUSH, + "Total time producer has spent in flush in nanoseconds." + ); + initTimeSensor = newLatencySensor( + TXN_INIT, + "Total time producer has spent in initTransactions in nanoseconds." + ); + beginTxnTimeSensor = newLatencySensor( + TXN_BEGIN, + "Total time producer has spent in beginTransaction in nanoseconds." + ); + sendOffsetsSensor = newLatencySensor( + TXN_SEND_OFFSETS, + "Total time producer has spent in sendOffsetsToTransaction in nanoseconds." + ); + commitTxnSensor = newLatencySensor( + TXN_COMMIT, + "Total time producer has spent in commitTransaction in nanoseconds." + ); + abortTxnSensor = newLatencySensor( + TXN_ABORT, + "Total time producer has spent in abortTransaction in nanoseconds." + ); + metadataWaitSensor = newLatencySensor( + METADATA_WAIT, + "Total time producer has spent waiting on topic metadata in nanoseconds." 
+ ); + } + + @Override + public void close() { + removeMetric(FLUSH); + removeMetric(TXN_INIT); + removeMetric(TXN_BEGIN); + removeMetric(TXN_SEND_OFFSETS); + removeMetric(TXN_COMMIT); + removeMetric(TXN_ABORT); + removeMetric(METADATA_WAIT); + } + + public void recordFlush(long duration) { + flushTimeSensor.record(duration); + } + + public void recordInit(long duration) { + initTimeSensor.record(duration); + } + + public void recordBeginTxn(long duration) { + beginTxnTimeSensor.record(duration); + } + + public void recordSendOffsets(long duration) { + sendOffsetsSensor.record(duration); + } + + public void recordCommitTxn(long duration) { + commitTxnSensor.record(duration); + } + + public void recordAbortTxn(long duration) { + abortTxnSensor.record(duration); + } + + public void recordMetadataWait(long duration) { + metadataWaitSensor.record(duration); + } + + private Sensor newLatencySensor(String name, String description) { + Sensor sensor = metrics.sensor(name + TOTAL_TIME_SUFFIX); + sensor.add(metricName(name, description), new CumulativeSum()); + return sensor; + } + + private MetricName metricName(final String name, final String description) { + return metrics.metricName(name + TOTAL_TIME_SUFFIX, GROUP, description, tags); + } + + private void removeMetric(final String name) { + metrics.removeSensor(name + TOTAL_TIME_SUFFIX); + } + +} diff --git a/clients/src/main/java/org/oracle/okafka/clients/producer/internals/OracleTransactionManager.java b/clients/src/main/java/org/oracle/okafka/clients/producer/internals/OracleTransactionManager.java new file mode 100644 index 0000000..23bc7d7 --- /dev/null +++ b/clients/src/main/java/org/oracle/okafka/clients/producer/internals/OracleTransactionManager.java @@ -0,0 +1,300 @@ +/* +** OKafka Java Client version 23.4. +** +** Copyright (c) 2019, 2024 Oracle and/or its affiliates. +** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
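OracleTransactionManager, added below, backs the producer's transactional API (initTransactions, beginTransaction, commitTransaction, abortTransaction) on top of a single database connection. A usage sketch, assuming the okafka KafkaProducer mirrors the standard Kafka transactional calls; connection, serializer and transaction properties are omitted here and should be configured as in the examples directory:

```java
import java.util.Properties;

import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.oracle.okafka.clients.producer.KafkaProducer;

public class TransactionalSendSketch {
    public static void main(String[] args) {
        Properties props = new Properties();   // fill in okafka connection and serializer properties

        try (Producer<String, String> producer = new KafkaProducer<>(props)) {
            producer.initTransactions();                  // TransactionState PRE_INIT -> INIT_INVOKED
            try {
                producer.beginTransaction();              // -> BEGIN, local transaction id assigned
                producer.send(new ProducerRecord<>("TXEQ", "key", "value"));
                producer.commitTransaction();             // commits the underlying database transaction
            } catch (Exception e) {
                producer.abortTransaction();              // rolls back the database transaction
            }
        }
    }
}
```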
+*/ + +package org.oracle.okafka.clients.producer.internals; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.SQLException; +import java.sql.Statement; +import java.text.SimpleDateFormat; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Date; +import java.util.concurrent.Future; + +import org.apache.kafka.clients.producer.RecordMetadata; +import org.apache.kafka.common.KafkaException; +import org.apache.kafka.common.errors.DisconnectException; +import org.apache.kafka.common.utils.LogContext; +import org.oracle.okafka.common.utils.ConnectionUtils; +import org.slf4j.Logger; + +public class OracleTransactionManager { + + enum TransactionState + { + PRE_INIT, + INIT_INVOKED, + BEGIN, + COMMITTED, + ABORTED + } + + ArrayList> recordsInTxn; + Connection conn; + TransactionState tState = TransactionState.PRE_INIT; + String clientTransactionId; + String connId; + protected final Logger log ; + private boolean initTxnCalled; + private String sId; + private String serialNo; + private String instanceName; + private String serverPid; + + SimpleDateFormat sdf = new SimpleDateFormat("dd_MM_yyyy_HH_mm_ss_SSS"); + + public OracleTransactionManager(LogContext logContext ) + { + log = logContext.logger(OracleTransactionManager.class); + recordsInTxn = new ArrayList>(); + } + public void initTxn() + { + initTxnCalled = true; + tState = TransactionState.INIT_INVOKED; + } + + public void beginTransaction() throws IllegalStateException + { + if(tState == TransactionState.PRE_INIT) + throw new IllegalStateException("InIt transaction not invoked for this KafkaProducer"); + else if(tState == TransactionState.BEGIN) + { + throw new IllegalStateException("KafkaProducer already part of an existing transaction. Either abort it or commit it"); + } + + tState = TransactionState.BEGIN; + if(conn != null) + { + clientTransactionId = generateLocalTransactionId(); + log.debug("Client Transaction id " + clientTransactionId); + } + + } + + public void abortTransaction() throws IllegalStateException, SQLException + { + /*if(tState != TransactionState.BEGIN) + { + throw new IllegalStateException("KafkaProducer not part of any transaction."); + }*/ + tState = TransactionState.ABORTED; + try { + conn.rollback(); + }catch(Exception e) { + log.error(clientTransactionId +": Exception during rollback of transaction." + e, e); + throw e; + }finally { + clientTransactionId = null; + conn = null; + } + } + /* + * Commits the records + */ + public void commitTransaction() throws IllegalStateException, Exception + { + log.debug("Commiting Transaction. TransactionState "+tState+". Local Transaction Id:"+clientTransactionId+". Database Connection " + conn); + + /*if(tState != TransactionState.BEGIN) + { + throw new IllegalStateException("KafkaProducer not part of any transaction.Transaction state is not 'BEGIN' "); + }*/ + + if(conn == null) + { + return; + } + try { + if(sId == null) + { + oracle.jdbc.internal.OracleConnection connInternal = (oracle.jdbc.internal.OracleConnection)conn; + sId = connInternal.getServerSessionInfo().getProperty("AUTH_SESSION_ID"); + serialNo = connInternal.getServerSessionInfo().getProperty("AUTH_SERIAL_NUM"); + serverPid = connInternal.getServerSessionInfo().getProperty("AUTH_SERVER_PID"); + instanceName = connInternal.getServerSessionInfo().getProperty(""); + } + log.debug("Commiting database transaction at instance: "+ instanceName +". Session information: " + sId +","+serialNo+". 
Process id:" + serverPid+"."); + }catch(Exception ignoreE) + { + + } + + RuntimeException rException = null; + Collections.reverse(recordsInTxn); + + for(Future frm : recordsInTxn) { + try { + if(!frm.isDone()) + { + frm.get(); + } + + }catch(Exception exeException) + { + throw exeException; + } + rException = ((FutureRecordMetadata)frm).error(); + if(rException != null) + throw rException; + } + + try { + conn.commit(); + } + catch(Exception e) { + String dbInfo = String.format(" .Database Session Information.(instance name, session id, serial#)(%s,%s,%s)", instanceName, sId, serialNo); + String excpMsg = getLocalTransactionId(); + if(conn.isClosed()) + { + excpMsg +=":Exception while committing kafka transaction. Database connection found closed. Exception:" +e.getMessage(); + excpMsg += dbInfo; + throw new DisconnectException(excpMsg, e); + } + boolean isAlive = isConnectionAlive(); + if(isAlive) + { + String msgId = null; + String topicName = null; + if(recordsInTxn.size() == 0) + { + excpMsg += ":No record produced in this transaction. Exception during database commit operation:" + e.getMessage(); + excpMsg += "Application should abort this transaction and retry the operations."; + throw new KafkaException(excpMsg+dbInfo, e); + } + FutureRecordMetadata frm = (FutureRecordMetadata)recordsInTxn.get(0); + ProduceRequestResult rs = frm.requestResult(); + boolean msgIdExist = false; + if(rs != null && rs.msgIds() != null && rs.msgIds().size() > 0) + { + msgId = rs.msgIds().get(0).getMsgId(); + msgId = msgId.substring(3); + topicName = frm.get().topic(); + msgIdExist = ConnectionUtils.checkIfMsgIdExist(conn, topicName,msgId , log); + } + + if(msgIdExist) + { + log.info("Commit successful despite exception: " + e + "Application should not need to retry."); + + if(log.isDebugEnabled()) + { + excpMsg += ": Exception while commiting transaction -" +e.getMessage(); + log.error(excpMsg + dbInfo, e); + log.info("Check database server trace file: " + dbInfo +" for more information."); + } + } + else + { + excpMsg+= " :KafkaProducer failed to commit this transaction due to exception:"+e.getMessage(); + excpMsg+=" Application should abort this transaction and retry the operations."; + KafkaException kE = new KafkaException(excpMsg+dbInfo , e); + throw kE; + } + } + else + { + excpMsg +=" : Transactional poruducer disconnected from Oracle Database due to exception -" +e.getMessage(); + throw new DisconnectException(excpMsg + dbInfo , e); + } + } + finally { + recordsInTxn.clear(); + clientTransactionId = null; + } + tState = TransactionState.COMMITTED; + //Next BeginTransaction may use another connection + + conn = null; + } + + public String getLocalTransactionId() + { + return clientTransactionId; + } + synchronized public void addRecordToTransaction(Future frm) + { + recordsInTxn.add(frm); + } + + synchronized public Connection getDBConnection() + { + return conn; + } + + synchronized public void setDBConnection(Connection _conn) + { + if(conn!=null && conn ==_conn) + { + return; + } + + if(conn == null) + { + conn = _conn; + if(clientTransactionId == null) + { + clientTransactionId = generateLocalTransactionId(); + log.debug("Transaction id " + clientTransactionId); + } + + return; + } + + if(tState == TransactionState.BEGIN) + { + log.error("Transaction has already begun with a different database connection."); + throw new KafkaException("A transaction with another oracle connection already in process"); + } + conn = _conn; + } + + synchronized TransactionState getTransactionState() + { + return 
tState; + } + + String generateLocalTransactionId() + { + String localTransactionId = null; + try { + oracle.jdbc.internal.OracleConnection connInternal = (oracle.jdbc.internal.OracleConnection)conn; + sId = connInternal.getServerSessionInfo().getProperty("AUTH_SESSION_ID"); + serialNo = connInternal.getServerSessionInfo().getProperty("AUTH_SERIAL_NUM"); + instanceName = connInternal.getServerSessionInfo().getProperty("INSTANCE_NAME"); + serverPid = connInternal.getServerSessionInfo().getProperty("AUTH_SERVER_PID"); + String userName = conn.getMetaData().getUserName(); + + localTransactionId = instanceName +"_"+ userName +"_" + sId +"_" + serialNo +"_" + (sdf.format(new Date())); + log.debug("Client Transaction id " + localTransactionId); + + }catch(Exception e) { + } + + return localTransactionId; + } + + boolean isConnectionAlive() + { + if(conn == null) + return false; + + String PING_QUERY = "SELECT banner FROM v$version where 1<>1"; + try (PreparedStatement stmt = conn.prepareStatement(PING_QUERY)) { + stmt.setQueryTimeout(1); + stmt.execute("SELECT banner FROM v$version where 1<>1"); + stmt.close(); + } + catch(Exception ignoreE) + { + return false; + } + return true; + } +} diff --git a/clients/src/main/java/org/oracle/okafka/clients/producer/internals/ProduceRequestResult.java b/clients/src/main/java/org/oracle/okafka/clients/producer/internals/ProduceRequestResult.java index c73f300..2ddba41 100644 --- a/clients/src/main/java/org/oracle/okafka/clients/producer/internals/ProduceRequestResult.java +++ b/clients/src/main/java/org/oracle/okafka/clients/producer/internals/ProduceRequestResult.java @@ -1,7 +1,7 @@ /* -** OKafka Java Client version 0.8. +** OKafka Java Client version 23.4. ** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. +** Copyright (c) 2019, 2024 Oracle and/or its affiliates. ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. */ @@ -30,111 +30,55 @@ package org.oracle.okafka.clients.producer.internals; import java.util.List; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; +import java.util.function.Function; -import org.oracle.okafka.clients.producer.RecordMetadata; -import org.oracle.okafka.common.TopicPartition; +import org.apache.kafka.clients.producer.RecordMetadata; +import org.apache.kafka.common.TopicPartition; +import org.oracle.okafka.common.utils.MessageIdConverter.OKafkaOffset; /** - * A class that models the future completion of a produce request for a single partition. There is one of these per - * partition in a produce request and it is shared by all the {@link RecordMetadata} instances that are batched together + * A class that models the future completion of a produce request for a single + * partition. There is one of these per partition in a produce request and it is + * shared by all the {@link RecordMetadata} instances that are batched together * for the same partition in the request. */ -public final class ProduceRequestResult { +public class ProduceRequestResult extends org.apache.kafka.clients.producer.internals.ProduceRequestResult { + + private volatile List msgIds = null; + + /** + * Create an instance of this class. + * + * @param topicPartition The topic and partition to which this record set was + * sent was sent + */ + public ProduceRequestResult(TopicPartition topicPartition) { + super(topicPartition); + } + + /** + * Set the result of the produce request. 
+ * + * @param baseOffset The base offset assigned to the record + * @param logAppendTime The log append time or -1 if CreateTime is being used + * @param error The error that occurred if there was one, or null + */ + public void set(long baseOffset, long logAppendTime, List msgIds, + Function errorsByIndex) { + set(baseOffset, logAppendTime, errorsByIndex); + this.msgIds = msgIds; + } + + public void set(long baseOffset, long logAppendTime, List msgIds, RuntimeException errorsByIndex) { + set(baseOffset, logAppendTime, batchIndex -> errorsByIndex); + this.msgIds = msgIds; + } + + /** + * The base offset for the request (the first offset in the record set) + */ + public List msgIds() { + return msgIds; + } - private final CountDownLatch latch = new CountDownLatch(1); - private final TopicPartition topicPartition; - - private volatile List msgIds = null; - private volatile List logAppendTime = null; - private volatile Exception error; - - /** - * Create an instance of this class. - * - * @param topicPartition The topic and partition to which this record set was sent was sent - */ - public ProduceRequestResult(TopicPartition topicPartition) { - this.topicPartition = topicPartition; - } - - /** - * Set the result of the produce request. - * - * @param baseOffset The base offset assigned to the record - * @param logAppendTime The log append time or -1 if CreateTime is being used - * @param error The error that occurred if there was one, or null - */ - public void set(List msgIds, List logAppendTime, Exception error) { - this.msgIds = msgIds; - this.logAppendTime = logAppendTime; - this.error = error; - } - - /** - * Mark this request as complete and unblock any threads waiting on its completion. - */ - public void done() { - this.latch.countDown(); - } - - /** - * Await the completion of this request - */ - public void await() throws InterruptedException { - latch.await(); - } - - /** - * Await the completion of this request (up to the given time interval) - * @param timeout The maximum time to wait - * @param unit The unit for the max time - * @return true if the request completed, false if we timed out - */ - public boolean await(long timeout, TimeUnit unit) throws InterruptedException { - return latch.await(timeout, unit); - } - - /** - * The base offset for the request (the first offset in the record set) - */ - public List msgIds() { - return msgIds; - } - - /** - * Return true if log append time is being used for this topic - */ - public boolean hasLogAppendTime() { - return logAppendTime != null; - } - - /** - * The log append time or -1 if CreateTime is being used - */ - public ListlogAppendTime() { - return logAppendTime; - } - - /** - * The error thrown (generally on the server) while processing this request - */ - public Exception error() { - return error; - } - - /** - * The topic and partition to which the record was appended - */ - public TopicPartition topicPartition() { - return topicPartition; - } - - /** - * Has the request completed? - */ - public boolean completed() { - return this.latch.getCount() == 0L; - } } diff --git a/clients/src/main/java/org/oracle/okafka/clients/producer/internals/ProducerBatch.java b/clients/src/main/java/org/oracle/okafka/clients/producer/internals/ProducerBatch.java index 64bdf97..d1ff412 100644 --- a/clients/src/main/java/org/oracle/okafka/clients/producer/internals/ProducerBatch.java +++ b/clients/src/main/java/org/oracle/okafka/clients/producer/internals/ProducerBatch.java @@ -1,9 +1,9 @@ /* -** OKafka Java Client version 0.8. 
-** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ + ** OKafka Java Client version 23.4. + ** + ** Copyright (c) 2019, 2024 Oracle and/or its affiliates. + ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. + */ /* * Licensed to the Apache Software Foundation (ASF) under one or more @@ -21,30 +21,29 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.oracle.okafka.clients.producer.internals; -import org.oracle.okafka.clients.producer.Callback; -import org.oracle.okafka.clients.producer.RecordMetadata; -import org.oracle.okafka.common.TopicPartition; -import org.oracle.okafka.common.errors.RecordBatchTooLargeException; -import org.oracle.okafka.common.errors.TimeoutException; -import org.oracle.okafka.common.header.Header; -import org.oracle.okafka.common.record.AbstractRecords; -import org.oracle.okafka.common.record.CompressionRatioEstimator; -import org.oracle.okafka.common.record.CompressionType; -import org.oracle.okafka.common.record.MemoryRecords; -import org.oracle.okafka.common.record.MemoryRecordsBuilder; -import org.oracle.okafka.common.record.MutableRecordBatch; -import org.oracle.okafka.common.record.Record; -import org.oracle.okafka.common.record.RecordBatch; -import org.oracle.okafka.common.record.TimestampType; +import org.apache.kafka.clients.producer.Callback; +import org.apache.kafka.common.utils.ProducerIdAndEpoch; +import org.apache.kafka.clients.producer.RecordMetadata; +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.errors.RecordBatchTooLargeException; +import org.apache.kafka.common.header.Header; +import org.apache.kafka.common.record.AbstractRecords; +import org.apache.kafka.common.record.CompressionRatioEstimator; +import org.apache.kafka.common.record.CompressionType; +import org.apache.kafka.common.record.MemoryRecords; +import org.apache.kafka.common.record.MemoryRecordsBuilder; +import org.apache.kafka.common.record.MutableRecordBatch; +import org.apache.kafka.common.record.Record; +import org.apache.kafka.common.record.RecordBatch; +import org.apache.kafka.common.record.TimestampType; +import org.apache.kafka.common.requests.ProduceResponse; +import org.apache.kafka.common.utils.Time; +import org.oracle.okafka.common.utils.MessageIdConverter.OKafkaOffset; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import static org.oracle.okafka.common.record.RecordBatch.MAGIC_VALUE_V2; -import static org.oracle.okafka.common.record.RecordBatch.NO_TIMESTAMP; - import java.nio.ByteBuffer; import java.util.ArrayDeque; import java.util.ArrayList; @@ -53,425 +52,549 @@ import java.util.List; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Function; + +import static org.apache.kafka.common.record.RecordBatch.MAGIC_VALUE_V2; +import static org.apache.kafka.common.record.RecordBatch.NO_TIMESTAMP; /** * A batch of records that is or will be sent. 
* - * This class is not thread safe and external synchronization must be used when modifying it + * This class is not thread safe and external synchronization must be used when + * modifying it */ public final class ProducerBatch { - private static final Logger log = LoggerFactory.getLogger(ProducerBatch.class); - - private enum FinalState { ABORTED, FAILED, SUCCEEDED } - - final long createdMs; - final TopicPartition topicPartition; - final ProduceRequestResult produceFuture; - - private final List thunks = new ArrayList<>(); - private final MemoryRecordsBuilder recordsBuilder; - private final AtomicInteger attempts = new AtomicInteger(0); - private final boolean isSplitBatch; - private final AtomicReference finalState = new AtomicReference<>(null); - - int recordCount; - int maxRecordSize; - private long lastAttemptMs; - private long lastAppendTime; - private long drainedMs; - private String expiryErrorMessage; - private boolean retry; - private boolean reopened = false; - - public ProducerBatch(TopicPartition tp, MemoryRecordsBuilder recordsBuilder, long now) { - this(tp, recordsBuilder, now, false); - } - - public ProducerBatch(TopicPartition tp, MemoryRecordsBuilder recordsBuilder, long now, boolean isSplitBatch) { - this.createdMs = now; - this.lastAttemptMs = now; - this.recordsBuilder = recordsBuilder; - this.topicPartition = tp; - this.lastAppendTime = createdMs; - this.produceFuture = new ProduceRequestResult(topicPartition); - this.retry = false; - this.isSplitBatch = isSplitBatch; - float compressionRatioEstimation = CompressionRatioEstimator.estimation(topicPartition.topic(), - recordsBuilder.compressionType()); - recordsBuilder.setEstimatedCompressionRatio(compressionRatioEstimation); - } - - /** - * Append the record to the current record set and return the relative offset within that record set - * - * @return The RecordSend corresponding to this record or null if there isn't sufficient room. - */ - public FutureRecordMetadata tryAppend(long timestamp, byte[] key, byte[] value, Header[] headers, Callback callback, long now) { - if (!recordsBuilder.hasRoomFor(timestamp, key, value, headers)) { - return null; - } else { - Long checksum = this.recordsBuilder.append(timestamp, key, value, headers); - this.maxRecordSize = Math.max(this.maxRecordSize, AbstractRecords.estimateSizeInBytesUpperBound(magic(), - recordsBuilder.compressionType(), key, value, headers)); - this.lastAppendTime = now; - FutureRecordMetadata future = new FutureRecordMetadata(this.produceFuture, this.recordCount, - timestamp, checksum, - key == null ? -1 : key.length, - value == null ? -1 : value.length); - // we have to keep every future returned to the users in case the batch needs to be - // split to several new batches and resent. - thunks.add(new Thunk(callback, future)); - this.recordCount++; - return future; - } - } - - /** - * This method is only used by {@link #split(int)} when splitting a large batch to smaller ones. - * @return true if the record has been successfully appended, false otherwise. - */ - private boolean tryAppendForSplit(long timestamp, ByteBuffer key, ByteBuffer value, Header[] headers, Thunk thunk) { - if (!recordsBuilder.hasRoomFor(timestamp, key, value, headers)) { - return false; - } else { - // No need to get the CRC. 
- this.recordsBuilder.append(timestamp, key, value, headers); - this.maxRecordSize = Math.max(this.maxRecordSize, AbstractRecords.estimateSizeInBytesUpperBound(magic(), - recordsBuilder.compressionType(), key, value, headers)); - FutureRecordMetadata future = new FutureRecordMetadata(this.produceFuture, this.recordCount, - timestamp, thunk.future.checksumOrNull(), - key == null ? -1 : key.remaining(), - value == null ? -1 : value.remaining()); - // Chain the future to the original thunk. - thunk.future.chain(future); - this.thunks.add(thunk); - this.recordCount++; - return true; - } - } - - /** - * Abort the batch and complete the future and callbacks. - * - * @param exception The exception to use to complete the future and awaiting callbacks. - */ - public void abort(RuntimeException exception) { - if (!finalState.compareAndSet(null, FinalState.ABORTED)) - throw new IllegalStateException("Batch has already been completed in final state " + finalState.get()); - - log.trace("Aborting batch for partition {}", topicPartition, exception); - completeFutureAndFireCallbacks( null, null, exception); - } - - /** - * Complete the request. If the batch was previously aborted, this is a no-op. - * - * @param baseOffset The base offset of the messages assigned by the server - * @param logAppendTime The log append time or -1 if CreateTime is being used - * @param exception The exception that occurred (or null if the request was successful) - * @return true if the batch was completed successfully and false if the batch was previously aborted - */ - public boolean done(List msgIds, List logAppendTime, Exception exception) { - final FinalState finalState; - if (exception == null) { - log.trace("Successfully produced messages to {} ", topicPartition); - finalState = FinalState.SUCCEEDED; - } else { - log.trace("Failed to produce messages to {}.", topicPartition, exception); - finalState = FinalState.FAILED; - } - if( !this.finalState.compareAndSet(null, finalState)) { - if (this.finalState.get() == FinalState.ABORTED) { - log.debug("ProduceResponse returned for {} after batch had already been aborted.", topicPartition); - return false; - } else { - throw new IllegalStateException("Batch has already been completed in final state " + this.finalState.get()); - } - } - completeFutureAndFireCallbacks(msgIds, logAppendTime, exception); - - return true; - } - - private void completeFutureAndFireCallbacks(List msgIds, List logAppendTime, Exception exception) { - // Set the future before invoking the callbacks as we rely on its state for the `onCompletion` call - produceFuture.set(msgIds, logAppendTime, exception); - - // execute callbacks - for (Thunk thunk : thunks) { - try { - if (exception == null) { - RecordMetadata metadata = thunk.future.value(); - if (thunk.callback != null) - thunk.callback.onCompletion(metadata, null); - } else { - if (thunk.callback != null) - thunk.callback.onCompletion(null, exception); - } - } catch (Exception e) { - log.error("Error executing user-provided callback on message for topic-partition '{}'", topicPartition, e); - } - } - - produceFuture.done(); - } - - public Deque split(int splitBatchSize) { - Deque batches = new ArrayDeque<>(); - MemoryRecords memoryRecords = recordsBuilder.build(); - - Iterator recordBatchIter = memoryRecords.batches().iterator(); - if (!recordBatchIter.hasNext()) - throw new IllegalStateException("Cannot split an empty producer batch."); - - RecordBatch recordBatch = recordBatchIter.next(); - if (recordBatch.magic() < MAGIC_VALUE_V2 && 
!recordBatch.isCompressed()) - throw new IllegalArgumentException("Batch splitting cannot be used with non-compressed messages " + - "with version v0 and v1"); - - if (recordBatchIter.hasNext()) - throw new IllegalArgumentException("A producer batch should only have one record batch."); - - Iterator thunkIter = thunks.iterator(); - // We always allocate batch size because we are already splitting a big batch. - // And we also Retain the create time of the original batch. - ProducerBatch batch = null; - - for (Record record : recordBatch) { - assert thunkIter.hasNext(); - Thunk thunk = thunkIter.next(); - if (batch == null) - batch = createBatchOffAccumulatorForRecord(record, splitBatchSize); - - // A newly created batch can always host the first message. - if (!batch.tryAppendForSplit(record.timestamp(), record.key(), record.value(), record.headers(), thunk)) { - batches.add(batch); - batch = createBatchOffAccumulatorForRecord(record, splitBatchSize); - batch.tryAppendForSplit(record.timestamp(), record.key(), record.value(), record.headers(), thunk); - } - } - - // Close the last batch and add it to the batch list after split. - if (batch != null) - batches.add(batch); - - produceFuture.set( null, null, new RecordBatchTooLargeException()); - produceFuture.done(); - - if (hasSequence()) { - int sequence = baseSequence(); - ProducerIdAndEpoch producerIdAndEpoch = new ProducerIdAndEpoch(producerId(), producerEpoch()); - for (ProducerBatch newBatch : batches) { - newBatch.setProducerState(producerIdAndEpoch, sequence, isTransactional()); - sequence += newBatch.recordCount; - } - } - return batches; - } - - private ProducerBatch createBatchOffAccumulatorForRecord(Record record, int batchSize) { - int initialSize = Math.max(AbstractRecords.estimateSizeInBytesUpperBound(magic(), - recordsBuilder.compressionType(), record.key(), record.value(), record.headers()), batchSize); - ByteBuffer buffer = ByteBuffer.allocate(initialSize); - - // Note that we intentionally do not set producer state (producerId, epoch, sequence, and isTransactional) - // for the newly created batch. This will be set when the batch is dequeued for sending (which is consistent - // with how normal batches are handled). - MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, magic(), recordsBuilder.compressionType(), - TimestampType.CREATE_TIME, 0L); - return new ProducerBatch(topicPartition, builder, this.createdMs, true); - } - - public boolean isCompressed() { - return recordsBuilder.compressionType() != CompressionType.NONE; - } - - /** - * A callback and the associated FutureRecordMetadata argument to pass to it. - */ - final private static class Thunk { - final Callback callback; - final FutureRecordMetadata future; - - Thunk(Callback callback, FutureRecordMetadata future) { - this.callback = callback; - this.future = future; - } - } - - @Override - public String toString() { - return "ProducerBatch(topicPartition=" + topicPartition + ", recordCount=" + recordCount + ")"; - } - - /** - * A batch whose metadata is not available should be expired if one of the following is true: - *
-     *   1. the batch is not in retry AND request timeout has elapsed after it is ready (full or linger.ms has reached).
-     *   2. the batch is in retry AND request timeout has elapsed after the backoff period ended.
      - * This methods closes this batch and sets {@code expiryErrorMessage} if the batch has timed out. - */ - boolean maybeExpire(int requestTimeoutMs, long retryBackoffMs, long now, long lingerMs, boolean isFull) { - if (!this.inRetry() && isFull && requestTimeoutMs < (now - this.lastAppendTime)) - expiryErrorMessage = (now - this.lastAppendTime) + " ms has passed since last append"; - else if (!this.inRetry() && requestTimeoutMs < (createdTimeMs(now) - lingerMs)) - expiryErrorMessage = (createdTimeMs(now) - lingerMs) + " ms has passed since batch creation plus linger time"; - else if (this.inRetry() && requestTimeoutMs < (waitedTimeMs(now) - retryBackoffMs)) - expiryErrorMessage = (waitedTimeMs(now) - retryBackoffMs) + " ms has passed since last attempt plus backoff time"; - - boolean expired = expiryErrorMessage != null; - if (expired) - abortRecordAppends(); - return expired; - } - - /** - * If {@link #maybeExpire(int, long, long, long, boolean)} returned true, the sender will fail the batch with - * the exception returned by this method. - * @return An exception indicating the batch expired. - */ - TimeoutException timeoutException() { - if (expiryErrorMessage == null) - throw new IllegalStateException("Batch has not expired"); - return new TimeoutException("Expiring " + recordCount + " record(s) for " + topicPartition + ": " + expiryErrorMessage); - } - - int attempts() { - return attempts.get(); - } - - void reenqueued(long now) { - attempts.getAndIncrement(); - lastAttemptMs = Math.max(lastAppendTime, now); - lastAppendTime = Math.max(lastAppendTime, now); - retry = true; - } - - long queueTimeMs() { - return drainedMs - createdMs; - } - - long createdTimeMs(long nowMs) { - return Math.max(0, nowMs - createdMs); - } - - long waitedTimeMs(long nowMs) { - return Math.max(0, nowMs - lastAttemptMs); - } - - void drained(long nowMs) { - this.drainedMs = Math.max(drainedMs, nowMs); - } - - boolean isSplitBatch() { - return isSplitBatch; - } - - /** - * Returns if the batch is been retried for sending to kafka - */ - public boolean inRetry() { - return this.retry; - } - - public MemoryRecords records() { - return recordsBuilder.build(); - } - - public int estimatedSizeInBytes() { - return recordsBuilder.estimatedSizeInBytes(); - } - - public double compressionRatio() { - return recordsBuilder.compressionRatio(); - } - - public boolean isFull() { - return recordsBuilder.isFull(); - } - - public void setProducerState(ProducerIdAndEpoch producerIdAndEpoch, int baseSequence, boolean isTransactional) { - recordsBuilder.setProducerState(producerIdAndEpoch.producerId, producerIdAndEpoch.epoch, baseSequence, isTransactional); - } - - public void resetProducerState(ProducerIdAndEpoch producerIdAndEpoch, int baseSequence, boolean isTransactional) { - reopened = true; - recordsBuilder.reopenAndRewriteProducerState(producerIdAndEpoch.producerId, producerIdAndEpoch.epoch, baseSequence, isTransactional); - } - - /** - * Release resources required for record appends (e.g. compression buffers). Once this method is called, it's only - * possible to update the RecordBatch header. 
- */ - public void closeForRecordAppends() { - recordsBuilder.closeForRecordAppends(); - } - - public void close() { - recordsBuilder.close(); - if (!recordsBuilder.isControlBatch()) { - CompressionRatioEstimator.updateEstimation(topicPartition.topic(), - recordsBuilder.compressionType(), - (float) recordsBuilder.compressionRatio()); - } - reopened = false; - } - - /** - * Abort the record builder and reset the state of the underlying buffer. This is used prior to aborting - * the batch with {@link #abort(RuntimeException)} and ensures that no record previously appended can be - * read. This is used in scenarios where we want to ensure a batch ultimately gets aborted, but in which - * it is not safe to invoke the completion callbacks (e.g. because we are holding a lock, - * {@link RecordAccumulator#abortBatches()}). - */ - public void abortRecordAppends() { - recordsBuilder.abort(); - } - - public boolean isClosed() { - return recordsBuilder.isClosed(); - } - - public ByteBuffer buffer() { - return recordsBuilder.buffer(); - } - - public int initialCapacity() { - return recordsBuilder.initialCapacity(); - } - - public boolean isWritable() { - return !recordsBuilder.isClosed(); - } - - public byte magic() { - return recordsBuilder.magic(); - } - - public long producerId() { - return recordsBuilder.producerId(); - } - - public short producerEpoch() { - return recordsBuilder.producerEpoch(); - } - - public int baseSequence() { - return recordsBuilder.baseSequence(); - } - - public boolean hasSequence() { - return baseSequence() != RecordBatch.NO_SEQUENCE; - } - - public boolean isTransactional() { - return recordsBuilder.isTransactional(); - } - - public boolean sequenceHasBeenReset() { - return reopened; - } - + private static final Logger log = LoggerFactory.getLogger(ProducerBatch.class); + + private enum FinalState { + ABORTED, FAILED, SUCCEEDED + } + + final long createdMs; + final TopicPartition topicPartition; + final ProduceRequestResult produceFuture; + + private final List thunks = new ArrayList<>(); + private final MemoryRecordsBuilder recordsBuilder; + private final AtomicInteger attempts = new AtomicInteger(0); + private final boolean isSplitBatch; + private final AtomicReference finalState = new AtomicReference<>(null); + + int recordCount; + int maxRecordSize; + private long lastAttemptMs; + private long lastAppendTime; + private long drainedMs; + private boolean retry; + private boolean reopened; + private List reTryMsgIdList; + + public ProducerBatch(TopicPartition tp, MemoryRecordsBuilder recordsBuilder, long createdMs) { + this(tp, recordsBuilder, createdMs, false); + } + + public ProducerBatch(TopicPartition tp, MemoryRecordsBuilder recordsBuilder, long createdMs, boolean isSplitBatch) { + this.createdMs = createdMs; + this.lastAttemptMs = createdMs; + this.recordsBuilder = recordsBuilder; + this.topicPartition = tp; + this.lastAppendTime = createdMs; + this.produceFuture = new ProduceRequestResult(topicPartition); + this.retry = false; + this.isSplitBatch = isSplitBatch; + float compressionRatioEstimation = CompressionRatioEstimator.estimation(topicPartition.topic(), + recordsBuilder.compressionType()); + recordsBuilder.setEstimatedCompressionRatio(compressionRatioEstimation); + } + + /** + * Append the record to the current record set and return the relative offset + * within that record set + * + * @return The RecordSend corresponding to this record or null if there isn't + * sufficient room. 
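	 *         When null is returned, the caller (RecordAccumulator.tryAppend) closes this batch for
	 *         further appends and RecordAccumulator.append allocates a fresh batch for the record.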
+ */ + public FutureRecordMetadata tryAppend(long timestamp, byte[] key, byte[] value, Header[] headers, Callback callback, + long now) { + if (!recordsBuilder.hasRoomFor(timestamp, key, value, headers)) { + return null; + } else { + this.recordsBuilder.append(timestamp, key, value, headers); + this.maxRecordSize = Math.max(this.maxRecordSize, AbstractRecords.estimateSizeInBytesUpperBound(magic(), + recordsBuilder.compressionType(), key, value, headers)); + this.lastAppendTime = now; + FutureRecordMetadata future = new FutureRecordMetadata(this.produceFuture, (long) this.recordCount, + timestamp, key == null ? -1 : key.length, value == null ? -1 : value.length, Time.SYSTEM); + // we have to keep every future returned to the users in case the batch needs to + // be + // split to several new batches and resent. + thunks.add(new Thunk(callback, future)); + this.recordCount++; + return future; + } + } + + /** + * This method is only used by {@link #split(int)} when splitting a large batch + * to smaller ones. + * + * @return true if the record has been successfully appended, false otherwise. + */ + private boolean tryAppendForSplit(long timestamp, ByteBuffer key, ByteBuffer value, Header[] headers, Thunk thunk) { + if (!recordsBuilder.hasRoomFor(timestamp, key, value, headers)) { + return false; + } else { + // No need to get the CRC. + this.recordsBuilder.append(timestamp, key, value, headers); + this.maxRecordSize = Math.max(this.maxRecordSize, AbstractRecords.estimateSizeInBytesUpperBound(magic(), + recordsBuilder.compressionType(), key, value, headers)); + FutureRecordMetadata future = new FutureRecordMetadata(this.produceFuture, this.recordCount, timestamp, + key == null ? -1 : key.remaining(), value == null ? -1 : value.remaining(), Time.SYSTEM); + // Chain the future to the original thunk. + thunk.future.chain(future); + this.thunks.add(thunk); + this.recordCount++; + return true; + } + } + + /** + * Abort the batch and complete the future and callbacks. + * + * @param exception The exception to use to complete the future and awaiting + * callbacks. + */ + public void abort(RuntimeException exception) { + if (!finalState.compareAndSet(null, FinalState.ABORTED)) + throw new IllegalStateException("Batch has already been completed in final state " + finalState.get()); + + log.trace("Aborting batch for partition {}", topicPartition, exception); + completeFutureAndFireCallbacks(ProduceResponse.INVALID_OFFSET, RecordBatch.NO_TIMESTAMP, + batchIndex -> exception); + } + + /** + * Return `true` if {@link #done(long, long, RuntimeException)} has been invoked + * at least once, `false` otherwise. + */ + public boolean isDone() { + return finalState() != null; + } + + /** + * Finalize the state of a batch. Final state, once set, is immutable. This + * function may be called once or twice on a batch. It may be called twice if 1. + * An inflight batch expires before a response from the broker is received. The + * batch's final state is set to FAILED. But it could succeed on the broker and + * second time around batch.done() may try to set SUCCEEDED final state. 2. If a + * transaction abortion happens or if the producer is closed forcefully, the + * final state is ABORTED but again it could succeed if broker responds with a + * success. + * + * Attempted transitions from [FAILED | ABORTED] --> SUCCEEDED are logged. + * Attempted transitions from one failure state to the same or a different + * failed state are ignored. 
Attempted transitions from SUCCEEDED to the same or + * a failed state throw an exception. + * + * @param baseOffset The base offset of the messages assigned by the server + * @param logAppendTime The log append time or -1 if CreateTime is being used + * @param exception The exception that occurred (or null if the request was + * successful) + * @return true if the batch was completed successfully and false if the batch + * was previously aborted + */ + public boolean done(long baseOffset, long logAppendTime, RuntimeException topLevelException, + Function recordExceptions) { + final FinalState tryFinalState = (topLevelException == null) ? FinalState.SUCCEEDED : FinalState.FAILED; + + if (tryFinalState == FinalState.SUCCEEDED) { + log.trace("Successfully produced messages to {} with base offset {}.", topicPartition, baseOffset); + } else { + log.trace("Failed to produce messages to {} with base offset {}.", topicPartition, baseOffset, + topLevelException); + } + + if (this.finalState.compareAndSet(null, tryFinalState)) { + completeFutureAndFireCallbacks(baseOffset, logAppendTime, recordExceptions); + return true; + } + + if (this.finalState.get() != FinalState.SUCCEEDED) { + if (tryFinalState == FinalState.SUCCEEDED) { + // Log if a previously unsuccessful batch succeeded later on. + log.debug("ProduceResponse returned {} for {} after batch with base offset {} had already been {}.", + tryFinalState, topicPartition, baseOffset, this.finalState.get()); + } else { + // FAILED --> FAILED and ABORTED --> FAILED transitions are ignored. + log.debug("Ignored state transition {} -> {} for {} batch with base offset {}", this.finalState.get(), + tryFinalState, topicPartition, baseOffset); + } + } else { + // A SUCCESSFUL batch must not attempt another state change. + throw new IllegalStateException( + "A " + this.finalState.get() + " batch must not attempt another state change to " + tryFinalState); + } + return false; + } + + private void completeFutureAndFireCallbacks(long baseOffset, long logAppendTime, + Function recordExceptions) { + // Set the future before invoking the callbacks as we rely on its state for the + // `onCompletion` call + produceFuture.set(baseOffset, logAppendTime, recordExceptions); + + // execute callbacks + for (int i = 0; i < thunks.size(); i++) { + try { + Thunk thunk = thunks.get(i); + if (thunk.callback != null) { + if (recordExceptions == null) { + RecordMetadata metadata = thunk.future.value(); + thunk.callback.onCompletion(metadata, null); + } else { + RuntimeException exception = recordExceptions.apply(i); + thunk.callback.onCompletion(null, exception); + } + } + } catch (Exception e) { + log.error("Error executing user-provided callback on message for topic-partition '{}'", topicPartition, + e); + } + } + + produceFuture.done(); + } + + /** + * Complete the request. If the batch was previously aborted, this is a no-op. + * + * @param baseOffset The base offset of the messages assigned by the server + * @param logAppendTime The log append time or -1 if CreateTime is being used + * @param exception The exception that occurred (or null if the request was + * successful) + * @return true if the batch was completed successfully and false if the batch + * was previously aborted + */ + public boolean done(long baseOffset, long logAppendTime, List msgIds, + RuntimeException topLevelException, Function recordExceptions) { + final FinalState tryFinalState = (topLevelException == null) ? 
FinalState.SUCCEEDED : FinalState.FAILED; + + if (tryFinalState == FinalState.SUCCEEDED) { + log.trace("Successfully produced messages to {} with base offset {}.", topicPartition, baseOffset); + } else { + log.trace("Failed to produce messages to {} with base offset {}.", topicPartition, baseOffset, + topLevelException); + } + + if (this.finalState.compareAndSet(null, tryFinalState)) { + completeFutureAndFireCallbacks(baseOffset, logAppendTime, msgIds, recordExceptions); + return true; + } + + if (this.finalState.get() != FinalState.SUCCEEDED) { + if (tryFinalState == FinalState.SUCCEEDED) { + // Log if a previously unsuccessful batch succeeded later on. + log.debug("ProduceResponse returned {} for {} after batch with base offset {} had already been {}.", + tryFinalState, topicPartition, baseOffset, this.finalState.get()); + } else { + // FAILED --> FAILED and ABORTED --> FAILED transitions are ignored. + log.debug("Ignored state transition {} -> {} for {} batch with base offset {}", this.finalState.get(), + tryFinalState, topicPartition, baseOffset); + } + } else { + // A SUCCESSFUL batch must not attempt another state change. + throw new IllegalStateException( + "A " + this.finalState.get() + " batch must not attempt another state change to " + tryFinalState); + } + return false; + /* + * if( !this.finalState.compareAndSet(null, tryFinalState)) { if + * (this.finalState.get() == FinalState.ABORTED) { log. + * debug("ProduceResponse returned for {} after batch had already been aborted." + * , topicPartition); return false; } else { throw new + * IllegalStateException("Batch has already been completed in final state " + + * this.finalState.get()); } } completeFutureAndFireCallbacks(msgIds, + * logAppendTime, exception); return true; + */ + } + + private void completeFutureAndFireCallbacks(long baseOffset, long logAppendTime, List msgIds, + Function recordExceptions) { + // Set the future before invoking the callbacks as we rely on its state for the + // `onCompletion` call + produceFuture.set(baseOffset, logAppendTime, msgIds, recordExceptions); + + // execute callbacks + for (int i = 0; i < thunks.size(); i++) { + try { + Thunk thunk = thunks.get(i); + if (thunk.callback != null) { + if (recordExceptions == null) { + RecordMetadata metadata = thunk.future.value(); + thunk.callback.onCompletion(metadata, null); + } else { + RuntimeException exception = recordExceptions.apply(i); + thunk.callback.onCompletion(null, exception); + } + } + } catch (Exception e) { + log.error("Error executing user-provided callback on message for topic-partition '{}'", topicPartition, + e); + } + } + + produceFuture.done(); + } + + public Deque split(int splitBatchSize) { + Deque batches = new ArrayDeque<>(); + MemoryRecords memoryRecords = recordsBuilder.build(); + + Iterator recordBatchIter = memoryRecords.batches().iterator(); + if (!recordBatchIter.hasNext()) + throw new IllegalStateException("Cannot split an empty producer batch."); + + RecordBatch recordBatch = recordBatchIter.next(); + if (recordBatch.magic() < MAGIC_VALUE_V2 && !recordBatch.isCompressed()) + throw new IllegalArgumentException( + "Batch splitting cannot be used with non-compressed messages " + "with version v0 and v1"); + + if (recordBatchIter.hasNext()) + throw new IllegalArgumentException("A producer batch should only have one record batch."); + + Iterator thunkIter = thunks.iterator(); + // We always allocate batch size because we are already splitting a big batch. + // And we also Retain the create time of the original batch. 
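		// Illustrative walk-through, assuming batchSize = 16384: a rejected ~1 MB batch is replayed
		// record by record into fresh ~16 KB batches below. Each record's original Thunk is reused, so
		// the future already handed to the user is chained (FutureRecordMetadata.chain) to the future
		// of whichever split batch finally carries that record.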
+ ProducerBatch batch = null; + + for (Record record : recordBatch) { + assert thunkIter.hasNext(); + Thunk thunk = thunkIter.next(); + if (batch == null) + batch = createBatchOffAccumulatorForRecord(record, splitBatchSize); + + // A newly created batch can always host the first message. + if (!batch.tryAppendForSplit(record.timestamp(), record.key(), record.value(), record.headers(), thunk)) { + batches.add(batch); + batch.closeForRecordAppends(); + batch = createBatchOffAccumulatorForRecord(record, splitBatchSize); + batch.tryAppendForSplit(record.timestamp(), record.key(), record.value(), record.headers(), thunk); + } + } + + // Close the last batch and add it to the batch list after split. + if (batch != null) { + batches.add(batch); + batch.closeForRecordAppends(); + } + + produceFuture.set(ProduceResponse.INVALID_OFFSET, NO_TIMESTAMP, + batchIndex -> new RecordBatchTooLargeException()); + produceFuture.done(); + + if (hasSequence()) { + int sequence = baseSequence(); + ProducerIdAndEpoch producerIdAndEpoch = new ProducerIdAndEpoch(producerId(), producerEpoch()); + for (ProducerBatch newBatch : batches) { + newBatch.setProducerState(producerIdAndEpoch, sequence, isTransactional()); + sequence += newBatch.recordCount; + } + } + return batches; + } + + private ProducerBatch createBatchOffAccumulatorForRecord(Record record, int batchSize) { + int initialSize = Math.max(AbstractRecords.estimateSizeInBytesUpperBound(magic(), + recordsBuilder.compressionType(), record.key(), record.value(), record.headers()), batchSize); + ByteBuffer buffer = ByteBuffer.allocate(initialSize); + + // Note that we intentionally do not set producer state (producerId, epoch, + // sequence, and isTransactional) + // for the newly created batch. This will be set when the batch is dequeued for + // sending (which is consistent + // with how normal batches are handled). + MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, magic(), recordsBuilder.compressionType(), + TimestampType.CREATE_TIME, 0L); + return new ProducerBatch(topicPartition, builder, this.createdMs, true); + } + + public boolean isCompressed() { + return recordsBuilder.compressionType() != CompressionType.NONE; + } + + /** + * A callback and the associated FutureRecordMetadata argument to pass to it. 
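	 * One Thunk is kept per appended record; if the batch is later split, the same Thunk is replayed
	 * into the new batches so the user's callback and future follow the record wherever it ends up.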
+ */ + final private static class Thunk { + final Callback callback; + final FutureRecordMetadata future; + + Thunk(Callback callback, FutureRecordMetadata future) { + this.callback = callback; + this.future = future; + } + } + + @Override + public String toString() { + return "ProducerBatch(topicPartition=" + topicPartition + ", recordCount=" + recordCount + ")"; + } + + boolean hasReachedDeliveryTimeout(long deliveryTimeoutMs, long now) { + return deliveryTimeoutMs <= now - this.createdMs; + } + + public FinalState finalState() { + return this.finalState.get(); + } + + int attempts() { + return attempts.get(); + } + + void reenqueued(long now) { + attempts.getAndIncrement(); + lastAttemptMs = Math.max(lastAppendTime, now); + lastAppendTime = Math.max(lastAppendTime, now); + retry = true; + } + + long queueTimeMs() { + return drainedMs - createdMs; + } + + long waitedTimeMs(long nowMs) { + return Math.max(0, nowMs - lastAttemptMs); + } + + void drained(long nowMs) { + this.drainedMs = Math.max(drainedMs, nowMs); + } + + boolean isSplitBatch() { + return isSplitBatch; + } + + /** + * Returns if the batch is been retried for sending to kafka + */ + public boolean inRetry() { + return this.retry; + } + + public List retryMsgIdList() { + return reTryMsgIdList; + } + + public void setRetryMsgId(List retryMsgIdList) { + this.reTryMsgIdList = retryMsgIdList; + } + + public MemoryRecords records() { + return recordsBuilder.build(); + } + + public int estimatedSizeInBytes() { + return recordsBuilder.estimatedSizeInBytes(); + } + + public double compressionRatio() { + return recordsBuilder.compressionRatio(); + } + + public boolean isFull() { + return recordsBuilder.isFull(); + } + + public void setProducerState(ProducerIdAndEpoch producerIdAndEpoch, int baseSequence, boolean isTransactional) { + recordsBuilder.setProducerState(producerIdAndEpoch.producerId, producerIdAndEpoch.epoch, baseSequence, + isTransactional); + } + + public void resetProducerState(ProducerIdAndEpoch producerIdAndEpoch, int baseSequence, boolean isTransactional) { + log.info("Resetting sequence number of batch with current sequence {} for partition {} to {}", + this.baseSequence(), this.topicPartition, baseSequence); + reopened = true; + recordsBuilder.reopenAndRewriteProducerState(producerIdAndEpoch.producerId, producerIdAndEpoch.epoch, + baseSequence, isTransactional); + } + + /** + * Release resources required for record appends (e.g. compression buffers). + * Once this method is called, it's only possible to update the RecordBatch + * header. + */ + public void closeForRecordAppends() { + recordsBuilder.closeForRecordAppends(); + } + + public void close() { + recordsBuilder.close(); + if (!recordsBuilder.isControlBatch()) { + CompressionRatioEstimator.updateEstimation(topicPartition.topic(), recordsBuilder.compressionType(), + (float) recordsBuilder.compressionRatio()); + } + reopened = false; + } + + /** + * Abort the record builder and reset the state of the underlying buffer. This + * is used prior to aborting the batch with {@link #abort(RuntimeException)} and + * ensures that no record previously appended can be read. This is used in + * scenarios where we want to ensure a batch ultimately gets aborted, but in + * which it is not safe to invoke the completion callbacks (e.g. because we are + * holding a lock, {@link RecordAccumulator#abortBatches()}). 
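	 * The delivery-timeout path in RecordAccumulator.expiredBatches() follows exactly this pattern:
	 * it calls abortRecordAppends() while still holding the deque lock and leaves failing the batch
	 * to the caller.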
+ */ + public void abortRecordAppends() { + recordsBuilder.abort(); + } + + public boolean isClosed() { + return recordsBuilder.isClosed(); + } + + public ByteBuffer buffer() { + return recordsBuilder.buffer(); + } + + public int initialCapacity() { + return recordsBuilder.initialCapacity(); + } + + public boolean isWritable() { + return !recordsBuilder.isClosed(); + } + + public byte magic() { + return recordsBuilder.magic(); + } + + public long producerId() { + return recordsBuilder.producerId(); + } + + public short producerEpoch() { + return recordsBuilder.producerEpoch(); + } + + public int baseSequence() { + return recordsBuilder.baseSequence(); + } + + public int lastSequence() { + return recordsBuilder.baseSequence() + recordsBuilder.numRecords() - 1; + } + + public boolean hasSequence() { + return baseSequence() != RecordBatch.NO_SEQUENCE; + } + + public boolean isTransactional() { + return recordsBuilder.isTransactional(); + } + + public boolean sequenceHasBeenReset() { + return reopened; + } } diff --git a/clients/src/main/java/org/oracle/okafka/clients/producer/internals/ProducerIdAndEpoch.java b/clients/src/main/java/org/oracle/okafka/clients/producer/internals/ProducerIdAndEpoch.java index fca0694..13c9dfa 100644 --- a/clients/src/main/java/org/oracle/okafka/clients/producer/internals/ProducerIdAndEpoch.java +++ b/clients/src/main/java/org/oracle/okafka/clients/producer/internals/ProducerIdAndEpoch.java @@ -1,7 +1,7 @@ /* -** OKafka Java Client version 0.8. +** OKafka Java Client version 23.4. ** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. +** Copyright (c) 2019, 2024 Oracle and/or its affiliates. ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. */ @@ -23,8 +23,8 @@ */ package org.oracle.okafka.clients.producer.internals; -import static org.oracle.okafka.common.record.RecordBatch.NO_PRODUCER_EPOCH; -import static org.oracle.okafka.common.record.RecordBatch.NO_PRODUCER_ID; +import static org.apache.kafka.common.record.RecordBatch.NO_PRODUCER_EPOCH; +import static org.apache.kafka.common.record.RecordBatch.NO_PRODUCER_ID; class ProducerIdAndEpoch { static final ProducerIdAndEpoch NONE = new ProducerIdAndEpoch(NO_PRODUCER_ID, NO_PRODUCER_EPOCH); diff --git a/clients/src/main/java/org/oracle/okafka/clients/producer/internals/ProducerInterceptors.java b/clients/src/main/java/org/oracle/okafka/clients/producer/internals/ProducerInterceptors.java deleted file mode 100644 index 9687fc3..0000000 --- a/clients/src/main/java/org/oracle/okafka/clients/producer/internals/ProducerInterceptors.java +++ /dev/null @@ -1,147 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.clients.producer.internals; - - -import org.oracle.okafka.clients.producer.ProducerInterceptor; -import org.oracle.okafka.clients.producer.ProducerRecord; -import org.oracle.okafka.clients.producer.RecordMetadata; -import org.oracle.okafka.common.TopicPartition; -import org.oracle.okafka.common.record.RecordBatch; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.Closeable; -import java.util.List; - -/** - * A container that holds the list {@link org.oracle.okafka.clients.producer.ProducerInterceptor} - * and wraps calls to the chain of custom interceptors. - */ -public class ProducerInterceptors implements Closeable { - private static final Logger log = LoggerFactory.getLogger(ProducerInterceptors.class); - private final List> interceptors; - - public ProducerInterceptors(List> interceptors) { - this.interceptors = interceptors; - } - - /** - * This is called when client sends the record to KafkaProducer, before key and value gets serialized. - * The method calls {@link ProducerInterceptor#onSend(ProducerRecord)} method. ProducerRecord - * returned from the first interceptor's onSend() is passed to the second interceptor onSend(), and so on in the - * interceptor chain. The record returned from the last interceptor is returned from this method. - * - * This method does not throw exceptions. Exceptions thrown by any of interceptor methods are caught and ignored. - * If an interceptor in the middle of the chain, that normally modifies the record, throws an exception, - * the next interceptor in the chain will be called with a record returned by the previous interceptor that did not - * throw an exception. - * - * @param record the record from client - * @return producer record to send to topic/partition - */ - public ProducerRecord onSend(ProducerRecord record) { - ProducerRecord interceptRecord = record; - for (ProducerInterceptor interceptor : this.interceptors) { - try { - interceptRecord = interceptor.onSend(interceptRecord); - } catch (Exception e) { - // do not propagate interceptor exception, log and continue calling other interceptors - // be careful not to throw exception from here - if (record != null) - log.warn("Error executing interceptor onSend callback for topic: {}, partition: {}", record.topic(), record.partition(), e); - else - log.warn("Error executing interceptor onSend callback", e); - } - } - return interceptRecord; - } - - /** - * This method is called when the record sent to the server has been acknowledged, or when sending the record fails before - * it gets sent to the server. This method calls {@link ProducerInterceptor#onAcknowledgement(RecordMetadata, Exception)} - * method for each interceptor. - * - * This method does not throw exceptions. Exceptions thrown by any of interceptor methods are caught and ignored. - * - * @param metadata The metadata for the record that was sent (i.e. the partition and offset). - * If an error occurred, metadata will only contain valid topic and maybe partition. - * @param exception The exception thrown during processing of this record. 
Null if no error occurred. - */ - public void onAcknowledgement(RecordMetadata metadata, Exception exception) { - for (ProducerInterceptor interceptor : this.interceptors) { - try { - interceptor.onAcknowledgement(metadata, exception); - } catch (Exception e) { - // do not propagate interceptor exceptions, just log - log.warn("Error executing interceptor onAcknowledgement callback", e); - } - } - } - - /** - * This method is called when sending the record fails in {@link ProducerInterceptor#onSend - * (ProducerRecord)} method. This method calls {@link ProducerInterceptor#onAcknowledgement(RecordMetadata, Exception)} - * method for each interceptor - * - * @param record The record from client - * @param interceptTopicPartition The topic/partition for the record if an error occurred - * after partition gets assigned; the topic part of interceptTopicPartition is the same as in record. - * @param exception The exception thrown during processing of this record. - */ - public void onSendError(ProducerRecord record, TopicPartition interceptTopicPartition, Exception exception) { - for (ProducerInterceptor interceptor : this.interceptors) { - try { - if (record == null && interceptTopicPartition == null) { - interceptor.onAcknowledgement(null, exception); - } else { - if (interceptTopicPartition == null) { - interceptTopicPartition = new TopicPartition(record.topic(), - record.partition() == null ? RecordMetadata.UNKNOWN_PARTITION : record.partition()); - } - interceptor.onAcknowledgement(new RecordMetadata(interceptTopicPartition, -1, -1, - RecordBatch.NO_TIMESTAMP, Long.valueOf(-1L), -1, -1), exception); - } - } catch (Exception e) { - // do not propagate interceptor exceptions, just log - log.warn("Error executing interceptor onAcknowledgement callback", e); - } - } - } - - /** - * Closes every interceptor in a container. - */ - @Override - public void close() { - for (ProducerInterceptor interceptor : this.interceptors) { - try { - interceptor.close(); - } catch (Exception e) { - log.error("Failed to close producer interceptor ", e); - } - } - } -} diff --git a/clients/src/main/java/org/oracle/okafka/clients/producer/internals/ProducerMetrics.java b/clients/src/main/java/org/oracle/okafka/clients/producer/internals/ProducerMetrics.java deleted file mode 100644 index b0cb37c..0000000 --- a/clients/src/main/java/org/oracle/okafka/clients/producer/internals/ProducerMetrics.java +++ /dev/null @@ -1,60 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.oracle.okafka.clients.producer.internals; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.Map; - -import org.oracle.okafka.common.MetricNameTemplate; -import org.oracle.okafka.common.metrics.MetricConfig; -import org.oracle.okafka.common.metrics.Metrics; - -public class ProducerMetrics { - - public final SenderMetricsRegistry senderMetrics; - private final Metrics metrics; - - public ProducerMetrics(Metrics metrics) { - this.metrics = metrics; - this.senderMetrics = new SenderMetricsRegistry(this.metrics); - } - - private List getAllTemplates() { - List l = new ArrayList<>(this.senderMetrics.allTemplates()); - return l; - } - - public static void main(String[] args) { - Map metricTags = Collections.singletonMap("client-id", "client-id"); - MetricConfig metricConfig = new MetricConfig().tags(metricTags); - Metrics metrics = new Metrics(metricConfig); - - ProducerMetrics metricsRegistry = new ProducerMetrics(metrics); - System.out.println(Metrics.toHtmlTable("kafka.producer", metricsRegistry.getAllTemplates())); - } - -} diff --git a/clients/src/main/java/org/oracle/okafka/clients/producer/internals/RecordAccumulator.java b/clients/src/main/java/org/oracle/okafka/clients/producer/internals/RecordAccumulator.java index 7c112b9..64d20ef 100644 --- a/clients/src/main/java/org/oracle/okafka/clients/producer/internals/RecordAccumulator.java +++ b/clients/src/main/java/org/oracle/okafka/clients/producer/internals/RecordAccumulator.java @@ -1,9 +1,9 @@ /* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ + ** OKafka Java Client version 23.4. + ** + ** Copyright (c) 2019, 2024 Oracle and/or its affiliates. + ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. + */ /* * Licensed to the Apache Software Foundation (ASF) under one or more @@ -21,12 +21,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - -/* - * 04/20/2020: This file is modified to support Kafka Java Client compatability to Oracle Transactional Event Queues. 
- * - */ - package org.oracle.okafka.clients.producer.internals; import java.nio.ByteBuffer; @@ -36,33 +30,39 @@ import java.util.Deque; import java.util.HashMap; import java.util.HashSet; -import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Set; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.atomic.AtomicInteger; - -import org.oracle.okafka.clients.producer.Callback; -import org.oracle.okafka.common.Cluster; -import org.oracle.okafka.common.KafkaException; -import org.oracle.okafka.common.Node; -import org.oracle.okafka.common.PartitionInfo; -import org.oracle.okafka.common.TopicPartition; -import org.oracle.okafka.common.header.Header; -import org.oracle.okafka.common.metrics.Metrics; -import org.oracle.okafka.common.record.AbstractRecords; -import org.oracle.okafka.common.record.CompressionRatioEstimator; -import org.oracle.okafka.common.record.CompressionType; -import org.oracle.okafka.common.record.MemoryRecords; -import org.oracle.okafka.common.record.MemoryRecordsBuilder; -import org.oracle.okafka.common.record.Record; -import org.oracle.okafka.common.record.RecordBatch; -import org.oracle.okafka.common.record.TimestampType; -import org.oracle.okafka.common.utils.CopyOnWriteMap; -import org.oracle.okafka.common.utils.LogContext; -import org.oracle.okafka.common.utils.Time; -import org.oracle.okafka.common.utils.Utils; +import org.apache.kafka.clients.ApiVersions; +import org.apache.kafka.clients.producer.Callback; +import org.apache.kafka.clients.producer.internals.BufferPool; +import org.apache.kafka.clients.producer.internals.TransactionManager; +import org.apache.kafka.common.utils.ProducerIdAndEpoch; +import org.apache.kafka.common.Cluster; +import org.apache.kafka.common.KafkaException; +import org.apache.kafka.common.MetricName; +import org.apache.kafka.common.Node; +import org.apache.kafka.common.PartitionInfo; +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.errors.UnsupportedVersionException; +import org.apache.kafka.common.header.Header; +import org.apache.kafka.common.metrics.Measurable; +import org.apache.kafka.common.metrics.MetricConfig; +import org.apache.kafka.common.metrics.Metrics; +import org.apache.kafka.common.record.AbstractRecords; +import org.apache.kafka.common.record.CompressionRatioEstimator; +import org.apache.kafka.common.record.CompressionType; +import org.apache.kafka.common.record.MemoryRecords; +import org.apache.kafka.common.record.MemoryRecordsBuilder; +import org.apache.kafka.common.record.Record; +import org.apache.kafka.common.record.RecordBatch; +import org.apache.kafka.common.record.TimestampType; +import org.apache.kafka.common.utils.CopyOnWriteMap; +import org.apache.kafka.common.utils.LogContext; +import org.apache.kafka.common.utils.Time; import org.slf4j.Logger; /** @@ -73,27 +73,32 @@ * this behavior is explicitly disabled. 
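 *
 * A minimal sketch of how that memory bound is configured in practice, using the standard Kafka
 * producer keys (assumed here to be accepted unchanged by OKafka):
 *
 *   Properties props = new Properties();
 *   props.put("buffer.memory", "33554432"); // total bytes the BufferPool may hand out to batches
 *   props.put("max.block.ms", "60000");     // longest an append may block once that memory is exhausted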
*/ public final class RecordAccumulator { - private final Logger log; + + private final Logger log; private volatile boolean closed; private final AtomicInteger flushesInProgress; private final AtomicInteger appendsInProgress; private final int batchSize; private final CompressionType compression; - private final long lingerMs; + private final int lingerMs; private final long retryBackoffMs; + private final int deliveryTimeoutMs; private final BufferPool free; private final Time time; + private final ApiVersions apiVersions; private final ConcurrentMap> batches; private final IncompleteBatches incomplete; // The following variables are only accessed by the sender thread, so we don't need to protect them. - private final Map muted; + private final Set muted; private int drainIndex; + private final TransactionManager transactionManager; + private long nextBatchExpiryTimeMs = Long.MAX_VALUE; // the earliest time (absolute) a batch will expire. + /** * Create a new record accumulator * * @param logContext The log context used for logging * @param batchSize The size to use when allocating {@link MemoryRecords} instances - * @param totalSize The maximum memory the record accumulator can use. * @param compression The compression codec for the records * @param lingerMs An artificial delay time to add before declaring a records instance that isn't full ready for * sending. This allows time for more records to arrive. Setting a non-zero lingerMs will trade off some @@ -102,29 +107,66 @@ public final class RecordAccumulator { * exhausting all retries in a short period of time. * @param metrics The metrics * @param time The time instance to use + * @param apiVersions Request API versions for current connected brokers + * @param transactionManager The shared transaction state object which tracks producer IDs, epochs, and sequence + * numbers per partition. 
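     *
     * A likely mapping from the usual producer configuration to these parameters (assumed, not spelled
     * out in this patch):
     *
     *   props.put("batch.size", "16384");           // -> batchSize
     *   props.put("linger.ms", "100");              // -> lingerMs
     *   props.put("retry.backoff.ms", "100");       // -> retryBackoffMs
     *   props.put("delivery.timeout.ms", "120000"); // -> deliveryTimeoutMs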
*/ public RecordAccumulator(LogContext logContext, - int batchSize, - long totalSize, - CompressionType compression, - long lingerMs, - long retryBackoffMs, - Metrics metrics, - Time time) { + int batchSize, + CompressionType compression, + int lingerMs, + long retryBackoffMs, + int deliveryTimeoutMs, + Metrics metrics, + String metricGrpName, + Time time, + ApiVersions apiVersions, + TransactionManager transactionManager, + BufferPool bufferPool) { this.log = logContext.logger(RecordAccumulator.class); this.drainIndex = 0; this.closed = false; this.flushesInProgress = new AtomicInteger(0); this.appendsInProgress = new AtomicInteger(0); this.batchSize = batchSize; + this.compression = compression; this.lingerMs = lingerMs; this.retryBackoffMs = retryBackoffMs; - this.compression = compression; + this.deliveryTimeoutMs = deliveryTimeoutMs; this.batches = new CopyOnWriteMap<>(); - this.free = new BufferPool(totalSize, batchSize, time); + this.free = bufferPool; this.incomplete = new IncompleteBatches(); - this.muted = new HashMap<>(); + this.muted = new HashSet<>(); this.time = time; + this.apiVersions = apiVersions; + this.transactionManager = transactionManager; + registerMetrics(metrics, metricGrpName); + } + + private void registerMetrics(Metrics metrics, String metricGrpName) { + MetricName metricName = metrics.metricName("waiting-threads", metricGrpName, "The number of user threads blocked waiting for buffer memory to enqueue their records"); + Measurable waitingThreads = new Measurable() { + public double measure(MetricConfig config, long now) { + return free.queued(); + } + }; + metrics.addMetric(metricName, waitingThreads); + + metricName = metrics.metricName("buffer-total-bytes", metricGrpName, "The maximum amount of buffer memory the client can use (whether or not it is currently used)."); + Measurable totalBytes = new Measurable() { + public double measure(MetricConfig config, long now) { + return free.totalMemory(); + } + }; + metrics.addMetric(metricName, totalBytes); + + metricName = metrics.metricName("buffer-available-bytes", metricGrpName, "The total amount of buffer memory that is not being used (either unallocated or in the free list)."); + Measurable availableBytes = new Measurable() { + public double measure(MetricConfig config, long now) { + return free.availableMemory(); + } + }; + metrics.addMetric(metricName, availableBytes); } /** @@ -140,6 +182,9 @@ public RecordAccumulator(LogContext logContext, * @param headers the Headers for the record * @param callback The user-supplied callback to execute when the request is complete * @param maxTimeToBlock The maximum time in milliseconds to block for buffer memory to be available + * @param abortOnNewBatch A boolean that indicates returning before a new batch is created and + * running the partitioner's onNewBatch method before trying to append again + * @param nowMs The current time, in milliseconds */ public RecordAppendResult append(TopicPartition tp, long timestamp, @@ -147,7 +192,9 @@ public RecordAppendResult append(TopicPartition tp, byte[] value, Header[] headers, Callback callback, - long maxTimeToBlock) throws InterruptedException { + long maxTimeToBlock, + boolean abortOnNewBatch, + long nowMs) throws InterruptedException { // We keep track of the number of appending thread to make sure we do not miss batches in // abortIncompleteBatches(). 
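        // In outline, the append path below is: (1) take the partition's deque, (2) under its lock try to
        // append to the last open batch, (3) if that fails, allocate a new buffer from the BufferPool,
        // blocking for at most maxTimeToBlock, and (4) re-check under the lock before building a fresh
        // ProducerBatch so that a batch created by a racing thread is reused instead.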
appendsInProgress.incrementAndGet(); @@ -159,34 +206,46 @@ public RecordAppendResult append(TopicPartition tp, synchronized (dq) { if (closed) throw new KafkaException("Producer closed while send in progress"); - RecordAppendResult appendResult = tryAppend(timestamp, key, value, headers, callback, dq); + RecordAppendResult appendResult = tryAppend(timestamp, key, value, headers, callback, dq, nowMs); if (appendResult != null) return appendResult; } // we don't have an in-progress record batch try to allocate a new batch - int size = Math.max(this.batchSize, AbstractRecords.estimateSizeInBytesUpperBound(RecordBatch.CURRENT_MAGIC_VALUE, compression, key, value, headers)); - log.trace("Allocating a new {} byte message buffer for topic {} partition {}", size, tp.topic(), tp.partition()); + if (abortOnNewBatch) { + // Return a result that will cause another call to append. + return new RecordAppendResult(null, false, false, true); + } + + byte maxUsableMagic = apiVersions.maxUsableProduceMagic(); + int size = Math.max(this.batchSize, AbstractRecords.estimateSizeInBytesUpperBound(maxUsableMagic, compression, key, value, headers)); + log.trace("Allocating a new {} byte message buffer for topic {} partition {} with remaining timeout {}ms", size, tp.topic(), tp.partition(), maxTimeToBlock); buffer = free.allocate(size, maxTimeToBlock); + + // Update the current time in case the buffer allocation blocked above. + nowMs = time.milliseconds(); synchronized (dq) { // Need to check if producer is closed again after grabbing the dequeue lock. if (closed) throw new KafkaException("Producer closed while send in progress"); - RecordAppendResult appendResult = tryAppend(timestamp, key, value, headers, callback, dq); + RecordAppendResult appendResult = tryAppend(timestamp, key, value, headers, callback, dq, nowMs); if (appendResult != null) { // Somebody else found us a batch, return the one we waited for! Hopefully this doesn't happen often... return appendResult; } - MemoryRecordsBuilder recordsBuilder = recordsBuilder(buffer, RecordBatch.CURRENT_MAGIC_VALUE); - ProducerBatch batch = new ProducerBatch(tp, recordsBuilder, time.milliseconds()); - FutureRecordMetadata future = Utils.notNull(batch.tryAppend(timestamp, key, value, headers, callback, time.milliseconds())); + + MemoryRecordsBuilder recordsBuilder = recordsBuilder(buffer, maxUsableMagic); + ProducerBatch batch = new ProducerBatch(tp, recordsBuilder, nowMs); + FutureRecordMetadata future = Objects.requireNonNull(batch.tryAppend(timestamp, key, value, headers, + callback, nowMs)); dq.addLast(batch); - incomplete.add(batch); + incomplete.add(batch); + // Don't deallocate this buffer in the finally block as it's being used in the record batch buffer = null; - return new RecordAppendResult(future, dq.size() > 1 || batch.isFull(), true); + return new RecordAppendResult(future, dq.size() > 1 || batch.isFull(), true, false); } } finally { if (buffer != null) @@ -196,10 +255,10 @@ public RecordAppendResult append(TopicPartition tp, } private MemoryRecordsBuilder recordsBuilder(ByteBuffer buffer, byte maxUsableMagic) { - /* if (transactionManager != null && maxUsableMagic < RecordBatch.MAGIC_VALUE_V2) { + if (transactionManager != null && maxUsableMagic < RecordBatch.MAGIC_VALUE_V2) { throw new UnsupportedVersionException("Attempting to use idempotence with a broker which does not " + - "support the required message format (v2). The broker must be version 0.11 or later."); - }*/ + "support the required message format (v2). 
The broker must be version 0.11 or later."); + } return MemoryRecords.builder(buffer, maxUsableMagic, compression, TimestampType.CREATE_TIME, 0L); } @@ -212,49 +271,55 @@ private MemoryRecordsBuilder recordsBuilder(ByteBuffer buffer, byte maxUsableMag * if it is expired, or when the producer is closed. */ private RecordAppendResult tryAppend(long timestamp, byte[] key, byte[] value, Header[] headers, - Callback callback, Deque deque) { + Callback callback, Deque deque, long nowMs) { ProducerBatch last = deque.peekLast(); if (last != null) { - FutureRecordMetadata future = last.tryAppend(timestamp, key, value, headers, callback, time.milliseconds()); + FutureRecordMetadata future = last.tryAppend(timestamp, key, value, headers, callback, nowMs); if (future == null) last.closeForRecordAppends(); else - return new RecordAppendResult(future, deque.size() > 1 || last.isFull(), false); + return new RecordAppendResult(future, deque.size() > 1 || last.isFull(), false, false); } return null; } - + + private boolean isMuted(TopicPartition tp) { + return muted.contains(tp); + } + + public void resetNextBatchExpiryTime() { + nextBatchExpiryTimeMs = Long.MAX_VALUE; + } + + public void maybeUpdateNextBatchExpiryTime(ProducerBatch batch) { + if (batch.createdMs + deliveryTimeoutMs > 0) { + // the non-negative check is to guard us against potential overflow due to setting + // a large value for deliveryTimeoutMs + nextBatchExpiryTimeMs = Math.min(nextBatchExpiryTimeMs, batch.createdMs + deliveryTimeoutMs); + } else { + log.warn("Skipping next batch expiry time update due to addition overflow: " + + "batch.createMs={}, deliveryTimeoutMs={}", batch.createdMs, deliveryTimeoutMs); + } + } + /** * Get a list of batches which have been sitting in the accumulator too long and need to be expired. */ - public List expiredBatches(int requestTimeout, long now) { + public List expiredBatches(long now) { List expiredBatches = new ArrayList<>(); for (Map.Entry> entry : this.batches.entrySet()) { - Deque dq = entry.getValue(); - TopicPartition tp = entry.getKey(); - // We only check if the batch should be expired if the partition does not have a batch in flight. - // This is to prevent later batches from being expired while an earlier batch is still in progress. - // Note that `muted` is only ever populated if `max.in.flight.request.per.connection=1` so this protection - // is only active in this case. Otherwise the expiration order is not guaranteed. - if (!isMuted(tp, now)) { - synchronized (dq) { - // iterate over the batches and expire them if they have been in the accumulator for more than requestTimeOut - ProducerBatch lastBatch = dq.peekLast(); - Iterator batchIterator = dq.iterator(); - while (batchIterator.hasNext()) { - ProducerBatch batch = batchIterator.next(); - boolean isFull = batch != lastBatch || batch.isFull(); - // Check if the batch has expired. Expired batches are closed by maybeExpire, but callbacks - // are invoked after completing the iterations, since sends invoked from callbacks - // may append more batches to the deque being iterated. The batch is deallocated after - // callbacks are invoked. - if (batch.maybeExpire(requestTimeout, retryBackoffMs, now, this.lingerMs, isFull)) { - expiredBatches.add(batch); - batchIterator.remove(); - } else { - // Stop at the first batch that has not expired. 
- break; - } + // expire the batches in the order of sending + Deque deque = entry.getValue(); + synchronized (deque) { + while (!deque.isEmpty()) { + ProducerBatch batch = deque.getFirst(); + if (batch.hasReachedDeliveryTimeout(deliveryTimeoutMs, now)) { + deque.poll(); + batch.abortRecordAppends(); + expiredBatches.add(batch); + } else { + maybeUpdateNextBatchExpiryTime(batch); + break; } } } @@ -262,15 +327,111 @@ public List expiredBatches(int requestTimeout, long now) { return expiredBatches; } - private boolean isMuted(TopicPartition tp, long now) { - boolean result = muted.containsKey(tp) && muted.get(tp) > now; - if (!result) - muted.remove(tp); - return result; + public long getDeliveryTimeoutMs() { + return deliveryTimeoutMs; + } + + /** + * Re-enqueue the given record batch in the accumulator. In Sender.completeBatch method, we check + * whether the batch has reached deliveryTimeoutMs or not. Hence we do not do the delivery timeout check here. + */ + public void reenqueue(ProducerBatch batch, long now) { + batch.reenqueued(now); + Deque deque = getOrCreateDeque(batch.topicPartition); + synchronized (deque) { + /* if (transactionManager != null) + insertInSequenceOrder(deque, batch); + else */ + deque.addFirst(batch); + } + } + + /** + * Split the big batch that has been rejected and reenqueue the split batches in to the accumulator. + * @return the number of split batches. + */ + public int splitAndReenqueue(ProducerBatch bigBatch) { + // Reset the estimated compression ratio to the initial value or the big batch compression ratio, whichever + // is bigger. There are several different ways to do the reset. We chose the most conservative one to ensure + // the split doesn't happen too often. + CompressionRatioEstimator.setEstimation(bigBatch.topicPartition.topic(), compression, + Math.max(1.0f, (float) bigBatch.compressionRatio())); + Deque dq = bigBatch.split(this.batchSize); + int numSplitBatches = dq.size(); + Deque partitionDequeue = getOrCreateDeque(bigBatch.topicPartition); + while (!dq.isEmpty()) { + ProducerBatch batch = dq.pollLast(); + incomplete.add(batch); + // We treat the newly split batches as if they are not even tried. + synchronized (partitionDequeue) { + //Changes for 2.8.1:: transactionManager not supported by Oracle Kafka + /* + if (transactionManager != null) { + // We should track the newly created batches since they already have assigned sequences. + transactionManager.addInFlightBatch(batch); + insertInSequenceOrder(partitionDequeue, batch); + } else */{ + partitionDequeue.addFirst(batch); + } + } + } + return numSplitBatches; } + // We will have to do extra work to ensure the queue is in order when requests are being retried and there are + // multiple requests in flight to that partition. If the first in flight request fails to append, then all the + // subsequent in flight requests will also fail because the sequence numbers will not be accepted. + // + // Further, once batches are being retried, we are reduced to a single in flight request for that partition. So when + // the subsequent batches come back in sequence order, they will have to be placed further back in the queue. + // + // Note that this assumes that all the batches in the queue which have an assigned sequence also have the current + // producer id. We will not attempt to reorder messages if the producer id has changed, we will throw an + // IllegalStateException instead. + /* + * Changes for 2.8.1 :: Method used with TransactionManager only. 
Commented out + private void insertInSequenceOrder(Deque deque, ProducerBatch batch) { + // When we are requeing and have enabled idempotence, the reenqueued batch must always have a sequence. + if (batch.baseSequence() == RecordBatch.NO_SEQUENCE) + throw new IllegalStateException("Trying to re-enqueue a batch which doesn't have a sequence even " + + "though idempotency is enabled."); + + if (transactionManager.nextBatchBySequence(batch.topicPartition) == null) + throw new IllegalStateException("We are re-enqueueing a batch which is not tracked as part of the in flight " + + "requests. batch.topicPartition: " + batch.topicPartition + "; batch.baseSequence: " + batch.baseSequence()); + + ProducerBatch firstBatchInQueue = deque.peekFirst(); + if (firstBatchInQueue != null && firstBatchInQueue.hasSequence() && firstBatchInQueue.baseSequence() < batch.baseSequence()) { + // The incoming batch can't be inserted at the front of the queue without violating the sequence ordering. + // This means that the incoming batch should be placed somewhere further back. + // We need to find the right place for the incoming batch and insert it there. + // We will only enter this branch if we have multiple inflights sent to different brokers and we need to retry + // the inflight batches. + // + // Since we reenqueue exactly one batch a time and ensure that the queue is ordered by sequence always, it + // is a simple linear scan of a subset of the in flight batches to find the right place in the queue each time. + List orderedBatches = new ArrayList<>(); + while (deque.peekFirst() != null && deque.peekFirst().hasSequence() && deque.peekFirst().baseSequence() < batch.baseSequence()) + orderedBatches.add(deque.pollFirst()); + + log.debug("Reordered incoming batch with sequence {} for partition {}. It was placed in the queue at " + + "position {}", batch.baseSequence(), batch.topicPartition, orderedBatches.size()); + // Either we have reached a point where there are batches without a sequence (ie. never been drained + // and are hence in order by default), or the batch at the front of the queue has a sequence greater + // than the incoming batch. This is the right place to add the incoming batch. + deque.addFirst(batch); + + // Now we have to re insert the previously queued batches in the right order. + for (int i = orderedBatches.size() - 1; i >= 0; --i) { + deque.addFirst(orderedBatches.get(i)); + } - + // At this point, the incoming batch has been queued in the correct place according to its sequence. + } else { + deque.addFirst(batch); + } + } +*/ /** * Get a list of nodes whose partitions are ready to be sent, and the earliest time at which any non-sendable * partition will be ready; Also return the flag for whether there are any unknown leaders for the accumulated @@ -280,7 +441,7 @@ private boolean isMuted(TopicPartition tp, long now) { *
        *
     * 1. There is at least one partition that is not backing off its send
     * 2. and those partitions are not muted (to prevent reordering if
-     *    {@value org.oracle.okafka.clients.producer.ProducerConfig#MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION}
+     *    {@value org.apache.kafka.clients.producer.ProducerConfig#MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION}
     *    is set to one)
     * 3. and any of the following are true
@@ -293,49 +454,45 @@ private boolean isMuted(TopicPartition tp, long now) {
     *
      */ public ReadyCheckResult ready(Cluster cluster, long nowMs) { - Set readyInstances = new HashSet<>(); - + Set readyNodes = new HashSet<>(); long nextReadyCheckDelayMs = Long.MAX_VALUE; Set unknownLeaderTopics = new HashSet<>(); - boolean exhausted = this.free.queued() > 0; + boolean exhausted = this.free.queued() > 0; for (Map.Entry> entry : this.batches.entrySet()) { - - TopicPartition part = entry.getKey(); Deque deque = entry.getValue(); - Node leader = cluster.leaderFor(part); - - if(deque != null) { - synchronized (deque) { - if (leader == null && !deque.isEmpty()) { + synchronized (deque) { + // When producing to a large number of partitions, this path is hot and deques are often empty. + // We check whether a batch exists first to avoid the more expensive checks whenever possible. + ProducerBatch batch = deque.peekFirst(); + if (batch != null) { + TopicPartition part = entry.getKey(); + Node leader = cluster.leaderFor(part); + if (leader == null) { // This is a partition for which leader is not known, but messages are available to send. // Note that entries are currently not removed from batches when deque is empty. unknownLeaderTopics.add(part.topic()); - } else if ((!readyInstances.contains(leader)) && !isMuted(part, nowMs)) { - ProducerBatch batch = deque.peekFirst(); - if (batch != null) { - long waitedTimeMs = batch.waitedTimeMs(nowMs); - boolean backingOff = batch.attempts() > 0 && waitedTimeMs < retryBackoffMs; - long timeToWaitMs = backingOff ? retryBackoffMs : lingerMs; - boolean full = deque.size() > 1 || batch.isFull(); - boolean expired = waitedTimeMs >= timeToWaitMs; - boolean sendable = full || expired || exhausted || closed || flushInProgress(); - if (sendable && !backingOff) { - readyInstances.add(leader); - } else { - long timeLeftMs = Math.max(lingerMs - waitedTimeMs, 0); - // Note that this results in a conservative estimate since an un-sendable partition may have - // a leader that will later be found to have sendable data. However, this is good enough - // since we'll just wake up and then sleep again for the remaining time. - nextReadyCheckDelayMs = Math.min(timeLeftMs, nextReadyCheckDelayMs); - } - } - } - } + } else if (!readyNodes.contains(leader) && !isMuted(part)) { + long waitedTimeMs = batch.waitedTimeMs(nowMs); + boolean backingOff = batch.attempts() > 0 && waitedTimeMs < retryBackoffMs; + long timeToWaitMs = backingOff ? retryBackoffMs : lingerMs; + boolean full = deque.size() > 1 || batch.isFull(); + boolean expired = waitedTimeMs >= timeToWaitMs; + boolean sendable = full || expired || exhausted || closed || flushInProgress(); + if (sendable && !backingOff) { + readyNodes.add(leader); + } else { + long timeLeftMs = Math.max(timeToWaitMs - waitedTimeMs, 0); + // Note that this results in a conservative estimate since an un-sendable partition may have + // a leader that will later be found to have sendable data. However, this is good enough + // since we'll just wake up and then sleep again for the remaining time. 
+ nextReadyCheckDelayMs = Math.min(timeLeftMs, nextReadyCheckDelayMs); + } + } + } } } - - return new ReadyCheckResult(readyInstances, nextReadyCheckDelayMs, unknownLeaderTopics); + return new ReadyCheckResult(readyNodes, nextReadyCheckDelayMs, unknownLeaderTopics); } /** @@ -352,6 +509,128 @@ public boolean hasUndrained() { return false; } + private boolean shouldStopDrainBatchesForPartition(ProducerBatch first, TopicPartition tp) { + // Changes for 2.8.1 :: Trnasaction Manager not supported + /* ProducerIdAndEpoch producerIdAndEpoch = null; + if (transactionManager != null) { + if (!transactionManager.isSendToPartitionAllowed(tp)) + return true; + + producerIdAndEpoch = transactionManager.producerIdAndEpoch(); + if (!producerIdAndEpoch.isValid()) + // we cannot send the batch until we have refreshed the producer id + return true; + + if (!first.hasSequence()) { + if (transactionManager.hasInflightBatches(tp) && transactionManager.hasStaleProducerIdAndEpoch(tp)) { + // Don't drain any new batches while the partition has in-flight batches with a different epoch + // and/or producer ID. Otherwise, a batch with a new epoch and sequence number + // 0 could be written before earlier batches complete, which would cause out of sequence errors + return true; + } + + if (transactionManager.hasUnresolvedSequence(first.topicPartition)) + // Don't drain any new batches while the state of previous sequence numbers + // is unknown. The previous batches would be unknown if they were aborted + // on the client after being sent to the broker at least once. + return true; + } + + int firstInFlightSequence = transactionManager.firstInFlightSequence(first.topicPartition); + if (firstInFlightSequence != RecordBatch.NO_SEQUENCE && first.hasSequence() + && first.baseSequence() != firstInFlightSequence) + // If the queued batch already has an assigned sequence, then it is being retried. + // In this case, we wait until the next immediate batch is ready and drain that. + // We only move on when the next in line batch is complete (either successfully or due to + // a fatal broker error). This effectively reduces our in flight request count to 1. + return true; + } + */ + return false; + } + + private List drainBatchesForOneNode(Cluster cluster, Node node, int maxSize, long now) { + int size = 0; + List parts = cluster.partitionsForNode(node.id()); + List ready = new ArrayList<>(); + /* to make starvation less likely this loop doesn't start at 0 */ + int start = drainIndex = drainIndex % parts.size(); + do { + PartitionInfo part = parts.get(drainIndex); + TopicPartition tp = new TopicPartition(part.topic(), part.partition()); + this.drainIndex = (this.drainIndex + 1) % parts.size(); + + // Only proceed if the partition has no in-flight batches. + if (isMuted(tp)) + continue; + + Deque deque = getDeque(tp); + if (deque == null) + continue; + + synchronized (deque) { + // invariant: !isMuted(tp,now) && deque != null + ProducerBatch first = deque.peekFirst(); + if (first == null) + continue; + + // first != null + boolean backoff = first.attempts() > 0 && first.waitedTimeMs(now) < retryBackoffMs; + // Only drain the batch if it is not during backoff period. 
+ if (backoff) + continue; + + if (size + first.estimatedSizeInBytes() > maxSize && !ready.isEmpty()) { + // there is a rare case that a single batch size is larger than the request size due to + // compression; in this case we will still eventually send this batch in a single request + break; + } else { + if (shouldStopDrainBatchesForPartition(first, tp)) + break; + + boolean isTransactional = transactionManager != null && transactionManager.isTransactional(); + ProducerBatch batch = deque.pollFirst(); + //Changes for 2.8.1:: Transaction Manager not supported + /* + ProducerIdAndEpoch producerIdAndEpoch = ; + transactionManager != null ? transactionManager.producerIdAndEpoch() : null; + + ProducerBatch batch = deque.pollFirst(); + if (producerIdAndEpoch != null && !batch.hasSequence()) { + // If the the producer id/epoch of the partition do not match the latest one + // of the producer, we update it and reset the sequence. This should be + // only done when all its in-flight batches have completed. This is guarantee + // in `shouldStopDrainBatchesForPartition`. + transactionManager.maybeUpdateProducerIdAndEpoch(batch.topicPartition); + + // If the batch already has an assigned sequence, then we should not change the producer id and + // sequence number, since this may introduce duplicates. In particular, the previous attempt + // may actually have been accepted, and if we change the producer id and sequence here, this + // attempt will also be accepted, causing a duplicate. + // + // Additionally, we update the next sequence number bound for the partition, and also have + // the transaction manager track the batch so as to ensure that sequence ordering is maintained + // even if we receive out of order responses. + batch.setProducerState(producerIdAndEpoch, transactionManager.sequenceNumber(batch.topicPartition), isTransactional); + transactionManager.incrementSequenceNumber(batch.topicPartition, batch.recordCount); + log.debug("Assigned producerId {} and producerEpoch {} to batch with base sequence " + + "{} being sent to partition {}", producerIdAndEpoch.producerId, + producerIdAndEpoch.epoch, batch.baseSequence(), tp); + + transactionManager.addInFlightBatch(batch); + } + */ + batch.close(); + size += batch.records().sizeInBytes(); + ready.add(batch); + + batch.drained(now); + } + } + } while (start != drainIndex); + return ready; + } + /** * Drain all the data for the given nodes and collate them into a list of batches that will fit within the specified * size on a per-node basis. This method attempts to avoid choosing the same topic-node over and over. @@ -362,56 +641,25 @@ public boolean hasUndrained() { * @param now The current unix time in milliseconds * @return A list of {@link ProducerBatch} for each node specified with total size less than the requested maxSize. */ - public Map> drain(Cluster cluster, Set readyNodes, int maxSize, long now) { - if (readyNodes.isEmpty()) + public Map> drain(Cluster cluster, Set nodes, int maxSize, long now) { + if (nodes.isEmpty()) return Collections.emptyMap(); - - Map> batches = new HashMap<>(); - for (Node instance : readyNodes) { - int size = 0; - List parts = cluster.partitionsForNode(instance); - List ready = new ArrayList<>(); - /* to make starvation less likely this loop doesn't start at 0 */ - int start = drainIndex = drainIndex % parts.size(); - do { - PartitionInfo part = parts.get(drainIndex); - TopicPartition tp = new TopicPartition(part.topic(), part.partition()); - // Only proceed if the partition has no in-flight batches. 
- if (!isMuted(tp, now)) { - Deque deque = getDeque(tp); - if (deque != null) { - synchronized (deque) { - ProducerBatch first = deque.peekFirst(); - if (first != null) { - boolean backoff = first.attempts() > 0 && first.waitedTimeMs(now) < retryBackoffMs; - if(!backoff && (first.isFull() || first.waitedTimeMs(now) >= lingerMs || flushInProgress()) ) { - if (size + first.estimatedSizeInBytes() > maxSize && !ready.isEmpty()) { - // there is a rare case that a single batch size is larger than the request size due - // to compression; in this case we will still eventually send this batch in a single - // request - break; - } - - ProducerBatch batch = deque.pollFirst(); - batch.close(); - size += batch.records().sizeInBytes(); - ready.add(batch); - - batch.drained(now); - } - - - } - } - } - } - this.drainIndex = (this.drainIndex + 1) % parts.size(); - } while (start != drainIndex); - batches.put(instance, ready); + + Map> batches = new HashMap<>(); + for (Node node : nodes) { + List ready = drainBatchesForOneNode(cluster, node, maxSize, now); + batches.put(node.id(), ready); } return batches; } + /** + * The earliest absolute time a batch will expire (in milliseconds) + */ + public long nextExpiryTimeMs() { + return this.nextBatchExpiryTimeMs; + } + private Deque getDeque(TopicPartition tp) { return batches.get(tp); } @@ -430,41 +678,7 @@ private Deque getOrCreateDeque(TopicPartition tp) { else return previous; } - - /** - * Re-enqueue the given record batch in the accumulator to retry - */ - public void reenqueue(ProducerBatch batch, long now) { - batch.reenqueued(now); - Deque deque = getOrCreateDeque(batch.topicPartition); - synchronized (deque) { - deque.addFirst(batch); - } - } - /** - * Split the big batch that has been rejected and reenqueue the split batches in to the accumulator. - * @return the number of split batches. - */ - public int splitAndReenqueue(ProducerBatch bigBatch) { - // Reset the estimated compression ratio to the initial value or the big batch compression ratio, whichever - // is bigger. There are several different ways to do the reset. We chose the most conservative one to ensure - // the split doesn't happen too often. - CompressionRatioEstimator.setEstimation(bigBatch.topicPartition.topic(), compression, - Math.max(1.0f, (float) bigBatch.compressionRatio())); - Deque dq = bigBatch.split(this.batchSize); - int numSplitBatches = dq.size(); - Deque partitionDequeue = getOrCreateDeque(bigBatch.topicPartition); - while (!dq.isEmpty()) { - ProducerBatch batch = dq.pollLast(); - incomplete.add(batch); - // We treat the newly split batches as if they are not even tried. 
- synchronized (partitionDequeue) { - partitionDequeue.addFirst(batch); - } - } - return numSplitBatches; - } - + /** * Deallocate the record batch */ @@ -579,7 +793,7 @@ void abortUndrainedBatches(RuntimeException reason) { Deque dq = getDeque(batch.topicPartition); boolean aborted = false; synchronized (dq) { - if (!batch.isClosed()) { + if ((transactionManager != null && !batch.hasSequence()) || (transactionManager == null && !batch.isClosed())) { aborted = true; batch.abortRecordAppends(); dq.remove(batch); @@ -593,11 +807,11 @@ void abortUndrainedBatches(RuntimeException reason) { } public void mutePartition(TopicPartition tp) { - muted.put(tp, Long.MAX_VALUE); + muted.add(tp); } - public void unmutePartition(TopicPartition tp, long throttleUntilTimeMs) { - muted.put(tp, throttleUntilTimeMs); + public void unmutePartition(TopicPartition tp) { + muted.remove(tp); } /** @@ -605,6 +819,7 @@ public void unmutePartition(TopicPartition tp, long throttleUntilTimeMs) { */ public void close() { this.closed = true; + this.free.close(); } /* @@ -614,11 +829,13 @@ public final static class RecordAppendResult { public final FutureRecordMetadata future; public final boolean batchIsFull; public final boolean newBatchCreated; + public final boolean abortForNewBatch; - public RecordAppendResult(FutureRecordMetadata future, boolean batchIsFull, boolean newBatchCreated) { + public RecordAppendResult(FutureRecordMetadata future, boolean batchIsFull, boolean newBatchCreated, boolean abortForNewBatch) { this.future = future; this.batchIsFull = batchIsFull; this.newBatchCreated = newBatchCreated; + this.abortForNewBatch = abortForNewBatch; } } @@ -636,5 +853,4 @@ public ReadyCheckResult(Set readyNodes, long nextReadyCheckDelayMs, Set allTemplates; @@ -85,14 +67,15 @@ public SenderMetricsRegistry(Metrics metrics) { this.tags = this.metrics.config().tags().keySet(); this.allTemplates = new ArrayList<>(); - /***** Client level *****/ + /* Client level */ this.batchSizeAvg = createMetricName("batch-size-avg", "The average number of bytes sent per partition per-request."); this.batchSizeMax = createMetricName("batch-size-max", "The max number of bytes sent per partition per-request."); this.compressionRateAvg = createMetricName("compression-rate-avg", - "The average compression rate of record batches."); + "The average compression rate of record batches, defined as the average ratio of the " + + "compressed batch size over the uncompressed size."); this.recordQueueTimeAvg = createMetricName("record-queue-time-avg", "The average time in ms record batches spent in the send buffer."); this.recordQueueTimeMax = createMetricName("record-queue-time-max", @@ -133,7 +116,7 @@ public SenderMetricsRegistry(Metrics metrics) { this.produceThrottleTimeMax = createMetricName("produce-throttle-time-max", "The maximum time in ms a request was throttled by a broker"); - /***** Topic level *****/ + /* Topic level */ this.topicTags = new LinkedHashSet<>(tags); this.topicTags.add("topic"); @@ -147,7 +130,8 @@ public SenderMetricsRegistry(Metrics metrics) { this.topicByteTotal = createTopicTemplate("byte-total", "The total number of bytes sent for a topic."); this.topicCompressionRate = createTopicTemplate("compression-rate", - "The average compression rate of record batches for a topic."); + "The average compression rate of record batches for a topic, defined as the average ratio " + + "of the compressed batch size over the uncompressed size."); this.topicRecordRetryRate = createTopicTemplate("record-retry-rate", "The average 
per-second number of retried record sends for a topic"); this.topicRecordRetryTotal = createTopicTemplate("record-retry-total", @@ -160,14 +144,14 @@ public SenderMetricsRegistry(Metrics metrics) { } private MetricName createMetricName(String name, String description) { - return this.metrics.metricInstance(createTemplate(name, METRIC_GROUP_NAME, description, this.tags)); + return this.metrics.metricInstance(createTemplate(name, OKAFKA_PRODUCER_METRIC_GROUP_NAME, description, this.tags)); } private MetricNameTemplate createTopicTemplate(String name, String description) { return createTemplate(name, TOPIC_METRIC_GROUP_NAME, description, this.topicTags); } - /** topic level metrics **/ + /* topic level metrics */ public MetricName topicRecordSendRate(Map tags) { return this.metrics.metricInstance(this.topicRecordSendRate, tags); } diff --git a/clients/src/main/java/org/oracle/okafka/clients/producer/internals/SenderThread.java b/clients/src/main/java/org/oracle/okafka/clients/producer/internals/SenderThread.java index 029c339..c044fd8 100644 --- a/clients/src/main/java/org/oracle/okafka/clients/producer/internals/SenderThread.java +++ b/clients/src/main/java/org/oracle/okafka/clients/producer/internals/SenderThread.java @@ -1,9 +1,9 @@ /* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ + ** OKafka Java Client version 23.4. + ** + ** Copyright (c) 2019, 2024 Oracle and/or its affiliates. + ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. + */ /* * Licensed to the Apache Software Foundation (ASF) under one or more @@ -29,337 +29,475 @@ package org.oracle.okafka.clients.producer.internals; - import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.Objects; +import java.util.function.Function; -import org.oracle.okafka.clients.ClientRequest; -import org.oracle.okafka.clients.ClientResponse; +import org.apache.kafka.clients.ClientRequest; +import org.apache.kafka.clients.ClientResponse; import org.oracle.okafka.clients.KafkaClient; import org.oracle.okafka.clients.Metadata; -import org.oracle.okafka.clients.RequestCompletionHandler; +import org.apache.kafka.clients.RequestCompletionHandler; +import org.oracle.okafka.common.requests.ProduceRequest; +import org.oracle.okafka.clients.producer.KafkaProducer; +import org.oracle.okafka.clients.producer.ProducerConfig; import org.oracle.okafka.clients.producer.internals.ProducerBatch; -import org.oracle.okafka.clients.producer.internals.ProduceResponse; +import org.oracle.okafka.clients.producer.internals.RecordAccumulator; +import org.apache.kafka.clients.producer.internals.SenderMetricsRegistry; +//import org.oracle.okafka.clients.producer.internals.SenderMetricsRegistry; +import org.oracle.okafka.common.requests.ProduceResponse; +import org.oracle.okafka.common.utils.MessageIdConverter; +import org.oracle.okafka.common.utils.MessageIdConverter.OKafkaOffset; import org.oracle.okafka.common.Node; -import org.oracle.okafka.common.errors.InvalidMetadataException; -import org.oracle.okafka.common.errors.RetriableException; -import org.oracle.okafka.common.utils.LogContext; -import org.oracle.okafka.common.utils.Time; +import org.apache.kafka.common.InvalidRecordException; +import org.apache.kafka.common.KafkaException; 
+import org.apache.kafka.common.MetricName; +import org.apache.kafka.common.errors.InvalidMetadataException; +import org.apache.kafka.common.errors.InvalidTopicException; +import org.apache.kafka.common.errors.NotLeaderForPartitionException; +import org.apache.kafka.common.errors.RetriableException; +import org.apache.kafka.common.errors.TimeoutException; +import org.apache.kafka.common.metrics.Sensor; +import org.apache.kafka.common.metrics.stats.Avg; +import org.apache.kafka.common.metrics.stats.Max; +import org.apache.kafka.common.metrics.stats.Meter; +import org.apache.kafka.common.requests.ProduceResponse.RecordError; +import org.apache.kafka.common.utils.LogContext; +import org.apache.kafka.common.utils.Time; import org.slf4j.Logger; -import static org.oracle.okafka.common.record.RecordBatch.NO_TIMESTAMP; +import static org.apache.kafka.common.record.RecordBatch.NO_TIMESTAMP; + /** - * The background thread that handles the sending of produce requests to the Kafka cluster. This thread makes metadata - * requests to renew its view of the cluster and then sends produce requests to the appropriate nodes. + * The background thread that handles the sending of produce requests to the + * Kafka cluster. This thread makes metadata requests to renew its view of the + * cluster and then sends produce requests to the appropriate nodes. */ public class SenderThread implements Runnable { private final Logger log; - /* the record accumulator that batches records */ - private final RecordAccumulator accumulator; + /* the record accumulator that batches records */ + private final RecordAccumulator accumulator; + + private final KafkaClient client; - private final KafkaClient client; - /* the metadata for the client */ private final Metadata metadata; - + /* * the flag indicating whether the producer should guarantee the message order * on the broker or not. */ private final boolean guaranteeMessageOrder; - /* the client id used to identify this client in requests to the server */ - private final String clientId; + /* the client id used to identify this client in requests to the server */ + private final String clientId; + + /* the current correlation id to use when sending requests to servers */ + private int correlation; + + private final int maxRequestSize; - /* the current correlation id to use when sending requests to servers */ - private int correlation; - - private final int maxRequestSize; - - /* the number of times to retry a failed request before giving up */ + /* the number of times to retry a failed request before giving up */ private final int retries; - /* the number of acknowledgements to request from the server */ - private final short acks; + /* the number of acknowledgements to request from the server */ + private final short acks; - /* the clock instance used for getting the time */ - private final Time time; - - /* the max time to wait for the server to respond to the request */ + /* the clock instance used for getting the time */ + private final Time time; + + /* the max time to wait for the server to respond to the request */ private final int requestTimeoutMs; /* The max time to wait before retrying a request which has failed */ private final long retryBackoffMs; + /* true while the sender thread is still running */ + private volatile boolean running; + + /* + * true when the caller wants to ignore all unsent/inflight messages and force + * close. 
+ */ + private volatile boolean forceClose; - /* true while the sender thread is still running */ - private volatile boolean running; + /* metrics */ + private final SenderMetrics sensors; - /* true when the caller wants to ignore all unsent/inflight messages and force close. */ - private volatile boolean forceClose; - - public SenderThread(LogContext logContext,String clientId, KafkaClient client, Metadata metadata, RecordAccumulator accumulator, - boolean guaranteeMessageOrder, int maxRequestSize, short acks, int retries, - SenderMetricsRegistry metricsRegistry, Time time, int requestTimeoutMs, long retryBackoffMs) { + private Object syncObject = new Object(); + private final ProducerConfig config; + + public SenderThread(LogContext logContext, String clientId, KafkaClient client, Metadata metadata, + RecordAccumulator accumulator, boolean guaranteeMessageOrder, ProducerConfig pConfig, short acks, + int retries, SenderMetricsRegistry metricsRegistry, Time time) { this.log = logContext.logger(SenderThread.class); this.clientId = clientId; - this.accumulator = accumulator; - this.client = client; - this.metadata = metadata; - this.guaranteeMessageOrder = guaranteeMessageOrder; - this.maxRequestSize = maxRequestSize; - this.correlation = 0; - this.running = true; - this.acks = acks; - this.time = time; - this.retries = retries; - this.requestTimeoutMs = requestTimeoutMs; - this.retryBackoffMs = retryBackoffMs; - } - - /** - * The main run loop for the sender thread - */ - public void run() { - log.debug("Starting Kafka producer I/O thread."); - // main loop, runs until close is called - while (running) { - try { - run(time.milliseconds()); - } catch (Exception e) { - log.error("Uncaught error in kafka producer I/O thread: ", e); - } - } - log.debug("Beginning shutdown of Kafka producer I/O thread, sending remaining records."); - // okay we stopped accepting requests but there may still be - // requests in the accumulator or waiting for acknowledgment, - // wait until these are completed. - while (!forceClose && this.accumulator.hasUndrained()) { - try { - run(time.milliseconds()); - } catch (Exception e) { - log.error("Uncaught error in kafka producer I/O thread: ", e); - } - } - if (forceClose) { - // We need to fail all the incomplete batches and wake up the threads waiting on - // the futures. - log.debug("Aborting incomplete batches due to forced shutdown"); - this.accumulator.abortIncompleteBatches(); - } - try { - this.client.close(); - } catch(Exception ex) { - log.error("failed to close AQ producer", ex); - } - - log.debug("Shutdown of Kafka producer I/O thread has completed."); - - } - - /** - * Run a single iteration of sending - * - * @param now The current POSIX time in milliseconds - */ - void run(long now) { - sendProducerData(now); - client.maybeUpdateMetadata(now); - } - - private long sendProducerData(long now) { - // get the list of partitions with data ready to send - RecordAccumulator.ReadyCheckResult result = this.accumulator.ready(metadata.fetch(), now); - - if (!result.unknownLeaderTopics.isEmpty()) { - // The set of topics with unknown leader contains topics with leader election pending as well as - // topics which may have expired. Add the topic again to metadata to ensure it is included - // and request metadata update, since there are messages to send to the topic. 
- for (String topic : result.unknownLeaderTopics) - this.metadata.add(topic); - - log.debug("Requesting metadata update due to unknown leader topics from the batched records: {}", result.unknownLeaderTopics); - - this.metadata.requestUpdate(); - } - - // remove any nodes we aren't ready to send to - Iterator iter = result.readyNodes.iterator(); - long notReadyTimeout = Long.MAX_VALUE; - while (iter.hasNext()) { - Node node = iter.next(); - - if (!this.client.ready(node, now)) { - - iter.remove(); - notReadyTimeout = Math.min(notReadyTimeout, this.client.pollDelayMs(node, now)); - } - } - - // create produce requests - Map> batches = this.accumulator.drain(metadata.fetch(), result.readyNodes, maxRequestSize, now); - if (guaranteeMessageOrder) { + this.accumulator = accumulator; + this.client = client; + this.metadata = metadata; + this.guaranteeMessageOrder = guaranteeMessageOrder; + this.config = pConfig; + this.maxRequestSize = config.getInt(ProducerConfig.MAX_REQUEST_SIZE_CONFIG); + this.correlation = 0; + this.running = true; + this.acks = acks; + this.time = time; + this.sensors = new SenderMetrics(metricsRegistry, metadata, client, time); + this.retries = retries; + this.requestTimeoutMs = config.getInt(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG); + this.retryBackoffMs = config.getLong(ProducerConfig.RETRY_BACKOFF_MS_CONFIG); + } + + /** + * The main run loop for the sender thread + */ + public void run() { + log.debug("Starting Kafka producer I/O thread."); + // main loop, runs until close is called + while (running) { + try { + run(time.milliseconds()); + } catch (Exception e) { + log.error("Uncaught error in kafka producer I/O thread: ", e); + } + } + log.debug("Beginning shutdown of Kafka producer I/O thread, sending remaining records."); + // okay we stopped accepting requests but there may still be + // requests in the accumulator or waiting for acknowledgment, + // wait until these are completed. + while (!forceClose && this.accumulator.hasUndrained()) { + try { + run(time.milliseconds()); + } catch (Exception e) { + log.error("Uncaught error in kafka producer I/O thread: ", e); + } + } + if (forceClose) { + // We need to fail all the incomplete batches and wake up the threads waiting on + // the futures. 
+ log.debug("Aborting incomplete batches due to forced shutdown"); + this.accumulator.abortIncompleteBatches(); + } + try { + this.client.close(); + } catch (Exception ex) { + log.error("failed to close AQ producer", ex); + } + + log.debug("Shutdown of Kafka producer I/O thread has completed."); + + } + + /** + * Run a single iteration of sending + * + * @param now The current POSIX time in milliseconds + */ + void run(long now) { + long pollTimeOut = sendProducerData(now); + client.maybeUpdateMetadata(now); + try { + long sleepTime = pollTimeOut; + if (sleepTime == Long.MAX_VALUE) { + Long lingerConfig = config.getLong(ProducerConfig.LINGER_MS_CONFIG); + + if (lingerConfig.longValue() > 0) + sleepTime = (int) Math.min(config.getLong(ProducerConfig.LINGER_MS_CONFIG), 500); + else + sleepTime = 500; + } + if (sleepTime > 0) { + log.debug("Sender waiting for " + sleepTime); + try { + synchronized (syncObject) { + syncObject.wait(sleepTime); + } + } catch (Exception sleepE) { + } + // Thread.sleep(sleepTime); + } + } catch (Exception e) { + } + } + + private long sendProducerData(long now) { + // get the list of partitions with data ready to send + RecordAccumulator.ReadyCheckResult result = this.accumulator.ready(metadata.fetch(), now); + + if (!result.unknownLeaderTopics.isEmpty()) { + // The set of topics with unknown leader contains topics with leader election + // pending as well as + // topics which may have expired. Add the topic again to metadata to ensure it + // is included + // and request metadata update, since there are messages to send to the topic. + for (String topic : result.unknownLeaderTopics) + this.metadata.add(topic); + + log.debug("Requesting metadata update due to unknown leader topics from the batched records: {}", + result.unknownLeaderTopics); + + this.metadata.requestUpdate(); + } + // log.debug("ReadyNodes Size " + result.readyNodes.size()); + // remove any nodes we aren't ready to send to + + Iterator iter = result.readyNodes.iterator(); + long notReadyTimeout = Long.MAX_VALUE; + while (iter.hasNext()) { + Node node = (org.oracle.okafka.common.Node) iter.next(); + if (!this.client.ready(node, now)) { + iter.remove(); + log.debug("Node " + node + " is not ready and is removed for now"); + notReadyTimeout = Math.min(notReadyTimeout, this.client.pollDelayMs(node, now)); + } + } + + // create produce requests + Map> batches = this.accumulator.drain(metadata.fetch(), result.readyNodes, + maxRequestSize, now); + + if (guaranteeMessageOrder) { // Mute all the partitions drained for (List batchList : batches.values()) { for (ProducerBatch batch : batchList) this.accumulator.mutePartition(batch.topicPartition); } - } - - List expiredBatches = this.accumulator.expiredBatches(this.requestTimeoutMs, now); - // Reset the producer id if an expired batch has previously been sent to the broker. Also update the metrics - // for expired batches. see the documentation of @TransactionState.resetProducerId to understand why - // we need to reset the producer id here. - if (!expiredBatches.isEmpty()) - log.trace("Expired {} batches in accumulator", expiredBatches.size()); - for (ProducerBatch expiredBatch : expiredBatches) { - failBatch(expiredBatch, new ArrayList() { { add("ID:00000000000000000000000000000000"); }}, new ArrayList() { { add(NO_TIMESTAMP); }}, expiredBatch.timeoutException()); - } - - // If we have any nodes that are ready to send + have sendable data, poll with 0 timeout so this can immediately - // loop and try sending more data. 
Otherwise, the timeout is determined by nodes that have partitions with data - // that isn't yet sendable (e.g. lingering, backing off). Note that this specifically does not include nodes - // with sendable data that aren't ready to send since they would cause busy looping. - long pollTimeout = Math.min(result.nextReadyCheckDelayMs, notReadyTimeout); - if (!result.readyNodes.isEmpty()) { - log.trace("Instances with data ready to send: {}", result.readyNodes); - // if some partitions are already ready to be sent, the select time would be 0; - // otherwise if some partition already has some data accumulated but not ready yet, - // the select time will be the time difference between now and its linger expiry time; - // otherwise the select time will be the time difference between now and the metadata expiry time; - pollTimeout = 0; - } - sendProduceRequests(batches, pollTimeout); - return pollTimeout; - } - - /** - * Start closing the sender (won't actually complete until all data is sent out) - */ - public void initiateClose() { - // Ensure accumulator is closed first to guarantee that no more appends are accepted after - // breaking from the sender loop. Otherwise, we may miss some callbacks when shutting down. - this.accumulator.close(); - this.running = false; - } - - /** - * Closes the sender without sending out any pending messages. - */ - public void forceClose() { - this.forceClose = true; - initiateClose(); - } - - - /** - * Transfer the record batches into a list of produce requests on a per-node basis - */ - private void sendProduceRequests(Map> collated, long pollTimeout) { - - for (Map.Entry> entry : collated.entrySet()) - sendProduceRequest(entry.getKey(), entry.getValue()); - } - - /** - * Create a produce request from the given record batch - */ - private void sendProduceRequest(Node node, List batches) { - if (batches.isEmpty()) - return; - //Map> produceRecordsByTopic = new HashMap<>(); - //Map batchesByPartition = new HashMap<>(); - for (ProducerBatch batch : batches) { - /*TopicPartition tp = batch.topicPartition; - MemoryRecords records = batch.records(); - if(!produceRecordsByTopic.containsKey(tp.topic())) { - produceRecordsByTopic.put(tp.topic(), new HashMap()); - } - produceRecordsByTopic.get(tp.topic()).put(tp, records); - batchesByPartition.put(new TopicPartition(tp.topic(), tp.partition()), batch);*/ - RequestCompletionHandler callback = new RequestCompletionHandler() { - @Override - public void onComplete(ClientResponse response) { - handleProduceResponse(response, batch, time.milliseconds()); - } - }; - - - ClientRequest request = client.newClientRequest(node, new ProduceRequest.Builder(batch.topicPartition, batch.records(), (short)1, -1), time.milliseconds(), true, -1, callback); - send(request); - } - - } - - /** - * Send produce request to destination - * Handle response generated from send. 
- */ - public void send(ClientRequest request) { - /*for(Map.Entry> produceRecordsByPartition : request.getproduceRecordsByTopic().entrySet()) { - for(Map.Entry partitionRecords : produceRecordsByPartition.getValue().entrySet()) { - - } - }*/ - ClientResponse response = client.send(request, time.milliseconds()); - completeResponse(response); - - } - - /** - * Handle response using callback in a request - */ - private void completeResponse(ClientResponse response) { - response.onComplete(); - } - + } + + // List expiredBatches = + // this.accumulator.expiredBatches(this.requestTimeoutMs, now); + + List expiredBatches = this.accumulator.expiredBatches(this.requestTimeoutMs); + + // Reset the producer id if an expired batch has previously been sent to the + // broker. Also update the metrics + // for expired batches. see the documentation of + // @TransactionState.resetProducerId to understand why + // we need to reset the producer id here. + if (!expiredBatches.isEmpty()) + log.trace("Expired {} batches in accumulator", expiredBatches.size()); + for (ProducerBatch expiredBatch : expiredBatches) { + String errorMessage = "Expiring " + expiredBatch.recordCount + " record(s) for " + + expiredBatch.topicPartition + ":" + (now - expiredBatch.createdMs) + + " ms has passed since batch creation"; + // failBatch(expiredBatch, NO_TIMESTAMP, new ArrayList() { { + // add("ID:00000000000000000000000000000000"); }}, new + // TimeoutException(errorMessage)); + List msgIdList = new ArrayList(); + failBatch(expiredBatch, -1l, NO_TIMESTAMP, msgIdList, + (RuntimeException) (new TimeoutException(errorMessage))); + } + sensors.updateProduceRequestMetrics(batches); + // If we have any nodes that are ready to send + have sendable data, poll with 0 + // timeout so this can immediately + // loop and try sending more data. Otherwise, the timeout is determined by nodes + // that have partitions with data + // that isn't yet sendable (e.g. lingering, backing off). Note that this + // specifically does not include nodes + // with sendable data that aren't ready to send since they would cause busy + // looping. + long pollTimeout = Math.min(result.nextReadyCheckDelayMs, notReadyTimeout); + if (!result.readyNodes.isEmpty()) { + log.trace("Instances with data ready to send: {}", result.readyNodes); + // if some partitions are already ready to be sent, the select time would be 0; + // otherwise if some partition already has some data accumulated but not ready + // yet, + // the select time will be the time difference between now and its linger expiry + // time; + // otherwise the select time will be the time difference between now and the + // metadata expiry time; + pollTimeout = 0; + } + sendProduceRequests(batches, pollTimeout); + return pollTimeout; + } + + /** + * Start closing the sender (won't actually complete until all data is sent out) + */ + public void initiateClose() { + // Ensure accumulator is closed first to guarantee that no more appends are + // accepted after + // breaking from the sender loop. Otherwise, we may miss some callbacks when + // shutting down. + this.accumulator.close(); + this.running = false; + } + + /** + * Closes the sender without sending out any pending messages. 
+ */ + public void forceClose() { + this.forceClose = true; + initiateClose(); + } + + /** + * Transfer the record batches into a list of produce requests on a per-node + * basis + */ + private void sendProduceRequests(Map> collated, long pollTimeout) { + + for (Map.Entry> entry : collated.entrySet()) { + sendProduceRequest(metadata.getNodeById(entry.getKey()), entry.getValue()); + } + } + + /** + * Create a produce request from the given record batch + */ + private void sendProduceRequest(Node node, List batches) { + if (batches.isEmpty()) + return; + // Map> produceRecordsByTopic = new + // HashMap<>(); + // Map batchesByPartition = new HashMap<>(); + for (ProducerBatch batch : batches) { + boolean checkForDuplicate = false; + List retryMsgIdList = null; + /* + * TopicPartition tp = batch.topicPartition; MemoryRecords records = + * batch.records(); if(!produceRecordsByTopic.containsKey(tp.topic())) { + * produceRecordsByTopic.put(tp.topic(), new HashMap()); } produceRecordsByTopic.get(tp.topic()).put(tp, records); + * batchesByPartition.put(new TopicPartition(tp.topic(), tp.partition()), + * batch); + */ + + RequestCompletionHandler callback = new RequestCompletionHandler() { + @Override + public void onComplete(ClientResponse response) { + handleProduceResponse(response, batch, time.milliseconds()); + } + }; + + if (batch.inRetry() && batch.retryMsgIdList() != null) { + checkForDuplicate = true; + retryMsgIdList = batch.retryMsgIdList(); + } + ProduceRequest.Builder builderRequest = new ProduceRequest.Builder(batch.topicPartition, batch.records(), + (short) 1, -1, checkForDuplicate, retryMsgIdList); + ClientRequest request = client.newClientRequest(node, builderRequest, time.milliseconds(), true, -1, + callback); + send(request, batch); + } + } + + /** + * Send produce request to destination Handle response generated from send. + */ + public void send(ClientRequest request, ProducerBatch batch) { + ClientResponse response = null; + try { + response = client.send(request, time.milliseconds()); + } catch (Exception e) { + log.error("Exception while sending the produce request for batch " + batch.topicPartition + " " + e, e); + accumulator.reenqueue(batch, System.currentTimeMillis()); + return; + } + + log.info("Batch Send complete, evaluating response " + batch.topicPartition); + ProduceResponse pResponse = (ProduceResponse) response.responseBody(); + ProduceResponse.PartitionResponse partitionResponse = pResponse.getPartitionResponse(); + if (response.wasDisconnected()) { + log.info("Connection to oracle database node " + response.destination() + " was broken. Retry again"); + + if (partitionResponse.getCheckDuplicate()) { + // During retry : check for the first message id. + // If first record is published successfully then entire batch would be + // published successfully. + log.debug("Exception while sending publish request. Check storage before retry."); + if (partitionResponse.msgIds != null && partitionResponse.msgIds.size() > 0) { + log.debug("Check for message id " + partitionResponse.msgIds.get(0).getMsgId()); + batch.setRetryMsgId(partitionResponse.msgIds); + } + } + accumulator.reenqueue(batch, System.currentTimeMillis()); + // Request for MetaData update since the Database instance has went down. 
+ int cuVNo = this.metadata.requestUpdate(); + log.debug("Requested for update of metadata from " + cuVNo); + } else if (partitionResponse.exception != null) { + RuntimeException producerException = partitionResponse.exception; + if (producerException instanceof NotLeaderForPartitionException) { + + log.info("No Owner for Topic Partition " + batch.topicPartition + " retrying."); + this.metadata.requestUpdate(); + } + if (producerException instanceof InvalidTopicException) { + log.info(producerException.getMessage()); + completeResponse(response); + } else { + log.info("Exception while sending batch for partiton " + batch.topicPartition + ". " + + producerException); + } + if (partitionResponse.getCheckDuplicate()) { + // During retry : check for the first message id. + // If first record is published successfully then entire batch would be + // published successfully. + log.debug("Exception while sending publish request. Check storage before retry."); + if (partitionResponse.msgIds != null && partitionResponse.msgIds.size() > 0) { + log.debug("Check for message id " + partitionResponse.msgIds.get(0).getMsgId()); + batch.setRetryMsgId(partitionResponse.msgIds); + } + } + accumulator.reenqueue(batch, System.currentTimeMillis()); + } else { + log.trace("No Exception from send. Completing the batch"); + completeResponse(response); + } + } + + /** + * Handle response using callback in a request + */ + private void completeResponse(ClientResponse response) { + response.onComplete(); + } + /** * Handle a produce response */ private void handleProduceResponse(ClientResponse response, ProducerBatch batch, long now) { - if(response.wasDisconnected()) { - client.disconnected(response.destination(), now); - metadata.requestUpdate(); - } + if (response.wasDisconnected()) { + client.disconnected(metadata.getNodeById(Integer.parseInt(response.destination())), now); + metadata.requestUpdate(); + } + long receivedTimeMs = response.receivedTimeMs(); int correlationId = response.requestHeader().correlationId(); - /*if (response.wasDisconnected()) { - log.trace("Cancelled request with header {} due to node {} being disconnected", requestHeader, - response.destination()); - for (ProducerBatch batch : batches.values()) - completeBatch(batch, new ProduceResponse.PartitionResponse(Errors.NETWORK_EXCEPTION), correlationId, - now, 0L); - } else { - log.trace("Received produce response from node {} with correlation id {}", response.destination(), - correlationId); - if we have a response, parse it - for (Map.Entry entry : response.responses().entrySet()) { - TopicPartition tp = entry.getKey(); - ProduceResponse.PartitionResponse partResp = entry.getValue(); - ProducerBatch batch = batches.get(tp); - completeBatch(batch, partResp, correlationId, now, - receivedTimeMs + response.throttleTimeMs()); - } - this.sensors.recordLatency(response.destination(), response.requestLatencyMs()); - } else { - this is the acks = 0 case, just complete all requests - for (ProducerBatch batch : batches.values()) { - completeBatch(batch, new ProduceResponse.PartitionResponse(Errors.NONE), correlationId, now, 0L); - } - }*/ - ProduceResponse produceResponse = (ProduceResponse)response.responseBody(); + + /* + * if (response.wasDisconnected()) { log. 
+ * trace("Cancelled request with header {} due to node {} being disconnected", + * requestHeader, response.destination()); for (ProducerBatch batch : + * batches.values()) completeBatch(batch, new + * ProduceResponse.PartitionResponse(Errors.NETWORK_EXCEPTION), correlationId, + * now, 0L); } else { + * log.trace("Received produce response from node {} with correlation id {}", + * response.destination(), correlationId); if we have a response, parse it for + * (Map.Entry entry : + * response.responses().entrySet()) { TopicPartition tp = entry.getKey(); + * ProduceResponse.PartitionResponse partResp = entry.getValue(); ProducerBatch + * batch = batches.get(tp); completeBatch(batch, partResp, correlationId, now, + * receivedTimeMs + response.throttleTimeMs()); } + * this.sensors.recordLatency(response.destination(), + * response.requestLatencyMs()); } else { this is the acks = 0 case, just + * complete all requests for (ProducerBatch batch : batches.values()) { + * completeBatch(batch, new ProduceResponse.PartitionResponse(Errors.NONE), + * correlationId, now, 0L); } } + */ + this.sensors.recordLatency(response.destination(), response.requestLatencyMs()); + ProduceResponse produceResponse = (ProduceResponse) response.responseBody(); ProduceResponse.PartitionResponse partResp = produceResponse.getPartitionResponse(); - completeBatch(batch, partResp, correlationId, now, - receivedTimeMs + produceResponse.throttleTimeMs()); - - - - + completeBatch(batch, partResp, correlationId, now, receivedTimeMs + produceResponse.throttleTimeMs()); } - + /** * Complete or retry the given batch of records. * @@ -370,40 +508,106 @@ private void handleProduceResponse(ClientResponse response, ProducerBatch batch, */ private void completeBatch(ProducerBatch batch, ProduceResponse.PartitionResponse response, long correlationId, long now, long throttleUntilTimeMs) { - - Exception exception = response.exception(); + RuntimeException exception = response.exception(); - if(exception != null) { - if(canRetry(batch, response)) { + if (exception != null) { + if (canRetry(batch, response)) { reenqueueBatch(batch, now); - } else failBatch(batch, response, exception); - - if( exception instanceof InvalidMetadataException) { + } else + failBatch(batch, response, exception); + + if (exception instanceof InvalidMetadataException) { metadata.requestUpdate(); } - } else completeBatch(batch, response); - + } else + completeBatch(batch, response); + // Unmute the completed partition. 
- if (guaranteeMessageOrder) - this.accumulator.unmutePartition(batch.topicPartition, throttleUntilTimeMs); + if (guaranteeMessageOrder) { + // this.accumulator.unmutePartition(batch.topicPartition, throttleUntilTimeMs); + this.accumulator.unmutePartition(batch.topicPartition); + } + } - + private void completeBatch(ProducerBatch batch, ProduceResponse.PartitionResponse response) { - if (batch.done(response.msgIds, response.logAppendTime, null)) + + if (batch.done(response.subPartitionId * MessageIdConverter.DEFAULT_SUBPARTITION_SIZE, response.logAppendTime, + response.msgIds, null, null)) this.accumulator.deallocate(batch); } - - private void failBatch(ProducerBatch batch, ProduceResponse.PartitionResponse response, Exception exception) { - failBatch(batch, response.msgIds, response.logAppendTime, exception); + + private void failBatch(ProducerBatch batch, ProduceResponse.PartitionResponse response, + RuntimeException topLevelException) { + if (response.recordErrors == null || response.recordErrors.isEmpty()) { + + failBatch(batch, response.subPartitionId, response.logAppendTime, response.msgIds, topLevelException); + + } else { + Map recordErrorMap = new HashMap<>(response.recordErrors.size()); + for (RecordError recordError : response.recordErrors) { + // The API leaves us with some awkwardness interpreting the errors in the + // response. + // We cannot differentiate between different error cases (such as + // INVALID_TIMESTAMP) + // from the single error code at the partition level, so instead we use + // INVALID_RECORD + // for all failed records and rely on the message to distinguish the cases. + final String errorMessage; + if (recordError.message != null) { + errorMessage = recordError.message; + } else if (response.errorMessage != null) { + errorMessage = response.errorMessage; + } else { + errorMessage = response.error.message(); + } + + // If the batch contained only a single record error, then we can unambiguously + // use the exception type corresponding to the partition-level error code. + if (response.recordErrors.size() == 1) { + recordErrorMap.put(recordError.batchIndex, response.error.exception(errorMessage)); + } else { + recordErrorMap.put(recordError.batchIndex, new InvalidRecordException(errorMessage)); + } + } + + Function recordExceptions = batchIndex -> { + RuntimeException exception = recordErrorMap.get(batchIndex); + if (exception != null) { + return exception; + } else { + // If the response contains record errors, then the records which failed + // validation + // will be present in the response. To avoid confusion for the remaining + // records, we + // return a generic exception. 
+ return new KafkaException("Failed to append record because it was part of a batch " + + "which had one more more invalid records"); + } + }; + + failBatch(batch, response.subPartitionId, response.logAppendTime, response.msgIds, topLevelException, + recordExceptions); + } + } + + private void failBatch(ProducerBatch batch, long baseOffSet, long logAppendTime, List msgIds, + RuntimeException exception) { + this.sensors.recordErrors(batch.topicPartition.topic(), batch.recordCount); + if (batch.done(baseOffSet, logAppendTime, msgIds, exception, batchIndex -> exception)) + this.accumulator.deallocate(batch); } - private void failBatch(ProducerBatch batch, List msgIds, List logAppendTime, Exception exception) { - if (batch.done(msgIds, logAppendTime, exception)) + private void failBatch(ProducerBatch batch, long baseOffSet, long logAppendTime, List msgIds, + RuntimeException exception, Function recordExceptions) { + this.sensors.recordErrors(batch.topicPartition.topic(), batch.recordCount); + if (batch.done(baseOffSet, logAppendTime, msgIds, exception, recordExceptions)) this.accumulator.deallocate(batch); } private void reenqueueBatch(ProducerBatch batch, long currentTimeMs) { this.accumulator.reenqueue(batch, currentTimeMs); + this.sensors.recordRetries(batch.topicPartition.topic(), batch.recordCount); } /** @@ -414,4 +618,187 @@ private boolean canRetry(ProducerBatch batch, ProduceResponse.PartitionResponse return batch.attempts() < this.retries && ((response.exception instanceof RetriableException)); } + public void wakeup() { + try { + synchronized (syncObject) { + syncObject.notifyAll(); + } + } catch (Exception e) { + } + } + + public boolean isRunning() { + return running; + } + + /** + * A collection of sensors for the sender + */ + private static class SenderMetrics { + public final Sensor retrySensor; + public final Sensor errorSensor; + public final Sensor queueTimeSensor; + public final Sensor requestTimeSensor; + public final Sensor recordsPerRequestSensor; + public final Sensor batchSizeSensor; + public final Sensor compressionRateSensor; + public final Sensor maxRecordSizeSensor; + public final Sensor batchSplitSensor; + private final SenderMetricsRegistry metrics; + + private final Time time; + + public SenderMetrics(SenderMetricsRegistry metrics, Metadata metadata, KafkaClient client, Time time) { + this.metrics = metrics; + this.time = time; + + this.batchSizeSensor = metrics.sensor("batch-size"); + this.batchSizeSensor.add(metrics.batchSizeAvg, new Avg()); + this.batchSizeSensor.add(metrics.batchSizeMax, new Max()); + + this.compressionRateSensor = metrics.sensor("compression-rate"); + this.compressionRateSensor.add(metrics.compressionRateAvg, new Avg()); + + this.queueTimeSensor = metrics.sensor("queue-time"); + this.queueTimeSensor.add(metrics.recordQueueTimeAvg, new Avg()); + this.queueTimeSensor.add(metrics.recordQueueTimeMax, new Max()); + + this.requestTimeSensor = metrics.sensor("request-time"); + this.requestTimeSensor.add(metrics.requestLatencyAvg, new Avg()); + this.requestTimeSensor.add(metrics.requestLatencyMax, new Max()); + + this.recordsPerRequestSensor = metrics.sensor("records-per-request"); + this.recordsPerRequestSensor.add(new Meter(metrics.recordSendRate, metrics.recordSendTotal)); + this.recordsPerRequestSensor.add(metrics.recordsPerRequestAvg, new Avg()); + + this.retrySensor = metrics.sensor("record-retries"); + this.retrySensor.add(new Meter(metrics.recordRetryRate, metrics.recordRetryTotal)); + + this.errorSensor = metrics.sensor("errors"); + 
this.errorSensor.add(new Meter(metrics.recordErrorRate, metrics.recordErrorTotal)); + + this.maxRecordSizeSensor = metrics.sensor("record-size"); + this.maxRecordSizeSensor.add(metrics.recordSizeMax, new Max()); + this.maxRecordSizeSensor.add(metrics.recordSizeAvg, new Avg()); + + // method inFlightRequestCount() is unavailabe in Okafka KafkaClient file + +// this.metrics.addMetric(metrics.requestsInFlight, (config, now) -> client.inFlightRequestCount()); + this.metrics.addMetric(metrics.metadataAge, + (config, now) -> (now - metadata.lastSuccessfulUpdate()) / 1000.0); + + this.batchSplitSensor = metrics.sensor("batch-split-rate"); + this.batchSplitSensor.add(new Meter(metrics.batchSplitRate, metrics.batchSplitTotal)); + } + + private void maybeRegisterTopicMetrics(String topic) { + // if one sensor of the metrics has been registered for the topic, + // then all other sensors should have been registered; and vice versa + String topicRecordsCountName = "topic." + topic + ".records-per-batch"; + Sensor topicRecordCount = this.metrics.getSensor(topicRecordsCountName); + if (topicRecordCount == null) { + Map metricTags = Collections.singletonMap("topic", topic); + + topicRecordCount = this.metrics.sensor(topicRecordsCountName); + MetricName rateMetricName = this.metrics.topicRecordSendRate(metricTags); + MetricName totalMetricName = this.metrics.topicRecordSendTotal(metricTags); + topicRecordCount.add(new Meter(rateMetricName, totalMetricName)); + + String topicByteRateName = "topic." + topic + ".bytes"; + Sensor topicByteRate = this.metrics.sensor(topicByteRateName); + rateMetricName = this.metrics.topicByteRate(metricTags); + totalMetricName = this.metrics.topicByteTotal(metricTags); + topicByteRate.add(new Meter(rateMetricName, totalMetricName)); + + String topicCompressionRateName = "topic." + topic + ".compression-rate"; + Sensor topicCompressionRate = this.metrics.sensor(topicCompressionRateName); + MetricName m = this.metrics.topicCompressionRate(metricTags); + topicCompressionRate.add(m, new Avg()); + + String topicRetryName = "topic." + topic + ".record-retries"; + Sensor topicRetrySensor = this.metrics.sensor(topicRetryName); + rateMetricName = this.metrics.topicRecordRetryRate(metricTags); + totalMetricName = this.metrics.topicRecordRetryTotal(metricTags); + topicRetrySensor.add(new Meter(rateMetricName, totalMetricName)); + + String topicErrorName = "topic." + topic + ".record-errors"; + Sensor topicErrorSensor = this.metrics.sensor(topicErrorName); + rateMetricName = this.metrics.topicRecordErrorRate(metricTags); + totalMetricName = this.metrics.topicRecordErrorTotal(metricTags); + topicErrorSensor.add(new Meter(rateMetricName, totalMetricName)); + } + } + + public void updateProduceRequestMetrics(Map> batches) { + long now = time.milliseconds(); + for (List nodeBatch : batches.values()) { + int records = 0; + for (ProducerBatch batch : nodeBatch) { + // register all per-topic metrics at once + String topic = batch.topicPartition.topic(); + maybeRegisterTopicMetrics(topic); + + // per-topic record send rate + String topicRecordsCountName = "topic." + topic + ".records-per-batch"; + Sensor topicRecordCount = Objects.requireNonNull(this.metrics.getSensor(topicRecordsCountName)); + topicRecordCount.record(batch.recordCount); + + // per-topic bytes send rate + String topicByteRateName = "topic." 
+ topic + ".bytes"; + Sensor topicByteRate = Objects.requireNonNull(this.metrics.getSensor(topicByteRateName)); + topicByteRate.record(batch.estimatedSizeInBytes()); + + // per-topic compression rate + String topicCompressionRateName = "topic." + topic + ".compression-rate"; + Sensor topicCompressionRate = Objects + .requireNonNull(this.metrics.getSensor(topicCompressionRateName)); + topicCompressionRate.record(batch.compressionRatio()); + + // global metrics + this.batchSizeSensor.record(batch.estimatedSizeInBytes(), now); + this.queueTimeSensor.record(batch.queueTimeMs(), now); + this.compressionRateSensor.record(batch.compressionRatio()); + this.maxRecordSizeSensor.record(batch.maxRecordSize, now); + records += batch.recordCount; + } + this.recordsPerRequestSensor.record(records, now); + } + +// this.recordsPerRequestSensor.record(totalRecords, now); + + } + + public void recordRetries(String topic, int count) { + long now = time.milliseconds(); + this.retrySensor.record(count, now); + String topicRetryName = "topic." + topic + ".record-retries"; + Sensor topicRetrySensor = this.metrics.getSensor(topicRetryName); + if (topicRetrySensor != null) + topicRetrySensor.record(count, now); + } + + public void recordErrors(String topic, int count) { + long now = time.milliseconds(); + this.errorSensor.record(count, now); + String topicErrorName = "topic." + topic + ".record-errors"; + Sensor topicErrorSensor = this.metrics.getSensor(topicErrorName); + if (topicErrorSensor != null) + topicErrorSensor.record(count, now); + } + + public void recordLatency(String node, long latency) { + long now = time.milliseconds(); + this.requestTimeSensor.record(latency, now); + if (!node.isEmpty()) { + String nodeTimeName = "node-" + node + ".latency"; + Sensor nodeRequestTime = this.metrics.getSensor(nodeTimeName); + if (nodeRequestTime != null) + nodeRequestTime.record(latency, now); + } + } + + void recordBatchSplit() { + this.batchSplitSensor.record(); + } + } } diff --git a/clients/src/main/java/org/oracle/okafka/common/AQException.java b/clients/src/main/java/org/oracle/okafka/common/AQException.java index 90a9ccd..abd583d 100644 --- a/clients/src/main/java/org/oracle/okafka/common/AQException.java +++ b/clients/src/main/java/org/oracle/okafka/common/AQException.java @@ -1,7 +1,7 @@ /* -** OKafka Java Client version 0.8. +** OKafka Java Client version 23.4. ** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. +** Copyright (c) 2019, 2024 Oracle and/or its affiliates. ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. */ diff --git a/clients/src/main/java/org/oracle/okafka/common/Cluster.java b/clients/src/main/java/org/oracle/okafka/common/Cluster.java deleted file mode 100644 index 02feec8..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/Cluster.java +++ /dev/null @@ -1,369 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * 04/20/2020: This file is modified to support Kafka Java Client compatability to Oracle Transactional Event Queues. - * - */ - -package org.oracle.okafka.common; - -import org.oracle.okafka.clients.CommonClientConfigs; -import org.oracle.okafka.common.PartitionInfo; -import org.oracle.okafka.common.config.AbstractConfig; -import org.oracle.okafka.common.utils.Utils; - - -import java.net.InetSocketAddress; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; - -public final class Cluster { - - //private volatile Connection jdbcConn = null; - private final AbstractConfig configs; - private final boolean isBootstrapConfigured; - private final List nodes; - private final Set unauthorizedTopics; - private final Set internalTopics; - private final Node controller; - private final Map partitionsByTopicPartition; - private final Map> partitionsByTopic; - private final Map> availablePartitionsByTopic; - private final Map> partitionsByNode; - private final Map nodesById; - private final ClusterResource clusterResource; - - public Cluster(String clusterId, - Collection nodes, - Collection partitions, - Set unauthorizedTopics, - Set internalTopics, AbstractConfig configs) { - this(clusterId, false, nodes, partitions, unauthorizedTopics, internalTopics, null, configs); - } - - - public Cluster(String clusterId, - Collection nodes, - Collection partitions, - Set unauthorizedTopics, - Set internalTopics, - Node controller, AbstractConfig configs) { - this(clusterId, false, nodes, partitions, unauthorizedTopics, internalTopics, controller, configs); -} - - - public Cluster(String clusterId, - boolean isBootstrapConfigured, - Collection nodes, - Collection partitions, - Set unauthorizedTopics, - Set internalTopics, - Node controller, - AbstractConfig configs) { - this.configs = configs; - this.isBootstrapConfigured = isBootstrapConfigured; - this.clusterResource = new ClusterResource(clusterId); - // make a randomized, unmodifiable copy of the nodes - List copy = new ArrayList<>(nodes); - Collections.shuffle(copy); - this.nodes = Collections.synchronizedList(copy); - this.nodesById = new HashMap<>(); - for (Node node : nodes) - this.nodesById.put(node.id(), node); - - // index the partitions by topic/partition for quick lookup - this.partitionsByTopicPartition = new HashMap<>(partitions.size()); - for (PartitionInfo p : partitions) - this.partitionsByTopicPartition.put(new TopicPartition(p.topic(), p.partition()), p); - - // index the partitions by topic and node respectively, and make the lists - // unmodifiable so we can hand them out in user-facing apis without risk - // of the client modifying the contents - HashMap> partsForTopic = new HashMap<>(); - HashMap> partsForNode = new HashMap<>(); - for (Node node : this.nodes) { - partsForNode.put(node, new ArrayList()); - } - for (PartitionInfo p : partitions) { - if (!partsForTopic.containsKey(p.topic())) - partsForTopic.put(p.topic(), new ArrayList()); - List psTopic = 
partsForTopic.get(p.topic()); - psTopic.add(p); - - if (p.leader() != null) { - List psNode = Utils.notNull(partsForNode.get(p.leader())); - psNode.add(p); - } - } - this.partitionsByTopic = new HashMap<>(partsForTopic.size()); - this.availablePartitionsByTopic = new HashMap<>(partsForTopic.size()); - for (Map.Entry> entry : partsForTopic.entrySet()) { - String topic = entry.getKey(); - List partitionList = entry.getValue(); - this.partitionsByTopic.put(topic, Collections.unmodifiableList(partitionList)); - List availablePartitions = new ArrayList<>(); - for (PartitionInfo part : partitionList) { - if (part.leader() != null) - availablePartitions.add(part); - } - this.availablePartitionsByTopic.put(topic, Collections.synchronizedList(availablePartitions)); - } - this.partitionsByNode = new HashMap<>(partsForNode.size()); - for (Map.Entry> entry : partsForNode.entrySet()) - this.partitionsByNode.put(entry.getKey(), Collections.synchronizedList(entry.getValue())); - - this.unauthorizedTopics = Collections.synchronizedSet(unauthorizedTopics); - this.internalTopics = Collections.synchronizedSet(internalTopics); - this.controller = controller; - } - - /* private synchronized Connection getConnection() { - if(jdbcConn != null) return jdbcConn; - - final List nodeList = configs.getList(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG); - - for(int i=0; i < nodeList.size(); i++) { - String[] node1 = nodeList.get(i).trim().split(":"); - if(node1.length != 2) - throw new ConfigException("Invalid node details:" + nodeList); - String host = node1[0].trim(); - String port = node1[1].trim(); - StringBuilder urlBuilder =new StringBuilder("jdbc:oracle:thin:@(DESCRIPTION=(ADDRESS=(PROTOCOL=tcp)(PORT=" + port +")(HOST=" + host +"))"); - urlBuilder.append("(CONNECT_DATA="); - if(!(configs.getString(CommonClientConfigs.ORACLE_SERVICE_NAME ).isEmpty()) ) { - //Integer nodeid = configs.getInt(CommonClientConfigs.ORACLE_INSTANCE_ID); - urlBuilder.append("(SERVICE_NAME=" + configs.getString(CommonClientConfigs.ORACLE_SERVICE_NAME) +")"); - //(((nodeid == 0) || (nodeid == ConfigDef.NO_DEFAULT_VALUE)) ? "" : ("_i" + Integer.toString(nodeid) + ".regress.rdbms.dev.us.oracle.com"))+")"); - } else { - urlBuilder.append("(SID=" + configs.getString(CommonClientConfigs.ORACLE_SID) + ")"); - } - urlBuilder.append("))"); - try { - OracleDataSource s=new OracleDataSource(); - s.setURL(urlBuilder.toString()); - jdbcConn = s.getConnection(configs.getString(CommonClientConfigs.ORACLE_USER_NAME), configs.getString(CommonClientConfigs.ORACLE_PASSWORD)); - break; - } - catch (SQLException sql) { - if(i == nodeList.size()-1) { - jdbcConn = null; - throw new KafkaException("unable to connect to node", sql); - } - - } - } - return jdbcConn; - } - - public synchronized int getPartitions(String topic) { - if(topic == null) return 0; - if(jdbcConn == null) - jdbcConn = getConnection(); - String query = "begin dbms_aqadm.get_queue_parameter(?,?,?); end;"; - CallableStatement cStmt = null; - int part = 1; - try { - cStmt = jdbcConn.prepareCall(query); - cStmt.setString(1, topic); - cStmt.setString(2, "SHARD_NUM"); - cStmt.registerOutParameter(3, OracleTypes.NUMBER); - cStmt.execute(); - part = cStmt.getInt(3); - cStmt.close(); - } - catch(SQLException sql) { - throw new KafkaException("Failed to get number of partitions :", sql); - } - return part; - }*/ - - - /** - * Create an empty cluster node with no Nodes and no topic-partitions. 
- */ - public static Cluster empty() { - return new Cluster(null, new ArrayList(0), new ArrayList(0), Collections.emptySet(), - Collections.emptySet(), null); - } - - /** - * Create a "bootstrap" cluster using the given list of host/ports - * @param addresses The addresses - * @return A cluster for these hosts/ports - */ - public static Cluster bootstrap(List addresses, AbstractConfig configs, String serviceName, String instanceName) { - List nodes = new ArrayList<>(); - for (InetSocketAddress address : addresses) { - if( serviceName != null) - nodes.add(new Node(-1, address.getHostString(), address.getPort(), serviceName, instanceName)); - else - nodes.add(new Node(-1, address.getHostString(), address.getPort(), configs.getString(CommonClientConfigs.ORACLE_SERVICE_NAME), configs.getString(CommonClientConfigs.ORACLE_INSTANCE_NAME))); - break; - } - return new Cluster(null, true, nodes, new ArrayList(0), Collections.emptySet(), Collections.emptySet(), null, configs); - } - - /** - * Return a copy of this cluster combined with `partitions`. - */ - public Cluster withPartitions(Map partitions) { - Map combinedPartitions = new HashMap<>(this.partitionsByTopicPartition); - combinedPartitions.putAll(partitions); - return new Cluster(clusterResource.clusterId(), this.nodes, combinedPartitions.values(), - new HashSet<>(this.unauthorizedTopics), new HashSet<>(this.internalTopics), this.controller, null); - } - - public AbstractConfig getConfigs() { - return this.configs; - } - /** - * @return The known set of Nodes - */ - public List nodes() { - return this.nodes; - } - - /** - * Get the node by the node id (or null if no such node exists) - * @param id The id of the node - * @return The node, or null if no such node exists - */ - public Node nodeById(int id) { - return this.nodesById.get(id); - } - - /** - * Get the current leader for the given topic-partition - * @param topicPartition The topic and partition we want to know the leader for - * @return The Node that is the leader for this topic-partition, or null if there is currently no leader - */ - public Node leaderFor(TopicPartition topicPartition) { - - PartitionInfo info = partitionsByTopicPartition.get(topicPartition); - if (info == null) - return null; - else - return info.leader(); - } - - public Node leader() { - return nodes.size()!=0 ? nodes.get(0) : null; - } - - /** - * Get the metadata for the specified partition - * @param topicPartition The topic and partition to fetch info for - * @return The metadata about the given topic and partition - */ - public PartitionInfo partition(TopicPartition topicPartition) { - return partitionsByTopicPartition.get(topicPartition); - } - - /** - * Get the list of partitions for this topic - * @param topic The topic name - * @return A list of partitions - */ - public List partitionsForTopic(String topic) { - List parts = this.partitionsByTopic.get(topic); - return (parts == null) ? Collections.emptyList() : parts; - } - - /** - * Get the number of partitions for the given topic - * @param topic The topic to get the number of partitions for - * @return The number of partitions or null if there is no corresponding metadata - */ - public Integer partitionCountForTopic(String topic) { - List partitions = this.partitionsByTopic.get(topic); - return partitions == null ? 
null : partitions.size(); - } - /** - * Get the list of available partitions for this topic - * @param topic The topic name - * @return A list of partitions - */ - public List availablePartitionsForTopic(String topic) { - List parts = this.availablePartitionsByTopic.get(topic); - return (parts == null) ? Collections.emptyList() : parts; - } - - /** - * Get the list of partitions whose leader is this Node - * @param node The Node - * @return A list of partitions - */ - public List partitionsForNode(Node node) { - List parts = this.partitionsByNode.get(node); - return (parts == null) ? Collections.emptyList() : parts; - } - - /** - * Get all topics. - * @return a set of all topics - */ - public Set topics() { - return this.partitionsByTopic.keySet(); - } - public Set unauthorizedTopics() { - return unauthorizedTopics; - } - - public Set internalTopics() { - return internalTopics; - } - - public boolean isBootstrapConfigured() { - return isBootstrapConfigured; - } - - public ClusterResource clusterResource() { - return clusterResource; - } - - public Node controller() { - return leader(); - } - - @Override - public String toString() { - return "Cluster(id = " + clusterResource.clusterId() + ", nodes = " + this.nodes + - ", partitions = " + this.partitionsByTopicPartition.values() + ", controller = " + controller + ")"; - } - - /*public void close() throws SQLException { - if(jdbcConn != null) { - jdbcConn.close(); - } - - }*/ - -} diff --git a/clients/src/main/java/org/oracle/okafka/common/ClusterResource.java b/clients/src/main/java/org/oracle/okafka/common/ClusterResource.java deleted file mode 100644 index f1b0c55..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/ClusterResource.java +++ /dev/null @@ -1,47 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.oracle.okafka.common; - - -/** - * The ClusterResource class encapsulates metadata for a TEQ/oracle db cluster. - * This feature is not yet supported. 
- */ -public class ClusterResource { - - private final String clusterId; - - public ClusterResource(String clusterId) { - this.clusterId = clusterId; - } - - public String clusterId() { - return clusterId; - } - - @Override - public String toString() { - return "ClusterResource(clusterId=" + clusterId + ")"; - } -} diff --git a/clients/src/main/java/org/oracle/okafka/common/ClusterResourceListener.java b/clients/src/main/java/org/oracle/okafka/common/ClusterResourceListener.java deleted file mode 100644 index bf7ce16..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/ClusterResourceListener.java +++ /dev/null @@ -1,35 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.oracle.okafka.common; - -/** - * This feature is not yet supported. - */ -public interface ClusterResourceListener { - /** - * A callback method that a user can implement to get updates for {@link ClusterResource}. - * @param clusterResource cluster metadata - */ - void onUpdate(ClusterResource clusterResource); -} diff --git a/clients/src/main/java/org/oracle/okafka/common/Configurable.java b/clients/src/main/java/org/oracle/okafka/common/Configurable.java deleted file mode 100644 index 5bf57f4..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/Configurable.java +++ /dev/null @@ -1,38 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.oracle.okafka.common; - -import java.util.Map; - -/** - * A Mix-in style interface for classes that are instantiated by reflection and need to take configuration parameters - */ -public interface Configurable { - - /** - * Configure this class with the given key-value pairs - */ - void configure(Map configs); - -} diff --git a/clients/src/main/java/org/oracle/okafka/common/KafkaException.java b/clients/src/main/java/org/oracle/okafka/common/KafkaException.java deleted file mode 100644 index 5592508..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/KafkaException.java +++ /dev/null @@ -1,49 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.oracle.okafka.common; - -/** - * The base class of all other Kafka exceptions - */ -public class KafkaException extends RuntimeException { - - private final static long serialVersionUID = 1L; - - public KafkaException(String message, Throwable cause) { - super(message, cause); - } - - public KafkaException(String message) { - super(message); - } - - public KafkaException(Throwable cause) { - super(cause); - } - - public KafkaException() { - super(); - } - -} diff --git a/clients/src/main/java/org/oracle/okafka/common/KafkaFuture.java b/clients/src/main/java/org/oracle/okafka/common/KafkaFuture.java deleted file mode 100644 index 4606f4e..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/KafkaFuture.java +++ /dev/null @@ -1,212 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.oracle.okafka.common; - -import java.util.concurrent.ExecutionException; -import java.util.concurrent.Future; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; - -import org.oracle.okafka.common.annotation.InterfaceStability; -import org.oracle.okafka.common.internals.KafkaFutureImpl; - -/** - * A flexible future which supports call chaining and other asynchronous programming patterns. This will - * eventually become a thin shim on top of Java 8's CompletableFuture. - * - * The API for this class is still evolving and we may break compatibility in minor releases, if necessary. - */ -@InterfaceStability.Evolving -public abstract class KafkaFuture implements Future { - /** - * A function which takes objects of type A and returns objects of type B. - */ - public interface BaseFunction { - B apply(A a); - } - - /** - * A function which takes objects of type A and returns objects of type B. - * - * Prefer the functional interface {@link BaseFunction} over the class {@link Function}. This class is here for - * backwards compatibility reasons and might be deprecated/removed in a future release. - */ - public static abstract class Function implements BaseFunction { } - - /** - * A consumer of two different types of object. - */ - public interface BiConsumer { - void accept(A a, B b); - } - - private static class AllOfAdapter implements BiConsumer { - private int remainingResponses; - private KafkaFuture future; - - public AllOfAdapter(int remainingResponses, KafkaFuture future) { - this.remainingResponses = remainingResponses; - this.future = future; - maybeComplete(); - } - - @Override - public synchronized void accept(R newValue, Throwable exception) { - if (remainingResponses <= 0) - return; - if (exception != null) { - remainingResponses = 0; - future.completeExceptionally(exception); - } else { - remainingResponses--; - maybeComplete(); - } - } - - private void maybeComplete() { - if (remainingResponses <= 0) - future.complete(null); - } - } - - /** - * Returns a new KafkaFuture that is already completed with the given value. - */ - public static KafkaFuture completedFuture(U value) { - KafkaFuture future = new KafkaFutureImpl(); - future.complete(value); - return future; - } - - /** - * Returns a new KafkaFuture that is completed when all the given futures have completed. If - * any future throws an exception, the returned future returns it. If multiple futures throw - * an exception, which one gets returned is arbitrarily chosen. - */ - public static KafkaFuture allOf(KafkaFuture... futures) { - KafkaFuture allOfFuture = new KafkaFutureImpl<>(); - AllOfAdapter allOfWaiter = new AllOfAdapter<>(futures.length, allOfFuture); - for (KafkaFuture future : futures) { - future.addWaiter(allOfWaiter); - } - return allOfFuture; - } - - /** - * Returns a new KafkaFuture that, when this future completes normally, is executed with this - * futures's result as the argument to the supplied function. - * - * The function may be invoked by the thread that calls {@code thenApply} or it may be invoked by the thread that - * completes the future. - */ - public abstract KafkaFuture thenApply(BaseFunction function); - - /** - * @see KafkaFuture#thenApply(BaseFunction) - * - * Prefer {@link KafkaFuture#thenApply(BaseFunction)} as this function is here for backwards compatibility reasons - * and might be deprecated/removed in a future release. 
- */ - public abstract KafkaFuture thenApply(Function function); - - /** - * Returns a new KafkaFuture with the same result or exception as this future, that executes the given action - * when this future completes. - * - * When this future is done, the given action is invoked with the result (or null if none) and the exception - * (or null if none) of this future as arguments. - * - * The returned future is completed when the action returns. - * The supplied action should not throw an exception. However, if it does, the following rules apply: - * if this future completed normally but the supplied action throws an exception, then the returned future completes - * exceptionally with the supplied action's exception. - * Or, if this future completed exceptionally and the supplied action throws an exception, then the returned future - * completes exceptionally with this future's exception. - * - * The action may be invoked by the thread that calls {@code whenComplete} or it may be invoked by the thread that - * completes the future. - * - * @param action the action to preform - * @return the new future - */ - public abstract KafkaFuture whenComplete(BiConsumer action); - - protected abstract void addWaiter(BiConsumer action); - /** - * If not already completed, sets the value returned by get() and related methods to the given - * value. - */ - protected abstract boolean complete(T newValue); - - /** - * If not already completed, causes invocations of get() and related methods to throw the given - * exception. - */ - protected abstract boolean completeExceptionally(Throwable newException); - - /** - * If not already completed, completes this future with a CancellationException. Dependent - * futures that have not already completed will also complete exceptionally, with a - * CompletionException caused by this CancellationException. - */ - @Override - public abstract boolean cancel(boolean mayInterruptIfRunning); - - /** - * Waits if necessary for this future to complete, and then returns its result. - */ - @Override - public abstract T get() throws InterruptedException, ExecutionException; - - /** - * Waits if necessary for at most the given time for this future to complete, and then returns - * its result, if available. - */ - @Override - public abstract T get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, - TimeoutException; - - /** - * Returns the result value (or throws any encountered exception) if completed, else returns - * the given valueIfAbsent. - */ - public abstract T getNow(T valueIfAbsent) throws InterruptedException, ExecutionException; - - /** - * Returns true if this CompletableFuture was cancelled before it completed normally. - */ - @Override - public abstract boolean isCancelled(); - - /** - * Returns true if this CompletableFuture completed exceptionally, in any way. - */ - public abstract boolean isCompletedExceptionally(); - - /** - * Returns true if completed in any fashion: normally, exceptionally, or via cancellation. - */ - @Override - public abstract boolean isDone(); -} diff --git a/clients/src/main/java/org/oracle/okafka/common/Metric.java b/clients/src/main/java/org/oracle/okafka/common/Metric.java deleted file mode 100644 index afb17dd..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/Metric.java +++ /dev/null @@ -1,50 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. 
-** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common; - -/** - * A metric tracked for monitoring purposes. Metrics feature is not yet supported. - */ -public interface Metric { - - /** - * A name for this metric - */ - MetricName metricName(); - - /** - * The value of the metric as double if the metric is measurable and `0.0` otherwise. - * - * @deprecated As of kafka 1.0.0, use {@link #metricValue()} instead. This will be removed in a future major release. - */ - @Deprecated - double value(); - - /** - * The value of the metric, which may be measurable or a non-measurable gauge - */ - Object metricValue(); - -} diff --git a/clients/src/main/java/org/oracle/okafka/common/MetricName.java b/clients/src/main/java/org/oracle/okafka/common/MetricName.java deleted file mode 100644 index c0daa19..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/MetricName.java +++ /dev/null @@ -1,110 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common; - -import java.util.Map; - -import org.oracle.okafka.common.utils.Utils; - -/** - * Metrics feature is not yet supported. 
- */ -public final class MetricName { - - private final String name; - private final String group; - private final String description; - private Map tags; - private int hash = 0; - - public MetricName(String name, String group, String description, Map tags) { - this.name = Utils.notNull(name); - this.group = Utils.notNull(group); - this.description = Utils.notNull(description); - this.tags = Utils.notNull(tags); - } - - public String name() { - return this.name; - } - - public String group() { - return this.group; - } - - public Map tags() { - return this.tags; - } - - public String description() { - return this.description; - } - - @Override - public int hashCode() { - if (hash != 0) - return hash; - final int prime = 31; - int result = 1; - result = prime * result + ((group == null) ? 0 : group.hashCode()); - result = prime * result + ((name == null) ? 0 : name.hashCode()); - result = prime * result + ((tags == null) ? 0 : tags.hashCode()); - this.hash = result; - return result; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) - return true; - if (obj == null) - return false; - if (getClass() != obj.getClass()) - return false; - MetricName other = (MetricName) obj; - if (group == null) { - if (other.group != null) - return false; - } else if (!group.equals(other.group)) - return false; - if (name == null) { - if (other.name != null) - return false; - } else if (!name.equals(other.name)) - return false; - if (tags == null) { - if (other.tags != null) - return false; - } else if (!tags.equals(other.tags)) - return false; - return true; - } - - @Override - public String toString() { - return "MetricName [name=" + name + ", group=" + group + ", description=" - + description + ", tags=" + tags + "]"; - } -} diff --git a/clients/src/main/java/org/oracle/okafka/common/MetricNameTemplate.java b/clients/src/main/java/org/oracle/okafka/common/MetricNameTemplate.java deleted file mode 100644 index 01c7d8d..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/MetricNameTemplate.java +++ /dev/null @@ -1,98 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common; - -import java.util.Collections; -import java.util.LinkedHashSet; -import java.util.Objects; -import java.util.Set; - -import org.oracle.okafka.common.utils.Utils; - -/** - * Metrics Feature is not yet supported. 
- */ -public class MetricNameTemplate { - private final String name; - private final String group; - private final String description; - private LinkedHashSet tags; - - public MetricNameTemplate(String name, String group, String description, Set tagsNames) { - this.name = Utils.notNull(name); - this.group = Utils.notNull(group); - this.description = Utils.notNull(description); - this.tags = new LinkedHashSet<>(Utils.notNull(tagsNames)); - } - - public MetricNameTemplate(String name, String group, String description, String... tagsNames) { - this(name, group, description, getTags(tagsNames)); - } - - private static LinkedHashSet getTags(String... keys) { - LinkedHashSet tags = new LinkedHashSet<>(); - - Collections.addAll(tags, keys); - - return tags; - } - - public String name() { - return this.name; - } - - public String group() { - return this.group; - } - - public String description() { - return this.description; - } - - public Set tags() { - return tags; - } - - @Override - public int hashCode() { - return Objects.hash(name, group, tags); - } - - @Override - public boolean equals(Object o) { - if (this == o) - return true; - if (o == null || getClass() != o.getClass()) - return false; - MetricNameTemplate other = (MetricNameTemplate) o; - return Objects.equals(name, other.name) && Objects.equals(group, other.group) && - Objects.equals(tags, other.tags); - } - - @Override - public String toString() { - return String.format("name=%s, group=%s, tags=%s", name, group, tags); - } -} diff --git a/clients/src/main/java/org/oracle/okafka/common/Node.java b/clients/src/main/java/org/oracle/okafka/common/Node.java index ee20b8b..43ee9f4 100644 --- a/clients/src/main/java/org/oracle/okafka/common/Node.java +++ b/clients/src/main/java/org/oracle/okafka/common/Node.java @@ -1,7 +1,7 @@ /* -** OKafka Java Client version 0.8. +** OKafka Java Client version 23.4. ** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. +** Copyright (c) 2019, 2024 Oracle and/or its affiliates. ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. */ @@ -24,15 +24,17 @@ package org.oracle.okafka.common; -public class Node { +public class Node extends org.apache.kafka.common.Node{ private static final Node NO_NODE = new Node(-1, "", -1, "", ""); - private final int id; - private final String idString; - private final String instanceName; - private final String host; - private final int port; - private final String serviceName; + private int id; + private String idString; + private String instanceName; + private String host; + private int port; + private String serviceName; + private String user; + private String protocol; // Cache hashCode as it is called in performance sensitive parts of the code (e.g. 
RecordAccumulator.ready) private Integer hash; @@ -46,14 +48,19 @@ public Node(int id, String host, int port, String serviceName) { } public Node(int id, String host, int port, String serviceName, String instanceName) { + super(id, host, port); + if(id<=0) id = 0; this.id = id; - this.idString = "instance"+id; - this.host = host; + this.idString = "INSTANCE_"+id; + if(host != null) + this.host = host.toUpperCase(); this.port = port; - this.serviceName = serviceName; - this.instanceName = instanceName; + if(serviceName != null) + this.serviceName = serviceName.toUpperCase(); + if(instanceName != null) + this.instanceName = instanceName.toUpperCase(); } public static Node noNode() { @@ -70,6 +77,37 @@ public boolean isEmpty() { public int id() { return id; } + + public void setId(int newId) + { + if(id <=0) + { + id = newId; + } + } + + public void setService(String _service) + { + if(serviceName==null) + serviceName = _service.toUpperCase(); + } + + public void setInstanceName(String _instanceName) + { + if(instanceName == null || instanceName.length()==0) + instanceName = _instanceName.toUpperCase(); + } + + public void setUser(String _user) { + if(user == null || user.length() == 0 ) + this.user = _user.toUpperCase(); + } + + public void setProtocol(String _protocol) + { + if(protocol == null) + protocol = _protocol.toUpperCase(); + } /** * The host name for this Node @@ -103,32 +141,55 @@ public String instanceName() { public String idString() { return idString; } + + public String user() + { + return user; + } + + public String protocol() + { + return protocol; + } + @Override public int hashCode() { - Integer h = this.hash; - if (h == null) { - int result = 31 + (((host == null) || host.isEmpty()) ? 0 : host.hashCode()); - result = 31 * result + id; - result = 31 * result + port; - result = 31 * result +(((serviceName == null) || serviceName.isEmpty()) ? 0 : serviceName.hashCode()); - result = 31 * result +(((instanceName == null) || instanceName.isEmpty()) ? 0 : instanceName.hashCode()); - this.hash = result; - return result; - } else { - return h; - } + Integer h = this.hash; + if (h == null) { + h = getHashCode(); + } + return h; + } + + public int updateHashCode() + { + this.hash = getHashCode(); + return this.hash; + } + + private int getHashCode() + { + int result = 31 + (((host == null) || host.isEmpty()) ? 0 : host.hashCode()); + result = 31 * result + id; + result = 31 * result + port; + result = 31 * result +(((serviceName == null) || serviceName.isEmpty()) ? 0 : serviceName.hashCode()); + result = 31 * result +(((instanceName == null) || instanceName.isEmpty()) ? 0 : instanceName.hashCode()); + result = 31 * result +(((user == null) || user.isEmpty()) ? 0 : user.hashCode()); + return result; } @Override public boolean equals(Object obj) { if (this == obj) return true; + if (obj == null || getClass() != obj.getClass()) return false; + Node other = (Node) obj; return (host== null ? other.host() == null : host.equals(other.host())) && - id == other.id() && + // id == other.id() && port == other.port() && (serviceName == null ? other.serviceName() == null : serviceName.equals(other.serviceName())) && (instanceName == null ? other.instanceName() == null : instanceName.equals(other.instanceName())); @@ -138,6 +199,7 @@ public boolean equals(Object obj) { public String toString() { String str = ((serviceName != null) && !serviceName.equals("")) ? serviceName : ""; String str2 = ((instanceName != null) && !instanceName.equals("")) ? 
instanceName : ""; - return id + ":" + host + ":" + port + ":" + str + ":" + str2; + String str3 = ((user != null) && !user.equals("")) ? user : ""; + return id + ":" + host + ":" + port + ":" + str + ":" + str2+ ":" + str3; } -} \ No newline at end of file +} diff --git a/clients/src/main/java/org/oracle/okafka/common/PartitionInfo.java b/clients/src/main/java/org/oracle/okafka/common/PartitionInfo.java deleted file mode 100644 index a2cdb6f..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/PartitionInfo.java +++ /dev/null @@ -1,100 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common; - -/** - * This is used to describe per-partition state in the MetadataResponse. - */ -public class PartitionInfo { - - private final String topic; - private final int partition; - private final Node leader; - - public PartitionInfo(String topic, int partition, Node leader, Node[] replicas, Node[] inSyncReplicas) { - this(topic, partition, leader, replicas, inSyncReplicas, new Node[0]); - } - - public PartitionInfo(String topic, int partition, Node leader, Node[] replicas, Node[] inSyncReplicas, Node[] offlineReplicas) { - this.topic = topic; - this.partition = partition; - this.leader = leader; - } - - /** - * The topic name - */ - public String topic() { - return topic; - } - - /** - * The partition id - */ - public int partition() { - return partition; - } - - /** - * The Node currently acting as a leader for this partition or null if there is no leader - */ - public Node leader() { - return leader; - } - - /** - * All oracle db instances run on same disk, so there is no need of replicating data among instances. - * @return null since there is no replication. - */ - public Node[] replicas() { - return null; - } - - /** - * All oracle db instances run on same disk, so there is no need of replicating data among instances. - * @return null since there is no replication. - */ - public Node[] inSyncReplicas() { - return null; - } - - /** - * All oracle db instances run on same disk, so there is no need of replicating data among instances. - * @return null since there is no replication. - */ - public Node[] offlineReplicas() { - return null; - } - - @Override - public String toString() { - return String.format("Partition(topic = %s, partition = %d, leader = %s)", - topic, - partition, - leader == null ? 
"none" : leader.idString() ); - - } - -} diff --git a/clients/src/main/java/org/oracle/okafka/common/TopicPartition.java b/clients/src/main/java/org/oracle/okafka/common/TopicPartition.java deleted file mode 100644 index bcd566b..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/TopicPartition.java +++ /dev/null @@ -1,86 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common; - -import java.io.Serializable; - -/** - * A topic name and partition number - */ -public final class TopicPartition implements Serializable{ - - private int hash = 0; - private final int partition; - private final String topic; - - public TopicPartition(String topic, int partition) { - this.partition = partition; - this.topic = topic; - } - - public int partition() { - return partition; - } - - public String topic() { - return topic; - } - - @Override - public int hashCode() { - if (hash != 0) - return hash; - final int prime = 31; - int result = 1; - result = prime * result + partition; - result = prime * result + ((topic == null) ? 0 : topic.hashCode()); - this.hash = result; - return result; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) - return true; - if (obj == null) - return false; - if (getClass() != obj.getClass()) - return false; - TopicPartition other = (TopicPartition) obj; - if (partition != other.partition) - return false; - if (topic == null) { - if (other.topic != null) - return false; - } else if (!topic.equals(other.topic)) - return false; - return true; - } - - @Override - public String toString() { - return topic + "-" + partition; - } -} diff --git a/clients/src/main/java/org/oracle/okafka/common/TopicPartitionReplica.java b/clients/src/main/java/org/oracle/okafka/common/TopicPartitionReplica.java deleted file mode 100644 index c3552f3..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/TopicPartitionReplica.java +++ /dev/null @@ -1,99 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.oracle.okafka.common; - -import java.io.Serializable; - - -/** - * The topic name, partition number and the brokerId of the replica. This class is added only to represent public api. - * This class has no meaning in TEQ as there is no replication. - */ -public final class TopicPartitionReplica implements Serializable { - - private int hash = 0; - private final int brokerId; - private final int partition; - private final String topic; - - public TopicPartitionReplica(String topic, int partition, int brokerId) { - this.topic = topic; - this.partition = partition; - this.brokerId = brokerId; - } - - public String topic() { - return topic; - } - - public int partition() { - return partition; - } - - public int brokerId() { - return brokerId; - } - - @Override - public int hashCode() { - if (hash != 0) { - return hash; - } - final int prime = 31; - int result = 1; - result = prime * result + ((topic == null) ? 0 : topic.hashCode()); - result = prime * result + partition; - result = prime * result + brokerId; - this.hash = result; - return result; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) - return true; - if (obj == null) - return false; - if (getClass() != obj.getClass()) - return false; - TopicPartitionReplica other = (TopicPartitionReplica) obj; - if (partition != other.partition) - return false; - if (brokerId != other.brokerId) - return false; - if (topic == null) { - if (other.topic != null) { - return false; - } - } else if (!topic.equals(other.topic)) { - return false; - } - return true; - } - - @Override - public String toString() { - return String.format("%s-%d-%d", topic, partition, brokerId); - } -} diff --git a/clients/src/main/java/org/oracle/okafka/common/acl/AclBinding.java b/clients/src/main/java/org/oracle/okafka/common/acl/AclBinding.java deleted file mode 100644 index 75d72b1..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/acl/AclBinding.java +++ /dev/null @@ -1,39 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -/* - * 04/20/2020: This file is modified to support Kafka Java Client compatability to Oracle Transactional Event Queues. - * - */ - -package org.oracle.okafka.common.acl; - -import org.oracle.okafka.common.annotation.InterfaceStability; - -/** - * Acl feature is not yet supported. - */ -@InterfaceStability.Evolving -public class AclBinding { -} diff --git a/clients/src/main/java/org/oracle/okafka/common/acl/AclBindingFilter.java b/clients/src/main/java/org/oracle/okafka/common/acl/AclBindingFilter.java deleted file mode 100644 index 28747c5..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/acl/AclBindingFilter.java +++ /dev/null @@ -1,40 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * 04/20/2020: This file is modified to support Kafka Java Client compatability to Oracle Transactional Event Queues. - * - */ - -package org.oracle.okafka.common.acl; - -import org.oracle.okafka.common.annotation.InterfaceStability; - -/** - * Acl feature is not yet supported. - */ -@InterfaceStability.Evolving -public class AclBindingFilter { - -} diff --git a/clients/src/main/java/org/oracle/okafka/common/annotation/InterfaceStability.java b/clients/src/main/java/org/oracle/okafka/common/annotation/InterfaceStability.java deleted file mode 100644 index a89a072..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/annotation/InterfaceStability.java +++ /dev/null @@ -1,61 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.oracle.okafka.common.annotation; - -import java.lang.annotation.Documented; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; - -/** - * Annotation to inform users of how much to rely on a particular package, class or method not changing over time. - * Currently the stability can be {@link Stable}, {@link Evolving} or {@link Unstable}. - */ -@InterfaceStability.Evolving -public class InterfaceStability { - /** - * Compatibility is maintained in major, minor and patch releases with one exception: compatibility may be broken - * in a major release (i.e. 0.m) for APIs that have been deprecated for at least one major/minor release cycle. - * In cases where the impact of breaking compatibility is significant, there is also a minimum deprecation period - * of one year. - * - * This is the default stability level for public APIs that are not annotated. - */ - @Documented - @Retention(RetentionPolicy.RUNTIME) - public @interface Stable { } - - /** - * Compatibility may be broken at minor release (i.e. m.x). - */ - @Documented - @Retention(RetentionPolicy.RUNTIME) - public @interface Evolving { } - - /** - * No guarantee is provided as to reliability or stability across any level of release granularity. - */ - @Documented - @Retention(RetentionPolicy.RUNTIME) - public @interface Unstable { } -} diff --git a/clients/src/main/java/org/oracle/okafka/common/config/AbstractConfig.java b/clients/src/main/java/org/oracle/okafka/common/config/AbstractConfig.java deleted file mode 100644 index 677305c..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/config/AbstractConfig.java +++ /dev/null @@ -1,318 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * 04/20/2020: This file is modified to support Kafka Java Client compatability to Oracle Transactional Event Queues. - * - */ - -package org.oracle.okafka.common.config; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.HashMap; -import java.util.Set; -import java.util.TreeMap; - -import org.oracle.okafka.common.Configurable; -import org.oracle.okafka.common.KafkaException; -import org.oracle.okafka.common.utils.Utils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * A convenient base class for configurations to extend. 
- * This class holds both the original configuration that was provided as well as the parsed - * - */ -public class AbstractConfig { - - private final Logger log = LoggerFactory.getLogger(getClass()); - - /* configs for which values have been requested, used to detect unused configs */ - private final Set used; - - /* the original values passed in by the user */ - private final Map originals; - - /* the parsed values */ - private final Map values; - - private final ConfigDef definition; - - @SuppressWarnings("unchecked") - public AbstractConfig(ConfigDef definition, Map originals, boolean doLog) { - /* check that all the keys are really strings */ - for (Map.Entry entry : originals.entrySet()) - if (!(entry.getKey() instanceof String)) - throw new ConfigException(entry.getKey().toString(), entry.getValue(), "Key must be a string."); - this.originals = (Map) originals; - this.values = definition.parse(this.originals); - Map configUpdates = postProcessParsedConfig(Collections.unmodifiableMap(this.values)); - for (Map.Entry update : configUpdates.entrySet()) { - this.values.put(update.getKey(), update.getValue()); - } - definition.parse(this.values); - this.used = Collections.synchronizedSet(new HashSet()); - this.definition = definition; - if (doLog) - logAll(); - } - - public AbstractConfig(ConfigDef definition, Map originals) { - this(definition, originals, true); - } - - public Object get(String key) - { - if(!values.containsKey(key)) - throw new ConfigException(String.format("Unknown configuration %s", key)); - used.add(key); - return values.get(key); - } - - public Integer getInt(String key) - { - return (Integer)get(key); - } - - public Long getLong(String key) - { - return (Long)get(key); - } - - public Short getShort(String key) - { - return (Short)get(key); - } - public String getString(String key) - { - return (String)get(key); - } - - public Class getClass(String key) - { - return (Class)get(key); - } - - @SuppressWarnings("unchecked") - public List getList(String key) { - return (List) get(key); - } - - public Boolean getBoolean(String key) { - return (Boolean) get(key); - } - - public void ignore(String key) { - used.add(key); - } - - public Map originals() { - Map copy = new RecordingMap<>(); - copy.putAll(originals); - return copy; - } - - protected Map postProcessParsedConfig(Map parsedValues) { - return Collections.emptyMap(); - } - - /** - * Log warnings for any unused configurations - */ - public void logUnused() { - for (String key : unused()) - log.warn("The configuration '{}' was supplied but isn't a known config.", key); - } - - public Set unused() { - Set keys = new HashSet<>(originals.keySet()); - keys.removeAll(used); - return keys; - } - - private void logAll() { - StringBuilder b = new StringBuilder(); - b.append(getClass().getSimpleName()); - b.append(" values: "); - b.append(Utils.NL); - - for (Map.Entry entry : new TreeMap<>(this.values).entrySet()) { - b.append('\t'); - b.append(entry.getKey()); - b.append(" = "); - b.append(entry.getValue()); - b.append(Utils.NL); - } - log.info(b.toString()); - } - - /** - * Get a configured instance of the give class specified by the given configuration key. If the object implements - * Configurable configure it using the configuration. 
- * - * @param key The configuration key for the class - * @param t The interface the class should implement - * @return A configured instance of the class - */ - public T getConfiguredInstance(String key, Class t) { - Class c = getClass(key); - if (c == null) - return null; - Object o = Utils.newInstance(c); - if (!t.isInstance(o)) - throw new KafkaException(c.getName() + " is not an instance of " + t.getName()); - if (o instanceof Configurable) - ((Configurable) o).configure(originals()); - return t.cast(o); - } - - /** - * Get a list of configured instances of the given class specified by the given configuration key. The configuration - * may specify either null or an empty string to indicate no configured instances. In both cases, this method - * returns an empty list to indicate no configured instances. - * @param key The configuration key for the class - * @param t The interface the class should implement - * @return The list of configured instances - */ - public List getConfiguredInstances(String key, Class t) { - return getConfiguredInstances(key, t, Collections.emptyMap()); - } - - /** - * Get a list of configured instances of the given class specified by the given configuration key. The configuration - * may specify either null or an empty string to indicate no configured instances. In both cases, this method - * returns an empty list to indicate no configured instances. - * @param key The configuration key for the class - * @param t The interface the class should implement - * @param configOverrides Configuration overrides to use. - * @return The list of configured instances - */ - public List getConfiguredInstances(String key, Class t, Map configOverrides) { - return getConfiguredInstances(getList(key), t, configOverrides); - } - - - /** - * Get a list of configured instances of the given class specified by the given configuration key. The configuration - * may specify either null or an empty string to indicate no configured instances. In both cases, this method - * returns an empty list to indicate no configured instances. - * @param classNames The list of class names of the instances to create - * @param t The interface the class should implement - * @param configOverrides Configuration overrides to use. 
- * @return The list of configured instances - */ - public List getConfiguredInstances(List classNames, Class t, Map configOverrides) { - List objects = new ArrayList(); - if (classNames == null) - return objects; - Map configPairs = originals(); - configPairs.putAll(configOverrides); - for (Object klass : classNames) { - Object o; - if (klass instanceof String) { - try { - o = Utils.newInstance((String) klass, t); - } catch (ClassNotFoundException e) { - throw new KafkaException(klass + " ClassNotFoundException exception occurred", e); - } - } else if (klass instanceof Class) { - o = Utils.newInstance((Class) klass); - } else - throw new KafkaException("List contains element of type " + klass.getClass().getName() + ", expected String or Class"); - if (!t.isInstance(o)) - throw new KafkaException(klass + " is not an instance of " + t.getName()); - if (o instanceof Configurable) - ((Configurable) o).configure(configPairs); - objects.add(t.cast(o)); - } - return objects; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - - AbstractConfig that = (AbstractConfig) o; - - return originals.equals(that.originals); - } - - @Override - public int hashCode() { - return originals.hashCode(); - } - - /** - * Marks keys retrieved via `get` as used. This is needed because `Configurable.configure` takes a `Map` instead - * of an `AbstractConfig` and we can't change that without breaking public API like `Partitioner`. - */ - private class RecordingMap extends HashMap { - - private final String prefix; - private final boolean withIgnoreFallback; - - RecordingMap() { - this("", false); - } - - RecordingMap(String prefix, boolean withIgnoreFallback) { - this.prefix = prefix; - this.withIgnoreFallback = withIgnoreFallback; - } - - RecordingMap(Map m) { - this(m, "", false); - } - - RecordingMap(Map m, String prefix, boolean withIgnoreFallback) { - super(m); - this.prefix = prefix; - this.withIgnoreFallback = withIgnoreFallback; - } - - @Override - public V get(Object key) { - if (key instanceof String) { - String stringKey = (String) key; - String keyWithPrefix; - if (prefix.isEmpty()) { - keyWithPrefix = stringKey; - } else { - keyWithPrefix = prefix + stringKey; - } - ignore(keyWithPrefix); - if (withIgnoreFallback) - ignore(stringKey); - } - return super.get(key); - } - } - -} diff --git a/clients/src/main/java/org/oracle/okafka/common/config/ConfigDef.java b/clients/src/main/java/org/oracle/okafka/common/config/ConfigDef.java deleted file mode 100644 index 5a8ebc7..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/config/ConfigDef.java +++ /dev/null @@ -1,1359 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.config; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.Comparator; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; -import java.util.LinkedHashMap; -import java.util.LinkedList; -import java.util.List; -import java.util.Locale; -import java.util.Map; -import java.util.Set; - -import org.oracle.okafka.common.config.types.Password; -import org.oracle.okafka.common.utils.Utils; - -/** - * This class is used for specifying the set of expected configurations. For each configuration, you can specify - * the name, the type, the default value, the documentation, the group information, the order in the group, - * the width of the configuration value and the name suitable for display in the UI. - * - * You can provide special validation logic used for single configuration validation by overriding {@link Validator}. - * - * Moreover, you can specify the dependents of a configuration. The valid values and visibility of a configuration - * may change according to the values of other configurations. You can override {@link Recommender} to get valid - * values and set visibility of a configuration given the current configuration values. - * - *

- * To use the class:
- *
- * ConfigDef defs = new ConfigDef();
- *
- * defs.define("config_with_default", Type.STRING, "default string value", "Configuration with default value.");
- * defs.define("config_with_validator", Type.INT, 42, Range.atLeast(0), "Configuration with user provided validator.");
- * defs.define("config_with_dependents", Type.INT, "Configuration with dependents.", "group", 1, "Config With Dependents", Arrays.asList("config_with_default","config_with_validator"));
- *
- * Map<String, String> props = new HashMap<>();
- * props.put("config_with_default", "some value");
- * props.put("config_with_dependents", "some other value");
- *
- * Map<String, Object> configs = defs.parse(props);
- * // will return "some value"
- * String someConfig = (String) configs.get("config_with_default");
- * // will return default value of 42
- * int anotherConfig = (Integer) configs.get("config_with_validator");
- *
- * To validate the full configuration, use:
- * List<ConfigValue> configs = defs.validate(props);
- *
      - * This class can be used standalone or in combination with {@link AbstractConfig} which provides some additional - * functionality for accessing configs. - */ -public class ConfigDef { - /** - * A unique Java object which represents the lack of a default value. - */ - public static final Object NO_DEFAULT_VALUE = new Object(); - - private final Map configKeys; - private final List groups; - private Set configsWithNoParent; - - public ConfigDef() { - configKeys = new LinkedHashMap<>(); - groups = new LinkedList<>(); - configsWithNoParent = null; - } - - public ConfigDef(ConfigDef base) { - configKeys = new LinkedHashMap<>(base.configKeys); - groups = new LinkedList<>(base.groups); - // It is not safe to copy this from the parent because we may subsequently add to the set of configs and - // invalidate this - configsWithNoParent = null; - } - - /** - * Returns unmodifiable set of properties names defined in this {@linkplain ConfigDef} - * - * @return new unmodifiable {@link Set} instance containing the keys - */ - public Set names() { - return Collections.unmodifiableSet(configKeys.keySet()); - } - - public Map defaultValues() { - Map defaultValues = new HashMap<>(); - for (ConfigKey key : configKeys.values()) { - if (key.defaultValue != NO_DEFAULT_VALUE) - defaultValues.put(key.name, key.defaultValue); - } - return defaultValues; - } - - public ConfigDef define(ConfigKey key) { - if (configKeys.containsKey(key.name)) { - throw new ConfigException("Configuration " + key.name + " is defined twice."); - } - if (key.group != null && !groups.contains(key.group)) { - groups.add(key.group); - } - configKeys.put(key.name, key); - return this; - } - - /** - * Define a new configuration - * @param name the name of the config parameter - * @param type the type of the config - * @param defaultValue the default value to use if this config isn't present - * @param validator the validator to use in checking the correctness of the config - * @param importance the importance of this config - * @param documentation the documentation string for the config - * @param group the group this config belongs to - * @param orderInGroup the order of this config in the group - * @param width the width of the config - * @param displayName the name suitable for display - * @param dependents the configurations that are dependents of this configuration - * @param recommender the recommender provides valid values given the parent configuration values - * @return This ConfigDef so you can chain calls - */ - public ConfigDef define(String name, Type type, Object defaultValue, Validator validator, Importance importance, String documentation, - String group, int orderInGroup, Width width, String displayName, List dependents, Recommender recommender) { - return define(new ConfigKey(name, type, defaultValue, validator, importance, documentation, group, orderInGroup, width, displayName, dependents, recommender, false)); - } - - /** - * Define a new configuration with no custom recommender - * @param name the name of the config parameter - * @param type the type of the config - * @param defaultValue the default value to use if this config isn't present - * @param validator the validator to use in checking the correctness of the config - * @param importance the importance of this config - * @param documentation the documentation string for the config - * @param group the group this config belongs to - * @param orderInGroup the order of this config in the group - * @param width the width of the config - * @param displayName 
the name suitable for display - * @param dependents the configurations that are dependents of this configuration - * @return This ConfigDef so you can chain calls - */ - public ConfigDef define(String name, Type type, Object defaultValue, Validator validator, Importance importance, String documentation, - String group, int orderInGroup, Width width, String displayName, List dependents) { - return define(name, type, defaultValue, validator, importance, documentation, group, orderInGroup, width, displayName, dependents, null); - } - - /** - * Define a new configuration with no dependents - * @param name the name of the config parameter - * @param type the type of the config - * @param defaultValue the default value to use if this config isn't present - * @param validator the validator to use in checking the correctness of the config - * @param importance the importance of this config - * @param documentation the documentation string for the config - * @param group the group this config belongs to - * @param orderInGroup the order of this config in the group - * @param width the width of the config - * @param displayName the name suitable for display - * @param recommender the recommender provides valid values given the parent configuration values - * @return This ConfigDef so you can chain calls - */ - public ConfigDef define(String name, Type type, Object defaultValue, Validator validator, Importance importance, String documentation, - String group, int orderInGroup, Width width, String displayName, Recommender recommender) { - return define(name, type, defaultValue, validator, importance, documentation, group, orderInGroup, width, displayName, Collections.emptyList(), recommender); - } - - /** - * Define a new configuration with no dependents and no custom recommender - * @param name the name of the config parameter - * @param type the type of the config - * @param defaultValue the default value to use if this config isn't present - * @param validator the validator to use in checking the correctness of the config - * @param importance the importance of this config - * @param documentation the documentation string for the config - * @param group the group this config belongs to - * @param orderInGroup the order of this config in the group - * @param width the width of the config - * @param displayName the name suitable for display - * @return This ConfigDef so you can chain calls - */ - public ConfigDef define(String name, Type type, Object defaultValue, Validator validator, Importance importance, String documentation, - String group, int orderInGroup, Width width, String displayName) { - return define(name, type, defaultValue, validator, importance, documentation, group, orderInGroup, width, displayName, Collections.emptyList()); - } - - /** - * Define a new configuration with no special validation logic - * @param name the name of the config parameter - * @param type the type of the config - * @param defaultValue the default value to use if this config isn't present - * @param importance the importance of this config - * @param documentation the documentation string for the config - * @param group the group this config belongs to - * @param orderInGroup the order of this config in the group - * @param width the width of the config - * @param displayName the name suitable for display - * @param dependents the configurations that are dependents of this configuration - * @param recommender the recommender provides valid values given the parent configuration values - * @return This ConfigDef 
so you can chain calls - */ - public ConfigDef define(String name, Type type, Object defaultValue, Importance importance, String documentation, - String group, int orderInGroup, Width width, String displayName, List dependents, Recommender recommender) { - return define(name, type, defaultValue, null, importance, documentation, group, orderInGroup, width, displayName, dependents, recommender); - } - - /** - * Define a new configuration with no special validation logic and no custom recommender - * @param name the name of the config parameter - * @param type the type of the config - * @param defaultValue the default value to use if this config isn't present - * @param importance the importance of this config - * @param documentation the documentation string for the config - * @param group the group this config belongs to - * @param orderInGroup the order of this config in the group - * @param width the width of the config - * @param displayName the name suitable for display - * @param dependents the configurations that are dependents of this configuration - * @return This ConfigDef so you can chain calls - */ - public ConfigDef define(String name, Type type, Object defaultValue, Importance importance, String documentation, - String group, int orderInGroup, Width width, String displayName, List dependents) { - return define(name, type, defaultValue, null, importance, documentation, group, orderInGroup, width, displayName, dependents, null); - } - - /** - * Define a new configuration with no special validation logic and no custom recommender - * @param name the name of the config parameter - * @param type the type of the config - * @param defaultValue the default value to use if this config isn't present - * @param importance the importance of this config - * @param documentation the documentation string for the config - * @param group the group this config belongs to - * @param orderInGroup the order of this config in the group - * @param width the width of the config - * @param displayName the name suitable for display - * @param recommender the recommender provides valid values given the parent configuration values - * @return This ConfigDef so you can chain calls - */ - public ConfigDef define(String name, Type type, Object defaultValue, Importance importance, String documentation, - String group, int orderInGroup, Width width, String displayName, Recommender recommender) { - return define(name, type, defaultValue, null, importance, documentation, group, orderInGroup, width, displayName, Collections.emptyList(), recommender); - } - - /** - * Define a new configuration with no special validation logic, not dependents and no custom recommender - * @param name the name of the config parameter - * @param type the type of the config - * @param defaultValue the default value to use if this config isn't present - * @param importance the importance of this config - * @param documentation the documentation string for the config - * @param group the group this config belongs to - * @param orderInGroup the order of this config in the group - * @param width the width of the config - * @param displayName the name suitable for display - * @return This ConfigDef so you can chain calls - */ - public ConfigDef define(String name, Type type, Object defaultValue, Importance importance, String documentation, - String group, int orderInGroup, Width width, String displayName) { - return define(name, type, defaultValue, null, importance, documentation, group, orderInGroup, width, displayName, 
Collections.emptyList()); - } - - /** - * Define a new configuration with no default value and no special validation logic - * @param name the name of the config parameter - * @param type the type of the config - * @param importance the importance of this config - * @param documentation the documentation string for the config - * @param group the group this config belongs to - * @param orderInGroup the order of this config in the group - * @param width the width of the config - * @param displayName the name suitable for display - * @param dependents the configurations that are dependents of this configuration - * @param recommender the recommender provides valid values given the parent configuration value - * @return This ConfigDef so you can chain calls - */ - public ConfigDef define(String name, Type type, Importance importance, String documentation, String group, int orderInGroup, - Width width, String displayName, List dependents, Recommender recommender) { - return define(name, type, NO_DEFAULT_VALUE, null, importance, documentation, group, orderInGroup, width, displayName, dependents, recommender); - } - - /** - * Define a new configuration with no default value, no special validation logic and no custom recommender - * @param name the name of the config parameter - * @param type the type of the config - * @param importance the importance of this config - * @param documentation the documentation string for the config - * @param group the group this config belongs to - * @param orderInGroup the order of this config in the group - * @param width the width of the config - * @param displayName the name suitable for display - * @param dependents the configurations that are dependents of this configuration - * @return This ConfigDef so you can chain calls - */ - public ConfigDef define(String name, Type type, Importance importance, String documentation, String group, int orderInGroup, - Width width, String displayName, List dependents) { - return define(name, type, NO_DEFAULT_VALUE, null, importance, documentation, group, orderInGroup, width, displayName, dependents, null); - } - - /** - * Define a new configuration with no default value, no special validation logic and no custom recommender - * @param name the name of the config parameter - * @param type the type of the config - * @param importance the importance of this config - * @param documentation the documentation string for the config - * @param group the group this config belongs to - * @param orderInGroup the order of this config in the group - * @param width the width of the config - * @param displayName the name suitable for display - * @param recommender the recommender provides valid values given the parent configuration value - * @return This ConfigDef so you can chain calls - */ - public ConfigDef define(String name, Type type, Importance importance, String documentation, String group, int orderInGroup, - Width width, String displayName, Recommender recommender) { - return define(name, type, NO_DEFAULT_VALUE, null, importance, documentation, group, orderInGroup, width, displayName, Collections.emptyList(), recommender); - } - - /** - * Define a new configuration with no default value, no special validation logic, no dependents and no custom recommender - * @param name the name of the config parameter - * @param type the type of the config - * @param importance the importance of this config - * @param documentation the documentation string for the config - * @param group the group this config belongs to - * @param 
orderInGroup the order of this config in the group - * @param width the width of the config - * @param displayName the name suitable for display - * @return This ConfigDef so you can chain calls - */ - public ConfigDef define(String name, Type type, Importance importance, String documentation, String group, int orderInGroup, - Width width, String displayName) { - return define(name, type, NO_DEFAULT_VALUE, null, importance, documentation, group, orderInGroup, width, displayName, Collections.emptyList()); - } - - /** - * Define a new configuration with no group, no order in group, no width, no display name, no dependents and no custom recommender - * @param name the name of the config parameter - * @param type the type of the config - * @param defaultValue the default value to use if this config isn't present - * @param validator the validator to use in checking the correctness of the config - * @param importance the importance of this config - * @param documentation the documentation string for the config - * @return This ConfigDef so you can chain calls - */ - public ConfigDef define(String name, Type type, Object defaultValue, Validator validator, Importance importance, String documentation) { - return define(name, type, defaultValue, validator, importance, documentation, null, -1, Width.NONE, name); - } - - /** - * Define a new configuration with no special validation logic - * @param name The name of the config parameter - * @param type The type of the config - * @param defaultValue The default value to use if this config isn't present - * @param importance The importance of this config: is this something you will likely need to change. - * @param documentation The documentation string for the config - * @return This ConfigDef so you can chain calls - */ - public ConfigDef define(String name, Type type, Object defaultValue, Importance importance, String documentation) { - return define(name, type, defaultValue, null, importance, documentation); - } - - /** - * Define a new configuration with no default value and no special validation logic - * @param name The name of the config parameter - * @param type The type of the config - * @param importance The importance of this config: is this something you will likely need to change. - * @param documentation The documentation string for the config - * @return This ConfigDef so you can chain calls - */ - public ConfigDef define(String name, Type type, Importance importance, String documentation) { - return define(name, type, NO_DEFAULT_VALUE, null, importance, documentation); - } - - /** - * Define a new internal configuration. Internal configuration won't show up in the docs and aren't - * intended for general use. 
- * @param name The name of the config parameter - * @param type The type of the config - * @param defaultValue The default value to use if this config isn't present - * @param importance - * @return This ConfigDef so you can chain calls - */ - public ConfigDef defineInternal(final String name, final Type type, final Object defaultValue, final Importance importance) { - return define(new ConfigKey(name, type, defaultValue, null, importance, "", "", -1, Width.NONE, name, Collections.emptyList(), null, true)); - } - - /** - * Get the configuration keys - * @return a map containing all configuration keys - */ - public Map configKeys() { - return configKeys; - } - - /** - * Get the groups for the configuration - * @return a list of group names - */ - public List groups() { - return groups; - } - - /** - * Add standard SSL client configuration options. - * @return this - */ - public ConfigDef withClientSslSupport() { - SslConfigs.addClientSslSupport(this); - return this; - } - - /** - * Add standard SASL client configuration options. - * @return this - */ - public ConfigDef withClientSaslSupport() { - //SaslConfigs.addClientSaslSupport(this); - return this; - } - - /** - * Parse and validate configs against this configuration definition. The input is a map of configs. It is expected - * that the keys of the map are strings, but the values can either be strings or they may already be of the - * appropriate type (int, string, etc). This will work equally well with either java.util.Properties instances or a - * programmatically constructed map. - * - * @param props The configs to parse and validate. - * @return Parsed and validated configs. The key will be the config name and the value will be the value parsed into - * the appropriate type (int, string, etc). - */ - public Map parse(Map props) { - // Check all configurations are defined - List undefinedConfigKeys = undefinedDependentConfigs(); - if (!undefinedConfigKeys.isEmpty()) { - String joined = Utils.join(undefinedConfigKeys, ","); - throw new ConfigException("Some configurations in are referred in the dependents, but not defined: " + joined); - } - // parse all known keys - Map values = new HashMap<>(); - for (ConfigKey key : configKeys.values()) - values.put(key.name, parseValue(key, props.get(key.name), props.containsKey(key.name))); - return values; - } - - Object parseValue(ConfigKey key, Object value, boolean isSet) { - Object parsedValue; - if (isSet) { - parsedValue = parseType(key.name, value, key.type); - // props map doesn't contain setting, the key is required because no default value specified - its an error - } else if (NO_DEFAULT_VALUE.equals(key.defaultValue)) { - throw new ConfigException("Missing required configuration \"" + key.name + "\" which has no default value."); - } else { - // otherwise assign setting its default value - parsedValue = key.defaultValue; - } - if (key.validator != null) { - key.validator.ensureValid(key.name, parsedValue); - } - return parsedValue; - } - - /** - * Validate the current configuration values with the configuration definition. - * @param props the current configuration values - * @return List of Config, each Config contains the updated configuration information given - * the current configuration values. 
- */ - public List validate(Map props) { - return new ArrayList<>(validateAll(props).values()); - } - - public Map validateAll(Map props) { - Map configValues = new HashMap<>(); - for (String name: configKeys.keySet()) { - configValues.put(name, new ConfigValue(name)); - } - - List undefinedConfigKeys = undefinedDependentConfigs(); - for (String undefinedConfigKey: undefinedConfigKeys) { - ConfigValue undefinedConfigValue = new ConfigValue(undefinedConfigKey); - undefinedConfigValue.addErrorMessage(undefinedConfigKey + " is referred in the dependents, but not defined."); - undefinedConfigValue.visible(false); - configValues.put(undefinedConfigKey, undefinedConfigValue); - } - - Map parsed = parseForValidate(props, configValues); - return validate(parsed, configValues); - } - - // package accessible for testing - Map parseForValidate(Map props, Map configValues) { - Map parsed = new HashMap<>(); - Set configsWithNoParent = getConfigsWithNoParent(); - for (String name: configsWithNoParent) { - parseForValidate(name, props, parsed, configValues); - } - return parsed; - } - - - private Map validate(Map parsed, Map configValues) { - Set configsWithNoParent = getConfigsWithNoParent(); - for (String name: configsWithNoParent) { - validate(name, parsed, configValues); - } - return configValues; - } - - private List undefinedDependentConfigs() { - Set undefinedConfigKeys = new HashSet<>(); - for (ConfigKey configKey : configKeys.values()) { - for (String dependent: configKey.dependents) { - if (!configKeys.containsKey(dependent)) { - undefinedConfigKeys.add(dependent); - } - } - } - return new ArrayList<>(undefinedConfigKeys); - } - - // package accessible for testing - Set getConfigsWithNoParent() { - if (this.configsWithNoParent != null) { - return this.configsWithNoParent; - } - Set configsWithParent = new HashSet<>(); - - for (ConfigKey configKey: configKeys.values()) { - List dependents = configKey.dependents; - configsWithParent.addAll(dependents); - } - - Set configs = new HashSet<>(configKeys.keySet()); - configs.removeAll(configsWithParent); - this.configsWithNoParent = configs; - return configs; - } - - private void parseForValidate(String name, Map props, Map parsed, Map configs) { - if (!configKeys.containsKey(name)) { - return; - } - ConfigKey key = configKeys.get(name); - ConfigValue config = configs.get(name); - - Object value = null; - if (props.containsKey(key.name)) { - try { - value = parseType(key.name, props.get(key.name), key.type); - } catch (ConfigException e) { - config.addErrorMessage(e.getMessage()); - } - } else if (NO_DEFAULT_VALUE.equals(key.defaultValue)) { - config.addErrorMessage("Missing required configuration \"" + key.name + "\" which has no default value."); - } else { - value = key.defaultValue; - } - - if (key.validator != null) { - try { - key.validator.ensureValid(key.name, value); - } catch (ConfigException e) { - config.addErrorMessage(e.getMessage()); - } - } - config.value(value); - parsed.put(name, value); - for (String dependent: key.dependents) { - parseForValidate(dependent, props, parsed, configs); - } - } - - private void validate(String name, Map parsed, Map configs) { - if (!configKeys.containsKey(name)) { - return; - } - - ConfigKey key = configKeys.get(name); - ConfigValue value = configs.get(name); - - if (key.recommender != null) { - try { - List recommendedValues = key.recommender.validValues(name, parsed); - List originalRecommendedValues = value.recommendedValues(); - if (!originalRecommendedValues.isEmpty()) { - Set 
originalRecommendedValueSet = new HashSet<>(originalRecommendedValues); - Iterator it = recommendedValues.iterator(); - while (it.hasNext()) { - Object o = it.next(); - if (!originalRecommendedValueSet.contains(o)) { - it.remove(); - } - } - } - value.recommendedValues(recommendedValues); - value.visible(key.recommender.visible(name, parsed)); - } catch (ConfigException e) { - value.addErrorMessage(e.getMessage()); - } - } - - configs.put(name, value); - for (String dependent: key.dependents) { - validate(dependent, parsed, configs); - } - } - - /** - * Parse a value according to its expected type. - * @param name The config name - * @param value The config value - * @param type The expected type - * @return The parsed object - */ - public static Object parseType(String name, Object value, Type type) { - try { - if (value == null) return null; - - String trimmed = null; - if (value instanceof String) - trimmed = ((String) value).trim(); - - switch (type) { - case BOOLEAN: - if (value instanceof String) { - if (trimmed.equalsIgnoreCase("true")) - return true; - else if (trimmed.equalsIgnoreCase("false")) - return false; - else - throw new ConfigException(name, value, "Expected value to be either true or false"); - } else if (value instanceof Boolean) - return value; - else - throw new ConfigException(name, value, "Expected value to be either true or false"); - case PASSWORD: - if (value instanceof Password) - return value; - else if (value instanceof String) - return new Password(trimmed); - else - throw new ConfigException(name, value, "Expected value to be a string, but it was a " + value.getClass().getName()); - case STRING: - if (value instanceof String) - return trimmed; - else - throw new ConfigException(name, value, "Expected value to be a string, but it was a " + value.getClass().getName()); - case INT: - if (value instanceof Integer) { - return value; - } else if (value instanceof String) { - return Integer.parseInt(trimmed); - } else { - throw new ConfigException(name, value, "Expected value to be a 32-bit integer, but it was a " + value.getClass().getName()); - } - case SHORT: - if (value instanceof Short) { - return value; - } else if (value instanceof String) { - return Short.parseShort(trimmed); - } else { - throw new ConfigException(name, value, "Expected value to be a 16-bit integer (short), but it was a " + value.getClass().getName()); - } - case LONG: - if (value instanceof Integer) - return ((Integer) value).longValue(); - if (value instanceof Long) - return value; - else if (value instanceof String) - return Long.parseLong(trimmed); - else - throw new ConfigException(name, value, "Expected value to be a 64-bit integer (long), but it was a " + value.getClass().getName()); - case DOUBLE: - if (value instanceof Number) - return ((Number) value).doubleValue(); - else if (value instanceof String) - return Double.parseDouble(trimmed); - else - throw new ConfigException(name, value, "Expected value to be a double, but it was a " + value.getClass().getName()); - case LIST: - if (value instanceof List) - return value; - else if (value instanceof String) - if (trimmed.isEmpty()) - return Collections.emptyList(); - else - return Arrays.asList(trimmed.split("\\s*,\\s*", -1)); - else - throw new ConfigException(name, value, "Expected a comma separated list."); - case CLASS: - if (value instanceof Class) - return value; - else if (value instanceof String) - return Class.forName(trimmed, true, Utils.getContextOrAQClassLoader()); - else - throw new ConfigException(name, value, "Expected 
a Class instance or class name."); - default: - throw new IllegalStateException("Unknown type."); - } - } catch (NumberFormatException e) { - throw new ConfigException(name, value, "Not a number of type " + type); - } catch (ClassNotFoundException e) { - throw new ConfigException(name, value, "Class " + value + " could not be found."); - } - } - - public static String convertToString(Object parsedValue, Type type) { - if (parsedValue == null) { - return null; - } - - if (type == null) { - return parsedValue.toString(); - } - - switch (type) { - case BOOLEAN: - case SHORT: - case INT: - case LONG: - case DOUBLE: - case STRING: - case PASSWORD: - return parsedValue.toString(); - case LIST: - List valueList = (List) parsedValue; - return Utils.join(valueList, ","); - case CLASS: - Class clazz = (Class) parsedValue; - return clazz.getName(); - default: - throw new IllegalStateException("Unknown type."); - } - } - - /** - * Converts a map of config (key, value) pairs to a map of strings where each value - * is converted to a string. This method should be used with care since it stores - * actual password values to String. Values from this map should never be used in log entries. - */ - public static Map convertToStringMapWithPasswordValues(Map configs) { - Map result = new HashMap<>(); - for (Map.Entry entry : configs.entrySet()) { - Object value = entry.getValue(); - String strValue; - if (value instanceof Password) - strValue = ((Password) value).value(); - else if (value instanceof List) - strValue = convertToString(value, Type.LIST); - else if (value instanceof Class) - strValue = convertToString(value, Type.CLASS); - else - strValue = convertToString(value, null); - if (strValue != null) - result.put(entry.getKey(), strValue); - } - return result; - } - - /** - * The config types - */ - public enum Type { - BOOLEAN, STRING, INT, SHORT, LONG, DOUBLE, LIST, CLASS, PASSWORD - } - - /** - * The importance level for a configuration - */ - public enum Importance { - HIGH, MEDIUM, LOW - } - - /** - * The width of a configuration value - */ - public enum Width { - NONE, SHORT, MEDIUM, LONG - } - - /** - * This is used by the {@link #validate(Map)} to get valid values for a configuration given the current - * configuration values in order to perform full configuration validation and visibility modification. - * In case that there are dependencies between configurations, the valid values and visibility - * for a configuration may change given the values of other configurations. - */ - public interface Recommender { - - /** - * The valid values for the configuration given the current configuration values. - * @param name The name of the configuration - * @param parsedConfig The parsed configuration values - * @return The list of valid values. To function properly, the returned objects should have the type - * defined for the configuration using the recommender. - */ - List validValues(String name, Map parsedConfig); - - /** - * Set the visibility of the configuration given the current configuration values. - * @param name The name of the configuration - * @param parsedConfig The parsed configuration values - * @return The visibility of the configuration - */ - boolean visible(String name, Map parsedConfig); - } - - /** - * Validation logic the user may provide to perform single configuration validation. - */ - public interface Validator { - /** - * Perform single configuration validation. 
- * @param name The name of the configuration - * @param value The value of the configuration - * @throws ConfigException if the value is invalid. - */ - void ensureValid(String name, Object value); - } - - /** - * Validation logic for numeric ranges - */ - public static class Range implements Validator { - private final Number min; - private final Number max; - - private Range(Number min, Number max) { - this.min = min; - this.max = max; - } - - /** - * A numeric range that checks only the lower bound - * - * @param min The minimum acceptable value - */ - public static Range atLeast(Number min) { - return new Range(min, null); - } - - /** - * A numeric range that checks both the upper and lower bound - */ - public static Range between(Number min, Number max) { - return new Range(min, max); - } - - public void ensureValid(String name, Object o) { - if (o == null) - throw new ConfigException(name, null, "Value must be non-null"); - Number n = (Number) o; - if (min != null && n.doubleValue() < min.doubleValue()) - throw new ConfigException(name, o, "Value must be at least " + min); - if (max != null && n.doubleValue() > max.doubleValue()) - throw new ConfigException(name, o, "Value must be no more than " + max); - } - - public String toString() { - if (min == null && max == null) - return "[...]"; - else if (min == null) - return "[...," + max + "]"; - else if (max == null) - return "[" + min + ",...]"; - else - return "[" + min + ",...," + max + "]"; - } - } - - public static class ValidList implements Validator { - - final ValidString validString; - - private ValidList(List validStrings) { - this.validString = new ValidString(validStrings); - } - - public static ValidList in(String... validStrings) { - return new ValidList(Arrays.asList(validStrings)); - } - - @Override - public void ensureValid(final String name, final Object value) { - @SuppressWarnings("unchecked") - List values = (List) value; - for (String string : values) { - validString.ensureValid(name, string); - } - } - - public String toString() { - return validString.toString(); - } - } - - public static class ValidString implements Validator { - final List validStrings; - - private ValidString(List validStrings) { - this.validStrings = validStrings; - } - - public static ValidString in(String... validStrings) { - return new ValidString(Arrays.asList(validStrings)); - } - - @Override - public void ensureValid(String name, Object o) { - - if (!validStrings.contains((String)o)) { - throw new ConfigException(name, o, "String must be one of: " + Utils.join(validStrings, ", ")); - } - - } - - public String toString() { - return "[" + Utils.join(validStrings, ", ") + "]"; - } - } - - public static class NonNullValidator implements Validator { - @Override - public void ensureValid(String name, Object value) { - if (value == null) { - // Pass in the string null to avoid the findbugs warning - throw new ConfigException(name, "null", "entry must be non null"); - } - } - } - - public static class CompositeValidator implements Validator { - private final List validators; - - private CompositeValidator(List validators) { - this.validators = Collections.unmodifiableList(validators); - } - - public static CompositeValidator of(Validator... 
validators) { - return new CompositeValidator(Arrays.asList(validators)); - } - - @Override - public void ensureValid(String name, Object value) { - for (Validator validator: validators) { - validator.ensureValid(name, value); - } - } - } - - public static class NonEmptyString implements Validator { - - @Override - public void ensureValid(String name, Object o) { - String s = (String) o; - if (s != null && s.isEmpty()) { - throw new ConfigException(name, o, "String must be non-empty"); - } - } - - @Override - public String toString() { - return "non-empty string"; - } - } - - public static class NonEmptyStringWithoutControlChars implements Validator { - - public static NonEmptyStringWithoutControlChars nonEmptyStringWithoutControlChars() { - return new NonEmptyStringWithoutControlChars(); - } - - @Override - public void ensureValid(String name, Object value) { - String s = (String) value; - - if (s == null) { - // This can happen during creation of the config object due to no default value being defined for the - // name configuration - a missing name parameter is caught when checking for mandatory parameters, - // thus we can ok a null value here - return; - } else if (s.isEmpty()) { - throw new ConfigException(name, value, "String may not be empty"); - } - - // Check name string for illegal characters - ArrayList foundIllegalCharacters = new ArrayList<>(); - - for (int i = 0; i < s.length(); i++) { - if (Character.isISOControl(s.codePointAt(i))) { - foundIllegalCharacters.add(s.codePointAt(i)); - } - } - - if (!foundIllegalCharacters.isEmpty()) { - throw new ConfigException(name, value, "String may not contain control sequences but had the following ASCII chars: " + Utils.join(foundIllegalCharacters, ", ")); - } - } - } - - public static class ConfigKey { - public final String name; - public final Type type; - public final String documentation; - public final Object defaultValue; - public final Validator validator; - public final Importance importance; - public final String group; - public final int orderInGroup; - public final Width width; - public final String displayName; - public final List dependents; - public final Recommender recommender; - public final boolean internalConfig; - - public ConfigKey(String name, Type type, Object defaultValue, Validator validator, - Importance importance, String documentation, String group, - int orderInGroup, Width width, String displayName, - List dependents, Recommender recommender, - boolean internalConfig) { - this.name = name; - this.type = type; - this.defaultValue = NO_DEFAULT_VALUE.equals(defaultValue) ? 
NO_DEFAULT_VALUE : parseType(name, defaultValue, type); - this.validator = validator; - this.importance = importance; - if (this.validator != null && hasDefault()) - this.validator.ensureValid(name, this.defaultValue); - this.documentation = documentation; - this.dependents = dependents; - this.group = group; - this.orderInGroup = orderInGroup; - this.width = width; - this.displayName = displayName; - this.recommender = recommender; - this.internalConfig = internalConfig; - } - - public boolean hasDefault() { - return !NO_DEFAULT_VALUE.equals(this.defaultValue); - } - } - - protected List headers() { - return Arrays.asList("Name", "Description", "Type", "Default", "Valid Values", "Importance"); - } - - protected String getConfigValue(ConfigKey key, String headerName) { - switch (headerName) { - case "Name": - return key.name; - case "Description": - return key.documentation; - case "Type": - return key.type.toString().toLowerCase(Locale.ROOT); - case "Default": - if (key.hasDefault()) { - if (key.defaultValue == null) - return "null"; - String defaultValueStr = convertToString(key.defaultValue, key.type); - if (defaultValueStr.isEmpty()) - return "\"\""; - else - return defaultValueStr; - } else - return ""; - case "Valid Values": - return key.validator != null ? key.validator.toString() : ""; - case "Importance": - return key.importance.toString().toLowerCase(Locale.ROOT); - default: - throw new RuntimeException("Can't find value for header '" + headerName + "' in " + key.name); - } - } - - public String toHtmlTable() { - return toHtmlTable(Collections.emptyMap()); - } - - private void addHeader(StringBuilder builder, String headerName) { - builder.append(""); - builder.append(headerName); - builder.append("\n"); - } - - private void addColumnValue(StringBuilder builder, String value) { - builder.append(""); - builder.append(value); - builder.append(""); - } - - /** - * Converts this config into an HTML table that can be embedded into docs. - * If dynamicUpdateModes is non-empty, a "Dynamic Update Mode" column - * will be included n the table with the value of the update mode. Default - * mode is "read-only". - * @param dynamicUpdateModes Config name -> update mode mapping - */ - public String toHtmlTable(Map dynamicUpdateModes) { - boolean hasUpdateModes = !dynamicUpdateModes.isEmpty(); - List configs = sortedConfigs(); - StringBuilder b = new StringBuilder(); - b.append("\n"); - b.append("\n"); - // print column headers - for (String headerName : headers()) { - addHeader(b, headerName); - } - if (hasUpdateModes) - addHeader(b, "Dynamic Update Mode"); - b.append("\n"); - for (ConfigKey key : configs) { - if (key.internalConfig) { - continue; - } - b.append("\n"); - // print column values - for (String headerName : headers()) { - addColumnValue(b, getConfigValue(key, headerName)); - b.append(""); - } - if (hasUpdateModes) { - String updateMode = dynamicUpdateModes.get(key.name); - if (updateMode == null) - updateMode = "read-only"; - addColumnValue(b, updateMode); - } - b.append("\n"); - } - b.append("
      "); - return b.toString(); - } - - /** - * Get the configs formatted with reStructuredText, suitable for embedding in Sphinx - * documentation. - */ - public String toRst() { - StringBuilder b = new StringBuilder(); - for (ConfigKey key : sortedConfigs()) { - if (key.internalConfig) { - continue; - } - getConfigKeyRst(key, b); - b.append("\n"); - } - return b.toString(); - } - - /** - * Configs with new metadata (group, orderInGroup, dependents) formatted with reStructuredText, suitable for embedding in Sphinx - * documentation. - */ - public String toEnrichedRst() { - StringBuilder b = new StringBuilder(); - - String lastKeyGroupName = ""; - for (ConfigKey key : sortedConfigs()) { - if (key.internalConfig) { - continue; - } - if (key.group != null) { - if (!lastKeyGroupName.equalsIgnoreCase(key.group)) { - b.append(key.group).append("\n"); - - char[] underLine = new char[key.group.length()]; - Arrays.fill(underLine, '^'); - b.append(new String(underLine)).append("\n\n"); - } - lastKeyGroupName = key.group; - } - - getConfigKeyRst(key, b); - - if (key.dependents != null && key.dependents.size() > 0) { - int j = 0; - b.append(" * Dependents: "); - for (String dependent : key.dependents) { - b.append("``"); - b.append(dependent); - if (++j == key.dependents.size()) - b.append("``"); - else - b.append("``, "); - } - b.append("\n"); - } - b.append("\n"); - } - return b.toString(); - } - - /** - * Shared content on Rst and Enriched Rst. - */ - private void getConfigKeyRst(ConfigKey key, StringBuilder b) { - b.append("``").append(key.name).append("``").append("\n"); - for (String docLine : key.documentation.split("\n")) { - if (docLine.length() == 0) { - continue; - } - b.append(" ").append(docLine).append("\n\n"); - } - b.append(" * Type: ").append(getConfigValue(key, "Type")).append("\n"); - if (key.hasDefault()) { - b.append(" * Default: ").append(getConfigValue(key, "Default")).append("\n"); - } - if (key.validator != null) { - b.append(" * Valid Values: ").append(getConfigValue(key, "Valid Values")).append("\n"); - } - b.append(" * Importance: ").append(getConfigValue(key, "Importance")).append("\n"); - } - - /** - * Get a list of configs sorted taking the 'group' and 'orderInGroup' into account. - * - * If grouping is not specified, the result will reflect "natural" order: listing required fields first, then ordering by importance, and finally by name. - */ - private List sortedConfigs() { - final Map groupOrd = new HashMap<>(groups.size()); - int ord = 0; - for (String group: groups) { - groupOrd.put(group, ord++); - } - - List configs = new ArrayList<>(configKeys.values()); - Collections.sort(configs, new Comparator() { - @Override - public int compare(ConfigKey k1, ConfigKey k2) { - int cmp = k1.group == null - ? (k2.group == null ? 0 : -1) - : (k2.group == null ? 
1 : Integer.compare(groupOrd.get(k1.group), groupOrd.get(k2.group))); - if (cmp == 0) { - cmp = Integer.compare(k1.orderInGroup, k2.orderInGroup); - if (cmp == 0) { - // first take anything with no default value - if (!k1.hasDefault() && k2.hasDefault()) { - cmp = -1; - } else if (!k2.hasDefault() && k1.hasDefault()) { - cmp = 1; - } else { - cmp = k1.importance.compareTo(k2.importance); - if (cmp == 0) { - return k1.name.compareTo(k2.name); - } - } - } - } - return cmp; - } - }); - return configs; - } - - public void embed(final String keyPrefix, final String groupPrefix, final int startingOrd, final ConfigDef child) { - int orderInGroup = startingOrd; - for (ConfigKey key : child.sortedConfigs()) { - define(new ConfigKey( - keyPrefix + key.name, - key.type, - key.defaultValue, - embeddedValidator(keyPrefix, key.validator), - key.importance, - key.documentation, - groupPrefix + (key.group == null ? "" : ": " + key.group), - orderInGroup++, - key.width, - key.displayName, - embeddedDependents(keyPrefix, key.dependents), - embeddedRecommender(keyPrefix, key.recommender), - key.internalConfig)); - } - } - - /** - * Returns a new validator instance that delegates to the base validator but unprefixes the config name along the way. - */ - private static Validator embeddedValidator(final String keyPrefix, final Validator base) { - if (base == null) return null; - return new ConfigDef.Validator() { - @Override - public void ensureValid(String name, Object value) { - base.ensureValid(name.substring(keyPrefix.length()), value); - } - }; - } - - /** - * Updated list of dependent configs with the specified {@code prefix} added. - */ - private static List embeddedDependents(final String keyPrefix, final List dependents) { - if (dependents == null) return null; - final List updatedDependents = new ArrayList<>(dependents.size()); - for (String dependent : dependents) { - updatedDependents.add(keyPrefix + dependent); - } - return updatedDependents; - } - - /** - * Returns a new recommender instance that delegates to the base recommender but unprefixes the input parameters along the way. - */ - private static Recommender embeddedRecommender(final String keyPrefix, final Recommender base) { - if (base == null) return null; - return new Recommender() { - private String unprefixed(String k) { - return k.substring(keyPrefix.length()); - } - - private Map unprefixed(Map parsedConfig) { - final Map unprefixedParsedConfig = new HashMap<>(parsedConfig.size()); - for (Map.Entry e : parsedConfig.entrySet()) { - if (e.getKey().startsWith(keyPrefix)) { - unprefixedParsedConfig.put(unprefixed(e.getKey()), e.getValue()); - } - } - return unprefixedParsedConfig; - } - - @Override - public List validValues(String name, Map parsedConfig) { - return base.validValues(unprefixed(name), unprefixed(parsedConfig)); - } - - @Override - public boolean visible(String name, Map parsedConfig) { - return base.visible(unprefixed(name), unprefixed(parsedConfig)); - } - }; - } - -} - diff --git a/clients/src/main/java/org/oracle/okafka/common/config/ConfigException.java b/clients/src/main/java/org/oracle/okafka/common/config/ConfigException.java deleted file mode 100644 index 6a884aa..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/config/ConfigException.java +++ /dev/null @@ -1,47 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
-*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.oracle.okafka.common.config; - -import org.oracle.okafka.common.KafkaException; - -/** - * Thrown if the user supplies an invalid configuration - */ -public class ConfigException extends KafkaException { - - private static final long serialVersionUID = 1L; - - public ConfigException(String message) { - super(message); - } - - public ConfigException(String name, Object value) { - this(name, value, null); - } - - public ConfigException(String name, Object value, String message) { - super("Invalid value " + value + " for configuration " + name + (message == null ? "" : ": " + message)); - } - -} \ No newline at end of file diff --git a/clients/src/main/java/org/oracle/okafka/common/config/ConfigResource.java b/clients/src/main/java/org/oracle/okafka/common/config/ConfigResource.java deleted file mode 100644 index ad51640..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/config/ConfigResource.java +++ /dev/null @@ -1,125 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.config; - -import java.util.Arrays; -import java.util.Collections; -import java.util.Map; -import java.util.Objects; -import java.util.function.Function; -import java.util.stream.Collectors; - -/** - * A class representing resources that have configs. - */ -public final class ConfigResource { - - /** - * Type of resource. 
- */ - public enum Type { - BROKER((byte) 4), TOPIC((byte) 2), UNKNOWN((byte) 0); - - private static final Map TYPES = Collections.unmodifiableMap( - Arrays.stream(values()).collect(Collectors.toMap(Type::id, Function.identity())) - ); - - private final byte id; - - Type(final byte id) { - this.id = id; - } - - public byte id() { - return id; - } - - public static Type forId(final byte id) { - return TYPES.getOrDefault(id, UNKNOWN); - } - } - - private final Type type; - private final String name; - - /** - * Create an instance of this class with the provided parameters. - * - * @param type a non-null resource type - * @param name a non-null resource name - */ - public ConfigResource(Type type, String name) { - Objects.requireNonNull(type, "type should not be null"); - Objects.requireNonNull(name, "name should not be null"); - this.type = type; - this.name = name; - } - - /** - * Return the resource type. - */ - public Type type() { - return type; - } - - /** - * Return the resource name. - */ - public String name() { - return name; - } - - /** - * Returns true if this is the default resource of a resource type. - * Resource name is empty for the default resource. - */ - public boolean isDefault() { - return name.isEmpty(); - } - - @Override - public boolean equals(Object o) { - if (this == o) - return true; - if (o == null || getClass() != o.getClass()) - return false; - - ConfigResource that = (ConfigResource) o; - - return type == that.type && name.equals(that.name); - } - - @Override - public int hashCode() { - int result = type.hashCode(); - result = 31 * result + name.hashCode(); - return result; - } - - @Override - public String toString() { - return "ConfigResource(type=" + type + ", name='" + name + "')"; - } -} diff --git a/clients/src/main/java/org/oracle/okafka/common/config/ConfigValue.java b/clients/src/main/java/org/oracle/okafka/common/config/ConfigValue.java deleted file mode 100644 index 44775fc..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/config/ConfigValue.java +++ /dev/null @@ -1,120 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.oracle.okafka.common.config; - -import java.util.ArrayList; -import java.util.List; -import java.util.Objects; - -public class ConfigValue { - - private final String name; - private Object value; - private List<Object> recommendedValues; - private final List<String> errorMessages; - private boolean visible; - - public ConfigValue(String name) { - this(name, null, new ArrayList<>(), new ArrayList<String>()); - } - - public ConfigValue(String name, Object value, List<Object> recommendedValues, List<String> errorMessages) { - this.name = name; - this.value = value; - this.recommendedValues = recommendedValues; - this.errorMessages = errorMessages; - this.visible = true; - } - - public String name() { - return name; - } - - public Object value() { - return value; - } - - public List<Object> recommendedValues() { - return recommendedValues; - } - - public List<String> errorMessages() { - return errorMessages; - } - - public boolean visible() { - return visible; - } - - public void value(Object value) { - this.value = value; - } - - public void recommendedValues(List<Object> recommendedValues) { - this.recommendedValues = recommendedValues; - } - - public void addErrorMessage(String errorMessage) { - this.errorMessages.add(errorMessage); - } - - public void visible(boolean visible) { - this.visible = visible; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - ConfigValue that = (ConfigValue) o; - return Objects.equals(name, that.name) && - Objects.equals(value, that.value) && - Objects.equals(recommendedValues, that.recommendedValues) && - Objects.equals(errorMessages, that.errorMessages) && - Objects.equals(visible, that.visible); - } - - @Override - public int hashCode() { - return Objects.hash(name, value, recommendedValues, errorMessages, visible); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder(); - sb.append("[") - .append(name) - .append(",") - .append(value) - .append(",") - .append(recommendedValues) - .append(",") - .append(errorMessages) - .append(",") - .append(visible) - .append("]"); - return sb.toString(); - } -}
diff --git a/clients/src/main/java/org/oracle/okafka/common/config/SslConfigs.java b/clients/src/main/java/org/oracle/okafka/common/config/SslConfigs.java index e4bd98e..08593cf 100644 --- a/clients/src/main/java/org/oracle/okafka/common/config/SslConfigs.java +++ b/clients/src/main/java/org/oracle/okafka/common/config/SslConfigs.java @@ -1,7 +1,7 @@ /* -** OKafka Java Client version 0.8. +** OKafka Java Client version 23.4. ** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. +** Copyright (c) 2019, 2024 Oracle and/or its affiliates. ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. */ @@ -29,14 +29,10 @@ package org.oracle.okafka.common.config; -import org.oracle.okafka.common.config.ConfigDef.Importance; -import org.oracle.okafka.common.utils.Utils; +import org.apache.kafka.common.config.ConfigDef; +import org.apache.kafka.common.config.ConfigDef.Importance; -import javax.net.ssl.KeyManagerFactory; -import javax.net.ssl.TrustManagerFactory; -import java.util.Set; - -public class SslConfigs { +public class SslConfigs extends org.apache.kafka.common.config.SslConfigs { /* * NOTE: DO NOT CHANGE EITHER CONFIG NAMES AS THESE ARE PART OF THE PUBLIC API AND CHANGE WILL BREAK USER CODE. */ @@ -45,6 +41,7 @@ public class SslConfigs { public static final String TNS_ALIAS_DOC = "alias of connection string in tnsnames.ora.
This connection is used for connecting to database instance"; public static void addClientSslSupport(ConfigDef config) { + org.apache.kafka.common.config.SslConfigs.addClientSslSupport(config); config.define(SslConfigs.TNS_ALIAS, ConfigDef.Type.STRING, null, Importance.MEDIUM, SslConfigs.TNS_ALIAS_DOC); } } diff --git a/clients/src/main/java/org/oracle/okafka/common/config/types/Password.java b/clients/src/main/java/org/oracle/okafka/common/config/types/Password.java deleted file mode 100644 index 51708f0..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/config/types/Password.java +++ /dev/null @@ -1,75 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.oracle.okafka.common.config.types; - -/** - * A wrapper class for passwords to hide them while logging a config - */ -public class Password { - - public static final String HIDDEN = "[hidden]"; - - private final String value; - - /** - * Construct a new Password object - * @param value The value of a password - */ - public Password(String value) { - this.value = value; - } - - @Override - public int hashCode() { - return value.hashCode(); - } - - @Override - public boolean equals(Object obj) { - if (!(obj instanceof Password)) - return false; - Password other = (Password) obj; - return value.equals(other.value); - } - - /** - * Returns hidden password string - * - * @return hidden password string - */ - @Override - public String toString() { - return HIDDEN; - } - - /** - * Returns real password string - * - * @return real password string - */ - public String value() { - return value; - } - -} diff --git a/clients/src/main/java/org/oracle/okafka/common/errors/ApiException.java b/clients/src/main/java/org/oracle/okafka/common/errors/ApiException.java deleted file mode 100644 index efd025e..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/errors/ApiException.java +++ /dev/null @@ -1,59 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.errors; - -import org.oracle.okafka.common.KafkaException; - -/** - * Any API exception that is part of the public protocol and should be a subclass of this class and be part of this - * package. - */ -public class ApiException extends KafkaException { - - private static final long serialVersionUID = 1L; - - public ApiException(String message, Throwable cause) { - super(message, cause); - } - - public ApiException(String message) { - super(message); - } - - public ApiException(Throwable cause) { - super(cause); - } - - public ApiException() { - super(); - } - - /* avoid the expensive and useless stack trace for api exceptions */ - @Override - public Throwable fillInStackTrace() { - return this; - } - -} diff --git a/clients/src/main/java/org/oracle/okafka/common/errors/AuthenticationException.java b/clients/src/main/java/org/oracle/okafka/common/errors/AuthenticationException.java deleted file mode 100644 index fe0b346..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/errors/AuthenticationException.java +++ /dev/null @@ -1,45 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.errors; - -/** - * This exception indicates that authentication has failed. - * On authentication failure, clients abort the operation requested and raise one - * this exception: - */ - -public class AuthenticationException extends ApiException { - - private static final long serialVersionUID = 1L; - - public AuthenticationException(String message) { - super(message); - } - - public AuthenticationException(String message, Throwable cause) { - super(message, cause); - } - -} diff --git a/clients/src/main/java/org/oracle/okafka/common/errors/AuthorizationException.java b/clients/src/main/java/org/oracle/okafka/common/errors/AuthorizationException.java deleted file mode 100644 index 1855a49..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/errors/AuthorizationException.java +++ /dev/null @@ -1,37 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. 
-** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.errors; - -public class AuthorizationException extends ApiException { - - public AuthorizationException(String message) { - super(message); - } - - public AuthorizationException(String message, Throwable cause) { - super(message, cause); - } - -} diff --git a/clients/src/main/java/org/oracle/okafka/common/errors/ConnectionException.java b/clients/src/main/java/org/oracle/okafka/common/errors/ConnectionException.java index 043719b..6c36f2a 100644 --- a/clients/src/main/java/org/oracle/okafka/common/errors/ConnectionException.java +++ b/clients/src/main/java/org/oracle/okafka/common/errors/ConnectionException.java @@ -1,13 +1,13 @@ /* -** OKafka Java Client version 0.8. +** OKafka Java Client version 23.4. ** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. +** Copyright (c) 2019, 2024 Oracle and/or its affiliates. ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. */ package org.oracle.okafka.common.errors; -import org.oracle.okafka.common.KafkaException; +import org.apache.kafka.common.KafkaException; public class ConnectionException extends KafkaException { private static final long serialVersionUID = 1L; diff --git a/clients/src/main/java/org/oracle/okafka/common/errors/CorruptRecordException.java b/clients/src/main/java/org/oracle/okafka/common/errors/CorruptRecordException.java deleted file mode 100644 index 940bac2..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/errors/CorruptRecordException.java +++ /dev/null @@ -1,50 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.oracle.okafka.common.errors; - -/** - * This exception indicates a record has failed its internal CRC check, this generally indicates network or disk - * corruption. - */ -public class CorruptRecordException extends RetriableException { - - private static final long serialVersionUID = 1L; - - public CorruptRecordException() { - super("This message has failed its CRC checksum, exceeds the valid size, has a null key for a compacted topic, or is otherwise corrupt."); - } - - public CorruptRecordException(String message) { - super(message); - } - - public CorruptRecordException(Throwable cause) { - super(cause); - } - - public CorruptRecordException(String message, Throwable cause) { - super(message, cause); - } - -} diff --git a/clients/src/main/java/org/oracle/okafka/common/errors/FeatureNotSupportedException.java b/clients/src/main/java/org/oracle/okafka/common/errors/FeatureNotSupportedException.java index a7d6d93..496b3d2 100644 --- a/clients/src/main/java/org/oracle/okafka/common/errors/FeatureNotSupportedException.java +++ b/clients/src/main/java/org/oracle/okafka/common/errors/FeatureNotSupportedException.java @@ -1,7 +1,7 @@ /* -** OKafka Java Client version 0.8. +** OKafka Java Client version 23.4. ** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. +** Copyright (c) 2019, 2024 Oracle and/or its affiliates. ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. */ diff --git a/clients/src/main/java/org/oracle/okafka/common/errors/InterruptException.java b/clients/src/main/java/org/oracle/okafka/common/errors/InterruptException.java deleted file mode 100644 index 29c6dce..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/errors/InterruptException.java +++ /dev/null @@ -1,50 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.oracle.okafka.common.errors; - -import org.oracle.okafka.common.KafkaException; - -/** - * An unchecked wrapper for InterruptedException - */ -public class InterruptException extends KafkaException { - - private static final long serialVersionUID = 1L; - - public InterruptException(InterruptedException cause) { - super(cause); - Thread.currentThread().interrupt(); - } - - public InterruptException(String message, InterruptedException cause) { - super(message, cause); - Thread.currentThread().interrupt(); - } - - public InterruptException(String message) { - super(message, new InterruptedException()); - Thread.currentThread().interrupt(); - } - -} diff --git a/clients/src/main/java/org/oracle/okafka/common/errors/InvalidConfigurationException.java b/clients/src/main/java/org/oracle/okafka/common/errors/InvalidConfigurationException.java deleted file mode 100644 index d65f109..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/errors/InvalidConfigurationException.java +++ /dev/null @@ -1,39 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.errors; - -public class InvalidConfigurationException extends ApiException { - - private static final long serialVersionUID = 1L; - - public InvalidConfigurationException(String message) { - super(message); - } - - public InvalidConfigurationException(String message, Throwable cause) { - super(message, cause); - } - -} diff --git a/clients/src/main/java/org/oracle/okafka/common/errors/InvalidLoginCredentialsException.java b/clients/src/main/java/org/oracle/okafka/common/errors/InvalidLoginCredentialsException.java index 6ea7f6c..999267a 100644 --- a/clients/src/main/java/org/oracle/okafka/common/errors/InvalidLoginCredentialsException.java +++ b/clients/src/main/java/org/oracle/okafka/common/errors/InvalidLoginCredentialsException.java @@ -1,7 +1,7 @@ /* -** OKafka Java Client version 0.8. +** OKafka Java Client version 23.4. ** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. +** Copyright (c) 2019, 2024 Oracle and/or its affiliates. ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
*/ diff --git a/clients/src/main/java/org/oracle/okafka/common/errors/InvalidMessageIdException.java b/clients/src/main/java/org/oracle/okafka/common/errors/InvalidMessageIdException.java index 73d4c6a..74123c5 100644 --- a/clients/src/main/java/org/oracle/okafka/common/errors/InvalidMessageIdException.java +++ b/clients/src/main/java/org/oracle/okafka/common/errors/InvalidMessageIdException.java @@ -1,7 +1,7 @@ /* -** OKafka Java Client version 0.8. +** OKafka Java Client version 23.4. ** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. +** Copyright (c) 2019, 2024 Oracle and/or its affiliates. ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. */ diff --git a/clients/src/main/java/org/oracle/okafka/common/errors/InvalidMetadataException.java b/clients/src/main/java/org/oracle/okafka/common/errors/InvalidMetadataException.java deleted file mode 100644 index c934fab..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/errors/InvalidMetadataException.java +++ /dev/null @@ -1,50 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.errors; - -/** - * An exception that may indicate the client's metadata is out of date - */ -public abstract class InvalidMetadataException extends RetriableException { - - private static final long serialVersionUID = 1L; - - public InvalidMetadataException() { - super(); - } - - public InvalidMetadataException(String message) { - super(message); - } - - public InvalidMetadataException(String message, Throwable cause) { - super(message, cause); - } - - public InvalidMetadataException(Throwable cause) { - super(cause); - } - -} diff --git a/clients/src/main/java/org/oracle/okafka/common/errors/InvalidTopicException.java b/clients/src/main/java/org/oracle/okafka/common/errors/InvalidTopicException.java deleted file mode 100644 index b8585d0..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/errors/InvalidTopicException.java +++ /dev/null @@ -1,52 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.errors; - -/** - * The client has attempted to perform an operation on an invalid topic. - * For example the topic name is too long, contains invalid characters etc. - * This exception is not retriable because the operation won't suddenly become valid. - */ -public class InvalidTopicException extends ApiException { - - private static final long serialVersionUID = 1L; - - public InvalidTopicException() { - super(); - } - - public InvalidTopicException(String message, Throwable cause) { - super(message, cause); - } - - public InvalidTopicException(String message) { - super(message); - } - - public InvalidTopicException(Throwable cause) { - super(cause); - } - -} diff --git a/clients/src/main/java/org/oracle/okafka/common/errors/NotLeaderForPartitionException.java b/clients/src/main/java/org/oracle/okafka/common/errors/NotLeaderForPartitionException.java deleted file mode 100644 index fb9d71d..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/errors/NotLeaderForPartitionException.java +++ /dev/null @@ -1,50 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.oracle.okafka.common.errors; - -/** - * This server is not the leader for the given partition - */ -public class NotLeaderForPartitionException extends InvalidMetadataException { - - private static final long serialVersionUID = 1L; - - public NotLeaderForPartitionException() { - super(); - } - - public NotLeaderForPartitionException(String message) { - super(message); - } - - public NotLeaderForPartitionException(Throwable cause) { - super(cause); - } - - public NotLeaderForPartitionException(String message, Throwable cause) { - super(message, cause); - } - -} diff --git a/clients/src/main/java/org/oracle/okafka/common/errors/ProducerFencedException.java b/clients/src/main/java/org/oracle/okafka/common/errors/ProducerFencedException.java deleted file mode 100644 index 93ac65a..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/errors/ProducerFencedException.java +++ /dev/null @@ -1,39 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.errors; - -/** - * This fatal exception indicates that another producer with the same transactional.id has been - * started. It is only possible to have one producer instance with a transactional.id at any - * given time, and the latest one to be started "fences" the previous instances so that they can no longer - * make transactional requests. When you encounter this exception, you must close the producer instance. - */ -public class ProducerFencedException extends ApiException { - - public ProducerFencedException(String msg) { - super(msg); - } -} - diff --git a/clients/src/main/java/org/oracle/okafka/common/errors/RecordBatchTooLargeException.java b/clients/src/main/java/org/oracle/okafka/common/errors/RecordBatchTooLargeException.java deleted file mode 100644 index 7cf8e2b..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/errors/RecordBatchTooLargeException.java +++ /dev/null @@ -1,51 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.errors; - -/** - * This record batch is larger than the maximum allowable size - */ -public class RecordBatchTooLargeException extends ApiException { - - private static final long serialVersionUID = 1L; - - public RecordBatchTooLargeException() { - super(); - } - - public RecordBatchTooLargeException(String message, Throwable cause) { - super(message, cause); - } - - public RecordBatchTooLargeException(String message) { - super(message); - } - - public RecordBatchTooLargeException(Throwable cause) { - super(cause); - } - -} - diff --git a/clients/src/main/java/org/oracle/okafka/common/errors/RecordTooLargeException.java b/clients/src/main/java/org/oracle/okafka/common/errors/RecordTooLargeException.java deleted file mode 100644 index e24d937..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/errors/RecordTooLargeException.java +++ /dev/null @@ -1,63 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.oracle.okafka.common.errors; - -import java.util.Map; - -import org.oracle.okafka.common.TopicPartition; - -/** - * This record is larger than the maximum allowable size - */ -public class RecordTooLargeException extends ApiException { - - private static final long serialVersionUID = 1L; - private Map recordTooLargePartitions = null; - - public RecordTooLargeException() { - super(); - } - - public RecordTooLargeException(String message, Throwable cause) { - super(message, cause); - } - - public RecordTooLargeException(String message) { - super(message); - } - - public RecordTooLargeException(Throwable cause) { - super(cause); - } - - public RecordTooLargeException(String message, Map recordTooLargePartitions) { - super(message); - this.recordTooLargePartitions = recordTooLargePartitions; - } - - public Map recordTooLargePartitions() { - return recordTooLargePartitions; - } -} diff --git a/clients/src/main/java/org/oracle/okafka/common/errors/RetriableException.java b/clients/src/main/java/org/oracle/okafka/common/errors/RetriableException.java deleted file mode 100644 index 4b5c70d..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/errors/RetriableException.java +++ /dev/null @@ -1,49 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.errors; - -/** - * A retriable exception is a transient exception that if retried may succeed. - */ -public abstract class RetriableException extends ApiException { - - private static final long serialVersionUID = 1L; - - public RetriableException(String message, Throwable cause) { - super(message, cause); - } - - public RetriableException(String message) { - super(message); - } - - public RetriableException(Throwable cause) { - super(cause); - } - - public RetriableException() { - } - -} diff --git a/clients/src/main/java/org/oracle/okafka/common/errors/SerializationException.java b/clients/src/main/java/org/oracle/okafka/common/errors/SerializationException.java deleted file mode 100644 index ea1b9c5..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/errors/SerializationException.java +++ /dev/null @@ -1,58 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.errors; - -import org.oracle.okafka.common.KafkaException; - -/** - * Any exception during serialization in the producer - */ -public class SerializationException extends KafkaException { - - private static final long serialVersionUID = 1L; - - public SerializationException(String message, Throwable cause) { - super(message, cause); - } - - public SerializationException(String message) { - super(message); - } - - public SerializationException(Throwable cause) { - super(cause); - } - - public SerializationException() { - super(); - } - - /* avoid the expensive and useless stack trace for serialization exceptions */ - @Override - public Throwable fillInStackTrace() { - return this; - } - -} diff --git a/clients/src/main/java/org/oracle/okafka/common/errors/TimeoutException.java b/clients/src/main/java/org/oracle/okafka/common/errors/TimeoutException.java deleted file mode 100644 index 255981d..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/errors/TimeoutException.java +++ /dev/null @@ -1,50 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.errors; - -/** - * Indicates that a request timed out. 
- */ -public class TimeoutException extends RetriableException { - - private static final long serialVersionUID = 1L; - - public TimeoutException() { - super(); - } - - public TimeoutException(String message, Throwable cause) { - super(message, cause); - } - - public TimeoutException(String message) { - super(message); - } - - public TimeoutException(Throwable cause) { - super(cause); - } - -} diff --git a/clients/src/main/java/org/oracle/okafka/common/errors/TopicExistsException.java b/clients/src/main/java/org/oracle/okafka/common/errors/TopicExistsException.java deleted file mode 100644 index dc8c3e8..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/errors/TopicExistsException.java +++ /dev/null @@ -1,39 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.errors; - -public class TopicExistsException extends ApiException { - - private static final long serialVersionUID = 1L; - - public TopicExistsException(String message) { - super(message); - } - - public TopicExistsException(String message, Throwable cause) { - super(message, cause); - } - -} diff --git a/clients/src/main/java/org/oracle/okafka/common/header/Header.java b/clients/src/main/java/org/oracle/okafka/common/header/Header.java deleted file mode 100644 index a63c462..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/header/Header.java +++ /dev/null @@ -1,33 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.oracle.okafka.common.header; - -/** Header feature is not yet supported.**/ -public interface Header { - - String key(); - - byte[] value(); -} diff --git a/clients/src/main/java/org/oracle/okafka/common/header/Headers.java b/clients/src/main/java/org/oracle/okafka/common/header/Headers.java deleted file mode 100644 index 3a4f011..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/header/Headers.java +++ /dev/null @@ -1,81 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.header; - -/** Headers feature is not yet supported.**/ -public interface Headers extends Iterable
      { - - /** - * Adds a header (key inside), to the end, returning if the operation succeeded. - * - * @param header the Header to be added - * @return this instance of the Headers, once the header is added. - * @throws IllegalStateException is thrown if headers are in a read-only state. - */ - Headers add(Header header) throws IllegalStateException; - - /** - * Creates and adds a header, to the end, returning if the operation succeeded. - * - * @param key of the header to be added. - * @param value of the header to be added. - * @return this instance of the Headers, once the header is added. - * @throws IllegalStateException is thrown if headers are in a read-only state. - */ - Headers add(String key, byte[] value) throws IllegalStateException; - - /** - * Removes all headers for the given key returning if the operation succeeded. - * - * @param key to remove all headers for. - * @return this instance of the Headers, once the header is removed. - * @throws IllegalStateException is thrown if headers are in a read-only state. - */ - Headers remove(String key) throws IllegalStateException; - - /** - * Returns just one (the very last) header for the given key, if present. - * - * @param key to get the last header for. - * @return this last header matching the given key, returns none if not present. - */ - Header lastHeader(String key); - - /** - * Returns all headers for the given key, in the order they were added in, if present. - * - * @param key to return the headers for. - * @return all headers for the given key, in the order they were added in, if NO headers are present an empty iterable is returned. - */ - Iterable
      headers(String key); - - /** - * Returns all headers as an array, in the order they were added in. - * - * @return the headers as a Header[], mutating this array will not affect the Headers, if NO headers are present an empty array is returned. - */ - Header[] toArray(); - -} diff --git a/clients/src/main/java/org/oracle/okafka/common/header/internals/RecordHeader.java b/clients/src/main/java/org/oracle/okafka/common/header/internals/RecordHeader.java deleted file mode 100644 index 471b272..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/header/internals/RecordHeader.java +++ /dev/null @@ -1,87 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.header.internals; - -import java.nio.ByteBuffer; -import java.util.Arrays; -import java.util.Objects; - -import org.oracle.okafka.common.header.Header; -import org.oracle.okafka.common.utils.Utils; - -public class RecordHeader implements Header { - private final String key; - private ByteBuffer valueBuffer; - private byte[] value; - - public RecordHeader(String key, byte[] value) { - Objects.requireNonNull(key, "Null header keys are not permitted"); - this.key = key; - this.value = value; - } - - public RecordHeader(String key, ByteBuffer valueBuffer) { - Objects.requireNonNull(key, "Null header keys are not permitted"); - this.key = key; - this.valueBuffer = valueBuffer; - } - - public String key() { - return key; - } - - public byte[] value() { - if (value == null && valueBuffer != null) { - value = Utils.toArray(valueBuffer); - valueBuffer = null; - } - return value; - } - - @Override - public boolean equals(Object o) { - if (this == o) - return true; - if (o == null || getClass() != o.getClass()) - return false; - - RecordHeader header = (RecordHeader) o; - return (key == null ? header.key == null : key.equals(header.key)) && - Arrays.equals(value(), header.value()); - } - - @Override - public int hashCode() { - int result = key != null ? 
key.hashCode() : 0; - result = 31 * result + Arrays.hashCode(value()); - return result; - } - - @Override - public String toString() { - return "RecordHeader(key = " + key + ", value = " + Arrays.toString(value()) + ")"; - } - -} diff --git a/clients/src/main/java/org/oracle/okafka/common/header/internals/RecordHeaders.java b/clients/src/main/java/org/oracle/okafka/common/header/internals/RecordHeaders.java deleted file mode 100644 index 580b300..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/header/internals/RecordHeaders.java +++ /dev/null @@ -1,218 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.header.internals; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.Iterator; -import java.util.List; - -import org.oracle.okafka.common.header.Header; -import org.oracle.okafka.common.header.Headers; -import org.oracle.okafka.common.record.Record; -import org.oracle.okafka.common.utils.AbstractIterator; - -/** - * This feature is not yet supported. - * - */ -public class RecordHeaders implements Headers { - - private final List
      headers; - private volatile boolean isReadOnly; - - public RecordHeaders() { - this((Iterable
      ) null); - } - - public RecordHeaders(Header[] headers) { - if (headers == null) { - this.headers = new ArrayList<>(); - } else { - this.headers = new ArrayList<>(Arrays.asList(headers)); - } - } - - public RecordHeaders(Iterable
      headers) { - //Use efficient copy constructor if possible, fallback to iteration otherwise - if (headers == null) { - this.headers = new ArrayList<>(); - } else if (headers instanceof RecordHeaders) { - this.headers = new ArrayList<>(((RecordHeaders) headers).headers); - } else if (headers instanceof Collection) { - this.headers = new ArrayList<>((Collection
      ) headers); - } else { - this.headers = new ArrayList<>(); - for (Header header : headers) - this.headers.add(header); - } - } - - @Override - public Headers add(Header header) throws IllegalStateException { - canWrite(); - headers.add(header); - return this; - } - - @Override - public Headers add(String key, byte[] value) throws IllegalStateException { - return add(new RecordHeader(key, value)); - } - - @Override - public Headers remove(String key) throws IllegalStateException { - canWrite(); - checkKey(key); - Iterator
      iterator = iterator(); - while (iterator.hasNext()) { - if (iterator.next().key().equals(key)) { - iterator.remove(); - } - } - return this; - } - - @Override - public Header lastHeader(String key) { - checkKey(key); - for (int i = headers.size() - 1; i >= 0; i--) { - Header header = headers.get(i); - if (header.key().equals(key)) { - return header; - } - } - return null; - } - - @Override - public Iterable
      headers(final String key) { - checkKey(key); - return new Iterable
      () { - @Override - public Iterator
      iterator() { - return new FilterByKeyIterator(headers.iterator(), key); - } - }; - } - - @Override - public Iterator
      iterator() { - return closeAware(headers.iterator()); - } - - public void setReadOnly() { - this.isReadOnly = true; - } - - public Header[] toArray() { - return headers.isEmpty() ? Record.EMPTY_HEADERS : headers.toArray(new Header[headers.size()]); - } - - private void checkKey(String key) { - if (key == null) { - throw new IllegalArgumentException("key cannot be null."); - } - } - - private void canWrite() { - if (isReadOnly) { - throw new IllegalStateException("RecordHeaders has been closed."); - } - } - - private Iterator
      closeAware(final Iterator
      original) { - return new Iterator
      () { - @Override - public boolean hasNext() { - return original.hasNext(); - } - - public Header next() { - return original.next(); - } - - @Override - public void remove() { - canWrite(); - original.remove(); - } - }; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - - RecordHeaders headers1 = (RecordHeaders) o; - - return headers != null ? headers.equals(headers1.headers) : headers1.headers == null; - } - - @Override - public int hashCode() { - return headers != null ? headers.hashCode() : 0; - } - - @Override - public String toString() { - return "RecordHeaders(" + - "headers = " + headers + - ", isReadOnly = " + isReadOnly + - ')'; - } - - private static final class FilterByKeyIterator extends AbstractIterator
      { - - private final Iterator
      original; - private final String key; - - private FilterByKeyIterator(Iterator
      original, String key) { - this.original = original; - this.key = key; - } - - protected Header makeNext() { - while (true) { - if (original.hasNext()) { - Header header = original.next(); - if (!header.key().equals(key)) { - continue; - } - - return header; - } - return this.allDone(); - } - } - } -} - diff --git a/clients/src/main/java/org/oracle/okafka/common/internals/ClusterResourceListeners.java b/clients/src/main/java/org/oracle/okafka/common/internals/ClusterResourceListeners.java deleted file mode 100644 index ee88faf..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/internals/ClusterResourceListeners.java +++ /dev/null @@ -1,70 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.internals; - -import java.util.ArrayList; -import java.util.List; - -import org.oracle.okafka.common.ClusterResource; -import org.oracle.okafka.common.ClusterResourceListener; - -public class ClusterResourceListeners { - - private final List clusterResourceListeners; - - public ClusterResourceListeners() { - this.clusterResourceListeners = new ArrayList<>(); - } - - /** - * Add only if the candidate implements {@link ClusterResourceListener}. - * @param candidate Object which might implement {@link ClusterResourceListener} - */ - public void maybeAdd(Object candidate) { - if (candidate instanceof ClusterResourceListener) { - clusterResourceListeners.add((ClusterResourceListener) candidate); - } - } - - /** - * Add all items who implement {@link ClusterResourceListener} from the list. - * @param candidateList List of objects which might implement {@link ClusterResourceListener} - */ - public void maybeAddAll(List candidateList) { - for (Object candidate : candidateList) { - this.maybeAdd(candidate); - } - } - - /** - * Send the updated cluster metadata to all {@link ClusterResourceListener}. - * @param cluster Cluster metadata - */ - public void onUpdate(ClusterResource cluster) { - for (ClusterResourceListener clusterResourceListener : clusterResourceListeners) { - clusterResourceListener.onUpdate(cluster); - } - } -} \ No newline at end of file diff --git a/clients/src/main/java/org/oracle/okafka/common/internals/KafkaFutureImpl.java b/clients/src/main/java/org/oracle/okafka/common/internals/KafkaFutureImpl.java deleted file mode 100644 index e859cea..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/internals/KafkaFutureImpl.java +++ /dev/null @@ -1,326 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. 
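The header classes removed above duplicated the Apache Kafka `Headers`/`RecordHeader` API. A minimal sketch of the same usage against the upstream `org.apache.kafka.common.header` package, which applications can presumably rely on instead (the key names below are illustrative):

```java
import java.nio.charset.StandardCharsets;

import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.internals.RecordHeaders;

public class HeaderUsageSketch {
    public static void main(String[] args) {
        // Build a mutable header collection, mirroring the API of the removed okafka copy.
        RecordHeaders headers = new RecordHeaders();
        headers.add("correlation-id", "42".getBytes(StandardCharsets.UTF_8));
        headers.add("correlation-id", "43".getBytes(StandardCharsets.UTF_8));

        // lastHeader returns the most recently added value for a key.
        Header last = headers.lastHeader("correlation-id");
        System.out.println(new String(last.value(), StandardCharsets.UTF_8)); // 43

        // toArray returns the headers in insertion order; mutating the array does not affect 'headers'.
        System.out.println(headers.toArray().length); // 2
    }
}
```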
-** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.internals; - -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.CancellationException; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; - -import org.oracle.okafka.common.KafkaFuture; - -/** - * A flexible future which supports call chaining and other asynchronous programming patterns. - * This will eventually become a thin shim on top of Java 8's CompletableFuture. - */ -public class KafkaFutureImpl extends KafkaFuture { - /** - * A convenience method that throws the current exception, wrapping it if needed. - * - * In general, KafkaFuture throws CancellationException and InterruptedException directly, and - * wraps all other exceptions in an ExecutionException. - */ - private static void wrapAndThrow(Throwable t) throws InterruptedException, ExecutionException { - if (t instanceof CancellationException) { - throw (CancellationException) t; - } else if (t instanceof InterruptedException) { - throw (InterruptedException) t; - } else { - throw new ExecutionException(t); - } - } - - private static class Applicant implements BiConsumer { - private final BaseFunction function; - private final KafkaFutureImpl future; - - Applicant(BaseFunction function, KafkaFutureImpl future) { - this.function = function; - this.future = future; - } - - @Override - public void accept(A a, Throwable exception) { - if (exception != null) { - future.completeExceptionally(exception); - } else { - try { - B b = function.apply(a); - future.complete(b); - } catch (Throwable t) { - future.completeExceptionally(t); - } - } - } - } - - private static class SingleWaiter implements BiConsumer { - private R value = null; - private Throwable exception = null; - private boolean done = false; - - @Override - public synchronized void accept(R newValue, Throwable newException) { - this.value = newValue; - this.exception = newException; - this.done = true; - this.notifyAll(); - } - - synchronized R await() throws InterruptedException, ExecutionException { - while (true) { - if (exception != null) - wrapAndThrow(exception); - if (done) - return value; - this.wait(); - } - } - - R await(long timeout, TimeUnit unit) - throws InterruptedException, ExecutionException, TimeoutException { - long startMs = System.currentTimeMillis(); - long waitTimeMs = unit.toMillis(timeout); - long delta = 0; - synchronized (this) { - while (true) { - if (exception != null) - wrapAndThrow(exception); - if (done) - return value; - if (delta >= waitTimeMs) { - throw new TimeoutException(); - } - 
this.wait(waitTimeMs - delta); - delta = System.currentTimeMillis() - startMs; - } - } - } - } - - /** - * True if this future is done. - */ - private boolean done = false; - - /** - * The value of this future, or null. Protected by the object monitor. - */ - private T value = null; - - /** - * The exception associated with this future, or null. Protected by the object monitor. - */ - private Throwable exception = null; - - /** - * A list of objects waiting for this future to complete (either successfully or - * exceptionally). Protected by the object monitor. - */ - private List> waiters = new ArrayList<>(); - - /** - * Returns a new KafkaFuture that, when this future completes normally, is executed with this - * futures's result as the argument to the supplied function. - */ - @Override - public KafkaFuture thenApply(BaseFunction function) { - KafkaFutureImpl future = new KafkaFutureImpl<>(); - addWaiter(new Applicant<>(function, future)); - return future; - } - - public void copyWith(KafkaFuture future, BaseFunction function) { - KafkaFutureImpl futureImpl = (KafkaFutureImpl) future; - futureImpl.addWaiter(new Applicant<>(function, this)); - } - - /** - * @See KafkaFutureImpl#thenApply(BaseFunction) - */ - @Override - public KafkaFuture thenApply(Function function) { - return thenApply((BaseFunction) function); - } - - private static class WhenCompleteBiConsumer implements BiConsumer { - private final KafkaFutureImpl future; - private final BiConsumer biConsumer; - - WhenCompleteBiConsumer(KafkaFutureImpl future, BiConsumer biConsumer) { - this.future = future; - this.biConsumer = biConsumer; - } - - @Override - public void accept(T val, Throwable exception) { - try { - if (exception != null) { - biConsumer.accept(null, exception); - } else { - biConsumer.accept(val, null); - } - } catch (Throwable e) { - if (exception == null) { - exception = e; - } - } - if (exception != null) { - future.completeExceptionally(exception); - } else { - future.complete(val); - } - } - } - - @Override - public KafkaFuture whenComplete(final BiConsumer biConsumer) { - final KafkaFutureImpl future = new KafkaFutureImpl<>(); - addWaiter(new WhenCompleteBiConsumer<>(future, biConsumer)); - return future; - } - - protected synchronized void addWaiter(BiConsumer action) { - if (exception != null) { - action.accept(null, exception); - } else if (done) { - action.accept(value, null); - } else { - waiters.add(action); - } - } - - @Override - public synchronized boolean complete(T newValue) { - List> oldWaiters = null; - synchronized (this) { - if (done) - return false; - value = newValue; - done = true; - oldWaiters = waiters; - waiters = null; - } - for (BiConsumer waiter : oldWaiters) { - waiter.accept(newValue, null); - } - return true; - } - - @Override - public boolean completeExceptionally(Throwable newException) { - List> oldWaiters = null; - synchronized (this) { - if (done) - return false; - exception = newException; - done = true; - oldWaiters = waiters; - waiters = null; - } - for (BiConsumer waiter : oldWaiters) { - waiter.accept(null, newException); - } - return true; - } - - /** - * If not already completed, completes this future with a CancellationException. Dependent - * futures that have not already completed will also complete exceptionally, with a - * CompletionException caused by this CancellationException. 
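The chaining behaviour documented here (`thenApply`, `whenComplete`, cancellation propagating to dependants) matches the upstream `org.apache.kafka.common.KafkaFuture`, which presumably replaces this removed shim. A minimal sketch of the pattern, using a pre-completed future as a stand-in for a real asynchronous result:

```java
import java.util.concurrent.ExecutionException;

import org.apache.kafka.common.KafkaFuture;

public class FutureChainingSketch {
    public static void main(String[] args) throws InterruptedException, ExecutionException {
        // Start from an already-completed future; in real code this would come from an Admin call.
        KafkaFuture<Integer> partitions = KafkaFuture.completedFuture(5);

        // thenApply: transform the eventual result once it is available.
        KafkaFuture<String> description = partitions.thenApply(p -> "topic has " + p + " partitions");

        // whenComplete: observe either the value or the failure without changing the result.
        description.whenComplete((value, error) -> {
            if (error != null) {
                System.err.println("lookup failed: " + error);
            } else {
                System.out.println(value);
            }
        });

        System.out.println(description.get()); // "topic has 5 partitions"
    }
}
```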
- */ - @Override - public synchronized boolean cancel(boolean mayInterruptIfRunning) { - if (completeExceptionally(new CancellationException())) - return true; - return exception instanceof CancellationException; - } - - /** - * Waits if necessary for this future to complete, and then returns its result. - */ - @Override - public T get() throws InterruptedException, ExecutionException { - SingleWaiter waiter = new SingleWaiter(); - addWaiter(waiter); - return waiter.await(); - } - - /** - * Waits if necessary for at most the given time for this future to complete, and then returns - * its result, if available. - */ - @Override - public T get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, - TimeoutException { - SingleWaiter waiter = new SingleWaiter(); - addWaiter(waiter); - return waiter.await(timeout, unit); - } - - /** - * Returns the result value (or throws any encountered exception) if completed, else returns - * the given valueIfAbsent. - */ - @Override - public synchronized T getNow(T valueIfAbsent) throws InterruptedException, ExecutionException { - if (exception != null) - wrapAndThrow(exception); - if (done) - return value; - return valueIfAbsent; - } - - /** - * Returns true if this CompletableFuture was cancelled before it completed normally. - */ - @Override - public synchronized boolean isCancelled() { - return (exception != null) && (exception instanceof CancellationException); - } - - /** - * Returns true if this CompletableFuture completed exceptionally, in any way. - */ - @Override - public synchronized boolean isCompletedExceptionally() { - return exception != null; - } - - /** - * Returns true if completed in any fashion: normally, exceptionally, or via cancellation. - */ - @Override - public synchronized boolean isDone() { - return done; - } - - @Override - public String toString() { - return String.format("KafkaFuture{value=%s,exception=%s,done=%b}", value, exception, done); - } -} diff --git a/clients/src/main/java/org/oracle/okafka/common/internals/PartitionData.java b/clients/src/main/java/org/oracle/okafka/common/internals/PartitionData.java new file mode 100644 index 0000000..5d3442e --- /dev/null +++ b/clients/src/main/java/org/oracle/okafka/common/internals/PartitionData.java @@ -0,0 +1,73 @@ +package org.oracle.okafka.common.internals; + +import org.apache.kafka.common.TopicPartition; + +public class PartitionData { + private TopicPartition topicPartition; + private int queueId; + private String subscriberName; + private int subscriberId; + private int ownerInstanceId; + private boolean local; + + public PartitionData(String queueName, int queueId, int partitionId, + String subName, int subId, int ownerInstanceId, boolean local) { + this.topicPartition = new TopicPartition(queueName, partitionId); + this.queueId = queueId; + this.subscriberName = subName; + this.subscriberId = subId; + this.ownerInstanceId = ownerInstanceId; + this.local = local; + } + public String toString() + { + if(topicPartition == null) + return "NULL"; + + return "{Topic:"+topicPartition.topic()+",ConsumerGroupID:"+subscriberName+ + ",Partition:"+topicPartition.partition()+",OwnerInstance:"+ownerInstanceId+",}"; + } + + public TopicPartition getTopicPartition() { + return this.topicPartition; + } + public int getOwnerInstanceId() { + return this.ownerInstanceId; + } + + public void setOwnerInstanceId(int instId) + { + this.ownerInstanceId = instId; + } + + public int getQueueId() { + return this.queueId; + } + + public String getSubName() { + return 
this.subscriberName; + } + + public int getSubId() { + return this.subscriberId; + } + + public void setLocal(boolean _local) + { + local = _local; + } + public boolean getLocal() + { + return local; + } + + public boolean equals(Object obj) + { + if(!(obj instanceof PartitionData)) + return false; + + PartitionData tPart = (PartitionData)obj; + return this.topicPartition.equals(tPart.topicPartition); + } + +} diff --git a/clients/src/main/java/org/oracle/okafka/common/internals/PartitionStates.java b/clients/src/main/java/org/oracle/okafka/common/internals/PartitionStates.java deleted file mode 100644 index 5beb8e3..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/internals/PartitionStates.java +++ /dev/null @@ -1,184 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.internals; - -import java.util.ArrayList; -import java.util.HashSet; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.Set; - -import org.oracle.okafka.common.TopicPartition; - -/** - * This class is a useful building block for doing fetch requests where topic partitions have to be rotated via - * round-robin to ensure fairness and some level of determinism given the existence of a limit on the fetch response - * size. Because the serialization of fetch requests is more efficient if all partitions for the same topic are grouped - * together, we do such grouping in the method `set`. - * - * As partitions are moved to the end, the same topic may be repeated more than once. In the optimal case, a single - * topic would "wrap around" and appear twice. However, as partitions are fetched in different orders and partition - * leadership changes, we will deviate from the optimal. If this turns out to be an issue in practice, we can improve - * it by tracking the partitions per node or calling `set` every so often. - */ -public class PartitionStates { - - private final LinkedHashMap map = new LinkedHashMap<>(); - - public PartitionStates() {} - - public void moveToEnd(TopicPartition topicPartition) { - S state = map.remove(topicPartition); - if (state != null) - map.put(topicPartition, state); - } - - public void updateAndMoveToEnd(TopicPartition topicPartition, S state) { - map.remove(topicPartition); - map.put(topicPartition, state); - } - - public void remove(TopicPartition topicPartition) { - map.remove(topicPartition); - } - - /** - * Returns the partitions in random order. 
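The round-robin fairness described in the `PartitionStates` javadoc boils down to `LinkedHashMap` insertion order: `moveToEnd` removes and re-inserts a partition so that the longest-waiting partitions are fetched first. A self-contained sketch of that rotation (the topic and partition names are made up):

```java
import java.util.LinkedHashMap;

public class RoundRobinSketch {
    // Mimics PartitionStates.moveToEnd(): remove + re-insert pushes the key to the tail,
    // so the next fetch starts from the partitions that have waited longest.
    static <K, V> void moveToEnd(LinkedHashMap<K, V> map, K key) {
        V state = map.remove(key);
        if (state != null) {
            map.put(key, state);
        }
    }

    public static void main(String[] args) {
        LinkedHashMap<String, Long> fetchOffsets = new LinkedHashMap<>();
        fetchOffsets.put("TXEQ-0", 0L);
        fetchOffsets.put("TXEQ-1", 0L);
        fetchOffsets.put("TXEQ-2", 0L);

        // After fetching from TXEQ-0, rotate it to the end for fairness.
        moveToEnd(fetchOffsets, "TXEQ-0");
        System.out.println(fetchOffsets.keySet()); // [TXEQ-1, TXEQ-2, TXEQ-0]
    }
}
```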
- */ - public Set partitionSet() { - return new HashSet<>(map.keySet()); - } - - public void clear() { - map.clear(); - } - - public boolean contains(TopicPartition topicPartition) { - return map.containsKey(topicPartition); - } - - /** - * Returns the partition states in order. - */ - public List> partitionStates() { - List> result = new ArrayList<>(); - for (Map.Entry entry : map.entrySet()) { - result.add(new PartitionState<>(entry.getKey(), entry.getValue())); - } - return result; - } - - /** - * Returns the partition state values in order. - */ - public List partitionStateValues() { - return new ArrayList<>(map.values()); - } - - public S stateValue(TopicPartition topicPartition) { - return map.get(topicPartition); - } - - public int size() { - return map.size(); - } - - /** - * Update the builder to have the received map as its state (i.e. the previous state is cleared). The builder will - * "batch by topic", so if we have a, b and c, each with two partitions, we may end up with something like the - * following (the order of topics and partitions within topics is dependent on the iteration order of the received - * map): a0, a1, b1, b0, c0, c1. - */ - public void set(Map partitionToState) { - map.clear(); - update(partitionToState); - } - - public void add(TopicPartition tp, S state) { - map.put(tp, state); - } - private void update(Map partitionToState) { - LinkedHashMap> topicToPartitions = new LinkedHashMap<>(); - for (TopicPartition tp : partitionToState.keySet()) { - List partitions = topicToPartitions.get(tp.topic()); - if (partitions == null) { - partitions = new ArrayList<>(); - topicToPartitions.put(tp.topic(), partitions); - } - partitions.add(tp); - } - for (Map.Entry> entry : topicToPartitions.entrySet()) { - for (TopicPartition tp : entry.getValue()) { - S state = partitionToState.get(tp); - map.put(tp, state); - } - } - } - - public static class PartitionState { - private final TopicPartition topicPartition; - private final S value; - - public PartitionState(TopicPartition topicPartition, S state) { - this.topicPartition = Objects.requireNonNull(topicPartition); - this.value = Objects.requireNonNull(state); - } - - public S value() { - return value; - } - - @Override - public boolean equals(Object o) { - if (this == o) - return true; - if (o == null || getClass() != o.getClass()) - return false; - - PartitionState that = (PartitionState) o; - - return topicPartition.equals(that.topicPartition) && value.equals(that.value); - } - - @Override - public int hashCode() { - int result = topicPartition.hashCode(); - result = 31 * result + value.hashCode(); - return result; - } - - public TopicPartition topicPartition() { - return topicPartition; - } - - @Override - public String toString() { - return "PartitionState(" + topicPartition + "=" + value + ')'; - } - } - -} diff --git a/clients/src/main/java/org/oracle/okafka/common/internals/QPATInfo.java b/clients/src/main/java/org/oracle/okafka/common/internals/QPATInfo.java new file mode 100644 index 0000000..9e9678b --- /dev/null +++ b/clients/src/main/java/org/oracle/okafka/common/internals/QPATInfo.java @@ -0,0 +1,209 @@ +package org.oracle.okafka.common.internals; +import java.math.BigDecimal; +import java.sql.Connection; +import java.sql.SQLException; +import oracle.jdbc.OracleConnection; +import oracle.jdbc.OracleData; +import oracle.jdbc.OracleDataFactory; +import oracle.jdbc.OracleStruct; +import oracle.jdbc.internal.ObjectData; +import oracle.jdbc.internal.OracleTypes; +import oracle.jpub.runtime.OracleDataMutableStruct; 
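The `QPATInfo` class that follows (together with `QPATInfoList`, `QPIMInfo` and `QPIMInfoList` later in this change) maps the `SYS.AQ$_QPAT_INFO` object type used for TxEventQ partition assignment onto the Oracle JDBC `OracleData`/`OracleDataFactory` contract. A hedged sketch of how such a mapping can be populated and converted into a `java.sql.Struct` for binding; the connection URL, credentials and attribute values are placeholders, and the connected user must be able to describe `SYS.AQ$_QPAT_INFO`:

```java
import java.sql.Struct;

import oracle.jdbc.OracleConnection;
import oracle.jdbc.pool.OracleDataSource;

import org.oracle.okafka.common.internals.QPATInfo;

public class QpatInfoSketch {
    public static void main(String[] args) throws Exception {
        // Placeholder connection details; substitute your own TNS alias / wallet configuration.
        OracleDataSource ods = new OracleDataSource();
        ods.setURL("jdbc:oracle:thin:@//dbhost:1521/FREEPDB1");
        ods.setUser("okafka_user");
        ods.setPassword("okafka_password");

        try (OracleConnection conn = (OracleConnection) ods.getConnection()) {
            QPATInfo info = new QPATInfo();
            info.setConnection(conn);          // needed so the TIMESTAMPTZ attribute can be built
            info.setSchema("OKAFKA_USER");
            info.setQueueName("TXEQ");
            info.setQueueId(1);
            info.setSubscriberName("MY_CONSUMER_GROUP");
            info.setSubscriberId(1);
            info.setSessionId(101L);
            info.setGroupLeader(0);
            info.setPartitionId(0);
            info.setFlags(0);
            info.setVersion(1);
            info.setInstId(1);
            info.setAuditId(0L);
            info.setTimeStamp(new java.sql.Time(System.currentTimeMillis()));

            // toJDBCObject builds a SYS.AQ$_QPAT_INFO struct that can be bound to a PL/SQL call.
            Struct struct = (Struct) info.toJDBCObject(conn);
            System.out.println(struct.getSQLTypeName()); // SYS.AQ$_QPAT_INFO
        }
    }
}
```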
+public class QPATInfo implements OracleData, OracleDataFactory, ObjectData { + public static final String _SQL_NAME = "SYS.AQ$_QPAT_INFO"; + public static final int _SQL_TYPECODE = OracleTypes.STRUCT; + static int[] _sqlType = + { + 12, 12, 4, 12, 4, 4, 4, 4, 4, 4, 4, 4, -101 + }; + static OracleDataFactory[] _factory = new OracleDataFactory[13]; + public static QPATInfo _QPATInfo_Factory = new QPATInfo(); + + OracleDataMutableStruct _struct; + + private OracleConnection con = null; + public static OracleDataFactory getFactory() { + return _QPATInfo_Factory; + } + + public QPATInfo() { + _struct = new OracleDataMutableStruct(new Object[13], _sqlType, _factory); + } + public String name; + @Override + public OracleData create(Object d, int sqlType) throws SQLException { + if (d == null) return null; + QPATInfo o = new QPATInfo(); + if( d instanceof QPATInfo){ + o.shallowCopy((QPATInfo)d); + }else{ + o._struct = new OracleDataMutableStruct((OracleStruct) d, _sqlType, _factory); + } + return o; + } + @Override + public Object toJDBCObject(Connection con) throws SQLException { + Object[] attrbs = new Object[13]; + attrbs[0] = getSchema(); + attrbs[1] = getQueueName(); + attrbs[2] = getQueueId(); + attrbs[3] = getSubscriberName(); + attrbs[4] = getSubscriberId(); + attrbs[5] = getSessionId(); + attrbs[6] = getGroupLeader(); + attrbs[7] = getPartitionId(); + attrbs[8] = getFlags(); + attrbs[9] = getVersion(); + attrbs[10] = getInstId(); + attrbs[11] = getAuditId(); + attrbs[12] = getOracleTimeStamp(); + return con.createStruct(_SQL_NAME, attrbs); + } + public String toString() + { + try { + if(_struct != null) + { + String str = "{ Schema:" + getSchema()+",Topic:"+getQueueName()+",ConsumerGroupId:"+getSubscriberName()+ + ",GroupLeader:"+getGroupLeader()+",Partition:"+getPartitionId()+",Version"+getVersion()+ + ",Flags:"+getFlags()+"}"; + return str; + + } + }catch(Exception e) + { + System.out.println("Exception from toString in QPATINFO " + e.getMessage()); + e.printStackTrace(); + return "NULL " + e.getMessage(); + } + return "NULL"; + + } + + void shallowCopy(QPATInfo d) throws SQLException { + _struct = d._struct; + } + public void setSchema(String schema) throws SQLException{ + _struct.setAttribute(0, schema); + } + + public String getSchema() throws SQLException{ + return (String)_struct.getAttribute(0); + } + + public void setQueueName(String name) throws SQLException{ + _struct.setAttribute(1, name); + } + + public String getQueueName() throws SQLException{ + return (String)_struct.getAttribute(1); + } + + public void setQueueId(int id) throws SQLException{ + _struct.setAttribute(2, new BigDecimal(id)); + } + + public Integer getQueueId() throws SQLException{ + return (Integer)((BigDecimal)_struct.getAttribute(2)).intValue(); + } + + public void setSubscriberName(String name) throws SQLException{ + _struct.setAttribute(3, name); + } + + public String getSubscriberName() throws SQLException{ + return (String)_struct.getAttribute(3); + } + + public void setSubscriberId(int id) throws SQLException{ + _struct.setAttribute(4, new BigDecimal(id)); + } + + public Integer getSubscriberId() throws SQLException{ + return (Integer)((BigDecimal)_struct.getAttribute(4)).intValue(); + } + + public void setSessionId(long session) throws SQLException{ + _struct.setAttribute(5, new BigDecimal(session)); + } + + public Long getSessionId() throws SQLException{ + return (Long)((BigDecimal)_struct.getAttribute(5)).longValue(); + } + + public void setGroupLeader(int leader) throws SQLException{ + 
_struct.setAttribute(6, new BigDecimal(leader)); + } + + public Integer getGroupLeader() throws SQLException{ + return (Integer)((BigDecimal)_struct.getAttribute(6)).intValue(); + } + + public void setPartitionId(int partition) throws SQLException{ + _struct.setAttribute(7, new BigDecimal(partition)); + } + + public Integer getPartitionId() throws SQLException{ + return (Integer)((BigDecimal)_struct.getAttribute(7)).intValue(); + } + + public void setFlags(int flags) throws SQLException { + _struct.setAttribute(8, new BigDecimal(flags)); + } + + public Integer getFlags() throws SQLException { + if((BigDecimal)_struct.getAttribute(8) == null) + { + return 0; + } + return (Integer)((BigDecimal)_struct.getAttribute(8)).intValue(); + } + + public void setVersion(int version) throws SQLException{ + _struct.setAttribute(9, new BigDecimal(version)); + } + + public Integer getVersion() throws SQLException{ + return (Integer)((BigDecimal)_struct.getAttribute(9)).intValue(); + } + + public void setInstId(int inst) throws SQLException{ + _struct.setAttribute(10, new BigDecimal(inst)); + } + + public Integer getInstId() throws SQLException{ + return (Integer)((BigDecimal)_struct.getAttribute(10)).intValue(); + } + + public void setAuditId(long auditId) throws SQLException{ + _struct.setAttribute(11, new BigDecimal(auditId)); + } + + public Long getAuditId() throws SQLException{ + return (Long)((BigDecimal)_struct.getAttribute(11)).longValue(); + } + + public void setTimeStamp(java.sql.Time time) throws SQLException{ + if(con != null) + _struct.setAttribute(12, new oracle.sql.TIMESTAMPTZ(con, time)); + else _struct.setAttribute(12, new oracle.sql.TIMESTAMPTZ()); + } + + public java.sql.Time getTimeStamp() throws SQLException{ + return (java.sql.Time)((oracle.sql.TIMESTAMPTZ)_struct.getAttribute(12)).timeValue(); + } + + private oracle.sql.TIMESTAMPTZ getOracleTimeStamp() throws SQLException { + return (oracle.sql.TIMESTAMPTZ)_struct.getAttribute(12); + + } + + public void setConnection(OracleConnection conn) + { + con = conn; + } + + public OracleConnection getConnection() { + return con; + } +} \ No newline at end of file diff --git a/clients/src/main/java/org/oracle/okafka/common/internals/QPATInfoList.java b/clients/src/main/java/org/oracle/okafka/common/internals/QPATInfoList.java new file mode 100644 index 0000000..7fe6ae7 --- /dev/null +++ b/clients/src/main/java/org/oracle/okafka/common/internals/QPATInfoList.java @@ -0,0 +1,75 @@ +package org.oracle.okafka.common.internals; +import java.sql.Connection; +import java.sql.SQLException; +import org.oracle.okafka.common.internals.QPATInfo; +import oracle.jdbc.OracleArray; +import oracle.jdbc.OracleData; +import oracle.jdbc.OracleDataFactory; +import oracle.jdbc.internal.OracleTypes; +import oracle.jpub.runtime.OracleDataMutableArray; +public class QPATInfoList implements OracleData, OracleDataFactory { + public static final String _SQL_NAME = "SYS.AQ$_QPAT_INFO_LIST"; + public static final int _SQL_TYPECODE = OracleTypes.ARRAY; + OracleDataMutableArray _array; + private static final QPATInfoList _QPATInfoList_Factory = new QPATInfoList(); + + public static OracleDataFactory getOracleDataFactory() { + return _QPATInfoList_Factory; + } + public QPATInfoList() + { + this((QPATInfo[])null); + } + public QPATInfoList(QPATInfo[] a) + { + _array = new OracleDataMutableArray(2002, a, QPATInfo.getFactory()); + } + @Override + public OracleData create(Object d, int sqlType) throws SQLException { + if (d == null) return null; + QPATInfoList a = new 
QPATInfoList(); + a._array = new OracleDataMutableArray(2002, (OracleArray) d, QPATInfo.getFactory()); + return a; + } + @Override + public Object toJDBCObject(Connection con) throws SQLException { + return _array.toJDBCObject(con, _SQL_NAME); + } + + public int length() throws SQLException { + return _array.length(); + } + public int getBaseType() throws SQLException{ + return _array.getBaseType(); + } + public String getBaseTypeName() throws SQLException + { + return _array.getBaseTypeName(); + } + public QPATInfo[] getArray() throws SQLException + { + return (QPATInfo[]) _array.getObjectArray( + new QPATInfo[_array.length()]); + } + public void setArray(QPATInfo[] a) throws SQLException + { + _array.setObjectArray(a); + } + public QPATInfo[] getArray(long index, int count) throws SQLException + { + return (QPATInfo[]) _array.getObjectArray(index, + new QPATInfo[_array.sliceLength(index, count)]); + } + public void setArray(QPATInfo[] a, long index) throws SQLException + { + _array.setObjectArray(a, index); + } + public QPATInfo getElement(long index) throws SQLException + { + return (QPATInfo) _array.getObjectElement(index); + } + public void setElement(QPATInfo a, long index) throws SQLException + { + _array.setObjectElement(a, index); + } +} \ No newline at end of file diff --git a/clients/src/main/java/org/oracle/okafka/common/internals/QPIMInfo.java b/clients/src/main/java/org/oracle/okafka/common/internals/QPIMInfo.java new file mode 100644 index 0000000..13049ff --- /dev/null +++ b/clients/src/main/java/org/oracle/okafka/common/internals/QPIMInfo.java @@ -0,0 +1,101 @@ +package org.oracle.okafka.common.internals; +import java.math.BigDecimal; +import java.sql.Connection; +import java.sql.SQLException; +import oracle.jdbc.OracleConnection; +import oracle.jdbc.OracleData; +import oracle.jdbc.OracleDataFactory; +import oracle.jdbc.OracleStruct; +import oracle.jdbc.internal.ObjectData; +import oracle.jdbc.internal.OracleTypes; +import oracle.jpub.runtime.OracleDataMutableStruct; +public class QPIMInfo implements OracleData, OracleDataFactory, ObjectData { + public static final String _SQL_NAME = "SYS.AQ$_QPIM_INFO"; + public static final int _SQL_TYPECODE = OracleTypes.STRUCT; + static int[] _sqlType = + { + 12, 12, 4, 4 + }; + static OracleDataFactory[] _factory = new OracleDataFactory[4]; + public static QPIMInfo _QPIMInfo_Factory = new QPIMInfo(); + + OracleDataMutableStruct _struct; + + private OracleConnection con = null; + public static OracleDataFactory getFactory() { + return _QPIMInfo_Factory; + } + + public QPIMInfo() { + _struct = new OracleDataMutableStruct(new Object[4], _sqlType, _factory); + } + @Override + public OracleData create(Object d, int sqlType) throws SQLException { + if (d == null) return null; + QPIMInfo o = new QPIMInfo(); + if( d instanceof QPIMInfo){ + o.shallowCopy((QPIMInfo)d); + }else{ + o._struct = new OracleDataMutableStruct((OracleStruct) d, _sqlType, _factory); + } + return o; + } + @Override + public Object toJDBCObject(Connection con) throws SQLException { + + Object[] attrbs = new Object[13]; + attrbs[0] = getOwner(); + attrbs[1] = getQueueName(); + attrbs[2] = getPartitionId(); + attrbs[3] = getOwnerInstId(); + return con.createStruct(_SQL_NAME, attrbs); + } + + public String toString() + { + if(_struct == null) + return null; + try + { + String str = "{OwnerInstance:"+getOwner()+",TopicName:"+getQueueName()+",Partition:"+getPartitionId()+",OwnerInstanceID:"+getOwnerInstId()+"}"; + return str; + }catch(Exception e) + { + return "Null " 
+e.getMessage(); + } + } + + void shallowCopy(QPIMInfo d) throws SQLException { + _struct = d._struct; + } + public void setOwner(String owner) throws SQLException{ + _struct.setAttribute(0, owner); + } + + public String getOwner() throws SQLException{ + return (String)_struct.getAttribute(0); + } + + public void setQueueName(String name) throws SQLException{ + _struct.setAttribute(1, name); + } + + public String getQueueName() throws SQLException{ + return (String)_struct.getAttribute(1); + } + public void setPartitionId(int partition) throws SQLException{ + _struct.setAttribute(2, new BigDecimal(partition)); + } + + public Integer getPartitionId() throws SQLException{ + return (Integer)((BigDecimal)_struct.getAttribute(2)).intValue(); + } + + public void setOwnerInstId(int inst) throws SQLException{ + _struct.setAttribute(3, new BigDecimal(inst)); + } + + public Integer getOwnerInstId() throws SQLException{ + return (Integer)((BigDecimal)_struct.getAttribute(3)).intValue(); + } +} \ No newline at end of file diff --git a/clients/src/main/java/org/oracle/okafka/common/internals/QPIMInfoList.java b/clients/src/main/java/org/oracle/okafka/common/internals/QPIMInfoList.java new file mode 100644 index 0000000..87bcc4c --- /dev/null +++ b/clients/src/main/java/org/oracle/okafka/common/internals/QPIMInfoList.java @@ -0,0 +1,76 @@ +package org.oracle.okafka.common.internals; +import java.sql.Connection; +import java.sql.SQLException; +import org.oracle.okafka.common.internals.QPIMInfo; +import oracle.jdbc.OracleArray; +import oracle.jdbc.OracleData; +import oracle.jdbc.OracleDataFactory; +import oracle.jdbc.internal.OracleTypes; +import oracle.jpub.runtime.OracleDataMutableArray; +public class QPIMInfoList implements OracleData, OracleDataFactory { + public static final String _SQL_NAME = "SYS.AQ$_QPIM_INFO_LIST"; + public static final int _SQL_TYPECODE = OracleTypes.ARRAY; + OracleDataMutableArray _array; + private static final QPIMInfoList _QPIMInfoList_Factory = new QPIMInfoList(); + + public static OracleDataFactory getOracleDataFactory() { + return _QPIMInfoList_Factory; + } + public QPIMInfoList() + { + this((QPIMInfo[])null); + } + public QPIMInfoList(QPIMInfo[] a) + { + _array = new OracleDataMutableArray(2002, a, QPIMInfo.getFactory()); + } + @Override + public OracleData create(Object d, int sqlType) throws SQLException { + if (d == null) return null; + QPIMInfoList a = new QPIMInfoList(); + a._array = new OracleDataMutableArray(2002, (OracleArray) d, QPIMInfo.getFactory()); + return a; + } + @Override + public Object toJDBCObject(Connection con) throws SQLException { + return _array.toJDBCObject(con, _SQL_NAME); + } + + public int length() throws SQLException { + return _array.length(); + } + public int getBaseType() throws SQLException{ + return _array.getBaseType(); + } + public String getBaseTypeName() throws SQLException + { + return _array.getBaseTypeName(); + } + public QPIMInfo[] getArray() throws SQLException + { + return (QPIMInfo[]) _array.getObjectArray( + new QPIMInfo[_array.length()]); + } + public void setArray(QPIMInfo[] a) throws SQLException + { + _array.setObjectArray(a); + } + public QPIMInfo[] getArray(long index, int count) throws SQLException + { + return (QPIMInfo[]) _array.getObjectArray(index, + new QPIMInfo[_array.sliceLength(index, count)]); + } + public void setArray(QPIMInfo[] a, long index) throws SQLException + { + _array.setObjectArray(a, index); + } + public QPIMInfo getElement(long index) throws SQLException + { + return (QPIMInfo) 
_array.getObjectElement(index); + } + public void setElement(QPIMInfo a, long index) throws SQLException + { + _array.setObjectElement(a, index); + } + +} \ No newline at end of file diff --git a/clients/src/main/java/org/oracle/okafka/common/internals/SessionData.java b/clients/src/main/java/org/oracle/okafka/common/internals/SessionData.java new file mode 100644 index 0000000..df817fb --- /dev/null +++ b/clients/src/main/java/org/oracle/okafka/common/internals/SessionData.java @@ -0,0 +1,186 @@ +package org.oracle.okafka.common.internals; + +import java.util.ArrayList; +import java.util.Date; +import java.util.List; + +import org.oracle.okafka.common.internals.PartitionData; + +public class SessionData implements Comparable{ + + public String name; // instId_sid + private String schema; + private String subscribedtopic; + private String subscriberName; + private long sessionId; + private int instanceId; + private long auditId; + private int subscriberId; + private int leader; + private int version; + public Date createTime; // Session creation time + private int flags; + private int queueId; + + public int pendingCnt; + public boolean oneMore = false; + private List assignedPartitions; + private List previousPartitions; + + private boolean invalid = false; + + public SessionData(long sessionId, int instanceId, String schema, String subTopic,int queueId, + String subscriberName,int subscriberId, Date createTime, int leader, int version, long auditId) { + this.sessionId = sessionId; + this.instanceId = instanceId; + this.schema = schema; + this.subscribedtopic = subTopic; + this.subscriberName = subscriberName; + this.leader = leader; + this.version = version; + this.auditId = auditId; + this.subscriberId = subscriberId; + this.name = this.instanceId+"_"+this.sessionId; + this.assignedPartitions = new ArrayList<>(); + this.previousPartitions = new ArrayList<>(); + this.flags = -1; + this.queueId = queueId; + + } + + public String toString() + { + String str = "Session:"+sessionId+",Instance:"+instanceId+",SubscribedTopic:"+subscribedtopic+ + ",Leader"+leader+",Version"+version; + String partitionListStr="Partitions:["; + for(PartitionData pData: assignedPartitions) + { + if(pData == null) + partitionListStr+="NULL"; + else + partitionListStr += pData.toString()+","; + } + str = str+","+partitionListStr+"]"; + return str; + } + + public int getInstanceId() { + return this.instanceId; + } + + public long getSessionId() { + return this.sessionId; + } + + public String getSubscribedTopics() { + return this.subscribedtopic; + } + + public String getSchema() { + return this.schema; + } + public int getVersion() { + return this.version; + } + + public int getLeader() { + return this.leader; + } + + public long getAuditId() { + return this.auditId; + } + public List getAssignedPartitions() { + return this.assignedPartitions; + } + + public void addAssignedPartitions(PartitionData pd) { + //System.out.println("addAssignedPartitions:: Partition " +pd.getTopicPartition().partition() + " assigned to " + this.name +" Pending " + this.pendingCnt); + this.assignedPartitions.add(pd); + } + + public void setAssignedPartitions(List pds) { + this.assignedPartitions.addAll(pds); + } + + public void setPreviousPartitions(PartitionData pd) { + this.previousPartitions.add(pd); + } + + public void setPreviousPartitions(List pds) { + this.previousPartitions.addAll(pds); + } + + public List getPreviousPartitions() { + return previousPartitions; + } + + public String getSubscriberName() + { + return subscriberName; 
+ } + public int getSubscriberId() + { + return subscriberId; + } + + public int getFlags() + { + return flags; + } + public void setFlags(int flags) + { + this.flags = flags; + } + public void setQueueId(int queueId) + { + this.queueId = queueId; + } + public int getQueueId() + { + return queueId; + } + + @Override + public boolean equals(Object obj) + { + if(!(obj instanceof SessionData)) + return false; + + SessionData sData = (SessionData)obj; + + if(this.sessionId == sData.sessionId && this.instanceId == sData.instanceId) + return true; + + return false; + } + + @Override + public int compareTo(SessionData sData) + { + if(this.auditId < sData.auditId) + return -1; + else if (this.auditId > sData.auditId) + return 1; + else + return 0; + } + + public boolean isInvalid() + { + return invalid; + } + /* Session data to be invalidated when one database connection fails and + * we fail-over and restart a new database connection. + * This will eventually trigger rebalancing and we will pass version = -1 this time. + */ + public void invalidSessionData() + { + this.invalid=true; + this.version = -1; + assignedPartitions.clear(); + this.leader = -1; + } + + +} \ No newline at end of file diff --git a/clients/src/main/java/org/oracle/okafka/common/metrics/CompoundStat.java b/clients/src/main/java/org/oracle/okafka/common/metrics/CompoundStat.java deleted file mode 100644 index 44af7cb..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/metrics/CompoundStat.java +++ /dev/null @@ -1,60 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.metrics; - -import java.util.List; - -import org.oracle.okafka.common.MetricName; - -/** - * A compound stat is a stat where a single measurement and associated data structure feeds many metrics. This is the - * example for a histogram which has many associated percentiles. 
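Returning to the internals added earlier in this change: `SessionData` and `PartitionData` appear to be the bookkeeping okafka uses to record which database session owns which TxEventQ partition, and `invalidSessionData()` is what forces a rebalance after connection fail-over. A minimal sketch with hypothetical values:

```java
import java.util.Date;

import org.oracle.okafka.common.internals.PartitionData;
import org.oracle.okafka.common.internals.SessionData;

public class SessionDataSketch {
    public static void main(String[] args) {
        // Hypothetical values: database session 101 on instance 1, member of consumer group MY_GROUP.
        SessionData session = new SessionData(
                101L,            // sessionId
                1,               // instanceId
                "OKAFKA_USER",   // schema
                "TXEQ",          // subscribed topic
                1,               // queueId
                "MY_GROUP",      // subscriberName
                1,               // subscriberId
                new Date(),      // createTime
                0,               // leader
                1,               // version
                0L);             // auditId

        // Partition 0 of TXEQ is owned by instance 1 and is local to this session.
        PartitionData p0 = new PartitionData("TXEQ", 1, 0, "MY_GROUP", 1, 1, true);
        session.addAssignedPartitions(p0);

        System.out.println(session);

        // On connection fail-over the session data is invalidated, which clears the
        // assignment and forces a rebalance with version = -1 (see invalidSessionData()).
        session.invalidSessionData();
        System.out.println(session.getVersion()); // -1
    }
}
```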
- */ -public interface CompoundStat extends Stat { - - public List stats(); - - public static class NamedMeasurable { - - private final MetricName name; - private final Measurable stat; - - public NamedMeasurable(MetricName name, Measurable stat) { - super(); - this.name = name; - this.stat = stat; - } - - public MetricName name() { - return name; - } - - public Measurable stat() { - return stat; - } - - } - -} diff --git a/clients/src/main/java/org/oracle/okafka/common/metrics/Gauge.java b/clients/src/main/java/org/oracle/okafka/common/metrics/Gauge.java deleted file mode 100644 index 04252e9..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/metrics/Gauge.java +++ /dev/null @@ -1,39 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.metrics; - -/** - * A gauge metric is an instantaneous reading of a particular value. - */ -public interface Gauge extends MetricValueProvider { - - /** - * Returns the current value associated with this gauge. - * @param config The configuration for this metric - * @param now The POSIX time in milliseconds the measurement is being taken - */ - T value(MetricConfig config, long now); - -} diff --git a/clients/src/main/java/org/oracle/okafka/common/metrics/JmxReporter.java b/clients/src/main/java/org/oracle/okafka/common/metrics/JmxReporter.java deleted file mode 100644 index 358f45b..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/metrics/JmxReporter.java +++ /dev/null @@ -1,261 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
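The `JmxReporter` removed below exposed every metric group as a dynamic MBean named `<prefix>:type=<group>,<key1>=<val1>,...` (see `getMBeanName`). A small JDK-only sketch that lists any such MBeans registered in the current JVM; the prefix and group pattern are illustrative:

```java
import java.lang.management.ManagementFactory;

import javax.management.MBeanServer;
import javax.management.ObjectName;

public class JmxNameSketch {
    public static void main(String[] args) throws Exception {
        // The reporter builds names as <prefix>:type=<metric group>,<tagKey>=<tagValue>,...
        // e.g. "kafka.consumer:type=consumer-fetch-manager-metrics,client-id=consumer-1".
        MBeanServer server = ManagementFactory.getPlatformMBeanServer();
        ObjectName pattern = new ObjectName("kafka.consumer:type=*,client-id=*");
        server.queryNames(pattern, null).forEach(System.out::println);
    }
}
```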
- */ - -package org.oracle.okafka.common.metrics; - -import java.lang.management.ManagementFactory; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import javax.management.Attribute; -import javax.management.AttributeList; -import javax.management.AttributeNotFoundException; -import javax.management.DynamicMBean; -import javax.management.InvalidAttributeValueException; -import javax.management.JMException; -import javax.management.MBeanAttributeInfo; -import javax.management.MBeanException; -import javax.management.MBeanInfo; -import javax.management.MBeanServer; -import javax.management.MalformedObjectNameException; -import javax.management.ObjectName; -import javax.management.ReflectionException; - -import org.oracle.okafka.common.KafkaException; -import org.oracle.okafka.common.MetricName; -import org.oracle.okafka.common.utils.Sanitizer; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Register metrics in JMX as dynamic mbeans based on the metric names - */ -public class JmxReporter implements MetricsReporter { - - private static final Logger log = LoggerFactory.getLogger(JmxReporter.class); - private static final Object LOCK = new Object(); - private String prefix; - private final Map mbeans = new HashMap(); - - public JmxReporter() { - this(""); - } - - /** - * Create a JMX reporter that prefixes all metrics with the given string. - */ - public JmxReporter(String prefix) { - this.prefix = prefix; - } - - @Override - public void configure(Map configs) {} - - @Override - public void init(List metrics) { - synchronized (LOCK) { - for (KafkaMetric metric : metrics) - addAttribute(metric); - for (KafkaMbean mbean : mbeans.values()) - reregister(mbean); - } - } - - boolean containsMbean(String mbeanName) { - return mbeans.containsKey(mbeanName); - } - @Override - public void metricChange(KafkaMetric metric) { - synchronized (LOCK) { - KafkaMbean mbean = addAttribute(metric); - reregister(mbean); - } - } - - @Override - public void metricRemoval(KafkaMetric metric) { - synchronized (LOCK) { - MetricName metricName = metric.metricName(); - String mBeanName = getMBeanName(prefix, metricName); - KafkaMbean mbean = removeAttribute(metric, mBeanName); - if (mbean != null) { - if (mbean.metrics.isEmpty()) { - unregister(mbean); - mbeans.remove(mBeanName); - } else - reregister(mbean); - } - } - } - - private KafkaMbean removeAttribute(KafkaMetric metric, String mBeanName) { - MetricName metricName = metric.metricName(); - KafkaMbean mbean = this.mbeans.get(mBeanName); - if (mbean != null) - mbean.removeAttribute(metricName.name()); - return mbean; - } - - private KafkaMbean addAttribute(KafkaMetric metric) { - try { - MetricName metricName = metric.metricName(); - String mBeanName = getMBeanName(prefix, metricName); - if (!this.mbeans.containsKey(mBeanName)) - mbeans.put(mBeanName, new KafkaMbean(mBeanName)); - KafkaMbean mbean = this.mbeans.get(mBeanName); - mbean.setAttribute(metricName.name(), metric); - return mbean; - } catch (JMException e) { - throw new KafkaException("Error creating mbean attribute for metricName :" + metric.metricName(), e); - } - } - - /** - * @param metricName - * @return standard JMX MBean name in the following format domainName:type=metricType,key1=val1,key2=val2 - */ - static String getMBeanName(String prefix, MetricName metricName) { - StringBuilder mBeanName = new StringBuilder(); - mBeanName.append(prefix); - mBeanName.append(":type="); - mBeanName.append(metricName.group()); - for (Map.Entry entry : 
metricName.tags().entrySet()) { - if (entry.getKey().length() <= 0 || entry.getValue().length() <= 0) - continue; - mBeanName.append(","); - mBeanName.append(entry.getKey()); - mBeanName.append("="); - mBeanName.append(Sanitizer.jmxSanitize(entry.getValue())); - } - return mBeanName.toString(); - } - - public void close() { - synchronized (LOCK) { - for (KafkaMbean mbean : this.mbeans.values()) - unregister(mbean); - } - } - - private void unregister(KafkaMbean mbean) { - MBeanServer server = ManagementFactory.getPlatformMBeanServer(); - try { - if (server.isRegistered(mbean.name())) - server.unregisterMBean(mbean.name()); - } catch (JMException e) { - throw new KafkaException("Error unregistering mbean", e); - } - } - - private void reregister(KafkaMbean mbean) { - unregister(mbean); - try { - ManagementFactory.getPlatformMBeanServer().registerMBean(mbean, mbean.name()); - } catch (JMException e) { - throw new KafkaException("Error registering mbean " + mbean.name(), e); - } - } - - private static class KafkaMbean implements DynamicMBean { - private final ObjectName objectName; - private final Map metrics; - - public KafkaMbean(String mbeanName) throws MalformedObjectNameException { - this.metrics = new HashMap<>(); - this.objectName = new ObjectName(mbeanName); - } - - public ObjectName name() { - return objectName; - } - - public void setAttribute(String name, KafkaMetric metric) { - this.metrics.put(name, metric); - } - - @Override - public Object getAttribute(String name) throws AttributeNotFoundException, MBeanException, ReflectionException { - if (this.metrics.containsKey(name)) - return this.metrics.get(name).metricValue(); - else - throw new AttributeNotFoundException("Could not find attribute " + name); - } - - @Override - public AttributeList getAttributes(String[] names) { - AttributeList list = new AttributeList(); - for (String name : names) { - try { - list.add(new Attribute(name, getAttribute(name))); - } catch (Exception e) { - log.warn("Error getting JMX attribute '{}'", name, e); - } - } - return list; - } - - public KafkaMetric removeAttribute(String name) { - return this.metrics.remove(name); - } - - @Override - public MBeanInfo getMBeanInfo() { - MBeanAttributeInfo[] attrs = new MBeanAttributeInfo[metrics.size()]; - int i = 0; - for (Map.Entry entry : this.metrics.entrySet()) { - String attribute = entry.getKey(); - KafkaMetric metric = entry.getValue(); - attrs[i] = new MBeanAttributeInfo(attribute, - double.class.getName(), - metric.metricName().description(), - true, - false, - false); - i += 1; - } - return new MBeanInfo(this.getClass().getName(), "", attrs, null, null, null); - } - - @Override - public Object invoke(String name, Object[] params, String[] sig) throws MBeanException, ReflectionException { - throw new UnsupportedOperationException("Set not allowed."); - } - - @Override - public void setAttribute(Attribute attribute) throws AttributeNotFoundException, - InvalidAttributeValueException, - MBeanException, - ReflectionException { - throw new UnsupportedOperationException("Set not allowed."); - } - - @Override - public AttributeList setAttributes(AttributeList list) { - throw new UnsupportedOperationException("Set not allowed."); - } - - } - -} diff --git a/clients/src/main/java/org/oracle/okafka/common/metrics/KafkaMetric.java b/clients/src/main/java/org/oracle/okafka/common/metrics/KafkaMetric.java deleted file mode 100644 index 507d385..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/metrics/KafkaMetric.java +++ /dev/null @@ -1,103 +0,0 @@ 
-/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.metrics; - -import org.oracle.okafka.common.Metric; -import org.oracle.okafka.common.MetricName; -import org.oracle.okafka.common.utils.Time; - -public final class KafkaMetric implements Metric { - - private MetricName metricName; - private final Object lock; - private final Time time; - private final MetricValueProvider metricValueProvider; - private MetricConfig config; - - // public for testing - public KafkaMetric(Object lock, MetricName metricName, MetricValueProvider valueProvider, - MetricConfig config, Time time) { - this.metricName = metricName; - this.lock = lock; - if (!(valueProvider instanceof Measurable) && !(valueProvider instanceof Gauge)) - throw new IllegalArgumentException("Unsupported metric value provider of class " + valueProvider.getClass()); - this.metricValueProvider = valueProvider; - this.config = config; - this.time = time; - } - - public MetricConfig config() { - return this.config; - } - - @Override - public MetricName metricName() { - return this.metricName; - } - - /** - * See {@link Metric#value()} for the details on why this is deprecated. 
- */ - @Override - @Deprecated - public double value() { - return measurableValue(time.milliseconds()); - } - - @Override - public Object metricValue() { - long now = time.milliseconds(); - synchronized (this.lock) { - if (this.metricValueProvider instanceof Measurable) - return ((Measurable) metricValueProvider).measure(config, now); - else if (this.metricValueProvider instanceof Gauge) - return ((Gauge) metricValueProvider).value(config, now); - else - throw new IllegalStateException("Not a valid metric: " + this.metricValueProvider.getClass()); - } - } - - public Measurable measurable() { - if (this.metricValueProvider instanceof Measurable) - return (Measurable) metricValueProvider; - else - throw new IllegalStateException("Not a measurable: " + this.metricValueProvider.getClass()); - } - - double measurableValue(long timeMs) { - synchronized (this.lock) { - if (this.metricValueProvider instanceof Measurable) - return ((Measurable) metricValueProvider).measure(config, timeMs); - else - return 0; - } - } - - public void config(MetricConfig config) { - synchronized (lock) { - this.config = config; - } - } -} diff --git a/clients/src/main/java/org/oracle/okafka/common/metrics/Measurable.java b/clients/src/main/java/org/oracle/okafka/common/metrics/Measurable.java deleted file mode 100644 index 7c8dc7a..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/metrics/Measurable.java +++ /dev/null @@ -1,40 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.metrics; - -/** - * A measurable quantity that can be registered as a metric - */ -public interface Measurable extends MetricValueProvider { - - /** - * Measure this quantity and return the result as a double - * @param config The configuration for this metric - * @param now The POSIX time in milliseconds the measurement is being taken - * @return The measured value - */ - double measure(MetricConfig config, long now); - -} diff --git a/clients/src/main/java/org/oracle/okafka/common/metrics/MeasurableStat.java b/clients/src/main/java/org/oracle/okafka/common/metrics/MeasurableStat.java deleted file mode 100644 index 0d894be..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/metrics/MeasurableStat.java +++ /dev/null @@ -1,34 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
-*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.metrics; - -/** - * A MeasurableStat is a {@link Stat} that is also {@link Measurable} (i.e. can produce a single floating point value). - * This is the interface used for most of the simple statistics such as {@link org.oracle.okafka.common.metrics.stats.Avg}, - * {@link org.oracle.okafka.common.metrics.stats.Max}, {@link org.oracle.okafka.common.metrics.stats.Count}, etc. - */ -public interface MeasurableStat extends Stat, Measurable { - -} diff --git a/clients/src/main/java/org/oracle/okafka/common/metrics/MetricConfig.java b/clients/src/main/java/org/oracle/okafka/common/metrics/MetricConfig.java deleted file mode 100644 index 58dc823..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/metrics/MetricConfig.java +++ /dev/null @@ -1,110 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.oracle.okafka.common.metrics; - -import java.util.LinkedHashMap; -import java.util.Map; -import java.util.concurrent.TimeUnit; - -/** - * Configuration values for metrics - */ -public class MetricConfig { - - private Quota quota; - private int samples; - private long eventWindow; - private long timeWindowMs; - private Map tags; - private Sensor.RecordingLevel recordingLevel; - - public MetricConfig() { - super(); - this.quota = null; - this.samples = 2; - this.eventWindow = Long.MAX_VALUE; - this.timeWindowMs = TimeUnit.MILLISECONDS.convert(30, TimeUnit.SECONDS); - this.tags = new LinkedHashMap<>(); - this.recordingLevel = Sensor.RecordingLevel.INFO; - } - - public Quota quota() { - return this.quota; - } - - public MetricConfig quota(Quota quota) { - this.quota = quota; - return this; - } - - public long eventWindow() { - return eventWindow; - } - - public MetricConfig eventWindow(long window) { - this.eventWindow = window; - return this; - } - - public long timeWindowMs() { - return timeWindowMs; - } - - public MetricConfig timeWindow(long window, TimeUnit unit) { - this.timeWindowMs = TimeUnit.MILLISECONDS.convert(window, unit); - return this; - } - - public Map tags() { - return this.tags; - } - - public MetricConfig tags(Map tags) { - this.tags = tags; - return this; - } - - public int samples() { - return this.samples; - } - - public MetricConfig samples(int samples) { - if (samples < 1) - throw new IllegalArgumentException("The number of samples must be at least 1."); - this.samples = samples; - return this; - } - - public Sensor.RecordingLevel recordLevel() { - return this.recordingLevel; - } - - public MetricConfig recordLevel(Sensor.RecordingLevel recordingLevel) { - this.recordingLevel = recordingLevel; - return this; - } - - -} diff --git a/clients/src/main/java/org/oracle/okafka/common/metrics/MetricValueProvider.java b/clients/src/main/java/org/oracle/okafka/common/metrics/MetricValueProvider.java deleted file mode 100644 index 27fff49..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/metrics/MetricValueProvider.java +++ /dev/null @@ -1,36 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.metrics; - -/** - * Super-interface for {@link Measurable} or {@link Gauge} that provides - * metric values. - *

      - * In the future for Java8 and above, {@link Gauge#value(MetricConfig, long)} will be - * moved to this interface with a default implementation in {@link Measurable} that returns - * {@link Measurable#measure(MetricConfig, long)}.
      - */ -public interface MetricValueProvider { } diff --git a/clients/src/main/java/org/oracle/okafka/common/metrics/Metrics.java b/clients/src/main/java/org/oracle/okafka/common/metrics/Metrics.java deleted file mode 100644 index 1e907a9..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/metrics/Metrics.java +++ /dev/null @@ -1,666 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.metrics; - -import org.oracle.okafka.common.MetricName; -import org.oracle.okafka.common.MetricNameTemplate; -import org.oracle.okafka.common.utils.KafkaThread; -import org.oracle.okafka.common.utils.Time; -import org.oracle.okafka.common.utils.Utils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.Closeable; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.Set; -import java.util.TreeMap; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.ScheduledThreadPoolExecutor; -import java.util.concurrent.ThreadFactory; -import java.util.concurrent.TimeUnit; - -/** - * A registry of sensors and metrics. - *

      - * A metric is a named, numerical measurement. A sensor is a handle to record numerical measurements as they occur. Each - * Sensor has zero or more associated metrics. For example a Sensor might represent message sizes and we might associate - * with this sensor a metric for the average, maximum, or other statistics computed off the sequence of message sizes - * that are recorded by the sensor.
      - * <p>
      - * Usage looks something like this:
      - *
      - * <pre>
      - * // set up metrics:
      - * Metrics metrics = new Metrics(); // this is the global repository of metrics and sensors
      - * Sensor sensor = metrics.sensor("message-sizes");
      - * MetricName metricName = new MetricName("message-size-avg", "producer-metrics");
      - * sensor.add(metricName, new Avg());
      - * metricName = new MetricName("message-size-max", "producer-metrics");
      - * sensor.add(metricName, new Max());
      - *
      - * // as messages are sent we record the sizes
      - * sensor.record(messageSize);
      - * </pre>
      - */ -public class Metrics implements Closeable { - - private final MetricConfig config; - private final ConcurrentMap metrics; - private final ConcurrentMap sensors; - private final ConcurrentMap> childrenSensors; - private final List reporters; - private final Time time; - private final ScheduledThreadPoolExecutor metricsScheduler; - private static final Logger log = LoggerFactory.getLogger(Metrics.class); - - /** - * Create a metrics repository with no metric reporters and default configuration. - * Expiration of Sensors is disabled. - */ - public Metrics() { - this(new MetricConfig()); - } - - /** - * Create a metrics repository with no metric reporters and default configuration. - * Expiration of Sensors is disabled. - */ - public Metrics(Time time) { - this(new MetricConfig(), new ArrayList(0), time); - } - - /** - * Create a metrics repository with no metric reporters and the given default configuration. - * Expiration of Sensors is disabled. - */ - public Metrics(MetricConfig defaultConfig, Time time) { - this(defaultConfig, new ArrayList(0), time); - } - - - /** - * Create a metrics repository with no reporters and the given default config. This config will be used for any - * metric that doesn't override its own config. Expiration of Sensors is disabled. - * @param defaultConfig The default config to use for all metrics that don't override their config - */ - public Metrics(MetricConfig defaultConfig) { - this(defaultConfig, new ArrayList(0), Time.SYSTEM); - } - - /** - * Create a metrics repository with a default config and the given metric reporters. - * Expiration of Sensors is disabled. - * @param defaultConfig The default config - * @param reporters The metrics reporters - * @param time The time instance to use with the metrics - */ - public Metrics(MetricConfig defaultConfig, List reporters, Time time) { - this(defaultConfig, reporters, time, false); - } - - /** - * Create a metrics repository with a default config, given metric reporters and the ability to expire eligible sensors - * @param defaultConfig The default config - * @param reporters The metrics reporters - * @param time The time instance to use with the metrics - * @param enableExpiration true if the metrics instance can garbage collect inactive sensors, false otherwise - */ - public Metrics(MetricConfig defaultConfig, List reporters, Time time, boolean enableExpiration) { - this.config = defaultConfig; - this.sensors = new ConcurrentHashMap<>(); - this.metrics = new ConcurrentHashMap<>(); - this.childrenSensors = new ConcurrentHashMap<>(); - this.reporters = Utils.notNull(reporters); - this.time = time; - for (MetricsReporter reporter : reporters) - reporter.init(new ArrayList()); - - // Create the ThreadPoolExecutor only if expiration of Sensors is enabled. 
- if (enableExpiration) { - this.metricsScheduler = new ScheduledThreadPoolExecutor(1); - // Creating a daemon thread to not block shutdown - this.metricsScheduler.setThreadFactory(new ThreadFactory() { - public Thread newThread(Runnable runnable) { - return KafkaThread.daemon("SensorExpiryThread", runnable); - } - }); - this.metricsScheduler.scheduleAtFixedRate(new ExpireSensorTask(), 30, 30, TimeUnit.SECONDS); - } else { - this.metricsScheduler = null; - } - - addMetric(metricName("count", "kafka-metrics-count", "total number of registered metrics"), - new Measurable() { - @Override - public double measure(MetricConfig config, long now) { - return metrics.size(); - } - }); - } - - /** - * Create a MetricName with the given name, group, description and tags, plus default tags specified in the metric - * configuration. Tag in tags takes precedence if the same tag key is specified in the default metric configuration. - * - * @param name The name of the metric - * @param group logical group name of the metrics to which this metric belongs - * @param description A human-readable description to include in the metric - * @param tags additional key/value attributes of the metric - */ - public MetricName metricName(String name, String group, String description, Map tags) { - Map combinedTag = new LinkedHashMap<>(config.tags()); - combinedTag.putAll(tags); - return new MetricName(name, group, description, combinedTag); - } - - /** - * Create a MetricName with the given name, group, description, and default tags - * specified in the metric configuration. - * - * @param name The name of the metric - * @param group logical group name of the metrics to which this metric belongs - * @param description A human-readable description to include in the metric - */ - public MetricName metricName(String name, String group, String description) { - return metricName(name, group, description, new HashMap()); - } - - /** - * Create a MetricName with the given name, group and default tags specified in the metric configuration. - * - * @param name The name of the metric - * @param group logical group name of the metrics to which this metric belongs - */ - public MetricName metricName(String name, String group) { - return metricName(name, group, "", new HashMap()); - } - - /** - * Create a MetricName with the given name, group, description, and keyValue as tags, plus default tags specified in the metric - * configuration. Tag in keyValue takes precedence if the same tag key is specified in the default metric configuration. - * - * @param name The name of the metric - * @param group logical group name of the metrics to which this metric belongs - * @param description A human-readable description to include in the metric - * @param keyValue additional key/value attributes of the metric (must come in pairs) - */ - public MetricName metricName(String name, String group, String description, String... keyValue) { - return metricName(name, group, description, getTags(keyValue)); - } - - /** - * Create a MetricName with the given name, group and tags, plus default tags specified in the metric - * configuration. Tag in tags takes precedence if the same tag key is specified in the default metric configuration. 
- * - * @param name The name of the metric - * @param group logical group name of the metrics to which this metric belongs - * @param tags key/value attributes of the metric - */ - public MetricName metricName(String name, String group, Map tags) { - return metricName(name, group, "", tags); - } - - private static Map getTags(String... keyValue) { - if ((keyValue.length % 2) != 0) - throw new IllegalArgumentException("keyValue needs to be specified in pairs"); - Map tags = new HashMap(); - - for (int i = 0; i < keyValue.length; i += 2) - tags.put(keyValue[i], keyValue[i + 1]); - return tags; - } - - /** - * Use the specified domain and metric name templates to generate an HTML table documenting the metrics. A separate table section - * will be generated for each of the MBeans and the associated attributes. The MBean names are lexicographically sorted to - * determine the order of these sections. This order is therefore dependent upon the order of the - * tags in each {@link MetricNameTemplate}. - * - * @param domain the domain or prefix for the JMX MBean names; may not be null - * @param allMetrics the collection of all {@link MetricNameTemplate} instances each describing one metric; may not be null - * @return the string containing the HTML table; never null - */ - public static String toHtmlTable(String domain, Iterable allMetrics) { - Map> beansAndAttributes = new TreeMap>(); - - try (Metrics metrics = new Metrics()) { - for (MetricNameTemplate template : allMetrics) { - Map tags = new LinkedHashMap<>(); - for (String s : template.tags()) { - tags.put(s, "{" + s + "}"); - } - - MetricName metricName = metrics.metricName(template.name(), template.group(), template.description(), tags); - String mBeanName = JmxReporter.getMBeanName(domain, metricName); - if (!beansAndAttributes.containsKey(mBeanName)) { - beansAndAttributes.put(mBeanName, new TreeMap()); - } - Map attrAndDesc = beansAndAttributes.get(mBeanName); - if (!attrAndDesc.containsKey(template.name())) { - attrAndDesc.put(template.name(), template.description()); - } else { - throw new IllegalArgumentException("mBean '" + mBeanName + "' attribute '" + template.name() + "' is defined twice."); - } - } - } - - StringBuilder b = new StringBuilder(); - b.append("\n"); - - for (Entry> e : beansAndAttributes.entrySet()) { - b.append("\n"); - b.append(""); - b.append("\n"); - - b.append("\n"); - b.append("\n"); - b.append("\n"); - b.append("\n"); - b.append("\n"); - - for (Entry e2 : e.getValue().entrySet()) { - b.append("\n"); - b.append(""); - b.append(""); - b.append(""); - b.append("\n"); - } - - } - b.append("
      "); - b.append(e.getKey()); - b.append("
      Attribute nameDescription
      "); - b.append(e2.getKey()); - b.append(""); - b.append(e2.getValue()); - b.append("
      "); - - return b.toString(); - - } - - public MetricConfig config() { - return config; - } - - /** - * Get the sensor with the given name if it exists - * @param name The name of the sensor - * @return Return the sensor or null if no such sensor exists - */ - public Sensor getSensor(String name) { - return this.sensors.get(Utils.notNull(name)); - } - - /** - * Get or create a sensor with the given unique name and no parent sensors. This uses - * a default recording level of INFO. - * @param name The sensor name - * @return The sensor - */ - public Sensor sensor(String name) { - return this.sensor(name, Sensor.RecordingLevel.INFO); - } - - /** - * Get or create a sensor with the given unique name and no parent sensors and with a given - * recording level. - * @param name The sensor name. - * @param recordingLevel The recording level. - * @return The sensor - */ - public Sensor sensor(String name, Sensor.RecordingLevel recordingLevel) { - return sensor(name, null, recordingLevel, (Sensor[]) null); - } - - - /** - * Get or create a sensor with the given unique name and zero or more parent sensors. All parent sensors will - * receive every value recorded with this sensor. This uses a default recording level of INFO. - * @param name The name of the sensor - * @param parents The parent sensors - * @return The sensor that is created - */ - public Sensor sensor(String name, Sensor... parents) { - return this.sensor(name, Sensor.RecordingLevel.INFO, parents); - } - - /** - * Get or create a sensor with the given unique name and zero or more parent sensors. All parent sensors will - * receive every value recorded with this sensor. - * @param name The name of the sensor. - * @param parents The parent sensors. - * @param recordingLevel The recording level. - * @return The sensor that is created - */ - public Sensor sensor(String name, Sensor.RecordingLevel recordingLevel, Sensor... parents) { - return sensor(name, null, recordingLevel, parents); - } - - /** - * Get or create a sensor with the given unique name and zero or more parent sensors. All parent sensors will - * receive every value recorded with this sensor. This uses a default recording level of INFO. - * @param name The name of the sensor - * @param config A default configuration to use for this sensor for metrics that don't have their own config - * @param parents The parent sensors - * @return The sensor that is created - */ - public synchronized Sensor sensor(String name, MetricConfig config, Sensor... parents) { - return this.sensor(name, config, Sensor.RecordingLevel.INFO, parents); - } - - - /** - * Get or create a sensor with the given unique name and zero or more parent sensors. All parent sensors will - * receive every value recorded with this sensor. - * @param name The name of the sensor - * @param config A default configuration to use for this sensor for metrics that don't have their own config - * @param recordingLevel The recording level. - * @param parents The parent sensors - * @return The sensor that is created - */ - public synchronized Sensor sensor(String name, MetricConfig config, Sensor.RecordingLevel recordingLevel, Sensor... parents) { - return sensor(name, config, Long.MAX_VALUE, recordingLevel, parents); - } - - /** - * Get or create a sensor with the given unique name and zero or more parent sensors. All parent sensors will - * receive every value recorded with this sensor. 
- * @param name The name of the sensor - * @param config A default configuration to use for this sensor for metrics that don't have their own config - * @param inactiveSensorExpirationTimeSeconds If no value if recorded on the Sensor for this duration of time, - * it is eligible for removal - * @param parents The parent sensors - * @param recordingLevel The recording level. - * @return The sensor that is created - */ - public synchronized Sensor sensor(String name, MetricConfig config, long inactiveSensorExpirationTimeSeconds, Sensor.RecordingLevel recordingLevel, Sensor... parents) { - Sensor s = getSensor(name); - if (s == null) { - s = new Sensor(this, name, parents, config == null ? this.config : config, time, inactiveSensorExpirationTimeSeconds, recordingLevel); - this.sensors.put(name, s); - if (parents != null) { - for (Sensor parent : parents) { - List children = childrenSensors.get(parent); - if (children == null) { - children = new ArrayList<>(); - childrenSensors.put(parent, children); - } - children.add(s); - } - } - log.debug("Added sensor with name {}", name); - } - return s; - } - - /** - * Get or create a sensor with the given unique name and zero or more parent sensors. All parent sensors will - * receive every value recorded with this sensor. This uses a default recording level of INFO. - * @param name The name of the sensor - * @param config A default configuration to use for this sensor for metrics that don't have their own config - * @param inactiveSensorExpirationTimeSeconds If no value if recorded on the Sensor for this duration of time, - * it is eligible for removal - * @param parents The parent sensors - * @return The sensor that is created - */ - public synchronized Sensor sensor(String name, MetricConfig config, long inactiveSensorExpirationTimeSeconds, Sensor... parents) { - return this.sensor(name, config, inactiveSensorExpirationTimeSeconds, Sensor.RecordingLevel.INFO, parents); - } - - /** - * Remove a sensor (if it exists), associated metrics and its children. - * - * @param name The name of the sensor to be removed - */ - public void removeSensor(String name) { - Sensor sensor = sensors.get(name); - if (sensor != null) { - List childSensors = null; - synchronized (sensor) { - synchronized (this) { - if (sensors.remove(name, sensor)) { - for (KafkaMetric metric : sensor.metrics()) - removeMetric(metric.metricName()); - log.debug("Removed sensor with name {}", name); - childSensors = childrenSensors.remove(sensor); - } - } - } - if (childSensors != null) { - for (Sensor childSensor : childSensors) - removeSensor(childSensor.name()); - } - } - } - - /** - * Add a metric to monitor an object that implements measurable. This metric won't be associated with any sensor. - * This is a way to expose existing values as metrics. - * - * This method is kept for binary compatibility purposes, it has the same behaviour as - * {@link #addMetric(MetricName, MetricValueProvider)}. - * - * @param metricName The name of the metric - * @param measurable The measurable that will be measured by this metric - */ - public void addMetric(MetricName metricName, Measurable measurable) { - addMetric(metricName, null, measurable); - } - - /** - * Add a metric to monitor an object that implements Measurable. This metric won't be associated with any sensor. - * This is a way to expose existing values as metrics. - * - * This method is kept for binary compatibility purposes, it has the same behaviour as - * {@link #addMetric(MetricName, MetricConfig, MetricValueProvider)}. 
- * - * @param metricName The name of the metric - * @param config The configuration to use when measuring this measurable - * @param measurable The measurable that will be measured by this metric - */ - public void addMetric(MetricName metricName, MetricConfig config, Measurable measurable) { - addMetric(metricName, config, (MetricValueProvider) measurable); - } - - /** - * Add a metric to monitor an object that implements MetricValueProvider. This metric won't be associated with any - * sensor. This is a way to expose existing values as metrics. User is expected to add any additional - * synchronization to update and access metric values, if required. - * - * @param metricName The name of the metric - * @param metricValueProvider The metric value provider associated with this metric - */ - public void addMetric(MetricName metricName, MetricConfig config, MetricValueProvider metricValueProvider) { - KafkaMetric m = new KafkaMetric(new Object(), - Utils.notNull(metricName), - Utils.notNull(metricValueProvider), - config == null ? this.config : config, - time); - registerMetric(m); - } - - /** - * Add a metric to monitor an object that implements MetricValueProvider. This metric won't be associated with any - * sensor. This is a way to expose existing values as metrics. User is expected to add any additional - * synchronization to update and access metric values, if required. - * - * @param metricName The name of the metric - * @param metricValueProvider The metric value provider associated with this metric - */ - public void addMetric(MetricName metricName, MetricValueProvider metricValueProvider) { - addMetric(metricName, null, metricValueProvider); - } - - /** - * Remove a metric if it exists and return it. Return null otherwise. If a metric is removed, `metricRemoval` - * will be invoked for each reporter. 
- * - * @param metricName The name of the metric - * @return the removed `KafkaMetric` or null if no such metric exists - */ - public synchronized KafkaMetric removeMetric(MetricName metricName) { - KafkaMetric metric = this.metrics.remove(metricName); - if (metric != null) { - for (MetricsReporter reporter : reporters) { - try { - reporter.metricRemoval(metric); - } catch (Exception e) { - log.error("Error when removing metric from " + reporter.getClass().getName(), e); - } - } - log.trace("Removed metric named {}", metricName); - } - return metric; - } - - /** - * Add a MetricReporter - */ - public synchronized void addReporter(MetricsReporter reporter) { - Utils.notNull(reporter).init(new ArrayList<>(metrics.values())); - this.reporters.add(reporter); - } - - /** - * Remove a MetricReporter - */ - public synchronized void removeReporter(MetricsReporter reporter) { - if (this.reporters.remove(reporter)) { - reporter.close(); - } - } - - synchronized void registerMetric(KafkaMetric metric) { - MetricName metricName = metric.metricName(); - if (this.metrics.containsKey(metricName)) - throw new IllegalArgumentException("A metric named '" + metricName + "' already exists, can't register another one."); - this.metrics.put(metricName, metric); - for (MetricsReporter reporter : reporters) { - try { - reporter.metricChange(metric); - } catch (Exception e) { - log.error("Error when registering metric on " + reporter.getClass().getName(), e); - } - } - log.trace("Registered metric named {}", metricName); - } - - /** - * Get all the metrics currently maintained indexed by metricName - */ - public Map metrics() { - return this.metrics; - } - - public List reporters() { - return this.reporters; - } - - public KafkaMetric metric(MetricName metricName) { - return this.metrics.get(metricName); - } - - /** - * This iterates over every Sensor and triggers a removeSensor if it has expired - * Package private for testing - */ - class ExpireSensorTask implements Runnable { - public void run() { - for (Map.Entry sensorEntry : sensors.entrySet()) { - // removeSensor also locks the sensor object. This is fine because synchronized is reentrant - // There is however a minor race condition here. Assume we have a parent sensor P and child sensor C. - // Calling record on C would cause a record on P as well. - // So expiration time for P == expiration time for C. If the record on P happens via C just after P is removed, - // that will cause C to also get removed. - // Since the expiration time is typically high it is not expected to be a significant concern - // and thus not necessary to optimize - synchronized (sensorEntry.getValue()) { - if (sensorEntry.getValue().hasExpired()) { - log.debug("Removing expired sensor {}", sensorEntry.getKey()); - removeSensor(sensorEntry.getKey()); - } - } - } - } - } - - /* For testing use only. */ - Map> childrenSensors() { - return Collections.unmodifiableMap(childrenSensors); - } - - public MetricName metricInstance(MetricNameTemplate template, String... keyValue) { - return metricInstance(template, getTags(keyValue)); - } - - public MetricName metricInstance(MetricNameTemplate template, Map tags) { - // check to make sure that the runtime defined tags contain all the template tags. 
- Set runtimeTagKeys = new HashSet<>(tags.keySet()); - runtimeTagKeys.addAll(config().tags().keySet()); - - Set templateTagKeys = template.tags(); - - if (!runtimeTagKeys.equals(templateTagKeys)) { - throw new IllegalArgumentException("For '" + template.name() + "', runtime-defined metric tags do not match the tags in the template. " - + "Runtime = " + runtimeTagKeys.toString() + " Template = " + templateTagKeys.toString()); - } - - return this.metricName(template.name(), template.group(), template.description(), tags); - } - - /** - * Close this metrics repository. - */ - @Override - public void close() { - if (this.metricsScheduler != null) { - this.metricsScheduler.shutdown(); - try { - this.metricsScheduler.awaitTermination(30, TimeUnit.SECONDS); - } catch (InterruptedException ex) { - // ignore and continue shutdown - Thread.currentThread().interrupt(); - } - } - - for (MetricsReporter reporter : reporters) { - try { - reporter.close(); - } catch (Exception e) { - log.error("Error when closing " + reporter.getClass().getName(), e); - } - } - } - -} diff --git a/clients/src/main/java/org/oracle/okafka/common/metrics/MetricsReporter.java b/clients/src/main/java/org/oracle/okafka/common/metrics/MetricsReporter.java deleted file mode 100644 index dd06970..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/metrics/MetricsReporter.java +++ /dev/null @@ -1,61 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.metrics; - -import java.util.List; - -import org.oracle.okafka.common.Configurable; - -/** - * A plugin interface to allow things to listen as new metrics are created so they can be reported. - *

      - * Implement {@link org.oracle.okafka.common.ClusterResourceListener} to receive cluster metadata once it's available. Please see the class documentation for ClusterResourceListener for more information. - */ -public interface MetricsReporter extends Configurable { - - /** - * This is called when the reporter is first registered to initially register all existing metrics - * @param metrics All currently existing metrics - */ - public void init(List metrics); - - /** - * This is called whenever a metric is updated or added - * @param metric - */ - public void metricChange(KafkaMetric metric); - - /** - * This is called whenever a metric is removed - * @param metric - */ - public void metricRemoval(KafkaMetric metric); - - /** - * Called when the metrics repository is closed. - */ - public void close(); - -} diff --git a/clients/src/main/java/org/oracle/okafka/common/metrics/Quota.java b/clients/src/main/java/org/oracle/okafka/common/metrics/Quota.java deleted file mode 100644 index 7e8c7ac..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/metrics/Quota.java +++ /dev/null @@ -1,83 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.metrics; - -/** - * An upper or lower bound for metrics - */ -public final class Quota { - - private final boolean upper; - private final double bound; - - public Quota(double bound, boolean upper) { - this.bound = bound; - this.upper = upper; - } - - public static Quota upperBound(double upperBound) { - return new Quota(upperBound, true); - } - - public static Quota lowerBound(double lowerBound) { - return new Quota(lowerBound, false); - } - - public boolean isUpperBound() { - return this.upper; - } - - public double bound() { - return this.bound; - } - - public boolean acceptable(double value) { - return (upper && value <= bound) || (!upper && value >= bound); - } - - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + (int) this.bound; - result = prime * result + (this.upper ? 1 : 0); - return result; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) - return true; - if (!(obj instanceof Quota)) - return false; - Quota that = (Quota) obj; - return (that.bound == this.bound) && (that.upper == this.upper); - } - - @Override - public String toString() { - return (upper ? 
"upper=" : "lower=") + bound; - } -} diff --git a/clients/src/main/java/org/oracle/okafka/common/metrics/QuotaViolationException.java b/clients/src/main/java/org/oracle/okafka/common/metrics/QuotaViolationException.java deleted file mode 100644 index e05724b..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/metrics/QuotaViolationException.java +++ /dev/null @@ -1,62 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.metrics; - -import org.oracle.okafka.common.KafkaException; -import org.oracle.okafka.common.MetricName; - -/** - * Thrown when a sensor records a value that causes a metric to go outside the bounds configured as its quota - */ -public class QuotaViolationException extends KafkaException { - - private static final long serialVersionUID = 1L; - private final MetricName metricName; - private final double value; - private final double bound; - - public QuotaViolationException(MetricName metricName, double value, double bound) { - super(String.format( - "'%s' violated quota. Actual: %f, Threshold: %f", - metricName, - value, - bound)); - this.metricName = metricName; - this.value = value; - this.bound = bound; - } - - public MetricName metricName() { - return metricName; - } - - public double value() { - return value; - } - - public double bound() { - return bound; - } -} diff --git a/clients/src/main/java/org/oracle/okafka/common/metrics/Sensor.java b/clients/src/main/java/org/oracle/okafka/common/metrics/Sensor.java deleted file mode 100644 index ea150e0..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/metrics/Sensor.java +++ /dev/null @@ -1,328 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.metrics; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashSet; -import java.util.LinkedHashMap; -import java.util.LinkedList; -import java.util.List; -import java.util.Locale; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.TimeUnit; - -import org.oracle.okafka.common.MetricName; -import org.oracle.okafka.common.metrics.CompoundStat.NamedMeasurable; -import org.oracle.okafka.common.utils.Time; -import org.oracle.okafka.common.utils.Utils; - -/** - * A sensor applies a continuous sequence of numerical values to a set of associated metrics. For example a sensor on - * message size would record a sequence of message sizes using the {@link #record(double)} api and would maintain a set - * of metrics about request sizes such as the average or max. - */ -public final class Sensor { - - private final Metrics registry; - private final String name; - private final Sensor[] parents; - private final List stats; - private final Map metrics; - private final MetricConfig config; - private final Time time; - private volatile long lastRecordTime; - private final long inactiveSensorExpirationTimeMs; - private final Object metricLock; - - public enum RecordingLevel { - INFO(0, "INFO"), DEBUG(1, "DEBUG"); - - private static final RecordingLevel[] ID_TO_TYPE; - private static final int MIN_RECORDING_LEVEL_KEY = 0; - public static final int MAX_RECORDING_LEVEL_KEY; - - static { - int maxRL = -1; - for (RecordingLevel level : RecordingLevel.values()) { - maxRL = Math.max(maxRL, level.id); - } - RecordingLevel[] idToName = new RecordingLevel[maxRL + 1]; - for (RecordingLevel level : RecordingLevel.values()) { - idToName[level.id] = level; - } - ID_TO_TYPE = idToName; - MAX_RECORDING_LEVEL_KEY = maxRL; - } - - /** an english description of the api--this is for debugging and can change */ - public final String name; - - /** the permanent and immutable id of an API--this can't change ever */ - public final short id; - - RecordingLevel(int id, String name) { - this.id = (short) id; - this.name = name; - } - - public static RecordingLevel forId(int id) { - if (id < MIN_RECORDING_LEVEL_KEY || id > MAX_RECORDING_LEVEL_KEY) - throw new IllegalArgumentException(String.format("Unexpected RecordLevel id `%d`, it should be between `%d` " + - "and `%d` (inclusive)", id, MIN_RECORDING_LEVEL_KEY, MAX_RECORDING_LEVEL_KEY)); - return ID_TO_TYPE[id]; - } - - /** Case insensitive lookup by protocol name */ - public static RecordingLevel forName(String name) { - return RecordingLevel.valueOf(name.toUpperCase(Locale.ROOT)); - } - - public boolean shouldRecord(final int configId) { - return configId == DEBUG.id || configId == this.id; - } - - } - - private final RecordingLevel recordingLevel; - - Sensor(Metrics registry, String name, Sensor[] parents, MetricConfig config, Time time, - long inactiveSensorExpirationTimeSeconds, RecordingLevel recordingLevel) { - super(); - this.registry = registry; - this.name = Utils.notNull(name); - this.parents = parents == null ? 
new Sensor[0] : parents; - this.metrics = new LinkedHashMap<>(); - this.stats = new ArrayList<>(); - this.config = config; - this.time = time; - this.inactiveSensorExpirationTimeMs = TimeUnit.MILLISECONDS.convert(inactiveSensorExpirationTimeSeconds, TimeUnit.SECONDS); - this.lastRecordTime = time.milliseconds(); - this.recordingLevel = recordingLevel; - this.metricLock = new Object(); - checkForest(new HashSet()); - } - - /* Validate that this sensor doesn't end up referencing itself */ - private void checkForest(Set sensors) { - if (!sensors.add(this)) - throw new IllegalArgumentException("Circular dependency in sensors: " + name() + " is its own parent."); - for (Sensor parent : parents) - parent.checkForest(sensors); - } - - /** - * The name this sensor is registered with. This name will be unique among all registered sensors. - */ - public String name() { - return this.name; - } - - /** - * Record an occurrence, this is just short-hand for {@link #record(double) record(1.0)} - */ - public void record() { - if (shouldRecord()) { - record(1.0); - } - } - - /** - * @return true if the sensor's record level indicates that the metric will be recorded, false otherwise - */ - public boolean shouldRecord() { - return this.recordingLevel.shouldRecord(config.recordLevel().id); - } - /** - * Record a value with this sensor - * @param value The value to record - * @throws QuotaViolationException if recording this value moves a metric beyond its configured maximum or minimum - * bound - */ - public void record(double value) { - if (shouldRecord()) { - record(value, time.milliseconds()); - } - } - - /** - * Record a value at a known time. This method is slightly faster than {@link #record(double)} since it will reuse - * the time stamp. - * @param value The value we are recording - * @param timeMs The current POSIX time in milliseconds - * @throws QuotaViolationException if recording this value moves a metric beyond its configured maximum or minimum - * bound - */ - public void record(double value, long timeMs) { - record(value, timeMs, true); - } - - public void record(double value, long timeMs, boolean checkQuotas) { - if (shouldRecord()) { - this.lastRecordTime = timeMs; - synchronized (this) { - synchronized (metricLock()) { - // increment all the stats - for (Stat stat : this.stats) - stat.record(config, value, timeMs); - } - if (checkQuotas) - checkQuotas(timeMs); - } - for (Sensor parent : parents) - parent.record(value, timeMs, checkQuotas); - } - } - - /** - * Check if we have violated our quota for any metric that has a configured quota - */ - public void checkQuotas() { - checkQuotas(time.milliseconds()); - } - - public void checkQuotas(long timeMs) { - for (KafkaMetric metric : this.metrics.values()) { - MetricConfig config = metric.config(); - if (config != null) { - Quota quota = config.quota(); - if (quota != null) { - double value = metric.measurableValue(timeMs); - if (!quota.acceptable(value)) { - throw new QuotaViolationException(metric.metricName(), value, - quota.bound()); - } - } - } - } - } - - /** - * Register a compound statistic with this sensor with no config override - * @param stat The stat to register - * @return true if stat is added to sensor, false if sensor is expired - */ - public boolean add(CompoundStat stat) { - return add(stat, null); - } - - /** - * Register a compound statistic with this sensor which yields multiple measurable quantities (like a histogram) - * @param stat The stat to register - * @param config The configuration for this stat. 
If null then the stat will use the default configuration for this - * sensor. - * @return true if stat is added to sensor, false if sensor is expired - */ - public synchronized boolean add(CompoundStat stat, MetricConfig config) { - if (hasExpired()) - return false; - - this.stats.add(Utils.notNull(stat)); - Object lock = metricLock(); - for (NamedMeasurable m : stat.stats()) { - final KafkaMetric metric = new KafkaMetric(lock, m.name(), m.stat(), config == null ? this.config : config, time); - if (!metrics.containsKey(metric.metricName())) { - registry.registerMetric(metric); - metrics.put(metric.metricName(), metric); - } - } - return true; - } - - /** - * Register a metric with this sensor - * @param metricName The name of the metric - * @param stat The statistic to keep - * @return true if metric is added to sensor, false if sensor is expired - */ - public boolean add(MetricName metricName, MeasurableStat stat) { - return add(metricName, stat, null); - } - - /** - * Register a metric with this sensor - * - * @param metricName The name of the metric - * @param stat The statistic to keep - * @param config A special configuration for this metric. If null use the sensor default configuration. - * @return true if metric is added to sensor, false if sensor is expired - */ - public synchronized boolean add(final MetricName metricName, final MeasurableStat stat, final MetricConfig config) { - if (hasExpired()) { - return false; - } else if (metrics.containsKey(metricName)) { - return true; - } else { - final KafkaMetric metric = new KafkaMetric( - metricLock(), - Utils.notNull(metricName), - Utils.notNull(stat), - config == null ? this.config : config, - time - ); - registry.registerMetric(metric); - metrics.put(metric.metricName(), metric); - stats.add(stat); - return true; - } - } - - /** - * Return true if the Sensor is eligible for removal due to inactivity. - * false otherwise - */ - public boolean hasExpired() { - return (time.milliseconds() - this.lastRecordTime) > this.inactiveSensorExpirationTimeMs; - } - - synchronized List metrics() { - return Collections.unmodifiableList(new LinkedList<>(this.metrics.values())); - } - - /** - * KafkaMetrics of sensors which use SampledStat should be synchronized on the same lock - * for sensor record and metric value read to allow concurrent reads and updates. For simplicity, - * all sensors are synchronized on this object. - *

      - * Sensor object is not used as a lock for reading metric value since metrics reporter is - * invoked while holding Sensor and Metrics locks to report addition and removal of metrics - * and synchronized reporters may deadlock if Sensor lock is used for reading metrics values. - * Note that Sensor object itself is used as a lock to protect the access to stats and metrics - * while recording metric values, adding and deleting sensors.
      - * <p>
      - * Locking order (assume all MetricsReporter methods may be synchronized):
      - * <ul>
      - *   <li>Sensor#add: Sensor -> Metrics -> MetricsReporter</li>
      - *   <li>Metrics#removeSensor: Sensor -> Metrics -> MetricsReporter</li>
      - *   <li>KafkaMetric#metricValue: MetricsReporter -> Sensor#metricLock</li>
      - *   <li>Sensor#record: Sensor -> Sensor#metricLock</li>
      - * </ul>
      - */ - private Object metricLock() { - return metricLock; - } -} diff --git a/clients/src/main/java/org/oracle/okafka/common/metrics/Stat.java b/clients/src/main/java/org/oracle/okafka/common/metrics/Stat.java deleted file mode 100644 index f7c544e..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/metrics/Stat.java +++ /dev/null @@ -1,40 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.metrics; - -/** - * A Stat is a quantity such as average, max, etc that is computed off the stream of updates to a sensor - */ -public interface Stat { - - /** - * Record the given value - * @param config The configuration to use for this metric - * @param value The value to record - * @param timeMs The POSIX time in milliseconds this value occurred - */ - public void record(MetricConfig config, double value, long timeMs); - -} diff --git a/clients/src/main/java/org/oracle/okafka/common/network/AQClient.java b/clients/src/main/java/org/oracle/okafka/common/network/AQClient.java index 9b3a0ec..5f09d58 100644 --- a/clients/src/main/java/org/oracle/okafka/common/network/AQClient.java +++ b/clients/src/main/java/org/oracle/okafka/common/network/AQClient.java @@ -1,7 +1,7 @@ /* -** OKafka Java Client version 0.8. +** OKafka Java Client version 23.4. ** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. +** Copyright (c) 2019, 2024 Oracle and/or its affiliates. ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
*/ @@ -22,42 +22,110 @@ import java.util.Map; import java.util.StringTokenizer; -import org.oracle.okafka.clients.ClientRequest; -import org.oracle.okafka.clients.ClientResponse; +import org.apache.kafka.clients.ClientRequest; +import org.apache.kafka.clients.ClientResponse; import org.oracle.okafka.clients.CommonClientConfigs; +import org.oracle.okafka.clients.TopicTeqParameters; import org.oracle.okafka.common.Node; -import org.oracle.okafka.common.PartitionInfo; -import org.oracle.okafka.common.config.AbstractConfig; +import org.apache.kafka.common.PartitionInfo; +import org.apache.kafka.common.config.AbstractConfig; import org.oracle.okafka.common.requests.MetadataRequest; +import org.oracle.okafka.common.requests.MetadataRequest.Builder; import org.oracle.okafka.common.requests.MetadataResponse; import org.oracle.okafka.common.requests.CreateTopicsRequest.TopicDetails; +import org.oracle.okafka.common.utils.ConnectionUtils; import org.oracle.okafka.common.utils.CreateTopics; -import org.oracle.okafka.common.utils.Utils; import org.slf4j.Logger; +import java.sql.Timestamp; +import java.sql.Date; import javax.jms.JMSException; import oracle.jdbc.OracleTypes; +import oracle.jdbc.driver.OracleConnection; +import oracle.jms.AQjmsSession; +/* + * Abstract class to communicate with Oracle Database. + * This is extended by the AQKafkaProducer, AQKafkaConsumer and AQKafkaAdmin classes. + * getMetadataNow is the only implemented method and remains common to all the implementing classes. + * + * All classes extending this abstract class must implement: + * 1. ClientResponse send(ClientRequest request) method. + * Each implementing class has its own meaningful operations to perform against Oracle Database. + * AQKafkaProducer has to produce the records and fetch metadata. + * AQKafkaConsumer has to subscribe to topics, get involved in rebalancing activity, consume the records and commit the offsets. + * AQKafkaAdmin has to create or drop the topic. + * + * 2. void connect(Node node) + * Connects to the database node. + * For AQKafkaProducer and AQKafkaConsumer it also creates a JMS Session internally. + * For AQKafkaAdmin it only creates a database connection to this node and maintains it in a hashtable. + * + * 3. void close(Node node) + * Closes the database connection to this node. + * If a JMS Session was also created, it will be closed as well. + * + * 4. void close(); + * Closes the connections to all the database nodes. + * + * 5. boolean isChannelReady(Node node) + * Checks whether a connection has already been created for this node or not. + */ public abstract class AQClient { protected final Logger log ; private final AbstractConfig configs; + + + private Map instancesTostarttime; + public List all_nodes = new ArrayList<>(); + public List partitionInfoList = new ArrayList<>(); + + public static final String PARTITION_PROPERTY = "AQINTERNAL_PARTITION"; + public static final String HEADERCOUNT_PROPERTY = "AQINTERNAL_HEADERCOUNT"; + public static final String MESSAGE_VERSION = "AQINTERNAL_MESSAGEVERSION"; + public static final String STICKYDEQ_PARAM = "STICKY_DEQUEUE"; + public static final String KEYBASEDENQ_PARAM = "KEY_BASED_ENQUEUE"; + public static final String SHARDNUM_PARAM = "SHARD_NUM"; + public AQClient(Logger log, AbstractConfig configs) { this.log = log; this.configs = configs; } - private Connection conn = null; + /* + * Each implementing class has its own meaningful operations to perform against Oracle Database. + * AQKafkaProducer has to produce the records and fetch metadata.
+ * AQKafkaConsumer has to subscribe to topics, Get involved in Rebalancing activity, Consume the records and commit the offsets. + * AQKafkaAdmin has to create or drop the topic. + */ public abstract ClientResponse send(ClientRequest request); - public abstract boolean isChannelReady(Node node); - + /* + * Connects to the database node. + * For AQKafkaProducer and AQKafkaConsumer it also creates a JMS Session internally as well. + * For AQKafkaAdmin it only creates database connection to this node and maintains in a hashtable. + */ public abstract void connect(Node node) throws JMSException; + /* Checks if a connection is already created for this node or not */ + public abstract boolean isChannelReady(Node node); + + /* Checks if a connection is already created for this node or not */ public abstract void close(Node node); + /* Closes all connection to all the database nodes */ public abstract void close(); - public ClientResponse getMetadataNow(ClientRequest request, Connection con) { + + /* Get Metadata from Oracle Database. + * This involves fetching information for all the available database instances. + * Fetching partition count for the interested topics. + * Fetching information as to which topic-partition is owned at what database instance. + * */ + public ClientResponse getMetadataNow(ClientRequest request, Connection con, Node currentNode, boolean metadataRequested) { + + log.debug("AQClient: Getting Metadata now"); + MetadataRequest.Builder builder= (MetadataRequest.Builder)request.requestBuilder(); MetadataRequest metadataRequest = builder.build(); List nodes = new ArrayList<>(); @@ -65,261 +133,503 @@ public ClientResponse getMetadataNow(ClientRequest request, Connection con) { Map errorsPerTopic = new HashMap<>(); List metadataTopics = new ArrayList(metadataRequest.topics()); boolean disconnected = false; - try { - getNodes(nodes, con); - if(nodes.size() == 0) - nodes.add(request.destination()); - if(nodes.size() > 0) - getPartitionInfo(metadataRequest.topics(), metadataTopics, con, nodes, metadataRequest.allowAutoTopicCreation(), partitionInfo, errorsPerTopic); - } catch(Exception exception) { + String clusterId = ""; + boolean getPartitioninfo = false; + Map topiParameterMap = null; + try { + if(con == null) + { + disconnected = true; + throw new NullPointerException("Database connection to fetch metadata is null"); + } + //Database Name to be set as Cluster ID + clusterId = ((oracle.jdbc.internal.OracleConnection)con).getServerSessionInfo().getProperty("DATABASE_NAME"); + + getPartitioninfo = getNodes(nodes, con, currentNode, metadataRequested); + + if(getPartitioninfo || metadataRequested) + { + getPartitionInfo(metadataRequest.topics(), metadataTopics, con, + nodes, metadataRequest.allowAutoTopicCreation(), partitionInfo, errorsPerTopic); + } + + List teqParaList = metadataRequest.teqParaTopics(); + topiParameterMap = new HashMap(teqParaList.size()); + for(String teqTopic : teqParaList) + { + TopicTeqParameters teqPara = fetchQueueParameters(teqTopic, con); + topiParameterMap.put(teqTopic, teqPara); + } + + + } catch(Exception exception) { + log.error("Exception while getting metadata "+ exception.getMessage(), exception ); + //exception.printStackTrace(); + if(exception instanceof SQLException) if(((SQLException)exception).getErrorCode() == 6550) { - log.error("execute on dbms_aqadm is not assigned", ((SQLException)exception).getMessage()); - log.info("create session, execute on dbms_aqin, dbms_aqadm , dbms_aqjms privileges required for producer or consumer to work"); 
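getMetadataNow() described above is the piece that ties topics, partitions and RAC instances together. As a rough, self-contained illustration of the ownership rule it ends up applying (sample shard rows and instance ids only; the real logic lives in getPartitionInfo() further down this file): each USER_QUEUE_SHARDS row maps partition SHARD_ID/2 to the instance in ENQUEUE_INSTANCE, and partitions with no row yet are dealt out round-robin across the known nodes.

```java
import java.util.Arrays;
import java.util.List;

// Illustrative sketch only: how okafka derives partition ownership from USER_QUEUE_SHARDS rows.
// All values below are hypothetical sample data, not OKafka classes or real query results.
public class PartitionOwnershipSketch {
    public static void main(String[] args) {
        List<Integer> instanceIds = Arrays.asList(1, 2);  // RAC instance ids discovered from gv$instance
        int[][] shardRows = { {0, 1}, {2, 2} };           // {SHARD_ID, ENQUEUE_INSTANCE} sample rows
        int partitionCount = 5;                           // SHARD_NUM reported by dbms_aqadm.get_queue_parameter

        Integer[] owner = new Integer[partitionCount];
        for (int[] row : shardRows) {
            owner[row[0] / 2] = row[1];                   // partition number = SHARD_ID / 2
        }
        int next = 0;
        for (int p = 0; p < partitionCount; p++) {
            if (owner[p] == null) {                       // not yet mapped: spread round-robin over known nodes
                owner[p] = instanceIds.get(next++ % instanceIds.size());
            }
        }
        System.out.println(Arrays.toString(owner));       // prints [1, 2, 1, 2, 1]
    }
}
```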
+ log.error("Not all privileges granted to the database user.", ((SQLException)exception).getMessage()); + log.info("Please grant all the documented privileges to database user."); } if(exception instanceof SQLSyntaxErrorException) - log.trace("Please grant select on gv_$instnce , gv_$listener_network, user_queues and user_queue_shards."); + log.trace("Please grant all the documented privileges to database user."); for(String topic : metadataTopics) { errorsPerTopic.put(topic, exception); } disconnected = true; try { - log.trace("Unexcepted error occured with connection to node {}, closing the connection", request.destination()); - con.close(); + log.debug("Unexcepted error occured with connection to node {}, closing the connection", request.destination()); + if(con != null) + con.close(); + log.trace("Connection with node {} is closed", request.destination()); } catch(SQLException sqlEx) { log.trace("Failed to close connection with node {}", request.destination()); } - - - } - return new ClientResponse(request.makeHeader(), + return new ClientResponse(request.makeHeader((short)1), request.callback(), request.destination(), request.createdTimeMs(), - System.currentTimeMillis(), disconnected, new MetadataResponse(nodes, partitionInfo, errorsPerTopic)); - + System.currentTimeMillis(), disconnected, null,null, new MetadataResponse(clusterId, all_nodes, partitionInfoList, errorsPerTopic, topiParameterMap)); } - - - private void getNodes(List nodes, Connection con) throws SQLException { + + // Fetches existing cluster nodes + // Returns TRUE if new node is added, existing node went down, or if the startup time changed for the nodes + // otherwise return false + private boolean getNodes(List nodes, Connection con, Node connectedNode, boolean metadataRequested) throws SQLException { Statement stmt = null; + ResultSet result = null; + String user = ""; + boolean furtherMetadata = false; + boolean onlyOneNode = false; + try { + user = con.getMetaData().getUserName(); stmt = con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY); - String query = "select inst_id, instance_name from gv$instance"; - ResultSet result = stmt.executeQuery(query); - Map instances = new HashMap<>(); - while(result.next()) { - instances.put(result.getInt(1), result.getString(2)); - } - query = "select * from gv$listener_network"; + String query = "select inst_id, instance_name, startup_time from gv$instance"; result = stmt.executeQuery(query); - Map services = new HashMap<>(); + Map instance_names = new HashMap<>(); + Map instance_startTimes = new HashMap<>(); + while(result.next()) { - if(result.getString(3).equalsIgnoreCase("SERVICE NAME")) { - services.put(Integer.parseInt(result.getString(1)), result.getString(4)); + int instId = result.getInt(1); + String instName = result.getString(2); + instance_names.put(instId, instName); + Date startup_time = result.getDate(3); + Timestamp ts=new Timestamp(startup_time.getTime()); + instance_startTimes.put(instId, ts); + } + result.close(); + result = null; + + if (instance_names.size()==1) + { + // Connected Node is : + // Node connectedNode = getNodeToThisConnection(con); + // Only one RAC node is up and we are connected to it. 
+ if(connectedNode == null) + { + if(all_nodes == null || all_nodes.size() == 0) + { + furtherMetadata = true; + onlyOneNode = false; + } + } + else { + nodes.add(connectedNode); + all_nodes = nodes; + onlyOneNode = true; } } - result.beforeFirst(); - while(result.next()) { - if(result.getString(3).equalsIgnoreCase("LOCAL LISTENER") ) { - try { - String str = result.getString(4); - StringBuilder sb = new StringBuilder(); - for(int ind = 0;ind < str.length(); ind++) - if(str.charAt(ind) != ' ') - sb.append(str.charAt(ind)); - str = sb.toString(); - String security = configs.getString(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG); - if( (security.equalsIgnoreCase("PLAINTEXT") && getProperty(str, "PROTOCOL").equalsIgnoreCase("TCP")) || (!security.equalsIgnoreCase("PLAINTEXT") && getProperty(str, "PROTOCOL").equalsIgnoreCase("TCPS")) ) { - int id = Integer.parseInt(result.getString(1)); - if(services.get(id) != null) { + + if(!instance_startTimes.equals(instancesTostarttime)) { + instancesTostarttime = instance_startTimes; + furtherMetadata = true; + } + + if(onlyOneNode) { + return furtherMetadata; + } + + if (furtherMetadata || metadataRequested) { + + query = "select inst_id, TYPE, value from gv$listener_network order by inst_id"; + result = stmt.executeQuery(query); + Map> services = new HashMap<>(); + Map> localListenersMap = new HashMap<>(); + + String security = configs.getString(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG); + String preferredService = configs.getString(CommonClientConfigs.ORACLE_SERVICE_NAME); + if(preferredService == null) + { + if(con != null) + { + preferredService = ConnectionUtils.getConnectedService(con); + } + } + + boolean plainText = security.equalsIgnoreCase("PLAINTEXT")?true:false; + + while(result.next()) { + int instId = result.getInt(1); + String type = result.getString(2); + String value = result.getString(3); + + if(type.equalsIgnoreCase("SERVICE NAME")) { + ArrayList servicesList = services.get(instId); + if(servicesList == null) + { + servicesList = new ArrayList(); + services.put(instId,servicesList); + } + if(preferredService != null && value.equalsIgnoreCase(preferredService)) + { + log.debug("Found Preferred Services " + value); + servicesList.add(0, value); + } + else { + servicesList.add(value); + } + } + else if(type.equalsIgnoreCase("LOCAL LISTENER")) + { + ArrayList localListenerList = localListenersMap.get(instId); + if(localListenerList == null) + { + localListenerList = new ArrayList(); + localListenersMap.put(instId, localListenerList); + } + localListenerList.add(value); + } + } //Result set Parsed + result.close(); + result = null; + + for(Integer instIdNow : instance_names.keySet()) + { + /*if( instIdNow.intValue() == connectedInst) + continue; */ + + log.debug("Processing metadata for instance: " + instIdNow); + + ArrayList localListenerList = localListenersMap.get(instIdNow); + + if(localListenerList == null) + { + if(con != null) + { + //String hostNPort = ConnectionUtils.getConnectedHost(con); + String hostNPort = ConnectionUtils.getConnectedHostnPort(con); + localListenerList = new ArrayList(); + localListenerList.add(hostNPort); + } + } + if(localListenerList != null) + { + for(String localListenerNow : localListenerList) + { + log.debug("Processing Local Listener " + localListenerNow); + String str = localListenerNow; + //AdHoc processing of LISTENER STRING + StringBuilder sb = new StringBuilder(); + + for(int ind = 0;ind < str.length(); ind++) + if(str.charAt(ind) != ' ') + sb.append(str.charAt(ind)); + + str = sb.toString(); 
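The statements that follow pull PROTOCOL, HOST and PORT out of the whitespace-stripped LOCAL LISTENER descriptor. Below is a minimal, self-contained sketch of that parsing; the listener string is made up, and the helper is re-implemented inline rather than calling the getProperty method defined later in this file.

```java
// Stand-alone illustration of the descriptor parsing used by getNodes(); not the OKafka class itself.
public class ListenerParseSketch {
    // Same idea as the getProperty helper: find "<PROPERTY>=", return the text up to the closing ')'.
    static String getProperty(String str, String property) {
        String tmp = str.toUpperCase();
        int index = tmp.indexOf(property.toUpperCase());
        if (index == -1) return null;
        int eq = tmp.indexOf("=", index);
        if (eq == -1) return null;
        int close = tmp.indexOf(")", eq);
        if (close == -1) return null;
        return str.substring(eq + 1, close).trim();
    }

    public static void main(String[] args) {
        // Hypothetical LOCAL LISTENER value after whitespace has been removed
        String listener = "(ADDRESS=(PROTOCOL=TCP)(HOST=dbhost.example.com)(PORT=1521))";
        System.out.println(getProperty(listener, "PROTOCOL")); // TCP
        System.out.println(getProperty(listener, "HOST"));     // dbhost.example.com
        System.out.println(getProperty(listener, "PORT"));     // 1521
    }
}
```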
+ String protocolNow = getProperty(str,"PROTOCOL"); + log.debug("Protocol used by this local listener " + protocolNow); + + if( (plainText && protocolNow.equalsIgnoreCase("TCP")) || + (!plainText && protocolNow.equalsIgnoreCase("TCPS"))) + { String host = getProperty(str, "HOST");; - Integer port = Integer.parseInt(getProperty(str, "PORT")); - nodes.add(new Node(id, host, port, services.get(id), instances.get(id))); + Integer port = Integer.parseInt(getProperty(str, "PORT")); + log.debug("Hot:PORT " + host +":"+port); + + // ToDo: Assign Service List instead of a single Service + Node newNode =new Node(instIdNow, host, port, services.get(instIdNow).get(0), instance_names.get(instIdNow)); + newNode.setUser(user); + log.debug("New Node created: " + newNode); + newNode.updateHashCode(); + nodes.add(newNode); + all_nodes = nodes; } } - /* - StringTokenizer st = new StringTokenizer(str, "("); - - if(st.countTokens() == 4) { - st.nextToken(); - String protocolStr = st.nextToken(); - if(protocolStr.substring(9, protocolStr.length()-1).equalsIgnoreCase("tcp")) { - int id = Integer.parseInt(result.getString(1)); - if(services.get(id) != null) { - String hostStr = st.nextToken(); - String host = hostStr.substring(5, hostStr.length()-1); - String portStr = st.nextToken(); - Integer port = Integer.parseInt(portStr.substring(5, portStr.length()-2)); - nodes.add(new Node(id, host, port, services.get(id), instances.get(id))); - } - } - }*/ - - } catch(IndexOutOfBoundsException iob) { - //do nothing + log.debug("Exploring hosts of the cluster. #Nodes " + nodes.size()); + for(Node nodeNow : nodes) + { + log.debug("DB Instance: " + nodeNow); + } + } + else { + if(connectedNode != null) { + nodes.add(connectedNode); + all_nodes = nodes; + onlyOneNode = true; + } } - } } - /*List addresses = ClientUtils.parseAndValidateAddresses(configs.getList(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG)); - InetSocketAddress address = addresses.get(0); - nodes.add(new Node(0, address.getHostString(), address.getPort(), configs.getString(CommonClientConfigs.ORACLE_SERVICE_NAME), configs.getString(CommonClientConfigs.ORACLE_INSTANCE_NAME)));*/ + } + catch(Exception e) + { + log.error("Exception while updating metadata " ,e); } finally { try { + if(result != null) + result.close(); + if(stmt != null) stmt.close(); } catch(SQLException sqlEx) { //do nothing } } + + return furtherMetadata; } -private void getPartitionInfo(List topics, List topicsRem, Connection con, List nodes, boolean allowAutoTopicCreation, List partitionInfo, Map errorsPerTopic) throws Exception { - - if(nodes.size() <= 0 || topics == null || topics.isEmpty()) - return; - - String queryQShard = "select shard_id, enqueue_instance from user_queue_shards where name = ? 
"; - PreparedStatement stmt1 = null; - try { - stmt1 = con.prepareStatement(queryQShard); - int node = 0 ; - int nodesSize = nodes.size(); - ResultSet result1 = null; - Node[] nodesArray = null; - if(nodesSize > 1) { - int max = -1; - for(Node nodeNew : nodes) - if(nodeNew.id() > max) - max = nodeNew.id(); - - nodesArray = new Node[max]; - for(Node nodeNew : nodes) - nodesArray[nodeNew.id()-1] = nodeNew; - } - - for(String topic : topics) { - boolean topicDone = false; - int partCnt = 0; + private Node getNodeToThisConnection(Connection con) + { + Node node = null; + try { + String url = con.getMetaData().getURL(); + oracle.jdbc.internal.OracleConnection oracleInternalConn = (oracle.jdbc.internal.OracleConnection)con; + String instanceName = oracleInternalConn.getServerSessionInfo().getProperty("INSTANCE_NAME"); + int instanceNum = Integer.parseInt(oracleInternalConn.getServerSessionInfo().getProperty("AUTH_INSTANCE_NO")); + String dbServiceName = oracleInternalConn.getServerSessionInfo().getProperty("SERVICE_NAME"); + String userName = con.getMetaData().getUserName(); + + + String dbHost = null; + + try + { + final String hostStr = "(HOST="; + int sIndex = url.indexOf(hostStr); + int eIndex = url.indexOf(")", sIndex); + dbHost = (url.substring(sIndex+(hostStr.length()), eIndex)); + }catch(Exception e) + { + dbHost = oracleInternalConn.getServerSessionInfo().getProperty("AUTH_SC_SERVER_HOST"); + String dbDomain = oracleInternalConn.getServerSessionInfo().getProperty("AUTH_SC_DB_DOMAIN"); + dbHost = dbHost +"."+dbDomain; + } + + log.debug("DB HOST To This Connection " + dbHost); + String dbPort = null; try { - //Get number of partitions - partCnt = getPartitions(Utils.enquote(topic), con); - } catch(SQLException sqlE) { - int errorNo = sqlE.getErrorCode(); - if(errorNo == 24010) { - //Topic does not exist, it will be created - continue; + final String portStr = "(PORT="; + int sIndex = url.indexOf(portStr); + int eIndex = url.indexOf(")", sIndex); + dbPort = (url.substring(sIndex+(portStr.length()), eIndex)); + + }catch(Exception ignoreE) + {} + + if(dbPort == null) + { + List bootStrapServers = this.configs.getList(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG); + for(String bootServer : bootStrapServers) + { + StringTokenizer stn = new StringTokenizer(bootServer,":"); + + String dbHostOrigin = stn.nextToken(); + // This DB Port may be of different database host. + dbPort = stn.nextToken(); + + //Exit if DB Host Name matches, continue otherwise. If DB Host is not in bootstrap, + //then we will assume that all DB RAC nodes have same listener port + if(dbHostOrigin.equalsIgnoreCase(dbHost)) + { + break; + } } - }catch(Exception excp) { - // Topic May or may not exists. 
We will not attempt to create it again - topicsRem.remove(topic); - continue; } - - boolean partArr[] = new boolean[partCnt]; - for(int i =0; i 1) { - - stmt1.clearParameters(); - stmt1.setString(1, Utils.enquote(topic)); - result1 = stmt1.executeQuery(); - // If any row exist - if(result1.isBeforeFirst()) { - while(result1.next() ) { - int partNum = result1.getInt(1)/2; - int nodeNum = result1.getInt(2); - partitionInfo.add(new PartitionInfo(topic, partNum , nodesArray[nodeNum-1], null, null)); - partArr[partNum] = true; - } - result1.close(); - // For the partitions not yet mapped to an instance - for(int i = 0; i < partCnt ; i++) { - if( partArr[i] == false ) { - partitionInfo.add(new PartitionInfo(topic, i , nodes.get(node++%nodesSize), null, null)); - } - } - topicDone = true; - } // Entry Existed in USER_QUEUE_SHARD - }// Nodes > 1 + + node = new Node(instanceNum, dbHost,Integer.parseInt(dbPort), dbServiceName, instanceName); + node.setUser(userName); + node.updateHashCode(); + log.info("Connection was setup to node " + node); + } + catch(Exception e) + { + log.error("Exception while creating node from JDBC Connection", e ); + } + return node; + } + + + private void getPartitionInfo(List topics, List topicsRem, Connection con, + List nodes, boolean allowAutoTopicCreation, + List partitionInfo, Map errorsPerTopic) throws Exception { - // No Record in USER_QUEUE_SHARD or Node =1 check if topic exist - if(!topicDone){ - for(int i = 0; i < partCnt ; i++) { - partitionInfo.add(new PartitionInfo(topic, i , nodes.get(node++%nodesSize), null, null)); + if(nodes.size() <= 0 || topics == null || topics.isEmpty()) + return; + + //String queryQShard = "select shard_id, enqueue_instance from user_queue_shards where name = ? "; + String queryQShard = "select SHARD_ID, ENQUEUE_INSTANCE from user_queue_shards where QUEUE_ID = (select qid from user_queues where name = upper(?)) "; + PreparedStatement stmt1 = null; + try { + stmt1 = con.prepareStatement(queryQShard); + int nodeIndex = 0 ; + int nodesSize = nodes.size(); + ResultSet result1 = null; + Node[] nodesArray = null; + if(nodesSize > 1) { + int max = -1; + for(Node nodeNew : nodes) { + if(nodeNew.id() > max) + max = nodeNew.id(); } - topicDone =true; + + nodesArray = new Node[max]; + for(Node nodeNew : nodes) + nodesArray[nodeNew.id()-1] = nodeNew; } - if(topicDone) - topicsRem.remove(topic); + + for(String topic : topics) { + boolean topicDone = false; + int partCnt = 0; + try { + //Get number of partitions + partCnt = getQueueParameter(SHARDNUM_PARAM, ConnectionUtils.enquote(topic), con); + } catch(SQLException sqlE) { + int errorNo = sqlE.getErrorCode(); + if(errorNo == 24010) { + //Topic does not exist, it will be created + continue; + } + }catch(Exception excp) { + // Topic May or may not exists. 
We will not attempt to create it again + topicsRem.remove(topic); + continue; + } + + boolean partArr[] = new boolean[partCnt]; + for(int i =0; i < partCnt ;i++) + partArr[i] = false; + + // If more than one RAC node then check who is owner Node for which partition + if(nodes.size() > 1) { + + stmt1.clearParameters(); + stmt1.setString(1, topic); + result1 = stmt1.executeQuery(); + // If any row exist + if(result1.isBeforeFirst()) { + while(result1.next() ) { + int partNum = result1.getInt(1)/2; + int nodeNum = result1.getInt(2); + partitionInfo.add(new PartitionInfo(topic, partNum , nodesArray[nodeNum-1], new Node[0], new Node[0])); + partArr[partNum] = true; + } + + result1.close(); + // For the partitions not yet mapped to an instance + for(int i = 0; i < partCnt ; i++) { + if( partArr[i] == false ) { + partitionInfo.add(new PartitionInfo(topic, i , nodes.get(nodeIndex++%nodesSize), null, null)); + } + } + topicDone = true; + } // Entry Existed in USER_QUEUE_SHARD + }// Nodes > 1 + + // No Record in USER_QUEUE_SHARD or Node =1 check if topic exist + if(!topicDone){ + for(int i = 0; i < partCnt ; i++) { + //When nodeSize > 1 but the partition is not yet created, then we distribute this partition across + // available nodes by assigning the partition to node in round robin manner. + partitionInfo.add(new PartitionInfo(topic, i , nodes.get(nodeIndex++%nodesSize), null, null)); + } + topicDone =true; + } + if(topicDone) + topicsRem.remove(topic); } // For all Topics - - if(allowAutoTopicCreation && topicsRem.size() > 0) { - Map topicDetails = new HashMap(); - for(String topicRem : topicsRem) { - topicDetails.put(topicRem, new TopicDetails(1, (short)0 , Collections.emptyMap())); - } - Map errors= CreateTopics.createTopics(con, topicDetails); - for(String topicRem : topicsRem) { - if(errors.get(topicRem) == null) { - partitionInfo.add(new PartitionInfo(topicRem, 0, nodes.get(node++%nodesSize), null, null)); - } else { - errorsPerTopic.put(topicRem, errors.get(topicRem)); - } + + if(allowAutoTopicCreation && topicsRem.size() > 0) { + Map topicDetails = new HashMap(); + for(String topicRem : topicsRem) { + topicDetails.put(topicRem, new TopicDetails(1, (short)0 , Collections.emptyMap())); + } + Map errors= CreateTopics.createTopics(con, topicDetails); + for(String topicRem : topicsRem) { + if(errors.get(topicRem) == null) { + partitionInfo.add(new PartitionInfo(topicRem, 0, nodes.get(nodeIndex++%nodesSize), null, null)); + } else { + errorsPerTopic.put(topicRem, errors.get(topicRem)); + } + } } - } + partitionInfoList = partitionInfo; } finally { try { if(stmt1 != null) stmt1.close(); } catch(Exception ex) { - //do nothing + //do nothing } } } + + // returns the value for a queue Parameter + public int getQueueParameter(String queueParamName, String topic, Connection con) throws SQLException { + if(topic == null) return 0; + String query = "begin dbms_aqadm.get_queue_parameter(?,?,?); end;"; + CallableStatement cStmt = null; + int para= 1; - private int getPartitions(String topic, Connection con) throws Exception { - if(topic == null) return 0; - String query = "begin dbms_aqadm.get_queue_parameter(?,?,?); end;"; - CallableStatement cStmt = null; - int part = 1; - try { - cStmt = con.prepareCall(query); - cStmt.setString(1, topic); - cStmt.setString(2, "SHARD_NUM"); - cStmt.registerOutParameter(3, OracleTypes.NUMBER); - cStmt.execute(); - part = cStmt.getInt(3); - } - catch(SQLException ex) { - throw ex; - } - finally { - try { - if(cStmt != null) - cStmt.close(); - } catch(Exception ex) { - 
//Do Nothing - } - } - return part; - } + try { + cStmt = con.prepareCall(query); + cStmt.setString(1, topic); + cStmt.setString(2, queueParamName); + cStmt.registerOutParameter(3, OracleTypes.NUMBER); + cStmt.execute(); + para = cStmt.getInt(3); + } + finally { + if(cStmt != null) + cStmt.close(); + } + return para; + } + + // Fetches all the queue parameters for a topic from the TEQ server, + // and maintains metadata(all queue parameter values) for that topic. + public void fetchQueueParameters(String topic, Connection conn, HashMap topicParaMap) throws SQLException { + if(topic == null) return ; + if(!topicParaMap.containsKey(topic)) { + TopicTeqParameters topicTeqParam = fetchQueueParameters(topic, conn); + topicParaMap.put(topic, topicTeqParam); + } + } - private String getProperty(String str, String property) { + public TopicTeqParameters fetchQueueParameters(String topic, Connection conn) throws SQLException { + if(topic == null) return null ; + + TopicTeqParameters topicTeqParam = new TopicTeqParameters(); + topicTeqParam.setKeyBased(getQueueParameter(KEYBASEDENQ_PARAM, topic, conn)); + topicTeqParam.setStickyDeq(getQueueParameter(STICKYDEQ_PARAM, topic, conn)); + topicTeqParam.setShardNum(getQueueParameter(SHARDNUM_PARAM, topic, conn)); + + return topicTeqParam; + } + + public static String getProperty(String str, String property) { String tmp = str.toUpperCase(); int index = tmp.indexOf(property.toUpperCase()); if(index == -1) return null; - int index1 = tmp.indexOf("=", index); - if(index1 == -1) - return null; - int index2 = tmp.indexOf(")", index1); - if(index2 == -1) - return null; - return str.substring(index1 + 1, index2).trim(); - } - + int index1 = tmp.indexOf("=", index); + if(index1 == -1) + return null; + int index2 = tmp.indexOf(")", index1); + if(index2 == -1) + return null; + return str.substring(index1 + 1, index2).trim(); + } + } diff --git a/clients/src/main/java/org/oracle/okafka/common/network/ChannelState.java b/clients/src/main/java/org/oracle/okafka/common/network/ChannelState.java deleted file mode 100644 index 04de044..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/network/ChannelState.java +++ /dev/null @@ -1,100 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.network; - -import org.oracle.okafka.common.errors.AuthenticationException; - -/** - * States for KafkaChannel: - *
- * <ul>
- *   <li>NOT_CONNECTED: Connections are created in NOT_CONNECTED state. State is updated
- *       on {@link TransportLayer#finishConnect()} when socket connection is established.
- *       PLAINTEXT channels transition from NOT_CONNECTED to READY, others transition
- *       to AUTHENTICATE. Failures in NOT_CONNECTED state typically indicate that the
- *       remote endpoint is unavailable, which may be due to misconfigured endpoints.</li>
- *   <li>AUTHENTICATE: SSL, SASL_SSL and SASL_PLAINTEXT channels are in AUTHENTICATE state during SSL and
- *       SASL handshake. Disconnections in AUTHENTICATE state may indicate that authentication failed with
- *       SSL or SASL (broker version < 1.0.0). Channels transition to READY state when authentication completes
- *       successfully.</li>
- *   <li>READY: Connected, authenticated channels are in READY state. Channels may transition from
- *       READY to EXPIRED, FAILED_SEND or LOCAL_CLOSE.</li>
- *   <li>EXPIRED: Idle connections are moved to EXPIRED state on idle timeout and the channel is closed.</li>
- *   <li>FAILED_SEND: Channels transition from READY to FAILED_SEND state if the channel is closed due
- *       to a send failure.</li>
- *   <li>AUTHENTICATION_FAILED: Channels are moved to this state if the requested SASL mechanism is not
- *       enabled in the broker or when brokers with versions 1.0.0 and above provide an error response
- *       during SASL authentication. {@link #exception()} gives the reason provided by the broker for
- *       authentication failure.</li>
- *   <li>LOCAL_CLOSE: Channels are moved to LOCAL_CLOSE state if close() is initiated locally.</li>
- * </ul>
- * If the remote endpoint closes a channel, the state of the channel reflects the state the channel
- * was in at the time of disconnection. This state may be useful to identify the reason for disconnection.
- * <p>
- * Typical transitions:
- * <ul>
- *   <li>PLAINTEXT Good path: NOT_CONNECTED => READY => LOCAL_CLOSE</li>
- *   <li>SASL/SSL Good path: NOT_CONNECTED => AUTHENTICATE => READY => LOCAL_CLOSE</li>
- *   <li>Bootstrap server misconfiguration: NOT_CONNECTED, disconnected in NOT_CONNECTED state</li>
- *   <li>Security misconfiguration: NOT_CONNECTED => AUTHENTICATE => AUTHENTICATION_FAILED, disconnected in AUTHENTICATION_FAILED state</li>
- *   <li>Security misconfiguration with older broker: NOT_CONNECTED => AUTHENTICATE, disconnected in AUTHENTICATE state</li>
- * </ul>
      - */ -public class ChannelState { - public enum State { - NOT_CONNECTED, - AUTHENTICATE, - READY, - EXPIRED, - FAILED_SEND, - AUTHENTICATION_FAILED, - LOCAL_CLOSE - }; - // AUTHENTICATION_FAILED has a custom exception. For other states, - // create a reusable `ChannelState` instance per-state. - public static final ChannelState NOT_CONNECTED = new ChannelState(State.NOT_CONNECTED); - public static final ChannelState AUTHENTICATE = new ChannelState(State.AUTHENTICATE); - public static final ChannelState READY = new ChannelState(State.READY); - public static final ChannelState EXPIRED = new ChannelState(State.EXPIRED); - public static final ChannelState FAILED_SEND = new ChannelState(State.FAILED_SEND); - public static final ChannelState LOCAL_CLOSE = new ChannelState(State.LOCAL_CLOSE); - - private final State state; - private final AuthenticationException exception; - public ChannelState(State state) { - this(state, null); - } - public ChannelState(State state, AuthenticationException exception) { - this.state = state; - this.exception = exception; - } - - public State state() { - return state; - } - - public AuthenticationException exception() { - return exception; - } -} diff --git a/clients/src/main/java/org/oracle/okafka/common/network/SelectorMetrics.java b/clients/src/main/java/org/oracle/okafka/common/network/SelectorMetrics.java new file mode 100644 index 0000000..7cfb312 --- /dev/null +++ b/clients/src/main/java/org/oracle/okafka/common/network/SelectorMetrics.java @@ -0,0 +1,203 @@ +/* +** OKafka Java Client version 23.4. +** +** Copyright (c) 2019, 2024 Oracle and/or its affiliates. +** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +*/ + +package org.oracle.okafka.common.network; + +import java.util.ArrayList; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; + +import org.apache.kafka.common.MetricName; +import org.apache.kafka.common.metrics.Metrics; +import org.apache.kafka.common.metrics.Sensor; +import org.apache.kafka.common.metrics.stats.Avg; +import org.apache.kafka.common.metrics.stats.Max; +import org.apache.kafka.common.metrics.stats.Meter; +import org.apache.kafka.common.metrics.stats.SampledStat; +import org.apache.kafka.common.metrics.stats.WindowedCount; +import org.oracle.okafka.common.Node; + +public class SelectorMetrics { + + private final Metrics metrics; + private final Map metricTags; + private final boolean metricsPerConnection; + private final String metricGrpName; + private final String perConnectionMetricGrpName; + + public final Sensor connectionClosed; + public final Sensor connectionCreated; + public final Sensor requestsSent; + public final Sensor responsesReceived; + + + /* Names of metrics that are not registered through sensors */ + private final List topLevelMetricNames = new ArrayList<>(); + private final List sensors = new ArrayList<>(); + + public SelectorMetrics(Metrics metrics, String metricGrpPrefix, Map metricTags,boolean metricsPerConnection) { + + this.metrics = metrics; + this.metricTags = metricTags; + this.metricsPerConnection = metricsPerConnection; + this.metricGrpName = metricGrpPrefix + "-metrics"; + this.perConnectionMetricGrpName = metricGrpPrefix + "-node-metrics"; + + StringBuilder tagsSuffix = new StringBuilder(); + + for (Map.Entry tag: metricTags.entrySet()) { + tagsSuffix.append(tag.getKey()); + tagsSuffix.append("-"); + tagsSuffix.append(tag.getValue()); + } + + this.connectionClosed = sensor("connections-closed:" + tagsSuffix); 
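SelectorMetrics leans on the stock Apache Kafka metrics library: each Sensor aggregates one or more stats (a Meter for rate/total, plus Avg, Max and similar), and a single record() call updates all of them. The block below is a minimal sketch of that pattern, with invented group and metric names rather than the ones registered by this class.

```java
import java.util.Collections;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.metrics.Metrics;
import org.apache.kafka.common.metrics.Sensor;
import org.apache.kafka.common.metrics.stats.Avg;
import org.apache.kafka.common.metrics.stats.Max;
import org.apache.kafka.common.metrics.stats.Meter;

public class SensorSketch {
    public static void main(String[] args) {
        Metrics metrics = new Metrics();                  // default config, no extra reporters
        Sensor sent = metrics.sensor("requests-sent-demo");

        // A Meter registers a rate metric and a cumulative total metric in one go.
        MetricName rate  = metrics.metricName("request-rate",  "demo-metrics", "requests per second", Collections.emptyMap());
        MetricName total = metrics.metricName("request-total", "demo-metrics", "total requests",      Collections.emptyMap());
        sent.add(new Meter(rate, total));

        // Additional simple stats can be attached to the same sensor.
        sent.add(metrics.metricName("request-size-avg", "demo-metrics", "average request size", Collections.emptyMap()), new Avg());
        sent.add(metrics.metricName("request-size-max", "demo-metrics", "largest request size", Collections.emptyMap()), new Max());

        sent.record(512);                                 // one call updates rate, total, avg and max
        metrics.close();
    }
}
```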
+ this.connectionClosed.add(createMeter(metrics, metricGrpName, metricTags, + "connection-close", "connections closed")); + + this.connectionCreated = sensor("connections-created:" + tagsSuffix); + this.connectionCreated.add(createMeter(metrics, metricGrpName, metricTags, + "connection-creation", "new connections established")); + + this.requestsSent = sensor("requests-sent:" + tagsSuffix); + this.requestsSent.add(createMeter(metrics, metricGrpName, metricTags, new WindowedCount(), "request", "requests sent")); + MetricName metricName = metrics.metricName("request-size-avg", metricGrpName, "The average size of requests sent.", metricTags); + this.requestsSent.add(metricName, new Avg()); + metricName = metrics.metricName("request-size-max", metricGrpName, "The maximum size of any request sent.", metricTags); + this.requestsSent.add(metricName, new Max()); + + + this.responsesReceived = sensor("responses-received:" + tagsSuffix); + this.responsesReceived.add(createMeter(metrics, metricGrpName, metricTags, + new WindowedCount(), "response", "responses received")); + + /* + metricName=metrics.metricName("connection-count", metricGrpName,"The current number of active connections.", metricTags); + topLevelMetricNames.add(metricName); + this.metrics.addMetric(metricName, (config, now) -> topicPublishersMap.size()); + */ + } + + private Sensor sensor(String name, Sensor... parents) { + Sensor sensor = metrics.sensor(name, parents); + sensors.add(sensor); + return sensor; + } + + private Meter createMeter(Metrics metrics, String groupName, Map metricTags, + String baseName, String descriptiveName) { + return createMeter(metrics, groupName, metricTags, null, baseName, descriptiveName); + } + + private Meter createMeter(Metrics metrics, String groupName, Map metricTags, SampledStat stat, + String baseName, String descriptiveName) { + MetricName rateMetricName = metrics.metricName(baseName + "-rate", groupName, + String.format("The number of %s per second", descriptiveName), metricTags); + MetricName totalMetricName = metrics.metricName(baseName + "-total", groupName, + String.format("The total number of %s", descriptiveName), metricTags); + if (stat == null) + return new Meter(rateMetricName, totalMetricName); + else + return new Meter(stat, rateMetricName, totalMetricName); + } + + public void recordConnectionCount(Map map ) { + MetricName metricName=metrics.metricName("connection-count", metricGrpName,"The current number of active connections.", metricTags); + topLevelMetricNames.add(metricName); + this.metrics.addMetric(metricName, (config, now) -> map.size()); + } + + public void maybeRegisterConnectionMetrics(Node node) { + + + if (!node.isEmpty() && metricsPerConnection) { + // if one sensor of the metrics has been registered for the connection, + // then all other sensors should have been registered; and vice versa + String connectionId="" +node.id(); + String nodeRequestName = "node-" + connectionId + ".requests-sent"; + Sensor nodeRequest = this.metrics.getSensor(nodeRequestName); + Sensor nodeDescription=sensor("node-description:"); + MetricName metricName= metrics.metricName("Node-"+ node.id(), metricGrpName, node.toString(), metricTags); + nodeDescription.add(metricName,new WindowedCount()); + if (nodeRequest == null) { + Map tags = new LinkedHashMap<>(metricTags); + tags.put("node-id", "node-" + connectionId); + + nodeRequest = sensor(nodeRequestName); + nodeRequest.add(createMeter(metrics, perConnectionMetricGrpName, tags, new WindowedCount(), "request", "requests sent")); + metricName 
= metrics.metricName("request-size-avg", perConnectionMetricGrpName, "The average size of requests sent.", tags); + nodeRequest.add(metricName, new Avg()); + metricName = metrics.metricName("request-size-max", perConnectionMetricGrpName, "The maximum size of any request sent.", tags); + nodeRequest.add(metricName, new Max()); + + String bytesSentName = "node-" + connectionId + ".bytes-sent"; + Sensor bytesSent = sensor(bytesSentName); + bytesSent.add(createMeter(metrics, perConnectionMetricGrpName, tags, "outgoing-byte", "outgoing bytes")); + + String nodeResponseName = "node-" + connectionId + ".responses-received"; + Sensor nodeResponse = sensor(nodeResponseName); + nodeResponse.add(createMeter(metrics, perConnectionMetricGrpName, tags, new WindowedCount(), "response", "responses received")); + + String bytesReceivedName = "node-" + connectionId + ".bytes-received"; + Sensor bytesReceive = sensor(bytesReceivedName); + bytesReceive.add(createMeter(metrics, perConnectionMetricGrpName, tags, "incoming-byte", "incoming bytes")); + + String nodeTimeName = "node-" + connectionId + ".latency"; + Sensor nodeRequestTime = sensor(nodeTimeName); + metricName = metrics.metricName("request-latency-avg", perConnectionMetricGrpName, tags); + nodeRequestTime.add(metricName, new Avg()); + metricName = metrics.metricName("request-latency-max", perConnectionMetricGrpName, tags); + nodeRequestTime.add(metricName, new Max()); + } + } + } + + public void requestCompletedSend(String connectionId) { + requestsSent.record(); + if (!connectionId.isEmpty()) { + String nodeRequestName = "node-" + connectionId + ".requests-sent"; + Sensor nodeRequest = this.metrics.getSensor(nodeRequestName); + if (nodeRequest != null) + nodeRequest.record(); + + } + } + + public void recordCompletedSend(String connectionId, long totalBytes, long currentTimeMs) { + requestsSent.record(totalBytes, currentTimeMs, false); + if (!connectionId.isEmpty()) { + String nodeRequestName = "node-" + connectionId + ".requests-sent"; + Sensor nodeRequest = this.metrics.getSensor(nodeRequestName); + if (nodeRequest != null) { + nodeRequest.record(totalBytes, currentTimeMs); + } + } + } + + public void recordCompletedReceive(String connectionId,double latencyMs) { + responsesReceived.record(); + if (!connectionId.isEmpty()) { + String nodeRequestName = "node-" + connectionId + ".responses-received"; + Sensor nodeRequest = this.metrics.getSensor(nodeRequestName); + if (nodeRequest != null) + nodeRequest.record(); + String nodeTimeName="node-" + connectionId + ".latency"; + Sensor nodeRequestTime=this.metrics.getSensor(nodeTimeName); + if(nodeRequestTime!=null) + nodeRequestTime.record(latencyMs); + } + } + + public void close() { + for (MetricName metricName : topLevelMetricNames) + metrics.removeMetric(metricName); + for (Sensor sensor : sensors) + metrics.removeSensor(sensor.name()); + + } +} diff --git a/clients/src/main/java/org/oracle/okafka/common/protocol/ApiKeys.java b/clients/src/main/java/org/oracle/okafka/common/protocol/ApiKeys.java index 53f8663..76ba467 100644 --- a/clients/src/main/java/org/oracle/okafka/common/protocol/ApiKeys.java +++ b/clients/src/main/java/org/oracle/okafka/common/protocol/ApiKeys.java @@ -1,7 +1,7 @@ /* -** OKafka Java Client version 0.8. +** OKafka Java Client version 23.4. ** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. +** Copyright (c) 2019, 2024 Oracle and/or its affiliates. ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
*/ @@ -23,7 +23,7 @@ */ /* -** OKafka Java Client version 0.8. +** OKafka Java Client version 23.4. ** ** Copyright (c) 2019, 2020 Oracle and/or its affiliates. ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. @@ -43,8 +43,11 @@ public enum ApiKeys { FETCH(4, "Consume"), COMMIT(5, "Commit"), SUBSCRIBE(6, "Subscribe"), - OFFSETRESET(7, "offsetreset"), - UNSUBSCRIBE(8, "unsubscribe"); + OFFSETRESET(7, "OffsetReset"), + UNSUBSCRIBE(8, "Unsubscribe"), + JOIN_GROUP(9, "JoinGroup"), + SYNC_GROUP(10, "SyncGroup"), + CONNECT_ME(11,"ConnectMe"); private static final ApiKeys[] ID_TO_TYPE; private static final int MIN_API_KEY = 0; public static final int MAX_API_KEY; @@ -108,4 +111,78 @@ private static String toHtml() { public static void main(String[] args) { System.out.println(toHtml()); } + + public static org.apache.kafka.common.protocol.ApiKeys convertToApacheKafkaKey(ApiKeys apiKey) + { + switch(apiKey) + { + case CREATE_TOPICS: + return org.apache.kafka.common.protocol.ApiKeys.CREATE_TOPICS; + case DELETE_TOPICS: + return org.apache.kafka.common.protocol.ApiKeys.DELETE_TOPICS; + case METADATA: + return org.apache.kafka.common.protocol.ApiKeys.METADATA; + case PRODUCE: + return org.apache.kafka.common.protocol.ApiKeys.PRODUCE; + case FETCH: + return org.apache.kafka.common.protocol.ApiKeys.FETCH; + case COMMIT: + return org.apache.kafka.common.protocol.ApiKeys.OFFSET_COMMIT; + case SUBSCRIBE: + //Not present in Apache Kafka. Dummy set to DESCRIBE_GROUPS + return org.apache.kafka.common.protocol.ApiKeys.DESCRIBE_GROUPS; + case OFFSETRESET: + //Seek operation. + return org.apache.kafka.common.protocol.ApiKeys.OFFSET_FETCH; + case UNSUBSCRIBE: + return org.apache.kafka.common.protocol.ApiKeys.DELETE_GROUPS; + case JOIN_GROUP: + return org.apache.kafka.common.protocol.ApiKeys.JOIN_GROUP; + case SYNC_GROUP: + return org.apache.kafka.common.protocol.ApiKeys.SYNC_GROUP; + case CONNECT_ME: + //Operation to find Oracle RAC Node to connect to. Not exactly a FIND_CORRDINATOR call. + return org.apache.kafka.common.protocol.ApiKeys.FIND_COORDINATOR; + default: + // Default to HEARTBEAT. No SUpport for HEARTBEAT for oKafka. + return org.apache.kafka.common.protocol.ApiKeys.HEARTBEAT; + } + } + + public static ApiKeys convertToOracleApiKey(org.apache.kafka.common.protocol.ApiKeys apiKey) + { + switch(apiKey) + { + case CREATE_TOPICS: + return ApiKeys.CREATE_TOPICS; + case DELETE_TOPICS: + return ApiKeys.DELETE_TOPICS; + case METADATA: + return ApiKeys.METADATA; + case PRODUCE: + return ApiKeys.PRODUCE; + case FETCH: + return ApiKeys.FETCH; + case OFFSET_COMMIT: + return COMMIT; + case DESCRIBE_GROUPS: + //Not present in Apache Kafka. Dummy set to DESCRIBE_GROUPS + return SUBSCRIBE; + case OFFSET_FETCH: + //Seek operation. + return OFFSETRESET; + case DELETE_GROUPS: + return UNSUBSCRIBE; + case JOIN_GROUP: + return JOIN_GROUP; + case SYNC_GROUP: + return SYNC_GROUP; + case FIND_COORDINATOR: + //Operation to find Oracle RAC Node to connect to. Not exactly a FIND_CORRDINATOR call. + return CONNECT_ME; + default: + // Default to FETCH. + return FETCH; + } + } } diff --git a/clients/src/main/java/org/oracle/okafka/common/record/AbstractLegacyRecordBatch.java b/clients/src/main/java/org/oracle/okafka/common/record/AbstractLegacyRecordBatch.java deleted file mode 100644 index 78ec644..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/record/AbstractLegacyRecordBatch.java +++ /dev/null @@ -1,607 +0,0 @@ -/* -** OKafka Java Client version 0.8. 
-** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.record; - -import static org.oracle.okafka.common.record.Records.LOG_OVERHEAD; -import static org.oracle.okafka.common.record.Records.OFFSET_OFFSET; - -import java.io.DataOutputStream; -import java.io.IOException; -import java.io.InputStream; -import java.nio.ByteBuffer; -import java.nio.channels.FileChannel; -import java.util.ArrayDeque; -import java.util.Iterator; -import java.util.NoSuchElementException; - -import org.oracle.okafka.common.KafkaException; -import org.oracle.okafka.common.errors.CorruptRecordException; -import org.oracle.okafka.common.header.Header; -import org.oracle.okafka.common.utils.AbstractIterator; -import org.oracle.okafka.common.utils.ByteBufferOutputStream; -import org.oracle.okafka.common.utils.ByteUtils; -import org.oracle.okafka.common.utils.CloseableIterator; -import org.oracle.okafka.common.utils.Utils; - -/** - * This {@link RecordBatch} implementation is for magic versions 0 and 1. In addition to implementing - * {@link RecordBatch}, it also implements {@link Record}, which exposes the duality of the old message - * format in its handling of compressed messages. The wrapper record is considered the record batch in this - * interface, while the inner records are considered the log records (though they both share the same schema). - * - * In general, this class should not be used directly. Instances of {@link Records} provides access to this - * class indirectly through the {@link RecordBatch} interface. 
- */ -public abstract class AbstractLegacyRecordBatch extends AbstractRecordBatch implements Record { - - public abstract LegacyRecord outerRecord(); - - @Override - public long lastOffset() { - return offset(); - } - - @Override - public boolean isValid() { - return outerRecord().isValid(); - } - - @Override - public void ensureValid() { - outerRecord().ensureValid(); - } - - @Override - public int keySize() { - return outerRecord().keySize(); - } - - @Override - public boolean hasKey() { - return outerRecord().hasKey(); - } - - @Override - public ByteBuffer key() { - return outerRecord().key(); - } - - @Override - public int valueSize() { - return outerRecord().valueSize(); - } - - @Override - public boolean hasValue() { - return !outerRecord().hasNullValue(); - } - - @Override - public ByteBuffer value() { - return outerRecord().value(); - } - - @Override - public Header[] headers() { - return Record.EMPTY_HEADERS; - } - - @Override - public boolean hasMagic(byte magic) { - return magic == outerRecord().magic(); - } - - @Override - public boolean hasTimestampType(TimestampType timestampType) { - return outerRecord().timestampType() == timestampType; - } - - @Override - public Long checksumOrNull() { - return checksum(); - } - - @Override - public long checksum() { - return outerRecord().checksum(); - } - - @Override - public long maxTimestamp() { - return timestamp(); - } - - @Override - public long timestamp() { - return outerRecord().timestamp(); - } - - @Override - public TimestampType timestampType() { - return outerRecord().timestampType(); - } - - @Override - public long baseOffset() { - return iterator().next().offset(); - } - - @Override - public byte magic() { - return outerRecord().magic(); - } - - @Override - public CompressionType compressionType() { - return outerRecord().compressionType(); - } - - @Override - public int sizeInBytes() { - return outerRecord().sizeInBytes() + LOG_OVERHEAD; - } - - @Override - public Integer countOrNull() { - return null; - } - - @Override - public String toString() { - return "LegacyRecordBatch(offset=" + offset() + ", " + outerRecord() + ")"; - } - - @Override - public void writeTo(ByteBuffer buffer) { - writeHeader(buffer, offset(), outerRecord().sizeInBytes()); - buffer.put(outerRecord().buffer().duplicate()); - } - - @Override - public long producerId() { - return RecordBatch.NO_PRODUCER_ID; - } - - @Override - public short producerEpoch() { - return RecordBatch.NO_PRODUCER_EPOCH; - } - - @Override - public boolean hasProducerId() { - return false; - } - - @Override - public int sequence() { - return RecordBatch.NO_SEQUENCE; - } - - @Override - public int baseSequence() { - return RecordBatch.NO_SEQUENCE; - } - - @Override - public int lastSequence() { - return RecordBatch.NO_SEQUENCE; - } - - @Override - public boolean isTransactional() { - return false; - } - - @Override - public int partitionLeaderEpoch() { - return RecordBatch.NO_PARTITION_LEADER_EPOCH; - } - - @Override - public boolean isControlBatch() { - return false; - } - - /** - * Get an iterator for the nested entries contained within this batch. Note that - * if the batch is not compressed, then this method will return an iterator over the - * shallow record only (i.e. this object). 
- * @return An iterator over the records contained within this batch - */ - @Override - public Iterator iterator() { - return iterator(BufferSupplier.NO_CACHING); - } - - private CloseableIterator iterator(BufferSupplier bufferSupplier) { - if (isCompressed()) - return new DeepRecordsIterator(this, false, Integer.MAX_VALUE, bufferSupplier); - - return new CloseableIterator() { - private boolean hasNext = true; - - @Override - public void close() {} - - @Override - public boolean hasNext() { - return hasNext; - } - - @Override - public Record next() { - if (!hasNext) - throw new NoSuchElementException(); - hasNext = false; - return AbstractLegacyRecordBatch.this; - } - - @Override - public void remove() { - throw new UnsupportedOperationException(); - } - }; - } - - @Override - public CloseableIterator streamingIterator(BufferSupplier bufferSupplier) { - // the older message format versions do not support streaming, so we return the normal iterator - return iterator(bufferSupplier); - } - - static void writeHeader(ByteBuffer buffer, long offset, int size) { - buffer.putLong(offset); - buffer.putInt(size); - } - - static void writeHeader(DataOutputStream out, long offset, int size) throws IOException { - out.writeLong(offset); - out.writeInt(size); - } - - private static final class DataLogInputStream implements LogInputStream { - private final InputStream stream; - protected final int maxMessageSize; - private final ByteBuffer offsetAndSizeBuffer; - - DataLogInputStream(InputStream stream, int maxMessageSize) { - this.stream = stream; - this.maxMessageSize = maxMessageSize; - this.offsetAndSizeBuffer = ByteBuffer.allocate(Records.LOG_OVERHEAD); - } - - public AbstractLegacyRecordBatch nextBatch() throws IOException { - offsetAndSizeBuffer.clear(); - Utils.readFully(stream, offsetAndSizeBuffer); - if (offsetAndSizeBuffer.hasRemaining()) - return null; - - long offset = offsetAndSizeBuffer.getLong(Records.OFFSET_OFFSET); - int size = offsetAndSizeBuffer.getInt(Records.SIZE_OFFSET); - if (size < LegacyRecord.RECORD_OVERHEAD_V0) - throw new CorruptRecordException(String.format("Record size is less than the minimum record overhead (%d)", LegacyRecord.RECORD_OVERHEAD_V0)); - if (size > maxMessageSize) - throw new CorruptRecordException(String.format("Record size exceeds the largest allowable message size (%d).", maxMessageSize)); - - ByteBuffer batchBuffer = ByteBuffer.allocate(size); - Utils.readFully(stream, batchBuffer); - if (batchBuffer.hasRemaining()) - return null; - batchBuffer.flip(); - - return new BasicLegacyRecordBatch(offset, new LegacyRecord(batchBuffer)); - } - } - - private static class DeepRecordsIterator extends AbstractIterator implements CloseableIterator { - private final ArrayDeque innerEntries; - private final long absoluteBaseOffset; - private final byte wrapperMagic; - - private DeepRecordsIterator(AbstractLegacyRecordBatch wrapperEntry, - boolean ensureMatchingMagic, - int maxMessageSize, - BufferSupplier bufferSupplier) { - LegacyRecord wrapperRecord = wrapperEntry.outerRecord(); - this.wrapperMagic = wrapperRecord.magic(); - if (wrapperMagic != RecordBatch.MAGIC_VALUE_V0 && wrapperMagic != RecordBatch.MAGIC_VALUE_V1) - throw new InvalidRecordException("Invalid wrapper magic found in legacy deep record iterator " + wrapperMagic); - - CompressionType compressionType = wrapperRecord.compressionType(); - ByteBuffer wrapperValue = wrapperRecord.value(); - if (wrapperValue == null) - throw new InvalidRecordException("Found invalid compressed record set with null value (magic 
= " + - wrapperMagic + ")"); - - InputStream stream = compressionType.wrapForInput(wrapperValue, wrapperRecord.magic(), bufferSupplier); - LogInputStream logStream = new DataLogInputStream(stream, maxMessageSize); - - long lastOffsetFromWrapper = wrapperEntry.lastOffset(); - long timestampFromWrapper = wrapperRecord.timestamp(); - this.innerEntries = new ArrayDeque<>(); - - // If relative offset is used, we need to decompress the entire message first to compute - // the absolute offset. For simplicity and because it's a format that is on its way out, we - // do the same for message format version 0 - try { - while (true) { - AbstractLegacyRecordBatch innerEntry = logStream.nextBatch(); - if (innerEntry == null) - break; - - LegacyRecord record = innerEntry.outerRecord(); - byte magic = record.magic(); - - if (ensureMatchingMagic && magic != wrapperMagic) - throw new InvalidRecordException("Compressed message magic " + magic + - " does not match wrapper magic " + wrapperMagic); - - if (magic == RecordBatch.MAGIC_VALUE_V1) { - LegacyRecord recordWithTimestamp = new LegacyRecord( - record.buffer(), - timestampFromWrapper, - wrapperRecord.timestampType()); - innerEntry = new BasicLegacyRecordBatch(innerEntry.lastOffset(), recordWithTimestamp); - } - - innerEntries.addLast(innerEntry); - } - - if (innerEntries.isEmpty()) - throw new InvalidRecordException("Found invalid compressed record set with no inner records"); - - if (wrapperMagic == RecordBatch.MAGIC_VALUE_V1) { - if (lastOffsetFromWrapper == 0) { - // The outer offset may be 0 if this is produce data from certain versions of librdkafka. - this.absoluteBaseOffset = 0; - } else { - long lastInnerOffset = innerEntries.getLast().offset(); - if (lastOffsetFromWrapper < lastInnerOffset) - throw new InvalidRecordException("Found invalid wrapper offset in compressed v1 message set, " + - "wrapper offset '" + lastOffsetFromWrapper + "' is less than the last inner message " + - "offset '" + lastInnerOffset + "' and it is not zero."); - this.absoluteBaseOffset = lastOffsetFromWrapper - lastInnerOffset; - } - } else { - this.absoluteBaseOffset = -1; - } - } catch (IOException e) { - throw new KafkaException(e); - } finally { - Utils.closeQuietly(stream, "records iterator stream"); - } - } - - @Override - protected Record makeNext() { - if (innerEntries.isEmpty()) - return allDone(); - - AbstractLegacyRecordBatch entry = innerEntries.remove(); - - // Convert offset to absolute offset if needed. - if (wrapperMagic == RecordBatch.MAGIC_VALUE_V1) { - long absoluteOffset = absoluteBaseOffset + entry.offset(); - entry = new BasicLegacyRecordBatch(absoluteOffset, entry.outerRecord()); - } - - if (entry.isCompressed()) - throw new InvalidRecordException("Inner messages must not be compressed"); - - return entry; - } - - @Override - public void close() {} - } - - private static class BasicLegacyRecordBatch extends AbstractLegacyRecordBatch { - private final LegacyRecord record; - private final long offset; - - private BasicLegacyRecordBatch(long offset, LegacyRecord record) { - this.offset = offset; - this.record = record; - } - - @Override - public long offset() { - return offset; - } - - @Override - public LegacyRecord outerRecord() { - return record; - } - - @Override - public boolean equals(Object o) { - if (this == o) - return true; - if (o == null || getClass() != o.getClass()) - return false; - - BasicLegacyRecordBatch that = (BasicLegacyRecordBatch) o; - - return offset == that.offset && - (record != null ? 
record.equals(that.record) : that.record == null); - } - - @Override - public int hashCode() { - int result = record != null ? record.hashCode() : 0; - result = 31 * result + (int) (offset ^ (offset >>> 32)); - return result; - } - } - - static class ByteBufferLegacyRecordBatch extends AbstractLegacyRecordBatch implements MutableRecordBatch { - private final ByteBuffer buffer; - private final LegacyRecord record; - - ByteBufferLegacyRecordBatch(ByteBuffer buffer) { - this.buffer = buffer; - buffer.position(LOG_OVERHEAD); - this.record = new LegacyRecord(buffer.slice()); - buffer.position(OFFSET_OFFSET); - } - - @Override - public long offset() { - return buffer.getLong(OFFSET_OFFSET); - } - - @Override - public LegacyRecord outerRecord() { - return record; - } - - @Override - public void setLastOffset(long offset) { - buffer.putLong(OFFSET_OFFSET, offset); - } - - @Override - public void setMaxTimestamp(TimestampType timestampType, long timestamp) { - if (record.magic() == RecordBatch.MAGIC_VALUE_V0) - throw new UnsupportedOperationException("Cannot set timestamp for a record with magic = 0"); - - long currentTimestamp = record.timestamp(); - // We don't need to recompute crc if the timestamp is not updated. - if (record.timestampType() == timestampType && currentTimestamp == timestamp) - return; - - setTimestampAndUpdateCrc(timestampType, timestamp); - } - - @Override - public void setPartitionLeaderEpoch(int epoch) { - throw new UnsupportedOperationException("Magic versions prior to 2 do not support partition leader epoch"); - } - - private void setTimestampAndUpdateCrc(TimestampType timestampType, long timestamp) { - byte attributes = LegacyRecord.computeAttributes(magic(), compressionType(), timestampType); - buffer.put(LOG_OVERHEAD + LegacyRecord.ATTRIBUTES_OFFSET, attributes); - buffer.putLong(LOG_OVERHEAD + LegacyRecord.TIMESTAMP_OFFSET, timestamp); - long crc = record.computeChecksum(); - ByteUtils.writeUnsignedInt(buffer, LOG_OVERHEAD + LegacyRecord.CRC_OFFSET, crc); - } - - @Override - public void writeTo(ByteBufferOutputStream outputStream) { - outputStream.write(buffer.duplicate()); - } - - @Override - public boolean equals(Object o) { - if (this == o) - return true; - if (o == null || getClass() != o.getClass()) - return false; - - ByteBufferLegacyRecordBatch that = (ByteBufferLegacyRecordBatch) o; - - return buffer != null ? buffer.equals(that.buffer) : that.buffer == null; - } - - @Override - public int hashCode() { - return buffer != null ? 
buffer.hashCode() : 0; - } - } - - static class LegacyFileChannelRecordBatch extends FileLogInputStream.FileChannelRecordBatch { - - LegacyFileChannelRecordBatch(long offset, - byte magic, - FileChannel channel, - int position, - int batchSize) { - super(offset, magic, channel, position, batchSize); - } - - @Override - protected RecordBatch toMemoryRecordBatch(ByteBuffer buffer) { - return new ByteBufferLegacyRecordBatch(buffer); - } - - @Override - public long baseOffset() { - return loadFullBatch().baseOffset(); - } - - @Override - public long lastOffset() { - return offset; - } - - @Override - public long producerId() { - return RecordBatch.NO_PRODUCER_ID; - } - - @Override - public short producerEpoch() { - return RecordBatch.NO_PRODUCER_EPOCH; - } - - @Override - public int baseSequence() { - return RecordBatch.NO_SEQUENCE; - } - - @Override - public int lastSequence() { - return RecordBatch.NO_SEQUENCE; - } - - @Override - public Integer countOrNull() { - return null; - } - - @Override - public boolean isTransactional() { - return false; - } - - @Override - public boolean isControlBatch() { - return false; - } - - @Override - public int partitionLeaderEpoch() { - return RecordBatch.NO_PARTITION_LEADER_EPOCH; - } - - @Override - protected int headerSize() { - return LOG_OVERHEAD + LegacyRecord.headerSize(magic); - } - - } - -} diff --git a/clients/src/main/java/org/oracle/okafka/common/record/AbstractRecordBatch.java b/clients/src/main/java/org/oracle/okafka/common/record/AbstractRecordBatch.java deleted file mode 100644 index e0f832a..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/record/AbstractRecordBatch.java +++ /dev/null @@ -1,44 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.record; - -abstract class AbstractRecordBatch implements RecordBatch { - - @Override - public boolean hasProducerId() { - return RecordBatch.NO_PRODUCER_ID < producerId(); - } - - @Override - public long nextOffset() { - return lastOffset() + 1; - } - - @Override - public boolean isCompressed() { - return false; - } - -} diff --git a/clients/src/main/java/org/oracle/okafka/common/record/AbstractRecords.java b/clients/src/main/java/org/oracle/okafka/common/record/AbstractRecords.java deleted file mode 100644 index b15ea0e..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/record/AbstractRecords.java +++ /dev/null @@ -1,164 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. 
-** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.record; - -import java.nio.ByteBuffer; -import java.util.Iterator; - -import org.oracle.okafka.common.header.Header; -import org.oracle.okafka.common.utils.AbstractIterator; -import org.oracle.okafka.common.utils.Utils; - -public abstract class AbstractRecords implements Records { - - private final Iterable records = new Iterable() { - @Override - public Iterator iterator() { - return recordsIterator(); - } - }; - - @Override - public boolean hasMatchingMagic(byte magic) { - for (RecordBatch batch : batches()) - if (batch.magic() != magic) - return false; - return true; - } - - @Override - public boolean hasCompatibleMagic(byte magic) { - for (RecordBatch batch : batches()) - if (batch.magic() > magic) - return false; - return true; - } - - /** - * Get an iterator over the deep records. - * @return An iterator over the records - */ - @Override - public Iterable records() { - return records; - } - - /*@Override - public RecordsSend toSend(String destination) { - return new DefaultRecordsSend(destination, this); - }*/ - - private Iterator recordsIterator() { - return new AbstractIterator() { - private final Iterator batches = batches().iterator(); - private Iterator records; - - @Override - protected Record makeNext() { - if (records != null && records.hasNext()) - return records.next(); - - if (batches.hasNext()) { - records = batches.next().iterator(); - return makeNext(); - } - - return allDone(); - } - }; - } - - public static int estimateSizeInBytes(byte magic, - long baseOffset, - CompressionType compressionType, - Iterable records) { - int size = 0; - if (magic <= RecordBatch.MAGIC_VALUE_V1) { - for (Record record : records) - size += Records.LOG_OVERHEAD + LegacyRecord.recordSize(magic, record.key(), record.value()); - } else { - size = DefaultRecordBatch.sizeInBytes(baseOffset, records); - } - return estimateCompressedSizeInBytes(size, compressionType); - } - - public static int estimateSizeInBytes(byte magic, - CompressionType compressionType, - Iterable records) { - int size = 0; - if (magic <= RecordBatch.MAGIC_VALUE_V1) { - for (SimpleRecord record : records) - size += Records.LOG_OVERHEAD + LegacyRecord.recordSize(magic, record.key(), record.value()); - } else { - size = DefaultRecordBatch.sizeInBytes(records); - } - return estimateCompressedSizeInBytes(size, compressionType); - } - - private static int estimateCompressedSizeInBytes(int size, CompressionType compressionType) { - return compressionType == CompressionType.NONE ? 
size : Math.min(Math.max(size / 2, 1024), 1 << 16); - } - - /** - * Get an upper bound estimate on the batch size needed to hold a record with the given fields. This is only - * an estimate because it does not take into account overhead from the compression algorithm. - */ - public static int estimateSizeInBytesUpperBound(byte magic, CompressionType compressionType, byte[] key, byte[] value, Header[] headers) { - return estimateSizeInBytesUpperBound(magic, compressionType, Utils.wrapNullable(key), Utils.wrapNullable(value), headers); - } - - /** - * Get an upper bound estimate on the batch size needed to hold a record with the given fields. This is only - * an estimate because it does not take into account overhead from the compression algorithm. - */ - public static int estimateSizeInBytesUpperBound(byte magic, CompressionType compressionType, ByteBuffer key, - ByteBuffer value, Header[] headers) { - if (magic >= RecordBatch.MAGIC_VALUE_V2) - return DefaultRecordBatch.estimateBatchSizeUpperBound(key, value, headers); - else if (compressionType != CompressionType.NONE) - return Records.LOG_OVERHEAD + LegacyRecord.recordOverhead(magic) + LegacyRecord.recordSize(magic, key, value); - else - return Records.LOG_OVERHEAD + LegacyRecord.recordSize(magic, key, value); - } - - /** - * Return the size of the record batch header. - * - * For V0 and V1 with no compression, it's unclear if Records.LOG_OVERHEAD or 0 should be chosen. There is no header - * per batch, but a sequence of batches is preceded by the offset and size. This method returns `0` as it's what - * `MemoryRecordsBuilder` requires. - */ - public static int recordBatchHeaderSizeInBytes(byte magic, CompressionType compressionType) { - if (magic > RecordBatch.MAGIC_VALUE_V1) { - return DefaultRecordBatch.RECORD_BATCH_OVERHEAD; - } else if (compressionType != CompressionType.NONE) { - return Records.LOG_OVERHEAD + LegacyRecord.recordOverhead(magic); - } else { - return 0; - } - } - - -} diff --git a/clients/src/main/java/org/oracle/okafka/common/record/BaseRecords.java b/clients/src/main/java/org/oracle/okafka/common/record/BaseRecords.java index 8d0793f..8871f5c 100644 --- a/clients/src/main/java/org/oracle/okafka/common/record/BaseRecords.java +++ b/clients/src/main/java/org/oracle/okafka/common/record/BaseRecords.java @@ -1,7 +1,7 @@ /* -** OKafka Java Client version 0.8. +** OKafka Java Client version 23.4. ** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. +** Copyright (c) 2019, 2024 Oracle and/or its affiliates. ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. */ diff --git a/clients/src/main/java/org/oracle/okafka/common/record/BufferSupplier.java b/clients/src/main/java/org/oracle/okafka/common/record/BufferSupplier.java index d4b0492..093fb3d 100644 --- a/clients/src/main/java/org/oracle/okafka/common/record/BufferSupplier.java +++ b/clients/src/main/java/org/oracle/okafka/common/record/BufferSupplier.java @@ -1,7 +1,7 @@ /* -** OKafka Java Client version 0.8. +** OKafka Java Client version 23.4. ** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. +** Copyright (c) 2019, 2024 Oracle and/or its affiliates. ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
*/ diff --git a/clients/src/main/java/org/oracle/okafka/common/record/ByteBufferLogInputStream.java b/clients/src/main/java/org/oracle/okafka/common/record/ByteBufferLogInputStream.java deleted file mode 100644 index 2429874..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/record/ByteBufferLogInputStream.java +++ /dev/null @@ -1,97 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.record; - -import static org.oracle.okafka.common.record.Records.HEADER_SIZE_UP_TO_MAGIC; -import static org.oracle.okafka.common.record.Records.LOG_OVERHEAD; -import static org.oracle.okafka.common.record.Records.MAGIC_OFFSET; -import static org.oracle.okafka.common.record.Records.SIZE_OFFSET; - -import java.io.IOException; -import java.nio.ByteBuffer; - -import org.oracle.okafka.common.errors.CorruptRecordException; - -/** - * A byte buffer backed log input stream. This class avoids the need to copy records by returning - * slices from the underlying byte buffer. - */ -class ByteBufferLogInputStream implements LogInputStream { - private final ByteBuffer buffer; - private final int maxMessageSize; - - ByteBufferLogInputStream(ByteBuffer buffer, int maxMessageSize) { - this.buffer = buffer; - this.maxMessageSize = maxMessageSize; - } - - public MutableRecordBatch nextBatch() throws IOException { - int remaining = buffer.remaining(); - - Integer batchSize = nextBatchSize(); - if (batchSize == null || remaining < batchSize) - return null; - - byte magic = buffer.get(buffer.position() + MAGIC_OFFSET); - - ByteBuffer batchSlice = buffer.slice(); - batchSlice.limit(batchSize); - buffer.position(buffer.position() + batchSize); - - if (magic > RecordBatch.MAGIC_VALUE_V1) - return new DefaultRecordBatch(batchSlice); - else - return new AbstractLegacyRecordBatch.ByteBufferLegacyRecordBatch(batchSlice); - } - - /** - * Validates the header of the next batch and returns batch size. 
- * @return next batch size including LOG_OVERHEAD if buffer contains header up to - * magic byte, null otherwise - * @throws CorruptRecordException if record size or magic is invalid - */ - Integer nextBatchSize() throws CorruptRecordException { - int remaining = buffer.remaining(); - if (remaining < LOG_OVERHEAD) - return null; - int recordSize = buffer.getInt(buffer.position() + SIZE_OFFSET); - // V0 has the smallest overhead, stricter checking is done later - if (recordSize < LegacyRecord.RECORD_OVERHEAD_V0) - throw new CorruptRecordException(String.format("Record size %d is less than the minimum record overhead (%d)", - recordSize, LegacyRecord.RECORD_OVERHEAD_V0)); - if (recordSize > maxMessageSize) - throw new CorruptRecordException(String.format("Record size %d exceeds the largest allowable message size (%d).", - recordSize, maxMessageSize)); - - if (remaining < HEADER_SIZE_UP_TO_MAGIC) - return null; - - byte magic = buffer.get(buffer.position() + MAGIC_OFFSET); - if (magic < 0 || magic > RecordBatch.CURRENT_MAGIC_VALUE) - throw new CorruptRecordException("Invalid magic found in record: " + magic); - - return recordSize + LOG_OVERHEAD; - } -} diff --git a/clients/src/main/java/org/oracle/okafka/common/record/CompressionRatioEstimator.java b/clients/src/main/java/org/oracle/okafka/common/record/CompressionRatioEstimator.java deleted file mode 100644 index 4df387f..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/record/CompressionRatioEstimator.java +++ /dev/null @@ -1,118 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.record; - -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; - - -/** - * This class help estimate the compression ratio for each topic and compression type combination. - */ -public class CompressionRatioEstimator { - // The constant speed to increase compression ratio when a batch compresses better than expected. - public static final float COMPRESSION_RATIO_IMPROVING_STEP = 0.005f; - // The minimum speed to decrease compression ratio when a batch compresses worse than expected. - public static final float COMPRESSION_RATIO_DETERIORATE_STEP = 0.05f; - private static final ConcurrentMap COMPRESSION_RATIO = new ConcurrentHashMap<>(); - - /** - * Update the compression ratio estimation for a topic and compression type. - * - * @param topic the topic to update compression ratio estimation. - * @param type the compression type. - * @param observedRatio the observed compression ratio. 
- * @return the compression ratio estimation after the update. - */ - public static float updateEstimation(String topic, CompressionType type, float observedRatio) { - float[] compressionRatioForTopic = getAndCreateEstimationIfAbsent(topic); - float currentEstimation = compressionRatioForTopic[type.id]; - synchronized (compressionRatioForTopic) { - if (observedRatio > currentEstimation) - compressionRatioForTopic[type.id] = Math.max(currentEstimation + COMPRESSION_RATIO_DETERIORATE_STEP, observedRatio); - else if (observedRatio < currentEstimation) { - compressionRatioForTopic[type.id] = currentEstimation - COMPRESSION_RATIO_IMPROVING_STEP; - } - } - return compressionRatioForTopic[type.id]; - } - - /** - * Get the compression ratio estimation for a topic and compression type. - */ - public static float estimation(String topic, CompressionType type) { - float[] compressionRatioForTopic = getAndCreateEstimationIfAbsent(topic); - return compressionRatioForTopic[type.id]; - } - - /** - * Reset the compression ratio estimation to the initial values for a topic. - */ - public static void resetEstimation(String topic) { - float[] compressionRatioForTopic = getAndCreateEstimationIfAbsent(topic); - synchronized (compressionRatioForTopic) { - for (CompressionType type : CompressionType.values()) { - compressionRatioForTopic[type.id] = type.rate; - } - } - } - - /** - * Remove the compression ratio estimation for a topic. - */ - public static void removeEstimation(String topic) { - COMPRESSION_RATIO.remove(topic); - } - - /** - * Set the compression estimation for a topic compression type combination. This method is for unit test purpose. - */ - public static void setEstimation(String topic, CompressionType type, float ratio) { - float[] compressionRatioForTopic = getAndCreateEstimationIfAbsent(topic); - synchronized (compressionRatioForTopic) { - compressionRatioForTopic[type.id] = ratio; - } - } - - private static float[] getAndCreateEstimationIfAbsent(String topic) { - float[] compressionRatioForTopic = COMPRESSION_RATIO.get(topic); - if (compressionRatioForTopic == null) { - compressionRatioForTopic = initialCompressionRatio(); - float[] existingCompressionRatio = COMPRESSION_RATIO.putIfAbsent(topic, compressionRatioForTopic); - // Someone created the compression ratio array before us, use it. - if (existingCompressionRatio != null) - return existingCompressionRatio; - } - return compressionRatioForTopic; - } - - private static float[] initialCompressionRatio() { - float[] compressionRatio = new float[CompressionType.values().length]; - for (CompressionType type : CompressionType.values()) { - compressionRatio[type.id] = type.rate; - } - return compressionRatio; - } -} \ No newline at end of file diff --git a/clients/src/main/java/org/oracle/okafka/common/record/CompressionType.java b/clients/src/main/java/org/oracle/okafka/common/record/CompressionType.java deleted file mode 100644 index 94fcd00..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/record/CompressionType.java +++ /dev/null @@ -1,209 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
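The CompressionRatioEstimator removed above adjusts its per-topic estimate asymmetrically: when a batch compresses worse than expected the estimate jumps up by at least COMPRESSION_RATIO_DETERIORATE_STEP (0.05f) or straight to the observed ratio, while a better-than-expected batch only nudges it down by COMPRESSION_RATIO_IMPROVING_STEP (0.005f). Below is a minimal, self-contained sketch of that update rule; the class and method names are illustrative and not part of the deleted sources.

```java
import java.util.Arrays;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

// Illustrative sketch of the asymmetric update rule: rise quickly when a batch
// compresses worse than expected, decay slowly when it compresses better.
public class RatioEstimatorSketch {
    private static final float IMPROVING_STEP = 0.005f;   // slow decrease
    private static final float DETERIORATE_STEP = 0.05f;  // fast increase
    private static final ConcurrentMap<String, float[]> ESTIMATES = new ConcurrentHashMap<>();

    // One slot per compression-type id; start from 1.0f (no compression benefit assumed).
    private static float[] estimatesFor(String topic, int numTypes) {
        return ESTIMATES.computeIfAbsent(topic, t -> {
            float[] initial = new float[numTypes];
            Arrays.fill(initial, 1.0f);
            return initial;
        });
    }

    public static float update(String topic, int typeId, float observedRatio) {
        float[] perTopic = estimatesFor(topic, 4);
        synchronized (perTopic) {
            float current = perTopic[typeId];
            if (observedRatio > current)          // compressed worse than expected
                perTopic[typeId] = Math.max(current + DETERIORATE_STEP, observedRatio);
            else if (observedRatio < current)     // compressed better than expected
                perTopic[typeId] = current - IMPROVING_STEP;
            return perTopic[typeId];
        }
    }

    public static void main(String[] args) {
        System.out.println(update("demo-topic", 1, 0.70f)); // 0.995: only a small step down
        System.out.println(update("demo-topic", 1, 1.20f)); // 1.2: jumps straight to the worse ratio
    }
}
```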
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.record; - -import java.io.BufferedInputStream; -import java.io.BufferedOutputStream; -import java.io.InputStream; -import java.io.OutputStream; -import java.lang.invoke.MethodHandle; -import java.lang.invoke.MethodHandles; -import java.lang.invoke.MethodType; -import java.nio.ByteBuffer; -import java.util.zip.GZIPInputStream; -import java.util.zip.GZIPOutputStream; - -import org.oracle.okafka.common.KafkaException; -import org.oracle.okafka.common.utils.ByteBufferInputStream; -import org.oracle.okafka.common.utils.ByteBufferOutputStream; - -/** - * The compression type to use - */ -public enum CompressionType { - NONE(0, "none", 1.0f) { - @Override - public OutputStream wrapForOutput(ByteBufferOutputStream buffer, byte messageVersion) { - return buffer; - } - - @Override - public InputStream wrapForInput(ByteBuffer buffer, byte messageVersion, BufferSupplier decompressionBufferSupplier) { - return new ByteBufferInputStream(buffer); - } - }, - - GZIP(1, "gzip", 1.0f) { - @Override - public OutputStream wrapForOutput(ByteBufferOutputStream buffer, byte messageVersion) { - try { - // Set input buffer (uncompressed) to 16 KB (none by default) and output buffer (compressed) to - // 8 KB (0.5 KB by default) to ensure reasonable performance in cases where the caller passes a small - // number of bytes to write (potentially a single byte) - return new BufferedOutputStream(new GZIPOutputStream(buffer, 8 * 1024), 16 * 1024); - } catch (Exception e) { - throw new KafkaException(e); - } - } - - @Override - public InputStream wrapForInput(ByteBuffer buffer, byte messageVersion, BufferSupplier decompressionBufferSupplier) { - try { - // Set output buffer (uncompressed) to 16 KB (none by default) and input buffer (compressed) to - // 8 KB (0.5 KB by default) to ensure reasonable performance in cases where the caller reads a small - // number of bytes (potentially a single byte) - return new BufferedInputStream(new GZIPInputStream(new ByteBufferInputStream(buffer), 8 * 1024), - 16 * 1024); - } catch (Exception e) { - throw new KafkaException(e); - } - } - }, - - SNAPPY(2, "snappy", 1.0f) { - @Override - public OutputStream wrapForOutput(ByteBufferOutputStream buffer, byte messageVersion) { - try { - return (OutputStream) SnappyConstructors.OUTPUT.invoke(buffer); - } catch (Throwable e) { - throw new KafkaException(e); - } - } - - @Override - public InputStream wrapForInput(ByteBuffer buffer, byte messageVersion, BufferSupplier decompressionBufferSupplier) { - try { - return (InputStream) SnappyConstructors.INPUT.invoke(new ByteBufferInputStream(buffer)); - } catch (Throwable e) { - throw new KafkaException(e); - } - } - }; - - /*LZ4(3, "lz4", 1.0f) { - @Override - public OutputStream wrapForOutput(ByteBufferOutputStream buffer, byte messageVersion) { - try { - return new KafkaLZ4BlockOutputStream(buffer, messageVersion == RecordBatch.MAGIC_VALUE_V0); - } catch (Throwable e) { - throw new 
KafkaException(e); - } - } - - @Override - public InputStream wrapForInput(ByteBuffer inputBuffer, byte messageVersion, BufferSupplier decompressionBufferSupplier) { - try { - return new KafkaLZ4BlockInputStream(inputBuffer, decompressionBufferSupplier, - messageVersion == RecordBatch.MAGIC_VALUE_V0); - } catch (Throwable e) { - throw new KafkaException(e); - } - } - };*/ - - public final int id; - public final String name; - public final float rate; - - CompressionType(int id, String name, float rate) { - this.id = id; - this.name = name; - this.rate = rate; - } - - /** - * Wrap bufferStream with an OutputStream that will compress data with this CompressionType. - * - * Note: Unlike {@link #wrapForInput}, {@link #wrapForOutput} cannot take {@#link ByteBuffer}s directly. - * Currently, {@link MemoryRecordsBuilder#writeDefaultBatchHeader()} and {@link MemoryRecordsBuilder#writeLegacyCompressedWrapperHeader()} - * write to the underlying buffer in the given {@link ByteBufferOutputStream} after the compressed data has been written. - * In the event that the buffer needs to be expanded while writing the data, access to the underlying buffer needs to be preserved. - */ - public abstract OutputStream wrapForOutput(ByteBufferOutputStream bufferStream, byte messageVersion); - - /** - * Wrap buffer with an InputStream that will decompress data with this CompressionType. - * - * @param decompressionBufferSupplier The supplier of ByteBuffer(s) used for decompression if supported. - * For small record batches, allocating a potentially large buffer (64 KB for LZ4) - * will dominate the cost of decompressing and iterating over the records in the - * batch. As such, a supplier that reuses buffers will have a significant - * performance impact. - */ - public abstract InputStream wrapForInput(ByteBuffer buffer, byte messageVersion, BufferSupplier decompressionBufferSupplier); - - public static CompressionType forId(int id) { - switch (id) { - case 0: - return NONE; - case 1: - return GZIP; - case 2: - return SNAPPY; - //case 3: - // return LZ4; - default: - throw new IllegalArgumentException("Unknown compression type id: " + id); - } - } - - public static CompressionType forName(String name) { - if (NONE.name.equals(name)) - return NONE; - else if (GZIP.name.equals(name)) - return GZIP; - else if (SNAPPY.name.equals(name)) - return SNAPPY; - //else if (LZ4.name.equals(name)) - // return LZ4; - else - throw new IllegalArgumentException("Unknown compression name: " + name); - } - - // We should only have a runtime dependency on compression algorithms in case the native libraries don't support - // some platforms. - // - // For Snappy, we dynamically load the classes and rely on the initialization-on-demand holder idiom to ensure - // they're only loaded if used. - // - // For LZ4 we are using org.apache.kafka classes, which should always be in the classpath, and would not trigger - // an error until KafkaLZ4BlockInputStream is initialized, which only happens if LZ4 is actually used. 
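The GZIP branch of the removed CompressionType above wraps the raw stream in a GZIPOutputStream with an 8 KB compression buffer and an outer 16 KB BufferedOutputStream (mirrored on the read side), so callers that write or read a few bytes at a time still get reasonable throughput. The following standalone round-trip sketch shows that buffering pattern using only JDK classes; the GzipWrapSketch name and the in-memory byte arrays are assumptions made for the example.

```java
import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.charset.StandardCharsets;
import java.util.zip.GZIPInputStream;
import java.util.zip.GZIPOutputStream;

// Round-trip sketch of the buffered GZIP wrapping shown above: an 8 KB
// compression buffer plus a 16 KB outer buffer so that byte-at-a-time
// writes and reads stay cheap.
public class GzipWrapSketch {
    static byte[] compress(byte[] input) throws IOException {
        ByteArrayOutputStream sink = new ByteArrayOutputStream();
        try (OutputStream out = new BufferedOutputStream(new GZIPOutputStream(sink, 8 * 1024), 16 * 1024)) {
            for (byte b : input)                  // deliberately write one byte at a time
                out.write(b);
        }
        return sink.toByteArray();
    }

    static byte[] decompress(byte[] compressed) throws IOException {
        try (InputStream in = new BufferedInputStream(
                new GZIPInputStream(new ByteArrayInputStream(compressed), 8 * 1024), 16 * 1024)) {
            return in.readAllBytes();
        }
    }

    public static void main(String[] args) throws IOException {
        byte[] payload = "hello, transactional event queues".getBytes(StandardCharsets.UTF_8);
        System.out.println(new String(decompress(compress(payload)), StandardCharsets.UTF_8));
    }
}
```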
- - private static class SnappyConstructors { - static final MethodHandle INPUT = findConstructor("org.xerial.snappy.SnappyInputStream", - MethodType.methodType(void.class, InputStream.class)); - static final MethodHandle OUTPUT = findConstructor("org.xerial.snappy.SnappyOutputStream", - MethodType.methodType(void.class, OutputStream.class)); - } - - private static MethodHandle findConstructor(String className, MethodType methodType) { - try { - return MethodHandles.publicLookup().findConstructor(Class.forName(className), methodType); - } catch (ReflectiveOperationException e) { - throw new RuntimeException(e); - } - } - -} diff --git a/clients/src/main/java/org/oracle/okafka/common/record/DefaultRecord.java b/clients/src/main/java/org/oracle/okafka/common/record/DefaultRecord.java deleted file mode 100644 index b098866..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/record/DefaultRecord.java +++ /dev/null @@ -1,493 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.record; - -import static org.oracle.okafka.common.record.RecordBatch.MAGIC_VALUE_V2; - -import java.io.DataInput; -import java.io.DataOutputStream; -import java.io.IOException; -import java.nio.BufferUnderflowException; -import java.nio.ByteBuffer; -import java.util.Arrays; -import java.util.zip.Checksum; - -import org.oracle.okafka.common.header.Header; -import org.oracle.okafka.common.header.internals.RecordHeader; -import org.oracle.okafka.common.utils.ByteUtils; -import org.oracle.okafka.common.utils.Checksums; -import org.oracle.okafka.common.utils.Crc32C; -import org.oracle.okafka.common.utils.Utils; - -/** - * This class implements the inner record format for magic 2 and above. The schema is as follows: - * - * - * Record => - * Length => Varint - * Attributes => Int8 - * TimestampDelta => Varlong - * OffsetDelta => Varint - * Key => Bytes - * Value => Bytes - * Headers => [HeaderKey HeaderValue] - * HeaderKey => String - * HeaderValue => Bytes - * - * Note that in this schema, the Bytes and String types use a variable length integer to represent - * the length of the field. The array type used for the headers also uses a Varint for the number of - * headers. - * - * The current record attributes are depicted below: - * - * ---------------- - * | Unused (0-7) | - * ---------------- - * - * The offset and timestamp deltas compute the difference relative to the base offset and - * base timestamp of the batch that this record is contained in. 
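In the v2 record schema described above, field lengths and the offset delta are stored as variable-length (zig-zag) integers, and the timestamp and offset are deltas against the batch base, so values close to the base cost only a byte or two. The toy encoder below illustrates that sizing; it is a generic zig-zag varint sketch, not the deleted ByteUtils implementation, and its names are illustrative.

```java
import java.io.ByteArrayOutputStream;

// Toy zig-zag varint encoder showing why delta-encoded offsets and timestamps
// in the v2 record format usually take only a byte or two on the wire.
public class VarintSketch {
    // Zig-zag encoding maps small positive and negative values to small unsigned values.
    static void writeVarint(int value, ByteArrayOutputStream out) {
        int v = (value << 1) ^ (value >> 31);     // zig-zag encode
        while ((v & 0xFFFFFF80) != 0) {           // more than 7 significant bits remain
            out.write((v & 0x7F) | 0x80);         // low 7 bits plus continuation bit
            v >>>= 7;
        }
        out.write(v);                             // final byte, continuation bit clear
    }

    static int varintSize(int value) {
        ByteArrayOutputStream scratch = new ByteArrayOutputStream();
        writeVarint(value, scratch);
        return scratch.size();
    }

    public static void main(String[] args) {
        System.out.println(varintSize(0));    // 1 byte, e.g. the first record's offset delta
        System.out.println(varintSize(499));  // 2 bytes, a delta near the end of a 500-record batch
        System.out.println(varintSize(-1));   // 1 byte, the length marker used for a null key or value
    }
}
```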
- */ -public class DefaultRecord implements Record { - - // excluding key, value and headers: 5 bytes length + 10 bytes timestamp + 5 bytes offset + 1 byte attributes - public static final int MAX_RECORD_OVERHEAD = 21; - - private static final int NULL_VARINT_SIZE_BYTES = ByteUtils.sizeOfVarint(-1); - - private final int sizeInBytes; - private final byte attributes; - private final long offset; - private final long timestamp; - private final int sequence; - private final ByteBuffer key; - private final ByteBuffer value; - private final Header[] headers; - - private DefaultRecord(int sizeInBytes, - byte attributes, - long offset, - long timestamp, - int sequence, - ByteBuffer key, - ByteBuffer value, - Header[] headers) { - this.sizeInBytes = sizeInBytes; - this.attributes = attributes; - this.offset = offset; - this.timestamp = timestamp; - this.sequence = sequence; - this.key = key; - this.value = value; - this.headers = headers; - } - - @Override - public long offset() { - return offset; - } - - @Override - public int sequence() { - return sequence; - } - - @Override - public int sizeInBytes() { - return sizeInBytes; - } - - @Override - public long timestamp() { - return timestamp; - } - - public byte attributes() { - return attributes; - } - - @Override - public Long checksumOrNull() { - return null; - } - - @Override - public boolean isValid() { - // new versions of the message format (2 and above) do not contain an individual record checksum; - // instead they are validated with the checksum at the log entry level - return true; - } - - @Override - public void ensureValid() {} - - @Override - public int keySize() { - return key == null ? -1 : key.remaining(); - } - - @Override - public int valueSize() { - return value == null ? -1 : value.remaining(); - } - - @Override - public boolean hasKey() { - return key != null; - } - - @Override - public ByteBuffer key() { - return key == null ? null : key.duplicate(); - } - - @Override - public boolean hasValue() { - return value != null; - } - - @Override - public ByteBuffer value() { - return value == null ? null : value.duplicate(); - } - - @Override - public Header[] headers() { - return headers; - } - - /** - * Write the record to `out` and return its size. 
- */ - public static int writeTo(DataOutputStream out, - int offsetDelta, - long timestampDelta, - ByteBuffer key, - ByteBuffer value, - Header[] headers) throws IOException { - int sizeInBytes = sizeOfBodyInBytes(offsetDelta, timestampDelta, key, value, headers); - ByteUtils.writeVarint(sizeInBytes, out); - - byte attributes = 0; // there are no used record attributes at the moment - out.write(attributes); - - ByteUtils.writeVarlong(timestampDelta, out); - ByteUtils.writeVarint(offsetDelta, out); - - if (key == null) { - ByteUtils.writeVarint(-1, out); - } else { - int keySize = key.remaining(); - ByteUtils.writeVarint(keySize, out); - Utils.writeTo(out, key, keySize); - } - - if (value == null) { - ByteUtils.writeVarint(-1, out); - } else { - int valueSize = value.remaining(); - ByteUtils.writeVarint(valueSize, out); - Utils.writeTo(out, value, valueSize); - } - - if (headers == null) - throw new IllegalArgumentException("Headers cannot be null"); - - ByteUtils.writeVarint(headers.length, out); - - for (Header header : headers) { - String headerKey = header.key(); - if (headerKey == null) - throw new IllegalArgumentException("Invalid null header key found in headers"); - - byte[] utf8Bytes = Utils.utf8(headerKey); - ByteUtils.writeVarint(utf8Bytes.length, out); - out.write(utf8Bytes); - - byte[] headerValue = header.value(); - if (headerValue == null) { - ByteUtils.writeVarint(-1, out); - } else { - ByteUtils.writeVarint(headerValue.length, out); - out.write(headerValue); - } - } - - return ByteUtils.sizeOfVarint(sizeInBytes) + sizeInBytes; - } - - @Override - public boolean hasMagic(byte magic) { - return magic >= MAGIC_VALUE_V2; - } - - @Override - public boolean isCompressed() { - return false; - } - - @Override - public boolean hasTimestampType(TimestampType timestampType) { - return false; - } - - @Override - public String toString() { - return String.format("DefaultRecord(offset=%d, timestamp=%d, key=%d bytes, value=%d bytes)", - offset, - timestamp, - key == null ? 0 : key.limit(), - value == null ? 0 : value.limit()); - } - - @Override - public boolean equals(Object o) { - if (this == o) - return true; - if (o == null || getClass() != o.getClass()) - return false; - - DefaultRecord that = (DefaultRecord) o; - return sizeInBytes == that.sizeInBytes && - attributes == that.attributes && - offset == that.offset && - timestamp == that.timestamp && - sequence == that.sequence && - (key == null ? that.key == null : key.equals(that.key)) && - (value == null ? that.value == null : value.equals(that.value)) && - Arrays.equals(headers, that.headers); - } - - @Override - public int hashCode() { - int result = sizeInBytes; - result = 31 * result + (int) attributes; - result = 31 * result + (int) (offset ^ (offset >>> 32)); - result = 31 * result + (int) (timestamp ^ (timestamp >>> 32)); - result = 31 * result + sequence; - result = 31 * result + (key != null ? key.hashCode() : 0); - result = 31 * result + (value != null ? 
value.hashCode() : 0); - result = 31 * result + Arrays.hashCode(headers); - return result; - } - - public static DefaultRecord readFrom(DataInput input, - long baseOffset, - long baseTimestamp, - int baseSequence, - Long logAppendTime) throws IOException { - int sizeOfBodyInBytes = ByteUtils.readVarint(input); - ByteBuffer recordBuffer = ByteBuffer.allocate(sizeOfBodyInBytes); - input.readFully(recordBuffer.array(), 0, sizeOfBodyInBytes); - int totalSizeInBytes = ByteUtils.sizeOfVarint(sizeOfBodyInBytes) + sizeOfBodyInBytes; - return readFrom(recordBuffer, totalSizeInBytes, sizeOfBodyInBytes, baseOffset, baseTimestamp, - baseSequence, logAppendTime); - } - - public static DefaultRecord readFrom(ByteBuffer buffer, - long baseOffset, - long baseTimestamp, - int baseSequence, - Long logAppendTime) { - int sizeOfBodyInBytes = ByteUtils.readVarint(buffer); - if (buffer.remaining() < sizeOfBodyInBytes) - return null; - - int totalSizeInBytes = ByteUtils.sizeOfVarint(sizeOfBodyInBytes) + sizeOfBodyInBytes; - return readFrom(buffer, totalSizeInBytes, sizeOfBodyInBytes, baseOffset, baseTimestamp, - baseSequence, logAppendTime); - } - - private static DefaultRecord readFrom(ByteBuffer buffer, - int sizeInBytes, - int sizeOfBodyInBytes, - long baseOffset, - long baseTimestamp, - int baseSequence, - Long logAppendTime) { - try { - int recordStart = buffer.position(); - byte attributes = buffer.get(); - long timestampDelta = ByteUtils.readVarlong(buffer); - long timestamp = baseTimestamp + timestampDelta; - if (logAppendTime != null) - timestamp = logAppendTime; - - int offsetDelta = ByteUtils.readVarint(buffer); - long offset = baseOffset + offsetDelta; - int sequence = baseSequence >= 0 ? - DefaultRecordBatch.incrementSequence(baseSequence, offsetDelta) : - RecordBatch.NO_SEQUENCE; - - ByteBuffer key = null; - int keySize = ByteUtils.readVarint(buffer); - if (keySize >= 0) { - key = buffer.slice(); - key.limit(keySize); - buffer.position(buffer.position() + keySize); - } - - ByteBuffer value = null; - int valueSize = ByteUtils.readVarint(buffer); - if (valueSize >= 0) { - value = buffer.slice(); - value.limit(valueSize); - buffer.position(buffer.position() + valueSize); - } - - int numHeaders = ByteUtils.readVarint(buffer); - if (numHeaders < 0) - throw new InvalidRecordException("Found invalid number of record headers " + numHeaders); - - final Header[] headers; - if (numHeaders == 0) - headers = Record.EMPTY_HEADERS; - else - headers = readHeaders(buffer, numHeaders); - - // validate whether we have read all header bytes in the current record - if (buffer.position() - recordStart != sizeOfBodyInBytes) - throw new InvalidRecordException("Invalid record size: expected to read " + sizeOfBodyInBytes + - " bytes in record payload, but instead read " + (buffer.position() - recordStart)); - - return new DefaultRecord(sizeInBytes, attributes, offset, timestamp, sequence, key, value, headers); - } catch (BufferUnderflowException | IllegalArgumentException e) { - throw new InvalidRecordException("Found invalid record structure", e); - } - } - - private static Header[] readHeaders(ByteBuffer buffer, int numHeaders) { - Header[] headers = new Header[numHeaders]; - for (int i = 0; i < numHeaders; i++) { - int headerKeySize = ByteUtils.readVarint(buffer); - if (headerKeySize < 0) - throw new InvalidRecordException("Invalid negative header key size " + headerKeySize); - - String headerKey = Utils.utf8(buffer, headerKeySize); - buffer.position(buffer.position() + headerKeySize); - - ByteBuffer headerValue = null; 
- int headerValueSize = ByteUtils.readVarint(buffer); - if (headerValueSize >= 0) { - headerValue = buffer.slice(); - headerValue.limit(headerValueSize); - buffer.position(buffer.position() + headerValueSize); - } - - headers[i] = new RecordHeader(headerKey, headerValue); - } - - return headers; - } - - public static int sizeInBytes(int offsetDelta, - long timestampDelta, - ByteBuffer key, - ByteBuffer value, - Header[] headers) { - int bodySize = sizeOfBodyInBytes(offsetDelta, timestampDelta, key, value, headers); - return bodySize + ByteUtils.sizeOfVarint(bodySize); - } - - public static int sizeInBytes(int offsetDelta, - long timestampDelta, - int keySize, - int valueSize, - Header[] headers) { - int bodySize = sizeOfBodyInBytes(offsetDelta, timestampDelta, keySize, valueSize, headers); - return bodySize + ByteUtils.sizeOfVarint(bodySize); - } - - private static int sizeOfBodyInBytes(int offsetDelta, - long timestampDelta, - ByteBuffer key, - ByteBuffer value, - Header[] headers) { - - int keySize = key == null ? -1 : key.remaining(); - int valueSize = value == null ? -1 : value.remaining(); - return sizeOfBodyInBytes(offsetDelta, timestampDelta, keySize, valueSize, headers); - } - - private static int sizeOfBodyInBytes(int offsetDelta, - long timestampDelta, - int keySize, - int valueSize, - Header[] headers) { - int size = 1; // always one byte for attributes - size += ByteUtils.sizeOfVarint(offsetDelta); - size += ByteUtils.sizeOfVarlong(timestampDelta); - size += sizeOf(keySize, valueSize, headers); - return size; - } - - private static int sizeOf(int keySize, int valueSize, Header[] headers) { - int size = 0; - if (keySize < 0) - size += NULL_VARINT_SIZE_BYTES; - else - size += ByteUtils.sizeOfVarint(keySize) + keySize; - - if (valueSize < 0) - size += NULL_VARINT_SIZE_BYTES; - else - size += ByteUtils.sizeOfVarint(valueSize) + valueSize; - - if (headers == null) - throw new IllegalArgumentException("Headers cannot be null"); - - size += ByteUtils.sizeOfVarint(headers.length); - for (Header header : headers) { - String headerKey = header.key(); - if (headerKey == null) - throw new IllegalArgumentException("Invalid null header key found in headers"); - - int headerKeySize = Utils.utf8Length(headerKey); - size += ByteUtils.sizeOfVarint(headerKeySize) + headerKeySize; - - byte[] headerValue = header.value(); - if (headerValue == null) { - size += NULL_VARINT_SIZE_BYTES; - } else { - size += ByteUtils.sizeOfVarint(headerValue.length) + headerValue.length; - } - } - return size; - } - - static int recordSizeUpperBound(ByteBuffer key, ByteBuffer value, Header[] headers) { - int keySize = key == null ? -1 : key.remaining(); - int valueSize = value == null ? -1 : value.remaining(); - return MAX_RECORD_OVERHEAD + sizeOf(keySize, valueSize, headers); - } - - - public static long computePartialChecksum(long timestamp, int serializedKeySize, int serializedValueSize) { - Checksum checksum = Crc32C.create(); - Checksums.updateLong(checksum, timestamp); - Checksums.updateInt(checksum, serializedKeySize); - Checksums.updateInt(checksum, serializedValueSize); - return checksum.getValue(); - } -} diff --git a/clients/src/main/java/org/oracle/okafka/common/record/DefaultRecordBatch.java b/clients/src/main/java/org/oracle/okafka/common/record/DefaultRecordBatch.java deleted file mode 100644 index 2f38052..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/record/DefaultRecordBatch.java +++ /dev/null @@ -1,669 +0,0 @@ -/* -** OKafka Java Client version 0.8. 
-** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.record; - -import static org.oracle.okafka.common.record.Records.LOG_OVERHEAD; - -import java.io.DataInputStream; -import java.io.EOFException; -import java.io.IOException; -import java.nio.BufferUnderflowException; -import java.nio.ByteBuffer; -import java.nio.channels.FileChannel; -import java.util.ArrayList; -import java.util.Collections; -import java.util.Iterator; -import java.util.List; -import java.util.NoSuchElementException; - -import org.oracle.okafka.common.KafkaException; -import org.oracle.okafka.common.header.Header; -import org.oracle.okafka.common.utils.ByteBufferOutputStream; -import org.oracle.okafka.common.utils.ByteUtils; -import org.oracle.okafka.common.utils.CloseableIterator; -import org.oracle.okafka.common.utils.Crc32C; - -/** - * RecordBatch implementation for magic 2 and above. The schema is given below: - * - * RecordBatch => - * BaseOffset => Int64 - * Length => Int32 - * PartitionLeaderEpoch => Int32 - * Magic => Int8 - * CRC => Uint32 - * Attributes => Int16 - * LastOffsetDelta => Int32 // also serves as LastSequenceDelta - * FirstTimestamp => Int64 - * MaxTimestamp => Int64 - * ProducerId => Int64 - * ProducerEpoch => Int16 - * BaseSequence => Int32 - * Records => [Record] - * - * Note that when compression is enabled (see attributes below), the compressed record data is serialized - * directly following the count of the number of records. - * - * The CRC covers the data from the attributes to the end of the batch (i.e. all the bytes that follow the CRC). It is - * located after the magic byte, which means that clients must parse the magic byte before deciding how to interpret - * the bytes between the batch length and the magic byte. The partition leader epoch field is not included in the CRC - * computation to avoid the need to recompute the CRC when this field is assigned for every batch that is received by - * the broker. The CRC-32C (Castagnoli) polynomial is used for the computation. - * - * On Compaction: Unlike the older message formats, magic v2 and above preserves the first and last offset/sequence - * numbers from the original batch when the log is cleaned. This is required in order to be able to restore the - * producer's state when the log is reloaded. If we did not retain the last sequence number, then following - * a partition leader failure, once the new leader has rebuilt the producer state from the log, the next sequence - * expected number would no longer be in sync with what was written by the client. 
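As the batch format description above notes, the CRC is a CRC-32C (Castagnoli) computed over everything from the attributes field to the end of the batch, deliberately excluding the partition leader epoch so that field can be assigned without recomputing the checksum. The sketch below shows checksumming only a sub-range of a buffer with the JDK's CRC32C; the buffer layout and offsets are made up for illustration and do not reproduce the deleted class.

```java
import java.nio.ByteBuffer;
import java.util.zip.CRC32C;

// Sketch: checksum only part of a buffer with CRC-32C, the way the v2 batch
// format covers the bytes from the attributes field onward while leaving the
// earlier header fields (such as the partition leader epoch) uncovered.
public class CrcRangeSketch {
    static long crcOfRange(ByteBuffer buffer, int offset, int length) {
        CRC32C crc = new CRC32C();                // JDK 9+ Castagnoli implementation
        ByteBuffer slice = buffer.duplicate();    // leave the caller's position/limit untouched
        slice.position(offset);
        slice.limit(offset + length);
        crc.update(slice);                        // consumes exactly [offset, offset + length)
        return crc.getValue();                    // unsigned 32-bit CRC as a long
    }

    public static void main(String[] args) {
        ByteBuffer batch = ByteBuffer.allocate(64);
        batch.putInt(0, 42);                      // pretend header field outside the CRC range
        for (int i = 8; i < 64; i++)              // pretend "attributes ... records" region
            batch.put(i, (byte) i);

        long before = crcOfRange(batch, 8, 56);
        batch.putInt(0, 7);                       // change the uncovered field ...
        long after = crcOfRange(batch, 8, 56);
        System.out.println(before == after);      // ... and the checksum stays the same: true
    }
}
```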
This would cause an - * unexpected OutOfOrderSequence error, which is typically fatal. The base sequence number must be preserved for - * duplicate checking: the broker checks incoming Produce requests for duplicates by verifying that the first and - * last sequence numbers of the incoming batch match the last from that producer. - * - * Note that if all of the records in a batch are removed during compaction, the broker may still retain an empty - * batch header in order to preserve the producer sequence information as described above. These empty batches - * are retained only until either a new sequence number is written by the corresponding producer or the producerId - * is expired from lack of activity. - * - * There is no similar need to preserve the timestamp from the original batch after compaction. The FirstTimestamp - * field therefore always reflects the timestamp of the first record in the batch. If the batch is empty, the - * FirstTimestamp will be set to -1 (NO_TIMESTAMP). - * - * Similarly, the MaxTimestamp field reflects the maximum timestamp of the current records if the timestamp type - * is CREATE_TIME. For LOG_APPEND_TIME, on the other hand, the MaxTimestamp field reflects the timestamp set - * by the broker and is preserved after compaction. Additionally, the MaxTimestamp of an empty batch always retains - * the previous value prior to becoming empty. - * - * The current attributes are given below: - * - * ------------------------------------------------------------------------------------------------- - * | Unused (6-15) | Control (5) | Transactional (4) | Timestamp Type (3) | Compression Type (0-2) | - * ------------------------------------------------------------------------------------------------- - */ -public class DefaultRecordBatch extends AbstractRecordBatch implements MutableRecordBatch { - static final int BASE_OFFSET_OFFSET = 0; - static final int BASE_OFFSET_LENGTH = 8; - static final int LENGTH_OFFSET = BASE_OFFSET_OFFSET + BASE_OFFSET_LENGTH; - static final int LENGTH_LENGTH = 4; - static final int PARTITION_LEADER_EPOCH_OFFSET = LENGTH_OFFSET + LENGTH_LENGTH; - static final int PARTITION_LEADER_EPOCH_LENGTH = 4; - static final int MAGIC_OFFSET = PARTITION_LEADER_EPOCH_OFFSET + PARTITION_LEADER_EPOCH_LENGTH; - static final int MAGIC_LENGTH = 1; - static final int CRC_OFFSET = MAGIC_OFFSET + MAGIC_LENGTH; - static final int CRC_LENGTH = 4; - static final int ATTRIBUTES_OFFSET = CRC_OFFSET + CRC_LENGTH; - static final int ATTRIBUTE_LENGTH = 2; - public static final int LAST_OFFSET_DELTA_OFFSET = ATTRIBUTES_OFFSET + ATTRIBUTE_LENGTH; - static final int LAST_OFFSET_DELTA_LENGTH = 4; - static final int FIRST_TIMESTAMP_OFFSET = LAST_OFFSET_DELTA_OFFSET + LAST_OFFSET_DELTA_LENGTH; - static final int FIRST_TIMESTAMP_LENGTH = 8; - static final int MAX_TIMESTAMP_OFFSET = FIRST_TIMESTAMP_OFFSET + FIRST_TIMESTAMP_LENGTH; - static final int MAX_TIMESTAMP_LENGTH = 8; - static final int PRODUCER_ID_OFFSET = MAX_TIMESTAMP_OFFSET + MAX_TIMESTAMP_LENGTH; - static final int PRODUCER_ID_LENGTH = 8; - static final int PRODUCER_EPOCH_OFFSET = PRODUCER_ID_OFFSET + PRODUCER_ID_LENGTH; - static final int PRODUCER_EPOCH_LENGTH = 2; - static final int BASE_SEQUENCE_OFFSET = PRODUCER_EPOCH_OFFSET + PRODUCER_EPOCH_LENGTH; - static final int BASE_SEQUENCE_LENGTH = 4; - public static final int RECORDS_COUNT_OFFSET = BASE_SEQUENCE_OFFSET + BASE_SEQUENCE_LENGTH; - static final int RECORDS_COUNT_LENGTH = 4; - static final int RECORDS_OFFSET = RECORDS_COUNT_OFFSET + RECORDS_COUNT_LENGTH; - 
public static final int RECORD_BATCH_OVERHEAD = RECORDS_OFFSET; - - private static final byte COMPRESSION_CODEC_MASK = 0x07; - private static final byte TRANSACTIONAL_FLAG_MASK = 0x10; - private static final int CONTROL_FLAG_MASK = 0x20; - private static final byte TIMESTAMP_TYPE_MASK = 0x08; - - private final ByteBuffer buffer; - - DefaultRecordBatch(ByteBuffer buffer) { - this.buffer = buffer; - } - - @Override - public byte magic() { - return buffer.get(MAGIC_OFFSET); - } - - @Override - public void ensureValid() { - if (sizeInBytes() < RECORD_BATCH_OVERHEAD) - throw new InvalidRecordException("Record batch is corrupt (the size " + sizeInBytes() + - " is smaller than the minimum allowed overhead " + RECORD_BATCH_OVERHEAD + ")"); - - if (!isValid()) - throw new InvalidRecordException("Record is corrupt (stored crc = " + checksum() - + ", computed crc = " + computeChecksum() + ")"); - } - - /** - * Get the timestamp of the first record in this batch. It is always the create time of the record even if the - * timestamp type of the batch is log append time. - * - * @return The first timestamp or {@link RecordBatch#NO_TIMESTAMP} if the batch is empty - */ - public long firstTimestamp() { - return buffer.getLong(FIRST_TIMESTAMP_OFFSET); - } - - @Override - public long maxTimestamp() { - return buffer.getLong(MAX_TIMESTAMP_OFFSET); - } - - @Override - public TimestampType timestampType() { - return (attributes() & TIMESTAMP_TYPE_MASK) == 0 ? TimestampType.CREATE_TIME : TimestampType.LOG_APPEND_TIME; - } - - @Override - public long baseOffset() { - return buffer.getLong(BASE_OFFSET_OFFSET); - } - - @Override - public long lastOffset() { - return baseOffset() + lastOffsetDelta(); - } - - @Override - public long producerId() { - return buffer.getLong(PRODUCER_ID_OFFSET); - } - - @Override - public short producerEpoch() { - return buffer.getShort(PRODUCER_EPOCH_OFFSET); - } - - @Override - public int baseSequence() { - return buffer.getInt(BASE_SEQUENCE_OFFSET); - } - - private int lastOffsetDelta() { - return buffer.getInt(LAST_OFFSET_DELTA_OFFSET); - } - - @Override - public int lastSequence() { - int baseSequence = baseSequence(); - if (baseSequence == RecordBatch.NO_SEQUENCE) - return RecordBatch.NO_SEQUENCE; - return incrementSequence(baseSequence, lastOffsetDelta()); - } - - @Override - public CompressionType compressionType() { - return CompressionType.forId(attributes() & COMPRESSION_CODEC_MASK); - } - - @Override - public int sizeInBytes() { - return LOG_OVERHEAD + buffer.getInt(LENGTH_OFFSET); - } - - private int count() { - return buffer.getInt(RECORDS_COUNT_OFFSET); - } - - @Override - public Integer countOrNull() { - return count(); - } - - @Override - public void writeTo(ByteBuffer buffer) { - buffer.put(this.buffer.duplicate()); - } - - @Override - public void writeTo(ByteBufferOutputStream outputStream) { - outputStream.write(this.buffer.duplicate()); - } - - @Override - public boolean isTransactional() { - return (attributes() & TRANSACTIONAL_FLAG_MASK) > 0; - } - - @Override - public boolean isControlBatch() { - return (attributes() & CONTROL_FLAG_MASK) > 0; - } - - @Override - public int partitionLeaderEpoch() { - return buffer.getInt(PARTITION_LEADER_EPOCH_OFFSET); - } - - private CloseableIterator compressedIterator(BufferSupplier bufferSupplier) { - final ByteBuffer buffer = this.buffer.duplicate(); - buffer.position(RECORDS_OFFSET); - final DataInputStream inputStream = new DataInputStream(compressionType().wrapForInput(buffer, magic(), - bufferSupplier)); - - return new 
RecordIterator() { - @Override - protected Record readNext(long baseOffset, long firstTimestamp, int baseSequence, Long logAppendTime) { - try { - return DefaultRecord.readFrom(inputStream, baseOffset, firstTimestamp, baseSequence, logAppendTime); - } catch (EOFException e) { - throw new InvalidRecordException("Incorrect declared batch size, premature EOF reached"); - } catch (IOException e) { - throw new KafkaException("Failed to decompress record stream", e); - } - } - - @Override - protected boolean ensureNoneRemaining() { - try { - return inputStream.read() == -1; - } catch (IOException e) { - throw new KafkaException("Error checking for remaining bytes after reading batch", e); - } - } - - @Override - public void close() { - try { - inputStream.close(); - } catch (IOException e) { - throw new KafkaException("Failed to close record stream", e); - } - } - }; - } - - private CloseableIterator uncompressedIterator() { - final ByteBuffer buffer = this.buffer.duplicate(); - buffer.position(RECORDS_OFFSET); - return new RecordIterator() { - @Override - protected Record readNext(long baseOffset, long firstTimestamp, int baseSequence, Long logAppendTime) { - try { - return DefaultRecord.readFrom(buffer, baseOffset, firstTimestamp, baseSequence, logAppendTime); - } catch (BufferUnderflowException e) { - throw new InvalidRecordException("Incorrect declared batch size, premature EOF reached"); - } - } - @Override - protected boolean ensureNoneRemaining() { - return !buffer.hasRemaining(); - } - @Override - public void close() {} - }; - } - - @Override - public Iterator iterator() { - if (count() == 0) - return Collections.emptyIterator(); - - if (!isCompressed()) - return uncompressedIterator(); - - // for a normal iterator, we cannot ensure that the underlying compression stream is closed, - // so we decompress the full record set here. Use cases which call for a lower memory footprint - // can use `streamingIterator` at the cost of additional complexity - try (CloseableIterator iterator = compressedIterator(BufferSupplier.NO_CACHING)) { - List records = new ArrayList<>(count()); - while (iterator.hasNext()) - records.add(iterator.next()); - return records.iterator(); - } - } - - @Override - public CloseableIterator streamingIterator(BufferSupplier bufferSupplier) { - if (isCompressed()) - return compressedIterator(bufferSupplier); - else - return uncompressedIterator(); - } - - @Override - public void setLastOffset(long offset) { - buffer.putLong(BASE_OFFSET_OFFSET, offset - lastOffsetDelta()); - } - - @Override - public void setMaxTimestamp(TimestampType timestampType, long maxTimestamp) { - long currentMaxTimestamp = maxTimestamp(); - // We don't need to recompute crc if the timestamp is not updated. 
- if (timestampType() == timestampType && currentMaxTimestamp == maxTimestamp) - return; - - byte attributes = computeAttributes(compressionType(), timestampType, isTransactional(), isControlBatch()); - buffer.putShort(ATTRIBUTES_OFFSET, attributes); - buffer.putLong(MAX_TIMESTAMP_OFFSET, maxTimestamp); - long crc = computeChecksum(); - ByteUtils.writeUnsignedInt(buffer, CRC_OFFSET, crc); - } - - @Override - public void setPartitionLeaderEpoch(int epoch) { - buffer.putInt(PARTITION_LEADER_EPOCH_OFFSET, epoch); - } - - @Override - public long checksum() { - return ByteUtils.readUnsignedInt(buffer, CRC_OFFSET); - } - - public boolean isValid() { - return sizeInBytes() >= RECORD_BATCH_OVERHEAD && checksum() == computeChecksum(); - } - - private long computeChecksum() { - return Crc32C.compute(buffer, ATTRIBUTES_OFFSET, buffer.limit() - ATTRIBUTES_OFFSET); - } - - private byte attributes() { - // note we're not using the second byte of attributes - return (byte) buffer.getShort(ATTRIBUTES_OFFSET); - } - - @Override - public boolean equals(Object o) { - if (this == o) - return true; - if (o == null || getClass() != o.getClass()) - return false; - - DefaultRecordBatch that = (DefaultRecordBatch) o; - return buffer != null ? buffer.equals(that.buffer) : that.buffer == null; - } - - @Override - public int hashCode() { - return buffer != null ? buffer.hashCode() : 0; - } - - private static byte computeAttributes(CompressionType type, TimestampType timestampType, - boolean isTransactional, boolean isControl) { - if (timestampType == TimestampType.NO_TIMESTAMP_TYPE) - throw new IllegalArgumentException("Timestamp type must be provided to compute attributes for message " + - "format v2 and above"); - - byte attributes = isTransactional ? TRANSACTIONAL_FLAG_MASK : 0; - if (isControl) - attributes |= CONTROL_FLAG_MASK; - if (type.id > 0) - attributes |= COMPRESSION_CODEC_MASK & type.id; - if (timestampType == TimestampType.LOG_APPEND_TIME) - attributes |= TIMESTAMP_TYPE_MASK; - return attributes; - } - - public static void writeEmptyHeader(ByteBuffer buffer, - byte magic, - long producerId, - short producerEpoch, - int baseSequence, - long baseOffset, - long lastOffset, - int partitionLeaderEpoch, - TimestampType timestampType, - long timestamp, - boolean isTransactional, - boolean isControlRecord) { - int offsetDelta = (int) (lastOffset - baseOffset); - writeHeader(buffer, baseOffset, offsetDelta, DefaultRecordBatch.RECORD_BATCH_OVERHEAD, magic, - CompressionType.NONE, timestampType, RecordBatch.NO_TIMESTAMP, timestamp, producerId, - producerEpoch, baseSequence, isTransactional, isControlRecord, partitionLeaderEpoch, 0); - } - - static void writeHeader(ByteBuffer buffer, - long baseOffset, - int lastOffsetDelta, - int sizeInBytes, - byte magic, - CompressionType compressionType, - TimestampType timestampType, - long firstTimestamp, - long maxTimestamp, - long producerId, - short epoch, - int sequence, - boolean isTransactional, - boolean isControlBatch, - int partitionLeaderEpoch, - int numRecords) { - if (magic < RecordBatch.CURRENT_MAGIC_VALUE) - throw new IllegalArgumentException("Invalid magic value " + magic); - if (firstTimestamp < 0 && firstTimestamp != NO_TIMESTAMP) - throw new IllegalArgumentException("Invalid message timestamp " + firstTimestamp); - - short attributes = computeAttributes(compressionType, timestampType, isTransactional, isControlBatch); - - int position = buffer.position(); - buffer.putLong(position + BASE_OFFSET_OFFSET, baseOffset); - buffer.putInt(position + LENGTH_OFFSET, 
sizeInBytes - LOG_OVERHEAD); - buffer.putInt(position + PARTITION_LEADER_EPOCH_OFFSET, partitionLeaderEpoch); - buffer.put(position + MAGIC_OFFSET, magic); - buffer.putShort(position + ATTRIBUTES_OFFSET, attributes); - buffer.putLong(position + FIRST_TIMESTAMP_OFFSET, firstTimestamp); - buffer.putLong(position + MAX_TIMESTAMP_OFFSET, maxTimestamp); - buffer.putInt(position + LAST_OFFSET_DELTA_OFFSET, lastOffsetDelta); - buffer.putLong(position + PRODUCER_ID_OFFSET, producerId); - buffer.putShort(position + PRODUCER_EPOCH_OFFSET, epoch); - buffer.putInt(position + BASE_SEQUENCE_OFFSET, sequence); - buffer.putInt(position + RECORDS_COUNT_OFFSET, numRecords); - long crc = Crc32C.compute(buffer, ATTRIBUTES_OFFSET, sizeInBytes - ATTRIBUTES_OFFSET); - buffer.putInt(position + CRC_OFFSET, (int) crc); - buffer.position(position + RECORD_BATCH_OVERHEAD); - } - - @Override - public String toString() { - return "RecordBatch(magic=" + magic() + ", offsets=[" + baseOffset() + ", " + lastOffset() + "], " + - "compression=" + compressionType() + ", timestampType=" + timestampType() + ", crc=" + checksum() + ")"; - } - - public static int sizeInBytes(long baseOffset, Iterable records) { - Iterator iterator = records.iterator(); - if (!iterator.hasNext()) - return 0; - - int size = RECORD_BATCH_OVERHEAD; - Long firstTimestamp = null; - while (iterator.hasNext()) { - Record record = iterator.next(); - int offsetDelta = (int) (record.offset() - baseOffset); - if (firstTimestamp == null) - firstTimestamp = record.timestamp(); - long timestampDelta = record.timestamp() - firstTimestamp; - size += DefaultRecord.sizeInBytes(offsetDelta, timestampDelta, record.key(), record.value(), - record.headers()); - } - return size; - } - - public static int sizeInBytes(Iterable records) { - Iterator iterator = records.iterator(); - if (!iterator.hasNext()) - return 0; - - int size = RECORD_BATCH_OVERHEAD; - int offsetDelta = 0; - Long firstTimestamp = null; - while (iterator.hasNext()) { - SimpleRecord record = iterator.next(); - if (firstTimestamp == null) - firstTimestamp = record.timestamp(); - long timestampDelta = record.timestamp() - firstTimestamp; - size += DefaultRecord.sizeInBytes(offsetDelta++, timestampDelta, record.key(), record.value(), - record.headers()); - } - return size; - } - - /** - * Get an upper bound on the size of a batch with only a single record using a given key and value. This - * is only an estimate because it does not take into account additional overhead from the compression - * algorithm used. - */ - static int estimateBatchSizeUpperBound(ByteBuffer key, ByteBuffer value, Header[] headers) { - return RECORD_BATCH_OVERHEAD + DefaultRecord.recordSizeUpperBound(key, value, headers); - } - - static int incrementSequence(int baseSequence, int increment) { - if (baseSequence > Integer.MAX_VALUE - increment) - return increment - (Integer.MAX_VALUE - baseSequence) - 1; - return baseSequence + increment; - } - - private abstract class RecordIterator implements CloseableIterator { - private final Long logAppendTime; - private final long baseOffset; - private final long firstTimestamp; - private final int baseSequence; - private final int numRecords; - private int readRecords = 0; - - public RecordIterator() { - this.logAppendTime = timestampType() == TimestampType.LOG_APPEND_TIME ? 
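/*
 * Illustrative sketch (not part of the original sources): incrementSequence(...) above wraps the
 * producer sequence number back through zero once it would pass Integer.MAX_VALUE. The standalone
 * copy below reproduces that arithmetic so the wrap case is easy to check in isolation.
 */
class SequenceWrapSketch {
    static int incrementSequence(int baseSequence, int increment) {
        if (baseSequence > Integer.MAX_VALUE - increment)
            return increment - (Integer.MAX_VALUE - baseSequence) - 1;
        return baseSequence + increment;
    }

    public static void main(String[] args) {
        System.out.println(incrementSequence(10, 5));                    // 15
        System.out.println(incrementSequence(Integer.MAX_VALUE - 1, 3)); // wraps around to 1
    }
}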
maxTimestamp() : null; - this.baseOffset = baseOffset(); - this.firstTimestamp = firstTimestamp(); - this.baseSequence = baseSequence(); - int numRecords = count(); - if (numRecords < 0) - throw new InvalidRecordException("Found invalid record count " + numRecords + " in magic v" + - magic() + " batch"); - this.numRecords = numRecords; - } - - @Override - public boolean hasNext() { - return readRecords < numRecords; - } - - @Override - public Record next() { - if (readRecords >= numRecords) - throw new NoSuchElementException(); - - readRecords++; - Record rec = readNext(baseOffset, firstTimestamp, baseSequence, logAppendTime); - if (readRecords == numRecords) { - // Validate that the actual size of the batch is equal to declared size - // by checking that after reading declared number of items, there no items left - // (overflow case, i.e. reading past buffer end is checked elsewhere). - if (!ensureNoneRemaining()) - throw new InvalidRecordException("Incorrect declared batch size, records still remaining in file"); - } - return rec; - } - - protected abstract Record readNext(long baseOffset, long firstTimestamp, int baseSequence, Long logAppendTime); - - protected abstract boolean ensureNoneRemaining(); - - @Override - public void remove() { - throw new UnsupportedOperationException(); - } - - } - - static class DefaultFileChannelRecordBatch extends FileLogInputStream.FileChannelRecordBatch { - - DefaultFileChannelRecordBatch(long offset, - byte magic, - FileChannel channel, - int position, - int batchSize) { - super(offset, magic, channel, position, batchSize); - } - - @Override - protected RecordBatch toMemoryRecordBatch(ByteBuffer buffer) { - return new DefaultRecordBatch(buffer); - } - - @Override - public long baseOffset() { - return offset; - } - - @Override - public long lastOffset() { - return loadBatchHeader().lastOffset(); - } - - @Override - public long producerId() { - return loadBatchHeader().producerId(); - } - - @Override - public short producerEpoch() { - return loadBatchHeader().producerEpoch(); - } - - @Override - public int baseSequence() { - return loadBatchHeader().baseSequence(); - } - - @Override - public int lastSequence() { - return loadBatchHeader().lastSequence(); - } - - @Override - public long checksum() { - return loadBatchHeader().checksum(); - } - - @Override - public Integer countOrNull() { - return loadBatchHeader().countOrNull(); - } - - @Override - public boolean isTransactional() { - return loadBatchHeader().isTransactional(); - } - - @Override - public boolean isControlBatch() { - return loadBatchHeader().isControlBatch(); - } - - @Override - public int partitionLeaderEpoch() { - return loadBatchHeader().partitionLeaderEpoch(); - } - - @Override - protected int headerSize() { - return RECORD_BATCH_OVERHEAD; - } - } - -} diff --git a/clients/src/main/java/org/oracle/okafka/common/record/FileLogInputStream.java b/clients/src/main/java/org/oracle/okafka/common/record/FileLogInputStream.java deleted file mode 100644 index 4c9b764..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/record/FileLogInputStream.java +++ /dev/null @@ -1,259 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. 
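/*
 * Illustrative sketch (not part of the original sources): RecordIterator above trusts the declared
 * record count, reads exactly that many entries, and then calls ensureNoneRemaining() so a batch
 * whose declared size disagrees with its payload is rejected. The reader below applies the same
 * rule to a buffer of length-prefixed byte entries (a made-up format used only for illustration).
 */
import java.nio.BufferUnderflowException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;

class DeclaredCountSketch {
    static List<byte[]> readAll(ByteBuffer buffer, int declaredCount) {
        List<byte[]> entries = new ArrayList<>(declaredCount);
        try {
            for (int i = 0; i < declaredCount; i++) {
                byte[] entry = new byte[buffer.getInt()];
                buffer.get(entry);
                entries.add(entry);
            }
        } catch (BufferUnderflowException e) {
            throw new IllegalStateException("Declared count larger than payload", e);
        }
        if (buffer.hasRemaining())
            throw new IllegalStateException("Bytes left over after declared count was read");
        return entries;
    }
}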
See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.record; - -import static org.oracle.okafka.common.record.Records.HEADER_SIZE_UP_TO_MAGIC; -import static org.oracle.okafka.common.record.Records.LOG_OVERHEAD; -import static org.oracle.okafka.common.record.Records.MAGIC_OFFSET; -import static org.oracle.okafka.common.record.Records.OFFSET_OFFSET; -import static org.oracle.okafka.common.record.Records.SIZE_OFFSET; - -import java.io.IOException; -import java.nio.ByteBuffer; -import java.nio.channels.FileChannel; -import java.util.Iterator; - -import org.oracle.okafka.common.KafkaException; -import org.oracle.okafka.common.errors.CorruptRecordException; -import org.oracle.okafka.common.record.AbstractLegacyRecordBatch.LegacyFileChannelRecordBatch; -import org.oracle.okafka.common.record.DefaultRecordBatch.DefaultFileChannelRecordBatch; -import org.oracle.okafka.common.utils.CloseableIterator; -import org.oracle.okafka.common.utils.Utils; - -/** - * A log input stream which is backed by a {@link FileChannel}. - */ -public class FileLogInputStream implements LogInputStream { - private int position; - private final int end; - private final FileRecords fileRecords; - private final ByteBuffer logHeaderBuffer = ByteBuffer.allocate(HEADER_SIZE_UP_TO_MAGIC); - - /** - * Create a new log input stream over the FileChannel - * @param records Underlying FileRecords instance - * @param start Position in the file channel to start from - * @param end Position in the file channel not to read past - */ - FileLogInputStream(FileRecords records, - int start, - int end) { - this.fileRecords = records; - this.position = start; - this.end = end; - } - - @Override - public FileChannelRecordBatch nextBatch() throws IOException { - FileChannel channel = fileRecords.channel(); - if (position >= end - HEADER_SIZE_UP_TO_MAGIC) - return null; - - logHeaderBuffer.rewind(); - Utils.readFullyOrFail(channel, logHeaderBuffer, position, "log header"); - - logHeaderBuffer.rewind(); - long offset = logHeaderBuffer.getLong(OFFSET_OFFSET); - int size = logHeaderBuffer.getInt(SIZE_OFFSET); - - // V0 has the smallest overhead, stricter checking is done later - if (size < LegacyRecord.RECORD_OVERHEAD_V0) - throw new CorruptRecordException(String.format("Found record size %d smaller than minimum record " + - "overhead (%d) in file %s.", size, LegacyRecord.RECORD_OVERHEAD_V0, fileRecords.file())); - - if (position > end - LOG_OVERHEAD - size) - return null; - - byte magic = logHeaderBuffer.get(MAGIC_OFFSET); - final FileChannelRecordBatch batch; - - if (magic < RecordBatch.MAGIC_VALUE_V2) - batch = new LegacyFileChannelRecordBatch(offset, magic, channel, position, size); - else - batch = new DefaultFileChannelRecordBatch(offset, magic, channel, position, size); - - position += batch.sizeInBytes(); - return batch; - } - - /** - * Log entry backed by an underlying 
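/*
 * Illustrative sketch (not part of the original sources): FileLogInputStream.nextBatch() above
 * peeks only at the fixed-size batch header, validates the size, and then skips forward by the
 * full batch length without reading the record data. The scanner below does the same over an
 * in-memory buffer; an 8-byte offset followed by a 4-byte size (LOG_OVERHEAD = 12) is assumed.
 */
import java.nio.ByteBuffer;

class LogScanSketch {
    static final int LOG_OVERHEAD = 8 + 4; // offset (long) + size (int)

    static void scan(ByteBuffer log) {
        int position = 0;
        int end = log.limit();
        while (position + LOG_OVERHEAD <= end) {
            long baseOffset = log.getLong(position);
            int size = log.getInt(position + 8);
            if (size < 0 || position + LOG_OVERHEAD + size > end)
                break; // truncated or corrupt trailing batch
            System.out.println("batch at " + position + ", baseOffset=" + baseOffset + ", size=" + size);
            position += LOG_OVERHEAD + size;
        }
    }
}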
FileChannel. This allows iteration over the record batches - * without needing to read the record data into memory until it is needed. The downside - * is that entries will generally no longer be readable when the underlying channel is closed. - */ - public abstract static class FileChannelRecordBatch extends AbstractRecordBatch { - protected final long offset; - protected final byte magic; - protected final FileChannel channel; - protected final int position; - protected final int batchSize; - - private RecordBatch fullBatch; - private RecordBatch batchHeader; - - FileChannelRecordBatch(long offset, - byte magic, - FileChannel channel, - int position, - int batchSize) { - this.offset = offset; - this.magic = magic; - this.channel = channel; - this.position = position; - this.batchSize = batchSize; - } - - @Override - public CompressionType compressionType() { - return loadBatchHeader().compressionType(); - } - - @Override - public TimestampType timestampType() { - return loadBatchHeader().timestampType(); - } - - @Override - public long checksum() { - return loadBatchHeader().checksum(); - } - - @Override - public long maxTimestamp() { - return loadBatchHeader().maxTimestamp(); - } - - public int position() { - return position; - } - - @Override - public byte magic() { - return magic; - } - - @Override - public Iterator iterator() { - return loadFullBatch().iterator(); - } - - @Override - public CloseableIterator streamingIterator(BufferSupplier bufferSupplier) { - return loadFullBatch().streamingIterator(bufferSupplier); - } - - @Override - public boolean isValid() { - return loadFullBatch().isValid(); - } - - @Override - public void ensureValid() { - loadFullBatch().ensureValid(); - } - - @Override - public int sizeInBytes() { - return LOG_OVERHEAD + batchSize; - } - - @Override - public void writeTo(ByteBuffer buffer) { - try { - int limit = buffer.limit(); - buffer.limit(buffer.position() + sizeInBytes()); - Utils.readFully(channel, buffer, position); - buffer.limit(limit); - } catch (IOException e) { - throw new KafkaException("Failed to read record batch at position " + position + " from file channel " + - channel, e); - } - } - - protected abstract RecordBatch toMemoryRecordBatch(ByteBuffer buffer); - - protected abstract int headerSize(); - - protected RecordBatch loadFullBatch() { - if (fullBatch == null) { - batchHeader = null; - fullBatch = loadBatchWithSize(sizeInBytes(), "full record batch"); - } - return fullBatch; - } - - protected RecordBatch loadBatchHeader() { - if (fullBatch != null) - return fullBatch; - - if (batchHeader == null) - batchHeader = loadBatchWithSize(headerSize(), "record batch header"); - - return batchHeader; - } - - private RecordBatch loadBatchWithSize(int size, String description) { - try { - ByteBuffer buffer = ByteBuffer.allocate(size); - Utils.readFullyOrFail(channel, buffer, position, description); - buffer.rewind(); - return toMemoryRecordBatch(buffer); - } catch (IOException e) { - throw new KafkaException(e); - } - } - - @Override - public boolean equals(Object o) { - if (this == o) - return true; - if (o == null || getClass() != o.getClass()) - return false; - - FileChannelRecordBatch that = (FileChannelRecordBatch) o; - - return offset == that.offset && - position == that.position && - batchSize == that.batchSize && - (channel == null ? that.channel == null : channel.equals(that.channel)); - } - - @Override - public int hashCode() { - int result = (int) (offset ^ (offset >>> 32)); - result = 31 * result + (channel != null ? 
channel.hashCode() : 0); - result = 31 * result + position; - result = 31 * result + batchSize; - return result; - } - - @Override - public String toString() { - return "FileChannelRecordBatch(magic: " + magic + - ", offset: " + offset + - ", size: " + batchSize + ")"; - } - } -} diff --git a/clients/src/main/java/org/oracle/okafka/common/record/FileRecords.java b/clients/src/main/java/org/oracle/okafka/common/record/FileRecords.java deleted file mode 100644 index 4c9258c..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/record/FileRecords.java +++ /dev/null @@ -1,491 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.record; - -import java.io.Closeable; -import java.io.File; -import java.io.FileInputStream; -import java.io.IOException; -import java.io.RandomAccessFile; -import java.nio.ByteBuffer; -import java.nio.channels.FileChannel; -import java.nio.file.Files; -import java.util.Iterator; -import java.util.concurrent.atomic.AtomicInteger; - -import org.oracle.okafka.common.KafkaException; -import org.oracle.okafka.common.record.FileLogInputStream.FileChannelRecordBatch; -import org.oracle.okafka.common.utils.AbstractIterator; -import org.oracle.okafka.common.utils.Utils; - -/** - * A {@link Records} implementation backed by a file. An optional start and end position can be applied to this - * instance to enable slicing a range of the log records. - */ -public class FileRecords extends AbstractRecords implements Closeable { - private final boolean isSlice; - private final int start; - private final int end; - - private final Iterable batches; - - // mutable state - private final AtomicInteger size; - private final FileChannel channel; - private volatile File file; - - /** - * The {@code FileRecords.open} methods should be used instead of this constructor whenever possible. - * The constructor is visible for tests. 
- */ - public FileRecords(File file, - FileChannel channel, - int start, - int end, - boolean isSlice) throws IOException { - this.file = file; - this.channel = channel; - this.start = start; - this.end = end; - this.isSlice = isSlice; - this.size = new AtomicInteger(); - - if (isSlice) { - // don't check the file size if this is just a slice view - size.set(end - start); - } else { - int limit = Math.min((int) channel.size(), end); - size.set(limit - start); - - // if this is not a slice, update the file pointer to the end of the file - // set the file position to the last byte in the file - channel.position(limit); - } - - batches = batchesFrom(start); - } - - @Override - public int sizeInBytes() { - return size.get(); - } - - /** - * Get the underlying file. - * @return The file - */ - public File file() { - return file; - } - - /** - * Get the underlying file channel. - * @return The file channel - */ - public FileChannel channel() { - return channel; - } - - /** - * Read log batches into the given buffer until there are no bytes remaining in the buffer or the end of the file - * is reached. - * - * @param buffer The buffer to write the batches to - * @param position Position in the buffer to read from - * @throws IOException If an I/O error occurs, see {@link FileChannel#read(ByteBuffer, long)} for details on the - * possible exceptions - */ - public void readInto(ByteBuffer buffer, int position) throws IOException { - Utils.readFully(channel, buffer, position + this.start); - buffer.flip(); - } - - /** - * Return a slice of records from this instance, which is a view into this set starting from the given position - * and with the given size limit. - * - * If the size is beyond the end of the file, the end will be based on the size of the file at the time of the read. - * - * If this message set is already sliced, the position will be taken relative to that slicing. - * - * @param position The start position to begin the read from - * @param size The number of bytes after the start position to include - * @return A sliced wrapper on this message set limited based on the given position and size - */ - public FileRecords slice(int position, int size) throws IOException { - if (position < 0) - throw new IllegalArgumentException("Invalid position: " + position + " in read from " + file); - if (size < 0) - throw new IllegalArgumentException("Invalid size: " + size + " in read from " + file); - - int end = this.start + position + size; - // handle integer overflow or if end is beyond the end of the file - if (end < 0 || end >= start + sizeInBytes()) - end = start + sizeInBytes(); - return new FileRecords(file, channel, this.start + position, end, true); - } - - /** - * Append log batches to the buffer - * @param records The records to append - * @return the number of bytes written to the underlying file - */ - public int append(MemoryRecords records) throws IOException { - int written = records.writeFullyTo(channel); - size.getAndAdd(written); - return written; - } - - /** - * Commit all written data to the physical disk - */ - public void flush() throws IOException { - channel.force(true); - } - - /** - * Close this record set - */ - public void close() throws IOException { - flush(); - trim(); - channel.close(); - } - - /** - * Close file handlers used by the FileChannel but don't write to disk. 
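/*
 * Illustrative sketch (not part of the original sources): slice(...) above validates its
 * arguments and then clamps the requested end so a slice can never extend past the backing
 * records (including the int-overflow case). The clamping arithmetic is isolated below.
 */
class SliceBoundsSketch {
    /**
     * Returns the absolute end position for a slice of [position, position + size) within a view
     * that starts at {@code start} and currently holds {@code sizeInBytes} bytes.
     */
    static int sliceEnd(int start, int sizeInBytes, int position, int size) {
        if (position < 0)
            throw new IllegalArgumentException("Invalid position: " + position);
        if (size < 0)
            throw new IllegalArgumentException("Invalid size: " + size);
        int end = start + position + size;
        // handle integer overflow or a request that runs past the available bytes
        if (end < 0 || end >= start + sizeInBytes)
            end = start + sizeInBytes;
        return end;
    }
}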
This is used when the disk may have failed - */ - public void closeHandlers() throws IOException { - channel.close(); - } - - /** - * Delete this message set from the filesystem - * @throws IOException if deletion fails due to an I/O error - * @return {@code true} if the file was deleted by this method; {@code false} if the file could not be deleted - * because it did not exist - */ - public boolean deleteIfExists() throws IOException { - Utils.closeQuietly(channel, "FileChannel"); - return Files.deleteIfExists(file.toPath()); - } - - /** - * Trim file when close or roll to next file - */ - public void trim() throws IOException { - truncateTo(sizeInBytes()); - } - - /** - * Update the file reference (to be used with caution since this does not reopen the file channel) - * @param file The new file to use - */ - public void setFile(File file) { - this.file = file; - } - - /** - * Rename the file that backs this message set - * @throws IOException if rename fails. - */ - public void renameTo(File f) throws IOException { - try { - Utils.atomicMoveWithFallback(file.toPath(), f.toPath()); - } finally { - this.file = f; - } - } - - /** - * Truncate this file message set to the given size in bytes. Note that this API does no checking that the - * given size falls on a valid message boundary. - * In some versions of the JDK truncating to the same size as the file message set will cause an - * update of the files mtime, so truncate is only performed if the targetSize is smaller than the - * size of the underlying FileChannel. - * It is expected that no other threads will do writes to the log when this function is called. - * @param targetSize The size to truncate to. Must be between 0 and sizeInBytes. - * @return The number of bytes truncated off - */ - public int truncateTo(int targetSize) throws IOException { - int originalSize = sizeInBytes(); - if (targetSize > originalSize || targetSize < 0) - throw new KafkaException("Attempt to truncate log segment " + file + " to " + targetSize + " bytes failed, " + - " size of this log segment is " + originalSize + " bytes."); - if (targetSize < (int) channel.size()) { - channel.truncate(targetSize); - size.set(targetSize); - } - return originalSize - targetSize; - } - - /** - * Search forward for the file position of the last offset that is greater than or equal to the target offset - * and return its physical position and the size of the message (including log overhead) at the returned offset. If - * no such offsets are found, return null. - * - * @param targetOffset The offset to search for. - * @param startingPosition The starting position in the file to begin searching from. - */ - public LogOffsetPosition searchForOffsetWithSize(long targetOffset, int startingPosition) { - for (FileChannelRecordBatch batch : batchesFrom(startingPosition)) { - long offset = batch.lastOffset(); - if (offset >= targetOffset) - return new LogOffsetPosition(offset, batch.position(), batch.sizeInBytes()); - } - return null; - } - - /** - * Search forward for the first message that meets the following requirements: - * - Message's timestamp is greater than or equals to the targetTimestamp. - * - Message's position in the log file is greater than or equals to the startingPosition. - * - Message's offset is greater than or equals to the startingOffset. - * - * @param targetTimestamp The timestamp to search for. - * @param startingPosition The starting position to search. - * @param startingOffset The starting offset to search. 
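/*
 * Illustrative sketch (not part of the original sources): searchForOffsetWithSize(...) above
 * walks the batches from a starting file position and returns the first batch whose last offset
 * reaches the target. The same linear scan is shown below over plain batch summaries; the
 * BatchSummary type is a made-up stand-in for FileChannelRecordBatch.
 */
import java.util.List;

class OffsetSearchSketch {
    static class BatchSummary {
        final long lastOffset;
        final int position;
        final int sizeInBytes;

        BatchSummary(long lastOffset, int position, int sizeInBytes) {
            this.lastOffset = lastOffset;
            this.position = position;
            this.sizeInBytes = sizeInBytes;
        }
    }

    static BatchSummary searchForOffset(List<BatchSummary> batches, long targetOffset) {
        for (BatchSummary batch : batches)
            if (batch.lastOffset >= targetOffset)
                return batch;
        return null; // no batch at or beyond the target offset
    }
}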
- * @return The timestamp and offset of the message found. Null if no message is found. - */ - public TimestampAndOffset searchForTimestamp(long targetTimestamp, int startingPosition, long startingOffset) { - for (RecordBatch batch : batchesFrom(startingPosition)) { - if (batch.maxTimestamp() >= targetTimestamp) { - // We found a message - for (Record record : batch) { - long timestamp = record.timestamp(); - if (timestamp >= targetTimestamp && record.offset() >= startingOffset) - return new TimestampAndOffset(timestamp, record.offset()); - } - } - } - return null; - } - - /** - * Return the largest timestamp of the messages after a given position in this file message set. - * @param startingPosition The starting position. - * @return The largest timestamp of the messages after the given position. - */ - public TimestampAndOffset largestTimestampAfter(int startingPosition) { - long maxTimestamp = RecordBatch.NO_TIMESTAMP; - long offsetOfMaxTimestamp = -1L; - - for (RecordBatch batch : batchesFrom(startingPosition)) { - long timestamp = batch.maxTimestamp(); - if (timestamp > maxTimestamp) { - maxTimestamp = timestamp; - offsetOfMaxTimestamp = batch.lastOffset(); - } - } - return new TimestampAndOffset(maxTimestamp, offsetOfMaxTimestamp); - } - - /** - * Get an iterator over the record batches in the file. Note that the batches are - * backed by the open file channel. When the channel is closed (i.e. when this instance - * is closed), the batches will generally no longer be readable. - * @return An iterator over the batches - */ - @Override - public Iterable batches() { - return batches; - } - - @Override - public String toString() { - return "FileRecords(file= " + file + - ", start=" + start + - ", end=" + end + - ")"; - } - - /** - * Get an iterator over the record batches in the file, starting at a specific position. This is similar to - * {@link #batches()} except that callers specify a particular position to start reading the batches from. This - * method must be used with caution: the start position passed in must be a known start of a batch. - * @param start The position to start record iteration from; must be a known position for start of a batch - * @return An iterator over batches starting from {@code start} - */ - public Iterable batchesFrom(final int start) { - return new Iterable() { - @Override - public Iterator iterator() { - return batchIterator(start); - } - }; - } - - @Override - public AbstractIterator batchIterator() { - return batchIterator(start); - } - - private AbstractIterator batchIterator(int start) { - final int end; - if (isSlice) - end = this.end; - else - end = this.sizeInBytes(); - FileLogInputStream inputStream = new FileLogInputStream(this, start, end); - return new RecordBatchIterator<>(inputStream); - } - - public static FileRecords open(File file, - boolean mutable, - boolean fileAlreadyExists, - int initFileSize, - boolean preallocate) throws IOException { - FileChannel channel = openChannel(file, mutable, fileAlreadyExists, initFileSize, preallocate); - int end = (!fileAlreadyExists && preallocate) ? 
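/*
 * Illustrative sketch (not part of the original sources): the open(...)/openChannel(...) path
 * around here optionally pre-allocates a new segment file by setting its length up front, which
 * the accompanying comment notes can help produce performance on NTFS and some older Linux
 * filesystems. The standalone helper below shows just that pre-allocation step.
 */
import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.channels.FileChannel;

class PreallocateSketch {
    static FileChannel openPreallocated(File file, long initFileSize) throws IOException {
        RandomAccessFile raf = new RandomAccessFile(file, "rw");
        raf.setLength(initFileSize); // reserve space on disk before any batch is appended
        return raf.getChannel();
    }
}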
0 : Integer.MAX_VALUE; - return new FileRecords(file, channel, 0, end, false); - } - - public static FileRecords open(File file, - boolean fileAlreadyExists, - int initFileSize, - boolean preallocate) throws IOException { - return open(file, true, fileAlreadyExists, initFileSize, preallocate); - } - - public static FileRecords open(File file, boolean mutable) throws IOException { - return open(file, mutable, false, 0, false); - } - - public static FileRecords open(File file) throws IOException { - return open(file, true); - } - - /** - * Open a channel for the given file - * For windows NTFS and some old LINUX file system, set preallocate to true and initFileSize - * with one value (for example 512 * 1025 *1024 ) can improve the kafka produce performance. - * @param file File path - * @param mutable mutable - * @param fileAlreadyExists File already exists or not - * @param initFileSize The size used for pre allocate file, for example 512 * 1025 *1024 - * @param preallocate Pre-allocate file or not, gotten from configuration. - */ - private static FileChannel openChannel(File file, - boolean mutable, - boolean fileAlreadyExists, - int initFileSize, - boolean preallocate) throws IOException { - if (mutable) { - if (fileAlreadyExists) { - return new RandomAccessFile(file, "rw").getChannel(); - } else { - if (preallocate) { - RandomAccessFile randomAccessFile = new RandomAccessFile(file, "rw"); - randomAccessFile.setLength(initFileSize); - return randomAccessFile.getChannel(); - } else { - return new RandomAccessFile(file, "rw").getChannel(); - } - } - } else { - return new FileInputStream(file).getChannel(); - } - } - - public static class LogOffsetPosition { - public final long offset; - public final int position; - public final int size; - - public LogOffsetPosition(long offset, int position, int size) { - this.offset = offset; - this.position = position; - this.size = size; - } - - @Override - public boolean equals(Object o) { - if (this == o) - return true; - if (o == null || getClass() != o.getClass()) - return false; - - LogOffsetPosition that = (LogOffsetPosition) o; - - return offset == that.offset && - position == that.position && - size == that.size; - - } - - @Override - public int hashCode() { - int result = (int) (offset ^ (offset >>> 32)); - result = 31 * result + position; - result = 31 * result + size; - return result; - } - - @Override - public String toString() { - return "LogOffsetPosition(" + - "offset=" + offset + - ", position=" + position + - ", size=" + size + - ')'; - } - } - - public static class TimestampAndOffset { - public final long timestamp; - public final long offset; - - public TimestampAndOffset(long timestamp, long offset) { - this.timestamp = timestamp; - this.offset = offset; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - - TimestampAndOffset that = (TimestampAndOffset) o; - - if (timestamp != that.timestamp) return false; - return offset == that.offset; - } - - @Override - public int hashCode() { - int result = (int) (timestamp ^ (timestamp >>> 32)); - result = 31 * result + (int) (offset ^ (offset >>> 32)); - return result; - } - - @Override - public String toString() { - return "TimestampAndOffset(" + - "timestamp=" + timestamp + - ", offset=" + offset + - ')'; - } - } -} diff --git a/clients/src/main/java/org/oracle/okafka/common/record/InvalidRecordException.java b/clients/src/main/java/org/oracle/okafka/common/record/InvalidRecordException.java 
deleted file mode 100644 index 74c4123..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/record/InvalidRecordException.java +++ /dev/null @@ -1,41 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.record; - -import org.oracle.okafka.common.errors.CorruptRecordException; - -public class InvalidRecordException extends CorruptRecordException { - - private static final long serialVersionUID = 1; - - public InvalidRecordException(String s) { - super(s); - } - - public InvalidRecordException(String message, Throwable cause) { - super(message, cause); - } - -} diff --git a/clients/src/main/java/org/oracle/okafka/common/record/LegacyRecord.java b/clients/src/main/java/org/oracle/okafka/common/record/LegacyRecord.java deleted file mode 100644 index 8b4ae71..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/record/LegacyRecord.java +++ /dev/null @@ -1,582 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.oracle.okafka.common.record; - -import static org.oracle.okafka.common.utils.Utils.wrapNullable; - -import java.io.DataOutputStream; -import java.io.IOException; -import java.nio.ByteBuffer; - -import org.oracle.okafka.common.KafkaException; -import org.oracle.okafka.common.utils.ByteBufferOutputStream; -import org.oracle.okafka.common.utils.ByteUtils; -import org.oracle.okafka.common.utils.Checksums; -import org.oracle.okafka.common.utils.Crc32; -import org.oracle.okafka.common.utils.Utils; - -/** - * This class represents the serialized key and value along with the associated CRC and other fields - * of message format versions 0 and 1. Note that it is uncommon to need to access this class directly. - * Usually it should be accessed indirectly through the {@link Record} interface which is exposed - * through the {@link Records} object. - */ -public final class LegacyRecord { - - /** - * The current offset and size for all the fixed-length fields - */ - public static final int CRC_OFFSET = 0; - public static final int CRC_LENGTH = 4; - public static final int MAGIC_OFFSET = CRC_OFFSET + CRC_LENGTH; - public static final int MAGIC_LENGTH = 1; - public static final int ATTRIBUTES_OFFSET = MAGIC_OFFSET + MAGIC_LENGTH; - public static final int ATTRIBUTES_LENGTH = 1; - public static final int TIMESTAMP_OFFSET = ATTRIBUTES_OFFSET + ATTRIBUTES_LENGTH; - public static final int TIMESTAMP_LENGTH = 8; - public static final int KEY_SIZE_OFFSET_V0 = ATTRIBUTES_OFFSET + ATTRIBUTES_LENGTH; - public static final int KEY_SIZE_OFFSET_V1 = TIMESTAMP_OFFSET + TIMESTAMP_LENGTH; - public static final int KEY_SIZE_LENGTH = 4; - public static final int KEY_OFFSET_V0 = KEY_SIZE_OFFSET_V0 + KEY_SIZE_LENGTH; - public static final int KEY_OFFSET_V1 = KEY_SIZE_OFFSET_V1 + KEY_SIZE_LENGTH; - public static final int VALUE_SIZE_LENGTH = 4; - - /** - * The size for the record header - */ - public static final int HEADER_SIZE_V0 = CRC_LENGTH + MAGIC_LENGTH + ATTRIBUTES_LENGTH; - public static final int HEADER_SIZE_V1 = CRC_LENGTH + MAGIC_LENGTH + ATTRIBUTES_LENGTH + TIMESTAMP_LENGTH; - - /** - * The amount of overhead bytes in a record - */ - public static final int RECORD_OVERHEAD_V0 = HEADER_SIZE_V0 + KEY_SIZE_LENGTH + VALUE_SIZE_LENGTH; - - /** - * The amount of overhead bytes in a record - */ - public static final int RECORD_OVERHEAD_V1 = HEADER_SIZE_V1 + KEY_SIZE_LENGTH + VALUE_SIZE_LENGTH; - - /** - * Specifies the mask for the compression code. 3 bits to hold the compression codec. 0 is reserved to indicate no - * compression - */ - private static final int COMPRESSION_CODEC_MASK = 0x07; - - /** - * Specify the mask of timestamp type: 0 for CreateTime, 1 for LogAppendTime. 
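/*
 * Illustrative sketch (not part of the original sources): the offset constants above fix the
 * legacy (v0/v1) on-disk layout as CRC(4) + magic(1) + attributes(1) [+ timestamp(8) in v1],
 * followed by a 4-byte key length, the key, a 4-byte value length and the value. The helper
 * below derives the total record size from that layout.
 */
class LegacyRecordSizeSketch {
    static final int RECORD_OVERHEAD_V0 = 4 + 1 + 1 + 4 + 4;      // crc, magic, attributes, key len, value len
    static final int RECORD_OVERHEAD_V1 = RECORD_OVERHEAD_V0 + 8; // v1 adds an 8-byte timestamp

    static int recordSize(byte magic, int keySize, int valueSize) {
        int overhead = (magic == 0) ? RECORD_OVERHEAD_V0 : RECORD_OVERHEAD_V1;
        return overhead + keySize + valueSize;
    }

    public static void main(String[] args) {
        System.out.println(recordSize((byte) 1, 3, 10)); // 22 + 3 + 10 = 35
    }
}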
- */ - private static final byte TIMESTAMP_TYPE_MASK = 0x08; - - /** - * Timestamp value for records without a timestamp - */ - public static final long NO_TIMESTAMP = -1L; - - private final ByteBuffer buffer; - private final Long wrapperRecordTimestamp; - private final TimestampType wrapperRecordTimestampType; - - public LegacyRecord(ByteBuffer buffer) { - this(buffer, null, null); - } - - public LegacyRecord(ByteBuffer buffer, Long wrapperRecordTimestamp, TimestampType wrapperRecordTimestampType) { - this.buffer = buffer; - this.wrapperRecordTimestamp = wrapperRecordTimestamp; - this.wrapperRecordTimestampType = wrapperRecordTimestampType; - } - - /** - * Compute the checksum of the record from the record contents - */ - public long computeChecksum() { - return Crc32.crc32(buffer, MAGIC_OFFSET, buffer.limit() - MAGIC_OFFSET); - } - - /** - * Retrieve the previously computed CRC for this record - */ - public long checksum() { - return ByteUtils.readUnsignedInt(buffer, CRC_OFFSET); - } - - /** - * Returns true if the crc stored with the record matches the crc computed off the record contents - */ - public boolean isValid() { - return sizeInBytes() >= RECORD_OVERHEAD_V0 && checksum() == computeChecksum(); - } - - public Long wrapperRecordTimestamp() { - return wrapperRecordTimestamp; - } - - public TimestampType wrapperRecordTimestampType() { - return wrapperRecordTimestampType; - } - - /** - * Throw an InvalidRecordException if isValid is false for this record - */ - public void ensureValid() { - if (sizeInBytes() < RECORD_OVERHEAD_V0) - throw new InvalidRecordException("Record is corrupt (crc could not be retrieved as the record is too " - + "small, size = " + sizeInBytes() + ")"); - - if (!isValid()) - throw new InvalidRecordException("Record is corrupt (stored crc = " + checksum() - + ", computed crc = " + computeChecksum() + ")"); - } - - /** - * The complete serialized size of this record in bytes (including crc, header attributes, etc), but - * excluding the log overhead (offset and record size). - * @return the size in bytes - */ - public int sizeInBytes() { - return buffer.limit(); - } - - /** - * The length of the key in bytes - * @return the size in bytes of the key (0 if the key is null) - */ - public int keySize() { - if (magic() == RecordBatch.MAGIC_VALUE_V0) - return buffer.getInt(KEY_SIZE_OFFSET_V0); - else - return buffer.getInt(KEY_SIZE_OFFSET_V1); - } - - /** - * Does the record have a key? - * @return true if so, false otherwise - */ - public boolean hasKey() { - return keySize() >= 0; - } - - /** - * The position where the value size is stored - */ - private int valueSizeOffset() { - if (magic() == RecordBatch.MAGIC_VALUE_V0) - return KEY_OFFSET_V0 + Math.max(0, keySize()); - else - return KEY_OFFSET_V1 + Math.max(0, keySize()); - } - - /** - * The length of the value in bytes - * @return the size in bytes of the value (0 if the value is null) - */ - public int valueSize() { - return buffer.getInt(valueSizeOffset()); - } - - /** - * Check whether the value field of this record is null. - * @return true if the value is null, false otherwise - */ - public boolean hasNullValue() { - return valueSize() < 0; - } - - /** - * The magic value (i.e. 
message format version) of this record - * @return the magic value - */ - public byte magic() { - return buffer.get(MAGIC_OFFSET); - } - - /** - * The attributes stored with this record - * @return the attributes - */ - public byte attributes() { - return buffer.get(ATTRIBUTES_OFFSET); - } - - /** - * When magic value is greater than 0, the timestamp of a record is determined in the following way: - * 1. wrapperRecordTimestampType = null and wrapperRecordTimestamp is null - Uncompressed message, timestamp is in the message. - * 2. wrapperRecordTimestampType = LOG_APPEND_TIME and WrapperRecordTimestamp is not null - Compressed message using LOG_APPEND_TIME - * 3. wrapperRecordTimestampType = CREATE_TIME and wrapperRecordTimestamp is not null - Compressed message using CREATE_TIME - * - * @return the timestamp as determined above - */ - public long timestamp() { - if (magic() == RecordBatch.MAGIC_VALUE_V0) - return RecordBatch.NO_TIMESTAMP; - else { - // case 2 - if (wrapperRecordTimestampType == TimestampType.LOG_APPEND_TIME && wrapperRecordTimestamp != null) - return wrapperRecordTimestamp; - // Case 1, 3 - else - return buffer.getLong(TIMESTAMP_OFFSET); - } - } - - /** - * Get the timestamp type of the record. - * - * @return The timestamp type or {@link TimestampType#NO_TIMESTAMP_TYPE} if the magic is 0. - */ - public TimestampType timestampType() { - return timestampType(magic(), wrapperRecordTimestampType, attributes()); - } - - /** - * The compression type used with this record - */ - public CompressionType compressionType() { - return CompressionType.forId(buffer.get(ATTRIBUTES_OFFSET) & COMPRESSION_CODEC_MASK); - } - - /** - * A ByteBuffer containing the value of this record - * @return the value or null if the value for this record is null - */ - public ByteBuffer value() { - return Utils.sizeDelimited(buffer, valueSizeOffset()); - } - - /** - * A ByteBuffer containing the message key - * @return the buffer or null if the key for this record is null - */ - public ByteBuffer key() { - if (magic() == RecordBatch.MAGIC_VALUE_V0) - return Utils.sizeDelimited(buffer, KEY_SIZE_OFFSET_V0); - else - return Utils.sizeDelimited(buffer, KEY_SIZE_OFFSET_V1); - } - - /** - * Get the underlying buffer backing this record instance. - * - * @return the buffer - */ - public ByteBuffer buffer() { - return this.buffer; - } - - public String toString() { - if (magic() > 0) - return String.format("Record(magic=%d, attributes=%d, crc=%d, %s=%d, key=%d bytes, value=%d bytes)", - magic(), - attributes(), - checksum(), - timestampType(), - timestamp(), - key() == null ? 0 : key().limit(), - value() == null ? 0 : value().limit()); - else - return String.format("Record(magic=%d, attributes=%d, crc=%d, key=%d bytes, value=%d bytes)", - magic(), - attributes(), - checksum(), - key() == null ? 0 : key().limit(), - value() == null ? 0 : value().limit()); - } - - public boolean equals(Object other) { - if (this == other) - return true; - if (other == null) - return false; - if (!other.getClass().equals(LegacyRecord.class)) - return false; - LegacyRecord record = (LegacyRecord) other; - return this.buffer.equals(record.buffer); - } - - public int hashCode() { - return buffer.hashCode(); - } - - /** - * Create a new record instance. If the record's compression type is not none, then - * its value payload should be already compressed with the specified type; the constructor - * would always write the value payload as is and will not do the compression itself. 
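/*
 * Illustrative sketch (not part of the original sources): timestamp() above resolves a legacy
 * record's timestamp from the three cases described in its javadoc: v0 records have none, records
 * inside a LOG_APPEND_TIME-stamped wrapper inherit the wrapper's timestamp, and everything else
 * uses the timestamp stored in the record itself. That decision is restated below.
 */
class LegacyTimestampSketch {
    static final long NO_TIMESTAMP = -1L;

    static long resolveTimestamp(byte magic,
                                 boolean wrapperIsLogAppendTime,
                                 Long wrapperTimestamp,
                                 long recordTimestamp) {
        if (magic == 0)
            return NO_TIMESTAMP;             // v0 carries no timestamp at all
        if (wrapperIsLogAppendTime && wrapperTimestamp != null)
            return wrapperTimestamp;         // case 2: compressed with LOG_APPEND_TIME
        return recordTimestamp;              // cases 1 and 3: use the inner timestamp
    }
}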
- * - * @param magic The magic value to use - * @param timestamp The timestamp of the record - * @param key The key of the record (null, if none) - * @param value The record value - * @param compressionType The compression type used on the contents of the record (if any) - * @param timestampType The timestamp type to be used for this record - */ - public static LegacyRecord create(byte magic, - long timestamp, - byte[] key, - byte[] value, - CompressionType compressionType, - TimestampType timestampType) { - int keySize = key == null ? 0 : key.length; - int valueSize = value == null ? 0 : value.length; - ByteBuffer buffer = ByteBuffer.allocate(recordSize(magic, keySize, valueSize)); - write(buffer, magic, timestamp, wrapNullable(key), wrapNullable(value), compressionType, timestampType); - buffer.rewind(); - return new LegacyRecord(buffer); - } - - public static LegacyRecord create(byte magic, long timestamp, byte[] key, byte[] value) { - return create(magic, timestamp, key, value, CompressionType.NONE, TimestampType.CREATE_TIME); - } - - /** - * Write the header for a compressed record set in-place (i.e. assuming the compressed record data has already - * been written at the value offset in a wrapped record). This lets you dynamically create a compressed message - * set, and then go back later and fill in its size and CRC, which saves the need for copying to another buffer. - * - * @param buffer The buffer containing the compressed record data positioned at the first offset of the - * @param magic The magic value of the record set - * @param recordSize The size of the record (including record overhead) - * @param timestamp The timestamp of the wrapper record - * @param compressionType The compression type used - * @param timestampType The timestamp type of the wrapper record - */ - public static void writeCompressedRecordHeader(ByteBuffer buffer, - byte magic, - int recordSize, - long timestamp, - CompressionType compressionType, - TimestampType timestampType) { - int recordPosition = buffer.position(); - int valueSize = recordSize - recordOverhead(magic); - - // write the record header with a null value (the key is always null for the wrapper) - write(buffer, magic, timestamp, null, null, compressionType, timestampType); - buffer.position(recordPosition); - - // now fill in the value size - buffer.putInt(recordPosition + keyOffset(magic), valueSize); - - // compute and fill the crc from the beginning of the message - long crc = Crc32.crc32(buffer, MAGIC_OFFSET, recordSize - MAGIC_OFFSET); - ByteUtils.writeUnsignedInt(buffer, recordPosition + CRC_OFFSET, crc); - } - - private static void write(ByteBuffer buffer, - byte magic, - long timestamp, - ByteBuffer key, - ByteBuffer value, - CompressionType compressionType, - TimestampType timestampType) { - try { - DataOutputStream out = new DataOutputStream(new ByteBufferOutputStream(buffer)); - write(out, magic, timestamp, key, value, compressionType, timestampType); - } catch (IOException e) { - throw new KafkaException(e); - } - } - - /** - * Write the record data with the given compression type and return the computed crc. - * - * @param out The output stream to write to - * @param magic The magic value to be used - * @param timestamp The timestamp of the record - * @param key The record key - * @param value The record value - * @param compressionType The compression type - * @param timestampType The timestamp type - * @return the computed CRC for this record. - * @throws IOException for any IO errors writing to the output stream. 
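/*
 * Illustrative sketch (not part of the original sources): writeCompressedRecordHeader(...) above
 * relies on a reserve-then-backfill pattern: the header is written first with placeholder
 * size/CRC fields, and those fields are patched in place once the compressed payload length is
 * known. The generic form of that pattern is shown below with a single length field.
 */
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

class BackfillSketch {
    static void writeLengthPrefixed(ByteBuffer buffer, String payload) {
        int lengthPosition = buffer.position();
        buffer.putInt(0);                               // reserve the 4-byte length field
        byte[] bytes = payload.getBytes(StandardCharsets.UTF_8);
        buffer.put(bytes);                              // write the payload
        buffer.putInt(lengthPosition, bytes.length);    // backfill the real length in place
    }
}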
- */ - public static long write(DataOutputStream out, - byte magic, - long timestamp, - byte[] key, - byte[] value, - CompressionType compressionType, - TimestampType timestampType) throws IOException { - return write(out, magic, timestamp, wrapNullable(key), wrapNullable(value), compressionType, timestampType); - } - - public static long write(DataOutputStream out, - byte magic, - long timestamp, - ByteBuffer key, - ByteBuffer value, - CompressionType compressionType, - TimestampType timestampType) throws IOException { - byte attributes = computeAttributes(magic, compressionType, timestampType); - long crc = computeChecksum(magic, attributes, timestamp, key, value); - write(out, magic, crc, attributes, timestamp, key, value); - return crc; - } - - /** - * Write a record using raw fields (without validation). This should only be used in testing. - */ - public static void write(DataOutputStream out, - byte magic, - long crc, - byte attributes, - long timestamp, - byte[] key, - byte[] value) throws IOException { - write(out, magic, crc, attributes, timestamp, wrapNullable(key), wrapNullable(value)); - } - - // Write a record to the buffer, if the record's compression type is none, then - // its value payload should be already compressed with the specified type - private static void write(DataOutputStream out, - byte magic, - long crc, - byte attributes, - long timestamp, - ByteBuffer key, - ByteBuffer value) throws IOException { - if (magic != RecordBatch.MAGIC_VALUE_V0 && magic != RecordBatch.MAGIC_VALUE_V1) - throw new IllegalArgumentException("Invalid magic value " + magic); - if (timestamp < 0 && timestamp != RecordBatch.NO_TIMESTAMP) - throw new IllegalArgumentException("Invalid message timestamp " + timestamp); - - // write crc - out.writeInt((int) (crc & 0xffffffffL)); - // write magic value - out.writeByte(magic); - // write attributes - out.writeByte(attributes); - - // maybe write timestamp - if (magic > RecordBatch.MAGIC_VALUE_V0) - out.writeLong(timestamp); - - // write the key - if (key == null) { - out.writeInt(-1); - } else { - int size = key.remaining(); - out.writeInt(size); - Utils.writeTo(out, key, size); - } - // write the value - if (value == null) { - out.writeInt(-1); - } else { - int size = value.remaining(); - out.writeInt(size); - Utils.writeTo(out, value, size); - } - } - - static int recordSize(byte magic, ByteBuffer key, ByteBuffer value) { - return recordSize(magic, key == null ? 0 : key.limit(), value == null ? 
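/*
 * Illustrative sketch (not part of the original sources): computeChecksum(...) above folds the
 * magic byte, attributes, optional timestamp, and the length-prefixed key and value into one
 * CRC32. The version below uses java.util.zip.CRC32 and assumes big-endian encoding for the
 * length and timestamp fields, matching how the surrounding code lays out ints and longs.
 */
import java.nio.ByteBuffer;
import java.util.zip.CRC32;

class LegacyChecksumSketch {
    static long checksum(byte magic, byte attributes, long timestamp, byte[] key, byte[] value) {
        CRC32 crc = new CRC32();
        crc.update(magic);
        crc.update(attributes);
        if (magic > 0)
            crc.update(ByteBuffer.allocate(8).putLong(timestamp).array()); // v1 timestamp
        updateSized(crc, key);
        updateSized(crc, value);
        return crc.getValue();
    }

    private static void updateSized(CRC32 crc, byte[] field) {
        if (field == null) {
            crc.update(ByteBuffer.allocate(4).putInt(-1).array());          // null is encoded as length -1
        } else {
            crc.update(ByteBuffer.allocate(4).putInt(field.length).array());
            crc.update(field);
        }
    }
}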
0 : value.limit()); - } - - public static int recordSize(byte magic, int keySize, int valueSize) { - return recordOverhead(magic) + keySize + valueSize; - } - - // visible only for testing - public static byte computeAttributes(byte magic, CompressionType type, TimestampType timestampType) { - byte attributes = 0; - if (type.id > 0) - attributes |= COMPRESSION_CODEC_MASK & type.id; - if (magic > RecordBatch.MAGIC_VALUE_V0) { - if (timestampType == TimestampType.NO_TIMESTAMP_TYPE) - throw new IllegalArgumentException("Timestamp type must be provided to compute attributes for " + - "message format v1"); - if (timestampType == TimestampType.LOG_APPEND_TIME) - attributes |= TIMESTAMP_TYPE_MASK; - } - return attributes; - } - - // visible only for testing - public static long computeChecksum(byte magic, byte attributes, long timestamp, byte[] key, byte[] value) { - return computeChecksum(magic, attributes, timestamp, wrapNullable(key), wrapNullable(value)); - } - - /** - * Compute the checksum of the record from the attributes, key and value payloads - */ - private static long computeChecksum(byte magic, byte attributes, long timestamp, ByteBuffer key, ByteBuffer value) { - Crc32 crc = new Crc32(); - crc.update(magic); - crc.update(attributes); - if (magic > RecordBatch.MAGIC_VALUE_V0) - Checksums.updateLong(crc, timestamp); - // update for the key - if (key == null) { - Checksums.updateInt(crc, -1); - } else { - int size = key.remaining(); - Checksums.updateInt(crc, size); - Checksums.update(crc, key, size); - } - // update for the value - if (value == null) { - Checksums.updateInt(crc, -1); - } else { - int size = value.remaining(); - Checksums.updateInt(crc, size); - Checksums.update(crc, value, size); - } - return crc.getValue(); - } - - static int recordOverhead(byte magic) { - if (magic == 0) - return RECORD_OVERHEAD_V0; - else if (magic == 1) - return RECORD_OVERHEAD_V1; - throw new IllegalArgumentException("Invalid magic used in LegacyRecord: " + magic); - } - - static int headerSize(byte magic) { - if (magic == 0) - return HEADER_SIZE_V0; - else if (magic == 1) - return HEADER_SIZE_V1; - throw new IllegalArgumentException("Invalid magic used in LegacyRecord: " + magic); - } - - private static int keyOffset(byte magic) { - if (magic == 0) - return KEY_OFFSET_V0; - else if (magic == 1) - return KEY_OFFSET_V1; - throw new IllegalArgumentException("Invalid magic used in LegacyRecord: " + magic); - } - - public static TimestampType timestampType(byte magic, TimestampType wrapperRecordTimestampType, byte attributes) { - if (magic == 0) - return TimestampType.NO_TIMESTAMP_TYPE; - else if (wrapperRecordTimestampType != null) - return wrapperRecordTimestampType; - else - return (attributes & TIMESTAMP_TYPE_MASK) == 0 ? TimestampType.CREATE_TIME : TimestampType.LOG_APPEND_TIME; - } - -} diff --git a/clients/src/main/java/org/oracle/okafka/common/record/LogInputStream.java b/clients/src/main/java/org/oracle/okafka/common/record/LogInputStream.java deleted file mode 100644 index 880c536..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/record/LogInputStream.java +++ /dev/null @@ -1,51 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.record; - -import java.io.IOException; - -/** - * An abstraction between an underlying input stream and record iterators, a {@link LogInputStream} only returns - * the batches at one level. For magic values 0 and 1, this means that it can either handle iteration - * at the top level of the log or deep iteration within the payload of a single message, but it does not attempt - * to handle both. For magic value 2, this is only used for iterating over the top-level record batches (inner - * records do not follow the {@link RecordBatch} interface. - * - * The generic typing allows for implementations which present only a view of the log entries, which enables more - * efficient iteration when the record data is not actually needed. See for example - * {@link FileLogInputStream.FileChannelRecordBatch} in which the record is not brought into memory until needed. - * - * @param Type parameter of the log entry - */ -interface LogInputStream { - - /** - * Get the next record batch from the underlying input stream. - * - * @return The next record batch or null if there is none - * @throws IOException for any IO errors - */ - T nextBatch() throws IOException; -} diff --git a/clients/src/main/java/org/oracle/okafka/common/record/MemoryRecords.java b/clients/src/main/java/org/oracle/okafka/common/record/MemoryRecords.java deleted file mode 100644 index 094bed0..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/record/MemoryRecords.java +++ /dev/null @@ -1,653 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
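/*
 * Illustrative sketch (not part of the original sources): LogInputStream above is a pull-style
 * abstraction whose only contract is "return the next batch, or null at the end". The minimal
 * consumer loop that contract implies looks like this; BatchSource and Batch are made-up names.
 */
import java.io.IOException;

class LogInputStreamSketch {
    interface Batch {
        int sizeInBytes();
    }

    interface BatchSource<T extends Batch> {
        T nextBatch() throws IOException; // null means no more batches
    }

    static <T extends Batch> long totalBytes(BatchSource<T> source) throws IOException {
        long total = 0;
        for (T batch = source.nextBatch(); batch != null; batch = source.nextBatch())
            total += batch.sizeInBytes();
        return total;
    }
}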
- */ - -package org.oracle.okafka.common.record; - -import org.oracle.okafka.common.KafkaException; -import org.oracle.okafka.common.TopicPartition; -import org.oracle.okafka.common.errors.CorruptRecordException; -import org.oracle.okafka.common.record.MemoryRecords.RecordFilter.BatchRetention; -import org.oracle.okafka.common.utils.AbstractIterator; -import org.oracle.okafka.common.utils.ByteBufferOutputStream; -import org.oracle.okafka.common.utils.CloseableIterator; -//import org.oracle.okafka.common.utils.Time; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.nio.ByteBuffer; -import java.nio.channels.GatheringByteChannel; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Iterator; -import java.util.List; -import java.util.Objects; - -/** - * A {@link Records} implementation backed by a ByteBuffer. This is used only for reading or - * modifying in-place an existing buffer of record batches. To create a new buffer see {@link MemoryRecordsBuilder}, - * or one of the {@link #builder(ByteBuffer, byte, CompressionType, TimestampType, long)} variants. - */ -public class MemoryRecords extends AbstractRecords { - private static final Logger log = LoggerFactory.getLogger(MemoryRecords.class); - public static final MemoryRecords EMPTY = MemoryRecords.readableRecords(ByteBuffer.allocate(0)); - - private final ByteBuffer buffer; - - private final Iterable batches = new Iterable() { - @Override - public Iterator iterator() { - return batchIterator(); - } - }; - - private int validBytes = -1; - - // Construct a writable memory records - private MemoryRecords(ByteBuffer buffer) { - Objects.requireNonNull(buffer, "buffer should not be null"); - this.buffer = buffer; - } - - @Override - public int sizeInBytes() { - return buffer.limit(); - } - - /* @Override - public long writeTo(GatheringByteChannel channel, long position, int length) throws IOException { - if (position > Integer.MAX_VALUE) - throw new IllegalArgumentException("position should not be greater than Integer.MAX_VALUE: " + position); - if (position + length > buffer.limit()) - throw new IllegalArgumentException("position+length should not be greater than buffer.limit(), position: " - + position + ", length: " + length + ", buffer.limit(): " + buffer.limit()); - - int pos = (int) position; - ByteBuffer dup = buffer.duplicate(); - dup.position(pos); - dup.limit(pos + length); - return channel.write(dup); - }*/ - - /** - * Write all records to the given channel (including partial records). - * @param channel The channel to write to - * @return The number of bytes written - * @throws IOException For any IO errors writing to the channel - */ - public int writeFullyTo(GatheringByteChannel channel) throws IOException { - buffer.mark(); - int written = 0; - while (written < sizeInBytes()) - written += channel.write(buffer); - buffer.reset(); - return written; - } - - /** - * The total number of bytes in this message set not including any partial, trailing messages. This - * may be smaller than what is returned by {@link #sizeInBytes()}. 
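/*
 * Illustrative sketch (not part of the original sources): writeFullyTo(...) above marks the
 * backing buffer, keeps calling channel.write(...) until every byte (including any partial
 * trailing batch) has been written, and then resets the buffer so it can be re-read. The same
 * loop is shown below against a plain WritableByteChannel.
 */
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.WritableByteChannel;

class WriteFullySketch {
    static int writeFullyTo(ByteBuffer buffer, WritableByteChannel channel) throws IOException {
        buffer.mark();                        // remember the read position
        int written = 0;
        int total = buffer.remaining();
        while (written < total)
            written += channel.write(buffer); // write() may make only partial progress
        buffer.reset();                       // rewind so the records stay readable
        return written;
    }
}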
- * @return The number of valid bytes - */ - public int validBytes() { - if (validBytes >= 0) - return validBytes; - - int bytes = 0; - for (RecordBatch batch : batches()) - bytes += batch.sizeInBytes(); - - this.validBytes = bytes; - return bytes; - } - - /*@Override - public ConvertedRecords downConvert(byte toMagic, long firstOffset, Time time) { - return RecordsUtil.downConvert(batches(), toMagic, firstOffset, time); - }*/ - - @Override - public AbstractIterator batchIterator() { - return new RecordBatchIterator<>(new ByteBufferLogInputStream(buffer.duplicate(), Integer.MAX_VALUE)); - } - - /** - * Validates the header of the first batch and returns batch size. - * @return first batch size including LOG_OVERHEAD if buffer contains header up to - * magic byte, null otherwise - * @throws CorruptRecordException if record size or magic is invalid - */ - public Integer firstBatchSize() { - if (buffer.remaining() < HEADER_SIZE_UP_TO_MAGIC) - return null; - return new ByteBufferLogInputStream(buffer, Integer.MAX_VALUE).nextBatchSize(); - } - - /** - * Filter the records into the provided ByteBuffer. - * - * @param partition The partition that is filtered (used only for logging) - * @param filter The filter function - * @param destinationBuffer The byte buffer to write the filtered records to - * @param maxRecordBatchSize The maximum record batch size. Note this is not a hard limit: if a batch - * exceeds this after filtering, we log a warning, but the batch will still be - * created. - * @param decompressionBufferSupplier The supplier of ByteBuffer(s) used for decompression if supported. For small - * record batches, allocating a potentially large buffer (64 KB for LZ4) will - * dominate the cost of decompressing and iterating over the records in the - * batch. As such, a supplier that reuses buffers will have a significant - * performance impact. - * @return A FilterResult with a summary of the output (for metrics) and potentially an overflow buffer - */ - public FilterResult filterTo(TopicPartition partition, RecordFilter filter, ByteBuffer destinationBuffer, - int maxRecordBatchSize, BufferSupplier decompressionBufferSupplier) { - return filterTo(partition, batches(), filter, destinationBuffer, maxRecordBatchSize, decompressionBufferSupplier); - } - - private static FilterResult filterTo(TopicPartition partition, Iterable batches, - RecordFilter filter, ByteBuffer destinationBuffer, int maxRecordBatchSize, - BufferSupplier decompressionBufferSupplier) { - long maxTimestamp = RecordBatch.NO_TIMESTAMP; - long maxOffset = -1L; - long shallowOffsetOfMaxTimestamp = -1L; - int messagesRead = 0; - int bytesRead = 0; // bytes processed from `batches` - int messagesRetained = 0; - int bytesRetained = 0; - - ByteBufferOutputStream bufferOutputStream = new ByteBufferOutputStream(destinationBuffer); - - for (MutableRecordBatch batch : batches) { - bytesRead += batch.sizeInBytes(); - - BatchRetention batchRetention = filter.checkBatchRetention(batch); - if (batchRetention == BatchRetention.DELETE) - continue; - - // We use the absolute offset to decide whether to retain the message or not. Due to KAFKA-4298, we have to - // allow for the possibility that a previous version corrupted the log by writing a compressed record batch - // with a magic value not matching the magic of the records (magic < 2). This will be fixed as we - // recopy the messages to the destination buffer. 
- - byte batchMagic = batch.magic(); - boolean writeOriginalBatch = true; - List retainedRecords = new ArrayList<>(); - - try (final CloseableIterator iterator = batch.streamingIterator(decompressionBufferSupplier)) { - while (iterator.hasNext()) { - Record record = iterator.next(); - messagesRead += 1; - - if (filter.shouldRetainRecord(batch, record)) { - // Check for log corruption due to KAFKA-4298. If we find it, make sure that we overwrite - // the corrupted batch with correct data. - if (!record.hasMagic(batchMagic)) - writeOriginalBatch = false; - - if (record.offset() > maxOffset) - maxOffset = record.offset(); - - retainedRecords.add(record); - } else { - writeOriginalBatch = false; - } - } - } - - if (!retainedRecords.isEmpty()) { - if (writeOriginalBatch) { - batch.writeTo(bufferOutputStream); - messagesRetained += retainedRecords.size(); - bytesRetained += batch.sizeInBytes(); - if (batch.maxTimestamp() > maxTimestamp) { - maxTimestamp = batch.maxTimestamp(); - shallowOffsetOfMaxTimestamp = batch.lastOffset(); - } - } else { - MemoryRecordsBuilder builder = buildRetainedRecordsInto(batch, retainedRecords, bufferOutputStream); - MemoryRecords records = builder.build(); - int filteredBatchSize = records.sizeInBytes(); - - messagesRetained += retainedRecords.size(); - bytesRetained += filteredBatchSize; - - if (filteredBatchSize > batch.sizeInBytes() && filteredBatchSize > maxRecordBatchSize) - log.warn("Record batch from {} with last offset {} exceeded max record batch size {} after cleaning " + - "(new size is {}). Consumers with version earlier than 0.10.1.0 may need to " + - "increase their fetch sizes.", - partition, batch.lastOffset(), maxRecordBatchSize, filteredBatchSize); - - MemoryRecordsBuilder.RecordsInfo info = builder.info(); - if (info.maxTimestamp > maxTimestamp) { - maxTimestamp = info.maxTimestamp; - shallowOffsetOfMaxTimestamp = info.shallowOffsetOfMaxTimestamp; - } - } - } else if (batchRetention == BatchRetention.RETAIN_EMPTY) { - if (batchMagic < RecordBatch.MAGIC_VALUE_V2) - throw new IllegalStateException("Empty batches are only supported for magic v2 and above"); - - bufferOutputStream.ensureRemaining(DefaultRecordBatch.RECORD_BATCH_OVERHEAD); - DefaultRecordBatch.writeEmptyHeader(bufferOutputStream.buffer(), batchMagic, batch.producerId(), - batch.producerEpoch(), batch.baseSequence(), batch.baseOffset(), batch.lastOffset(), - batch.partitionLeaderEpoch(), batch.timestampType(), batch.maxTimestamp(), - batch.isTransactional(), batch.isControlBatch()); - } - - // If we had to allocate a new buffer to fit the filtered output (see KAFKA-5316), return early to - // avoid the need for additional allocations. - ByteBuffer outputBuffer = bufferOutputStream.buffer(); - if (outputBuffer != destinationBuffer) - return new FilterResult(outputBuffer, messagesRead, bytesRead, messagesRetained, bytesRetained, - maxOffset, maxTimestamp, shallowOffsetOfMaxTimestamp); - } - - return new FilterResult(destinationBuffer, messagesRead, bytesRead, messagesRetained, bytesRetained, - maxOffset, maxTimestamp, shallowOffsetOfMaxTimestamp); - } - - private static MemoryRecordsBuilder buildRetainedRecordsInto(RecordBatch originalBatch, - List retainedRecords, - ByteBufferOutputStream bufferOutputStream) { - byte magic = originalBatch.magic(); - TimestampType timestampType = originalBatch.timestampType(); - long logAppendTime = timestampType == TimestampType.LOG_APPEND_TIME ? 
- originalBatch.maxTimestamp() : RecordBatch.NO_TIMESTAMP; - long baseOffset = magic >= RecordBatch.MAGIC_VALUE_V2 ? - originalBatch.baseOffset() : retainedRecords.get(0).offset(); - - MemoryRecordsBuilder builder = new MemoryRecordsBuilder(bufferOutputStream, magic, - originalBatch.compressionType(), timestampType, baseOffset, logAppendTime, originalBatch.producerId(), - originalBatch.producerEpoch(), originalBatch.baseSequence(), originalBatch.isTransactional(), - originalBatch.isControlBatch(), originalBatch.partitionLeaderEpoch(), bufferOutputStream.limit()); - - for (Record record : retainedRecords) - builder.append(record); - - if (magic >= RecordBatch.MAGIC_VALUE_V2) - // we must preserve the last offset from the initial batch in order to ensure that the - // last sequence number from the batch remains even after compaction. Otherwise, the producer - // could incorrectly see an out of sequence error. - builder.overrideLastOffset(originalBatch.lastOffset()); - - return builder; - } - - /** - * Get the byte buffer that backs this instance for reading. - */ - public ByteBuffer buffer() { - return buffer.duplicate(); - } - - @Override - public Iterable batches() { - return batches; - } - - @Override - public String toString() { - StringBuilder builder = new StringBuilder(); - builder.append('['); - - Iterator batchIterator = batches.iterator(); - while (batchIterator.hasNext()) { - RecordBatch batch = batchIterator.next(); - try (CloseableIterator recordsIterator = batch.streamingIterator(BufferSupplier.create())) { - while (recordsIterator.hasNext()) { - Record record = recordsIterator.next(); - appendRecordToStringBuilder(builder, record.toString()); - if (recordsIterator.hasNext()) - builder.append(", "); - } - } catch (KafkaException e) { - appendRecordToStringBuilder(builder, "CORRUPTED"); - } - if (batchIterator.hasNext()) - builder.append(", "); - } - builder.append(']'); - return builder.toString(); - } - - private void appendRecordToStringBuilder(StringBuilder builder, String recordAsString) { - builder.append('(') - .append("record=") - .append(recordAsString) - .append(")"); - } - - @Override - public boolean equals(Object o) { - if (this == o) - return true; - if (o == null || getClass() != o.getClass()) - return false; - - MemoryRecords that = (MemoryRecords) o; - - return buffer.equals(that.buffer); - } - - @Override - public int hashCode() { - return buffer.hashCode(); - } - - public static abstract class RecordFilter { - public enum BatchRetention { - DELETE, // Delete the batch without inspecting records - RETAIN_EMPTY, // Retain the batch even if it is empty - DELETE_EMPTY // Delete the batch if it is empty - } - - /** - * Check whether the full batch can be discarded (i.e. whether we even need to - * check the records individually). - */ - protected abstract BatchRetention checkBatchRetention(RecordBatch batch); - - /** - * Check whether a record should be retained in the log. Note that {@link #checkBatchRetention(RecordBatch)} - * is used prior to checking individual record retention. Only records from batches which were not - * explicitly discarded with {@link BatchRetention#DELETE} will be considered. 
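The RecordFilter contract described above (checkBatchRetention per batch first, then shouldRetainRecord per record) pairs with filterTo. A hedged sketch of a filter that drops tombstones; the wrapper class, the 1 MiB batch-size limit and the destination-buffer sizing are illustrative choices, not part of this file:

```java
import java.nio.ByteBuffer;

import org.oracle.okafka.common.TopicPartition;
import org.oracle.okafka.common.record.BufferSupplier;
import org.oracle.okafka.common.record.MemoryRecords;
import org.oracle.okafka.common.record.Record;
import org.oracle.okafka.common.record.RecordBatch;

public class TombstoneFilterSketch {
    // Keep only records that still carry a value; batches are dropped only if empty.
    static final MemoryRecords.RecordFilter KEEP_NON_TOMBSTONES = new MemoryRecords.RecordFilter() {
        @Override
        protected BatchRetention checkBatchRetention(RecordBatch batch) {
            // Inspect the records; delete the batch only when nothing survives.
            return BatchRetention.DELETE_EMPTY;
        }

        @Override
        protected boolean shouldRetainRecord(RecordBatch batch, Record record) {
            return record.hasValue(); // discard tombstones (null values)
        }
    };

    static MemoryRecords.FilterResult compact(MemoryRecords records, TopicPartition tp) {
        ByteBuffer out = ByteBuffer.allocate(records.sizeInBytes());
        // filterTo grows an overflow buffer internally if 'out' turns out to be too small.
        return records.filterTo(tp, KEEP_NON_TOMBSTONES, out, 1 << 20, BufferSupplier.create());
    }
}
```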
- */ - protected abstract boolean shouldRetainRecord(RecordBatch recordBatch, Record record); - } - - public static class FilterResult { - public final ByteBuffer output; - public final int messagesRead; - public final int bytesRead; - public final int messagesRetained; - public final int bytesRetained; - public final long maxOffset; - public final long maxTimestamp; - public final long shallowOffsetOfMaxTimestamp; - - // Note that `bytesRead` should contain only bytes from batches that have been processed, - // i.e. bytes from `messagesRead` and any discarded batches. - public FilterResult(ByteBuffer output, - int messagesRead, - int bytesRead, - int messagesRetained, - int bytesRetained, - long maxOffset, - long maxTimestamp, - long shallowOffsetOfMaxTimestamp) { - this.output = output; - this.messagesRead = messagesRead; - this.bytesRead = bytesRead; - this.messagesRetained = messagesRetained; - this.bytesRetained = bytesRetained; - this.maxOffset = maxOffset; - this.maxTimestamp = maxTimestamp; - this.shallowOffsetOfMaxTimestamp = shallowOffsetOfMaxTimestamp; - } - } - - public static MemoryRecords readableRecords(ByteBuffer buffer) { - return new MemoryRecords(buffer); - } - - public static MemoryRecordsBuilder builder(ByteBuffer buffer, - CompressionType compressionType, - TimestampType timestampType, - long baseOffset) { - return builder(buffer, RecordBatch.CURRENT_MAGIC_VALUE, compressionType, timestampType, baseOffset); - } - - public static MemoryRecordsBuilder idempotentBuilder(ByteBuffer buffer, - CompressionType compressionType, - long baseOffset, - long producerId, - short producerEpoch, - int baseSequence) { - return builder(buffer, RecordBatch.CURRENT_MAGIC_VALUE, compressionType, TimestampType.CREATE_TIME, - baseOffset, System.currentTimeMillis(), producerId, producerEpoch, baseSequence); - } - - public static MemoryRecordsBuilder builder(ByteBuffer buffer, - byte magic, - CompressionType compressionType, - TimestampType timestampType, - long baseOffset, - long logAppendTime) { - return builder(buffer, magic, compressionType, timestampType, baseOffset, logAppendTime, - RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, RecordBatch.NO_SEQUENCE, false, - RecordBatch.NO_PARTITION_LEADER_EPOCH); - } - - public static MemoryRecordsBuilder builder(ByteBuffer buffer, - byte magic, - CompressionType compressionType, - TimestampType timestampType, - long baseOffset) { - long logAppendTime = RecordBatch.NO_TIMESTAMP; - if (timestampType == TimestampType.LOG_APPEND_TIME) - logAppendTime = System.currentTimeMillis(); - return builder(buffer, magic, compressionType, timestampType, baseOffset, logAppendTime, - RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, RecordBatch.NO_SEQUENCE, false, - RecordBatch.NO_PARTITION_LEADER_EPOCH); - } - - public static MemoryRecordsBuilder builder(ByteBuffer buffer, - byte magic, - CompressionType compressionType, - TimestampType timestampType, - long baseOffset, - long logAppendTime, - int partitionLeaderEpoch) { - return builder(buffer, magic, compressionType, timestampType, baseOffset, logAppendTime, - RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, RecordBatch.NO_SEQUENCE, false, partitionLeaderEpoch); - } - - public static MemoryRecordsBuilder builder(ByteBuffer buffer, - CompressionType compressionType, - long baseOffset, - long producerId, - short producerEpoch, - int baseSequence, - boolean isTransactional) { - return builder(buffer, RecordBatch.CURRENT_MAGIC_VALUE, compressionType, TimestampType.CREATE_TIME, 
baseOffset, - RecordBatch.NO_TIMESTAMP, producerId, producerEpoch, baseSequence, isTransactional, - RecordBatch.NO_PARTITION_LEADER_EPOCH); - } - - public static MemoryRecordsBuilder builder(ByteBuffer buffer, - byte magic, - CompressionType compressionType, - TimestampType timestampType, - long baseOffset, - long logAppendTime, - long producerId, - short producerEpoch, - int baseSequence) { - return builder(buffer, magic, compressionType, timestampType, baseOffset, logAppendTime, - producerId, producerEpoch, baseSequence, false, RecordBatch.NO_PARTITION_LEADER_EPOCH); - } - - public static MemoryRecordsBuilder builder(ByteBuffer buffer, - byte magic, - CompressionType compressionType, - TimestampType timestampType, - long baseOffset, - long logAppendTime, - long producerId, - short producerEpoch, - int baseSequence, - boolean isTransactional, - int partitionLeaderEpoch) { - return builder(buffer, magic, compressionType, timestampType, baseOffset, - logAppendTime, producerId, producerEpoch, baseSequence, isTransactional, false, partitionLeaderEpoch); - } - - public static MemoryRecordsBuilder builder(ByteBuffer buffer, - byte magic, - CompressionType compressionType, - TimestampType timestampType, - long baseOffset, - long logAppendTime, - long producerId, - short producerEpoch, - int baseSequence, - boolean isTransactional, - boolean isControlBatch, - int partitionLeaderEpoch) { - return new MemoryRecordsBuilder(buffer, magic, compressionType, timestampType, baseOffset, - logAppendTime, producerId, producerEpoch, baseSequence, isTransactional, isControlBatch, partitionLeaderEpoch, - buffer.remaining()); - } - - public static MemoryRecords withRecords(CompressionType compressionType, SimpleRecord... records) { - return withRecords(RecordBatch.CURRENT_MAGIC_VALUE, compressionType, records); - } - - public static MemoryRecords withRecords(CompressionType compressionType, int partitionLeaderEpoch, SimpleRecord... records) { - return withRecords(RecordBatch.CURRENT_MAGIC_VALUE, 0L, compressionType, TimestampType.CREATE_TIME, - RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, RecordBatch.NO_SEQUENCE, - partitionLeaderEpoch, false, records); - } - - public static MemoryRecords withRecords(byte magic, CompressionType compressionType, SimpleRecord... records) { - return withRecords(magic, 0L, compressionType, TimestampType.CREATE_TIME, records); - } - - public static MemoryRecords withRecords(long initialOffset, CompressionType compressionType, SimpleRecord... records) { - return withRecords(RecordBatch.CURRENT_MAGIC_VALUE, initialOffset, compressionType, TimestampType.CREATE_TIME, - records); - } - - public static MemoryRecords withRecords(byte magic, long initialOffset, CompressionType compressionType, SimpleRecord... records) { - return withRecords(magic, initialOffset, compressionType, TimestampType.CREATE_TIME, records); - } - - public static MemoryRecords withRecords(long initialOffset, CompressionType compressionType, Integer partitionLeaderEpoch, SimpleRecord... records) { - return withRecords(RecordBatch.CURRENT_MAGIC_VALUE, initialOffset, compressionType, TimestampType.CREATE_TIME, RecordBatch.NO_PRODUCER_ID, - RecordBatch.NO_PRODUCER_EPOCH, RecordBatch.NO_SEQUENCE, partitionLeaderEpoch, false, records); - } - - public static MemoryRecords withIdempotentRecords(CompressionType compressionType, long producerId, - short producerEpoch, int baseSequence, SimpleRecord... 
records) { - return withRecords(RecordBatch.CURRENT_MAGIC_VALUE, 0L, compressionType, TimestampType.CREATE_TIME, producerId, producerEpoch, - baseSequence, RecordBatch.NO_PARTITION_LEADER_EPOCH, false, records); - } - - public static MemoryRecords withIdempotentRecords(byte magic, long initialOffset, CompressionType compressionType, - long producerId, short producerEpoch, int baseSequence, - int partitionLeaderEpoch, SimpleRecord... records) { - return withRecords(magic, initialOffset, compressionType, TimestampType.CREATE_TIME, producerId, producerEpoch, - baseSequence, partitionLeaderEpoch, false, records); - } - - public static MemoryRecords withIdempotentRecords(long initialOffset, CompressionType compressionType, long producerId, - short producerEpoch, int baseSequence, int partitionLeaderEpoch, - SimpleRecord... records) { - return withRecords(RecordBatch.CURRENT_MAGIC_VALUE, initialOffset, compressionType, TimestampType.CREATE_TIME, - producerId, producerEpoch, baseSequence, partitionLeaderEpoch, false, records); - } - - public static MemoryRecords withTransactionalRecords(CompressionType compressionType, long producerId, - short producerEpoch, int baseSequence, SimpleRecord... records) { - return withRecords(RecordBatch.CURRENT_MAGIC_VALUE, 0L, compressionType, TimestampType.CREATE_TIME, - producerId, producerEpoch, baseSequence, RecordBatch.NO_PARTITION_LEADER_EPOCH, true, records); - } - - public static MemoryRecords withTransactionalRecords(byte magic, long initialOffset, CompressionType compressionType, - long producerId, short producerEpoch, int baseSequence, - int partitionLeaderEpoch, SimpleRecord... records) { - return withRecords(magic, initialOffset, compressionType, TimestampType.CREATE_TIME, producerId, producerEpoch, - baseSequence, partitionLeaderEpoch, true, records); - } - - public static MemoryRecords withTransactionalRecords(long initialOffset, CompressionType compressionType, long producerId, - short producerEpoch, int baseSequence, int partitionLeaderEpoch, - SimpleRecord... records) { - return withTransactionalRecords(RecordBatch.CURRENT_MAGIC_VALUE, initialOffset, compressionType, - producerId, producerEpoch, baseSequence, partitionLeaderEpoch, records); - } - - public static MemoryRecords withRecords(byte magic, long initialOffset, CompressionType compressionType, - TimestampType timestampType, SimpleRecord... records) { - return withRecords(magic, initialOffset, compressionType, timestampType, RecordBatch.NO_PRODUCER_ID, - RecordBatch.NO_PRODUCER_EPOCH, RecordBatch.NO_SEQUENCE, RecordBatch.NO_PARTITION_LEADER_EPOCH, - false, records); - } - - public static MemoryRecords withRecords(byte magic, long initialOffset, CompressionType compressionType, - TimestampType timestampType, long producerId, short producerEpoch, - int baseSequence, int partitionLeaderEpoch, boolean isTransactional, - SimpleRecord... 
records) { - if (records.length == 0) - return MemoryRecords.EMPTY; - int sizeEstimate = AbstractRecords.estimateSizeInBytes(magic, compressionType, Arrays.asList(records)); - ByteBufferOutputStream bufferStream = new ByteBufferOutputStream(sizeEstimate); - long logAppendTime = RecordBatch.NO_TIMESTAMP; - if (timestampType == TimestampType.LOG_APPEND_TIME) - logAppendTime = System.currentTimeMillis(); - MemoryRecordsBuilder builder = new MemoryRecordsBuilder(bufferStream, magic, compressionType, timestampType, - initialOffset, logAppendTime, producerId, producerEpoch, baseSequence, isTransactional, false, - partitionLeaderEpoch, sizeEstimate); - for (SimpleRecord record : records) - builder.append(record); - return builder.build(); - } - - /*public static MemoryRecords withEndTransactionMarker(long producerId, short producerEpoch, EndTransactionMarker marker) { - return withEndTransactionMarker(0L, System.currentTimeMillis(), RecordBatch.NO_PARTITION_LEADER_EPOCH, - producerId, producerEpoch, marker); - } - - public static MemoryRecords withEndTransactionMarker(long timestamp, long producerId, short producerEpoch, - EndTransactionMarker marker) { - return withEndTransactionMarker(0L, timestamp, RecordBatch.NO_PARTITION_LEADER_EPOCH, producerId, - producerEpoch, marker); - } - - public static MemoryRecords withEndTransactionMarker(long initialOffset, long timestamp, int partitionLeaderEpoch, - long producerId, short producerEpoch, - EndTransactionMarker marker) { - int endTxnMarkerBatchSize = DefaultRecordBatch.RECORD_BATCH_OVERHEAD + - EndTransactionMarker.CURRENT_END_TXN_SCHEMA_RECORD_SIZE; - ByteBuffer buffer = ByteBuffer.allocate(endTxnMarkerBatchSize); - writeEndTransactionalMarker(buffer, initialOffset, timestamp, partitionLeaderEpoch, producerId, - producerEpoch, marker); - buffer.flip(); - return MemoryRecords.readableRecords(buffer); - } - - public static void writeEndTransactionalMarker(ByteBuffer buffer, long initialOffset, long timestamp, - int partitionLeaderEpoch, long producerId, short producerEpoch, - EndTransactionMarker marker) { - boolean isTransactional = true; - boolean isControlBatch = true; - MemoryRecordsBuilder builder = new MemoryRecordsBuilder(buffer, RecordBatch.CURRENT_MAGIC_VALUE, CompressionType.NONE, - TimestampType.CREATE_TIME, initialOffset, timestamp, producerId, producerEpoch, - RecordBatch.NO_SEQUENCE, isTransactional, isControlBatch, partitionLeaderEpoch, - buffer.capacity()); - builder.appendEndTxnMarker(timestamp, marker); - builder.close(); - }*/ - -} diff --git a/clients/src/main/java/org/oracle/okafka/common/record/MemoryRecordsBuilder.java b/clients/src/main/java/org/oracle/okafka/common/record/MemoryRecordsBuilder.java deleted file mode 100644 index c896acd..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/record/MemoryRecordsBuilder.java +++ /dev/null @@ -1,799 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
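The withRecords factories above are the one-shot write path: estimate the required size, append through a MemoryRecordsBuilder (deleted next), and return the finished readable buffer. A small sketch, assuming SimpleRecord exposes the usual (timestamp, key, value) constructor, which is not shown in this diff:

```java
import java.nio.charset.StandardCharsets;

import org.oracle.okafka.common.record.CompressionType;
import org.oracle.okafka.common.record.MemoryRecords;
import org.oracle.okafka.common.record.SimpleRecord;

public class WithRecordsSketch {
    // Build a single uncompressed batch containing two illustrative records.
    static MemoryRecords twoRecords() {
        long now = System.currentTimeMillis();
        return MemoryRecords.withRecords(CompressionType.NONE,
                new SimpleRecord(now, "k1".getBytes(StandardCharsets.UTF_8), "v1".getBytes(StandardCharsets.UTF_8)),
                new SimpleRecord(now, "k2".getBytes(StandardCharsets.UTF_8), "v2".getBytes(StandardCharsets.UTF_8)));
    }
}
```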
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.record; - -import static org.oracle.okafka.common.utils.Utils.wrapNullable; - -import java.io.DataOutputStream; -import java.io.IOException; -import java.io.OutputStream; -import java.nio.ByteBuffer; - -import org.oracle.okafka.common.KafkaException; -import org.oracle.okafka.common.header.Header; -import org.oracle.okafka.common.utils.ByteBufferOutputStream; - -/** - * This class is used to write new log data in memory, i.e. this is the write path for {@link MemoryRecords}. - * It transparently handles compression and exposes methods for appending new records, possibly with message - * format conversion. - * - * In cases where keeping memory retention low is important and there's a gap between the time that record appends stop - * and the builder is closed (e.g. the Producer), it's important to call `closeForRecordAppends` when the former happens. - * This will release resources like compression buffers that can be relatively large (64 KB for LZ4). - */ -public class MemoryRecordsBuilder { - private static final float COMPRESSION_RATE_ESTIMATION_FACTOR = 1.05f; - private static final DataOutputStream CLOSED_STREAM = new DataOutputStream(new OutputStream() { - @Override - public void write(int b) throws IOException { - throw new IllegalStateException("MemoryRecordsBuilder is closed for record appends"); - } - }); - - private final TimestampType timestampType; - private final CompressionType compressionType; - // Used to hold a reference to the underlying ByteBuffer so that we can write the record batch header and access - // the written bytes. ByteBufferOutputStream allocates a new ByteBuffer if the existing one is not large enough, - // so it's not safe to hold a direct reference to the underlying ByteBuffer. - private final ByteBufferOutputStream bufferStream; - private final byte magic; - private final int initialPosition; - private final long baseOffset; - private final long logAppendTime; - private final boolean isControlBatch; - private final int partitionLeaderEpoch; - private final int writeLimit; - private final int batchHeaderSizeInBytes; - - // Use a conservative estimate of the compression ratio. The producer overrides this using statistics - // from previous batches before appending any records. 
- private float estimatedCompressionRatio = 1.0F; - - // Used to append records, may compress data on the fly - private DataOutputStream appendStream; - private boolean isTransactional; - private long producerId; - private short producerEpoch; - private int baseSequence; - private int uncompressedRecordsSizeInBytes = 0; // Number of bytes (excluding the header) written before compression - private int numRecords = 0; - private float actualCompressionRatio = 1; - private long maxTimestamp = RecordBatch.NO_TIMESTAMP; - private long offsetOfMaxTimestamp = -1; - private Long lastOffset = null; - private Long firstTimestamp = null; - - private MemoryRecords builtRecords; - private boolean aborted = false; - - public MemoryRecordsBuilder(ByteBufferOutputStream bufferStream, - byte magic, - CompressionType compressionType, - TimestampType timestampType, - long baseOffset, - long logAppendTime, - long producerId, - short producerEpoch, - int baseSequence, - boolean isTransactional, - boolean isControlBatch, - int partitionLeaderEpoch, - int writeLimit) { - if (magic > RecordBatch.MAGIC_VALUE_V0 && timestampType == TimestampType.NO_TIMESTAMP_TYPE) - throw new IllegalArgumentException("TimestampType must be set for magic >= 0"); - if (magic < RecordBatch.MAGIC_VALUE_V2) { - if (isTransactional) - throw new IllegalArgumentException("Transactional records are not supported for magic " + magic); - if (isControlBatch) - throw new IllegalArgumentException("Control records are not supported for magic " + magic); - } - - this.magic = magic; - this.timestampType = timestampType; - this.compressionType = compressionType; - this.baseOffset = baseOffset; - this.logAppendTime = logAppendTime; - this.numRecords = 0; - this.uncompressedRecordsSizeInBytes = 0; - this.actualCompressionRatio = 1; - this.maxTimestamp = RecordBatch.NO_TIMESTAMP; - this.producerId = producerId; - this.producerEpoch = producerEpoch; - this.baseSequence = baseSequence; - this.isTransactional = isTransactional; - this.isControlBatch = isControlBatch; - this.partitionLeaderEpoch = partitionLeaderEpoch; - this.writeLimit = writeLimit; - this.initialPosition = bufferStream.position(); - this.batchHeaderSizeInBytes = AbstractRecords.recordBatchHeaderSizeInBytes(magic, compressionType); - - bufferStream.position(initialPosition + batchHeaderSizeInBytes); - this.bufferStream = bufferStream; - this.appendStream = new DataOutputStream(compressionType.wrapForOutput(this.bufferStream, magic)); - } - - /** - * Construct a new builder. - * - * @param buffer The underlying buffer to use (note that this class will allocate a new buffer if necessary - * to fit the records appended) - * @param magic The magic value to use - * @param compressionType The compression codec to use - * @param timestampType The desired timestamp type. For magic > 0, this cannot be {@link TimestampType#NO_TIMESTAMP_TYPE}. - * @param baseOffset The initial offset to use for - * @param logAppendTime The log append time of this record set. Can be set to NO_TIMESTAMP if CREATE_TIME is used. - * @param producerId The producer ID associated with the producer writing this record set - * @param producerEpoch The epoch of the producer - * @param baseSequence The sequence number of the first record in this set - * @param isTransactional Whether or not the records are part of a transaction - * @param isControlBatch Whether or not this is a control batch (e.g. 
for transaction markers) - * @param partitionLeaderEpoch The epoch of the partition leader appending the record set to the log - * @param writeLimit The desired limit on the total bytes for this record set (note that this can be exceeded - * when compression is used since size estimates are rough, and in the case that the first - * record added exceeds the size). - */ - public MemoryRecordsBuilder(ByteBuffer buffer, - byte magic, - CompressionType compressionType, - TimestampType timestampType, - long baseOffset, - long logAppendTime, - long producerId, - short producerEpoch, - int baseSequence, - boolean isTransactional, - boolean isControlBatch, - int partitionLeaderEpoch, - int writeLimit) { - this(new ByteBufferOutputStream(buffer), magic, compressionType, timestampType, baseOffset, logAppendTime, - producerId, producerEpoch, baseSequence, isTransactional, isControlBatch, partitionLeaderEpoch, - writeLimit); - } - - public ByteBuffer buffer() { - return bufferStream.buffer(); - } - - public int initialCapacity() { - return bufferStream.initialCapacity(); - } - - public double compressionRatio() { - return actualCompressionRatio; - } - - public CompressionType compressionType() { - return compressionType; - } - - public boolean isControlBatch() { - return isControlBatch; - } - - public boolean isTransactional() { - return isTransactional; - } - - /** - * Close this builder and return the resulting buffer. - * @return The built log buffer - */ - public MemoryRecords build() { - if (aborted) { - throw new IllegalStateException("Attempting to build an aborted record batch"); - } - close(); - return builtRecords; - } - - /** - * Get the max timestamp and its offset. The details of the offset returned are a bit subtle. - * - * If the log append time is used, the offset will be the last offset unless no compression is used and - * the message format version is 0 or 1, in which case, it will be the first offset. - * - * If create time is used, the offset will be the last offset unless no compression is used and the message - * format version is 0 or 1, in which case, it will be the offset of the record with the max timestamp. - * - * @return The max timestamp and its offset - */ - public RecordsInfo info() { - if (timestampType == TimestampType.LOG_APPEND_TIME) { - long shallowOffsetOfMaxTimestamp; - // Use the last offset when dealing with record batches - if (compressionType != CompressionType.NONE || magic >= RecordBatch.MAGIC_VALUE_V2) - shallowOffsetOfMaxTimestamp = lastOffset; - else - shallowOffsetOfMaxTimestamp = baseOffset; - return new RecordsInfo(logAppendTime, shallowOffsetOfMaxTimestamp); - } else if (maxTimestamp == RecordBatch.NO_TIMESTAMP) { - return new RecordsInfo(RecordBatch.NO_TIMESTAMP, lastOffset); - } else { - long shallowOffsetOfMaxTimestamp; - // Use the last offset when dealing with record batches - if (compressionType != CompressionType.NONE || magic >= RecordBatch.MAGIC_VALUE_V2) - shallowOffsetOfMaxTimestamp = lastOffset; - else - shallowOffsetOfMaxTimestamp = offsetOfMaxTimestamp; - return new RecordsInfo(maxTimestamp, shallowOffsetOfMaxTimestamp); - } - } - - public int numRecords() { - return numRecords; - } - - /** - * Return the sum of the size of the batch header (always uncompressed) and the records (before compression). 
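The RecordsInfo contract documented in info() above is easiest to see with a tiny builder round trip; the buffer size and timestamps below are illustrative:

```java
import java.nio.ByteBuffer;

import org.oracle.okafka.common.record.CompressionType;
import org.oracle.okafka.common.record.MemoryRecords;
import org.oracle.okafka.common.record.MemoryRecordsBuilder;
import org.oracle.okafka.common.record.TimestampType;

public class RecordsInfoSketch {
    static void summarize() {
        MemoryRecordsBuilder builder = MemoryRecords.builder(
                ByteBuffer.allocate(1024), CompressionType.NONE, TimestampType.CREATE_TIME, 0L);
        builder.append(1000L, "k".getBytes(), "v1".getBytes());
        builder.append(2000L, "k".getBytes(), "v2".getBytes());
        builder.build();

        // With CREATE_TIME and the current (v2) magic, the shallow offset is the last offset.
        MemoryRecordsBuilder.RecordsInfo info = builder.info();
        System.out.println("maxTimestamp=" + info.maxTimestamp
                + " shallowOffsetOfMaxTimestamp=" + info.shallowOffsetOfMaxTimestamp);
    }
}
```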
- */ - public int uncompressedBytesWritten() { - return uncompressedRecordsSizeInBytes + batchHeaderSizeInBytes; - } - - public void setProducerState(long producerId, short producerEpoch, int baseSequence, boolean isTransactional) { - if (isClosed()) { - // Sequence numbers are assigned when the batch is closed while the accumulator is being drained. - // If the resulting ProduceRequest to the partition leader failed for a retriable error, the batch will - // be re queued. In this case, we should not attempt to set the state again, since changing the producerId and sequence - // once a batch has been sent to the broker risks introducing duplicates. - throw new IllegalStateException("Trying to set producer state of an already closed batch. This indicates a bug on the client."); - } - this.producerId = producerId; - this.producerEpoch = producerEpoch; - this.baseSequence = baseSequence; - this.isTransactional = isTransactional; - } - - public void overrideLastOffset(long lastOffset) { - if (builtRecords != null) - throw new IllegalStateException("Cannot override the last offset after the records have been built"); - this.lastOffset = lastOffset; - } - - /** - * Release resources required for record appends (e.g. compression buffers). Once this method is called, it's only - * possible to update the RecordBatch header. - */ - public void closeForRecordAppends() { - if (appendStream != CLOSED_STREAM) { - try { - appendStream.close(); - } catch (IOException e) { - throw new KafkaException(e); - } finally { - appendStream = CLOSED_STREAM; - } - } - } - - public void abort() { - closeForRecordAppends(); - buffer().position(initialPosition); - aborted = true; - } - - public void reopenAndRewriteProducerState(long producerId, short producerEpoch, int baseSequence, boolean isTransactional) { - if (aborted) - throw new IllegalStateException("Should not reopen a batch which is already aborted."); - builtRecords = null; - this.producerId = producerId; - this.producerEpoch = producerEpoch; - this.baseSequence = baseSequence; - this.isTransactional = isTransactional; - } - - - public void close() { - if (aborted) - throw new IllegalStateException("Cannot close MemoryRecordsBuilder as it has already been aborted"); - - if (builtRecords != null) - return; - - validateProducerState(); - - closeForRecordAppends(); - - if (numRecords == 0L) { - buffer().position(initialPosition); - builtRecords = MemoryRecords.EMPTY; - } else { - if (magic > RecordBatch.MAGIC_VALUE_V1) - this.actualCompressionRatio = (float) writeDefaultBatchHeader() / this.uncompressedRecordsSizeInBytes; - else if (compressionType != CompressionType.NONE) - this.actualCompressionRatio = (float) writeLegacyCompressedWrapperHeader() / this.uncompressedRecordsSizeInBytes; - - ByteBuffer buffer = buffer().duplicate(); - buffer.flip(); - buffer.position(initialPosition); - builtRecords = MemoryRecords.readableRecords(buffer.slice()); - } - } - - private void validateProducerState() { - if (isTransactional && producerId == RecordBatch.NO_PRODUCER_ID) - throw new IllegalArgumentException("Cannot write transactional messages without a valid producer ID"); - - if (producerId != RecordBatch.NO_PRODUCER_ID) { - if (producerEpoch == RecordBatch.NO_PRODUCER_EPOCH) - throw new IllegalArgumentException("Invalid negative producer epoch"); - - if (baseSequence < 0 && !isControlBatch) - throw new IllegalArgumentException("Invalid negative sequence number used"); - - if (magic < RecordBatch.MAGIC_VALUE_V2) - throw new IllegalArgumentException("Idempotent 
messages are not supported for magic " + magic); - } - } - - /** - * Write the header to the default batch. - * @return the written compressed bytes. - */ - private int writeDefaultBatchHeader() { - ensureOpenForRecordBatchWrite(); - ByteBuffer buffer = bufferStream.buffer(); - int pos = buffer.position(); - buffer.position(initialPosition); - int size = pos - initialPosition; - int writtenCompressed = size - DefaultRecordBatch.RECORD_BATCH_OVERHEAD; - int offsetDelta = (int) (lastOffset - baseOffset); - - final long maxTimestamp; - if (timestampType == TimestampType.LOG_APPEND_TIME) - maxTimestamp = logAppendTime; - else - maxTimestamp = this.maxTimestamp; - - DefaultRecordBatch.writeHeader(buffer, baseOffset, offsetDelta, size, magic, compressionType, timestampType, - firstTimestamp, maxTimestamp, producerId, producerEpoch, baseSequence, isTransactional, isControlBatch, - partitionLeaderEpoch, numRecords); - - buffer.position(pos); - return writtenCompressed; - } - - /** - * Write the header to the legacy batch. - * @return the written compressed bytes. - */ - private int writeLegacyCompressedWrapperHeader() { - ensureOpenForRecordBatchWrite(); - ByteBuffer buffer = bufferStream.buffer(); - int pos = buffer.position(); - buffer.position(initialPosition); - - int wrapperSize = pos - initialPosition - Records.LOG_OVERHEAD; - int writtenCompressed = wrapperSize - LegacyRecord.recordOverhead(magic); - AbstractLegacyRecordBatch.writeHeader(buffer, lastOffset, wrapperSize); - - long timestamp = timestampType == TimestampType.LOG_APPEND_TIME ? logAppendTime : maxTimestamp; - LegacyRecord.writeCompressedRecordHeader(buffer, magic, wrapperSize, timestamp, compressionType, timestampType); - - buffer.position(pos); - return writtenCompressed; - } - - /** - * Append a record and return its checksum for message format v0 and v1, or null for v2 and above. - */ - private Long appendWithOffset(long offset, boolean isControlRecord, long timestamp, ByteBuffer key, - ByteBuffer value, Header[] headers) { - try { - if (isControlRecord != isControlBatch) - throw new IllegalArgumentException("Control records can only be appended to control batches"); - - if (lastOffset != null && offset <= lastOffset) - throw new IllegalArgumentException(String.format("Illegal offset %s following previous offset %s " + - "(Offsets must increase monotonically).", offset, lastOffset)); - - if (timestamp < 0 && timestamp != RecordBatch.NO_TIMESTAMP) - throw new IllegalArgumentException("Invalid negative timestamp " + timestamp); - - if (magic < RecordBatch.MAGIC_VALUE_V2 && headers != null && headers.length > 0) - throw new IllegalArgumentException("Magic v" + magic + " does not support record headers"); - - if (firstTimestamp == null) - firstTimestamp = timestamp; - - if (magic > RecordBatch.MAGIC_VALUE_V1) { - appendDefaultRecord(offset, timestamp, key, value, headers); - return null; - } else { - return appendLegacyRecord(offset, timestamp, key, value); - } - } catch (IOException e) { - throw new KafkaException("I/O exception when writing to the append stream, closing", e); - } - } - - /** - * Append a new record at the given offset. 
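appendWithOffset enforces the monotonic-offset check thrown just above: within a batch, explicit offsets must strictly increase. A short sketch with explicit offsets (wrapper class, buffer size and values are illustrative):

```java
import java.nio.ByteBuffer;

import org.oracle.okafka.common.record.CompressionType;
import org.oracle.okafka.common.record.MemoryRecords;
import org.oracle.okafka.common.record.MemoryRecordsBuilder;
import org.oracle.okafka.common.record.TimestampType;

public class ExplicitOffsetSketch {
    static MemoryRecords buildWithOffsets() {
        long baseOffset = 100L;
        MemoryRecordsBuilder builder = MemoryRecords.builder(
                ByteBuffer.allocate(512), CompressionType.NONE, TimestampType.CREATE_TIME, baseOffset);

        builder.appendWithOffset(100L, 1_000L, "k1".getBytes(), "v1".getBytes());
        builder.appendWithOffset(101L, 1_001L, "k2".getBytes(), "v2".getBytes());
        // Re-using offset 101L here would throw IllegalArgumentException:
        // offsets must increase monotonically.

        return builder.build();
    }
}
```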
- * @param offset The absolute offset of the record in the log buffer - * @param timestamp The record timestamp - * @param key The record key - * @param value The record value - * @param headers The record headers if there are any - * @return CRC of the record or null if record-level CRC is not supported for the message format - */ - public Long appendWithOffset(long offset, long timestamp, byte[] key, byte[] value, Header[] headers) { - return appendWithOffset(offset, false, timestamp, wrapNullable(key), wrapNullable(value), headers); - } - - /** - * Append a new record at the given offset. - * @param offset The absolute offset of the record in the log buffer - * @param timestamp The record timestamp - * @param key The record key - * @param value The record value - * @param headers The record headers if there are any - * @return CRC of the record or null if record-level CRC is not supported for the message format - */ - public Long appendWithOffset(long offset, long timestamp, ByteBuffer key, ByteBuffer value, Header[] headers) { - return appendWithOffset(offset, false, timestamp, key, value, headers); - } - - /** - * Append a new record at the given offset. - * @param offset The absolute offset of the record in the log buffer - * @param timestamp The record timestamp - * @param key The record key - * @param value The record value - * @return CRC of the record or null if record-level CRC is not supported for the message format - */ - public Long appendWithOffset(long offset, long timestamp, byte[] key, byte[] value) { - return appendWithOffset(offset, timestamp, wrapNullable(key), wrapNullable(value), Record.EMPTY_HEADERS); - } - - /** - * Append a new record at the given offset. - * @param offset The absolute offset of the record in the log buffer - * @param timestamp The record timestamp - * @param key The record key - * @param value The record value - * @return CRC of the record or null if record-level CRC is not supported for the message format - */ - public Long appendWithOffset(long offset, long timestamp, ByteBuffer key, ByteBuffer value) { - return appendWithOffset(offset, timestamp, key, value, Record.EMPTY_HEADERS); - } - - /** - * Append a new record at the given offset. - * @param offset The absolute offset of the record in the log buffer - * @param record The record to append - * @return CRC of the record or null if record-level CRC is not supported for the message format - */ - public Long appendWithOffset(long offset, SimpleRecord record) { - return appendWithOffset(offset, record.timestamp(), record.key(), record.value(), record.headers()); - } - - /** - * Append a new record at the next sequential offset. - * @param timestamp The record timestamp - * @param key The record key - * @param value The record value - * @return CRC of the record or null if record-level CRC is not supported for the message format - */ - public Long append(long timestamp, ByteBuffer key, ByteBuffer value) { - return append(timestamp, key, value, Record.EMPTY_HEADERS); - } - - /** - * Append a new record at the next sequential offset. 
- * @param timestamp The record timestamp - * @param key The record key - * @param value The record value - * @param headers The record headers if there are any - * @return CRC of the record or null if record-level CRC is not supported for the message format - */ - public Long append(long timestamp, ByteBuffer key, ByteBuffer value, Header[] headers) { - return appendWithOffset(nextSequentialOffset(), timestamp, key, value, headers); - } - - /** - * Append a new record at the next sequential offset. - * @param timestamp The record timestamp - * @param key The record key - * @param value The record value - * @return CRC of the record or null if record-level CRC is not supported for the message format - */ - public Long append(long timestamp, byte[] key, byte[] value) { - return append(timestamp, wrapNullable(key), wrapNullable(value), Record.EMPTY_HEADERS); - } - - /** - * Append a new record at the next sequential offset. - * @param timestamp The record timestamp - * @param key The record key - * @param value The record value - * @param headers The record headers if there are any - * @return CRC of the record or null if record-level CRC is not supported for the message format - */ - public Long append(long timestamp, byte[] key, byte[] value, Header[] headers) { - return append(timestamp, wrapNullable(key), wrapNullable(value), headers); - } - - /** - * Append a new record at the next sequential offset. - * @param record The record to append - * @return CRC of the record or null if record-level CRC is not supported for the message format - */ - public Long append(SimpleRecord record) { - return appendWithOffset(nextSequentialOffset(), record); - } - - /** - * Append a control record at the next sequential offset. - * @param timestamp The record timestamp - * @param type The control record type (cannot be UNKNOWN) - * @param value The control record value - * @return CRC of the record or null if record-level CRC is not supported for the message format - */ - /* private Long appendControlRecord(long timestamp, ControlRecordType type, ByteBuffer value) { - Struct keyStruct = type.recordKey(); - ByteBuffer key = ByteBuffer.allocate(keyStruct.sizeOf()); - keyStruct.writeTo(key); - key.flip(); - return appendWithOffset(nextSequentialOffset(), true, timestamp, key, value, Record.EMPTY_HEADERS); - }*/ - - /** - * Return CRC of the record or null if record-level CRC is not supported for the message format - */ - /*public Long appendEndTxnMarker(long timestamp, EndTransactionMarker marker) { - if (producerId == RecordBatch.NO_PRODUCER_ID) - throw new IllegalArgumentException("End transaction marker requires a valid producerId"); - if (!isTransactional) - throw new IllegalArgumentException("End transaction marker depends on batch transactional flag being enabled"); - ByteBuffer value = marker.serializeValue(); - return appendControlRecord(timestamp, marker.controlType(), value); - }*/ - - /** - * Add a legacy record without doing offset/magic validation (this should only be used in testing). 
- * @param offset The offset of the record - * @param record The record to add - */ - public void appendUncheckedWithOffset(long offset, LegacyRecord record) { - ensureOpenForRecordAppend(); - try { - int size = record.sizeInBytes(); - AbstractLegacyRecordBatch.writeHeader(appendStream, toInnerOffset(offset), size); - - ByteBuffer buffer = record.buffer().duplicate(); - appendStream.write(buffer.array(), buffer.arrayOffset(), buffer.limit()); - - recordWritten(offset, record.timestamp(), size + Records.LOG_OVERHEAD); - } catch (IOException e) { - throw new KafkaException("I/O exception when writing to the append stream, closing", e); - } - } - - /** - * Append a record at the next sequential offset. - * @param record the record to add - */ - public void append(Record record) { - appendWithOffset(record.offset(), isControlBatch, record.timestamp(), record.key(), record.value(), record.headers()); - } - - /** - * Append a log record using a different offset - * @param offset The offset of the record - * @param record The record to add - */ - public void appendWithOffset(long offset, Record record) { - appendWithOffset(offset, record.timestamp(), record.key(), record.value(), record.headers()); - } - - /** - * Add a record with a given offset. The record must have a magic which matches the magic use to - * construct this builder and the offset must be greater than the last appended record. - * @param offset The offset of the record - * @param record The record to add - */ - public void appendWithOffset(long offset, LegacyRecord record) { - appendWithOffset(offset, record.timestamp(), record.key(), record.value()); - } - - /** - * Append the record at the next consecutive offset. If no records have been appended yet, use the base - * offset of this builder. 
- * @param record The record to add - */ - public void append(LegacyRecord record) { - appendWithOffset(nextSequentialOffset(), record); - } - - private void appendDefaultRecord(long offset, long timestamp, ByteBuffer key, ByteBuffer value, - Header[] headers) throws IOException { - ensureOpenForRecordAppend(); - int offsetDelta = (int) (offset - baseOffset); - long timestampDelta = timestamp - firstTimestamp; - int sizeInBytes = DefaultRecord.writeTo(appendStream, offsetDelta, timestampDelta, key, value, headers); - recordWritten(offset, timestamp, sizeInBytes); - } - - private long appendLegacyRecord(long offset, long timestamp, ByteBuffer key, ByteBuffer value) throws IOException { - ensureOpenForRecordAppend(); - if (compressionType == CompressionType.NONE && timestampType == TimestampType.LOG_APPEND_TIME) - timestamp = logAppendTime; - - int size = LegacyRecord.recordSize(magic, key, value); - AbstractLegacyRecordBatch.writeHeader(appendStream, toInnerOffset(offset), size); - - if (timestampType == TimestampType.LOG_APPEND_TIME) - timestamp = logAppendTime; - long crc = LegacyRecord.write(appendStream, magic, timestamp, key, value, CompressionType.NONE, timestampType); - recordWritten(offset, timestamp, size + Records.LOG_OVERHEAD); - return crc; - } - - private long toInnerOffset(long offset) { - // use relative offsets for compressed messages with magic v1 - if (magic > 0 && compressionType != CompressionType.NONE) - return offset - baseOffset; - return offset; - } - - private void recordWritten(long offset, long timestamp, int size) { - if (numRecords == Integer.MAX_VALUE) - throw new IllegalArgumentException("Maximum number of records per batch exceeded, max records: " + Integer.MAX_VALUE); - if (offset - baseOffset > Integer.MAX_VALUE) - throw new IllegalArgumentException("Maximum offset delta exceeded, base offset: " + baseOffset + - ", last offset: " + offset); - - numRecords += 1; - uncompressedRecordsSizeInBytes += size; - lastOffset = offset; - - if (magic > RecordBatch.MAGIC_VALUE_V0 && timestamp > maxTimestamp) { - maxTimestamp = timestamp; - offsetOfMaxTimestamp = offset; - } - } - - private void ensureOpenForRecordAppend() { - if (appendStream == CLOSED_STREAM) - throw new IllegalStateException("Tried to append a record, but MemoryRecordsBuilder is closed for record appends"); - } - - private void ensureOpenForRecordBatchWrite() { - if (isClosed()) - throw new IllegalStateException("Tried to write record batch header, but MemoryRecordsBuilder is closed"); - if (aborted) - throw new IllegalStateException("Tried to write record batch header, but MemoryRecordsBuilder is aborted"); - } - - /** - * Get an estimate of the number of bytes written (based on the estimation factor hard-coded in {@link CompressionType}. - * @return The estimated number of bytes written - */ - private int estimatedBytesWritten() { - if (compressionType == CompressionType.NONE) { - return batchHeaderSizeInBytes + uncompressedRecordsSizeInBytes; - } else { - // estimate the written bytes to the underlying byte buffer based on uncompressed written bytes - return batchHeaderSizeInBytes + (int) (uncompressedRecordsSizeInBytes * estimatedCompressionRatio * COMPRESSION_RATE_ESTIMATION_FACTOR); - } - } - - /** - * Set the estimated compression ratio for the memory records builder. 
- */ - public void setEstimatedCompressionRatio(float estimatedCompressionRatio) { - this.estimatedCompressionRatio = estimatedCompressionRatio; - } - - /** - * Check if we have room for a new record containing the given key/value pair. If no records have been - * appended, then this returns true. - */ - public boolean hasRoomFor(long timestamp, byte[] key, byte[] value, Header[] headers) { - return hasRoomFor(timestamp, wrapNullable(key), wrapNullable(value), headers); - } - - /** - * Check if we have room for a new record containing the given key/value pair. If no records have been - * appended, then this returns true. - * - * Note that the return value is based on the estimate of the bytes written to the compressor, which may not be - * accurate if compression is used. When this happens, the following append may cause dynamic buffer - * re-allocation in the underlying byte buffer stream. - */ - public boolean hasRoomFor(long timestamp, ByteBuffer key, ByteBuffer value, Header[] headers) { - if (isFull()) - return false; - - // We always allow at least one record to be appended (the ByteBufferOutputStream will grow as needed) - if (numRecords == 0) - return true; - - final int recordSize; - if (magic < RecordBatch.MAGIC_VALUE_V2) { - recordSize = Records.LOG_OVERHEAD + LegacyRecord.recordSize(magic, key, value); - } else { - int nextOffsetDelta = lastOffset == null ? 0 : (int) (lastOffset - baseOffset + 1); - long timestampDelta = firstTimestamp == null ? 0 : timestamp - firstTimestamp; - recordSize = DefaultRecord.sizeInBytes(nextOffsetDelta, timestampDelta, key, value, headers); - } - - // Be conservative and not take compression of the new record into consideration. - return this.writeLimit >= estimatedBytesWritten() + recordSize; - } - - public boolean isClosed() { - return builtRecords != null; - } - - public boolean isFull() { - // note that the write limit is respected only after the first record is added which ensures we can always - // create non-empty batches (this is used to disable batching when the producer's batch size is set to 0). - return appendStream == CLOSED_STREAM || (this.numRecords > 0 && this.writeLimit <= estimatedBytesWritten()); - } - - /** - * Get an estimate of the number of bytes written to the underlying buffer. The returned value - * is exactly correct if the record set is not compressed or if the builder has been closed. - */ - public int estimatedSizeInBytes() { - return builtRecords != null ? builtRecords.sizeInBytes() : estimatedBytesWritten(); - } - - public byte magic() { - return magic; - } - - private long nextSequentialOffset() { - return lastOffset == null ? baseOffset : lastOffset + 1; - } - - public static class RecordsInfo { - public final long maxTimestamp; - public final long shallowOffsetOfMaxTimestamp; - - public RecordsInfo(long maxTimestamp, - long shallowOffsetOfMaxTimestamp) { - this.maxTimestamp = maxTimestamp; - this.shallowOffsetOfMaxTimestamp = shallowOffsetOfMaxTimestamp; - } - } - - /** - * Return the producer id of the RecordBatches created by this builder. 
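hasRoomFor and isFull are how a producer-style caller decides when to seal one batch and start the next, and closeForRecordAppends releases compression buffers early as the class comment recommends. A sketch of that loop; the wrapper class, batch size and data are illustrative (Record.EMPTY_HEADERS comes from the Record interface deleted further below):

```java
import java.nio.ByteBuffer;
import java.util.List;

import org.oracle.okafka.common.record.CompressionType;
import org.oracle.okafka.common.record.MemoryRecords;
import org.oracle.okafka.common.record.MemoryRecordsBuilder;
import org.oracle.okafka.common.record.Record;
import org.oracle.okafka.common.record.TimestampType;

public class BatchingLoopSketch {
    // Append values until the builder reports it is full, then seal the batch.
    static MemoryRecords fillBatch(List<byte[]> values, int batchSizeBytes) {
        MemoryRecordsBuilder builder = MemoryRecords.builder(
                ByteBuffer.allocate(batchSizeBytes), CompressionType.NONE, TimestampType.CREATE_TIME, 0L);

        for (byte[] value : values) {
            long now = System.currentTimeMillis();
            if (!builder.hasRoomFor(now, null, value, Record.EMPTY_HEADERS))
                break; // batch is full; a real caller would start a new builder here
            builder.append(now, null, value, Record.EMPTY_HEADERS);
        }

        // Release compression buffers early, then write the batch header and
        // return the finished, readable records.
        builder.closeForRecordAppends();
        return builder.build();
    }
}
```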
- */ - public long producerId() { - return this.producerId; - } - - public short producerEpoch() { - return this.producerEpoch; - } - - public int baseSequence() { - return this.baseSequence; - } -} diff --git a/clients/src/main/java/org/oracle/okafka/common/record/MutableRecordBatch.java b/clients/src/main/java/org/oracle/okafka/common/record/MutableRecordBatch.java deleted file mode 100644 index 161558b..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/record/MutableRecordBatch.java +++ /dev/null @@ -1,66 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.record; - -import org.oracle.okafka.common.utils.ByteBufferOutputStream; - -/** - * A mutable record batch is one that can be modified in place (without copying). This is used by the broker - * to override certain fields in the batch before appending it to the log. - */ -public interface MutableRecordBatch extends RecordBatch { - - /** - * Set the last offset of this batch. - * @param offset The last offset to use - */ - void setLastOffset(long offset); - - /** - * Set the max timestamp for this batch. When using log append time, this effectively overrides the individual - * timestamps of all the records contained in the batch. To avoid recompression, the record fields are not updated - * by this method, but clients ignore them if the timestamp time is log append time. Note that firstTimestamp is not - * updated by this method. - * - * This typically requires re-computation of the batch's CRC. - * - * @param timestampType The timestamp type - * @param maxTimestamp The maximum timestamp - */ - void setMaxTimestamp(TimestampType timestampType, long maxTimestamp); - - /** - * Set the partition leader epoch for this batch of records. - * @param epoch The partition leader epoch to use - */ - void setPartitionLeaderEpoch(int epoch); - - /** - * Write this record batch into an output stream. - * @param outputStream The buffer to write the batch to - */ - void writeTo(ByteBufferOutputStream outputStream); - -} diff --git a/clients/src/main/java/org/oracle/okafka/common/record/Record.java b/clients/src/main/java/org/oracle/okafka/common/record/Record.java deleted file mode 100644 index 5209653..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/record/Record.java +++ /dev/null @@ -1,150 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
-*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.record; - -import java.nio.ByteBuffer; - -import org.oracle.okafka.common.header.Header; - -/** - * A log record is a tuple consisting of a unique offset in the log, a sequence number assigned by - * the producer, a timestamp, a key and a value. - */ -public interface Record { - - Header[] EMPTY_HEADERS = new Header[0]; - - /** - * The offset of this record in the log - * @return the offset - */ - long offset(); - - /** - * Get the sequence number assigned by the producer. - * @return the sequence number - */ - int sequence(); - - /** - * Get the size in bytes of this record. - * @return the size of the record in bytes - */ - int sizeInBytes(); - - /** - * Get the record's timestamp. - * @return the record's timestamp - */ - long timestamp(); - - /** - * Get a checksum of the record contents. - * @return A 4-byte unsigned checksum represented as a long or null if the message format does not - * include a checksum (i.e. for v2 and above) - */ - Long checksumOrNull(); - - /** - * Check whether the record has a valid checksum. - * @return true if the record has a valid checksum, false otherwise - */ - boolean isValid(); - - /** - * Raise a {@link org.oracle.okafka.common.errors.CorruptRecordException} if the record does not have a valid checksum. - */ - void ensureValid(); - - /** - * Get the size in bytes of the key. - * @return the size of the key, or -1 if there is no key - */ - int keySize(); - - /** - * Check whether this record has a key - * @return true if there is a key, false otherwise - */ - boolean hasKey(); - - /** - * Get the record's key. - * @return the key or null if there is none - */ - ByteBuffer key(); - - /** - * Get the size in bytes of the value. - * @return the size of the value, or -1 if the value is null - */ - int valueSize(); - - /** - * Check whether a value is present (i.e. if the value is not null) - * @return true if so, false otherwise - */ - boolean hasValue(); - - /** - * Get the record's value - * @return the (nullable) value - */ - ByteBuffer value(); - - /** - * Check whether the record has a particular magic. For versions prior to 2, the record contains its own magic, - * so this function can be used to check whether it matches a particular value. For version 2 and above, this - * method returns true if the passed magic is greater than or equal to 2. - * - * @param magic the magic value to check - * @return true if the record has a magic field (versions prior to 2) and the value matches - */ - boolean hasMagic(byte magic); - - /** - * For versions prior to 2, check whether the record is compressed (and therefore - * has nested record content). For versions 2 and above, this always returns false. 
- * @return true if the magic is lower than 2 and the record is compressed - */ - boolean isCompressed(); - - /** - * For versions prior to 2, the record contained a timestamp type attribute. This method can be - * used to check whether the value of that attribute matches a particular timestamp type. For versions - * 2 and above, this will always be false. - * - * @param timestampType the timestamp type to compare - * @return true if the version is lower than 2 and the timestamp type matches - */ - boolean hasTimestampType(TimestampType timestampType); - - /** - * Get the headers. For magic versions 1 and below, this always returns an empty array. - * - * @return the array of headers - */ - Header[] headers(); -} \ No newline at end of file diff --git a/clients/src/main/java/org/oracle/okafka/common/record/RecordBatch.java b/clients/src/main/java/org/oracle/okafka/common/record/RecordBatch.java deleted file mode 100644 index 1e283bd..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/record/RecordBatch.java +++ /dev/null @@ -1,248 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.record; - -import java.nio.ByteBuffer; -import java.util.Iterator; - -import org.oracle.okafka.common.utils.CloseableIterator; - -/** - * A record batch is a container for records. In old versions of the record format (versions 0 and 1), - * a batch consisted always of a single record if no compression was enabled, but could contain - * many records otherwise. Newer versions (magic versions 2 and above) will generally contain many records - * regardless of compression. - */ -public interface RecordBatch extends Iterable { - - /** - * The "magic" values - */ - byte MAGIC_VALUE_V0 = 0; - byte MAGIC_VALUE_V1 = 1; - byte MAGIC_VALUE_V2 = 2; - - /** - * The current "magic" value - */ - byte CURRENT_MAGIC_VALUE = MAGIC_VALUE_V2; - - /** - * Timestamp value for records without a timestamp - */ - long NO_TIMESTAMP = -1L; - - /** - * Values used in the v2 record format by non-idempotent/non-transactional producers or when - * up-converting from an older format. - */ - long NO_PRODUCER_ID = -1L; - short NO_PRODUCER_EPOCH = -1; - int NO_SEQUENCE = -1; - - /** - * Used to indicate an unknown leader epoch, which will be the case when the record set is - * first created by the producer. - */ - int NO_PARTITION_LEADER_EPOCH = -1; - - /** - * Check whether the checksum of this batch is correct. 
- * - * @return true If so, false otherwise - */ - boolean isValid(); - - /** - * Raise an exception if the checksum is not valid. - */ - void ensureValid(); - - /** - * Get the checksum of this record batch, which covers the batch header as well as all of the records. - * - * @return The 4-byte unsigned checksum represented as a long - */ - long checksum(); - - /** - * Get the max timestamp or log append time of this record batch. - * - * If the timestamp type is create time, this is the max timestamp among all records contained in this batch and - * the value is updated during compaction. - * - * @return The max timestamp - */ - long maxTimestamp(); - - /** - * Get the timestamp type of this record batch. This will be {@link TimestampType#NO_TIMESTAMP_TYPE} - * if the batch has magic 0. - * - * @return The timestamp type - */ - TimestampType timestampType(); - - /** - * Get the base offset contained in this record batch. For magic version prior to 2, the base offset will - * always be the offset of the first message in the batch. This generally requires deep iteration and will - * return the offset of the first record in the record batch. For magic version 2 and above, this will return - * the first offset of the original record batch (i.e. prior to compaction). For non-compacted topics, the - * behavior is equivalent. - * - * Because this requires deep iteration for older magic versions, this method should be used with - * caution. Generally {@link #lastOffset()} is safer since access is efficient for all magic versions. - * - * @return The base offset of this record batch (which may or may not be the offset of the first record - * as described above). - */ - long baseOffset(); - - /** - * Get the last offset in this record batch (inclusive). Just like {@link #baseOffset()}, the last offset - * always reflects the offset of the last record in the original batch, even if it is removed during log - * compaction. - * - * @return The offset of the last record in this batch - */ - long lastOffset(); - - /** - * Get the offset following this record batch (i.e. the last offset contained in this batch plus one). - * - * @return the next consecutive offset following this batch - */ - long nextOffset(); - - /** - * Get the record format version of this record batch (i.e its magic value). - * - * @return the magic byte - */ - byte magic(); - - /** - * Get the producer id for this log record batch. For older magic versions, this will return -1. - * - * @return The producer id or -1 if there is none - */ - long producerId(); - - /** - * Get the producer epoch for this log record batch. - * - * @return The producer epoch, or -1 if there is none - */ - short producerEpoch(); - - /** - * Does the batch have a valid producer id set. - */ - boolean hasProducerId(); - - /** - * Get the base sequence number of this record batch. Like {@link #baseOffset()}, this value is not - * affected by compaction: it always retains the base sequence number from the original batch. - * - * @return The first sequence number or -1 if there is none - */ - int baseSequence(); - - /** - * Get the last sequence number of this record batch. Like {@link #lastOffset()}, the last sequence number - * always reflects the sequence number of the last record in the original batch, even if it is removed during log - * compaction. - * - * @return The last sequence number or -1 if there is none - */ - int lastSequence(); - - /** - * Get the compression type of this record batch. 
- * - * @return The compression type - */ - CompressionType compressionType(); - - /** - * Get the size in bytes of this batch, including the size of the record and the batch overhead. - * @return The size in bytes of this batch - */ - int sizeInBytes(); - - /** - * Get the count if it is efficiently supported by the record format (which is only the case - * for magic 2 and higher). - * - * @return The number of records in the batch or null for magic versions 0 and 1. - */ - Integer countOrNull(); - - /** - * Check whether this record batch is compressed. - * @return true if so, false otherwise - */ - boolean isCompressed(); - - /** - * Write this record batch into a buffer. - * @param buffer The buffer to write the batch to - */ - void writeTo(ByteBuffer buffer); - - /** - * Whether or not this record batch is part of a transaction. - * @return true if it is, false otherwise - */ - boolean isTransactional(); - - /** - * Get the partition leader epoch of this record batch. - * @return The leader epoch or -1 if it is unknown - */ - int partitionLeaderEpoch(); - - /** - * Return a streaming iterator which basically delays decompression of the record stream until the records - * are actually asked for using {@link Iterator#next()}. If the message format does not support streaming - * iteration, then the normal iterator is returned. Either way, callers should ensure that the iterator is closed. - * - * @param decompressionBufferSupplier The supplier of ByteBuffer(s) used for decompression if supported. - * For small record batches, allocating a potentially large buffer (64 KB for LZ4) - * will dominate the cost of decompressing and iterating over the records in the - * batch. As such, a supplier that reuses buffers will have a significant - * performance impact. - * @return The closeable iterator - */ - CloseableIterator streamingIterator(BufferSupplier decompressionBufferSupplier); - - /** - * Check whether this is a control batch (i.e. whether the control bit is set in the batch attributes). - * For magic versions prior to 2, this is always false. - * - * @return Whether this is a batch containing control records - */ - boolean isControlBatch(); -} diff --git a/clients/src/main/java/org/oracle/okafka/common/record/RecordBatchIterator.java b/clients/src/main/java/org/oracle/okafka/common/record/RecordBatchIterator.java deleted file mode 100644 index a0959ab..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/record/RecordBatchIterator.java +++ /dev/null @@ -1,51 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.oracle.okafka.common.record; - -import java.io.IOException; - -import org.oracle.okafka.common.KafkaException; -import org.oracle.okafka.common.utils.AbstractIterator; - -class RecordBatchIterator extends AbstractIterator { - - private final LogInputStream logInputStream; - - RecordBatchIterator(LogInputStream logInputStream) { - this.logInputStream = logInputStream; - } - - @Override - protected T makeNext() { - try { - T batch = logInputStream.nextBatch(); - if (batch == null) - return allDone(); - return batch; - } catch (IOException e) { - throw new KafkaException(e); - } - } -} diff --git a/clients/src/main/java/org/oracle/okafka/common/record/Records.java b/clients/src/main/java/org/oracle/okafka/common/record/Records.java deleted file mode 100644 index f893db6..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/record/Records.java +++ /dev/null @@ -1,101 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.record; - -import java.util.Iterator; - -import org.oracle.okafka.common.utils.AbstractIterator; - - -/** - * Interface for accessing the records contained in a log. The log itself is represented as a sequence of record - * batches (see {@link RecordBatch}). - * - * For magic versions 1 and below, each batch consists of an 8 byte offset, a 4 byte record size, and a "shallow" - * {@link Record record}. If the batch is not compressed, then each batch will have only the shallow record contained - * inside it. If it is compressed, the batch contains "deep" records, which are packed into the value field of the - * shallow record. To iterate over the shallow batches, use {@link Records#batches()}; for the deep records, use - * {@link Records#records()}. Note that the deep iterator handles both compressed and non-compressed batches: - * if the batch is not compressed, the shallow record is returned; otherwise, the shallow batch is decompressed and the - * deep records are returned. - * - * For magic version 2, every batch contains 1 or more log record, regardless of compression. You can iterate - * over the batches directly using {@link Records#batches()}. Records can be iterated either directly from an individual - * batch or through {@link Records#records()}. Just as in previous versions, iterating over the records typically involves - * decompression and should therefore be used with caution. - * - * See {@link MemoryRecords} for the in-memory representation and {@link FileRecords} for the on-disk representation. 
- */ -public interface Records extends BaseRecords { - int OFFSET_OFFSET = 0; - int OFFSET_LENGTH = 8; - int SIZE_OFFSET = OFFSET_OFFSET + OFFSET_LENGTH; - int SIZE_LENGTH = 4; - int LOG_OVERHEAD = SIZE_OFFSET + SIZE_LENGTH; - - // the magic offset is at the same offset for all current message formats, but the 4 bytes - // between the size and the magic is dependent on the version. - int MAGIC_OFFSET = 16; - int MAGIC_LENGTH = 1; - int HEADER_SIZE_UP_TO_MAGIC = MAGIC_OFFSET + MAGIC_LENGTH; - - /** - * Get the record batches. Note that the signature allows subclasses - * to return a more specific batch type. This enables optimizations such as in-place offset - * assignment (see for example {@link DefaultRecordBatch}), and partial reading of - * record data (see {@link FileLogInputStream.FileChannelRecordBatch#magic()}. - * @return An iterator over the record batches of the log - */ - Iterable batches(); - - /** - * Get an iterator over the record batches. This is similar to {@link #batches()} but returns an {@link AbstractIterator} - * instead of {@link Iterator}, so that clients can use methods like {@link AbstractIterator#peek() peek}. - * @return An iterator over the record batches of the log - */ - AbstractIterator batchIterator(); - - /** - * Check whether all batches in this buffer have a certain magic value. - * @param magic The magic value to check - * @return true if all record batches have a matching magic value, false otherwise - */ - boolean hasMatchingMagic(byte magic); - - /** - * Check whether this log buffer has a magic value compatible with a particular value - * (i.e. whether all message sets contained in the buffer have a matching or lower magic). - * @param magic The magic version to ensure compatibility with - * @return true if all batches have compatible magic, false otherwise - */ - boolean hasCompatibleMagic(byte magic); - - /** - * Get an iterator over the records in this log. Note that this generally requires decompression, - * and should therefore be used with care. - * @return The record iterator - */ - Iterable records(); -} diff --git a/clients/src/main/java/org/oracle/okafka/common/record/SimpleRecord.java b/clients/src/main/java/org/oracle/okafka/common/record/SimpleRecord.java deleted file mode 100644 index e82bb05..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/record/SimpleRecord.java +++ /dev/null @@ -1,126 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.oracle.okafka.common.record; - -import java.nio.ByteBuffer; -import java.util.Arrays; -import java.util.Objects; - -import org.oracle.okafka.common.header.Header; -import org.oracle.okafka.common.utils.Utils; - -/** - * High-level representation of a kafka record. This is useful when building record sets to - * avoid depending on a specific magic version. - */ -public class SimpleRecord { - private final ByteBuffer key; - private final ByteBuffer value; - private final long timestamp; - private final Header[] headers; - - public SimpleRecord(long timestamp, ByteBuffer key, ByteBuffer value, Header[] headers) { - Objects.requireNonNull(headers, "Headers must be non-null"); - this.key = key; - this.value = value; - this.timestamp = timestamp; - this.headers = headers; - } - - public SimpleRecord(long timestamp, byte[] key, byte[] value, Header[] headers) { - this(timestamp, Utils.wrapNullable(key), Utils.wrapNullable(value), headers); - } - - public SimpleRecord(long timestamp, ByteBuffer key, ByteBuffer value) { - this(timestamp, key, value, Record.EMPTY_HEADERS); - } - - public SimpleRecord(long timestamp, byte[] key, byte[] value) { - this(timestamp, Utils.wrapNullable(key), Utils.wrapNullable(value)); - } - - public SimpleRecord(long timestamp, byte[] value) { - this(timestamp, null, value); - } - - public SimpleRecord(byte[] value) { - this(RecordBatch.NO_TIMESTAMP, null, value); - } - - public SimpleRecord(byte[] key, byte[] value) { - this(RecordBatch.NO_TIMESTAMP, key, value); - } - - public SimpleRecord(Record record) { - this(record.timestamp(), record.key(), record.value(), record.headers()); - } - - public ByteBuffer key() { - return key; - } - - public ByteBuffer value() { - return value; - } - - public long timestamp() { - return timestamp; - } - - public Header[] headers() { - return headers; - } - - @Override - public boolean equals(Object o) { - if (this == o) - return true; - if (o == null || getClass() != o.getClass()) - return false; - - SimpleRecord that = (SimpleRecord) o; - return timestamp == that.timestamp && - (key == null ? that.key == null : key.equals(that.key)) && - (value == null ? that.value == null : value.equals(that.value)) && - Arrays.equals(headers, that.headers); - } - - @Override - public int hashCode() { - int result = key != null ? key.hashCode() : 0; - result = 31 * result + (value != null ? value.hashCode() : 0); - result = 31 * result + (int) (timestamp ^ (timestamp >>> 32)); - result = 31 * result + Arrays.hashCode(headers); - return result; - } - - @Override - public String toString() { - return String.format("SimpleRecord(timestamp=%d, key=%d bytes, value=%d bytes)", - timestamp(), - key == null ? 0 : key.limit(), - value == null ? 0 : value.limit()); - } -} diff --git a/clients/src/main/java/org/oracle/okafka/common/record/TimestampType.java b/clients/src/main/java/org/oracle/okafka/common/record/TimestampType.java deleted file mode 100644 index 44d3448..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/record/TimestampType.java +++ /dev/null @@ -1,54 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.record; - -import java.util.NoSuchElementException; - -/** - * The timestamp type of the records. - */ -public enum TimestampType { - NO_TIMESTAMP_TYPE(-1, "NoTimestampType"), CREATE_TIME(0, "CreateTime"), LOG_APPEND_TIME(1, "LogAppendTime"); - - public final int id; - public final String name; - - TimestampType(int id, String name) { - this.id = id; - this.name = name; - } - - public static TimestampType forName(String name) { - for (TimestampType t : values()) - if (t.name.equals(name)) - return t; - throw new NoSuchElementException("Invalid timestamp type " + name); - } - - @Override - public String toString() { - return name; - } -} \ No newline at end of file diff --git a/clients/src/main/java/org/oracle/okafka/common/requests/AbstractRequest.java b/clients/src/main/java/org/oracle/okafka/common/requests/AbstractRequest.java index fb0c323..a086cb7 100644 --- a/clients/src/main/java/org/oracle/okafka/common/requests/AbstractRequest.java +++ b/clients/src/main/java/org/oracle/okafka/common/requests/AbstractRequest.java @@ -1,7 +1,7 @@ /* -** OKafka Java Client version 0.8. +** OKafka Java Client version 23.4. ** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. +** Copyright (c) 2019, 2024 Oracle and/or its affiliates. ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. */ @@ -31,16 +31,24 @@ import org.oracle.okafka.common.protocol.ApiKeys; -public abstract class AbstractRequest { +public abstract class AbstractRequest extends org.apache.kafka.common.requests.AbstractRequest{ - public static abstract class Builder { + public AbstractRequest(ApiKeys apiKey, short version) + { + super(ApiKeys.convertToApacheKafkaKey(apiKey), version); + } + + public static abstract class Builder extends org.apache.kafka.common.requests.AbstractRequest.Builder + { private final ApiKeys apiKey; - public Builder(ApiKeys apiKey) { + + public Builder(ApiKeys apiKey) { + super(ApiKeys.convertToApacheKafkaKey(apiKey), (short)1); this.apiKey = apiKey; } - public ApiKeys apiKey() { + public ApiKeys apiKeyOKafka() { return apiKey; } diff --git a/clients/src/main/java/org/oracle/okafka/common/requests/AbstractResponse.java b/clients/src/main/java/org/oracle/okafka/common/requests/AbstractResponse.java index 8f1476f..0960c07 100644 --- a/clients/src/main/java/org/oracle/okafka/common/requests/AbstractResponse.java +++ b/clients/src/main/java/org/oracle/okafka/common/requests/AbstractResponse.java @@ -1,7 +1,7 @@ /* -** OKafka Java Client version 0.8. +** OKafka Java Client version 23.4. ** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. +** Copyright (c) 2019, 2024 Oracle and/or its affiliates. ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
*/ @@ -29,6 +29,13 @@ package org.oracle.okafka.common.requests; -public abstract class AbstractResponse { +import org.oracle.okafka.common.protocol.ApiKeys; +public abstract class AbstractResponse extends org.apache.kafka.common.requests.AbstractResponse { + ApiKeys apiKey; + + protected AbstractResponse(ApiKeys apiKey) { + super(ApiKeys.convertToApacheKafkaKey(apiKey)); + this.apiKey = apiKey; + } } diff --git a/clients/src/main/java/org/oracle/okafka/common/requests/CommitRequest.java b/clients/src/main/java/org/oracle/okafka/common/requests/CommitRequest.java index 853609b..0352078 100644 --- a/clients/src/main/java/org/oracle/okafka/common/requests/CommitRequest.java +++ b/clients/src/main/java/org/oracle/okafka/common/requests/CommitRequest.java @@ -1,7 +1,7 @@ /* -** OKafka Java Client version 0.8. +** OKafka Java Client version 23.4. ** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. +** Copyright (c) 2019, 2024 Oracle and/or its affiliates. ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. */ @@ -10,26 +10,28 @@ import java.util.List; import java.util.Map; -import org.oracle.okafka.clients.consumer.OffsetAndMetadata; +import org.apache.kafka.clients.consumer.OffsetAndMetadata; import org.oracle.okafka.common.Node; -import org.oracle.okafka.common.TopicPartition; +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.protocol.ApiMessage; +import org.apache.kafka.common.requests.AbstractResponse; import org.oracle.okafka.common.protocol.ApiKeys; public class CommitRequest extends AbstractRequest { -public static class Builder extends AbstractRequest.Builder { +public static class Builder extends AbstractRequest.Builder { - private final Map> nodes; + private final Map> nodeTPMap; private final Map offsetAndMetadata; - public Builder(Map> nodes, Map offsetAndMetadata) { + public Builder(Map> _nodeTPMap, Map offsetAndMetadata) { super(ApiKeys.COMMIT); - this.nodes = nodes; + this.nodeTPMap = _nodeTPMap; this.offsetAndMetadata = offsetAndMetadata; } @Override public CommitRequest build() { - return new CommitRequest(nodes, offsetAndMetadata); + return new CommitRequest(nodeTPMap, offsetAndMetadata); } @Override @@ -39,22 +41,40 @@ public String toString() { append(")"); return bld.toString(); } + + @Override + public CommitRequest build(short version) { + return new CommitRequest(nodeTPMap, offsetAndMetadata); + } } -private final Map> nodes; +private final Map> nodeTPMap; private final Map offsetAndMetadata; - private CommitRequest(Map> nodes, Map offsetAndMetadata) { - this.nodes = nodes; + private CommitRequest(Map> _nodeTPMap, Map offsetAndMetadata) { + super(ApiKeys.COMMIT,(short)1); + this.nodeTPMap = _nodeTPMap; this.offsetAndMetadata = offsetAndMetadata; } public Map> nodes() { - return this.nodes; + return this.nodeTPMap; } public Map offsets() { return this.offsetAndMetadata; } + @Override + public ApiMessage data() { + // TODO Auto-generated method stub + return null; + } + + @Override + public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) { + // TODO Auto-generated method stub + return null; + } + } diff --git a/clients/src/main/java/org/oracle/okafka/common/requests/CommitResponse.java b/clients/src/main/java/org/oracle/okafka/common/requests/CommitResponse.java index b010911..e3e5680 100644 --- a/clients/src/main/java/org/oracle/okafka/common/requests/CommitResponse.java +++ b/clients/src/main/java/org/oracle/okafka/common/requests/CommitResponse.java @@ -1,7 
+1,7 @@ /* -** OKafka Java Client version 0.8. +** OKafka Java Client version 23.4. ** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. +** Copyright (c) 2019, 2024 Oracle and/or its affiliates. ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. */ @@ -10,41 +10,68 @@ import java.util.List; import java.util.Map; -import org.oracle.okafka.clients.consumer.OffsetAndMetadata; +import org.apache.kafka.clients.consumer.OffsetAndMetadata; import org.oracle.okafka.common.Node; -import org.oracle.okafka.common.TopicPartition; +import org.oracle.okafka.common.errors.FeatureNotSupportedException; +import org.oracle.okafka.common.protocol.ApiKeys; +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.protocol.ApiMessage; +import org.apache.kafka.common.protocol.Errors; public class CommitResponse extends AbstractResponse { - + private final boolean error; private final Map result; private final Map> nodes; private final Map offsets; - + public CommitResponse(Map result, Map> nodes, - Map offsets, boolean error) { + Map offsets, boolean error) { + super(ApiKeys.COMMIT); this.result = result; this.nodes = nodes; this.offsets = offsets; this.error = error; - + } - + public Map getResult() { return result; } - + public Map> getNodes() { return nodes; } - + public Map offsets() { return offsets; } - + public boolean error() { return error; } - + + @Override + public ApiMessage data() { + // TODO Auto-generated method stub + return null; + } + + @Override + public Map errorCounts() { + // TODO Auto-generated method stub + return null; + } + + @Override + public int throttleTimeMs() { + // TODO Auto-generated method stub + return 0; + } + + @Override + public void maybeSetThrottleTimeMs(int arg0) { + throw new FeatureNotSupportedException("This feature is not suported for this release."); + } } diff --git a/clients/src/main/java/org/oracle/okafka/common/requests/ConnectMeRequest.java b/clients/src/main/java/org/oracle/okafka/common/requests/ConnectMeRequest.java new file mode 100644 index 0000000..d05cf1d --- /dev/null +++ b/clients/src/main/java/org/oracle/okafka/common/requests/ConnectMeRequest.java @@ -0,0 +1,71 @@ +package org.oracle.okafka.common.requests; + +import org.apache.kafka.common.protocol.ApiMessage; +import org.apache.kafka.common.requests.AbstractResponse; +import org.oracle.okafka.common.protocol.ApiKeys; + +public class ConnectMeRequest extends AbstractRequest { + + private String schemaName; + private String topicName; + private String groupId; + + public static class Builder extends AbstractRequest.Builder + { + private String schemaName; + private String topicName; + private String groupId; + + public Builder(String _schemaName , String _topicName, String _groupId) + { + super(ApiKeys.CONNECT_ME); + this.schemaName = _schemaName; + this.topicName = _topicName; + this.groupId = _groupId; + } + + public ConnectMeRequest build() + { + return new ConnectMeRequest(this.schemaName,this.topicName,this.groupId); + } + + @Override + public ConnectMeRequest build(short version) { + return new ConnectMeRequest(this.schemaName,this.topicName,this.groupId); + } + } + + public ConnectMeRequest(String _schemaName , String _topicName, String _groupId) + { + super(ApiKeys.CONNECT_ME,(short)1); + this.schemaName = _schemaName; + this.topicName = _topicName; + this.groupId = _groupId; + } + + public String getSchemaName() + { + return schemaName; + } + public String getToipcName() + { + return topicName; + } + 
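    /*
     * A minimal usage sketch (annotation, not part of this patch): the Builder above
     * follows the same pattern as the other OKafka request builders shown in this diff,
     * so a caller would assemble the request roughly as:
     *
     *   ConnectMeRequest connectMe =
     *       new ConnectMeRequest.Builder("KAFKA_USER", "MY_TOPIC", "MY_GROUP").build();
     *   String topic = connectMe.getToipcName();   // accessor as declared above
     *
     * The schema name, topic name and group id here are placeholder values, not values
     * taken from this patch.
     */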
public String getGroupId() + { + return groupId; + } + + @Override + public ApiMessage data() { + // TODO Auto-generated method stub + return null; + } + + @Override + public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) { + // TODO Auto-generated method stub + return null; + } + +} diff --git a/clients/src/main/java/org/oracle/okafka/common/requests/ConnectMeResponse.java b/clients/src/main/java/org/oracle/okafka/common/requests/ConnectMeResponse.java new file mode 100644 index 0000000..9d50575 --- /dev/null +++ b/clients/src/main/java/org/oracle/okafka/common/requests/ConnectMeResponse.java @@ -0,0 +1,288 @@ +package org.oracle.okafka.common.requests; + +import java.math.BigDecimal; +import java.util.ArrayList; +import java.util.Map; +import java.util.StringTokenizer; + +import org.apache.kafka.common.protocol.ApiMessage; +import org.apache.kafka.common.protocol.Errors; +import org.oracle.okafka.common.Node; +import org.oracle.okafka.common.errors.FeatureNotSupportedException; +import org.oracle.okafka.common.network.AQClient; +import org.oracle.okafka.common.protocol.ApiKeys; + +public class ConnectMeResponse extends AbstractResponse { + + class DBListener { + String protocol; + String host; + int port; + boolean local; + } + + int instId; + String instanceName; + int flags; + String url; + ArrayList serviceNames; + ArrayList localListeners; + ArrayList partitionList; + ArrayList dbListenerList; + ArrayList nodeList; + Node preferredNode; + + public ConnectMeResponse() { + super(ApiKeys.CONNECT_ME); + partitionList = new ArrayList(); + } + + public int getInstId() { + return instId; + } + + public void setInstId(int instId) { + this.instId = instId; + } + + public String getInstanceName() { + return instanceName; + } + + public void setInstanceName(String instanceName) { + this.instanceName = instanceName; + } + + public int getFlags() { + return flags; + } + + public void setFlags(int flags) { + this.flags = flags; + } + + public String getUrl() { + return url; + } + + public Node getPreferredNode() { + return preferredNode; + } + + public void setUrl(String url) { + this.url = url; + + } + + public void setPreferredNode(Node _node) { + this.preferredNode = _node; + } + + public ArrayList processUrl() { + /* + * Expected Url is in JSON format. 
Below is one example: + * {"INSTANCE_NAME":"v1","SERVICE_NAME":[ + * "CDB1_PDB1_I1.regress.rdbms.dev.us.oracle.com", + * "cdb1_pdb1.regress.rdbms.dev.us.oracle.com"],"LOCAL_LISTENER":[ + * "(ADDRESS=(PROTOCOL=ipc)(KEY=v1))", + * "(ADDRESS=(PROTOCOL=tcp)(HOST=phoenix94147)(PORT=1521))"],"NEW_PARA": + * "NEW_PARA_VALUE","NEW_PARA_2":["VALUE 1","Value 2"]} + */ + + if (url == null) + return nodeList; + + String strippedUrl = url.replace('{', ' '); + strippedUrl = strippedUrl.replace('}', ' '); + strippedUrl = strippedUrl.trim(); + + ArrayList values = new ArrayList(); + + StringTokenizer stn = new StringTokenizer(strippedUrl, ":,[]", true); + while (stn.hasMoreTokens()) { + String tokenNow = stn.nextToken(); + String seperator = stn.nextToken(); + // System.out.println("Token Now " + tokenNow +" Seperator " + seperator); + String tokenName, tokenValue; + boolean multiples = false; + if (seperator.equals(":")) { + tokenNow = tokenNow.trim(); + tokenName = tokenNow.substring(1, tokenNow.length() - 1); + multiples = false; + // Next token decides if this is a stand-alone value or multiples + tokenValue = stn.nextToken(); + if (tokenValue.equals("[")) { + boolean valuesDone = false; + multiples = true; + values.clear(); + do { + tokenValue = stn.nextToken(); + seperator = stn.nextToken(); + // System.out.println("Multiples: TokenValue : " + tokenValue + " seperator= " + + // seperator); + if (seperator.equals("]")) { + valuesDone = true; + if (stn.hasMoreTokens()) { + String endDelim = stn.nextToken(); + // System.out.println("Multiples : ENd Delim " + endDelim); + } + } + tokenValue = tokenValue.trim(); + tokenValue = tokenValue.substring(1, tokenValue.length() - 1); + values.add(tokenValue); + } while (!valuesDone && stn.hasMoreTokens()); + } else // Single Value + { + tokenValue = tokenValue.trim(); + tokenValue = tokenValue.substring(1, tokenValue.length() - 1); + + // System.out.println("Token KEY= '"+tokenName+"' Token Value ='" + tokenValue); + if (stn.hasMoreTokens()) { + seperator = stn.nextToken(); + } + } + + // System.out.println("TokenName " + tokenName); + if (tokenName.equalsIgnoreCase("INSTANCE_NAME")) { + if (!multiples) { + instanceName = tokenValue; + } + } else if (tokenName.equalsIgnoreCase("SERVICE_NAME")) { + serviceNames = new ArrayList(); + if (!multiples) { + serviceNames.add(tokenValue); + } else { + serviceNames.addAll(values); + } + } else if (tokenName.equalsIgnoreCase("LOCAL_LISTENER")) { + localListeners = new ArrayList(); + if (!multiples) { + localListeners.add(tokenValue); + } else { + localListeners.addAll(values); + } + } + } + } + /* + * System.out.println("Instance Name " + instanceName); if(serviceNames != null) + * { for(String service: serviceNames) { System.out.println("Service " + + * service); } } + */ + + if (localListeners != null) { + dbListenerList = new ArrayList(localListeners.size()); + for (String listener : localListeners) { + DBListener dbListener = parseLocalListener(listener); + if (dbListener != null) + dbListenerList.add(dbListener); + } + prepareNodeList(); + } + + return nodeList; + } + + // Ad-Hoc processing of LISTENER STRING + private DBListener parseLocalListener(String listener) { + DBListener dbListener = null; + try { + dbListener = new DBListener(); + String str = listener; + + StringBuilder sb = new StringBuilder(); + for (int ind = 0; ind < str.length(); ind++) + if (str.charAt(ind) != ' ') + sb.append(str.charAt(ind)); + str = sb.toString(); + String protocol = AQClient.getProperty(str, "PROTOCOL"); + String host = 
AQClient.getProperty(str, "HOST"); + ; + Integer port = Integer.parseInt(AQClient.getProperty(str, "PORT")); + + dbListener.host = host; + dbListener.port = port; + dbListener.protocol = protocol; + } catch (Exception e) { + } + return dbListener; + } + + private void prepareNodeList() { + if (dbListenerList == null || serviceNames == null) + return; + + if (nodeList == null) + nodeList = new ArrayList(); + else + nodeList.clear(); + + for (DBListener listenerNow : dbListenerList) { + if (listenerNow.protocol == null) + continue; + + if (listenerNow.protocol.equalsIgnoreCase("TCP") || listenerNow.protocol.equalsIgnoreCase("TCPS")) { + for (String servcieName : serviceNames) { + Node newNode = new Node(instId, listenerNow.host, listenerNow.port, servcieName, instanceName); + newNode.setProtocol(listenerNow.protocol); + nodeList.add(newNode); + } + } + } + } + + public ArrayList getServiceNames() { + return serviceNames; + } + + public void setServiceNames(ArrayList serviceNames) { + this.serviceNames = serviceNames; + } + + public ArrayList getLocalListeners() { + return localListeners; + } + + public void setLocalListeners(ArrayList localListeners) { + this.localListeners = localListeners; + } + + public ArrayList getPartitionList() { + return partitionList; + } + + public void setPartitionList(BigDecimal[] partitionArray) { + this.partitionList.clear(); + for (BigDecimal pId : partitionArray) { + this.partitionList.add(pId.intValue()); + } + } + + public void setPartitionList(ArrayList partitionList) { + this.partitionList = partitionList; + } + + @Override + public ApiMessage data() { + // TODO Auto-generated method stub + return null; + } + + @Override + public Map errorCounts() { + // TODO Auto-generated method stub + return null; + } + + @Override + public int throttleTimeMs() { + // TODO Auto-generated method stub + return 0; + } + + @Override + public void maybeSetThrottleTimeMs(int arg0) { + throw new FeatureNotSupportedException("This feature is not suported for this release."); + } + +} diff --git a/clients/src/main/java/org/oracle/okafka/common/requests/CreateTopicsRequest.java b/clients/src/main/java/org/oracle/okafka/common/requests/CreateTopicsRequest.java index a4dce65..c0d4427 100644 --- a/clients/src/main/java/org/oracle/okafka/common/requests/CreateTopicsRequest.java +++ b/clients/src/main/java/org/oracle/okafka/common/requests/CreateTopicsRequest.java @@ -1,7 +1,7 @@ /* -** OKafka Java Client version 0.8. +** OKafka Java Client version 23.4. ** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. +** Copyright (c) 2019, 2024 Oracle and/or its affiliates. ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
*/ @@ -34,6 +34,8 @@ import java.util.Map; import java.util.Set; +import org.apache.kafka.common.protocol.ApiMessage; +import org.apache.kafka.common.requests.AbstractResponse; import org.oracle.okafka.common.protocol.ApiKeys; public class CreateTopicsRequest extends AbstractRequest { @@ -116,6 +118,11 @@ public String toString() { append(")"); return bld.toString(); } + + @Override + public CreateTopicsRequest build(short version) { + return new CreateTopicsRequest(topics, timeout, validateOnly); + } } private final Map topics; @@ -130,6 +137,7 @@ public String toString() { public static final short NO_REPLICATION_FACTOR = -1; private CreateTopicsRequest(Map topics, Integer timeout, boolean validateOnly) { + super(ApiKeys.CREATE_TOPICS,(short)1); this.topics = topics; this.timeout = timeout; this.validateOnly = validateOnly; @@ -151,4 +159,16 @@ public boolean validateOnly() { public Set duplicateTopics() { return this.duplicateTopics; } + + @Override + public ApiMessage data() { + // TODO Auto-generated method stub + return null; + } + + @Override + public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) { + // TODO Auto-generated method stub + return null; + } } diff --git a/clients/src/main/java/org/oracle/okafka/common/requests/CreateTopicsResponse.java b/clients/src/main/java/org/oracle/okafka/common/requests/CreateTopicsResponse.java index c4af966..5a21d52 100644 --- a/clients/src/main/java/org/oracle/okafka/common/requests/CreateTopicsResponse.java +++ b/clients/src/main/java/org/oracle/okafka/common/requests/CreateTopicsResponse.java @@ -1,7 +1,7 @@ /* -** OKafka Java Client version 0.8. +** OKafka Java Client version 23.4. ** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. +** Copyright (c) 2019, 2024 Oracle and/or its affiliates. ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. */ @@ -31,11 +31,17 @@ import java.util.Map; +import org.apache.kafka.common.protocol.ApiMessage; +import org.apache.kafka.common.protocol.Errors; +import org.oracle.okafka.common.errors.FeatureNotSupportedException; +import org.oracle.okafka.common.protocol.ApiKeys; + public class CreateTopicsResponse extends AbstractResponse { final Map errors; private Exception requestResult; public CreateTopicsResponse(Map errors) { + super(ApiKeys.CREATE_TOPICS); this.errors = errors; this.requestResult = null; } @@ -54,4 +60,27 @@ public Exception getResult() { return requestResult; } + @Override + public ApiMessage data() { + // TODO Auto-generated method stub + return null; + } + + @Override + public Map errorCounts() { + // TODO Auto-generated method stub + return null; + } + + @Override + public int throttleTimeMs() { + // TODO Auto-generated method stub + return 0; + } + + @Override + public void maybeSetThrottleTimeMs(int arg0) { + throw new FeatureNotSupportedException("This feature is not suported for this release."); + } + } diff --git a/clients/src/main/java/org/oracle/okafka/common/requests/DeleteTopicsRequest.java b/clients/src/main/java/org/oracle/okafka/common/requests/DeleteTopicsRequest.java index 29fe4fa..2342b56 100644 --- a/clients/src/main/java/org/oracle/okafka/common/requests/DeleteTopicsRequest.java +++ b/clients/src/main/java/org/oracle/okafka/common/requests/DeleteTopicsRequest.java @@ -1,7 +1,7 @@ /* -** OKafka Java Client version 0.8. +** OKafka Java Client version 23.4. ** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. +** Copyright (c) 2019, 2024 Oracle and/or its affiliates. 
** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. */ @@ -32,11 +32,12 @@ import java.util.Set; import org.oracle.okafka.common.protocol.ApiKeys; -import org.oracle.okafka.common.utils.Utils; +import org.apache.kafka.common.protocol.ApiMessage; +import org.apache.kafka.common.requests.AbstractResponse; +import org.apache.kafka.common.utils.Utils; public class DeleteTopicsRequest extends AbstractRequest { - private final Set topics; private final Integer timeout; @@ -54,7 +55,12 @@ public Builder(Set topics, Integer timeout) { public DeleteTopicsRequest build() { return new DeleteTopicsRequest(topics, timeout); } - + + @Override + public DeleteTopicsRequest build(short version) { + return new DeleteTopicsRequest(topics, timeout); + } + @Override public String toString() { StringBuilder bld = new StringBuilder(); @@ -64,9 +70,12 @@ public String toString() { append(")"); return bld.toString(); } + + } private DeleteTopicsRequest(Set topics, Integer timeout) { + super(ApiKeys.DELETE_TOPICS, (short)1); this.topics = topics; this.timeout = timeout; } @@ -79,4 +88,16 @@ public Integer timeout() { return this.timeout; } + @Override + public ApiMessage data() { + // TODO Auto-generated method stub + return null; + } + + @Override + public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) { + // TODO Auto-generated method stub + return null; + } + } diff --git a/clients/src/main/java/org/oracle/okafka/common/requests/DeleteTopicsResponse.java b/clients/src/main/java/org/oracle/okafka/common/requests/DeleteTopicsResponse.java index 42590a8..0544ce6 100644 --- a/clients/src/main/java/org/oracle/okafka/common/requests/DeleteTopicsResponse.java +++ b/clients/src/main/java/org/oracle/okafka/common/requests/DeleteTopicsResponse.java @@ -1,7 +1,7 @@ /* -** OKafka Java Client version 0.8. +** OKafka Java Client version 23.4. ** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. +** Copyright (c) 2019, 2024 Oracle and/or its affiliates. ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
*/ @@ -32,12 +32,18 @@ import java.sql.SQLException; import java.util.Map; +import org.apache.kafka.common.protocol.ApiMessage; +import org.apache.kafka.common.protocol.Errors; +import org.oracle.okafka.common.errors.FeatureNotSupportedException; +import org.oracle.okafka.common.protocol.ApiKeys; + public class DeleteTopicsResponse extends AbstractResponse { private final Map errors; private Exception requestResult; public DeleteTopicsResponse(Map errors) { + super(ApiKeys.DELETE_TOPICS); this.errors = errors; this.requestResult = null; } @@ -55,4 +61,27 @@ public void setResult(Exception ex) { public Exception getResult() { return requestResult; } + + @Override + public ApiMessage data() { + // TODO Auto-generated method stub + return null; + } + + @Override + public Map errorCounts() { + // TODO Auto-generated method stub + return null; + } + + @Override + public int throttleTimeMs() { + // TODO Auto-generated method stub + return 0; + } + + @Override + public void maybeSetThrottleTimeMs(int arg0) { + throw new FeatureNotSupportedException("This feature is not suported for this release."); + } } diff --git a/clients/src/main/java/org/oracle/okafka/common/requests/FetchRequest.java b/clients/src/main/java/org/oracle/okafka/common/requests/FetchRequest.java index 9c640f2..7a558a2 100644 --- a/clients/src/main/java/org/oracle/okafka/common/requests/FetchRequest.java +++ b/clients/src/main/java/org/oracle/okafka/common/requests/FetchRequest.java @@ -1,7 +1,7 @@ /* -** OKafka Java Client version 0.8. +** OKafka Java Client version 23.4. ** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. +** Copyright (c) 2019, 2024 Oracle and/or its affiliates. ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. */ @@ -29,6 +29,8 @@ package org.oracle.okafka.common.requests; +import org.apache.kafka.common.protocol.ApiMessage; +import org.apache.kafka.common.requests.AbstractResponse; import org.oracle.okafka.common.protocol.ApiKeys; public class FetchRequest extends AbstractRequest { @@ -57,11 +59,17 @@ public String toString() { append(")"); return bld.toString(); } + + @Override + public FetchRequest build(short version) { + return build(); + } } private final String topic; private final long pollTimeoutMs; private FetchRequest(String topic, long pollTimeoutMs) { + super(ApiKeys.FETCH, (short)1); this.topic = topic; this.pollTimeoutMs = pollTimeoutMs; } @@ -73,4 +81,16 @@ public String topic() { public long pollTimeout() { return this.pollTimeoutMs; } + + @Override + public ApiMessage data() { + // TODO Auto-generated method stub + return null; + } + + @Override + public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) { + // TODO Auto-generated method stub + return null; + } } diff --git a/clients/src/main/java/org/oracle/okafka/common/requests/FetchResponse.java b/clients/src/main/java/org/oracle/okafka/common/requests/FetchResponse.java index 06c2fe7..f3cbe96 100644 --- a/clients/src/main/java/org/oracle/okafka/common/requests/FetchResponse.java +++ b/clients/src/main/java/org/oracle/okafka/common/requests/FetchResponse.java @@ -1,7 +1,7 @@ /* -** OKafka Java Client version 0.8. +** OKafka Java Client version 23.4. ** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. +** Copyright (c) 2019, 2024 Oracle and/or its affiliates. ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
*/ @@ -32,14 +32,23 @@ import oracle.jms.AQjmsBytesMessage; import java.util.List; +import java.util.Map; + +import org.apache.kafka.common.protocol.ApiMessage; +import org.apache.kafka.common.protocol.Errors; +import org.oracle.okafka.common.errors.FeatureNotSupportedException; +import org.oracle.okafka.common.protocol.ApiKeys; public class FetchResponse extends AbstractResponse { private final String topic; private final List messages; + private final Exception exception; - public FetchResponse(String topic, List messages) { + public FetchResponse(String topic, List messages, Exception exception) { + super(ApiKeys.FETCH); this.topic = topic; this.messages = messages; + this.exception = exception; } public String topic() { @@ -49,5 +58,32 @@ public String topic() { public List getMessages() { return this.messages; } + + public Exception getException() { + return this.exception; + } + + @Override + public ApiMessage data() { + // TODO Auto-generated method stub + return null; + } + + @Override + public Map errorCounts() { + // TODO Auto-generated method stub + return null; + } + + @Override + public int throttleTimeMs() { + // TODO Auto-generated method stub + return 0; + } + + @Override + public void maybeSetThrottleTimeMs(int arg0) { + throw new FeatureNotSupportedException("This feature is not suported for this release."); + } } diff --git a/clients/src/main/java/org/oracle/okafka/common/requests/IsolationLevel.java b/clients/src/main/java/org/oracle/okafka/common/requests/IsolationLevel.java index 5149965..82be32a 100644 --- a/clients/src/main/java/org/oracle/okafka/common/requests/IsolationLevel.java +++ b/clients/src/main/java/org/oracle/okafka/common/requests/IsolationLevel.java @@ -1,7 +1,7 @@ /* -** OKafka Java Client version 0.8. +** OKafka Java Client version 23.4. ** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. +** Copyright (c) 2019, 2024 Oracle and/or its affiliates. ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
*/ diff --git a/clients/src/main/java/org/oracle/okafka/common/requests/JoinGroupRequest.java b/clients/src/main/java/org/oracle/okafka/common/requests/JoinGroupRequest.java new file mode 100644 index 0000000..94582d1 --- /dev/null +++ b/clients/src/main/java/org/oracle/okafka/common/requests/JoinGroupRequest.java @@ -0,0 +1,61 @@ +package org.oracle.okafka.common.requests; + +import org.apache.kafka.common.protocol.ApiMessage; +import org.apache.kafka.common.requests.AbstractResponse; +import org.oracle.okafka.common.internals.SessionData; +import org.oracle.okafka.common.protocol.ApiKeys; + +public class JoinGroupRequest extends AbstractRequest { + + public static class Builder extends AbstractRequest.Builder { + private SessionData sessionData; + + public Builder(SessionData sessionData) { + super(ApiKeys.JOIN_GROUP); + this.sessionData = sessionData; + } + + @Override + public JoinGroupRequest build() { + return new JoinGroupRequest(sessionData); + } + + @Override + public String toString() { + StringBuilder bld = new StringBuilder(); + bld.append("(type=joinGroupRequest") + .append(")"); + return bld.toString(); + } + + @Override + public JoinGroupRequest build(short version) { + return build(); + } + + } + + private SessionData sessionData; + public JoinGroupRequest(SessionData sessionData ) { + super(ApiKeys.JOIN_GROUP, (short)1); + this.sessionData = sessionData; + + } + + public SessionData getSessionData() { + return this.sessionData; + } + + @Override + public ApiMessage data() { + // TODO Auto-generated method stub + return null; + } + + @Override + public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) { + // TODO Auto-generated method stub + return null; + } + +} diff --git a/clients/src/main/java/org/oracle/okafka/common/requests/JoinGroupResponse.java b/clients/src/main/java/org/oracle/okafka/common/requests/JoinGroupResponse.java new file mode 100644 index 0000000..3b9aa0f --- /dev/null +++ b/clients/src/main/java/org/oracle/okafka/common/requests/JoinGroupResponse.java @@ -0,0 +1,98 @@ +package org.oracle.okafka.common.requests; + +import java.util.List; +import java.util.Map; + +import org.oracle.okafka.common.errors.FeatureNotSupportedException; +import org.oracle.okafka.common.internals.PartitionData; +import org.oracle.okafka.common.internals.SessionData; +import org.oracle.okafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ApiMessage; +import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.utils.LogContext; +import org.slf4j.Logger; + +public class JoinGroupResponse extends AbstractResponse { + private Map sessionData; + private List partitions; + private int leader; + private int version; + private Exception exception; + protected final Logger log ; + + public JoinGroupResponse(Map sessionData, List partitions, int leader, int version, Exception exception) { + super(ApiKeys.JOIN_GROUP); + this.sessionData= sessionData; + this.partitions = partitions; + this.leader = leader; + this.version = version; + this.exception = exception; + LogContext logContext = new LogContext("[AQ$_JOIN_GROUP:]"); + this.log = logContext.logger(JoinGroupResponse.class) ; + + log.debug("QPAT:"); + + for(String mapSessionDataKeyNow : sessionData.keySet() ) + { + log.debug("MapSessionDataKey " + mapSessionDataKeyNow ); + SessionData sessionDataNow = sessionData.get(mapSessionDataKeyNow); + log.debug("Session Data Now: " + sessionDataNow.toString()); + } + + if(partitions != null) + { + log.debug("QPIM:"); + for(PartitionData 
pData: partitions) + { + log.debug("PData: " + pData); + } + }else + { + log.debug("QPIM: NULL"); + } + log.debug("Leader = " +leader +", Version: " + version ); + } + + public Map getSessionData() { + return this.sessionData; + } + + public List partitions() { + return this.partitions; + } + + public int leader() { + return this.leader; + } + + public int version() { + return this.version; + } + + public Exception getException() { + return this.exception; + } + + @Override + public ApiMessage data() { + // TODO Auto-generated method stub + return null; + } + + @Override + public Map errorCounts() { + // TODO Auto-generated method stub + return null; + } + + @Override + public int throttleTimeMs() { + // TODO Auto-generated method stub + return 0; + } + + @Override + public void maybeSetThrottleTimeMs(int arg0) { + throw new FeatureNotSupportedException("This feature is not supported for this release."); + } +} diff --git a/clients/src/main/java/org/oracle/okafka/common/requests/MetadataRequest.java b/clients/src/main/java/org/oracle/okafka/common/requests/MetadataRequest.java index 4b10213..14599e5 100644 --- a/clients/src/main/java/org/oracle/okafka/common/requests/MetadataRequest.java +++ b/clients/src/main/java/org/oracle/okafka/common/requests/MetadataRequest.java @@ -1,7 +1,7 @@ /* -** OKafka Java Client version 0.8. +** OKafka Java Client version 23.4. ** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. +** Copyright (c) 2019, 2024 Oracle and/or its affiliates. ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. */ @@ -32,22 +32,26 @@ import java.util.List; import org.oracle.okafka.common.protocol.ApiKeys; -import org.oracle.okafka.common.utils.Utils; +import org.apache.kafka.common.protocol.ApiMessage; +import org.apache.kafka.common.requests.AbstractResponse; +import org.apache.kafka.common.utils.Utils; public class MetadataRequest extends AbstractRequest { public static class Builder extends AbstractRequest.Builder { private static final List ALL_TOPICS = null; private final List topics; private final boolean allowAutoTopicCreation; + private final List teqParaTopic; - public Builder(List topics, boolean allowAutoTopicCreation) { + public Builder(List topics, boolean allowAutoTopicCreation, List paraTopic) { super(ApiKeys.METADATA); this.topics = topics; this.allowAutoTopicCreation = allowAutoTopicCreation; + this.teqParaTopic = paraTopic; } public static Builder allTopics() { - return new Builder(ALL_TOPICS, false); + return new Builder(ALL_TOPICS, false, ALL_TOPICS); } public List topics() { @@ -57,11 +61,11 @@ public List topics() { public boolean isAllTopics() { return this.topics == ALL_TOPICS; } - - + + @Override public MetadataRequest build() { - return new MetadataRequest(topics, allowAutoTopicCreation); + return new MetadataRequest(topics, allowAutoTopicCreation, teqParaTopic); } @Override @@ -72,13 +76,20 @@ public String toString() { .append(")"); return bld.toString(); } + + @Override + public MetadataRequest build(short version) { + return build(); + } } - + private final List teqParaTopic; private final List topics; private final boolean allowAutoTopicCreation; - private MetadataRequest(List topics, boolean allowAutoTopicCreation) { + private MetadataRequest(List topics, boolean allowAutoTopicCreation, List teqParaTopic) { + super(ApiKeys.METADATA, (short)1); this.topics = topics; this.allowAutoTopicCreation = allowAutoTopicCreation; + this.teqParaTopic = teqParaTopic; } public List topics() { @@ -88,5 
+99,22 @@ public List topics() { public boolean allowAutoTopicCreation() { return this.allowAutoTopicCreation; } + + public List teqParaTopics() { + return this.teqParaTopic; + } + + + @Override + public ApiMessage data() { + // TODO Auto-generated method stub + return null; + } + + @Override + public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) { + // TODO Auto-generated method stub + return null; + } } diff --git a/clients/src/main/java/org/oracle/okafka/common/requests/MetadataResponse.java b/clients/src/main/java/org/oracle/okafka/common/requests/MetadataResponse.java index bee49a3..22f2cb3 100644 --- a/clients/src/main/java/org/oracle/okafka/common/requests/MetadataResponse.java +++ b/clients/src/main/java/org/oracle/okafka/common/requests/MetadataResponse.java @@ -1,7 +1,7 @@ /* -** OKafka Java Client version 0.8. +** OKafka Java Client version 23.4. ** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. +** Copyright (c) 2019, 2024 Oracle and/or its affiliates. ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. */ @@ -33,21 +33,31 @@ import java.util.List; import java.util.Map; -import org.oracle.okafka.common.Cluster; +import org.apache.kafka.common.Cluster; +import org.oracle.okafka.clients.NetworkClient; +import org.oracle.okafka.clients.TopicTeqParameters; import org.oracle.okafka.common.Node; -import org.oracle.okafka.common.PartitionInfo; -import org.oracle.okafka.common.config.AbstractConfig; +import org.oracle.okafka.common.errors.FeatureNotSupportedException; +import org.oracle.okafka.common.protocol.ApiKeys; +import org.apache.kafka.common.PartitionInfo; +import org.apache.kafka.common.config.AbstractConfig; +import org.apache.kafka.common.protocol.ApiMessage; +import org.apache.kafka.common.protocol.Errors; public class MetadataResponse extends AbstractResponse { - private final String clusterId = ""; + private final String clusterId; private final List nodes; private final List partitionInfo; private final Map errorsPerTopic; + private final Map teqParams; - public MetadataResponse(List nodes, List partitionInfo, Map errorsPerTopic) { + public MetadataResponse(String clusterId, List nodes, List partitionInfo, Map errorsPerTopic, Map _teqParams) { + super(ApiKeys.METADATA); + this.clusterId = clusterId; this.nodes = nodes; this.partitionInfo = partitionInfo; this.errorsPerTopic = errorsPerTopic; + this.teqParams = _teqParams; } public List nodes() { @@ -62,11 +72,43 @@ public List partitions() { * Get a snapshot of the cluster metadata from this response * @return the cluster snapshot */ - public Cluster cluster(AbstractConfig configs) { - return new Cluster(clusterId, nodes, partitionInfo,new HashSet<>(), new HashSet<>(), nodes.size() > 0 ?nodes.get(0) : null, configs); + /* public Cluster cluster(AbstractConfig configs) { + return new Cluster(clusterId, NetworkClient.convertToKafkaNodes(nodes), partitionInfo,new HashSet<>(), new HashSet<>(), nodes.size() > 0 ?nodes.get(0) : null);//, configs); + }*/ + + /** + * Get a snapshot of the cluster metadata from this response + * @return the cluster snapshot + */ + public Cluster cluster() { + return new Cluster(clusterId, NetworkClient.convertToKafkaNodes(nodes), partitionInfo,new HashSet<>(), new HashSet<>(), nodes.size() > 0 ?nodes.get(0) : null);//, configs); } public Map topicErrors() { return this.errorsPerTopic; } + + @Override + public ApiMessage data() { + // TODO Auto-generated method stub + return null; + } + + @Override + public Map 
errorCounts() { + // TODO Auto-generated method stub + return null; + } + + @Override + public int throttleTimeMs() { + // TODO Auto-generated method stub + return 0; + } + + @Override + public void maybeSetThrottleTimeMs(int arg0) { + throw new FeatureNotSupportedException("This feature is not supported for this release."); + } } + diff --git a/clients/src/main/java/org/oracle/okafka/common/requests/OffsetResetRequest.java b/clients/src/main/java/org/oracle/okafka/common/requests/OffsetResetRequest.java index 700d342..9db017e 100644 --- a/clients/src/main/java/org/oracle/okafka/common/requests/OffsetResetRequest.java +++ b/clients/src/main/java/org/oracle/okafka/common/requests/OffsetResetRequest.java @@ -1,7 +1,7 @@ /* -** OKafka Java Client version 0.8. +** OKafka Java Client version 23.4. ** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. +** Copyright (c) 2019, 2024 Oracle and/or its affiliates. ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. */ @@ -9,7 +9,9 @@ import java.util.Map; -import org.oracle.okafka.common.TopicPartition; +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.protocol.ApiMessage; +import org.apache.kafka.common.requests.AbstractResponse; import org.oracle.okafka.common.protocol.ApiKeys; public class OffsetResetRequest extends AbstractRequest { @@ -38,11 +40,17 @@ public String toString() { append(")"); return bld.toString(); } + + @Override + public OffsetResetRequest build(short version) { + return build(); + } } private final Map offsetResetTimestamps; private final long pollTimeoutMs; private OffsetResetRequest(Map offsetResetTimestamps, long pollTimeoutMs) { + super(ApiKeys.OFFSETRESET, (short)1); this.offsetResetTimestamps = offsetResetTimestamps; this.pollTimeoutMs = pollTimeoutMs; } @@ -54,4 +62,16 @@ public Map offsetResetTimestamps() { public long pollTimeout() { return this.pollTimeoutMs; } + + @Override + public ApiMessage data() { + // TODO Auto-generated method stub + return null; + } + + @Override + public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) { + // TODO Auto-generated method stub + return null; + } } diff --git a/clients/src/main/java/org/oracle/okafka/common/requests/OffsetResetResponse.java b/clients/src/main/java/org/oracle/okafka/common/requests/OffsetResetResponse.java index 447f675..f29efd4 100644 --- a/clients/src/main/java/org/oracle/okafka/common/requests/OffsetResetResponse.java +++ b/clients/src/main/java/org/oracle/okafka/common/requests/OffsetResetResponse.java @@ -1,19 +1,24 @@ /* -** OKafka Java Client version 0.8. +** OKafka Java Client version 23.4. ** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. +** Copyright (c) 2019, 2024 Oracle and/or its affiliates. ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
*/ package org.oracle.okafka.common.requests; import java.util.Map; -import org.oracle.okafka.common.TopicPartition; +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.protocol.ApiMessage; +import org.apache.kafka.common.protocol.Errors; +import org.oracle.okafka.common.errors.FeatureNotSupportedException; +import org.oracle.okafka.common.protocol.ApiKeys; public class OffsetResetResponse extends AbstractResponse { private final Map offsetResetResponse; private final Exception exception ; public OffsetResetResponse(Map offsetResetResponse, Exception exception) { + super(ApiKeys.OFFSETRESET); this.offsetResetResponse = offsetResetResponse; this.exception = exception; } @@ -25,4 +30,27 @@ public Map offsetResetResponse() { public Exception getException() { return exception; } -} \ No newline at end of file + + @Override + public ApiMessage data() { + // TODO Auto-generated method stub + return null; + } + + @Override + public Map errorCounts() { + // TODO Auto-generated method stub + return null; + } + + @Override + public int throttleTimeMs() { + // TODO Auto-generated method stub + return 0; + } + + @Override + public void maybeSetThrottleTimeMs(int arg0) { + throw new FeatureNotSupportedException("This feature is not supported for this release."); + } +} diff --git a/clients/src/main/java/org/oracle/okafka/clients/producer/internals/ProduceRequest.java b/clients/src/main/java/org/oracle/okafka/common/requests/ProduceRequest.java similarity index 60% rename from clients/src/main/java/org/oracle/okafka/clients/producer/internals/ProduceRequest.java rename to clients/src/main/java/org/oracle/okafka/common/requests/ProduceRequest.java index 7baefcb..839dfb9 100644 --- a/clients/src/main/java/org/oracle/okafka/clients/producer/internals/ProduceRequest.java +++ b/clients/src/main/java/org/oracle/okafka/common/requests/ProduceRequest.java @@ -1,7 +1,7 @@ /* -** OKafka Java Client version 0.8. +** OKafka Java Client version 23.4. ** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. +** Copyright (c) 2019, 2024 Oracle and/or its affiliates. ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
*/ @@ -27,12 +27,18 @@ * */ -package org.oracle.okafka.clients.producer.internals; +package org.oracle.okafka.common.requests; -import org.oracle.okafka.common.TopicPartition; +import java.util.ArrayList; +import java.util.List; + +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.protocol.ApiMessage; import org.oracle.okafka.common.protocol.ApiKeys; -import org.oracle.okafka.common.record.MemoryRecords; +import org.apache.kafka.common.record.MemoryRecords; +import org.apache.kafka.common.requests.AbstractResponse; import org.oracle.okafka.common.requests.AbstractRequest; +import org.oracle.okafka.common.utils.MessageIdConverter.OKafkaOffset; public class ProduceRequest extends AbstractRequest{ //private Map> produceRecordsByTopic; @@ -41,13 +47,19 @@ public class ProduceRequest extends AbstractRequest{ //private final Map partitionRecords; private final TopicPartition topicPartition; private final MemoryRecords memoryRecords; - + private boolean checkForDups = false; + // private String checkForMsgId = null; + private List retryMsgList = null; - public ProduceRequest(TopicPartition topicPartition, MemoryRecords memoryRecords, short acks, int timeout) { + public ProduceRequest(TopicPartition topicPartition, MemoryRecords memoryRecords, short acks, int timeout, boolean checkForDups, List retryMsgList) { + super(ApiKeys.PRODUCE, (short)1); this.topicPartition = topicPartition; this.memoryRecords = memoryRecords; this.acks = acks; this.timeout = timeout; + this.checkForDups = checkForDups; + this.retryMsgList = retryMsgList; + //this.checkForMsgId = checkForMsgId; } /*public Map> getproduceRecordsByTopic() { @@ -62,26 +74,59 @@ public MemoryRecords getMemoryRecords() { return memoryRecords; } + public boolean checkForDups() + { + return checkForDups; + } + + public List retryMsgList() + { + return retryMsgList; + } + public static class Builder extends AbstractRequest.Builder { private final short acks; private final int timeout; //private final Map partitionRecords; private final TopicPartition topicPartition; private final MemoryRecords memoryRecords; + private final boolean checkForDups ; + // private final String checkForMsgId ; + private List retryMsgList = null; - public Builder(TopicPartition topicPartition, MemoryRecords memoryRecords, short acks, int timeout) { + public Builder(TopicPartition topicPartition, MemoryRecords memoryRecords, short acks, int timeout, + boolean inRetry, List retryMsgList) { super(ApiKeys.PRODUCE); this.topicPartition = topicPartition; this.memoryRecords = memoryRecords; this.acks = acks; this.timeout = timeout; - + this.checkForDups = inRetry; + this.retryMsgList = retryMsgList; } @Override public ProduceRequest build() { - return new ProduceRequest(topicPartition, memoryRecords, acks, timeout); + return new ProduceRequest(topicPartition, memoryRecords, acks, timeout, checkForDups, retryMsgList); } + + @Override + public ProduceRequest build(short version) { + return new ProduceRequest(topicPartition, memoryRecords, acks, timeout, checkForDups, retryMsgList); + } + } + + @Override + public ApiMessage data() { + // TODO Auto-generated method stub + return null; + } + + @Override + public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) { + // TODO Auto-generated method stub + return null; } + } diff --git a/clients/src/main/java/org/oracle/okafka/clients/producer/internals/ProduceResponse.java b/clients/src/main/java/org/oracle/okafka/common/requests/ProduceResponse.java similarity index 61% rename from 
clients/src/main/java/org/oracle/okafka/clients/producer/internals/ProduceResponse.java rename to clients/src/main/java/org/oracle/okafka/common/requests/ProduceResponse.java index 5cc69ea..6a682b4 100644 --- a/clients/src/main/java/org/oracle/okafka/clients/producer/internals/ProduceResponse.java +++ b/clients/src/main/java/org/oracle/okafka/common/requests/ProduceResponse.java @@ -1,7 +1,7 @@ /* -** OKafka Java Client version 0.8. +** OKafka Java Client version 23.4. ** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. +** Copyright (c) 2019, 2024 Oracle and/or its affiliates. ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. */ @@ -27,12 +27,20 @@ * */ -package org.oracle.okafka.clients.producer.internals; +package org.oracle.okafka.common.requests; +import java.util.ArrayList; import java.util.List; +import java.util.Map; -import org.oracle.okafka.common.TopicPartition; +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.protocol.ApiMessage; +import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.requests.ProduceResponse.RecordError; +import org.oracle.okafka.common.errors.FeatureNotSupportedException; +import org.oracle.okafka.common.protocol.ApiKeys; import org.oracle.okafka.common.requests.AbstractResponse; +import org.oracle.okafka.common.utils.MessageIdConverter.OKafkaOffset; public class ProduceResponse extends AbstractResponse { @@ -43,6 +51,8 @@ public class ProduceResponse extends AbstractResponse { private final PartitionResponse partitionResponse; private final int throttleTimeMs; public static final int DEFAULT_THROTTLE_TIME = 0; + private String msgIdToCheck; + private boolean checkDuplicate; /** * Constructor for Version 0 */ @@ -54,6 +64,7 @@ public ProduceResponse(TopicPartition topicPartition, PartitionResponse partitio * Constructor for the latest version */ public ProduceResponse(TopicPartition topicPartition, PartitionResponse partitionResponse, int throttleTimeMs) { + super(ApiKeys.PRODUCE); this.topicPartition = topicPartition; this.partitionResponse = partitionResponse; this.throttleTimeMs = throttleTimeMs; @@ -81,13 +92,20 @@ public int throttleTimeMs() { public static final class PartitionResponse { - public Exception exception; - public List msgIds; - public List logAppendTime; + public RuntimeException exception; + public List msgIds; + //public List logAppendTime; + public long logAppendTime; + public long subPartitionId; + private boolean checkDuplicate; + + public Errors error=null; + public List recordErrors=new ArrayList<>(); + public String errorMessage=null; - public PartitionResponse(Exception exception) { + public PartitionResponse(RuntimeException exception) { this.exception = exception; - this.logAppendTime = null; + this.logAppendTime = 0l; this.msgIds = null; } @@ -103,12 +121,48 @@ public String toString() { return b.toString(); } - public Exception exception() { + public RuntimeException exception() { return exception; } + + public boolean getCheckDuplicate() + { + return this.checkDuplicate; + } + public void setCheckDuplicate(boolean checkDups) + { + this.checkDuplicate = checkDups; + } + + public List getOffsets() + { + return msgIds; + } + public void setOffsets (List offsetList) { + this.msgIds = offsetList; + } } public boolean shouldClientThrottle(short version) { return version >= 6; } + + @Override + public ApiMessage data() { + // TODO Auto-generated method stub + return null; + } + + @Override + public Map 
errorCounts() { + // TODO Auto-generated method stub + return null; + } + + @Override + public void maybeSetThrottleTimeMs(int arg0) { + throw new FeatureNotSupportedException("This feature is not supported for this release."); + } + + } diff --git a/clients/src/main/java/org/oracle/okafka/common/requests/RequestHeader.java b/clients/src/main/java/org/oracle/okafka/common/requests/RequestHeader.java index 8baeeb8..b22941a 100644 --- a/clients/src/main/java/org/oracle/okafka/common/requests/RequestHeader.java +++ b/clients/src/main/java/org/oracle/okafka/common/requests/RequestHeader.java @@ -1,7 +1,7 @@ /* -** OKafka Java Client version 0.8. +** OKafka Java Client version 23.4. ** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. +** Copyright (c) 2019, 2024 Oracle and/or its affiliates. ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. */ @@ -31,21 +31,26 @@ import static java.util.Objects.requireNonNull; +import org.apache.kafka.common.message.RequestHeaderData; import org.oracle.okafka.common.protocol.ApiKeys; /** * The header for a request in the Kafka protocol */ -public class RequestHeader { - +public class RequestHeader {// extends org.apache.kafka.common.requests.RequestHeader{ + + private final RequestHeaderData data; private final ApiKeys apiKey; private final String clientId; private final int correlationId; + public RequestHeader(ApiKeys apiKey, String clientId, int correlation) { + data = new RequestHeaderData().setClientId(clientId).setCorrelationId(correlation).setRequestApiKey(apiKey.id); this.apiKey = requireNonNull(apiKey); this.clientId = clientId; this.correlationId = correlation; } + public ApiKeys apiKey() { return apiKey; } diff --git a/clients/src/main/java/org/oracle/okafka/common/requests/ResponseHeader.java b/clients/src/main/java/org/oracle/okafka/common/requests/ResponseHeader.java index 635d347..e3df242 100644 --- a/clients/src/main/java/org/oracle/okafka/common/requests/ResponseHeader.java +++ b/clients/src/main/java/org/oracle/okafka/common/requests/ResponseHeader.java @@ -1,7 +1,7 @@ /* -** OKafka Java Client version 0.8. +** OKafka Java Client version 23.4. ** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. +** Copyright (c) 2019, 2024 Oracle and/or its affiliates. ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. */ diff --git a/clients/src/main/java/org/oracle/okafka/common/requests/SubscribeRequest.java b/clients/src/main/java/org/oracle/okafka/common/requests/SubscribeRequest.java index 54f5b41..2d57ce3 100644 --- a/clients/src/main/java/org/oracle/okafka/common/requests/SubscribeRequest.java +++ b/clients/src/main/java/org/oracle/okafka/common/requests/SubscribeRequest.java @@ -1,12 +1,14 @@ /* -** OKafka Java Client version 0.8. +** OKafka Java Client version 23.4. ** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. +** Copyright (c) 2019, 2024 Oracle and/or its affiliates. ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
*/ package org.oracle.okafka.common.requests; +import org.apache.kafka.common.protocol.ApiMessage; +import org.apache.kafka.common.requests.AbstractResponse; import org.oracle.okafka.common.protocol.ApiKeys; public class SubscribeRequest extends AbstractRequest { @@ -30,9 +32,15 @@ public String toString() { append(")"); return bld.toString(); } + + @Override + public SubscribeRequest build(short version) { + return build(); + } } private final String topic; public SubscribeRequest(String topic) { + super(ApiKeys.SUBSCRIBE,(short)1); this.topic = topic; } @@ -40,5 +48,17 @@ public String getTopic() { return this.topic; } + @Override + public ApiMessage data() { + // TODO Auto-generated method stub + return null; + } + + @Override + public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) { + // TODO Auto-generated method stub + return null; + } + } diff --git a/clients/src/main/java/org/oracle/okafka/common/requests/SubscribeResponse.java b/clients/src/main/java/org/oracle/okafka/common/requests/SubscribeResponse.java index 6b481f0..a34e6de 100644 --- a/clients/src/main/java/org/oracle/okafka/common/requests/SubscribeResponse.java +++ b/clients/src/main/java/org/oracle/okafka/common/requests/SubscribeResponse.java @@ -1,19 +1,27 @@ /* -** OKafka Java Client version 0.8. +** OKafka Java Client version 23.4. ** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. +** Copyright (c) 2019, 2024 Oracle and/or its affiliates. ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. */ package org.oracle.okafka.common.requests; +import java.util.Map; + import javax.jms.JMSException; +import org.apache.kafka.common.protocol.ApiMessage; +import org.apache.kafka.common.protocol.Errors; +import org.oracle.okafka.common.errors.FeatureNotSupportedException; +import org.oracle.okafka.common.protocol.ApiKeys; + public class SubscribeResponse extends AbstractResponse { private final JMSException exception; private final String topic; public SubscribeResponse(String topic, JMSException exception) { + super(ApiKeys.SUBSCRIBE); this.topic = topic; this.exception = exception; } @@ -27,4 +35,27 @@ public JMSException getException() { return this.exception; } + @Override + public ApiMessage data() { + // TODO Auto-generated method stub + return null; + } + + @Override + public Map errorCounts() { + // TODO Auto-generated method stub + return null; + } + + @Override + public int throttleTimeMs() { + // TODO Auto-generated method stub + return 0; + } + + @Override + public void maybeSetThrottleTimeMs(int arg0) { + throw new FeatureNotSupportedException("This feature is not supported for this release."); + } + } diff --git a/clients/src/main/java/org/oracle/okafka/common/requests/SyncGroupRequest.java b/clients/src/main/java/org/oracle/okafka/common/requests/SyncGroupRequest.java new file mode 100644 index 0000000..38b07b2 --- /dev/null +++ b/clients/src/main/java/org/oracle/okafka/common/requests/SyncGroupRequest.java @@ -0,0 +1,69 @@ +package org.oracle.okafka.common.requests; + +import java.util.List; + +import org.apache.kafka.common.protocol.ApiMessage; +import org.apache.kafka.common.requests.AbstractResponse; +import org.oracle.okafka.common.internals.SessionData; +import org.oracle.okafka.common.protocol.ApiKeys; + +public class SyncGroupRequest extends AbstractRequest { + public static class Builder extends AbstractRequest.Builder { + private List sessionData; + private final int 
version) { + super(ApiKeys.SYNC_GROUP); + this.sessionData = sessionData; + this.version = version; + } + + @Override + public SyncGroupRequest build() { + return new SyncGroupRequest(sessionData, version); + } + + @Override + public String toString() { + StringBuilder bld = new StringBuilder(); + bld.append("(type=SyncGroupRequest") + .append(")"); + return bld.toString(); + } + + @Override + public SyncGroupRequest build(short version) { + return build(); + } + + } + + private List sessionData; + private int version; + public SyncGroupRequest(List sessionData, int version) { + super(ApiKeys.SYNC_GROUP,(short)1); + this.sessionData = sessionData; + this.version = version; + } + + public List getSessionData() { + return this.sessionData; + } + + public int getVersion() { + return this.version; + } + + @Override + public ApiMessage data() { + // TODO Auto-generated method stub + return null; + } + + @Override + public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) { + // TODO Auto-generated method stub + return null; + } + +} \ No newline at end of file diff --git a/clients/src/main/java/org/oracle/okafka/common/requests/SyncGroupResponse.java b/clients/src/main/java/org/oracle/okafka/common/requests/SyncGroupResponse.java new file mode 100644 index 0000000..ab3afc7 --- /dev/null +++ b/clients/src/main/java/org/oracle/okafka/common/requests/SyncGroupResponse.java @@ -0,0 +1,57 @@ +package org.oracle.okafka.common.requests; + +import java.util.Map; + +import org.apache.kafka.common.protocol.ApiMessage; +import org.apache.kafka.common.protocol.Errors; +import org.oracle.okafka.common.internals.SessionData; +import org.oracle.okafka.common.protocol.ApiKeys; + +public class SyncGroupResponse extends AbstractResponse { + private SessionData sessionData; + private int version; + private Exception exception; + + public SyncGroupResponse(SessionData sessionData, int version, Exception exception) { + super(ApiKeys.SYNC_GROUP); + this.sessionData = sessionData; + this.version = version; + this.exception = exception; + } + + public SessionData getSessionData() { + return this.sessionData; + } + + public int getVersion() { + return this.version; + } + + public Exception getException() { + return this.exception; + } + + @Override + public ApiMessage data() { + // TODO Auto-generated method stub + return null; + } + + @Override + public Map errorCounts() { + // TODO Auto-generated method stub + return null; + } + + @Override + public int throttleTimeMs() { + // TODO Auto-generated method stub + return 0; + } + + @Override + public void maybeSetThrottleTimeMs(int arg0) { + // TODO Auto-generated method stub + + } +} diff --git a/clients/src/main/java/org/oracle/okafka/common/requests/UnsubscribeRequest.java b/clients/src/main/java/org/oracle/okafka/common/requests/UnsubscribeRequest.java index ffb62e3..83b5e80 100644 --- a/clients/src/main/java/org/oracle/okafka/common/requests/UnsubscribeRequest.java +++ b/clients/src/main/java/org/oracle/okafka/common/requests/UnsubscribeRequest.java @@ -1,12 +1,14 @@ /* -** OKafka Java Client version 0.8. +** OKafka Java Client version 23.4. ** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. +** Copyright (c) 2019, 2024 Oracle and/or its affiliates. ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
*/ package org.oracle.okafka.common.requests; +import org.apache.kafka.common.protocol.ApiMessage; +import org.apache.kafka.common.requests.AbstractResponse; import org.oracle.okafka.common.protocol.ApiKeys; public class UnsubscribeRequest extends AbstractRequest { @@ -28,8 +30,24 @@ public String toString() { append(")"); return bld.toString(); } + + @Override + public UnsubscribeRequest build(short version) { + return build(); + } } public UnsubscribeRequest() { + super(ApiKeys.UNSUBSCRIBE,(short)1); } + @Override + public ApiMessage data() { + // TODO Auto-generated method stub + return null; + } + @Override + public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) { + // TODO Auto-generated method stub + return null; + } } diff --git a/clients/src/main/java/org/oracle/okafka/common/requests/UnsubscribeResponse.java b/clients/src/main/java/org/oracle/okafka/common/requests/UnsubscribeResponse.java index 03a428d..3d213fe 100644 --- a/clients/src/main/java/org/oracle/okafka/common/requests/UnsubscribeResponse.java +++ b/clients/src/main/java/org/oracle/okafka/common/requests/UnsubscribeResponse.java @@ -1,7 +1,7 @@ /* -** OKafka Java Client version 0.8. +** OKafka Java Client version 23.4. ** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. +** Copyright (c) 2019, 2024 Oracle and/or its affiliates. ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. */ @@ -9,10 +9,16 @@ import java.util.Map; +import org.apache.kafka.common.protocol.ApiMessage; +import org.apache.kafka.common.protocol.Errors; +import org.oracle.okafka.common.errors.FeatureNotSupportedException; +import org.oracle.okafka.common.protocol.ApiKeys; + public class UnsubscribeResponse extends AbstractResponse { private final Map response; public UnsubscribeResponse(Map response) { + super(ApiKeys.UNSUBSCRIBE); this.response = response; } @@ -21,5 +27,28 @@ public Map response() { } + @Override + public ApiMessage data() { + // TODO Auto-generated method stub + return null; + } + + @Override + public Map errorCounts() { + // TODO Auto-generated method stub + return null; + } + + @Override + public int throttleTimeMs() { + // TODO Auto-generated method stub + return 0; + } + + @Override + public void maybeSetThrottleTimeMs(int arg0) { + throw new FeatureNotSupportedException("This feature is not supported for this release."); + } + } diff --git a/clients/src/main/java/org/oracle/okafka/common/serialization/ByteArrayDeserializer.java b/clients/src/main/java/org/oracle/okafka/common/serialization/ByteArrayDeserializer.java deleted file mode 100644 index fd9375c..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/serialization/ByteArrayDeserializer.java +++ /dev/null @@ -1,45 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.serialization; - -import java.util.Map; - -public class ByteArrayDeserializer implements Deserializer { - - @Override - public void configure(Map configs, boolean isKey) { - // nothing to do - } - - @Override - public byte[] deserialize(String topic, byte[] data) { - return data; - } - - @Override - public void close() { - // nothing to do - } -} diff --git a/clients/src/main/java/org/oracle/okafka/common/serialization/ByteArraySerializer.java b/clients/src/main/java/org/oracle/okafka/common/serialization/ByteArraySerializer.java deleted file mode 100644 index 1ebf9ac..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/serialization/ByteArraySerializer.java +++ /dev/null @@ -1,45 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.serialization; - -import java.util.Map; - -public class ByteArraySerializer implements Serializer { - - @Override - public void configure(Map configs, boolean isKey) { - // nothing to do - } - - @Override - public byte[] serialize(String topic, byte[] data) { - return data; - } - - @Override - public void close() { - // nothing to do - } -} diff --git a/clients/src/main/java/org/oracle/okafka/common/serialization/ByteBufferDeserializer.java b/clients/src/main/java/org/oracle/okafka/common/serialization/ByteBufferDeserializer.java deleted file mode 100644 index bbacb8e..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/serialization/ByteBufferDeserializer.java +++ /dev/null @@ -1,46 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.serialization; - -import java.nio.ByteBuffer; -import java.util.Map; - -public class ByteBufferDeserializer implements Deserializer { - - public void configure(Map configs, boolean isKey) { - // nothing to do - } - - public ByteBuffer deserialize(String topic, byte[] data) { - if (data == null) - return null; - - return ByteBuffer.wrap(data); - } - - public void close() { - // nothing to do - } -} diff --git a/clients/src/main/java/org/oracle/okafka/common/serialization/ByteBufferSerializer.java b/clients/src/main/java/org/oracle/okafka/common/serialization/ByteBufferSerializer.java deleted file mode 100644 index 45b3ba9..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/serialization/ByteBufferSerializer.java +++ /dev/null @@ -1,58 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.serialization; - -import java.nio.ByteBuffer; -import java.util.Map; - -public class ByteBufferSerializer implements Serializer { - - public void configure(Map configs, boolean isKey) { - // nothing to do - } - - public byte[] serialize(String topic, ByteBuffer data) { - if (data == null) - return null; - - data.rewind(); - - if (data.hasArray()) { - byte[] arr = data.array(); - if (data.arrayOffset() == 0 && arr.length == data.remaining()) { - return arr; - } - } - - byte[] ret = new byte[data.remaining()]; - data.get(ret, 0, ret.length); - data.rewind(); - return ret; - } - - public void close() { - // nothing to do - } -} diff --git a/clients/src/main/java/org/oracle/okafka/common/serialization/BytesDeserializer.java b/clients/src/main/java/org/oracle/okafka/common/serialization/BytesDeserializer.java deleted file mode 100644 index 2f75202..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/serialization/BytesDeserializer.java +++ /dev/null @@ -1,47 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
-*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.serialization; - -import java.util.Map; - -import org.oracle.okafka.common.utils.Bytes; - -public class BytesDeserializer implements Deserializer { - - public void configure(Map configs, boolean isKey) { - // nothing to do - } - - public Bytes deserialize(String topic, byte[] data) { - if (data == null) - return null; - - return new Bytes(data); - } - - public void close() { - // nothing to do - } -} diff --git a/clients/src/main/java/org/oracle/okafka/common/serialization/BytesSerializer.java b/clients/src/main/java/org/oracle/okafka/common/serialization/BytesSerializer.java deleted file mode 100644 index 3c87897..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/serialization/BytesSerializer.java +++ /dev/null @@ -1,48 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.serialization; - -import java.util.Map; - -import org.oracle.okafka.common.utils.Bytes; - -public class BytesSerializer implements Serializer { - - public void configure(Map configs, boolean isKey) { - // nothing to do - } - - public byte[] serialize(String topic, Bytes data) { - if (data == null) - return null; - - return data.get(); - } - - public void close() { - // nothing to do - } -} - diff --git a/clients/src/main/java/org/oracle/okafka/common/serialization/Deserializer.java b/clients/src/main/java/org/oracle/okafka/common/serialization/Deserializer.java deleted file mode 100644 index e45b4ec..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/serialization/Deserializer.java +++ /dev/null @@ -1,58 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. 
-** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.serialization; - -import java.io.Closeable; -import java.util.Map; - -/** - * An interface for converting bytes to objects. - * - * A class that implements this interface is expected to have a constructor with no parameters. - *

      - * Implement {@link org.oracle.okafka.common.ClusterResourceListener} to receive cluster metadata once it's available. Please see the class documentation for ClusterResourceListener for more information. - * - * @param Type to be deserialized into. - */ -public interface Deserializer extends Closeable { - - /** - * Configure this class. - * @param configs configs in key/value pairs - * @param isKey whether is for key or value - */ - void configure(Map configs, boolean isKey); - - /** - * Deserialize a record value from a byte array into a value or object. - * @param topic topic associated with the data - * @param data serialized bytes; may be null; implementations are recommended to handle null by returning a value or null rather than throwing an exception. - * @return deserialized typed data; may be null - */ - T deserialize(String topic, byte[] data); - - @Override - void close(); -} diff --git a/clients/src/main/java/org/oracle/okafka/common/serialization/DoubleDeserializer.java b/clients/src/main/java/org/oracle/okafka/common/serialization/DoubleDeserializer.java deleted file mode 100644 index 9a89e6a..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/serialization/DoubleDeserializer.java +++ /dev/null @@ -1,58 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.serialization; - -import java.util.Map; - -import org.oracle.okafka.common.errors.SerializationException; - -public class DoubleDeserializer implements Deserializer { - - @Override - public void configure(Map configs, boolean isKey) { - // nothing to do - } - - @Override - public Double deserialize(String topic, byte[] data) { - if (data == null) - return null; - if (data.length != 8) { - throw new SerializationException("Size of data received by Deserializer is not 8"); - } - - long value = 0; - for (byte b : data) { - value <<= 8; - value |= b & 0xFF; - } - return Double.longBitsToDouble(value); - } - - @Override - public void close() { - // nothing to do - } -} diff --git a/clients/src/main/java/org/oracle/okafka/common/serialization/DoubleSerializer.java b/clients/src/main/java/org/oracle/okafka/common/serialization/DoubleSerializer.java deleted file mode 100644 index 0936f7d..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/serialization/DoubleSerializer.java +++ /dev/null @@ -1,58 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. 
-** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.serialization; - -import java.util.Map; - -public class DoubleSerializer implements Serializer { - - @Override - public void configure(Map configs, boolean isKey) { - // nothing to do - } - - @Override - public byte[] serialize(String topic, Double data) { - if (data == null) - return null; - - long bits = Double.doubleToLongBits(data); - return new byte[] { - (byte) (bits >>> 56), - (byte) (bits >>> 48), - (byte) (bits >>> 40), - (byte) (bits >>> 32), - (byte) (bits >>> 24), - (byte) (bits >>> 16), - (byte) (bits >>> 8), - (byte) bits - }; - } - - @Override - public void close() { - // nothing to do - } -} \ No newline at end of file diff --git a/clients/src/main/java/org/oracle/okafka/common/serialization/ExtendedDeserializer.java b/clients/src/main/java/org/oracle/okafka/common/serialization/ExtendedDeserializer.java deleted file mode 100644 index e8da917..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/serialization/ExtendedDeserializer.java +++ /dev/null @@ -1,85 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.serialization; - -import java.util.Map; - -import org.oracle.okafka.common.header.Headers; - -/** - * A Deserializer that has access to the headers associated with the record. - * - * Prefer {@link Deserializer} if access to the headers is not required. Once Kafka drops support for Java 7, the - * {@code deserialize()} method introduced by this interface will be added to Deserializer with a default implementation - * so that backwards compatibility is maintained. This interface may be deprecated once that happens. 
- * - * A class that implements this interface is expected to have a constructor with no parameters. - * @param - */ -public interface ExtendedDeserializer extends Deserializer { - - /** - * Deserialize a record value from a byte array into a value or object. - * @param topic topic associated with the data - * @param headers headers associated with the record; may be empty. - * @param data serialized bytes; may be null; implementations are recommended to handle null by returning a value or null rather than throwing an exception. - * @return deserialized typed data; may be null - */ - T deserialize(String topic, Headers headers, byte[] data); - - class Wrapper implements ExtendedDeserializer { - - private final Deserializer deserializer; - - public Wrapper(Deserializer deserializer) { - this.deserializer = deserializer; - } - - - @Override - public T deserialize(String topic, Headers headers, byte[] data) { - return deserialize(topic, data); - } - - @Override - public void configure(Map configs, boolean isKey) { - deserializer.configure(configs, isKey); - } - - @Override - public T deserialize(String topic, byte[] data) { - return deserializer.deserialize(topic, data); - } - - @Override - public void close() { - deserializer.close(); - } - - public static ExtendedDeserializer ensureExtended(Deserializer deserializer) { - return deserializer == null ? null : deserializer instanceof ExtendedDeserializer ? (ExtendedDeserializer) deserializer : new ExtendedDeserializer.Wrapper<>(deserializer); - } - } -} diff --git a/clients/src/main/java/org/oracle/okafka/common/serialization/ExtendedSerializer.java b/clients/src/main/java/org/oracle/okafka/common/serialization/ExtendedSerializer.java deleted file mode 100644 index 70b7953..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/serialization/ExtendedSerializer.java +++ /dev/null @@ -1,86 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.serialization; - -import java.util.Map; - -import org.oracle.okafka.common.header.Headers; - -/** -* A Serializer that has access to the headers associated with the record. -* -* Prefer {@link Serializer} if access to the headers is not required. Once Kafka drops support for Java 7, the -* {@code serialize()} method introduced by this interface will be added to Serializer with a default implementation -* so that backwards compatibility is maintained. This interface may be deprecated once that happens. -* -* A class that implements this interface is expected to have a constructor with no parameters. 
-* @param -*/ -public interface ExtendedSerializer extends Serializer { - - /** - * Convert {@code data} into a byte array. - * - * @param topic topic associated with data - * @param headers headers associated with the record - * @param data typed data - * @return serialized bytes - */ - byte[] serialize(String topic, Headers headers, T data); - - class Wrapper implements ExtendedSerializer { - - private final Serializer serializer; - - public Wrapper(Serializer serializer) { - this.serializer = serializer; - } - - @Override - public byte[] serialize(String topic, Headers headers, T data) { - return serialize(topic, data); - } - - @Override - public void configure(Map configs, boolean isKey) { - serializer.configure(configs, isKey); - } - - @Override - public byte[] serialize(String topic, T data) { - return serializer.serialize(topic, data); - } - - @Override - public void close() { - serializer.close(); - } - - public static ExtendedSerializer ensureExtended(Serializer serializer) { - return serializer == null ? null : serializer instanceof ExtendedSerializer ? (ExtendedSerializer) serializer : new ExtendedSerializer.Wrapper<>(serializer); - } - } -} - diff --git a/clients/src/main/java/org/oracle/okafka/common/serialization/FloatDeserializer.java b/clients/src/main/java/org/oracle/okafka/common/serialization/FloatDeserializer.java deleted file mode 100644 index c3f4620..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/serialization/FloatDeserializer.java +++ /dev/null @@ -1,59 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.oracle.okafka.common.serialization; - -import java.util.Map; - -import org.oracle.okafka.common.errors.SerializationException; - -public class FloatDeserializer implements Deserializer { - - @Override - public void configure(final Map configs, final boolean isKey) { - // nothing to do - } - - @Override - public Float deserialize(final String topic, final byte[] data) { - if (data == null) - return null; - if (data.length != 4) { - throw new SerializationException("Size of data received by Deserializer is not 4"); - } - - int value = 0; - for (byte b : data) { - value <<= 8; - value |= b & 0xFF; - } - return Float.intBitsToFloat(value); - } - - @Override - public void close() { - // nothing to do - } - -} \ No newline at end of file diff --git a/clients/src/main/java/org/oracle/okafka/common/serialization/FloatSerializer.java b/clients/src/main/java/org/oracle/okafka/common/serialization/FloatSerializer.java deleted file mode 100644 index 4e1b109..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/serialization/FloatSerializer.java +++ /dev/null @@ -1,54 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.serialization; - -import java.util.Map; - -public class FloatSerializer implements Serializer { - - @Override - public void configure(final Map configs, final boolean isKey) { - // nothing to do - } - - @Override - public byte[] serialize(final String topic, final Float data) { - if (data == null) - return null; - - long bits = Float.floatToRawIntBits(data); - return new byte[] { - (byte) (bits >>> 24), - (byte) (bits >>> 16), - (byte) (bits >>> 8), - (byte) bits - }; - } - - @Override - public void close() { - // nothing to do - } -} \ No newline at end of file diff --git a/clients/src/main/java/org/oracle/okafka/common/serialization/IntegerDeserializer.java b/clients/src/main/java/org/oracle/okafka/common/serialization/IntegerDeserializer.java deleted file mode 100644 index ea5e804..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/serialization/IntegerDeserializer.java +++ /dev/null @@ -1,55 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.serialization; - -import java.util.Map; - -import org.oracle.okafka.common.errors.SerializationException; - -public class IntegerDeserializer implements Deserializer { - - public void configure(Map configs, boolean isKey) { - // nothing to do - } - - public Integer deserialize(String topic, byte[] data) { - if (data == null) - return null; - if (data.length != 4) { - throw new SerializationException("Size of data received by IntegerDeserializer is not 4"); - } - - int value = 0; - for (byte b : data) { - value <<= 8; - value |= b & 0xFF; - } - return value; - } - - public void close() { - // nothing to do - } -} diff --git a/clients/src/main/java/org/oracle/okafka/common/serialization/IntegerSerializer.java b/clients/src/main/java/org/oracle/okafka/common/serialization/IntegerSerializer.java deleted file mode 100644 index fce1c6c..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/serialization/IntegerSerializer.java +++ /dev/null @@ -1,50 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.serialization; - -import java.util.Map; - -public class IntegerSerializer implements Serializer { - - public void configure(Map configs, boolean isKey) { - // nothing to do - } - - public byte[] serialize(String topic, Integer data) { - if (data == null) - return null; - - return new byte[] { - (byte) (data >>> 24), - (byte) (data >>> 16), - (byte) (data >>> 8), - data.byteValue() - }; - } - - public void close() { - // nothing to do - } -} \ No newline at end of file diff --git a/clients/src/main/java/org/oracle/okafka/common/serialization/LongDeserializer.java b/clients/src/main/java/org/oracle/okafka/common/serialization/LongDeserializer.java deleted file mode 100644 index 455dda2..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/serialization/LongDeserializer.java +++ /dev/null @@ -1,55 +0,0 @@ -/* -** OKafka Java Client version 0.8. 
-** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.serialization; - -import java.util.Map; - -import org.oracle.okafka.common.errors.SerializationException; - -public class LongDeserializer implements Deserializer { - - public void configure(Map configs, boolean isKey) { - // nothing to do - } - - public Long deserialize(String topic, byte[] data) { - if (data == null) - return null; - if (data.length != 8) { - throw new SerializationException("Size of data received by LongDeserializer is not 8"); - } - - long value = 0; - for (byte b : data) { - value <<= 8; - value |= b & 0xFF; - } - return value; - } - - public void close() { - // nothing to do - } -} diff --git a/clients/src/main/java/org/oracle/okafka/common/serialization/LongSerializer.java b/clients/src/main/java/org/oracle/okafka/common/serialization/LongSerializer.java deleted file mode 100644 index ac0f492..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/serialization/LongSerializer.java +++ /dev/null @@ -1,54 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
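
The primitive (de)serializers being dropped here all use the same fixed-width, most-significant-byte-first layout: the serializer shifts bytes out from the top, and the deserializer folds them back in. The standalone sketch below (the class name is illustrative, not part of any API) shows that this hand-rolled encoding matches the big-endian byte order `ByteBuffer` uses by default:

```java
import java.nio.ByteBuffer;
import java.util.Arrays;

// Sketch of the encoding used by the removed IntegerSerializer/IntegerDeserializer
// pair: most-significant byte first, i.e. ByteBuffer's default byte order.
public class BigEndianRoundTrip {

    static byte[] serializeInt(int value) {
        return new byte[] {
            (byte) (value >>> 24),
            (byte) (value >>> 16),
            (byte) (value >>> 8),
            (byte) value
        };
    }

    static int deserializeInt(byte[] data) {
        int value = 0;
        for (byte b : data) {   // fold bytes back in, MSB first
            value <<= 8;
            value |= b & 0xFF;
        }
        return value;
    }

    public static void main(String[] args) {
        int original = -123456789;
        byte[] handRolled = serializeInt(original);
        byte[] viaBuffer  = ByteBuffer.allocate(4).putInt(original).array();

        System.out.println(Arrays.equals(handRolled, viaBuffer));    // true
        System.out.println(deserializeInt(handRolled) == original);  // true
    }
}
```
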
- */ - -package org.oracle.okafka.common.serialization; - -import java.util.Map; - -public class LongSerializer implements Serializer { - - public void configure(Map configs, boolean isKey) { - // nothing to do - } - - public byte[] serialize(String topic, Long data) { - if (data == null) - return null; - - return new byte[] { - (byte) (data >>> 56), - (byte) (data >>> 48), - (byte) (data >>> 40), - (byte) (data >>> 32), - (byte) (data >>> 24), - (byte) (data >>> 16), - (byte) (data >>> 8), - data.byteValue() - }; - } - - public void close() { - // nothing to do - } -} \ No newline at end of file diff --git a/clients/src/main/java/org/oracle/okafka/common/serialization/Serializer.java b/clients/src/main/java/org/oracle/okafka/common/serialization/Serializer.java deleted file mode 100644 index 7cb2fd2..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/serialization/Serializer.java +++ /dev/null @@ -1,64 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.serialization; - -import java.io.Closeable; -import java.util.Map; - -/** - * An interface for converting objects to bytes. - * - * A class that implements this interface is expected to have a constructor with no parameter. - *

      - * Implement {@link org.oracle.okafka.common.ClusterResourceListener} to receive cluster metadata once it's available. Please see the class documentation for ClusterResourceListener for more information. - * - * @param Type to be serialized from. - */ -public interface Serializer extends Closeable { - - /** - * Configure this class. - * @param configs configs in key/value pairs - * @param isKey whether is for key or value - */ - void configure(Map configs, boolean isKey); - - /** - * Convert {@code data} into a byte array. - * - * @param topic topic associated with data - * @param data typed data - * @return serialized bytes - */ - byte[] serialize(String topic, T data); - - /** - * Close this serializer. - * - * This method must be idempotent as it may be called multiple times. - */ - @Override - void close(); -} \ No newline at end of file diff --git a/clients/src/main/java/org/oracle/okafka/common/serialization/ShortDeserializer.java b/clients/src/main/java/org/oracle/okafka/common/serialization/ShortDeserializer.java deleted file mode 100644 index 1ae9dbe..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/serialization/ShortDeserializer.java +++ /dev/null @@ -1,55 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.serialization; - -import java.util.Map; - -import org.oracle.okafka.common.errors.SerializationException; - -public class ShortDeserializer implements Deserializer { - - public void configure(Map configs, boolean isKey) { - // nothing to do - } - - public Short deserialize(String topic, byte[] data) { - if (data == null) - return null; - if (data.length != 2) { - throw new SerializationException("Size of data received by ShortDeserializer is not 2"); - } - - short value = 0; - for (byte b : data) { - value <<= 8; - value |= b & 0xFF; - } - return value; - } - - public void close() { - // nothing to do - } -} diff --git a/clients/src/main/java/org/oracle/okafka/common/serialization/ShortSerializer.java b/clients/src/main/java/org/oracle/okafka/common/serialization/ShortSerializer.java deleted file mode 100644 index 746f7e9..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/serialization/ShortSerializer.java +++ /dev/null @@ -1,48 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
-*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.serialization; - -import java.util.Map; - -public class ShortSerializer implements Serializer { - - public void configure(Map configs, boolean isKey) { - // nothing to do - } - - public byte[] serialize(String topic, Short data) { - if (data == null) - return null; - - return new byte[] { - (byte) (data >>> 8), - data.byteValue() - }; - } - - public void close() { - // nothing to do - } -} \ No newline at end of file diff --git a/clients/src/main/java/org/oracle/okafka/common/serialization/StringDeserializer.java b/clients/src/main/java/org/oracle/okafka/common/serialization/StringDeserializer.java deleted file mode 100644 index 0f4877a..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/serialization/StringDeserializer.java +++ /dev/null @@ -1,65 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.serialization; - -import java.io.UnsupportedEncodingException; -import java.util.Map; - -import org.oracle.okafka.common.errors.SerializationException; - -/** - * String encoding defaults to UTF8 and can be customized by setting the property key.deserializer.encoding, - * value.deserializer.encoding or deserializer.encoding. The first two take precedence over the last. - */ -public class StringDeserializer implements Deserializer { - private String encoding = "UTF8"; - - @Override - public void configure(Map configs, boolean isKey) { - String propertyName = isKey ? 
"key.deserializer.encoding" : "value.deserializer.encoding"; - Object encodingValue = configs.get(propertyName); - if (encodingValue == null) - encodingValue = configs.get("deserializer.encoding"); - if (encodingValue instanceof String) - encoding = (String) encodingValue; - } - - @Override - public String deserialize(String topic, byte[] data) { - try { - if (data == null) - return null; - else - return new String(data, encoding); - } catch (UnsupportedEncodingException e) { - throw new SerializationException("Error when deserializing byte[] to string due to unsupported encoding " + encoding); - } - } - - @Override - public void close() { - // nothing to do - } -} diff --git a/clients/src/main/java/org/oracle/okafka/common/serialization/StringSerializer.java b/clients/src/main/java/org/oracle/okafka/common/serialization/StringSerializer.java deleted file mode 100644 index 1b1a160..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/serialization/StringSerializer.java +++ /dev/null @@ -1,65 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.serialization; - -import java.io.UnsupportedEncodingException; -import java.util.Map; - -import org.oracle.okafka.common.errors.SerializationException; - -/** - * String encoding defaults to UTF8 and can be customized by setting the property key.serializer.encoding, - * value.serializer.encoding or serializer.encoding. The first two take precedence over the last. - */ -public class StringSerializer implements Serializer { - private String encoding = "UTF8"; - - @Override - public void configure(Map configs, boolean isKey) { - String propertyName = isKey ? 
"key.serializer.encoding" : "value.serializer.encoding"; - Object encodingValue = configs.get(propertyName); - if (encodingValue == null) - encodingValue = configs.get("serializer.encoding"); - if (encodingValue instanceof String) - encoding = (String) encodingValue; - } - - @Override - public byte[] serialize(String topic, String data) { - try { - if (data == null) - return null; - - return data.getBytes(encoding); - } catch (UnsupportedEncodingException e) { - throw new SerializationException("Error when serializing string to byte[] due to unsupported encoding " + encoding); - } - } - - @Override - public void close() { - // nothing to do - } -} \ No newline at end of file diff --git a/clients/src/main/java/org/oracle/okafka/common/utils/AbstractIterator.java b/clients/src/main/java/org/oracle/okafka/common/utils/AbstractIterator.java deleted file mode 100644 index e15a8fc..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/utils/AbstractIterator.java +++ /dev/null @@ -1,96 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.oracle.okafka.common.utils; - -import java.util.Iterator; -import java.util.NoSuchElementException; - -/** - * A base class that simplifies implementing an iterator - * @param The type of thing we are iterating over - */ -public abstract class AbstractIterator implements Iterator { - - private enum State { - READY, NOT_READY, DONE, FAILED - } - - private State state = State.NOT_READY; - private T next; - - @Override - public boolean hasNext() { - switch (state) { - case FAILED: - throw new IllegalStateException("Iterator is in failed state"); - case DONE: - return false; - case READY: - return true; - default: - return maybeComputeNext(); - } - } - - @Override - public T next() { - if (!hasNext()) - throw new NoSuchElementException(); - state = State.NOT_READY; - if (next == null) - throw new IllegalStateException("Expected item but none found."); - return next; - } - - @Override - public void remove() { - throw new UnsupportedOperationException("Removal not supported"); - } - - public T peek() { - if (!hasNext()) - throw new NoSuchElementException(); - return next; - } - - protected T allDone() { - state = State.DONE; - return null; - } - - protected abstract T makeNext(); - - private Boolean maybeComputeNext() { - state = State.FAILED; - next = makeNext(); - if (state == State.DONE) { - return false; - } else { - state = State.READY; - return true; - } - } - -} diff --git a/clients/src/main/java/org/oracle/okafka/common/utils/AppInfoParser.java b/clients/src/main/java/org/oracle/okafka/common/utils/AppInfoParser.java deleted file mode 100644 index daf818f..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/utils/AppInfoParser.java +++ /dev/null @@ -1,145 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
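
`AbstractIterator`, deleted above, is a small template: a subclass implements `makeNext()` to produce the next element and returns `allDone()` when exhausted, while the base class manages the `hasNext()`/`next()`/`peek()` state machine. A usage sketch against that API as it stood before this change (the identically shaped `org.apache.kafka.common.utils.AbstractIterator` works the same way); `CountingIterator` is a made-up name:

```java
import org.oracle.okafka.common.utils.AbstractIterator;

// Emits 0 .. limit-1 and then signals completion through allDone().
public class CountingIterator extends AbstractIterator<Integer> {
    private final int limit;
    private int count = 0;

    public CountingIterator(int limit) {
        this.limit = limit;
    }

    @Override
    protected Integer makeNext() {
        if (count >= limit)
            return allDone();   // moves the iterator into the DONE state
        return count++;
    }

    public static void main(String[] args) {
        CountingIterator it = new CountingIterator(3);
        while (it.hasNext())
            System.out.println(it.next());   // prints 0, 1, 2
    }
}
```
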
- */ - -package org.oracle.okafka.common.utils; - -import java.io.InputStream; -import java.lang.management.ManagementFactory; -import java.util.Properties; - -import javax.management.JMException; -import javax.management.MBeanServer; -import javax.management.ObjectName; - -import org.oracle.okafka.common.MetricName; -import org.oracle.okafka.common.metrics.Gauge; -import org.oracle.okafka.common.metrics.MetricConfig; -import org.oracle.okafka.common.metrics.Metrics; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class AppInfoParser { - private static final Logger log = LoggerFactory.getLogger(AppInfoParser.class); - private static final String VERSION; - private static final String COMMIT_ID; - - static { - Properties props = new Properties(); - try (InputStream resourceStream = AppInfoParser.class.getResourceAsStream("/kafka/kafka-version.properties")) { - props.load(resourceStream); - } catch (Exception e) { - log.warn("Error while loading kafka-version.properties :" + e.getMessage()); - } - VERSION = props.getProperty("version", "unknown").trim(); - COMMIT_ID = props.getProperty("commitId", "unknown").trim(); - } - - public static String getVersion() { - return VERSION; - } - - public static String getCommitId() { - return COMMIT_ID; - } - - public static synchronized void registerAppInfo(String prefix, String id, Metrics metrics) { - try { - ObjectName name = new ObjectName(prefix + ":type=app-info,id=" + Sanitizer.jmxSanitize(id)); - AppInfo mBean = new AppInfo(); - ManagementFactory.getPlatformMBeanServer().registerMBean(mBean, name); - - registerMetrics(metrics); // prefix will be added later by JmxReporter - } catch (JMException e) { - log.warn("Error registering AppInfo mbean", e); - } - } - - public static synchronized void unregisterAppInfo(String prefix, String id, Metrics metrics) { - MBeanServer server = ManagementFactory.getPlatformMBeanServer(); - try { - ObjectName name = new ObjectName(prefix + ":type=app-info,id=" + Sanitizer.jmxSanitize(id)); - if (server.isRegistered(name)) - server.unregisterMBean(name); - - unregisterMetrics(metrics); - } catch (JMException e) { - log.warn("Error unregistering AppInfo mbean", e); - } - } - - private static MetricName metricName(Metrics metrics, String name) { - return metrics.metricName(name, "app-info", "Metric indicating " + name); - } - - private static void registerMetrics(Metrics metrics) { - if (metrics != null) { - metrics.addMetric(metricName(metrics, "version"), new ImmutableValue<>(VERSION)); - metrics.addMetric(metricName(metrics, "commit-id"), new ImmutableValue<>(COMMIT_ID)); - } - } - - private static void unregisterMetrics(Metrics metrics) { - if (metrics != null) { - metrics.removeMetric(metricName(metrics, "version")); - metrics.removeMetric(metricName(metrics, "commit-id")); - } - } - - public interface AppInfoMBean { - public String getVersion(); - public String getCommitId(); - } - - public static class AppInfo implements AppInfoMBean { - - public AppInfo() { - log.info("Kafka version : " + AppInfoParser.getVersion()); - log.info("Kafka commitId : " + AppInfoParser.getCommitId()); - } - - @Override - public String getVersion() { - return AppInfoParser.getVersion(); - } - - @Override - public String getCommitId() { - return AppInfoParser.getCommitId(); - } - - } - - static class ImmutableValue implements Gauge { - private final T value; - - public ImmutableValue(T value) { - this.value = value; - } - - @Override - public T value(MetricConfig config, long now) { - return value; - } - } -} diff 
--git a/clients/src/main/java/org/oracle/okafka/common/utils/ByteBufferInputStream.java b/clients/src/main/java/org/oracle/okafka/common/utils/ByteBufferInputStream.java deleted file mode 100644 index cc4e7fb..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/utils/ByteBufferInputStream.java +++ /dev/null @@ -1,56 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.utils; - -import java.io.InputStream; -import java.nio.ByteBuffer; - -/** - * A byte buffer backed input inputStream - */ -public final class ByteBufferInputStream extends InputStream { - private final ByteBuffer buffer; - - public ByteBufferInputStream(ByteBuffer buffer) { - this.buffer = buffer; - } - - public int read() { - if (!buffer.hasRemaining()) { - return -1; - } - return buffer.get() & 0xFF; - } - - public int read(byte[] bytes, int off, int len) { - if (!buffer.hasRemaining()) { - return -1; - } - - len = Math.min(len, buffer.remaining()); - buffer.get(bytes, off, len); - return len; - } -} diff --git a/clients/src/main/java/org/oracle/okafka/common/utils/ByteBufferOutputStream.java b/clients/src/main/java/org/oracle/okafka/common/utils/ByteBufferOutputStream.java deleted file mode 100644 index 7d3552c..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/utils/ByteBufferOutputStream.java +++ /dev/null @@ -1,141 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.oracle.okafka.common.utils; - -import java.io.OutputStream; -import java.nio.ByteBuffer; - -/** - * A ByteBuffer-backed OutputStream that expands the internal ByteBuffer as required. Given this, the caller should - * always access the underlying ByteBuffer via the {@link #buffer()} method until all writes are completed. - * - * This class is typically used for 2 purposes: - * - * 1. Write to a ByteBuffer when there is a chance that we may need to expand it in order to fit all the desired data - * 2. Write to a ByteBuffer via methods that expect an OutputStream interface - * - * Hard to track bugs can happen when this class is used for the second reason and unexpected buffer expansion happens. - * So, it's best to assume that buffer expansion can always happen. An improvement would be to create a separate class - * that throws an error if buffer expansion is required to avoid the issue altogether. - */ -public class ByteBufferOutputStream extends OutputStream { - - private static final float REALLOCATION_FACTOR = 1.1f; - - private final int initialCapacity; - private final int initialPosition; - private ByteBuffer buffer; - - /** - * Creates an instance of this class that will write to the received `buffer` up to its `limit`. If necessary to - * satisfy `write` or `position` calls, larger buffers will be allocated so the {@link #buffer()} method may return - * a different buffer than the received `buffer` parameter. - * - * Prefer one of the constructors that allocate the internal buffer for clearer semantics. - */ - public ByteBufferOutputStream(ByteBuffer buffer) { - this.buffer = buffer; - this.initialPosition = buffer.position(); - this.initialCapacity = buffer.capacity(); - } - - public ByteBufferOutputStream(int initialCapacity) { - this(initialCapacity, false); - } - - public ByteBufferOutputStream(int initialCapacity, boolean directBuffer) { - this(directBuffer ? ByteBuffer.allocateDirect(initialCapacity) : ByteBuffer.allocate(initialCapacity)); - } - - public void write(int b) { - ensureRemaining(1); - buffer.put((byte) b); - } - - public void write(byte[] bytes, int off, int len) { - ensureRemaining(len); - buffer.put(bytes, off, len); - } - - public void write(ByteBuffer sourceBuffer) { - ensureRemaining(sourceBuffer.remaining()); - buffer.put(sourceBuffer); - } - - public ByteBuffer buffer() { - return buffer; - } - - public int position() { - return buffer.position(); - } - - public int remaining() { - return buffer.remaining(); - } - - public int limit() { - return buffer.limit(); - } - - public void position(int position) { - ensureRemaining(position - buffer.position()); - buffer.position(position); - } - - /** - * The capacity of the first internal ByteBuffer used by this class. This is useful in cases where a pooled - * ByteBuffer was passed via the constructor and it needs to be returned to the pool. - */ - public int initialCapacity() { - return initialCapacity; - } - - /** - * Ensure there is enough space to write some number of bytes, expanding the underlying buffer if necessary. - * This can be used to avoid incremental expansions through calls to {@link #write(int)} when you know how - * many total bytes are needed. 
- * - * @param remainingBytesRequired The number of bytes required - */ - public void ensureRemaining(int remainingBytesRequired) { - if (remainingBytesRequired > buffer.remaining()) - expandBuffer(remainingBytesRequired); - } - - private void expandBuffer(int remainingRequired) { - int expandSize = Math.max((int) (buffer.limit() * REALLOCATION_FACTOR), buffer.position() + remainingRequired); - ByteBuffer temp = ByteBuffer.allocate(expandSize); - int limit = limit(); - buffer.flip(); - temp.put(buffer); - buffer.limit(limit); - // reset the old buffer's position so that the partial data in the new buffer cannot be mistakenly consumed - // we should ideally only do this for the original buffer, but the additional complexity doesn't seem worth it - buffer.position(initialPosition); - buffer = temp; - } - -} diff --git a/clients/src/main/java/org/oracle/okafka/common/utils/ByteUtils.java b/clients/src/main/java/org/oracle/okafka/common/utils/ByteUtils.java deleted file mode 100644 index 9d5a628..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/utils/ByteUtils.java +++ /dev/null @@ -1,342 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.utils; - -import java.io.DataInput; -import java.io.DataOutput; -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.nio.ByteBuffer; - -/** - * This classes exposes low-level methods for reading/writing from byte streams or buffers. - */ -public final class ByteUtils { - - private ByteUtils() {} - - /** - * Read an unsigned integer from the current position in the buffer, incrementing the position by 4 bytes - * - * @param buffer The buffer to read from - * @return The integer read, as a long to avoid signedness - */ - public static long readUnsignedInt(ByteBuffer buffer) { - return buffer.getInt() & 0xffffffffL; - } - - /** - * Read an unsigned integer from the given position without modifying the buffers position - * - * @param buffer the buffer to read from - * @param index the index from which to read the integer - * @return The integer read, as a long to avoid signedness - */ - public static long readUnsignedInt(ByteBuffer buffer, int index) { - return buffer.getInt(index) & 0xffffffffL; - } - - /** - * Read an unsigned integer stored in little-endian format from the {@link InputStream}. 
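
As the `ByteBufferOutputStream` comments above warn, any write may replace the backing buffer, so callers must always re-fetch it through `buffer()` rather than holding on to the `ByteBuffer` they originally passed in. A short sketch of that behaviour, written against the API shown above (`org.apache.kafka.common.utils.ByteBufferOutputStream` has the same shape):

```java
import java.nio.ByteBuffer;

import org.oracle.okafka.common.utils.ByteBufferOutputStream;

// Writing past the initial capacity forces an expansion, after which the
// originally supplied ByteBuffer is no longer the backing buffer.
public class ExpansionExample {
    public static void main(String[] args) {
        ByteBuffer original = ByteBuffer.allocate(4);
        ByteBufferOutputStream out = new ByteBufferOutputStream(original);

        out.write(new byte[] {1, 2, 3, 4, 5, 6, 7, 8}, 0, 8);  // forces expansion

        ByteBuffer backing = out.buffer();
        System.out.println(backing == original);   // false: a larger buffer was allocated
        System.out.println(backing.position());    // 8 bytes written so far
    }
}
```
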
- * - * @param in The stream to read from - * @return The integer read (MUST BE TREATED WITH SPECIAL CARE TO AVOID SIGNEDNESS) - */ - public static int readUnsignedIntLE(InputStream in) throws IOException { - return in.read() - | (in.read() << 8) - | (in.read() << 16) - | (in.read() << 24); - } - - /** - * Read an unsigned integer stored in little-endian format from a byte array - * at a given offset. - * - * @param buffer The byte array to read from - * @param offset The position in buffer to read from - * @return The integer read (MUST BE TREATED WITH SPECIAL CARE TO AVOID SIGNEDNESS) - */ - public static int readUnsignedIntLE(byte[] buffer, int offset) { - return (buffer[offset] << 0 & 0xff) - | ((buffer[offset + 1] & 0xff) << 8) - | ((buffer[offset + 2] & 0xff) << 16) - | ((buffer[offset + 3] & 0xff) << 24); - } - - /** - * Write the given long value as a 4 byte unsigned integer. Overflow is ignored. - * - * @param buffer The buffer to write to - * @param index The position in the buffer at which to begin writing - * @param value The value to write - */ - public static void writeUnsignedInt(ByteBuffer buffer, int index, long value) { - buffer.putInt(index, (int) (value & 0xffffffffL)); - } - - /** - * Write the given long value as a 4 byte unsigned integer. Overflow is ignored. - * - * @param buffer The buffer to write to - * @param value The value to write - */ - public static void writeUnsignedInt(ByteBuffer buffer, long value) { - buffer.putInt((int) (value & 0xffffffffL)); - } - - /** - * Write an unsigned integer in little-endian format to the {@link OutputStream}. - * - * @param out The stream to write to - * @param value The value to write - */ - public static void writeUnsignedIntLE(OutputStream out, int value) throws IOException { - out.write(value); - out.write(value >>> 8); - out.write(value >>> 16); - out.write(value >>> 24); - } - - /** - * Write an unsigned integer in little-endian format to a byte array - * at a given offset. - * - * @param buffer The byte array to write to - * @param offset The position in buffer to write to - * @param value The value to write - */ - public static void writeUnsignedIntLE(byte[] buffer, int offset, int value) { - buffer[offset] = (byte) value; - buffer[offset + 1] = (byte) (value >>> 8); - buffer[offset + 2] = (byte) (value >>> 16); - buffer[offset + 3] = (byte) (value >>> 24); - } - - /** - * Read an integer stored in variable-length format using zig-zag decoding from - * Google Protocol Buffers. - * - * @param buffer The buffer to read from - * @return The integer read - * - * @throws IllegalArgumentException if variable-length value does not terminate after 5 bytes have been read - */ - public static int readVarint(ByteBuffer buffer) { - int value = 0; - int i = 0; - int b; - while (((b = buffer.get()) & 0x80) != 0) { - value |= (b & 0x7f) << i; - i += 7; - if (i > 28) - throw illegalVarintException(value); - } - value |= b << i; - return (value >>> 1) ^ -(value & 1); - } - - /** - * Read an integer stored in variable-length format using zig-zag decoding from - * Google Protocol Buffers. 
- * - * @param in The input to read from - * @return The integer read - * - * @throws IllegalArgumentException if variable-length value does not terminate after 5 bytes have been read - * @throws IOException if {@link DataInput} throws {@link IOException} - */ - public static int readVarint(DataInput in) throws IOException { - int value = 0; - int i = 0; - int b; - while (((b = in.readByte()) & 0x80) != 0) { - value |= (b & 0x7f) << i; - i += 7; - if (i > 28) - throw illegalVarintException(value); - } - value |= b << i; - return (value >>> 1) ^ -(value & 1); - } - - /** - * Read a long stored in variable-length format using zig-zag decoding from - * Google Protocol Buffers. - * - * @param in The input to read from - * @return The long value read - * - * @throws IllegalArgumentException if variable-length value does not terminate after 10 bytes have been read - * @throws IOException if {@link DataInput} throws {@link IOException} - */ - public static long readVarlong(DataInput in) throws IOException { - long value = 0L; - int i = 0; - long b; - while (((b = in.readByte()) & 0x80) != 0) { - value |= (b & 0x7f) << i; - i += 7; - if (i > 63) - throw illegalVarlongException(value); - } - value |= b << i; - return (value >>> 1) ^ -(value & 1); - } - - /** - * Read a long stored in variable-length format using zig-zag decoding from - * Google Protocol Buffers. - * - * @param buffer The buffer to read from - * @return The long value read - * - * @throws IllegalArgumentException if variable-length value does not terminate after 10 bytes have been read - */ - public static long readVarlong(ByteBuffer buffer) { - long value = 0L; - int i = 0; - long b; - while (((b = buffer.get()) & 0x80) != 0) { - value |= (b & 0x7f) << i; - i += 7; - if (i > 63) - throw illegalVarlongException(value); - } - value |= b << i; - return (value >>> 1) ^ -(value & 1); - } - - /** - * Write the given integer following the variable-length zig-zag encoding from - * Google Protocol Buffers - * into the output. - * - * @param value The value to write - * @param out The output to write to - */ - public static void writeVarint(int value, DataOutput out) throws IOException { - int v = (value << 1) ^ (value >> 31); - while ((v & 0xffffff80) != 0L) { - out.writeByte((v & 0x7f) | 0x80); - v >>>= 7; - } - out.writeByte((byte) v); - } - - /** - * Write the given integer following the variable-length zig-zag encoding from - * Google Protocol Buffers - * into the buffer. - * - * @param value The value to write - * @param buffer The output to write to - */ - public static void writeVarint(int value, ByteBuffer buffer) { - int v = (value << 1) ^ (value >> 31); - while ((v & 0xffffff80) != 0L) { - byte b = (byte) ((v & 0x7f) | 0x80); - buffer.put(b); - v >>>= 7; - } - buffer.put((byte) v); - } - - /** - * Write the given integer following the variable-length zig-zag encoding from - * Google Protocol Buffers - * into the output. - * - * @param value The value to write - * @param out The output to write to - */ - public static void writeVarlong(long value, DataOutput out) throws IOException { - long v = (value << 1) ^ (value >> 63); - while ((v & 0xffffffffffffff80L) != 0L) { - out.writeByte(((int) v & 0x7f) | 0x80); - v >>>= 7; - } - out.writeByte((byte) v); - } - - /** - * Write the given integer following the variable-length zig-zag encoding from - * Google Protocol Buffers - * into the buffer. 
- * - * @param value The value to write - * @param buffer The buffer to write to - */ - public static void writeVarlong(long value, ByteBuffer buffer) { - long v = (value << 1) ^ (value >> 63); - while ((v & 0xffffffffffffff80L) != 0L) { - byte b = (byte) ((v & 0x7f) | 0x80); - buffer.put(b); - v >>>= 7; - } - buffer.put((byte) v); - } - - /** - * Number of bytes needed to encode an integer in variable-length format. - * - * @param value The signed value - */ - public static int sizeOfVarint(int value) { - int v = (value << 1) ^ (value >> 31); - int bytes = 1; - while ((v & 0xffffff80) != 0L) { - bytes += 1; - v >>>= 7; - } - return bytes; - } - - /** - * Number of bytes needed to encode a long in variable-length format. - * - * @param value The signed value - */ - public static int sizeOfVarlong(long value) { - long v = (value << 1) ^ (value >> 63); - int bytes = 1; - while ((v & 0xffffffffffffff80L) != 0L) { - bytes += 1; - v >>>= 7; - } - return bytes; - } - - private static IllegalArgumentException illegalVarintException(int value) { - throw new IllegalArgumentException("Varint is too long, the most significant bit in the 5th byte is set, " + - "converted value: " + Integer.toHexString(value)); - } - - private static IllegalArgumentException illegalVarlongException(long value) { - throw new IllegalArgumentException("Varlong is too long, most significant bit in the 10th byte is set, " + - "converted value: " + Long.toHexString(value)); - } -} diff --git a/clients/src/main/java/org/oracle/okafka/common/utils/Bytes.java b/clients/src/main/java/org/oracle/okafka/common/utils/Bytes.java deleted file mode 100644 index 6a9045d..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/utils/Bytes.java +++ /dev/null @@ -1,190 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.utils; - -import java.io.Serializable; -import java.util.Arrays; -import java.util.Comparator; - -/** - * Utility class that handles immutable byte arrays. - */ -public class Bytes implements Comparable { - - private static final char[] HEX_CHARS_UPPER = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F'}; - - private final byte[] bytes; - - // cache the hash code for the string, default to 0 - private int hashCode; - - public static Bytes wrap(byte[] bytes) { - if (bytes == null) - return null; - return new Bytes(bytes); - } - - /** - * Create a Bytes using the byte array. - * - * @param bytes This array becomes the backing storage for the object. 
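
The `readVarint`/`writeVarint` pair above implements Protocol Buffers zig-zag varint coding: the sign bit is folded into the low bit so that small magnitudes, negative as well as positive, encode into very few bytes. A round-trip sketch against the API shown above (`org.apache.kafka.common.utils.ByteUtils` exposes the same static methods):

```java
import java.nio.ByteBuffer;

import org.oracle.okafka.common.utils.ByteUtils;

// 0 and -1 take a single byte, 300 takes two, Integer.MIN_VALUE takes five.
public class VarintExample {
    public static void main(String[] args) {
        int[] samples = {0, -1, 300, Integer.MIN_VALUE};

        for (int value : samples) {
            ByteBuffer buf = ByteBuffer.allocate(ByteUtils.sizeOfVarint(value));
            ByteUtils.writeVarint(value, buf);
            buf.flip();
            int decoded = ByteUtils.readVarint(buf);
            System.out.printf("%d -> %d byte(s), round-trips: %b%n",
                    value, buf.limit(), decoded == value);
        }
    }
}
```
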
- */ - public Bytes(byte[] bytes) { - this.bytes = bytes; - - // initialize hash code to 0 - hashCode = 0; - } - - /** - * Get the data from the Bytes. - * @return The underlying byte array - */ - public byte[] get() { - return this.bytes; - } - - /** - * The hashcode is cached except for the case where it is computed as 0, in which - * case we compute the hashcode on every call. - * - * @return the hashcode - */ - @Override - public int hashCode() { - if (hashCode == 0) { - hashCode = Arrays.hashCode(bytes); - } - - return hashCode; - } - - @Override - public boolean equals(Object other) { - if (this == other) - return true; - if (other == null) - return false; - - // we intentionally use the function to compute hashcode here - if (this.hashCode() != other.hashCode()) - return false; - - if (other instanceof Bytes) - return Arrays.equals(this.bytes, ((Bytes) other).get()); - - return false; - } - - @Override - public int compareTo(Bytes that) { - return BYTES_LEXICO_COMPARATOR.compare(this.bytes, that.bytes); - } - - @Override - public String toString() { - return Bytes.toString(bytes, 0, bytes.length); - } - - /** - * Write a printable representation of a byte array. Non-printable - * characters are hex escaped in the format \\x%02X, eg: - * \x00 \x05 etc. - * - * This function is brought from org.apache.hadoop.hbase.util.Bytes - * - * @param b array to write out - * @param off offset to start at - * @param len length to write - * @return string output - */ - private static String toString(final byte[] b, int off, int len) { - StringBuilder result = new StringBuilder(); - - if (b == null) - return result.toString(); - - // just in case we are passed a 'len' that is > buffer length... - if (off >= b.length) - return result.toString(); - - if (off + len > b.length) - len = b.length - off; - - for (int i = off; i < off + len; ++i) { - int ch = b[i] & 0xFF; - if (ch >= ' ' && ch <= '~' && ch != '\\') { - result.append((char) ch); - } else { - result.append("\\x"); - result.append(HEX_CHARS_UPPER[ch / 0x10]); - result.append(HEX_CHARS_UPPER[ch % 0x10]); - } - } - return result.toString(); - } - - /** - * A byte array comparator based on lexicograpic ordering. 
- */ - public final static LexicographicByteArrayComparator BYTES_LEXICO_COMPARATOR = new LexicographicByteArrayComparator(); - - public interface ByteArrayComparator extends Comparator, Serializable { - - int compare(final byte[] buffer1, int offset1, int length1, - final byte[] buffer2, int offset2, int length2); - } - - private static class LexicographicByteArrayComparator implements Comparator, Serializable{ - - - public int compare(byte[] buffer1, byte[] buffer2) { - return compare(buffer1, 0, buffer1.length, buffer2, 0, buffer2.length); - } - - public int compare(final byte[] buffer1, int offset1, int length1, - final byte[] buffer2, int offset2, int length2) { - - // short circuit equal case - if (buffer1 == buffer2 && - offset1 == offset2 && - length1 == length2) { - return 0; - } - - // similar to Arrays.compare() but considers offset and length - int end1 = offset1 + length1; - int end2 = offset2 + length2; - for (int i = offset1, j = offset2; i < end1 && j < end2; i++, j++) { - int a = buffer1[i] & 0xff; - int b = buffer2[j] & 0xff; - if (a != b) { - return a - b; - } - } - return length1 - length2; - } - } -} \ No newline at end of file diff --git a/clients/src/main/java/org/oracle/okafka/common/utils/Checksums.java b/clients/src/main/java/org/oracle/okafka/common/utils/Checksums.java deleted file mode 100644 index 326215d..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/utils/Checksums.java +++ /dev/null @@ -1,75 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.utils; - -import java.nio.ByteBuffer; -import java.util.zip.Checksum; - -/** - * Utility methods for `Checksum` instances. - * - * Implementation note: we can add methods to our implementations of CRC32 and CRC32C, but we cannot do the same for - * the Java implementations (we prefer the Java 9 implementation of CRC32C if available). A utility class is the - * simplest way to add methods that are useful for all Checksum implementations. - * - * NOTE: This class is intended for INTERNAL usage only within Kafka. 
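
The comparator above orders byte arrays by treating each byte as an unsigned value in the range 0x00 to 0xFF, which is not the same as comparing Java's signed bytes directly. A small sketch against the `Bytes` API shown above; on JDK 9 and later, `Arrays.compareUnsigned` yields the same ordering for raw arrays:

```java
import java.util.Arrays;

import org.oracle.okafka.common.utils.Bytes;

// 0x7F sorts before 0x80 under unsigned comparison, even though as a signed
// Java byte 0x80 is -128.
public class LexicographicExample {
    public static void main(String[] args) {
        Bytes a = Bytes.wrap(new byte[] {(byte) 0x7F});
        Bytes b = Bytes.wrap(new byte[] {(byte) 0x80});

        System.out.println(a.compareTo(b) < 0);                            // true
        System.out.println(Arrays.compareUnsigned(a.get(), b.get()) < 0);  // true
    }
}
```
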
- */ -public final class Checksums { - - private Checksums() { - } - - public static void update(Checksum checksum, ByteBuffer buffer, int length) { - update(checksum, buffer, 0, length); - } - - public static void update(Checksum checksum, ByteBuffer buffer, int offset, int length) { - if (buffer.hasArray()) { - checksum.update(buffer.array(), buffer.position() + buffer.arrayOffset() + offset, length); - } else { - int start = buffer.position() + offset; - for (int i = start; i < start + length; i++) - checksum.update(buffer.get(i)); - } - } - - public static void updateInt(Checksum checksum, int input) { - checksum.update((byte) (input >> 24)); - checksum.update((byte) (input >> 16)); - checksum.update((byte) (input >> 8)); - checksum.update((byte) input /* >> 0 */); - } - - public static void updateLong(Checksum checksum, long input) { - checksum.update((byte) (input >> 56)); - checksum.update((byte) (input >> 48)); - checksum.update((byte) (input >> 40)); - checksum.update((byte) (input >> 32)); - checksum.update((byte) (input >> 24)); - checksum.update((byte) (input >> 16)); - checksum.update((byte) (input >> 8)); - checksum.update((byte) input /* >> 0 */); - } -} diff --git a/clients/src/main/java/org/oracle/okafka/common/utils/CloseableIterator.java b/clients/src/main/java/org/oracle/okafka/common/utils/CloseableIterator.java deleted file mode 100644 index 3afbf56..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/utils/CloseableIterator.java +++ /dev/null @@ -1,38 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.utils; - -import java.io.Closeable; -import java.util.Iterator; - -/** - * Iterators that need to be closed in order to release resources should implement this interface. - * - * Warning: before implementing this interface, consider if there are better options. The chance of misuse is - * a bit high since people are used to iterating without closing. - */ -public interface CloseableIterator extends Iterator, Closeable { - void close(); -} diff --git a/clients/src/main/java/org/oracle/okafka/common/utils/ConnectionUtils.java b/clients/src/main/java/org/oracle/okafka/common/utils/ConnectionUtils.java index 6231f41..ee223e3 100644 --- a/clients/src/main/java/org/oracle/okafka/common/utils/ConnectionUtils.java +++ b/clients/src/main/java/org/oracle/okafka/common/utils/ConnectionUtils.java @@ -1,16 +1,24 @@ /* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. 
-** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ + ** OKafka Java Client version 23.4. + ** + ** Copyright (c) 2019, 2024 Oracle and/or its affiliates. + ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. + */ package org.oracle.okafka.common.utils; import java.io.File; import java.io.FileReader; +import java.net.InetSocketAddress; +import java.nio.ByteOrder; import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; import java.sql.SQLException; +import java.sql.Statement; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; import java.util.Properties; import javax.jms.JMSException; @@ -20,77 +28,294 @@ import org.oracle.okafka.clients.CommonClientConfigs; import org.oracle.okafka.common.Node; -import org.oracle.okafka.common.config.AbstractConfig; +import org.apache.kafka.common.config.AbstractConfig; import org.oracle.okafka.common.config.SslConfigs; import org.oracle.okafka.common.errors.ConnectionException; +import org.slf4j.Logger; +import oracle.jdbc.driver.OracleConnection; import oracle.jdbc.pool.OracleDataSource; import oracle.jms.AQjmsFactory; +import oracle.jms.AQjmsSession; +import oracle.jms.AQjmsTopicConnectionFactory; public class ConnectionUtils { - - public static String createUrl(Node node, AbstractConfig configs) { - - if( !configs.getString(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG).equalsIgnoreCase("PLAINTEXT")) { - return "jdbc:oracle:thin:@" + configs.getString(SslConfigs.TNS_ALIAS); // + "?TNS_ADMIN=" + configs.getString(SslConfigs.ORACLE_NET_TNS_ADMIN); - } - StringBuilder urlBuilder =new StringBuilder("jdbc:oracle:thin:@(DESCRIPTION=(ADDRESS=(PROTOCOL=tcp)(PORT=" + Integer.toString(node.port())+")(HOST=" + node.host() +"))"); - urlBuilder.append("(CONNECT_DATA=(SERVICE_NAME=" + node.serviceName() + ")"+"(INSTANCE_NAME=" + node.instanceName() + ")))"); - return urlBuilder.toString(); - } - public static Connection createJDBCConnection(Node node, AbstractConfig configs) throws SQLException{ - OracleDataSource s=new OracleDataSource(); - s.setURL(createUrl(node, configs)); - return s.getConnection(); - } - - public static TopicConnection createTopicConnection(Node node,AbstractConfig configs) throws JMSException { - if(node==null) - throw new ConnectionException("Invalid argument: Node cannot be null"); - String url = createUrl(node, configs); - OracleDataSource dataSource; - try { - dataSource =new OracleDataSource(); - dataSource.setURL(url); - } - catch(SQLException sql) { - throw new JMSException(sql.toString()); - } - TopicConnectionFactory connFactory = AQjmsFactory.getTopicConnectionFactory(dataSource); - TopicConnection conn = connFactory.createTopicConnection(); - conn.setClientID(configs.getString(CommonClientConfigs.CLIENT_ID_CONFIG)); - - return conn; - } - - public static TopicSession createTopicSession(TopicConnection conn, int mode, boolean transacted) throws JMSException { - if(conn == null) - throw new ConnectionException("Invalid argument: Connection cannot be null"); - return conn.createTopicSession(transacted, mode); - - } - - public static String getUsername(AbstractConfig configs) { - File file = null; - FileReader fr = null; - try { - file = new File(configs.getString(CommonClientConfigs.ORACLE_NET_TNS_ADMIN)+"/ojdbc.properties"); - fr = new FileReader(file); - Properties prop = new Properties(); - prop.load(fr); - return prop.getProperty("user"); - } catch( 
Exception exception) { - //do nothing - } finally { - try { - if(fr != null) - fr.close(); - }catch (Exception e) { - - } - - } - return null; - } + public static String createUrl(Node node, AbstractConfig configs) { + + if( !configs.getString(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG).equalsIgnoreCase("PLAINTEXT")) { + return "jdbc:oracle:thin:@" + configs.getString(SslConfigs.TNS_ALIAS); // + "?TNS_ADMIN=" + configs.getString(SslConfigs.ORACLE_NET_TNS_ADMIN); + } + StringBuilder urlBuilder =new StringBuilder("jdbc:oracle:thin:@(DESCRIPTION=(ADDRESS=(PROTOCOL=tcp)(PORT=" + Integer.toString(node.port())+")(HOST=" + node.host() +"))"); + urlBuilder.append("(CONNECT_DATA=(SERVICE_NAME=" + node.serviceName() + ")"); + if(node.instanceName()!=null && node.instanceName().length()>0) + { + urlBuilder.append("(INSTANCE_NAME=" + node.instanceName() + ")"); + } + urlBuilder.append("))"); + String url = urlBuilder.toString(); + return url; + } + public static Connection createJDBCConnection(Node node, AbstractConfig configs) throws SQLException{ + OracleDataSource s=new OracleDataSource(); + String dbUrl = createUrl(node, configs); + s.setURL(dbUrl); + Connection conn = s.getConnection(); + try { + int instId = Integer.parseInt(((oracle.jdbc.internal.OracleConnection)conn).getServerSessionInfo().getProperty("AUTH_INSTANCE_NO")); + String serviceName = ((oracle.jdbc.internal.OracleConnection)conn).getServerSessionInfo().getProperty("SERVICE_NAME"); + String instanceName = ((oracle.jdbc.internal.OracleConnection)conn).getServerSessionInfo().getProperty("INSTANCE_NAME"); + String userName = conn.getMetaData().getUserName(); + node.setId(instId); + node.setService(serviceName); + node.setInstanceName(instanceName); + node.setUser(userName); + node.updateHashCode(); + }catch(Exception e) + { + System.out.println("Exception while connecting to database with connection string " + dbUrl +":" + e); + e.printStackTrace(); + //log.error("Exception while setting new instance ids " + e); + throw e; + } + return conn; + } + + public static TopicConnection createTopicConnection(java.sql.Connection dbConn, AbstractConfig configs, Logger log) + throws JMSException { + if(dbConn==null) + throw new ConnectionException("Invalid argument: Connection cannot be null"); + + try { + log.info("Topic Connection to Oracle Database : " + dbConn.getMetaData().getURL()); + }catch(Exception ignoreE) + { + } + + TopicConnection conn = AQjmsTopicConnectionFactory.createTopicConnection(dbConn); + conn.setClientID(configs.getString(CommonClientConfigs.CLIENT_ID_CONFIG)); + return conn; + } + + public static TopicConnection createTopicConnection(Node node,AbstractConfig configs, Logger log) throws JMSException { + if(node==null) + throw new ConnectionException("Invalid argument: Node cannot be null"); + + String url = createUrl(node, configs); + log.info("Connecting to Oracle Database : "+ url); + OracleDataSource dataSource; + try { + dataSource =new OracleDataSource(); + dataSource.setURL(url); + } + catch(SQLException sql) { + throw new JMSException(sql.toString()); + } + TopicConnectionFactory connFactory = AQjmsFactory.getTopicConnectionFactory(dataSource); + TopicConnection conn = connFactory.createTopicConnection(); + conn.setClientID(configs.getString(CommonClientConfigs.CLIENT_ID_CONFIG)); + return conn; + } + + public static TopicSession createTopicSession(TopicConnection conn, int mode, boolean transacted) throws JMSException { + if(conn == null) + throw new ConnectionException("Invalid argument: Connection cannot be 
null"); + TopicSession sess = conn.createTopicSession(transacted, mode); + //ToDo: Validate if caching of dequeue statement helps or not + ((AQjmsSession)sess).setDeqStmtCachingFlag(true); + return sess; + } + + public static String getUsername(AbstractConfig configs) { + File file = null; + FileReader fr = null; + try { + file = new File(configs.getString(CommonClientConfigs.ORACLE_NET_TNS_ADMIN)+"/ojdbc.properties"); + fr = new FileReader(file); + Properties prop = new Properties(); + prop.load(fr); + return prop.getProperty("user").trim(); + } catch( Exception exception) { + //do nothing + } finally { + try { + if(fr != null) + fr.close(); + }catch (Exception e) { + + } + + } + return null; + } + public static String enquote(String name) throws IllegalArgumentException{ + if( !name.contains("'")) { + if(!name.contains("\"")) return "\"" + name + "\""; + if(name.indexOf("\"") == 0 && name.indexOf("\"", 1) == name.length() -1 ) + return name; + + } + throw new IllegalArgumentException("Invalid argument provided: " + name); + } + + public static String getDBVersion(Connection conn) throws Exception + { + String dbVersionQuery = "select version_full from PRODUCT_COMPONENT_VERSION where product like 'Oracle Database%'"; + String dbVersionStr = ""; + PreparedStatement dbVerStmt = null; + ResultSet rs = null; + try { + dbVerStmt = conn.prepareStatement(dbVersionQuery); + dbVerStmt.execute(); + rs = dbVerStmt.getResultSet(); + if(rs.next()) { + dbVersionStr = rs.getString(1); + } + }catch(Exception e) + { + throw e; + } + finally { + if(rs != null) + rs.close(); + + if(dbVerStmt != null) + dbVerStmt.close(); + } + return dbVersionStr; + } + + public static int getInstanceId(Connection conn) + { + int instNum = 0; + + try { + oracle.jdbc.internal.OracleConnection oracleInternalConn = (oracle.jdbc.internal.OracleConnection)conn; + instNum = Integer.parseInt(oracleInternalConn.getServerSessionInfo().getProperty("AUTH_INSTANCE_NO")); + + }catch(Exception ignoreE) { + + } + return instNum; + } + + public static byte[] convertTo4Byte(int len) + { + byte[] bArray = new byte[4]; + + bArray[0] = (byte)( len >>> 24 ); + bArray[1] = (byte)( len >>> 16 ); + bArray[2] = (byte)( len >>> 8 ); + bArray[3] = (byte)( len ); + + return bArray; + } + + public static int convertToInt(byte[] bInt) + { + return (((bInt[0] & 0xff) << 24) | + ((bInt[1] & 0xff) << 16) | + ((bInt[2] & 0xff) << 8) | + (bInt[3] & 0xff)); + } + + public static void remDuplicateEntries(List address) + { + if(address == null || address.size() == 0) + return; + + HashMap uniqueAddr = new HashMap(address.size()); + Iterator addIter = address.iterator(); + while(addIter.hasNext()) + { + InetSocketAddress addr = addIter.next(); + if(uniqueAddr.containsKey(addr.getHostName())) + { + addIter.remove(); + } + else + { + uniqueAddr.put(addr.getHostName(), addr); + } + } + } + + public static boolean checkIfMsgIdExist(Connection con,String topicName, String msgId , Logger log) + { + boolean msgIdExists = false; + + if(topicName == null || msgId == null) + return false; + + String qry =" Select count(*) from " +ConnectionUtils.enquote(topicName) + " where msgid = '" + msgId+"'"; + log.debug("Executing " + qry); + ResultSet rs = null; + try (Statement stmt = con.prepareCall(qry);) { + stmt.execute(qry); + rs = stmt.getResultSet(); + if(rs.next()) + { + int msgCnt = rs.getInt(1); + + if(msgCnt == 0) + { + msgIdExists = false; + } + else + msgIdExists = true; + } + else { + msgIdExists = false; + } + rs.close(); + rs = null; + + }catch(Exception e) + { + 
log.info("Exception while checking if msgId Exists or not. " + e,e); + if(rs!=null) + { + try { + rs.close(); + }catch(Exception ignoreE) {} + } + } + log.debug("Message Id "+ msgId +" Exists?: " + msgIdExists); + return msgIdExists; + } + + public static String getConnectedService(Connection conn) + { + String serviceName = null; + try { + serviceName = ((oracle.jdbc.internal.OracleConnection)conn).getServerSessionInfo().getProperty("SERVICE_NAME"); + }catch(Exception e) + { + return null; + } + return serviceName; + } + public static String getConnectedHostnPort(Connection conn) + { + String hostnPort = null; + try { + String url = conn.getMetaData().getURL(); + String host = TNSParser.getProperty(url, "HOST"); + if(host == null) + { + return null; + } + + String portStr = TNSParser.getProperty(url, "PORT"); + if(portStr== null) + return null; + + hostnPort = host+":"+portStr; + } + catch(Exception e) { + + } + return hostnPort; + } + } diff --git a/clients/src/main/java/org/oracle/okafka/common/utils/CopyOnWriteMap.java b/clients/src/main/java/org/oracle/okafka/common/utils/CopyOnWriteMap.java deleted file mode 100644 index c84e7f6..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/utils/CopyOnWriteMap.java +++ /dev/null @@ -1,157 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.oracle.okafka.common.utils; - -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.ConcurrentMap; - -/** - * A simple read-optimized map implementation that synchronizes only writes and does a full copy on each modification - */ -public class CopyOnWriteMap implements ConcurrentMap { - - private volatile Map map; - - public CopyOnWriteMap() { - this.map = new HashMap<>(); - } - - public CopyOnWriteMap(Map map) { - this.map = Collections.unmodifiableMap(map); - } - - @Override - public boolean containsKey(Object k) { - return map.containsKey(k); - } - - @Override - public boolean containsValue(Object v) { - return map.containsValue(v); - } - - @Override - public Set> entrySet() { - return map.entrySet(); - } - - @Override - public V get(Object k) { - return map.get(k); - } - - @Override - public boolean isEmpty() { - return map.isEmpty(); - } - - @Override - public Set keySet() { - return map.keySet(); - } - - @Override - public int size() { - return map.size(); - } - - @Override - public Collection values() { - return map.values(); - } - - @Override - public synchronized void clear() { - this.map = Collections.emptyMap(); - } - - @Override - public synchronized V put(K k, V v) { - Map copy = new HashMap(this.map); - V prev = copy.put(k, v); - this.map = Collections.unmodifiableMap(copy); - return prev; - } - - @Override - public synchronized void putAll(Map entries) { - Map copy = new HashMap(this.map); - copy.putAll(entries); - this.map = Collections.unmodifiableMap(copy); - } - - @Override - public synchronized V remove(Object key) { - Map copy = new HashMap(this.map); - V prev = copy.remove(key); - this.map = Collections.unmodifiableMap(copy); - return prev; - } - - @Override - public synchronized V putIfAbsent(K k, V v) { - if (!containsKey(k)) - return put(k, v); - else - return get(k); - } - - @Override - public synchronized boolean remove(Object k, Object v) { - if (containsKey(k) && get(k).equals(v)) { - remove(k); - return true; - } else { - return false; - } - } - - @Override - public synchronized boolean replace(K k, V original, V replacement) { - if (containsKey(k) && get(k).equals(original)) { - put(k, replacement); - return true; - } else { - return false; - } - } - - @Override - public synchronized V replace(K k, V v) { - if (containsKey(k)) { - return put(k, v); - } else { - return null; - } - } - -} - - - diff --git a/clients/src/main/java/org/oracle/okafka/common/utils/Crc32.java b/clients/src/main/java/org/oracle/okafka/common/utils/Crc32.java deleted file mode 100644 index a048d11..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/utils/Crc32.java +++ /dev/null @@ -1,407 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.utils; - -import java.nio.ByteBuffer; -import java.util.zip.Checksum; - -/** - * This class was taken from Hadoop org.apache.hadoop.util.PureJavaCrc32 - * - * A pure-java implementation of the CRC32 checksum that uses the same polynomial as the built-in native CRC32. - * - * This is to avoid the JNI overhead for certain uses of Checksumming where many small pieces of data are checksummed in - * succession. - * - * The current version is ~10x to 1.8x as fast as Sun's native java.util.zip.CRC32 in Java 1.6 - * - * @see java.util.zip.CRC32 - */ -public class Crc32 implements Checksum { - - /** - * Compute the CRC32 of the byte array - * - * @param bytes The array to compute the checksum for - * @return The CRC32 - */ - public static long crc32(byte[] bytes) { - return crc32(bytes, 0, bytes.length); - } - - /** - * Compute the CRC32 of the segment of the byte array given by the specified size and offset - * - * @param bytes The bytes to checksum - * @param offset the offset at which to begin checksumming - * @param size the number of bytes to checksum - * @return The CRC32 - */ - public static long crc32(byte[] bytes, int offset, int size) { - Crc32 crc = new Crc32(); - crc.update(bytes, offset, size); - return crc.getValue(); - } - - /** - * Compute the CRC32 of a byte buffer from a given offset (relative to the buffer's current position) - * - * @param buffer The buffer with the underlying data - * @param offset The offset relative to the current position - * @param size The number of bytes beginning from the offset to include - * @return The CRC32 - */ - public static long crc32(ByteBuffer buffer, int offset, int size) { - Crc32 crc = new Crc32(); - Checksums.update(crc, buffer, offset, size); - return crc.getValue(); - } - - /** the current CRC value, bit-flipped */ - private int crc; - - /** Create a new PureJavaCrc32 object. 
*/ - public Crc32() { - reset(); - } - - @Override - public long getValue() { - return (~crc) & 0xffffffffL; - } - - @Override - public void reset() { - crc = 0xffffffff; - } - - @Override - public void update(byte[] b, int off, int len) { - if (off < 0 || len < 0 || off > b.length - len) - throw new ArrayIndexOutOfBoundsException(); - - int localCrc = crc; - - while (len > 7) { - final int c0 = (b[off + 0] ^ localCrc) & 0xff; - final int c1 = (b[off + 1] ^ (localCrc >>>= 8)) & 0xff; - final int c2 = (b[off + 2] ^ (localCrc >>>= 8)) & 0xff; - final int c3 = (b[off + 3] ^ (localCrc >>>= 8)) & 0xff; - localCrc = (T[T8_7_START + c0] ^ T[T8_6_START + c1]) ^ (T[T8_5_START + c2] ^ T[T8_4_START + c3]); - - final int c4 = b[off + 4] & 0xff; - final int c5 = b[off + 5] & 0xff; - final int c6 = b[off + 6] & 0xff; - final int c7 = b[off + 7] & 0xff; - - localCrc ^= (T[T8_3_START + c4] ^ T[T8_2_START + c5]) ^ (T[T8_1_START + c6] ^ T[T8_0_START + c7]); - - off += 8; - len -= 8; - } - - /* loop unroll - duff's device style */ - switch (len) { - case 7: - localCrc = (localCrc >>> 8) ^ T[T8_0_START + ((localCrc ^ b[off++]) & 0xff)]; - case 6: - localCrc = (localCrc >>> 8) ^ T[T8_0_START + ((localCrc ^ b[off++]) & 0xff)]; - case 5: - localCrc = (localCrc >>> 8) ^ T[T8_0_START + ((localCrc ^ b[off++]) & 0xff)]; - case 4: - localCrc = (localCrc >>> 8) ^ T[T8_0_START + ((localCrc ^ b[off++]) & 0xff)]; - case 3: - localCrc = (localCrc >>> 8) ^ T[T8_0_START + ((localCrc ^ b[off++]) & 0xff)]; - case 2: - localCrc = (localCrc >>> 8) ^ T[T8_0_START + ((localCrc ^ b[off++]) & 0xff)]; - case 1: - localCrc = (localCrc >>> 8) ^ T[T8_0_START + ((localCrc ^ b[off++]) & 0xff)]; - default: - /* nothing */ - } - - // Publish crc out to object - crc = localCrc; - } - - @Override - final public void update(int b) { - crc = (crc >>> 8) ^ T[T8_0_START + ((crc ^ b) & 0xff)]; - } - - /* - * CRC-32 lookup tables generated by the polynomial 0xEDB88320. See also TestPureJavaCrc32.Table. 
- */ - private static final int T8_0_START = 0 * 256; - private static final int T8_1_START = 1 * 256; - private static final int T8_2_START = 2 * 256; - private static final int T8_3_START = 3 * 256; - private static final int T8_4_START = 4 * 256; - private static final int T8_5_START = 5 * 256; - private static final int T8_6_START = 6 * 256; - private static final int T8_7_START = 7 * 256; - - private static final int[] T = new int[] { - /* T8_0 */ - 0x00000000, 0x77073096, 0xEE0E612C, 0x990951BA, 0x076DC419, 0x706AF48F, 0xE963A535, 0x9E6495A3, 0x0EDB8832, - 0x79DCB8A4, 0xE0D5E91E, 0x97D2D988, 0x09B64C2B, 0x7EB17CBD, 0xE7B82D07, 0x90BF1D91, 0x1DB71064, 0x6AB020F2, - 0xF3B97148, 0x84BE41DE, 0x1ADAD47D, 0x6DDDE4EB, 0xF4D4B551, 0x83D385C7, 0x136C9856, 0x646BA8C0, 0xFD62F97A, - 0x8A65C9EC, 0x14015C4F, 0x63066CD9, 0xFA0F3D63, 0x8D080DF5, 0x3B6E20C8, 0x4C69105E, 0xD56041E4, 0xA2677172, - 0x3C03E4D1, 0x4B04D447, 0xD20D85FD, 0xA50AB56B, 0x35B5A8FA, 0x42B2986C, 0xDBBBC9D6, 0xACBCF940, 0x32D86CE3, - 0x45DF5C75, 0xDCD60DCF, 0xABD13D59, 0x26D930AC, 0x51DE003A, 0xC8D75180, 0xBFD06116, 0x21B4F4B5, 0x56B3C423, - 0xCFBA9599, 0xB8BDA50F, 0x2802B89E, 0x5F058808, 0xC60CD9B2, 0xB10BE924, 0x2F6F7C87, 0x58684C11, 0xC1611DAB, - 0xB6662D3D, 0x76DC4190, 0x01DB7106, 0x98D220BC, 0xEFD5102A, 0x71B18589, 0x06B6B51F, 0x9FBFE4A5, 0xE8B8D433, - 0x7807C9A2, 0x0F00F934, 0x9609A88E, 0xE10E9818, 0x7F6A0DBB, 0x086D3D2D, 0x91646C97, 0xE6635C01, 0x6B6B51F4, - 0x1C6C6162, 0x856530D8, 0xF262004E, 0x6C0695ED, 0x1B01A57B, 0x8208F4C1, 0xF50FC457, 0x65B0D9C6, 0x12B7E950, - 0x8BBEB8EA, 0xFCB9887C, 0x62DD1DDF, 0x15DA2D49, 0x8CD37CF3, 0xFBD44C65, 0x4DB26158, 0x3AB551CE, 0xA3BC0074, - 0xD4BB30E2, 0x4ADFA541, 0x3DD895D7, 0xA4D1C46D, 0xD3D6F4FB, 0x4369E96A, 0x346ED9FC, 0xAD678846, 0xDA60B8D0, - 0x44042D73, 0x33031DE5, 0xAA0A4C5F, 0xDD0D7CC9, 0x5005713C, 0x270241AA, 0xBE0B1010, 0xC90C2086, 0x5768B525, - 0x206F85B3, 0xB966D409, 0xCE61E49F, 0x5EDEF90E, 0x29D9C998, 0xB0D09822, 0xC7D7A8B4, 0x59B33D17, 0x2EB40D81, - 0xB7BD5C3B, 0xC0BA6CAD, 0xEDB88320, 0x9ABFB3B6, 0x03B6E20C, 0x74B1D29A, 0xEAD54739, 0x9DD277AF, 0x04DB2615, - 0x73DC1683, 0xE3630B12, 0x94643B84, 0x0D6D6A3E, 0x7A6A5AA8, 0xE40ECF0B, 0x9309FF9D, 0x0A00AE27, 0x7D079EB1, - 0xF00F9344, 0x8708A3D2, 0x1E01F268, 0x6906C2FE, 0xF762575D, 0x806567CB, 0x196C3671, 0x6E6B06E7, 0xFED41B76, - 0x89D32BE0, 0x10DA7A5A, 0x67DD4ACC, 0xF9B9DF6F, 0x8EBEEFF9, 0x17B7BE43, 0x60B08ED5, 0xD6D6A3E8, 0xA1D1937E, - 0x38D8C2C4, 0x4FDFF252, 0xD1BB67F1, 0xA6BC5767, 0x3FB506DD, 0x48B2364B, 0xD80D2BDA, 0xAF0A1B4C, 0x36034AF6, - 0x41047A60, 0xDF60EFC3, 0xA867DF55, 0x316E8EEF, 0x4669BE79, 0xCB61B38C, 0xBC66831A, 0x256FD2A0, 0x5268E236, - 0xCC0C7795, 0xBB0B4703, 0x220216B9, 0x5505262F, 0xC5BA3BBE, 0xB2BD0B28, 0x2BB45A92, 0x5CB36A04, 0xC2D7FFA7, - 0xB5D0CF31, 0x2CD99E8B, 0x5BDEAE1D, 0x9B64C2B0, 0xEC63F226, 0x756AA39C, 0x026D930A, 0x9C0906A9, 0xEB0E363F, - 0x72076785, 0x05005713, 0x95BF4A82, 0xE2B87A14, 0x7BB12BAE, 0x0CB61B38, 0x92D28E9B, 0xE5D5BE0D, 0x7CDCEFB7, - 0x0BDBDF21, 0x86D3D2D4, 0xF1D4E242, 0x68DDB3F8, 0x1FDA836E, 0x81BE16CD, 0xF6B9265B, 0x6FB077E1, 0x18B74777, - 0x88085AE6, 0xFF0F6A70, 0x66063BCA, 0x11010B5C, 0x8F659EFF, 0xF862AE69, 0x616BFFD3, 0x166CCF45, 0xA00AE278, - 0xD70DD2EE, 0x4E048354, 0x3903B3C2, 0xA7672661, 0xD06016F7, 0x4969474D, 0x3E6E77DB, 0xAED16A4A, 0xD9D65ADC, - 0x40DF0B66, 0x37D83BF0, 0xA9BCAE53, 0xDEBB9EC5, 0x47B2CF7F, 0x30B5FFE9, 0xBDBDF21C, 0xCABAC28A, 0x53B39330, - 0x24B4A3A6, 0xBAD03605, 0xCDD70693, 0x54DE5729, 0x23D967BF, 0xB3667A2E, 0xC4614AB8, 0x5D681B02, 0x2A6F2B94, - 0xB40BBE37, 
0xC30C8EA1, 0x5A05DF1B, 0x2D02EF8D, - /* T8_1 */ - 0x00000000, 0x191B3141, 0x32366282, 0x2B2D53C3, 0x646CC504, 0x7D77F445, 0x565AA786, 0x4F4196C7, 0xC8D98A08, - 0xD1C2BB49, 0xFAEFE88A, 0xE3F4D9CB, 0xACB54F0C, 0xB5AE7E4D, 0x9E832D8E, 0x87981CCF, 0x4AC21251, 0x53D92310, - 0x78F470D3, 0x61EF4192, 0x2EAED755, 0x37B5E614, 0x1C98B5D7, 0x05838496, 0x821B9859, 0x9B00A918, 0xB02DFADB, - 0xA936CB9A, 0xE6775D5D, 0xFF6C6C1C, 0xD4413FDF, 0xCD5A0E9E, 0x958424A2, 0x8C9F15E3, 0xA7B24620, 0xBEA97761, - 0xF1E8E1A6, 0xE8F3D0E7, 0xC3DE8324, 0xDAC5B265, 0x5D5DAEAA, 0x44469FEB, 0x6F6BCC28, 0x7670FD69, 0x39316BAE, - 0x202A5AEF, 0x0B07092C, 0x121C386D, 0xDF4636F3, 0xC65D07B2, 0xED705471, 0xF46B6530, 0xBB2AF3F7, 0xA231C2B6, - 0x891C9175, 0x9007A034, 0x179FBCFB, 0x0E848DBA, 0x25A9DE79, 0x3CB2EF38, 0x73F379FF, 0x6AE848BE, 0x41C51B7D, - 0x58DE2A3C, 0xF0794F05, 0xE9627E44, 0xC24F2D87, 0xDB541CC6, 0x94158A01, 0x8D0EBB40, 0xA623E883, 0xBF38D9C2, - 0x38A0C50D, 0x21BBF44C, 0x0A96A78F, 0x138D96CE, 0x5CCC0009, 0x45D73148, 0x6EFA628B, 0x77E153CA, 0xBABB5D54, - 0xA3A06C15, 0x888D3FD6, 0x91960E97, 0xDED79850, 0xC7CCA911, 0xECE1FAD2, 0xF5FACB93, 0x7262D75C, 0x6B79E61D, - 0x4054B5DE, 0x594F849F, 0x160E1258, 0x0F152319, 0x243870DA, 0x3D23419B, 0x65FD6BA7, 0x7CE65AE6, 0x57CB0925, - 0x4ED03864, 0x0191AEA3, 0x188A9FE2, 0x33A7CC21, 0x2ABCFD60, 0xAD24E1AF, 0xB43FD0EE, 0x9F12832D, 0x8609B26C, - 0xC94824AB, 0xD05315EA, 0xFB7E4629, 0xE2657768, 0x2F3F79F6, 0x362448B7, 0x1D091B74, 0x04122A35, 0x4B53BCF2, - 0x52488DB3, 0x7965DE70, 0x607EEF31, 0xE7E6F3FE, 0xFEFDC2BF, 0xD5D0917C, 0xCCCBA03D, 0x838A36FA, 0x9A9107BB, - 0xB1BC5478, 0xA8A76539, 0x3B83984B, 0x2298A90A, 0x09B5FAC9, 0x10AECB88, 0x5FEF5D4F, 0x46F46C0E, 0x6DD93FCD, - 0x74C20E8C, 0xF35A1243, 0xEA412302, 0xC16C70C1, 0xD8774180, 0x9736D747, 0x8E2DE606, 0xA500B5C5, 0xBC1B8484, - 0x71418A1A, 0x685ABB5B, 0x4377E898, 0x5A6CD9D9, 0x152D4F1E, 0x0C367E5F, 0x271B2D9C, 0x3E001CDD, 0xB9980012, - 0xA0833153, 0x8BAE6290, 0x92B553D1, 0xDDF4C516, 0xC4EFF457, 0xEFC2A794, 0xF6D996D5, 0xAE07BCE9, 0xB71C8DA8, - 0x9C31DE6B, 0x852AEF2A, 0xCA6B79ED, 0xD37048AC, 0xF85D1B6F, 0xE1462A2E, 0x66DE36E1, 0x7FC507A0, 0x54E85463, - 0x4DF36522, 0x02B2F3E5, 0x1BA9C2A4, 0x30849167, 0x299FA026, 0xE4C5AEB8, 0xFDDE9FF9, 0xD6F3CC3A, 0xCFE8FD7B, - 0x80A96BBC, 0x99B25AFD, 0xB29F093E, 0xAB84387F, 0x2C1C24B0, 0x350715F1, 0x1E2A4632, 0x07317773, 0x4870E1B4, - 0x516BD0F5, 0x7A468336, 0x635DB277, 0xCBFAD74E, 0xD2E1E60F, 0xF9CCB5CC, 0xE0D7848D, 0xAF96124A, 0xB68D230B, - 0x9DA070C8, 0x84BB4189, 0x03235D46, 0x1A386C07, 0x31153FC4, 0x280E0E85, 0x674F9842, 0x7E54A903, 0x5579FAC0, - 0x4C62CB81, 0x8138C51F, 0x9823F45E, 0xB30EA79D, 0xAA1596DC, 0xE554001B, 0xFC4F315A, 0xD7626299, 0xCE7953D8, - 0x49E14F17, 0x50FA7E56, 0x7BD72D95, 0x62CC1CD4, 0x2D8D8A13, 0x3496BB52, 0x1FBBE891, 0x06A0D9D0, 0x5E7EF3EC, - 0x4765C2AD, 0x6C48916E, 0x7553A02F, 0x3A1236E8, 0x230907A9, 0x0824546A, 0x113F652B, 0x96A779E4, 0x8FBC48A5, - 0xA4911B66, 0xBD8A2A27, 0xF2CBBCE0, 0xEBD08DA1, 0xC0FDDE62, 0xD9E6EF23, 0x14BCE1BD, 0x0DA7D0FC, 0x268A833F, - 0x3F91B27E, 0x70D024B9, 0x69CB15F8, 0x42E6463B, 0x5BFD777A, 0xDC656BB5, 0xC57E5AF4, 0xEE530937, 0xF7483876, - 0xB809AEB1, 0xA1129FF0, 0x8A3FCC33, 0x9324FD72, - /* T8_2 */ - 0x00000000, 0x01C26A37, 0x0384D46E, 0x0246BE59, 0x0709A8DC, 0x06CBC2EB, 0x048D7CB2, 0x054F1685, 0x0E1351B8, - 0x0FD13B8F, 0x0D9785D6, 0x0C55EFE1, 0x091AF964, 0x08D89353, 0x0A9E2D0A, 0x0B5C473D, 0x1C26A370, 0x1DE4C947, - 0x1FA2771E, 0x1E601D29, 0x1B2F0BAC, 0x1AED619B, 0x18ABDFC2, 0x1969B5F5, 0x1235F2C8, 0x13F798FF, 0x11B126A6, - 0x10734C91, 0x153C5A14, 
0x14FE3023, 0x16B88E7A, 0x177AE44D, 0x384D46E0, 0x398F2CD7, 0x3BC9928E, 0x3A0BF8B9, - 0x3F44EE3C, 0x3E86840B, 0x3CC03A52, 0x3D025065, 0x365E1758, 0x379C7D6F, 0x35DAC336, 0x3418A901, 0x3157BF84, - 0x3095D5B3, 0x32D36BEA, 0x331101DD, 0x246BE590, 0x25A98FA7, 0x27EF31FE, 0x262D5BC9, 0x23624D4C, 0x22A0277B, - 0x20E69922, 0x2124F315, 0x2A78B428, 0x2BBADE1F, 0x29FC6046, 0x283E0A71, 0x2D711CF4, 0x2CB376C3, 0x2EF5C89A, - 0x2F37A2AD, 0x709A8DC0, 0x7158E7F7, 0x731E59AE, 0x72DC3399, 0x7793251C, 0x76514F2B, 0x7417F172, 0x75D59B45, - 0x7E89DC78, 0x7F4BB64F, 0x7D0D0816, 0x7CCF6221, 0x798074A4, 0x78421E93, 0x7A04A0CA, 0x7BC6CAFD, 0x6CBC2EB0, - 0x6D7E4487, 0x6F38FADE, 0x6EFA90E9, 0x6BB5866C, 0x6A77EC5B, 0x68315202, 0x69F33835, 0x62AF7F08, 0x636D153F, - 0x612BAB66, 0x60E9C151, 0x65A6D7D4, 0x6464BDE3, 0x662203BA, 0x67E0698D, 0x48D7CB20, 0x4915A117, 0x4B531F4E, - 0x4A917579, 0x4FDE63FC, 0x4E1C09CB, 0x4C5AB792, 0x4D98DDA5, 0x46C49A98, 0x4706F0AF, 0x45404EF6, 0x448224C1, - 0x41CD3244, 0x400F5873, 0x4249E62A, 0x438B8C1D, 0x54F16850, 0x55330267, 0x5775BC3E, 0x56B7D609, 0x53F8C08C, - 0x523AAABB, 0x507C14E2, 0x51BE7ED5, 0x5AE239E8, 0x5B2053DF, 0x5966ED86, 0x58A487B1, 0x5DEB9134, 0x5C29FB03, - 0x5E6F455A, 0x5FAD2F6D, 0xE1351B80, 0xE0F771B7, 0xE2B1CFEE, 0xE373A5D9, 0xE63CB35C, 0xE7FED96B, 0xE5B86732, - 0xE47A0D05, 0xEF264A38, 0xEEE4200F, 0xECA29E56, 0xED60F461, 0xE82FE2E4, 0xE9ED88D3, 0xEBAB368A, 0xEA695CBD, - 0xFD13B8F0, 0xFCD1D2C7, 0xFE976C9E, 0xFF5506A9, 0xFA1A102C, 0xFBD87A1B, 0xF99EC442, 0xF85CAE75, 0xF300E948, - 0xF2C2837F, 0xF0843D26, 0xF1465711, 0xF4094194, 0xF5CB2BA3, 0xF78D95FA, 0xF64FFFCD, 0xD9785D60, 0xD8BA3757, - 0xDAFC890E, 0xDB3EE339, 0xDE71F5BC, 0xDFB39F8B, 0xDDF521D2, 0xDC374BE5, 0xD76B0CD8, 0xD6A966EF, 0xD4EFD8B6, - 0xD52DB281, 0xD062A404, 0xD1A0CE33, 0xD3E6706A, 0xD2241A5D, 0xC55EFE10, 0xC49C9427, 0xC6DA2A7E, 0xC7184049, - 0xC25756CC, 0xC3953CFB, 0xC1D382A2, 0xC011E895, 0xCB4DAFA8, 0xCA8FC59F, 0xC8C97BC6, 0xC90B11F1, 0xCC440774, - 0xCD866D43, 0xCFC0D31A, 0xCE02B92D, 0x91AF9640, 0x906DFC77, 0x922B422E, 0x93E92819, 0x96A63E9C, 0x976454AB, - 0x9522EAF2, 0x94E080C5, 0x9FBCC7F8, 0x9E7EADCF, 0x9C381396, 0x9DFA79A1, 0x98B56F24, 0x99770513, 0x9B31BB4A, - 0x9AF3D17D, 0x8D893530, 0x8C4B5F07, 0x8E0DE15E, 0x8FCF8B69, 0x8A809DEC, 0x8B42F7DB, 0x89044982, 0x88C623B5, - 0x839A6488, 0x82580EBF, 0x801EB0E6, 0x81DCDAD1, 0x8493CC54, 0x8551A663, 0x8717183A, 0x86D5720D, 0xA9E2D0A0, - 0xA820BA97, 0xAA6604CE, 0xABA46EF9, 0xAEEB787C, 0xAF29124B, 0xAD6FAC12, 0xACADC625, 0xA7F18118, 0xA633EB2F, - 0xA4755576, 0xA5B73F41, 0xA0F829C4, 0xA13A43F3, 0xA37CFDAA, 0xA2BE979D, 0xB5C473D0, 0xB40619E7, 0xB640A7BE, - 0xB782CD89, 0xB2CDDB0C, 0xB30FB13B, 0xB1490F62, 0xB08B6555, 0xBBD72268, 0xBA15485F, 0xB853F606, 0xB9919C31, - 0xBCDE8AB4, 0xBD1CE083, 0xBF5A5EDA, 0xBE9834ED, - /* T8_3 */ - 0x00000000, 0xB8BC6765, 0xAA09C88B, 0x12B5AFEE, 0x8F629757, 0x37DEF032, 0x256B5FDC, 0x9DD738B9, 0xC5B428EF, - 0x7D084F8A, 0x6FBDE064, 0xD7018701, 0x4AD6BFB8, 0xF26AD8DD, 0xE0DF7733, 0x58631056, 0x5019579F, 0xE8A530FA, - 0xFA109F14, 0x42ACF871, 0xDF7BC0C8, 0x67C7A7AD, 0x75720843, 0xCDCE6F26, 0x95AD7F70, 0x2D111815, 0x3FA4B7FB, - 0x8718D09E, 0x1ACFE827, 0xA2738F42, 0xB0C620AC, 0x087A47C9, 0xA032AF3E, 0x188EC85B, 0x0A3B67B5, 0xB28700D0, - 0x2F503869, 0x97EC5F0C, 0x8559F0E2, 0x3DE59787, 0x658687D1, 0xDD3AE0B4, 0xCF8F4F5A, 0x7733283F, 0xEAE41086, - 0x525877E3, 0x40EDD80D, 0xF851BF68, 0xF02BF8A1, 0x48979FC4, 0x5A22302A, 0xE29E574F, 0x7F496FF6, 0xC7F50893, - 0xD540A77D, 0x6DFCC018, 0x359FD04E, 0x8D23B72B, 0x9F9618C5, 0x272A7FA0, 0xBAFD4719, 0x0241207C, 
0x10F48F92, - 0xA848E8F7, 0x9B14583D, 0x23A83F58, 0x311D90B6, 0x89A1F7D3, 0x1476CF6A, 0xACCAA80F, 0xBE7F07E1, 0x06C36084, - 0x5EA070D2, 0xE61C17B7, 0xF4A9B859, 0x4C15DF3C, 0xD1C2E785, 0x697E80E0, 0x7BCB2F0E, 0xC377486B, 0xCB0D0FA2, - 0x73B168C7, 0x6104C729, 0xD9B8A04C, 0x446F98F5, 0xFCD3FF90, 0xEE66507E, 0x56DA371B, 0x0EB9274D, 0xB6054028, - 0xA4B0EFC6, 0x1C0C88A3, 0x81DBB01A, 0x3967D77F, 0x2BD27891, 0x936E1FF4, 0x3B26F703, 0x839A9066, 0x912F3F88, - 0x299358ED, 0xB4446054, 0x0CF80731, 0x1E4DA8DF, 0xA6F1CFBA, 0xFE92DFEC, 0x462EB889, 0x549B1767, 0xEC277002, - 0x71F048BB, 0xC94C2FDE, 0xDBF98030, 0x6345E755, 0x6B3FA09C, 0xD383C7F9, 0xC1366817, 0x798A0F72, 0xE45D37CB, - 0x5CE150AE, 0x4E54FF40, 0xF6E89825, 0xAE8B8873, 0x1637EF16, 0x048240F8, 0xBC3E279D, 0x21E91F24, 0x99557841, - 0x8BE0D7AF, 0x335CB0CA, 0xED59B63B, 0x55E5D15E, 0x47507EB0, 0xFFEC19D5, 0x623B216C, 0xDA874609, 0xC832E9E7, - 0x708E8E82, 0x28ED9ED4, 0x9051F9B1, 0x82E4565F, 0x3A58313A, 0xA78F0983, 0x1F336EE6, 0x0D86C108, 0xB53AA66D, - 0xBD40E1A4, 0x05FC86C1, 0x1749292F, 0xAFF54E4A, 0x322276F3, 0x8A9E1196, 0x982BBE78, 0x2097D91D, 0x78F4C94B, - 0xC048AE2E, 0xD2FD01C0, 0x6A4166A5, 0xF7965E1C, 0x4F2A3979, 0x5D9F9697, 0xE523F1F2, 0x4D6B1905, 0xF5D77E60, - 0xE762D18E, 0x5FDEB6EB, 0xC2098E52, 0x7AB5E937, 0x680046D9, 0xD0BC21BC, 0x88DF31EA, 0x3063568F, 0x22D6F961, - 0x9A6A9E04, 0x07BDA6BD, 0xBF01C1D8, 0xADB46E36, 0x15080953, 0x1D724E9A, 0xA5CE29FF, 0xB77B8611, 0x0FC7E174, - 0x9210D9CD, 0x2AACBEA8, 0x38191146, 0x80A57623, 0xD8C66675, 0x607A0110, 0x72CFAEFE, 0xCA73C99B, 0x57A4F122, - 0xEF189647, 0xFDAD39A9, 0x45115ECC, 0x764DEE06, 0xCEF18963, 0xDC44268D, 0x64F841E8, 0xF92F7951, 0x41931E34, - 0x5326B1DA, 0xEB9AD6BF, 0xB3F9C6E9, 0x0B45A18C, 0x19F00E62, 0xA14C6907, 0x3C9B51BE, 0x842736DB, 0x96929935, - 0x2E2EFE50, 0x2654B999, 0x9EE8DEFC, 0x8C5D7112, 0x34E11677, 0xA9362ECE, 0x118A49AB, 0x033FE645, 0xBB838120, - 0xE3E09176, 0x5B5CF613, 0x49E959FD, 0xF1553E98, 0x6C820621, 0xD43E6144, 0xC68BCEAA, 0x7E37A9CF, 0xD67F4138, - 0x6EC3265D, 0x7C7689B3, 0xC4CAEED6, 0x591DD66F, 0xE1A1B10A, 0xF3141EE4, 0x4BA87981, 0x13CB69D7, 0xAB770EB2, - 0xB9C2A15C, 0x017EC639, 0x9CA9FE80, 0x241599E5, 0x36A0360B, 0x8E1C516E, 0x866616A7, 0x3EDA71C2, 0x2C6FDE2C, - 0x94D3B949, 0x090481F0, 0xB1B8E695, 0xA30D497B, 0x1BB12E1E, 0x43D23E48, 0xFB6E592D, 0xE9DBF6C3, 0x516791A6, - 0xCCB0A91F, 0x740CCE7A, 0x66B96194, 0xDE0506F1, - /* T8_4 */ - 0x00000000, 0x3D6029B0, 0x7AC05360, 0x47A07AD0, 0xF580A6C0, 0xC8E08F70, 0x8F40F5A0, 0xB220DC10, 0x30704BC1, - 0x0D106271, 0x4AB018A1, 0x77D03111, 0xC5F0ED01, 0xF890C4B1, 0xBF30BE61, 0x825097D1, 0x60E09782, 0x5D80BE32, - 0x1A20C4E2, 0x2740ED52, 0x95603142, 0xA80018F2, 0xEFA06222, 0xD2C04B92, 0x5090DC43, 0x6DF0F5F3, 0x2A508F23, - 0x1730A693, 0xA5107A83, 0x98705333, 0xDFD029E3, 0xE2B00053, 0xC1C12F04, 0xFCA106B4, 0xBB017C64, 0x866155D4, - 0x344189C4, 0x0921A074, 0x4E81DAA4, 0x73E1F314, 0xF1B164C5, 0xCCD14D75, 0x8B7137A5, 0xB6111E15, 0x0431C205, - 0x3951EBB5, 0x7EF19165, 0x4391B8D5, 0xA121B886, 0x9C419136, 0xDBE1EBE6, 0xE681C256, 0x54A11E46, 0x69C137F6, - 0x2E614D26, 0x13016496, 0x9151F347, 0xAC31DAF7, 0xEB91A027, 0xD6F18997, 0x64D15587, 0x59B17C37, 0x1E1106E7, - 0x23712F57, 0x58F35849, 0x659371F9, 0x22330B29, 0x1F532299, 0xAD73FE89, 0x9013D739, 0xD7B3ADE9, 0xEAD38459, - 0x68831388, 0x55E33A38, 0x124340E8, 0x2F236958, 0x9D03B548, 0xA0639CF8, 0xE7C3E628, 0xDAA3CF98, 0x3813CFCB, - 0x0573E67B, 0x42D39CAB, 0x7FB3B51B, 0xCD93690B, 0xF0F340BB, 0xB7533A6B, 0x8A3313DB, 0x0863840A, 0x3503ADBA, - 0x72A3D76A, 0x4FC3FEDA, 0xFDE322CA, 0xC0830B7A, 0x872371AA, 
0xBA43581A, 0x9932774D, 0xA4525EFD, 0xE3F2242D, - 0xDE920D9D, 0x6CB2D18D, 0x51D2F83D, 0x167282ED, 0x2B12AB5D, 0xA9423C8C, 0x9422153C, 0xD3826FEC, 0xEEE2465C, - 0x5CC29A4C, 0x61A2B3FC, 0x2602C92C, 0x1B62E09C, 0xF9D2E0CF, 0xC4B2C97F, 0x8312B3AF, 0xBE729A1F, 0x0C52460F, - 0x31326FBF, 0x7692156F, 0x4BF23CDF, 0xC9A2AB0E, 0xF4C282BE, 0xB362F86E, 0x8E02D1DE, 0x3C220DCE, 0x0142247E, - 0x46E25EAE, 0x7B82771E, 0xB1E6B092, 0x8C869922, 0xCB26E3F2, 0xF646CA42, 0x44661652, 0x79063FE2, 0x3EA64532, - 0x03C66C82, 0x8196FB53, 0xBCF6D2E3, 0xFB56A833, 0xC6368183, 0x74165D93, 0x49767423, 0x0ED60EF3, 0x33B62743, - 0xD1062710, 0xEC660EA0, 0xABC67470, 0x96A65DC0, 0x248681D0, 0x19E6A860, 0x5E46D2B0, 0x6326FB00, 0xE1766CD1, - 0xDC164561, 0x9BB63FB1, 0xA6D61601, 0x14F6CA11, 0x2996E3A1, 0x6E369971, 0x5356B0C1, 0x70279F96, 0x4D47B626, - 0x0AE7CCF6, 0x3787E546, 0x85A73956, 0xB8C710E6, 0xFF676A36, 0xC2074386, 0x4057D457, 0x7D37FDE7, 0x3A978737, - 0x07F7AE87, 0xB5D77297, 0x88B75B27, 0xCF1721F7, 0xF2770847, 0x10C70814, 0x2DA721A4, 0x6A075B74, 0x576772C4, - 0xE547AED4, 0xD8278764, 0x9F87FDB4, 0xA2E7D404, 0x20B743D5, 0x1DD76A65, 0x5A7710B5, 0x67173905, 0xD537E515, - 0xE857CCA5, 0xAFF7B675, 0x92979FC5, 0xE915E8DB, 0xD475C16B, 0x93D5BBBB, 0xAEB5920B, 0x1C954E1B, 0x21F567AB, - 0x66551D7B, 0x5B3534CB, 0xD965A31A, 0xE4058AAA, 0xA3A5F07A, 0x9EC5D9CA, 0x2CE505DA, 0x11852C6A, 0x562556BA, - 0x6B457F0A, 0x89F57F59, 0xB49556E9, 0xF3352C39, 0xCE550589, 0x7C75D999, 0x4115F029, 0x06B58AF9, 0x3BD5A349, - 0xB9853498, 0x84E51D28, 0xC34567F8, 0xFE254E48, 0x4C059258, 0x7165BBE8, 0x36C5C138, 0x0BA5E888, 0x28D4C7DF, - 0x15B4EE6F, 0x521494BF, 0x6F74BD0F, 0xDD54611F, 0xE03448AF, 0xA794327F, 0x9AF41BCF, 0x18A48C1E, 0x25C4A5AE, - 0x6264DF7E, 0x5F04F6CE, 0xED242ADE, 0xD044036E, 0x97E479BE, 0xAA84500E, 0x4834505D, 0x755479ED, 0x32F4033D, - 0x0F942A8D, 0xBDB4F69D, 0x80D4DF2D, 0xC774A5FD, 0xFA148C4D, 0x78441B9C, 0x4524322C, 0x028448FC, 0x3FE4614C, - 0x8DC4BD5C, 0xB0A494EC, 0xF704EE3C, 0xCA64C78C, - /* T8_5 */ - 0x00000000, 0xCB5CD3A5, 0x4DC8A10B, 0x869472AE, 0x9B914216, 0x50CD91B3, 0xD659E31D, 0x1D0530B8, 0xEC53826D, - 0x270F51C8, 0xA19B2366, 0x6AC7F0C3, 0x77C2C07B, 0xBC9E13DE, 0x3A0A6170, 0xF156B2D5, 0x03D6029B, 0xC88AD13E, - 0x4E1EA390, 0x85427035, 0x9847408D, 0x531B9328, 0xD58FE186, 0x1ED33223, 0xEF8580F6, 0x24D95353, 0xA24D21FD, - 0x6911F258, 0x7414C2E0, 0xBF481145, 0x39DC63EB, 0xF280B04E, 0x07AC0536, 0xCCF0D693, 0x4A64A43D, 0x81387798, - 0x9C3D4720, 0x57619485, 0xD1F5E62B, 0x1AA9358E, 0xEBFF875B, 0x20A354FE, 0xA6372650, 0x6D6BF5F5, 0x706EC54D, - 0xBB3216E8, 0x3DA66446, 0xF6FAB7E3, 0x047A07AD, 0xCF26D408, 0x49B2A6A6, 0x82EE7503, 0x9FEB45BB, 0x54B7961E, - 0xD223E4B0, 0x197F3715, 0xE82985C0, 0x23755665, 0xA5E124CB, 0x6EBDF76E, 0x73B8C7D6, 0xB8E41473, 0x3E7066DD, - 0xF52CB578, 0x0F580A6C, 0xC404D9C9, 0x4290AB67, 0x89CC78C2, 0x94C9487A, 0x5F959BDF, 0xD901E971, 0x125D3AD4, - 0xE30B8801, 0x28575BA4, 0xAEC3290A, 0x659FFAAF, 0x789ACA17, 0xB3C619B2, 0x35526B1C, 0xFE0EB8B9, 0x0C8E08F7, - 0xC7D2DB52, 0x4146A9FC, 0x8A1A7A59, 0x971F4AE1, 0x5C439944, 0xDAD7EBEA, 0x118B384F, 0xE0DD8A9A, 0x2B81593F, - 0xAD152B91, 0x6649F834, 0x7B4CC88C, 0xB0101B29, 0x36846987, 0xFDD8BA22, 0x08F40F5A, 0xC3A8DCFF, 0x453CAE51, - 0x8E607DF4, 0x93654D4C, 0x58399EE9, 0xDEADEC47, 0x15F13FE2, 0xE4A78D37, 0x2FFB5E92, 0xA96F2C3C, 0x6233FF99, - 0x7F36CF21, 0xB46A1C84, 0x32FE6E2A, 0xF9A2BD8F, 0x0B220DC1, 0xC07EDE64, 0x46EAACCA, 0x8DB67F6F, 0x90B34FD7, - 0x5BEF9C72, 0xDD7BEEDC, 0x16273D79, 0xE7718FAC, 0x2C2D5C09, 0xAAB92EA7, 0x61E5FD02, 0x7CE0CDBA, 0xB7BC1E1F, - 0x31286CB1, 0xFA74BF14, 
0x1EB014D8, 0xD5ECC77D, 0x5378B5D3, 0x98246676, 0x852156CE, 0x4E7D856B, 0xC8E9F7C5, - 0x03B52460, 0xF2E396B5, 0x39BF4510, 0xBF2B37BE, 0x7477E41B, 0x6972D4A3, 0xA22E0706, 0x24BA75A8, 0xEFE6A60D, - 0x1D661643, 0xD63AC5E6, 0x50AEB748, 0x9BF264ED, 0x86F75455, 0x4DAB87F0, 0xCB3FF55E, 0x006326FB, 0xF135942E, - 0x3A69478B, 0xBCFD3525, 0x77A1E680, 0x6AA4D638, 0xA1F8059D, 0x276C7733, 0xEC30A496, 0x191C11EE, 0xD240C24B, - 0x54D4B0E5, 0x9F886340, 0x828D53F8, 0x49D1805D, 0xCF45F2F3, 0x04192156, 0xF54F9383, 0x3E134026, 0xB8873288, - 0x73DBE12D, 0x6EDED195, 0xA5820230, 0x2316709E, 0xE84AA33B, 0x1ACA1375, 0xD196C0D0, 0x5702B27E, 0x9C5E61DB, - 0x815B5163, 0x4A0782C6, 0xCC93F068, 0x07CF23CD, 0xF6999118, 0x3DC542BD, 0xBB513013, 0x700DE3B6, 0x6D08D30E, - 0xA65400AB, 0x20C07205, 0xEB9CA1A0, 0x11E81EB4, 0xDAB4CD11, 0x5C20BFBF, 0x977C6C1A, 0x8A795CA2, 0x41258F07, - 0xC7B1FDA9, 0x0CED2E0C, 0xFDBB9CD9, 0x36E74F7C, 0xB0733DD2, 0x7B2FEE77, 0x662ADECF, 0xAD760D6A, 0x2BE27FC4, - 0xE0BEAC61, 0x123E1C2F, 0xD962CF8A, 0x5FF6BD24, 0x94AA6E81, 0x89AF5E39, 0x42F38D9C, 0xC467FF32, 0x0F3B2C97, - 0xFE6D9E42, 0x35314DE7, 0xB3A53F49, 0x78F9ECEC, 0x65FCDC54, 0xAEA00FF1, 0x28347D5F, 0xE368AEFA, 0x16441B82, - 0xDD18C827, 0x5B8CBA89, 0x90D0692C, 0x8DD55994, 0x46898A31, 0xC01DF89F, 0x0B412B3A, 0xFA1799EF, 0x314B4A4A, - 0xB7DF38E4, 0x7C83EB41, 0x6186DBF9, 0xAADA085C, 0x2C4E7AF2, 0xE712A957, 0x15921919, 0xDECECABC, 0x585AB812, - 0x93066BB7, 0x8E035B0F, 0x455F88AA, 0xC3CBFA04, 0x089729A1, 0xF9C19B74, 0x329D48D1, 0xB4093A7F, 0x7F55E9DA, - 0x6250D962, 0xA90C0AC7, 0x2F987869, 0xE4C4ABCC, - /* T8_6 */ - 0x00000000, 0xA6770BB4, 0x979F1129, 0x31E81A9D, 0xF44F2413, 0x52382FA7, 0x63D0353A, 0xC5A73E8E, 0x33EF4E67, - 0x959845D3, 0xA4705F4E, 0x020754FA, 0xC7A06A74, 0x61D761C0, 0x503F7B5D, 0xF64870E9, 0x67DE9CCE, 0xC1A9977A, - 0xF0418DE7, 0x56368653, 0x9391B8DD, 0x35E6B369, 0x040EA9F4, 0xA279A240, 0x5431D2A9, 0xF246D91D, 0xC3AEC380, - 0x65D9C834, 0xA07EF6BA, 0x0609FD0E, 0x37E1E793, 0x9196EC27, 0xCFBD399C, 0x69CA3228, 0x582228B5, 0xFE552301, - 0x3BF21D8F, 0x9D85163B, 0xAC6D0CA6, 0x0A1A0712, 0xFC5277FB, 0x5A257C4F, 0x6BCD66D2, 0xCDBA6D66, 0x081D53E8, - 0xAE6A585C, 0x9F8242C1, 0x39F54975, 0xA863A552, 0x0E14AEE6, 0x3FFCB47B, 0x998BBFCF, 0x5C2C8141, 0xFA5B8AF5, - 0xCBB39068, 0x6DC49BDC, 0x9B8CEB35, 0x3DFBE081, 0x0C13FA1C, 0xAA64F1A8, 0x6FC3CF26, 0xC9B4C492, 0xF85CDE0F, - 0x5E2BD5BB, 0x440B7579, 0xE27C7ECD, 0xD3946450, 0x75E36FE4, 0xB044516A, 0x16335ADE, 0x27DB4043, 0x81AC4BF7, - 0x77E43B1E, 0xD19330AA, 0xE07B2A37, 0x460C2183, 0x83AB1F0D, 0x25DC14B9, 0x14340E24, 0xB2430590, 0x23D5E9B7, - 0x85A2E203, 0xB44AF89E, 0x123DF32A, 0xD79ACDA4, 0x71EDC610, 0x4005DC8D, 0xE672D739, 0x103AA7D0, 0xB64DAC64, - 0x87A5B6F9, 0x21D2BD4D, 0xE47583C3, 0x42028877, 0x73EA92EA, 0xD59D995E, 0x8BB64CE5, 0x2DC14751, 0x1C295DCC, - 0xBA5E5678, 0x7FF968F6, 0xD98E6342, 0xE86679DF, 0x4E11726B, 0xB8590282, 0x1E2E0936, 0x2FC613AB, 0x89B1181F, - 0x4C162691, 0xEA612D25, 0xDB8937B8, 0x7DFE3C0C, 0xEC68D02B, 0x4A1FDB9F, 0x7BF7C102, 0xDD80CAB6, 0x1827F438, - 0xBE50FF8C, 0x8FB8E511, 0x29CFEEA5, 0xDF879E4C, 0x79F095F8, 0x48188F65, 0xEE6F84D1, 0x2BC8BA5F, 0x8DBFB1EB, - 0xBC57AB76, 0x1A20A0C2, 0x8816EAF2, 0x2E61E146, 0x1F89FBDB, 0xB9FEF06F, 0x7C59CEE1, 0xDA2EC555, 0xEBC6DFC8, - 0x4DB1D47C, 0xBBF9A495, 0x1D8EAF21, 0x2C66B5BC, 0x8A11BE08, 0x4FB68086, 0xE9C18B32, 0xD82991AF, 0x7E5E9A1B, - 0xEFC8763C, 0x49BF7D88, 0x78576715, 0xDE206CA1, 0x1B87522F, 0xBDF0599B, 0x8C184306, 0x2A6F48B2, 0xDC27385B, - 0x7A5033EF, 0x4BB82972, 0xEDCF22C6, 0x28681C48, 0x8E1F17FC, 0xBFF70D61, 0x198006D5, 0x47ABD36E, 
0xE1DCD8DA, - 0xD034C247, 0x7643C9F3, 0xB3E4F77D, 0x1593FCC9, 0x247BE654, 0x820CEDE0, 0x74449D09, 0xD23396BD, 0xE3DB8C20, - 0x45AC8794, 0x800BB91A, 0x267CB2AE, 0x1794A833, 0xB1E3A387, 0x20754FA0, 0x86024414, 0xB7EA5E89, 0x119D553D, - 0xD43A6BB3, 0x724D6007, 0x43A57A9A, 0xE5D2712E, 0x139A01C7, 0xB5ED0A73, 0x840510EE, 0x22721B5A, 0xE7D525D4, - 0x41A22E60, 0x704A34FD, 0xD63D3F49, 0xCC1D9F8B, 0x6A6A943F, 0x5B828EA2, 0xFDF58516, 0x3852BB98, 0x9E25B02C, - 0xAFCDAAB1, 0x09BAA105, 0xFFF2D1EC, 0x5985DA58, 0x686DC0C5, 0xCE1ACB71, 0x0BBDF5FF, 0xADCAFE4B, 0x9C22E4D6, - 0x3A55EF62, 0xABC30345, 0x0DB408F1, 0x3C5C126C, 0x9A2B19D8, 0x5F8C2756, 0xF9FB2CE2, 0xC813367F, 0x6E643DCB, - 0x982C4D22, 0x3E5B4696, 0x0FB35C0B, 0xA9C457BF, 0x6C636931, 0xCA146285, 0xFBFC7818, 0x5D8B73AC, 0x03A0A617, - 0xA5D7ADA3, 0x943FB73E, 0x3248BC8A, 0xF7EF8204, 0x519889B0, 0x6070932D, 0xC6079899, 0x304FE870, 0x9638E3C4, - 0xA7D0F959, 0x01A7F2ED, 0xC400CC63, 0x6277C7D7, 0x539FDD4A, 0xF5E8D6FE, 0x647E3AD9, 0xC209316D, 0xF3E12BF0, - 0x55962044, 0x90311ECA, 0x3646157E, 0x07AE0FE3, 0xA1D90457, 0x579174BE, 0xF1E67F0A, 0xC00E6597, 0x66796E23, - 0xA3DE50AD, 0x05A95B19, 0x34414184, 0x92364A30, - /* T8_7 */ - 0x00000000, 0xCCAA009E, 0x4225077D, 0x8E8F07E3, 0x844A0EFA, 0x48E00E64, 0xC66F0987, 0x0AC50919, 0xD3E51BB5, - 0x1F4F1B2B, 0x91C01CC8, 0x5D6A1C56, 0x57AF154F, 0x9B0515D1, 0x158A1232, 0xD92012AC, 0x7CBB312B, 0xB01131B5, - 0x3E9E3656, 0xF23436C8, 0xF8F13FD1, 0x345B3F4F, 0xBAD438AC, 0x767E3832, 0xAF5E2A9E, 0x63F42A00, 0xED7B2DE3, - 0x21D12D7D, 0x2B142464, 0xE7BE24FA, 0x69312319, 0xA59B2387, 0xF9766256, 0x35DC62C8, 0xBB53652B, 0x77F965B5, - 0x7D3C6CAC, 0xB1966C32, 0x3F196BD1, 0xF3B36B4F, 0x2A9379E3, 0xE639797D, 0x68B67E9E, 0xA41C7E00, 0xAED97719, - 0x62737787, 0xECFC7064, 0x205670FA, 0x85CD537D, 0x496753E3, 0xC7E85400, 0x0B42549E, 0x01875D87, 0xCD2D5D19, - 0x43A25AFA, 0x8F085A64, 0x562848C8, 0x9A824856, 0x140D4FB5, 0xD8A74F2B, 0xD2624632, 0x1EC846AC, 0x9047414F, - 0x5CED41D1, 0x299DC2ED, 0xE537C273, 0x6BB8C590, 0xA712C50E, 0xADD7CC17, 0x617DCC89, 0xEFF2CB6A, 0x2358CBF4, - 0xFA78D958, 0x36D2D9C6, 0xB85DDE25, 0x74F7DEBB, 0x7E32D7A2, 0xB298D73C, 0x3C17D0DF, 0xF0BDD041, 0x5526F3C6, - 0x998CF358, 0x1703F4BB, 0xDBA9F425, 0xD16CFD3C, 0x1DC6FDA2, 0x9349FA41, 0x5FE3FADF, 0x86C3E873, 0x4A69E8ED, - 0xC4E6EF0E, 0x084CEF90, 0x0289E689, 0xCE23E617, 0x40ACE1F4, 0x8C06E16A, 0xD0EBA0BB, 0x1C41A025, 0x92CEA7C6, - 0x5E64A758, 0x54A1AE41, 0x980BAEDF, 0x1684A93C, 0xDA2EA9A2, 0x030EBB0E, 0xCFA4BB90, 0x412BBC73, 0x8D81BCED, - 0x8744B5F4, 0x4BEEB56A, 0xC561B289, 0x09CBB217, 0xAC509190, 0x60FA910E, 0xEE7596ED, 0x22DF9673, 0x281A9F6A, - 0xE4B09FF4, 0x6A3F9817, 0xA6959889, 0x7FB58A25, 0xB31F8ABB, 0x3D908D58, 0xF13A8DC6, 0xFBFF84DF, 0x37558441, - 0xB9DA83A2, 0x7570833C, 0x533B85DA, 0x9F918544, 0x111E82A7, 0xDDB48239, 0xD7718B20, 0x1BDB8BBE, 0x95548C5D, - 0x59FE8CC3, 0x80DE9E6F, 0x4C749EF1, 0xC2FB9912, 0x0E51998C, 0x04949095, 0xC83E900B, 0x46B197E8, 0x8A1B9776, - 0x2F80B4F1, 0xE32AB46F, 0x6DA5B38C, 0xA10FB312, 0xABCABA0B, 0x6760BA95, 0xE9EFBD76, 0x2545BDE8, 0xFC65AF44, - 0x30CFAFDA, 0xBE40A839, 0x72EAA8A7, 0x782FA1BE, 0xB485A120, 0x3A0AA6C3, 0xF6A0A65D, 0xAA4DE78C, 0x66E7E712, - 0xE868E0F1, 0x24C2E06F, 0x2E07E976, 0xE2ADE9E8, 0x6C22EE0B, 0xA088EE95, 0x79A8FC39, 0xB502FCA7, 0x3B8DFB44, - 0xF727FBDA, 0xFDE2F2C3, 0x3148F25D, 0xBFC7F5BE, 0x736DF520, 0xD6F6D6A7, 0x1A5CD639, 0x94D3D1DA, 0x5879D144, - 0x52BCD85D, 0x9E16D8C3, 0x1099DF20, 0xDC33DFBE, 0x0513CD12, 0xC9B9CD8C, 0x4736CA6F, 0x8B9CCAF1, 0x8159C3E8, - 0x4DF3C376, 0xC37CC495, 0x0FD6C40B, 0x7AA64737, 0xB60C47A9, 
0x3883404A, 0xF42940D4, 0xFEEC49CD, 0x32464953, - 0xBCC94EB0, 0x70634E2E, 0xA9435C82, 0x65E95C1C, 0xEB665BFF, 0x27CC5B61, 0x2D095278, 0xE1A352E6, 0x6F2C5505, - 0xA386559B, 0x061D761C, 0xCAB77682, 0x44387161, 0x889271FF, 0x825778E6, 0x4EFD7878, 0xC0727F9B, 0x0CD87F05, - 0xD5F86DA9, 0x19526D37, 0x97DD6AD4, 0x5B776A4A, 0x51B26353, 0x9D1863CD, 0x1397642E, 0xDF3D64B0, 0x83D02561, - 0x4F7A25FF, 0xC1F5221C, 0x0D5F2282, 0x079A2B9B, 0xCB302B05, 0x45BF2CE6, 0x89152C78, 0x50353ED4, 0x9C9F3E4A, - 0x121039A9, 0xDEBA3937, 0xD47F302E, 0x18D530B0, 0x965A3753, 0x5AF037CD, 0xFF6B144A, 0x33C114D4, 0xBD4E1337, - 0x71E413A9, 0x7B211AB0, 0xB78B1A2E, 0x39041DCD, 0xF5AE1D53, 0x2C8E0FFF, 0xE0240F61, 0x6EAB0882, 0xA201081C, - 0xA8C40105, 0x646E019B, 0xEAE10678, 0x264B06E6 }; -} diff --git a/clients/src/main/java/org/oracle/okafka/common/utils/Crc32C.java b/clients/src/main/java/org/oracle/okafka/common/utils/Crc32C.java deleted file mode 100644 index 3493533..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/utils/Crc32C.java +++ /dev/null @@ -1,121 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.utils; - -import java.lang.invoke.MethodHandle; -import java.lang.invoke.MethodHandles; -import java.lang.invoke.MethodType; -import java.nio.ByteBuffer; -import java.util.zip.Checksum; - -/** - * A class that can be used to compute the CRC32C (Castagnoli) of a ByteBuffer or array of bytes. - * - * We use java.util.zip.CRC32C (introduced in Java 9) if it is available and fallback to PureJavaCrc32C, otherwise. - * java.util.zip.CRC32C is significantly faster on reasonably modern CPUs as it uses the CRC32 instruction introduced - * in SSE4.2. - * - * NOTE: This class is intended for INTERNAL usage only within Kafka. 
- */ -public final class Crc32C { - - private static final ChecksumFactory CHECKSUM_FACTORY; - - static { - if (Java.IS_JAVA9_COMPATIBLE) - CHECKSUM_FACTORY = new Java9ChecksumFactory(); - else - CHECKSUM_FACTORY = new PureJavaChecksumFactory(); - } - - private Crc32C() {} - - /** - * Compute the CRC32C (Castagnoli) of the segment of the byte array given by the specified size and offset - * - * @param bytes The bytes to checksum - * @param offset the offset at which to begin the checksum computation - * @param size the number of bytes to checksum - * @return The CRC32C - */ - public static long compute(byte[] bytes, int offset, int size) { - Checksum crc = create(); - crc.update(bytes, offset, size); - return crc.getValue(); - } - - /** - * Compute the CRC32C (Castagnoli) of a byte buffer from a given offset (relative to the buffer's current position) - * - * @param buffer The buffer with the underlying data - * @param offset The offset relative to the current position - * @param size The number of bytes beginning from the offset to include - * @return The CRC32C - */ - public static long compute(ByteBuffer buffer, int offset, int size) { - Checksum crc = create(); - Checksums.update(crc, buffer, offset, size); - return crc.getValue(); - } - - public static Checksum create() { - return CHECKSUM_FACTORY.create(); - } - - private interface ChecksumFactory { - Checksum create(); - } - - private static class Java9ChecksumFactory implements ChecksumFactory { - private static final MethodHandle CONSTRUCTOR; - - static { - try { - Class cls = Class.forName("java.util.zip.CRC32C"); - CONSTRUCTOR = MethodHandles.publicLookup().findConstructor(cls, MethodType.methodType(void.class)); - } catch (ReflectiveOperationException e) { - // Should never happen - throw new RuntimeException(e); - } - } - - @Override - public Checksum create() { - try { - return (Checksum) CONSTRUCTOR.invoke(); - } catch (Throwable throwable) { - // Should never happen - throw new RuntimeException(throwable); - } - } - } - - private static class PureJavaChecksumFactory implements ChecksumFactory { - @Override - public Checksum create() { - return new PureJavaCrc32C(); - } - } -} diff --git a/clients/src/main/java/org/oracle/okafka/common/utils/CreateTopics.java b/clients/src/main/java/org/oracle/okafka/common/utils/CreateTopics.java index 104d331..b594202 100644 --- a/clients/src/main/java/org/oracle/okafka/common/utils/CreateTopics.java +++ b/clients/src/main/java/org/oracle/okafka/common/utils/CreateTopics.java @@ -1,7 +1,7 @@ /* -** OKafka Java Client version 0.8. +** OKafka Java Client version 23.4. ** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. +** Copyright (c) 2019, 2024 Oracle and/or its affiliates. ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
*/ @@ -13,9 +13,9 @@ import java.util.HashMap; import java.util.Map; -import org.oracle.okafka.common.errors.InvalidConfigurationException; -import org.oracle.okafka.common.errors.InvalidTopicException; -import org.oracle.okafka.common.errors.TopicExistsException; +import org.apache.kafka.common.errors.InvalidConfigurationException; +import org.apache.kafka.common.errors.InvalidTopicException; +import org.apache.kafka.common.errors.TopicExistsException; import org.oracle.okafka.common.requests.CreateTopicsRequest.TopicDetails; public class CreateTopics { @@ -26,7 +26,7 @@ private static void setQueueParameter(Connection jdbcConn, String topic, String CallableStatement qParaStmt = null; try { qParaStmt = jdbcConn.prepareCall(qParaTxt); - qParaStmt.setString(1, Utils.enquote(topic)); + qParaStmt.setString(1, ConnectionUtils.enquote(topic)); qParaStmt.setString(2, paraName); qParaStmt.setInt(3, paraValue); qParaStmt.execute(); @@ -47,7 +47,7 @@ private static void startTopic(Connection jdbcConn, String topic) throws SQLExce CallableStatement sQStmt = null; try { sQStmt = jdbcConn.prepareCall(sQTxt); - sQStmt.setString(1, Utils.enquote(topic)); + sQStmt.setString(1, ConnectionUtils.enquote(topic)); sQStmt.execute(); } catch(SQLException e) { throw e; @@ -76,42 +76,27 @@ public static Map createTopics(Connection jdbcConn, Map topicDetails : topics.entrySet()) { topic = topicDetails.getKey().trim(); details = topicDetails.getValue(); try { - String query = "declare qprops dbms_aqadm.QUEUE_PROPS_T; begin "; for(Map.Entry config : details.configs.entrySet()) { String property = config.getKey().trim(); if(property.equals("retention.ms")) { retentionSec = Long.parseLong(config.getValue().trim())/1000; - query = query + "qprops.retention_time := ? ;"; - retentionSet = true; - } - else throw new InvalidConfigurationException("Invalid configuration: " + property + "provided for topic: " + topic); + } + else throw new InvalidConfigurationException("Invalid configuration: " + property + " provided for topic: " + topic); } - query = query + "sys.dbms_aqadm.create_sharded_queue(queue_name=>?, multiple_consumers=>(case ? when 1 then true else false end), queue_properties => qprops); end;"; - cStmt = jdbcConn.prepareCall(query); - int indx = 1; - if(retentionSet) { - cStmt.setLong(indx,retentionSec); - indx++; - } - cStmt.setString(indx++, Utils.enquote(topic)); - cStmt.setInt(indx++, 1); - + cStmt = jdbcConn.prepareCall("{call DBMS_TEQK.AQ$_CREATE_KAFKA_TOPIC(topicname=>? 
,partition_num=>?, retentiontime=>?)}"); + cStmt.setString(1, ConnectionUtils.enquote(topic)); + cStmt.setInt(2, details.numPartitions); + cStmt.setLong(3,retentionSec); cStmt.execute(); - setPartitionNum(jdbcConn,topic,details.numPartitions); - setKeyBasedEnqueue(jdbcConn,topic); - setStickyDeq(jdbcConn,topic); - startTopic(jdbcConn,topic); - } catch(SQLException sqlEx) { if ( sqlEx.getErrorCode() == 24019 || sqlEx.getErrorCode() == 44003 ) { result.put(topic, new InvalidTopicException(sqlEx)); - } + } else if ( sqlEx.getErrorCode() == 24001 ) { result.put(topic, new TopicExistsException("Topic already exists: ", sqlEx)); } @@ -122,16 +107,15 @@ else if ( sqlEx.getErrorCode() == 24001 ) { if(result.get(topic) == null) { result.put(topic, null); } - } + } } finally { try { if(cStmt != null) cStmt.close(); - } catch(Exception e) { + } catch(Exception e) { //do nothing - } + } } return result; } - } diff --git a/clients/src/main/java/org/oracle/okafka/common/utils/Exit.java b/clients/src/main/java/org/oracle/okafka/common/utils/Exit.java deleted file mode 100644 index 8a0f5a7..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/utils/Exit.java +++ /dev/null @@ -1,86 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.utils; - -/** - * Internal class that should be used instead of `Exit.exit()` and `Runtime.getRuntime().halt()` so that tests can - * easily change the behaviour. 
- */ -public class Exit { - - public interface Procedure { - void execute(int statusCode, String message); - } - - private static final Procedure DEFAULT_HALT_PROCEDURE = new Procedure() { - @Override - public void execute(int statusCode, String message) { - Runtime.getRuntime().halt(statusCode); - } - }; - - private static final Procedure DEFAULT_EXIT_PROCEDURE = new Procedure() { - @Override - public void execute(int statusCode, String message) { - System.exit(statusCode); - } - }; - - private volatile static Procedure exitProcedure = DEFAULT_EXIT_PROCEDURE; - private volatile static Procedure haltProcedure = DEFAULT_HALT_PROCEDURE; - - public static void exit(int statusCode) { - exit(statusCode, null); - } - - public static void exit(int statusCode, String message) { - exitProcedure.execute(statusCode, message); - } - - public static void halt(int statusCode) { - halt(statusCode, null); - } - - public static void halt(int statusCode, String message) { - haltProcedure.execute(statusCode, message); - } - - public static void setExitProcedure(Procedure procedure) { - exitProcedure = procedure; - } - - public static void setHaltProcedure(Procedure procedure) { - haltProcedure = procedure; - } - - public static void resetExitProcedure() { - exitProcedure = DEFAULT_EXIT_PROCEDURE; - } - - public static void resetHaltProcedure() { - haltProcedure = DEFAULT_HALT_PROCEDURE; - } - -} diff --git a/clients/src/main/java/org/oracle/okafka/common/utils/Java.java b/clients/src/main/java/org/oracle/okafka/common/utils/Java.java deleted file mode 100644 index ac21ea1..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/utils/Java.java +++ /dev/null @@ -1,77 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.oracle.okafka.common.utils; - -import java.util.StringTokenizer; - -public final class Java { - - private Java() { } - - private static final Version VERSION = parseVersion(System.getProperty("java.specification.version")); - - // Package private for testing - static Version parseVersion(String versionString) { - final StringTokenizer st = new StringTokenizer(versionString, "."); - int majorVersion = Integer.parseInt(st.nextToken()); - int minorVersion; - if (st.hasMoreTokens()) - minorVersion = Integer.parseInt(st.nextToken()); - else - minorVersion = 0; - return new Version(majorVersion, minorVersion); - } - - // Having these as static final provides the best opportunity for compilar optimization - public static final boolean IS_JAVA9_COMPATIBLE = VERSION.isJava9Compatible(); - - public static boolean isIbmJdk() { - return System.getProperty("java.vendor").contains("IBM"); - } - - // Package private for testing - static class Version { - public final int majorVersion; - public final int minorVersion; - - private Version(int majorVersion, int minorVersion) { - this.majorVersion = majorVersion; - this.minorVersion = minorVersion; - } - - @Override - public String toString() { - return "Version(majorVersion=" + majorVersion + - ", minorVersion=" + minorVersion + ")"; - } - - // Package private for testing - boolean isJava9Compatible() { - return majorVersion >= 9; - } - - } - -} diff --git a/clients/src/main/java/org/oracle/okafka/common/utils/KafkaThread.java b/clients/src/main/java/org/oracle/okafka/common/utils/KafkaThread.java deleted file mode 100644 index 1b174f0..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/utils/KafkaThread.java +++ /dev/null @@ -1,64 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.oracle.okafka.common.utils; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * A wrapper for Thread that sets things up nicely - */ -public class KafkaThread extends Thread { - - private final Logger log = LoggerFactory.getLogger(getClass()); - - public static KafkaThread daemon(final String name, Runnable runnable) { - return new KafkaThread(name, runnable, true); - } - - public static KafkaThread nonDaemon(final String name, Runnable runnable) { - return new KafkaThread(name, runnable, false); - } - - public KafkaThread(final String name, boolean daemon) { - super(name); - configureThread(name, daemon); - } - - public KafkaThread(final String name, Runnable runnable, boolean daemon) { - super(runnable, name); - configureThread(name, daemon); - } - - private void configureThread(final String name, boolean daemon) { - setDaemon(daemon); - setUncaughtExceptionHandler(new UncaughtExceptionHandler() { - public void uncaughtException(Thread t, Throwable e) { - log.error("Uncaught exception in thread '{}':", name, e); - } - }); - } - -} diff --git a/clients/src/main/java/org/oracle/okafka/common/utils/LogContext.java b/clients/src/main/java/org/oracle/okafka/common/utils/LogContext.java deleted file mode 100644 index ffda821..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/utils/LogContext.java +++ /dev/null @@ -1,801 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.utils; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.slf4j.Marker; -import org.slf4j.helpers.FormattingTuple; -import org.slf4j.helpers.MessageFormatter; -import org.slf4j.spi.LocationAwareLogger; - -/** - * This class provides a way to instrument loggers with a common context which can be used to - * automatically enrich log messages. For example, in the KafkaConsumer, it is often useful to know - * the groupId of the consumer, so this can be added to a context object which can then be passed to - * all of the dependent components in order to build new loggers. This removes the need to manually - * add the groupId to each message. - */ -public class LogContext { - - private final String logPrefix; - - public LogContext(String logPrefix) { - this.logPrefix = logPrefix == null ? 
"" : logPrefix; - } - - public LogContext() { - this(""); - } - - public Logger logger(Class clazz) { - Logger logger = LoggerFactory.getLogger(clazz); - if (logger instanceof LocationAwareLogger) { - return new LocationAwareKafkaLogger(logPrefix, (LocationAwareLogger) logger); - } else { - return new LocationIgnorantKafkaLogger(logPrefix, logger); - } - } - - public String logPrefix() { - return logPrefix; - } - - private static abstract class AbstractKafkaLogger implements Logger { - private final String prefix; - - protected AbstractKafkaLogger(final String prefix) { - this.prefix = prefix; - } - - protected String addPrefix(final String message) { - return prefix + message; - } - } - - private static class LocationAwareKafkaLogger extends AbstractKafkaLogger { - private final LocationAwareLogger logger; - private final String fqcn; - - LocationAwareKafkaLogger(String logPrefix, LocationAwareLogger logger) { - super(logPrefix); - this.logger = logger; - this.fqcn = LocationAwareKafkaLogger.class.getName(); - } - - @Override - public String getName() { - return logger.getName(); - } - - @Override - public boolean isTraceEnabled() { - return logger.isTraceEnabled(); - } - - @Override - public boolean isTraceEnabled(Marker marker) { - return logger.isTraceEnabled(marker); - } - - @Override - public boolean isDebugEnabled() { - return logger.isDebugEnabled(); - } - - @Override - public boolean isDebugEnabled(Marker marker) { - return logger.isDebugEnabled(marker); - } - - @Override - public boolean isInfoEnabled() { - return logger.isInfoEnabled(); - } - - @Override - public boolean isInfoEnabled(Marker marker) { - return logger.isInfoEnabled(marker); - } - - @Override - public boolean isWarnEnabled() { - return logger.isWarnEnabled(); - } - - @Override - public boolean isWarnEnabled(Marker marker) { - return logger.isWarnEnabled(marker); - } - - @Override - public boolean isErrorEnabled() { - return logger.isErrorEnabled(); - } - - @Override - public boolean isErrorEnabled(Marker marker) { - return logger.isErrorEnabled(marker); - } - - @Override - public void trace(String message) { - if (logger.isTraceEnabled()) { - writeLog(null, LocationAwareLogger.TRACE_INT, message, null, null); - } - } - - @Override - public void trace(String format, Object arg) { - if (logger.isTraceEnabled()) { - writeLog(null, LocationAwareLogger.TRACE_INT, format, new Object[]{arg}, null); - } - } - - @Override - public void trace(String format, Object arg1, Object arg2) { - if (logger.isTraceEnabled()) { - writeLog(null, LocationAwareLogger.TRACE_INT, format, new Object[]{arg1, arg2}, null); - } - } - - @Override - public void trace(String format, Object... 
args) { - if (logger.isTraceEnabled()) { - writeLog(null, LocationAwareLogger.TRACE_INT, format, args, null); - } - } - - @Override - public void trace(String msg, Throwable t) { - if (logger.isTraceEnabled()) { - writeLog(null, LocationAwareLogger.TRACE_INT, msg, null, t); - } - } - - @Override - public void trace(Marker marker, String msg) { - if (logger.isTraceEnabled()) { - writeLog(marker, LocationAwareLogger.TRACE_INT, msg, null, null); - } - } - - @Override - public void trace(Marker marker, String format, Object arg) { - if (logger.isTraceEnabled()) { - writeLog(marker, LocationAwareLogger.TRACE_INT, format, new Object[]{arg}, null); - } - } - - @Override - public void trace(Marker marker, String format, Object arg1, Object arg2) { - if (logger.isTraceEnabled()) { - writeLog(marker, LocationAwareLogger.TRACE_INT, format, new Object[]{arg1, arg2}, null); - } - } - - @Override - public void trace(Marker marker, String format, Object... argArray) { - if (logger.isTraceEnabled()) { - writeLog(marker, LocationAwareLogger.TRACE_INT, format, argArray, null); - } - } - - @Override - public void trace(Marker marker, String msg, Throwable t) { - if (logger.isTraceEnabled()) { - writeLog(marker, LocationAwareLogger.TRACE_INT, msg, null, t); - } - } - - @Override - public void debug(String message) { - if (logger.isDebugEnabled()) { - writeLog(null, LocationAwareLogger.DEBUG_INT, message, null, null); - } - } - - @Override - public void debug(String format, Object arg) { - if (logger.isDebugEnabled()) { - writeLog(null, LocationAwareLogger.DEBUG_INT, format, new Object[]{arg}, null); - } - } - - @Override - public void debug(String format, Object arg1, Object arg2) { - if (logger.isDebugEnabled()) { - writeLog(null, LocationAwareLogger.DEBUG_INT, format, new Object[]{arg1, arg2}, null); - } - } - - @Override - public void debug(String format, Object... args) { - if (logger.isDebugEnabled()) { - writeLog(null, LocationAwareLogger.DEBUG_INT, format, args, null); - } - } - - @Override - public void debug(String msg, Throwable t) { - if (logger.isDebugEnabled()) { - writeLog(null, LocationAwareLogger.DEBUG_INT, msg, null, t); - } - } - - @Override - public void debug(Marker marker, String msg) { - if (logger.isDebugEnabled()) { - writeLog(marker, LocationAwareLogger.DEBUG_INT, msg, null, null); - } - } - - @Override - public void debug(Marker marker, String format, Object arg) { - if (logger.isDebugEnabled()) { - writeLog(marker, LocationAwareLogger.DEBUG_INT, format, new Object[]{arg}, null); - } - } - - @Override - public void debug(Marker marker, String format, Object arg1, Object arg2) { - if (logger.isDebugEnabled()) { - writeLog(marker, LocationAwareLogger.DEBUG_INT, format, new Object[]{arg1, arg2}, null); - } - } - - @Override - public void debug(Marker marker, String format, Object... 
arguments) { - if (logger.isDebugEnabled()) { - writeLog(marker, LocationAwareLogger.DEBUG_INT, format, arguments, null); - } - } - - @Override - public void debug(Marker marker, String msg, Throwable t) { - if (logger.isDebugEnabled()) { - writeLog(marker, LocationAwareLogger.DEBUG_INT, msg, null, t); - } - } - - @Override - public void warn(String message) { - writeLog(null, LocationAwareLogger.WARN_INT, message, null, null); - } - - @Override - public void warn(String format, Object arg) { - writeLog(null, LocationAwareLogger.WARN_INT, format, new Object[]{arg}, null); - } - - @Override - public void warn(String message, Object arg1, Object arg2) { - writeLog(null, LocationAwareLogger.WARN_INT, message, new Object[]{arg1, arg2}, null); - } - - @Override - public void warn(String format, Object... args) { - writeLog(null, LocationAwareLogger.WARN_INT, format, args, null); - } - - @Override - public void warn(String msg, Throwable t) { - writeLog(null, LocationAwareLogger.WARN_INT, msg, null, t); - } - - @Override - public void warn(Marker marker, String msg) { - writeLog(marker, LocationAwareLogger.WARN_INT, msg, null, null); - } - - @Override - public void warn(Marker marker, String format, Object arg) { - writeLog(marker, LocationAwareLogger.WARN_INT, format, new Object[]{arg}, null); - } - - @Override - public void warn(Marker marker, String format, Object arg1, Object arg2) { - writeLog(marker, LocationAwareLogger.WARN_INT, format, new Object[]{arg1, arg2}, null); - } - - @Override - public void warn(Marker marker, String format, Object... arguments) { - writeLog(marker, LocationAwareLogger.WARN_INT, format, arguments, null); - } - - @Override - public void warn(Marker marker, String msg, Throwable t) { - writeLog(marker, LocationAwareLogger.WARN_INT, msg, null, t); - } - - @Override - public void error(String message) { - writeLog(null, LocationAwareLogger.ERROR_INT, message, null, null); - } - - @Override - public void error(String format, Object arg) { - writeLog(null, LocationAwareLogger.ERROR_INT, format, new Object[]{arg}, null); - } - - @Override - public void error(String format, Object arg1, Object arg2) { - writeLog(null, LocationAwareLogger.ERROR_INT, format, new Object[]{arg1, arg2}, null); - } - - @Override - public void error(String format, Object... args) { - writeLog(null, LocationAwareLogger.ERROR_INT, format, args, null); - } - - @Override - public void error(String msg, Throwable t) { - writeLog(null, LocationAwareLogger.ERROR_INT, msg, null, t); - } - - @Override - public void error(Marker marker, String msg) { - writeLog(marker, LocationAwareLogger.ERROR_INT, msg, null, null); - } - - @Override - public void error(Marker marker, String format, Object arg) { - writeLog(marker, LocationAwareLogger.ERROR_INT, format, new Object[]{arg}, null); - } - - @Override - public void error(Marker marker, String format, Object arg1, Object arg2) { - writeLog(marker, LocationAwareLogger.ERROR_INT, format, new Object[]{arg1, arg2}, null); - } - - @Override - public void error(Marker marker, String format, Object... 
arguments) { - writeLog(marker, LocationAwareLogger.ERROR_INT, format, arguments, null); - } - - @Override - public void error(Marker marker, String msg, Throwable t) { - writeLog(marker, LocationAwareLogger.ERROR_INT, msg, null, t); - } - - @Override - public void info(String msg) { - writeLog(null, LocationAwareLogger.INFO_INT, msg, null, null); - } - - @Override - public void info(String format, Object arg) { - writeLog(null, LocationAwareLogger.INFO_INT, format, new Object[]{arg}, null); - } - - @Override - public void info(String format, Object arg1, Object arg2) { - writeLog(null, LocationAwareLogger.INFO_INT, format, new Object[]{arg1, arg2}, null); - } - - @Override - public void info(String format, Object... args) { - writeLog(null, LocationAwareLogger.INFO_INT, format, args, null); - } - - @Override - public void info(String msg, Throwable t) { - writeLog(null, LocationAwareLogger.INFO_INT, msg, null, t); - } - - @Override - public void info(Marker marker, String msg) { - writeLog(marker, LocationAwareLogger.INFO_INT, msg, null, null); - } - - @Override - public void info(Marker marker, String format, Object arg) { - writeLog(marker, LocationAwareLogger.INFO_INT, format, new Object[]{arg}, null); - } - - @Override - public void info(Marker marker, String format, Object arg1, Object arg2) { - writeLog(marker, LocationAwareLogger.INFO_INT, format, new Object[]{arg1, arg2}, null); - } - - @Override - public void info(Marker marker, String format, Object... arguments) { - writeLog(marker, LocationAwareLogger.INFO_INT, format, arguments, null); - } - - @Override - public void info(Marker marker, String msg, Throwable t) { - writeLog(marker, LocationAwareLogger.INFO_INT, msg, null, t); - } - - private void writeLog(Marker marker, int level, String format, Object[] args, Throwable exception) { - String message = format; - if (args != null && args.length > 0) { - FormattingTuple formatted = MessageFormatter.arrayFormat(format, args); - if (exception == null && formatted.getThrowable() != null) { - exception = formatted.getThrowable(); - } - message = formatted.getMessage(); - } - logger.log(marker, fqcn, level, addPrefix(message), null, exception); - } - } - - private static class LocationIgnorantKafkaLogger extends AbstractKafkaLogger { - private final Logger logger; - - LocationIgnorantKafkaLogger(String logPrefix, Logger logger) { - super(logPrefix); - this.logger = logger; - } - - @Override - public String getName() { - return logger.getName(); - } - - @Override - public boolean isTraceEnabled() { - return logger.isTraceEnabled(); - } - - @Override - public boolean isTraceEnabled(Marker marker) { - return logger.isTraceEnabled(marker); - } - - @Override - public boolean isDebugEnabled() { - return logger.isDebugEnabled(); - } - - @Override - public boolean isDebugEnabled(Marker marker) { - return logger.isDebugEnabled(marker); - } - - @Override - public boolean isInfoEnabled() { - return logger.isInfoEnabled(); - } - - @Override - public boolean isInfoEnabled(Marker marker) { - return logger.isInfoEnabled(marker); - } - - @Override - public boolean isWarnEnabled() { - return logger.isWarnEnabled(); - } - - @Override - public boolean isWarnEnabled(Marker marker) { - return logger.isWarnEnabled(marker); - } - - @Override - public boolean isErrorEnabled() { - return logger.isErrorEnabled(); - } - - @Override - public boolean isErrorEnabled(Marker marker) { - return logger.isErrorEnabled(marker); - } - - @Override - public void trace(String message) { - if (logger.isTraceEnabled()) { - 
logger.trace(addPrefix(message)); - } - } - - @Override - public void trace(String message, Object arg) { - if (logger.isTraceEnabled()) { - logger.trace(addPrefix(message), arg); - } - } - - @Override - public void trace(String message, Object arg1, Object arg2) { - if (logger.isTraceEnabled()) { - logger.trace(addPrefix(message), arg1, arg2); - } - } - - @Override - public void trace(String message, Object... args) { - if (logger.isTraceEnabled()) { - logger.trace(addPrefix(message), args); - } - } - - @Override - public void trace(String msg, Throwable t) { - if (logger.isTraceEnabled()) { - logger.trace(addPrefix(msg), t); - } - } - - @Override - public void trace(Marker marker, String msg) { - if (logger.isTraceEnabled()) { - logger.trace(marker, addPrefix(msg)); - } - } - - @Override - public void trace(Marker marker, String format, Object arg) { - if (logger.isTraceEnabled()) { - logger.trace(marker, addPrefix(format), arg); - } - } - - @Override - public void trace(Marker marker, String format, Object arg1, Object arg2) { - if (logger.isTraceEnabled()) { - logger.trace(marker, addPrefix(format), arg1, arg2); - } - } - - @Override - public void trace(Marker marker, String format, Object... argArray) { - if (logger.isTraceEnabled()) { - logger.trace(marker, addPrefix(format), argArray); - } - } - - @Override - public void trace(Marker marker, String msg, Throwable t) { - if (logger.isTraceEnabled()) { - logger.trace(marker, addPrefix(msg), t); - } - } - - @Override - public void debug(String message) { - if (logger.isDebugEnabled()) { - logger.debug(addPrefix(message)); - } - } - - @Override - public void debug(String message, Object arg) { - if (logger.isDebugEnabled()) { - logger.debug(addPrefix(message), arg); - } - } - - @Override - public void debug(String message, Object arg1, Object arg2) { - if (logger.isDebugEnabled()) { - logger.debug(addPrefix(message), arg1, arg2); - } - } - - @Override - public void debug(String message, Object... args) { - if (logger.isDebugEnabled()) { - logger.debug(addPrefix(message), args); - } - } - - @Override - public void debug(String msg, Throwable t) { - if (logger.isDebugEnabled()) { - logger.debug(addPrefix(msg), t); - } - } - - @Override - public void debug(Marker marker, String msg) { - if (logger.isDebugEnabled()) { - logger.debug(marker, addPrefix(msg)); - } - } - - @Override - public void debug(Marker marker, String format, Object arg) { - if (logger.isDebugEnabled()) { - logger.debug(marker, addPrefix(format), arg); - } - } - - @Override - public void debug(Marker marker, String format, Object arg1, Object arg2) { - if (logger.isDebugEnabled()) { - logger.debug(marker, addPrefix(format), arg1, arg2); - } - } - - @Override - public void debug(Marker marker, String format, Object... arguments) { - if (logger.isDebugEnabled()) { - logger.debug(marker, addPrefix(format), arguments); - } - } - - @Override - public void debug(Marker marker, String msg, Throwable t) { - if (logger.isDebugEnabled()) { - logger.debug(marker, addPrefix(msg), t); - } - } - - @Override - public void warn(String message) { - logger.warn(addPrefix(message)); - } - - @Override - public void warn(String message, Object arg) { - logger.warn(addPrefix(message), arg); - } - - @Override - public void warn(String message, Object arg1, Object arg2) { - logger.warn(addPrefix(message), arg1, arg2); - } - - @Override - public void warn(String message, Object... 
args) { - logger.warn(addPrefix(message), args); - } - - @Override - public void warn(String msg, Throwable t) { - logger.warn(addPrefix(msg), t); - } - - @Override - public void warn(Marker marker, String msg) { - logger.warn(marker, addPrefix(msg)); - } - - @Override - public void warn(Marker marker, String format, Object arg) { - logger.warn(marker, addPrefix(format), arg); - } - - @Override - public void warn(Marker marker, String format, Object arg1, Object arg2) { - logger.warn(marker, addPrefix(format), arg1, arg2); - } - - @Override - public void warn(Marker marker, String format, Object... arguments) { - logger.warn(marker, addPrefix(format), arguments); - } - - @Override - public void warn(Marker marker, String msg, Throwable t) { - logger.warn(marker, addPrefix(msg), t); - } - - @Override - public void error(String message) { - logger.error(addPrefix(message)); - } - - @Override - public void error(String message, Object arg) { - logger.error(addPrefix(message), arg); - } - - @Override - public void error(String message, Object arg1, Object arg2) { - logger.error(addPrefix(message), arg1, arg2); - } - - @Override - public void error(String message, Object... args) { - logger.error(addPrefix(message), args); - } - - @Override - public void error(String msg, Throwable t) { - logger.error(addPrefix(msg), t); - } - - @Override - public void error(Marker marker, String msg) { - logger.error(marker, addPrefix(msg)); - } - - @Override - public void error(Marker marker, String format, Object arg) { - logger.error(marker, addPrefix(format), arg); - } - - @Override - public void error(Marker marker, String format, Object arg1, Object arg2) { - logger.error(marker, addPrefix(format), arg1, arg2); - } - - @Override - public void error(Marker marker, String format, Object... arguments) { - logger.error(marker, addPrefix(format), arguments); - } - - @Override - public void error(Marker marker, String msg, Throwable t) { - logger.error(marker, addPrefix(msg), t); - } - - @Override - public void info(String message) { - logger.info(addPrefix(message)); - } - - @Override - public void info(String message, Object arg) { - logger.info(addPrefix(message), arg); - } - - @Override - public void info(String message, Object arg1, Object arg2) { - logger.info(addPrefix(message), arg1, arg2); - } - - @Override - public void info(String message, Object... args) { - logger.info(addPrefix(message), args); - } - - @Override - public void info(String msg, Throwable t) { - logger.info(addPrefix(msg), t); - } - - @Override - public void info(Marker marker, String msg) { - logger.info(marker, addPrefix(msg)); - } - - @Override - public void info(Marker marker, String format, Object arg) { - logger.info(marker, addPrefix(format), arg); - } - - @Override - public void info(Marker marker, String format, Object arg1, Object arg2) { - logger.info(marker, addPrefix(format), arg1, arg2); - } - - @Override - public void info(Marker marker, String format, Object... 
arguments) { - logger.info(marker, addPrefix(format), arguments); - } - - @Override - public void info(Marker marker, String msg, Throwable t) { - logger.info(marker, addPrefix(msg), t); - } - - } - -} diff --git a/clients/src/main/java/org/oracle/okafka/common/utils/MessageIdConverter.java b/clients/src/main/java/org/oracle/okafka/common/utils/MessageIdConverter.java index 30537fe..35a11f3 100644 --- a/clients/src/main/java/org/oracle/okafka/common/utils/MessageIdConverter.java +++ b/clients/src/main/java/org/oracle/okafka/common/utils/MessageIdConverter.java @@ -1,123 +1,330 @@ /* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ + ** OKafka Java Client version 23.4. + ** + ** Copyright (c) 2019, 2024 Oracle and/or its affiliates. + ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. + */ package org.oracle.okafka.common.utils; -import org.oracle.okafka.common.TopicPartition; +import org.apache.kafka.common.TopicPartition; import org.oracle.okafka.common.errors.InvalidMessageIdException; public class MessageIdConverter { + public static int invokeCnt =0; - public static long getOffset(String msgId) { + public static final int DEFAULT_SUBPARTITION_SIZE = 20000; + + /*public static long getOffset(String msgId) { if(msgId.length() != 35) - throw new InvalidMessageIdException("Length of message Id is not 32"); + throw new InvalidMessageIdException("Length of message Id is not 35"); try { String endian = msgId.substring(29, 31); - byte endianIndex ; - //Get endian of message id - switch(endian) { - //big endian - case "FF" : - endianIndex = 0; - break; - //little endian - case "66" : - endianIndex = 1; - break; - //PDP endian - case "99" : - endianIndex = 2; - break; - default : - endianIndex = -1; - } - if(endianIndex == -1) - throw new InvalidMessageIdException("Endian of message Id is not valid"); - long baseOffset = getOffset(msgId.substring(3, 19), endianIndex); - long relOffset = getOffset(msgId.substring(31, 35), endianIndex); - return ((baseOffset << 16) + relOffset); + byte endianIndex ; + //Get endian of message id + switch(endian) { + //big endian + case "FF" : + endianIndex = 0; + break; + //little endian + case "66" : + endianIndex = 1; + break; + //PDP endian + case "99" : + endianIndex = 2; + break; + default : + endianIndex = -1; + } + if(endianIndex == -1) + throw new InvalidMessageIdException("Endian of message Id is not valid"); + long baseOffset = getOffset(msgId.substring(3, 19), endianIndex); + long relOffset = getOffset(msgId.substring(31, 35), endianIndex); + //return ((baseOffset << 16) + relOffset); + return baseOffset*20000 + relOffset; + } catch(RuntimeException e) { throw e; } - + } + public static int getRelativeOffset(String msgId) + { + OKafkaOffset okOffset = getOKafkaOffset(msgId, false, false); + return okOffset.sequenceNo; + } + public static OKafkaOffset getOKafkaOffset(String msgId) + { + // Most common use. 
Get sub Partition Id (base offset) and sequence number ( relative offset) + return getOKafkaOffset(msgId, false, true); + }*/ + public static OKafkaOffset computeOffset(OKafkaOffset prevOffset, String msgId) + { + OKafkaOffset.MsgIdEndian endian = OKafkaOffset.MsgIdEndian.INVALID; + long partId = -1; + long subPartId = -1; + int seqNo = -1; + boolean expectedId = false; + OKafkaOffset newOffset = null; + if (prevOffset != null) + { + endian = prevOffset.endian; + expectedId = isExpectedMsgId(prevOffset, msgId); + if(expectedId) + { + partId = prevOffset.partitionId; + subPartId = prevOffset.subPartitionId; + seqNo = prevOffset.sequenceNo+1; + newOffset = new OKafkaOffset(partId, subPartId, seqNo, endian, msgId); + } + } + // If no previous offset or if this message id is not expected one then recalculate + if (!expectedId) + { + newOffset = getOKafkaOffset(msgId, true, true); + /* + endian = getEndian(msgId); + seqNo = (int)getOffset(msgId.substring(31, 35), endian); + subPartId = getOffset(msgId.substring(3, 19), endian); + partId= getOffset(msgId.substring(19,27), endian); + */ + } + newOffset.msgId = msgId; + return newOffset; + } + + public static OKafkaOffset getOKafkaOffset(String msgId, boolean getPartId, boolean getSubPartId) { + + if(msgId == null) + return new OKafkaOffset(-1, -1, -1, OKafkaOffset.MsgIdEndian.INVALID); + + if(msgId.length() != 35) + throw new InvalidMessageIdException("Length of message Id is not 35"); + try { + OKafkaOffset.MsgIdEndian endianIndex = getEndian(msgId); + + if(endianIndex == OKafkaOffset.MsgIdEndian.INVALID) + throw new InvalidMessageIdException("Endian of message Id '" + msgId +"' is not valid"); + + int relOffset = (int)getOffset(msgId.substring(31, 35), endianIndex); + + long baseOffset = -1; + if(getSubPartId) + { + baseOffset = getOffset(msgId.substring(3, 19), endianIndex); + baseOffset = baseOffset * DEFAULT_SUBPARTITION_SIZE; + } + + long partId = -1; + if (getPartId) + { + partId = getOffset(msgId.substring(19,27), endianIndex); + } + OKafkaOffset okOffset = new OKafkaOffset(partId, baseOffset, relOffset, endianIndex, msgId); + return okOffset; + } catch(RuntimeException e) { + throw e; + } + } + + /** + * Converts hexadecimal string which is in specific endian format to decimal number + * @param data hexadecimal string representing either subshard or sequence number in a subshard. + * @param endianIndex index representing either of big, little and pdp endian. + * @return decimal representation of hexadecimal string. + */ + private static long getOffset(String data, OKafkaOffset.MsgIdEndian endianIndex) { + String builderString = null; + switch(endianIndex) { + case BIG : + builderString = data; + break; + case LITTLE : + builderString = reverse(data); + break; + case PDP : + builderString = swap(data); + break; + case INVALID: + builderString= null; + } + if(builderString != null) + return Long.parseLong(builderString, 16); + + return -1; + } + /** - * Converts hexadecimal string which is in specific endian format to decimal number - * @param data hexadecimal string representing either subshard or sequence number in a subshard. - * @param endianIndex index representing either of big, little and pdp endian. - * @return decimal representation of hexadecimal string. 
- */ - private static long getOffset(String data, byte endianIndex) { - String builderString = null; - switch(endianIndex) { - case 0 : - builderString = data; - break; - case 1 : - builderString = reverse(data); - break; - case 2 : - builderString = swap(data); - } - if(builderString != null) - return Long.parseLong(builderString, 16); - return -1; - } - - /** - * convert hexadecimal string in little endian to big endian - * @param data hexadecimal string representing either subshard or sequence number in a subshard. - * @return hexadecimal string in big endian - */ - private static String reverse(String data) { - char[] builderArray = new char[data.length()]; - int length = data.length(); - for(int i = length-2; i >= 0 ; i= i-2) { - builderArray[length -2 -i] = data.charAt(i); - builderArray[length -1 -i] = data.charAt(i+1); - } - return new String(builderArray); - } - - /** - * convert hexadecimal string in pdp endian to big endian - * @param data hexadecimal string representing either subshard or sequence number in a subshard. - * @return hexadecimal string in big endian - */ - private static String swap(String data) { - StringBuilder sb= new StringBuilder(); - int length = data.length(); - for(int i = 0; i < length; i = i+4) { - sb.append(data.substring(i+2, i+4)); - sb.append(data.substring(i, i+2)); - } - return sb.toString(); - } - - public static String getMsgId(TopicPartition tp, long offset, String endian) { + * convert hexadecimal string in little endian to big endian + * @param data hexadecimal string representing either subshard or sequence number in a subshard. + * @return hexadecimal string in big endian + */ + private static String reverse(String data) { + char[] builderArray = new char[data.length()]; + int length = data.length(); + for(int i = length-2; i >= 0 ; i= i-2) { + builderArray[length -2 -i] = data.charAt(i); + builderArray[length -1 -i] = data.charAt(i+1); + } + return new String(builderArray); + } - StringBuilder sb = new StringBuilder(""); - String subpartition = String.format("%16s", Long.toHexString(offset >>> 16)).replace(' ', '0'); + /** + * convert hexadecimal string in pdp endian to big endian + * @param data hexadecimal string representing either subshard or sequence number in a subshard. 
+ * @return hexadecimal string in big endian + */ + private static String swap(String data) { + StringBuilder sb= new StringBuilder(); + int length = data.length(); + for(int i = 0; i < length; i = i+4) { + sb.append(data.substring(i+2, i+4)); + sb.append(data.substring(i, i+2)); + } + return sb.toString(); + } + public static String getMsgId(TopicPartition tp, long offset, String endian, int priority) { + + StringBuilder sb = new StringBuilder(""); + /*String subpartition = String.format("%16s", Long.toHexString(offset >>> 16)).replace(' ', '0'); String partition = String.format("%8s",Integer.toHexString(tp.partition())).replace(' ', '0'); String seq = String.format("%4s", Long.toHexString(offset & 65535)).replace(' ', '0'); - if(endian.equals("66")) { - sb.append(reverse(subpartition)); - sb.append(reverse(partition)); - sb.append("0166"); - sb.append(reverse(seq)); - } else if (endian.equals("FF")) { - sb.append(swap(subpartition)); - sb.append(swap(partition)); - sb.append("01FF"); - sb.append(swap(seq)); - } - - return sb.toString(); - } - + */ + String subpartition = String.format("%16s", Long.toHexString((int)(offset/20000))).replace(' ', '0'); + String partition = String.format("%8s",Integer.toHexString(2*tp.partition())).replace(' ', '0'); + String seq = String.format("%4s", Long.toHexString(offset % 20000)).replace(' ', '0'); + + + if(endian.equals("66")) { + sb.append(reverse(subpartition)); + sb.append(reverse(partition)); + sb.append("0"+priority+"66"); + sb.append(reverse(seq)); + } else if (endian.equals("FF")) { + sb.append(swap(subpartition)); + sb.append(swap(partition)); + sb.append("0"+priority+"FF"); + sb.append(swap(seq)); + } + return sb.toString(); + } + + public static OKafkaOffset.MsgIdEndian getEndian(String msgId) + { + String endian = msgId.substring(29, 31); + OKafkaOffset.MsgIdEndian endianIndex ; + //Get Endian of message id + switch(endian) { + //big Endian + case "FF" : + endianIndex = OKafkaOffset.MsgIdEndian.BIG; + break; + //little Endian + case "66" : + endianIndex = OKafkaOffset.MsgIdEndian.LITTLE; + break; + //PDP Endian + case "99" : + endianIndex = OKafkaOffset.MsgIdEndian.PDP; + break; + default : + endianIndex = OKafkaOffset.MsgIdEndian.INVALID; + } + return endianIndex; + } + //Check if sequence number and sub partition id are expected or not + private static boolean isExpectedMsgId (OKafkaOffset prevOffset, String msgId) + { + if(prevOffset.msgId == null) + return false; + + String prevSubPart = prevOffset.msgId.substring(3, 19); + String thisSubPart = msgId.substring(3, 19); + if(!prevSubPart.equals(thisSubPart)) + return false; + + int thisSeqNo = (int)getOffset(msgId.substring(31, 35) ,prevOffset.endian); + + if(thisSeqNo != (prevOffset.sequenceNo+1) ) + return false; + + return true; + + } + + public static class OKafkaOffset + { + public static final int DEFAULT_SUBPARTITION_SIZE = 20000; + static enum MsgIdEndian + { + BIG, + LITTLE, + PDP, + INVALID + }; + long partitionId; + long subPartitionId; + int sequenceNo; + MsgIdEndian endian; + String msgId; + + public OKafkaOffset(String _msgId) + { + msgId = _msgId; + endian = MessageIdConverter.getEndian(msgId); + } + + public OKafkaOffset(long partId, long subPartId, int seqNo, MsgIdEndian _endian) + { + partitionId = partId; + subPartitionId = subPartId; + sequenceNo = seqNo; + endian = _endian; + } + + public OKafkaOffset(long partId, long subPartId, int seqNo, MsgIdEndian _endian, String _msgId) + { + partitionId = partId; + subPartitionId = subPartId; + sequenceNo = seqNo; + endian = 
_endian; + msgId = _msgId; + } + public void setPartitionId(long _partitionId) + { + partitionId= _partitionId; + } + public long partitionId() { + return partitionId; + } + public long subPartitionId() + { + return subPartitionId; + } + private void setSubPartitionId(long _subPartitionId) + { + subPartitionId = _subPartitionId; + } + public int sequenceNo() + { + return sequenceNo; + } + private void setSequenceNo(int _sequenceNo) + { + sequenceNo = _sequenceNo; + } + public String getMsgId() + { + return msgId; + } + public long getOffset() + { + return (subPartitionId + sequenceNo); + } + + } + } diff --git a/clients/src/main/java/org/oracle/okafka/common/utils/PureJavaCrc32C.java b/clients/src/main/java/org/oracle/okafka/common/utils/PureJavaCrc32C.java deleted file mode 100644 index 0005fdc..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/utils/PureJavaCrc32C.java +++ /dev/null @@ -1,652 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/* - * Some portions of this file Copyright (c) 2004-2006 Intel Corporation and - * licensed under the BSD license. - */ - -package org.oracle.okafka.common.utils; - -import java.util.zip.Checksum; - -/** - * This class was taken from Hadoop: org.apache.hadoop.util.PureJavaCrc32C. - * - * A pure-java implementation of the CRC32 checksum that uses - * the CRC32-C polynomial, the same polynomial used by iSCSI - * and implemented on many Intel chipsets supporting SSE4.2. - * - * NOTE: This class is intended for INTERNAL usage only within Kafka. 
- */ -// The exact version that was retrieved from Hadoop: -// https://github.com/apache/hadoop/blob/224de4f92c222a7b915e9c5d6bdd1a4a3fcbcf31/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PureJavaCrc32C.java -public class PureJavaCrc32C implements Checksum { - - /** the current CRC value, bit-flipped */ - private int crc; - - public PureJavaCrc32C() { - reset(); - } - - @Override - public long getValue() { - long ret = crc; - return (~ret) & 0xffffffffL; - } - - @Override - public void reset() { - crc = 0xffffffff; - } - - @Override - public void update(byte[] b, int off, int len) { - int localCrc = crc; - - while (len > 7) { - final int c0 = (b[off + 0] ^ localCrc) & 0xff; - final int c1 = (b[off + 1] ^ (localCrc >>>= 8)) & 0xff; - final int c2 = (b[off + 2] ^ (localCrc >>>= 8)) & 0xff; - final int c3 = (b[off + 3] ^ (localCrc >>>= 8)) & 0xff; - localCrc = (T[T8_7_START + c0] ^ T[T8_6_START + c1]) - ^ (T[T8_5_START + c2] ^ T[T8_4_START + c3]); - - final int c4 = b[off + 4] & 0xff; - final int c5 = b[off + 5] & 0xff; - final int c6 = b[off + 6] & 0xff; - final int c7 = b[off + 7] & 0xff; - - localCrc ^= (T[T8_3_START + c4] ^ T[T8_2_START + c5]) - ^ (T[T8_1_START + c6] ^ T[T8_0_START + c7]); - - off += 8; - len -= 8; - } - - /* loop unroll - duff's device style */ - switch (len) { - case 7: - localCrc = (localCrc >>> 8) ^ T[T8_0_START + ((localCrc ^ b[off++]) & 0xff)]; - case 6: - localCrc = (localCrc >>> 8) ^ T[T8_0_START + ((localCrc ^ b[off++]) & 0xff)]; - case 5: - localCrc = (localCrc >>> 8) ^ T[T8_0_START + ((localCrc ^ b[off++]) & 0xff)]; - case 4: - localCrc = (localCrc >>> 8) ^ T[T8_0_START + ((localCrc ^ b[off++]) & 0xff)]; - case 3: - localCrc = (localCrc >>> 8) ^ T[T8_0_START + ((localCrc ^ b[off++]) & 0xff)]; - case 2: - localCrc = (localCrc >>> 8) ^ T[T8_0_START + ((localCrc ^ b[off++]) & 0xff)]; - case 1: - localCrc = (localCrc >>> 8) ^ T[T8_0_START + ((localCrc ^ b[off++]) & 0xff)]; - default: - /* nothing */ - } - - // Publish crc out to object - crc = localCrc; - } - - @Override - final public void update(int b) { - crc = (crc >>> 8) ^ T[T8_0_START + ((crc ^ b) & 0xff)]; - } - - // CRC polynomial tables generated by: - // java -cp build/test/classes/:build/classes/ \ - // org.apache.hadoop.util.TestPureJavaCrc32\$Table 82F63B78 - - private static final int T8_0_START = 0 * 256; - private static final int T8_1_START = 1 * 256; - private static final int T8_2_START = 2 * 256; - private static final int T8_3_START = 3 * 256; - private static final int T8_4_START = 4 * 256; - private static final int T8_5_START = 5 * 256; - private static final int T8_6_START = 6 * 256; - private static final int T8_7_START = 7 * 256; - - private static final int[] T = new int[]{ - /* T8_0 */ - 0x00000000, 0xF26B8303, 0xE13B70F7, 0x1350F3F4, - 0xC79A971F, 0x35F1141C, 0x26A1E7E8, 0xD4CA64EB, - 0x8AD958CF, 0x78B2DBCC, 0x6BE22838, 0x9989AB3B, - 0x4D43CFD0, 0xBF284CD3, 0xAC78BF27, 0x5E133C24, - 0x105EC76F, 0xE235446C, 0xF165B798, 0x030E349B, - 0xD7C45070, 0x25AFD373, 0x36FF2087, 0xC494A384, - 0x9A879FA0, 0x68EC1CA3, 0x7BBCEF57, 0x89D76C54, - 0x5D1D08BF, 0xAF768BBC, 0xBC267848, 0x4E4DFB4B, - 0x20BD8EDE, 0xD2D60DDD, 0xC186FE29, 0x33ED7D2A, - 0xE72719C1, 0x154C9AC2, 0x061C6936, 0xF477EA35, - 0xAA64D611, 0x580F5512, 0x4B5FA6E6, 0xB93425E5, - 0x6DFE410E, 0x9F95C20D, 0x8CC531F9, 0x7EAEB2FA, - 0x30E349B1, 0xC288CAB2, 0xD1D83946, 0x23B3BA45, - 0xF779DEAE, 0x05125DAD, 0x1642AE59, 0xE4292D5A, - 0xBA3A117E, 0x4851927D, 0x5B016189, 0xA96AE28A, - 0x7DA08661, 0x8FCB0562, 0x9C9BF696, 
0x6EF07595, - 0x417B1DBC, 0xB3109EBF, 0xA0406D4B, 0x522BEE48, - 0x86E18AA3, 0x748A09A0, 0x67DAFA54, 0x95B17957, - 0xCBA24573, 0x39C9C670, 0x2A993584, 0xD8F2B687, - 0x0C38D26C, 0xFE53516F, 0xED03A29B, 0x1F682198, - 0x5125DAD3, 0xA34E59D0, 0xB01EAA24, 0x42752927, - 0x96BF4DCC, 0x64D4CECF, 0x77843D3B, 0x85EFBE38, - 0xDBFC821C, 0x2997011F, 0x3AC7F2EB, 0xC8AC71E8, - 0x1C661503, 0xEE0D9600, 0xFD5D65F4, 0x0F36E6F7, - 0x61C69362, 0x93AD1061, 0x80FDE395, 0x72966096, - 0xA65C047D, 0x5437877E, 0x4767748A, 0xB50CF789, - 0xEB1FCBAD, 0x197448AE, 0x0A24BB5A, 0xF84F3859, - 0x2C855CB2, 0xDEEEDFB1, 0xCDBE2C45, 0x3FD5AF46, - 0x7198540D, 0x83F3D70E, 0x90A324FA, 0x62C8A7F9, - 0xB602C312, 0x44694011, 0x5739B3E5, 0xA55230E6, - 0xFB410CC2, 0x092A8FC1, 0x1A7A7C35, 0xE811FF36, - 0x3CDB9BDD, 0xCEB018DE, 0xDDE0EB2A, 0x2F8B6829, - 0x82F63B78, 0x709DB87B, 0x63CD4B8F, 0x91A6C88C, - 0x456CAC67, 0xB7072F64, 0xA457DC90, 0x563C5F93, - 0x082F63B7, 0xFA44E0B4, 0xE9141340, 0x1B7F9043, - 0xCFB5F4A8, 0x3DDE77AB, 0x2E8E845F, 0xDCE5075C, - 0x92A8FC17, 0x60C37F14, 0x73938CE0, 0x81F80FE3, - 0x55326B08, 0xA759E80B, 0xB4091BFF, 0x466298FC, - 0x1871A4D8, 0xEA1A27DB, 0xF94AD42F, 0x0B21572C, - 0xDFEB33C7, 0x2D80B0C4, 0x3ED04330, 0xCCBBC033, - 0xA24BB5A6, 0x502036A5, 0x4370C551, 0xB11B4652, - 0x65D122B9, 0x97BAA1BA, 0x84EA524E, 0x7681D14D, - 0x2892ED69, 0xDAF96E6A, 0xC9A99D9E, 0x3BC21E9D, - 0xEF087A76, 0x1D63F975, 0x0E330A81, 0xFC588982, - 0xB21572C9, 0x407EF1CA, 0x532E023E, 0xA145813D, - 0x758FE5D6, 0x87E466D5, 0x94B49521, 0x66DF1622, - 0x38CC2A06, 0xCAA7A905, 0xD9F75AF1, 0x2B9CD9F2, - 0xFF56BD19, 0x0D3D3E1A, 0x1E6DCDEE, 0xEC064EED, - 0xC38D26C4, 0x31E6A5C7, 0x22B65633, 0xD0DDD530, - 0x0417B1DB, 0xF67C32D8, 0xE52CC12C, 0x1747422F, - 0x49547E0B, 0xBB3FFD08, 0xA86F0EFC, 0x5A048DFF, - 0x8ECEE914, 0x7CA56A17, 0x6FF599E3, 0x9D9E1AE0, - 0xD3D3E1AB, 0x21B862A8, 0x32E8915C, 0xC083125F, - 0x144976B4, 0xE622F5B7, 0xF5720643, 0x07198540, - 0x590AB964, 0xAB613A67, 0xB831C993, 0x4A5A4A90, - 0x9E902E7B, 0x6CFBAD78, 0x7FAB5E8C, 0x8DC0DD8F, - 0xE330A81A, 0x115B2B19, 0x020BD8ED, 0xF0605BEE, - 0x24AA3F05, 0xD6C1BC06, 0xC5914FF2, 0x37FACCF1, - 0x69E9F0D5, 0x9B8273D6, 0x88D28022, 0x7AB90321, - 0xAE7367CA, 0x5C18E4C9, 0x4F48173D, 0xBD23943E, - 0xF36E6F75, 0x0105EC76, 0x12551F82, 0xE03E9C81, - 0x34F4F86A, 0xC69F7B69, 0xD5CF889D, 0x27A40B9E, - 0x79B737BA, 0x8BDCB4B9, 0x988C474D, 0x6AE7C44E, - 0xBE2DA0A5, 0x4C4623A6, 0x5F16D052, 0xAD7D5351, - /* T8_1 */ - 0x00000000, 0x13A29877, 0x274530EE, 0x34E7A899, - 0x4E8A61DC, 0x5D28F9AB, 0x69CF5132, 0x7A6DC945, - 0x9D14C3B8, 0x8EB65BCF, 0xBA51F356, 0xA9F36B21, - 0xD39EA264, 0xC03C3A13, 0xF4DB928A, 0xE7790AFD, - 0x3FC5F181, 0x2C6769F6, 0x1880C16F, 0x0B225918, - 0x714F905D, 0x62ED082A, 0x560AA0B3, 0x45A838C4, - 0xA2D13239, 0xB173AA4E, 0x859402D7, 0x96369AA0, - 0xEC5B53E5, 0xFFF9CB92, 0xCB1E630B, 0xD8BCFB7C, - 0x7F8BE302, 0x6C297B75, 0x58CED3EC, 0x4B6C4B9B, - 0x310182DE, 0x22A31AA9, 0x1644B230, 0x05E62A47, - 0xE29F20BA, 0xF13DB8CD, 0xC5DA1054, 0xD6788823, - 0xAC154166, 0xBFB7D911, 0x8B507188, 0x98F2E9FF, - 0x404E1283, 0x53EC8AF4, 0x670B226D, 0x74A9BA1A, - 0x0EC4735F, 0x1D66EB28, 0x298143B1, 0x3A23DBC6, - 0xDD5AD13B, 0xCEF8494C, 0xFA1FE1D5, 0xE9BD79A2, - 0x93D0B0E7, 0x80722890, 0xB4958009, 0xA737187E, - 0xFF17C604, 0xECB55E73, 0xD852F6EA, 0xCBF06E9D, - 0xB19DA7D8, 0xA23F3FAF, 0x96D89736, 0x857A0F41, - 0x620305BC, 0x71A19DCB, 0x45463552, 0x56E4AD25, - 0x2C896460, 0x3F2BFC17, 0x0BCC548E, 0x186ECCF9, - 0xC0D23785, 0xD370AFF2, 0xE797076B, 0xF4359F1C, - 0x8E585659, 0x9DFACE2E, 0xA91D66B7, 0xBABFFEC0, - 0x5DC6F43D, 0x4E646C4A, 
0x7A83C4D3, 0x69215CA4, - 0x134C95E1, 0x00EE0D96, 0x3409A50F, 0x27AB3D78, - 0x809C2506, 0x933EBD71, 0xA7D915E8, 0xB47B8D9F, - 0xCE1644DA, 0xDDB4DCAD, 0xE9537434, 0xFAF1EC43, - 0x1D88E6BE, 0x0E2A7EC9, 0x3ACDD650, 0x296F4E27, - 0x53028762, 0x40A01F15, 0x7447B78C, 0x67E52FFB, - 0xBF59D487, 0xACFB4CF0, 0x981CE469, 0x8BBE7C1E, - 0xF1D3B55B, 0xE2712D2C, 0xD69685B5, 0xC5341DC2, - 0x224D173F, 0x31EF8F48, 0x050827D1, 0x16AABFA6, - 0x6CC776E3, 0x7F65EE94, 0x4B82460D, 0x5820DE7A, - 0xFBC3FAF9, 0xE861628E, 0xDC86CA17, 0xCF245260, - 0xB5499B25, 0xA6EB0352, 0x920CABCB, 0x81AE33BC, - 0x66D73941, 0x7575A136, 0x419209AF, 0x523091D8, - 0x285D589D, 0x3BFFC0EA, 0x0F186873, 0x1CBAF004, - 0xC4060B78, 0xD7A4930F, 0xE3433B96, 0xF0E1A3E1, - 0x8A8C6AA4, 0x992EF2D3, 0xADC95A4A, 0xBE6BC23D, - 0x5912C8C0, 0x4AB050B7, 0x7E57F82E, 0x6DF56059, - 0x1798A91C, 0x043A316B, 0x30DD99F2, 0x237F0185, - 0x844819FB, 0x97EA818C, 0xA30D2915, 0xB0AFB162, - 0xCAC27827, 0xD960E050, 0xED8748C9, 0xFE25D0BE, - 0x195CDA43, 0x0AFE4234, 0x3E19EAAD, 0x2DBB72DA, - 0x57D6BB9F, 0x447423E8, 0x70938B71, 0x63311306, - 0xBB8DE87A, 0xA82F700D, 0x9CC8D894, 0x8F6A40E3, - 0xF50789A6, 0xE6A511D1, 0xD242B948, 0xC1E0213F, - 0x26992BC2, 0x353BB3B5, 0x01DC1B2C, 0x127E835B, - 0x68134A1E, 0x7BB1D269, 0x4F567AF0, 0x5CF4E287, - 0x04D43CFD, 0x1776A48A, 0x23910C13, 0x30339464, - 0x4A5E5D21, 0x59FCC556, 0x6D1B6DCF, 0x7EB9F5B8, - 0x99C0FF45, 0x8A626732, 0xBE85CFAB, 0xAD2757DC, - 0xD74A9E99, 0xC4E806EE, 0xF00FAE77, 0xE3AD3600, - 0x3B11CD7C, 0x28B3550B, 0x1C54FD92, 0x0FF665E5, - 0x759BACA0, 0x663934D7, 0x52DE9C4E, 0x417C0439, - 0xA6050EC4, 0xB5A796B3, 0x81403E2A, 0x92E2A65D, - 0xE88F6F18, 0xFB2DF76F, 0xCFCA5FF6, 0xDC68C781, - 0x7B5FDFFF, 0x68FD4788, 0x5C1AEF11, 0x4FB87766, - 0x35D5BE23, 0x26772654, 0x12908ECD, 0x013216BA, - 0xE64B1C47, 0xF5E98430, 0xC10E2CA9, 0xD2ACB4DE, - 0xA8C17D9B, 0xBB63E5EC, 0x8F844D75, 0x9C26D502, - 0x449A2E7E, 0x5738B609, 0x63DF1E90, 0x707D86E7, - 0x0A104FA2, 0x19B2D7D5, 0x2D557F4C, 0x3EF7E73B, - 0xD98EEDC6, 0xCA2C75B1, 0xFECBDD28, 0xED69455F, - 0x97048C1A, 0x84A6146D, 0xB041BCF4, 0xA3E32483, - /* T8_2 */ - 0x00000000, 0xA541927E, 0x4F6F520D, 0xEA2EC073, - 0x9EDEA41A, 0x3B9F3664, 0xD1B1F617, 0x74F06469, - 0x38513EC5, 0x9D10ACBB, 0x773E6CC8, 0xD27FFEB6, - 0xA68F9ADF, 0x03CE08A1, 0xE9E0C8D2, 0x4CA15AAC, - 0x70A27D8A, 0xD5E3EFF4, 0x3FCD2F87, 0x9A8CBDF9, - 0xEE7CD990, 0x4B3D4BEE, 0xA1138B9D, 0x045219E3, - 0x48F3434F, 0xEDB2D131, 0x079C1142, 0xA2DD833C, - 0xD62DE755, 0x736C752B, 0x9942B558, 0x3C032726, - 0xE144FB14, 0x4405696A, 0xAE2BA919, 0x0B6A3B67, - 0x7F9A5F0E, 0xDADBCD70, 0x30F50D03, 0x95B49F7D, - 0xD915C5D1, 0x7C5457AF, 0x967A97DC, 0x333B05A2, - 0x47CB61CB, 0xE28AF3B5, 0x08A433C6, 0xADE5A1B8, - 0x91E6869E, 0x34A714E0, 0xDE89D493, 0x7BC846ED, - 0x0F382284, 0xAA79B0FA, 0x40577089, 0xE516E2F7, - 0xA9B7B85B, 0x0CF62A25, 0xE6D8EA56, 0x43997828, - 0x37691C41, 0x92288E3F, 0x78064E4C, 0xDD47DC32, - 0xC76580D9, 0x622412A7, 0x880AD2D4, 0x2D4B40AA, - 0x59BB24C3, 0xFCFAB6BD, 0x16D476CE, 0xB395E4B0, - 0xFF34BE1C, 0x5A752C62, 0xB05BEC11, 0x151A7E6F, - 0x61EA1A06, 0xC4AB8878, 0x2E85480B, 0x8BC4DA75, - 0xB7C7FD53, 0x12866F2D, 0xF8A8AF5E, 0x5DE93D20, - 0x29195949, 0x8C58CB37, 0x66760B44, 0xC337993A, - 0x8F96C396, 0x2AD751E8, 0xC0F9919B, 0x65B803E5, - 0x1148678C, 0xB409F5F2, 0x5E273581, 0xFB66A7FF, - 0x26217BCD, 0x8360E9B3, 0x694E29C0, 0xCC0FBBBE, - 0xB8FFDFD7, 0x1DBE4DA9, 0xF7908DDA, 0x52D11FA4, - 0x1E704508, 0xBB31D776, 0x511F1705, 0xF45E857B, - 0x80AEE112, 0x25EF736C, 0xCFC1B31F, 0x6A802161, - 0x56830647, 0xF3C29439, 0x19EC544A, 0xBCADC634, - 0xC85DA25D, 
0x6D1C3023, 0x8732F050, 0x2273622E, - 0x6ED23882, 0xCB93AAFC, 0x21BD6A8F, 0x84FCF8F1, - 0xF00C9C98, 0x554D0EE6, 0xBF63CE95, 0x1A225CEB, - 0x8B277743, 0x2E66E53D, 0xC448254E, 0x6109B730, - 0x15F9D359, 0xB0B84127, 0x5A968154, 0xFFD7132A, - 0xB3764986, 0x1637DBF8, 0xFC191B8B, 0x595889F5, - 0x2DA8ED9C, 0x88E97FE2, 0x62C7BF91, 0xC7862DEF, - 0xFB850AC9, 0x5EC498B7, 0xB4EA58C4, 0x11ABCABA, - 0x655BAED3, 0xC01A3CAD, 0x2A34FCDE, 0x8F756EA0, - 0xC3D4340C, 0x6695A672, 0x8CBB6601, 0x29FAF47F, - 0x5D0A9016, 0xF84B0268, 0x1265C21B, 0xB7245065, - 0x6A638C57, 0xCF221E29, 0x250CDE5A, 0x804D4C24, - 0xF4BD284D, 0x51FCBA33, 0xBBD27A40, 0x1E93E83E, - 0x5232B292, 0xF77320EC, 0x1D5DE09F, 0xB81C72E1, - 0xCCEC1688, 0x69AD84F6, 0x83834485, 0x26C2D6FB, - 0x1AC1F1DD, 0xBF8063A3, 0x55AEA3D0, 0xF0EF31AE, - 0x841F55C7, 0x215EC7B9, 0xCB7007CA, 0x6E3195B4, - 0x2290CF18, 0x87D15D66, 0x6DFF9D15, 0xC8BE0F6B, - 0xBC4E6B02, 0x190FF97C, 0xF321390F, 0x5660AB71, - 0x4C42F79A, 0xE90365E4, 0x032DA597, 0xA66C37E9, - 0xD29C5380, 0x77DDC1FE, 0x9DF3018D, 0x38B293F3, - 0x7413C95F, 0xD1525B21, 0x3B7C9B52, 0x9E3D092C, - 0xEACD6D45, 0x4F8CFF3B, 0xA5A23F48, 0x00E3AD36, - 0x3CE08A10, 0x99A1186E, 0x738FD81D, 0xD6CE4A63, - 0xA23E2E0A, 0x077FBC74, 0xED517C07, 0x4810EE79, - 0x04B1B4D5, 0xA1F026AB, 0x4BDEE6D8, 0xEE9F74A6, - 0x9A6F10CF, 0x3F2E82B1, 0xD50042C2, 0x7041D0BC, - 0xAD060C8E, 0x08479EF0, 0xE2695E83, 0x4728CCFD, - 0x33D8A894, 0x96993AEA, 0x7CB7FA99, 0xD9F668E7, - 0x9557324B, 0x3016A035, 0xDA386046, 0x7F79F238, - 0x0B899651, 0xAEC8042F, 0x44E6C45C, 0xE1A75622, - 0xDDA47104, 0x78E5E37A, 0x92CB2309, 0x378AB177, - 0x437AD51E, 0xE63B4760, 0x0C158713, 0xA954156D, - 0xE5F54FC1, 0x40B4DDBF, 0xAA9A1DCC, 0x0FDB8FB2, - 0x7B2BEBDB, 0xDE6A79A5, 0x3444B9D6, 0x91052BA8, - /* T8_3 */ - 0x00000000, 0xDD45AAB8, 0xBF672381, 0x62228939, - 0x7B2231F3, 0xA6679B4B, 0xC4451272, 0x1900B8CA, - 0xF64463E6, 0x2B01C95E, 0x49234067, 0x9466EADF, - 0x8D665215, 0x5023F8AD, 0x32017194, 0xEF44DB2C, - 0xE964B13D, 0x34211B85, 0x560392BC, 0x8B463804, - 0x924680CE, 0x4F032A76, 0x2D21A34F, 0xF06409F7, - 0x1F20D2DB, 0xC2657863, 0xA047F15A, 0x7D025BE2, - 0x6402E328, 0xB9474990, 0xDB65C0A9, 0x06206A11, - 0xD725148B, 0x0A60BE33, 0x6842370A, 0xB5079DB2, - 0xAC072578, 0x71428FC0, 0x136006F9, 0xCE25AC41, - 0x2161776D, 0xFC24DDD5, 0x9E0654EC, 0x4343FE54, - 0x5A43469E, 0x8706EC26, 0xE524651F, 0x3861CFA7, - 0x3E41A5B6, 0xE3040F0E, 0x81268637, 0x5C632C8F, - 0x45639445, 0x98263EFD, 0xFA04B7C4, 0x27411D7C, - 0xC805C650, 0x15406CE8, 0x7762E5D1, 0xAA274F69, - 0xB327F7A3, 0x6E625D1B, 0x0C40D422, 0xD1057E9A, - 0xABA65FE7, 0x76E3F55F, 0x14C17C66, 0xC984D6DE, - 0xD0846E14, 0x0DC1C4AC, 0x6FE34D95, 0xB2A6E72D, - 0x5DE23C01, 0x80A796B9, 0xE2851F80, 0x3FC0B538, - 0x26C00DF2, 0xFB85A74A, 0x99A72E73, 0x44E284CB, - 0x42C2EEDA, 0x9F874462, 0xFDA5CD5B, 0x20E067E3, - 0x39E0DF29, 0xE4A57591, 0x8687FCA8, 0x5BC25610, - 0xB4868D3C, 0x69C32784, 0x0BE1AEBD, 0xD6A40405, - 0xCFA4BCCF, 0x12E11677, 0x70C39F4E, 0xAD8635F6, - 0x7C834B6C, 0xA1C6E1D4, 0xC3E468ED, 0x1EA1C255, - 0x07A17A9F, 0xDAE4D027, 0xB8C6591E, 0x6583F3A6, - 0x8AC7288A, 0x57828232, 0x35A00B0B, 0xE8E5A1B3, - 0xF1E51979, 0x2CA0B3C1, 0x4E823AF8, 0x93C79040, - 0x95E7FA51, 0x48A250E9, 0x2A80D9D0, 0xF7C57368, - 0xEEC5CBA2, 0x3380611A, 0x51A2E823, 0x8CE7429B, - 0x63A399B7, 0xBEE6330F, 0xDCC4BA36, 0x0181108E, - 0x1881A844, 0xC5C402FC, 0xA7E68BC5, 0x7AA3217D, - 0x52A0C93F, 0x8FE56387, 0xEDC7EABE, 0x30824006, - 0x2982F8CC, 0xF4C75274, 0x96E5DB4D, 0x4BA071F5, - 0xA4E4AAD9, 0x79A10061, 0x1B838958, 0xC6C623E0, - 0xDFC69B2A, 0x02833192, 0x60A1B8AB, 0xBDE41213, - 
0xBBC47802, 0x6681D2BA, 0x04A35B83, 0xD9E6F13B, - 0xC0E649F1, 0x1DA3E349, 0x7F816A70, 0xA2C4C0C8, - 0x4D801BE4, 0x90C5B15C, 0xF2E73865, 0x2FA292DD, - 0x36A22A17, 0xEBE780AF, 0x89C50996, 0x5480A32E, - 0x8585DDB4, 0x58C0770C, 0x3AE2FE35, 0xE7A7548D, - 0xFEA7EC47, 0x23E246FF, 0x41C0CFC6, 0x9C85657E, - 0x73C1BE52, 0xAE8414EA, 0xCCA69DD3, 0x11E3376B, - 0x08E38FA1, 0xD5A62519, 0xB784AC20, 0x6AC10698, - 0x6CE16C89, 0xB1A4C631, 0xD3864F08, 0x0EC3E5B0, - 0x17C35D7A, 0xCA86F7C2, 0xA8A47EFB, 0x75E1D443, - 0x9AA50F6F, 0x47E0A5D7, 0x25C22CEE, 0xF8878656, - 0xE1873E9C, 0x3CC29424, 0x5EE01D1D, 0x83A5B7A5, - 0xF90696D8, 0x24433C60, 0x4661B559, 0x9B241FE1, - 0x8224A72B, 0x5F610D93, 0x3D4384AA, 0xE0062E12, - 0x0F42F53E, 0xD2075F86, 0xB025D6BF, 0x6D607C07, - 0x7460C4CD, 0xA9256E75, 0xCB07E74C, 0x16424DF4, - 0x106227E5, 0xCD278D5D, 0xAF050464, 0x7240AEDC, - 0x6B401616, 0xB605BCAE, 0xD4273597, 0x09629F2F, - 0xE6264403, 0x3B63EEBB, 0x59416782, 0x8404CD3A, - 0x9D0475F0, 0x4041DF48, 0x22635671, 0xFF26FCC9, - 0x2E238253, 0xF36628EB, 0x9144A1D2, 0x4C010B6A, - 0x5501B3A0, 0x88441918, 0xEA669021, 0x37233A99, - 0xD867E1B5, 0x05224B0D, 0x6700C234, 0xBA45688C, - 0xA345D046, 0x7E007AFE, 0x1C22F3C7, 0xC167597F, - 0xC747336E, 0x1A0299D6, 0x782010EF, 0xA565BA57, - 0xBC65029D, 0x6120A825, 0x0302211C, 0xDE478BA4, - 0x31035088, 0xEC46FA30, 0x8E647309, 0x5321D9B1, - 0x4A21617B, 0x9764CBC3, 0xF54642FA, 0x2803E842, - /* T8_4 */ - 0x00000000, 0x38116FAC, 0x7022DF58, 0x4833B0F4, - 0xE045BEB0, 0xD854D11C, 0x906761E8, 0xA8760E44, - 0xC5670B91, 0xFD76643D, 0xB545D4C9, 0x8D54BB65, - 0x2522B521, 0x1D33DA8D, 0x55006A79, 0x6D1105D5, - 0x8F2261D3, 0xB7330E7F, 0xFF00BE8B, 0xC711D127, - 0x6F67DF63, 0x5776B0CF, 0x1F45003B, 0x27546F97, - 0x4A456A42, 0x725405EE, 0x3A67B51A, 0x0276DAB6, - 0xAA00D4F2, 0x9211BB5E, 0xDA220BAA, 0xE2336406, - 0x1BA8B557, 0x23B9DAFB, 0x6B8A6A0F, 0x539B05A3, - 0xFBED0BE7, 0xC3FC644B, 0x8BCFD4BF, 0xB3DEBB13, - 0xDECFBEC6, 0xE6DED16A, 0xAEED619E, 0x96FC0E32, - 0x3E8A0076, 0x069B6FDA, 0x4EA8DF2E, 0x76B9B082, - 0x948AD484, 0xAC9BBB28, 0xE4A80BDC, 0xDCB96470, - 0x74CF6A34, 0x4CDE0598, 0x04EDB56C, 0x3CFCDAC0, - 0x51EDDF15, 0x69FCB0B9, 0x21CF004D, 0x19DE6FE1, - 0xB1A861A5, 0x89B90E09, 0xC18ABEFD, 0xF99BD151, - 0x37516AAE, 0x0F400502, 0x4773B5F6, 0x7F62DA5A, - 0xD714D41E, 0xEF05BBB2, 0xA7360B46, 0x9F2764EA, - 0xF236613F, 0xCA270E93, 0x8214BE67, 0xBA05D1CB, - 0x1273DF8F, 0x2A62B023, 0x625100D7, 0x5A406F7B, - 0xB8730B7D, 0x806264D1, 0xC851D425, 0xF040BB89, - 0x5836B5CD, 0x6027DA61, 0x28146A95, 0x10050539, - 0x7D1400EC, 0x45056F40, 0x0D36DFB4, 0x3527B018, - 0x9D51BE5C, 0xA540D1F0, 0xED736104, 0xD5620EA8, - 0x2CF9DFF9, 0x14E8B055, 0x5CDB00A1, 0x64CA6F0D, - 0xCCBC6149, 0xF4AD0EE5, 0xBC9EBE11, 0x848FD1BD, - 0xE99ED468, 0xD18FBBC4, 0x99BC0B30, 0xA1AD649C, - 0x09DB6AD8, 0x31CA0574, 0x79F9B580, 0x41E8DA2C, - 0xA3DBBE2A, 0x9BCAD186, 0xD3F96172, 0xEBE80EDE, - 0x439E009A, 0x7B8F6F36, 0x33BCDFC2, 0x0BADB06E, - 0x66BCB5BB, 0x5EADDA17, 0x169E6AE3, 0x2E8F054F, - 0x86F90B0B, 0xBEE864A7, 0xF6DBD453, 0xCECABBFF, - 0x6EA2D55C, 0x56B3BAF0, 0x1E800A04, 0x269165A8, - 0x8EE76BEC, 0xB6F60440, 0xFEC5B4B4, 0xC6D4DB18, - 0xABC5DECD, 0x93D4B161, 0xDBE70195, 0xE3F66E39, - 0x4B80607D, 0x73910FD1, 0x3BA2BF25, 0x03B3D089, - 0xE180B48F, 0xD991DB23, 0x91A26BD7, 0xA9B3047B, - 0x01C50A3F, 0x39D46593, 0x71E7D567, 0x49F6BACB, - 0x24E7BF1E, 0x1CF6D0B2, 0x54C56046, 0x6CD40FEA, - 0xC4A201AE, 0xFCB36E02, 0xB480DEF6, 0x8C91B15A, - 0x750A600B, 0x4D1B0FA7, 0x0528BF53, 0x3D39D0FF, - 0x954FDEBB, 0xAD5EB117, 0xE56D01E3, 0xDD7C6E4F, - 0xB06D6B9A, 0x887C0436, 0xC04FB4C2, 
0xF85EDB6E, - 0x5028D52A, 0x6839BA86, 0x200A0A72, 0x181B65DE, - 0xFA2801D8, 0xC2396E74, 0x8A0ADE80, 0xB21BB12C, - 0x1A6DBF68, 0x227CD0C4, 0x6A4F6030, 0x525E0F9C, - 0x3F4F0A49, 0x075E65E5, 0x4F6DD511, 0x777CBABD, - 0xDF0AB4F9, 0xE71BDB55, 0xAF286BA1, 0x9739040D, - 0x59F3BFF2, 0x61E2D05E, 0x29D160AA, 0x11C00F06, - 0xB9B60142, 0x81A76EEE, 0xC994DE1A, 0xF185B1B6, - 0x9C94B463, 0xA485DBCF, 0xECB66B3B, 0xD4A70497, - 0x7CD10AD3, 0x44C0657F, 0x0CF3D58B, 0x34E2BA27, - 0xD6D1DE21, 0xEEC0B18D, 0xA6F30179, 0x9EE26ED5, - 0x36946091, 0x0E850F3D, 0x46B6BFC9, 0x7EA7D065, - 0x13B6D5B0, 0x2BA7BA1C, 0x63940AE8, 0x5B856544, - 0xF3F36B00, 0xCBE204AC, 0x83D1B458, 0xBBC0DBF4, - 0x425B0AA5, 0x7A4A6509, 0x3279D5FD, 0x0A68BA51, - 0xA21EB415, 0x9A0FDBB9, 0xD23C6B4D, 0xEA2D04E1, - 0x873C0134, 0xBF2D6E98, 0xF71EDE6C, 0xCF0FB1C0, - 0x6779BF84, 0x5F68D028, 0x175B60DC, 0x2F4A0F70, - 0xCD796B76, 0xF56804DA, 0xBD5BB42E, 0x854ADB82, - 0x2D3CD5C6, 0x152DBA6A, 0x5D1E0A9E, 0x650F6532, - 0x081E60E7, 0x300F0F4B, 0x783CBFBF, 0x402DD013, - 0xE85BDE57, 0xD04AB1FB, 0x9879010F, 0xA0686EA3, - /* T8_5 */ - 0x00000000, 0xEF306B19, 0xDB8CA0C3, 0x34BCCBDA, - 0xB2F53777, 0x5DC55C6E, 0x697997B4, 0x8649FCAD, - 0x6006181F, 0x8F367306, 0xBB8AB8DC, 0x54BAD3C5, - 0xD2F32F68, 0x3DC34471, 0x097F8FAB, 0xE64FE4B2, - 0xC00C303E, 0x2F3C5B27, 0x1B8090FD, 0xF4B0FBE4, - 0x72F90749, 0x9DC96C50, 0xA975A78A, 0x4645CC93, - 0xA00A2821, 0x4F3A4338, 0x7B8688E2, 0x94B6E3FB, - 0x12FF1F56, 0xFDCF744F, 0xC973BF95, 0x2643D48C, - 0x85F4168D, 0x6AC47D94, 0x5E78B64E, 0xB148DD57, - 0x370121FA, 0xD8314AE3, 0xEC8D8139, 0x03BDEA20, - 0xE5F20E92, 0x0AC2658B, 0x3E7EAE51, 0xD14EC548, - 0x570739E5, 0xB83752FC, 0x8C8B9926, 0x63BBF23F, - 0x45F826B3, 0xAAC84DAA, 0x9E748670, 0x7144ED69, - 0xF70D11C4, 0x183D7ADD, 0x2C81B107, 0xC3B1DA1E, - 0x25FE3EAC, 0xCACE55B5, 0xFE729E6F, 0x1142F576, - 0x970B09DB, 0x783B62C2, 0x4C87A918, 0xA3B7C201, - 0x0E045BEB, 0xE13430F2, 0xD588FB28, 0x3AB89031, - 0xBCF16C9C, 0x53C10785, 0x677DCC5F, 0x884DA746, - 0x6E0243F4, 0x813228ED, 0xB58EE337, 0x5ABE882E, - 0xDCF77483, 0x33C71F9A, 0x077BD440, 0xE84BBF59, - 0xCE086BD5, 0x213800CC, 0x1584CB16, 0xFAB4A00F, - 0x7CFD5CA2, 0x93CD37BB, 0xA771FC61, 0x48419778, - 0xAE0E73CA, 0x413E18D3, 0x7582D309, 0x9AB2B810, - 0x1CFB44BD, 0xF3CB2FA4, 0xC777E47E, 0x28478F67, - 0x8BF04D66, 0x64C0267F, 0x507CEDA5, 0xBF4C86BC, - 0x39057A11, 0xD6351108, 0xE289DAD2, 0x0DB9B1CB, - 0xEBF65579, 0x04C63E60, 0x307AF5BA, 0xDF4A9EA3, - 0x5903620E, 0xB6330917, 0x828FC2CD, 0x6DBFA9D4, - 0x4BFC7D58, 0xA4CC1641, 0x9070DD9B, 0x7F40B682, - 0xF9094A2F, 0x16392136, 0x2285EAEC, 0xCDB581F5, - 0x2BFA6547, 0xC4CA0E5E, 0xF076C584, 0x1F46AE9D, - 0x990F5230, 0x763F3929, 0x4283F2F3, 0xADB399EA, - 0x1C08B7D6, 0xF338DCCF, 0xC7841715, 0x28B47C0C, - 0xAEFD80A1, 0x41CDEBB8, 0x75712062, 0x9A414B7B, - 0x7C0EAFC9, 0x933EC4D0, 0xA7820F0A, 0x48B26413, - 0xCEFB98BE, 0x21CBF3A7, 0x1577387D, 0xFA475364, - 0xDC0487E8, 0x3334ECF1, 0x0788272B, 0xE8B84C32, - 0x6EF1B09F, 0x81C1DB86, 0xB57D105C, 0x5A4D7B45, - 0xBC029FF7, 0x5332F4EE, 0x678E3F34, 0x88BE542D, - 0x0EF7A880, 0xE1C7C399, 0xD57B0843, 0x3A4B635A, - 0x99FCA15B, 0x76CCCA42, 0x42700198, 0xAD406A81, - 0x2B09962C, 0xC439FD35, 0xF08536EF, 0x1FB55DF6, - 0xF9FAB944, 0x16CAD25D, 0x22761987, 0xCD46729E, - 0x4B0F8E33, 0xA43FE52A, 0x90832EF0, 0x7FB345E9, - 0x59F09165, 0xB6C0FA7C, 0x827C31A6, 0x6D4C5ABF, - 0xEB05A612, 0x0435CD0B, 0x308906D1, 0xDFB96DC8, - 0x39F6897A, 0xD6C6E263, 0xE27A29B9, 0x0D4A42A0, - 0x8B03BE0D, 0x6433D514, 0x508F1ECE, 0xBFBF75D7, - 0x120CEC3D, 0xFD3C8724, 0xC9804CFE, 0x26B027E7, - 0xA0F9DB4A, 0x4FC9B053, 
0x7B757B89, 0x94451090, - 0x720AF422, 0x9D3A9F3B, 0xA98654E1, 0x46B63FF8, - 0xC0FFC355, 0x2FCFA84C, 0x1B736396, 0xF443088F, - 0xD200DC03, 0x3D30B71A, 0x098C7CC0, 0xE6BC17D9, - 0x60F5EB74, 0x8FC5806D, 0xBB794BB7, 0x544920AE, - 0xB206C41C, 0x5D36AF05, 0x698A64DF, 0x86BA0FC6, - 0x00F3F36B, 0xEFC39872, 0xDB7F53A8, 0x344F38B1, - 0x97F8FAB0, 0x78C891A9, 0x4C745A73, 0xA344316A, - 0x250DCDC7, 0xCA3DA6DE, 0xFE816D04, 0x11B1061D, - 0xF7FEE2AF, 0x18CE89B6, 0x2C72426C, 0xC3422975, - 0x450BD5D8, 0xAA3BBEC1, 0x9E87751B, 0x71B71E02, - 0x57F4CA8E, 0xB8C4A197, 0x8C786A4D, 0x63480154, - 0xE501FDF9, 0x0A3196E0, 0x3E8D5D3A, 0xD1BD3623, - 0x37F2D291, 0xD8C2B988, 0xEC7E7252, 0x034E194B, - 0x8507E5E6, 0x6A378EFF, 0x5E8B4525, 0xB1BB2E3C, - /* T8_6 */ - 0x00000000, 0x68032CC8, 0xD0065990, 0xB8057558, - 0xA5E0C5D1, 0xCDE3E919, 0x75E69C41, 0x1DE5B089, - 0x4E2DFD53, 0x262ED19B, 0x9E2BA4C3, 0xF628880B, - 0xEBCD3882, 0x83CE144A, 0x3BCB6112, 0x53C84DDA, - 0x9C5BFAA6, 0xF458D66E, 0x4C5DA336, 0x245E8FFE, - 0x39BB3F77, 0x51B813BF, 0xE9BD66E7, 0x81BE4A2F, - 0xD27607F5, 0xBA752B3D, 0x02705E65, 0x6A7372AD, - 0x7796C224, 0x1F95EEEC, 0xA7909BB4, 0xCF93B77C, - 0x3D5B83BD, 0x5558AF75, 0xED5DDA2D, 0x855EF6E5, - 0x98BB466C, 0xF0B86AA4, 0x48BD1FFC, 0x20BE3334, - 0x73767EEE, 0x1B755226, 0xA370277E, 0xCB730BB6, - 0xD696BB3F, 0xBE9597F7, 0x0690E2AF, 0x6E93CE67, - 0xA100791B, 0xC90355D3, 0x7106208B, 0x19050C43, - 0x04E0BCCA, 0x6CE39002, 0xD4E6E55A, 0xBCE5C992, - 0xEF2D8448, 0x872EA880, 0x3F2BDDD8, 0x5728F110, - 0x4ACD4199, 0x22CE6D51, 0x9ACB1809, 0xF2C834C1, - 0x7AB7077A, 0x12B42BB2, 0xAAB15EEA, 0xC2B27222, - 0xDF57C2AB, 0xB754EE63, 0x0F519B3B, 0x6752B7F3, - 0x349AFA29, 0x5C99D6E1, 0xE49CA3B9, 0x8C9F8F71, - 0x917A3FF8, 0xF9791330, 0x417C6668, 0x297F4AA0, - 0xE6ECFDDC, 0x8EEFD114, 0x36EAA44C, 0x5EE98884, - 0x430C380D, 0x2B0F14C5, 0x930A619D, 0xFB094D55, - 0xA8C1008F, 0xC0C22C47, 0x78C7591F, 0x10C475D7, - 0x0D21C55E, 0x6522E996, 0xDD279CCE, 0xB524B006, - 0x47EC84C7, 0x2FEFA80F, 0x97EADD57, 0xFFE9F19F, - 0xE20C4116, 0x8A0F6DDE, 0x320A1886, 0x5A09344E, - 0x09C17994, 0x61C2555C, 0xD9C72004, 0xB1C40CCC, - 0xAC21BC45, 0xC422908D, 0x7C27E5D5, 0x1424C91D, - 0xDBB77E61, 0xB3B452A9, 0x0BB127F1, 0x63B20B39, - 0x7E57BBB0, 0x16549778, 0xAE51E220, 0xC652CEE8, - 0x959A8332, 0xFD99AFFA, 0x459CDAA2, 0x2D9FF66A, - 0x307A46E3, 0x58796A2B, 0xE07C1F73, 0x887F33BB, - 0xF56E0EF4, 0x9D6D223C, 0x25685764, 0x4D6B7BAC, - 0x508ECB25, 0x388DE7ED, 0x808892B5, 0xE88BBE7D, - 0xBB43F3A7, 0xD340DF6F, 0x6B45AA37, 0x034686FF, - 0x1EA33676, 0x76A01ABE, 0xCEA56FE6, 0xA6A6432E, - 0x6935F452, 0x0136D89A, 0xB933ADC2, 0xD130810A, - 0xCCD53183, 0xA4D61D4B, 0x1CD36813, 0x74D044DB, - 0x27180901, 0x4F1B25C9, 0xF71E5091, 0x9F1D7C59, - 0x82F8CCD0, 0xEAFBE018, 0x52FE9540, 0x3AFDB988, - 0xC8358D49, 0xA036A181, 0x1833D4D9, 0x7030F811, - 0x6DD54898, 0x05D66450, 0xBDD31108, 0xD5D03DC0, - 0x8618701A, 0xEE1B5CD2, 0x561E298A, 0x3E1D0542, - 0x23F8B5CB, 0x4BFB9903, 0xF3FEEC5B, 0x9BFDC093, - 0x546E77EF, 0x3C6D5B27, 0x84682E7F, 0xEC6B02B7, - 0xF18EB23E, 0x998D9EF6, 0x2188EBAE, 0x498BC766, - 0x1A438ABC, 0x7240A674, 0xCA45D32C, 0xA246FFE4, - 0xBFA34F6D, 0xD7A063A5, 0x6FA516FD, 0x07A63A35, - 0x8FD9098E, 0xE7DA2546, 0x5FDF501E, 0x37DC7CD6, - 0x2A39CC5F, 0x423AE097, 0xFA3F95CF, 0x923CB907, - 0xC1F4F4DD, 0xA9F7D815, 0x11F2AD4D, 0x79F18185, - 0x6414310C, 0x0C171DC4, 0xB412689C, 0xDC114454, - 0x1382F328, 0x7B81DFE0, 0xC384AAB8, 0xAB878670, - 0xB66236F9, 0xDE611A31, 0x66646F69, 0x0E6743A1, - 0x5DAF0E7B, 0x35AC22B3, 0x8DA957EB, 0xE5AA7B23, - 0xF84FCBAA, 0x904CE762, 0x2849923A, 0x404ABEF2, - 0xB2828A33, 
0xDA81A6FB, 0x6284D3A3, 0x0A87FF6B, - 0x17624FE2, 0x7F61632A, 0xC7641672, 0xAF673ABA, - 0xFCAF7760, 0x94AC5BA8, 0x2CA92EF0, 0x44AA0238, - 0x594FB2B1, 0x314C9E79, 0x8949EB21, 0xE14AC7E9, - 0x2ED97095, 0x46DA5C5D, 0xFEDF2905, 0x96DC05CD, - 0x8B39B544, 0xE33A998C, 0x5B3FECD4, 0x333CC01C, - 0x60F48DC6, 0x08F7A10E, 0xB0F2D456, 0xD8F1F89E, - 0xC5144817, 0xAD1764DF, 0x15121187, 0x7D113D4F, - /* T8_7 */ - 0x00000000, 0x493C7D27, 0x9278FA4E, 0xDB448769, - 0x211D826D, 0x6821FF4A, 0xB3657823, 0xFA590504, - 0x423B04DA, 0x0B0779FD, 0xD043FE94, 0x997F83B3, - 0x632686B7, 0x2A1AFB90, 0xF15E7CF9, 0xB86201DE, - 0x847609B4, 0xCD4A7493, 0x160EF3FA, 0x5F328EDD, - 0xA56B8BD9, 0xEC57F6FE, 0x37137197, 0x7E2F0CB0, - 0xC64D0D6E, 0x8F717049, 0x5435F720, 0x1D098A07, - 0xE7508F03, 0xAE6CF224, 0x7528754D, 0x3C14086A, - 0x0D006599, 0x443C18BE, 0x9F789FD7, 0xD644E2F0, - 0x2C1DE7F4, 0x65219AD3, 0xBE651DBA, 0xF759609D, - 0x4F3B6143, 0x06071C64, 0xDD439B0D, 0x947FE62A, - 0x6E26E32E, 0x271A9E09, 0xFC5E1960, 0xB5626447, - 0x89766C2D, 0xC04A110A, 0x1B0E9663, 0x5232EB44, - 0xA86BEE40, 0xE1579367, 0x3A13140E, 0x732F6929, - 0xCB4D68F7, 0x827115D0, 0x593592B9, 0x1009EF9E, - 0xEA50EA9A, 0xA36C97BD, 0x782810D4, 0x31146DF3, - 0x1A00CB32, 0x533CB615, 0x8878317C, 0xC1444C5B, - 0x3B1D495F, 0x72213478, 0xA965B311, 0xE059CE36, - 0x583BCFE8, 0x1107B2CF, 0xCA4335A6, 0x837F4881, - 0x79264D85, 0x301A30A2, 0xEB5EB7CB, 0xA262CAEC, - 0x9E76C286, 0xD74ABFA1, 0x0C0E38C8, 0x453245EF, - 0xBF6B40EB, 0xF6573DCC, 0x2D13BAA5, 0x642FC782, - 0xDC4DC65C, 0x9571BB7B, 0x4E353C12, 0x07094135, - 0xFD504431, 0xB46C3916, 0x6F28BE7F, 0x2614C358, - 0x1700AEAB, 0x5E3CD38C, 0x857854E5, 0xCC4429C2, - 0x361D2CC6, 0x7F2151E1, 0xA465D688, 0xED59ABAF, - 0x553BAA71, 0x1C07D756, 0xC743503F, 0x8E7F2D18, - 0x7426281C, 0x3D1A553B, 0xE65ED252, 0xAF62AF75, - 0x9376A71F, 0xDA4ADA38, 0x010E5D51, 0x48322076, - 0xB26B2572, 0xFB575855, 0x2013DF3C, 0x692FA21B, - 0xD14DA3C5, 0x9871DEE2, 0x4335598B, 0x0A0924AC, - 0xF05021A8, 0xB96C5C8F, 0x6228DBE6, 0x2B14A6C1, - 0x34019664, 0x7D3DEB43, 0xA6796C2A, 0xEF45110D, - 0x151C1409, 0x5C20692E, 0x8764EE47, 0xCE589360, - 0x763A92BE, 0x3F06EF99, 0xE44268F0, 0xAD7E15D7, - 0x572710D3, 0x1E1B6DF4, 0xC55FEA9D, 0x8C6397BA, - 0xB0779FD0, 0xF94BE2F7, 0x220F659E, 0x6B3318B9, - 0x916A1DBD, 0xD856609A, 0x0312E7F3, 0x4A2E9AD4, - 0xF24C9B0A, 0xBB70E62D, 0x60346144, 0x29081C63, - 0xD3511967, 0x9A6D6440, 0x4129E329, 0x08159E0E, - 0x3901F3FD, 0x703D8EDA, 0xAB7909B3, 0xE2457494, - 0x181C7190, 0x51200CB7, 0x8A648BDE, 0xC358F6F9, - 0x7B3AF727, 0x32068A00, 0xE9420D69, 0xA07E704E, - 0x5A27754A, 0x131B086D, 0xC85F8F04, 0x8163F223, - 0xBD77FA49, 0xF44B876E, 0x2F0F0007, 0x66337D20, - 0x9C6A7824, 0xD5560503, 0x0E12826A, 0x472EFF4D, - 0xFF4CFE93, 0xB67083B4, 0x6D3404DD, 0x240879FA, - 0xDE517CFE, 0x976D01D9, 0x4C2986B0, 0x0515FB97, - 0x2E015D56, 0x673D2071, 0xBC79A718, 0xF545DA3F, - 0x0F1CDF3B, 0x4620A21C, 0x9D642575, 0xD4585852, - 0x6C3A598C, 0x250624AB, 0xFE42A3C2, 0xB77EDEE5, - 0x4D27DBE1, 0x041BA6C6, 0xDF5F21AF, 0x96635C88, - 0xAA7754E2, 0xE34B29C5, 0x380FAEAC, 0x7133D38B, - 0x8B6AD68F, 0xC256ABA8, 0x19122CC1, 0x502E51E6, - 0xE84C5038, 0xA1702D1F, 0x7A34AA76, 0x3308D751, - 0xC951D255, 0x806DAF72, 0x5B29281B, 0x1215553C, - 0x230138CF, 0x6A3D45E8, 0xB179C281, 0xF845BFA6, - 0x021CBAA2, 0x4B20C785, 0x906440EC, 0xD9583DCB, - 0x613A3C15, 0x28064132, 0xF342C65B, 0xBA7EBB7C, - 0x4027BE78, 0x091BC35F, 0xD25F4436, 0x9B633911, - 0xA777317B, 0xEE4B4C5C, 0x350FCB35, 0x7C33B612, - 0x866AB316, 0xCF56CE31, 0x14124958, 0x5D2E347F, - 0xE54C35A1, 0xAC704886, 0x7734CFEF, 0x3E08B2C8, - 
0xC451B7CC, 0x8D6DCAEB, 0x56294D82, 0x1F1530A5 - }; -} diff --git a/clients/src/main/java/org/oracle/okafka/common/utils/Sanitizer.java b/clients/src/main/java/org/oracle/okafka/common/utils/Sanitizer.java deleted file mode 100644 index 9363f68..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/utils/Sanitizer.java +++ /dev/null @@ -1,102 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.utils; - -import java.io.UnsupportedEncodingException; -import java.net.URLDecoder; -import java.net.URLEncoder; -import java.nio.charset.StandardCharsets; -import java.util.regex.Pattern; - -import javax.management.ObjectName; - -import org.oracle.okafka.common.KafkaException; - -/** - * Utility class for sanitizing/desanitizing/quoting values used in JMX metric names - * or as ZooKeeper node name. - *
      - * User principals and client-ids are URL-encoded using ({@link #sanitize(String)} - * for use as ZooKeeper node names. User principals are URL-encoded in all metric - * names as well. All other metric tags including client-id are quoted if they - * contain special characters using {@link #jmxSanitize(String)} when - * registering in JMX. - */ -public class Sanitizer { - - /** - * Even though only a small number of characters are disallowed in JMX, quote any - * string containing special characters to be safe. All characters in strings sanitized - * using {@link #sanitize(String)} are safe for JMX and hence included here. - */ - private static final Pattern MBEAN_PATTERN = Pattern.compile("[\\w-%\\. \t]*"); - - /** - * Sanitize `name` for safe use as JMX metric name as well as ZooKeeper node name - * using URL-encoding. - */ - public static String sanitize(String name) { - String encoded = ""; - try { - encoded = URLEncoder.encode(name, StandardCharsets.UTF_8.name()); - StringBuilder builder = new StringBuilder(); - for (int i = 0; i < encoded.length(); i++) { - char c = encoded.charAt(i); - if (c == '*') { // Metric ObjectName treats * as pattern - builder.append("%2A"); - } else if (c == '+') { // Space URL-encoded as +, replace with percent encoding - builder.append("%20"); - } else { - builder.append(c); - } - } - return builder.toString(); - } catch (UnsupportedEncodingException e) { - throw new KafkaException(e); - } - } - - /** - * Desanitize name that was URL-encoded using {@link #sanitize(String)}. This - * is used to obtain the desanitized version of node names in ZooKeeper. - */ - public static String desanitize(String name) { - try { - return URLDecoder.decode(name, StandardCharsets.UTF_8.name()); - } catch (UnsupportedEncodingException e) { - throw new KafkaException(e); - } - } - - /** - * Quote `name` using {@link ObjectName#quote(String)} if `name` contains - * characters that are not safe for use in JMX. User principals that are - * already sanitized using {@link #sanitize(String)} will not be quoted - * since they are safe for JMX. - */ - public static String jmxSanitize(String name) { - return MBEAN_PATTERN.matcher(name).matches() ? name : ObjectName.quote(name); - } -} diff --git a/clients/src/main/java/org/oracle/okafka/common/utils/SystemTime.java b/clients/src/main/java/org/oracle/okafka/common/utils/SystemTime.java deleted file mode 100644 index 04c341a..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/utils/SystemTime.java +++ /dev/null @@ -1,61 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.utils; - -import java.util.concurrent.TimeUnit; - -/** - * A time implementation that uses the system clock and sleep call. Use `Time.SYSTEM` instead of creating an instance - * of this class. - */ -public class SystemTime implements Time { - - @Override - public long milliseconds() { - return System.currentTimeMillis(); - } - - @Override - public long hiResClockMs() { - return TimeUnit.NANOSECONDS.toMillis(nanoseconds()); - } - - @Override - public long nanoseconds() { - return System.nanoTime(); - } - - @Override - public void sleep(long ms) { - try { - Thread.sleep(ms); - } catch (InterruptedException e) { - // just wake up early - Thread.currentThread().interrupt(); - } - } - -} - diff --git a/clients/src/main/java/org/oracle/okafka/common/utils/TNSParser.java b/clients/src/main/java/org/oracle/okafka/common/utils/TNSParser.java index 8ebe25d..285b10a 100644 --- a/clients/src/main/java/org/oracle/okafka/common/utils/TNSParser.java +++ b/clients/src/main/java/org/oracle/okafka/common/utils/TNSParser.java @@ -1,14 +1,14 @@ /* -** OKafka Java Client version 0.8. +** OKafka Java Client version 23.4. ** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. +** Copyright (c) 2019, 2024 Oracle and/or its affiliates. ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. */ package org.oracle.okafka.common.utils; import org.oracle.okafka.clients.CommonClientConfigs; -import org.oracle.okafka.common.config.AbstractConfig; +import org.apache.kafka.common.config.AbstractConfig; import java.io.File; import java.io.FileNotFoundException; @@ -26,7 +26,7 @@ public TNSParser( AbstractConfig configs) { this.configs = configs; } - public String getProperty(String connStr, String property) { + public static String getProperty(String connStr, String property) { int index = connStr.indexOf(property); if(index == -1) return null; diff --git a/clients/src/main/java/org/oracle/okafka/common/utils/Time.java b/clients/src/main/java/org/oracle/okafka/common/utils/Time.java deleted file mode 100644 index d4d44f8..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/utils/Time.java +++ /dev/null @@ -1,64 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.oracle.okafka.common.utils; - -/** - * An interface abstracting the clock to use in unit testing classes that make use of clock time. 
- * - * Implementations of this class should be thread-safe. - */ -public interface Time { - - Time SYSTEM = new SystemTime(); - - /** - * Returns the current time in milliseconds. - */ - long milliseconds(); - - /** - * Returns the value returned by `nanoseconds` converted into milliseconds. - */ - long hiResClockMs(); - - /** - * Returns the current value of the running JVM's high-resolution time source, in nanoseconds. - * - *
      This method can only be used to measure elapsed time and is - * not related to any other notion of system or wall-clock time. - * The value returned represents nanoseconds since some fixed but - * arbitrary origin time (perhaps in the future, so values - * may be negative). The same origin is used by all invocations of - * this method in an instance of a Java virtual machine; other - * virtual machine instances are likely to use a different origin. - */ - long nanoseconds(); - - /** - * Sleep for the given number of milliseconds - */ - void sleep(long ms); - -} diff --git a/clients/src/main/java/org/oracle/okafka/common/utils/Utils.java b/clients/src/main/java/org/oracle/okafka/common/utils/Utils.java deleted file mode 100644 index ddb8e8f..0000000 --- a/clients/src/main/java/org/oracle/okafka/common/utils/Utils.java +++ /dev/null @@ -1,998 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.oracle.okafka.common.utils; - -import org.oracle.okafka.common.KafkaException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.Closeable; -import java.io.DataOutput; -import java.io.EOFException; -import java.io.File; -import java.io.FileInputStream; -import java.io.IOException; -import java.io.InputStream; -import java.io.PrintWriter; -import java.io.StringWriter; -import java.lang.reflect.Constructor; -import java.lang.reflect.InvocationTargetException; -import java.nio.ByteBuffer; -import java.nio.MappedByteBuffer; -import java.nio.channels.FileChannel; -import java.nio.charset.Charset; -import java.nio.charset.StandardCharsets; -import java.nio.file.FileVisitResult; -import java.nio.file.Files; -import java.nio.file.NoSuchFileException; -import java.nio.file.Path; -import java.nio.file.SimpleFileVisitor; -import java.nio.file.StandardCopyOption; -import java.nio.file.attribute.BasicFileAttributes; -import java.text.DecimalFormat; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.Properties; -import java.util.Set; -import java.util.regex.Matcher; -import java.util.regex.Pattern; - -public final class Utils { - - private Utils() {} - - // This matches URIs of formats: host:port and protocol:\\host:port - // IPv6 is supported with [ip] pattern - private static final Pattern HOST_PORT_PATTERN = Pattern.compile(".*?\\[?([0-9a-zA-Z\\-%._:]*)\\]?:([0-9]+)"); - - // Prints up to 2 decimal digits. Used for human readable printing - private static final DecimalFormat TWO_DIGIT_FORMAT = new DecimalFormat("0.##"); - - private static final String[] BYTE_SCALE_SUFFIXES = new String[] {"B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"}; - - public static final String NL = System.getProperty("line.separator"); - - private static final Logger log = LoggerFactory.getLogger(Utils.class); - - /** - * Get a sorted list representation of a collection. - * @param collection The collection to sort - * @param The class of objects in the collection - * @return An unmodifiable sorted list with the contents of the collection - */ - public static > List sorted(Collection collection) { - List res = new ArrayList<>(collection); - Collections.sort(res); - return Collections.unmodifiableList(res); - } - - /** - * Turn the given UTF8 byte array into a string - * - * @param bytes The byte array - * @return The string - */ - public static String utf8(byte[] bytes) { - return new String(bytes, StandardCharsets.UTF_8); - } - - /** - * Read a UTF8 string from a byte buffer. Note that the position of the byte buffer is not affected - * by this method. - * - * @param buffer The buffer to read from - * @param length The length of the string in bytes - * @return The UTF8 string - */ - public static String utf8(ByteBuffer buffer, int length) { - return utf8(buffer, 0, length); - } - - /** - * Read a UTF8 string from the current position till the end of a byte buffer. The position of the byte buffer is - * not affected by this method. - * - * @param buffer The buffer to read from - * @return The UTF8 string - */ - public static String utf8(ByteBuffer buffer) { - return utf8(buffer, buffer.remaining()); - } - - /** - * Read a UTF8 string from a byte buffer at a given offset. 
Note that the position of the byte buffer - * is not affected by this method. - * - * @param buffer The buffer to read from - * @param offset The offset relative to the current position in the buffer - * @param length The length of the string in bytes - * @return The UTF8 string - */ - public static String utf8(ByteBuffer buffer, int offset, int length) { - if (buffer.hasArray()) - return new String(buffer.array(), buffer.arrayOffset() + buffer.position() + offset, length, StandardCharsets.UTF_8); - else - return utf8(toArray(buffer, offset, length)); - } - - /** - * Turn a string into a utf8 byte[] - * - * @param string The string - * @return The byte[] - */ - public static byte[] utf8(String string) { - return string.getBytes(StandardCharsets.UTF_8); - } - - /** - * Get the absolute value of the given number. If the number is Int.MinValue return 0. This is different from - * java.lang.Math.abs or scala.math.abs in that they return Int.MinValue (!). - */ - public static int abs(int n) { - return (n == Integer.MIN_VALUE) ? 0 : Math.abs(n); - } - - /** - * Get the minimum of some long values. - * @param first Used to ensure at least one value - * @param rest The remaining values to compare - * @return The minimum of all passed values - */ - public static long min(long first, long... rest) { - long min = first; - for (long r : rest) { - if (r < min) - min = r; - } - return min; - } - - /** - * Get the maximum of some long values. - * @param first Used to ensure at least one value - * @param rest The remaining values to compare - * @return The maximum of all passed values - */ - public static long max(long first, long... rest) { - long max = first; - for (long r : rest) { - if (r > max) - max = r; - } - return max; - } - - - public static short min(short first, short second) { - return (short) Math.min(first, second); - } - - /** - * Get the length for UTF8-encoding a string without encoding it first - * - * @param s The string to calculate the length for - * @return The length when serialized - */ - public static int utf8Length(CharSequence s) { - int count = 0; - for (int i = 0, len = s.length(); i < len; i++) { - char ch = s.charAt(i); - if (ch <= 0x7F) { - count++; - } else if (ch <= 0x7FF) { - count += 2; - } else if (Character.isHighSurrogate(ch)) { - count += 4; - ++i; - } else { - count += 3; - } - } - return count; - } - - /** - * Read the given byte buffer from its current position to its limit into a byte array. - * @param buffer The buffer to read from - */ - public static byte[] toArray(ByteBuffer buffer) { - return toArray(buffer, 0, buffer.remaining()); - } - - /** - * Read a byte array from its current position given the size in the buffer - * @param buffer The buffer to read from - * @param size The number of bytes to read into the array - */ - public static byte[] toArray(ByteBuffer buffer, int size) { - return toArray(buffer, 0, size); - } - - /** - * Convert a ByteBuffer to a nullable array. - * @param buffer The buffer to convert - * @return The resulting array or null if the buffer is null - */ - public static byte[] toNullableArray(ByteBuffer buffer) { - return buffer == null ? null : toArray(buffer); - } - - /** - * Wrap an array as a nullable ByteBuffer. - * @param array The nullable array to wrap - * @return The wrapping ByteBuffer or null if array is null - */ - public static ByteBuffer wrapNullable(byte[] array) { - return array == null ? 
null : ByteBuffer.wrap(array); - } - - /** - * Read a byte array from the given offset and size in the buffer - * @param buffer The buffer to read from - * @param offset The offset relative to the current position of the buffer - * @param size The number of bytes to read into the array - */ - public static byte[] toArray(ByteBuffer buffer, int offset, int size) { - byte[] dest = new byte[size]; - if (buffer.hasArray()) { - System.arraycopy(buffer.array(), buffer.position() + buffer.arrayOffset() + offset, dest, 0, size); - } else { - int pos = buffer.position(); - buffer.position(pos + offset); - buffer.get(dest); - buffer.position(pos); - } - return dest; - } - - /** - * Check that the parameter t is not null - * - * @param t The object to check - * @return t if it isn't null - * @throws NullPointerException if t is null. - */ - public static T notNull(T t) { - if (t == null) - throw new NullPointerException(); - else - return t; - } - - /** - * Sleep for a bit - * @param ms The duration of the sleep - */ - public static void sleep(long ms) { - try { - Thread.sleep(ms); - } catch (InterruptedException e) { - // this is okay, we just wake up early - Thread.currentThread().interrupt(); - } - } - - /** - * Instantiate the class - */ - public static T newInstance(Class c) { - if (c == null) - throw new KafkaException("class cannot be null"); - try { - return c.getDeclaredConstructor().newInstance(); - } catch (NoSuchMethodException e) { - throw new KafkaException("Could not find a public no-argument constructor for " + c.getName(), e); - } catch (ReflectiveOperationException | RuntimeException e) { - throw new KafkaException("Could not instantiate class " + c.getName(), e); - } - } - - /** - * Look up the class by name and instantiate it. - * @param klass class name - * @param base super class of the class to be instantiated - * @param the type of the base class - * @return the new instance - */ - public static T newInstance(String klass, Class base) throws ClassNotFoundException { - return Utils.newInstance(loadClass(klass, base)); - } - - /** - * Look up a class by name. - * @param klass class name - * @param base super class of the class for verification - * @param the type of the base class - * @return the new class - */ - public static Class loadClass(String klass, Class base) throws ClassNotFoundException { - return Class.forName(klass, true, Utils.getContextOrAQClassLoader()).asSubclass(base); - } - - /** - * Construct a new object using a class name and parameters. - * - * @param className The full name of the class to construct. - * @param params A sequence of (type, object) elements. - * @param The type of object to construct. - * @return The new object. - * @throws ClassNotFoundException If there was a problem constructing the object. - */ - public static T newParameterizedInstance(String className, Object... 
params) - throws ClassNotFoundException { - Class[] argTypes = new Class[params.length / 2]; - Object[] args = new Object[params.length / 2]; - try { - Class c = Class.forName(className, true, Utils.getContextOrAQClassLoader()); - for (int i = 0; i < params.length / 2; i++) { - argTypes[i] = (Class) params[2 * i]; - args[i] = params[(2 * i) + 1]; - } - @SuppressWarnings("unchecked") - Constructor constructor = (Constructor) c.getConstructor(argTypes); - return constructor.newInstance(args); - } catch (NoSuchMethodException e) { - throw new ClassNotFoundException(String.format("Failed to find " + - "constructor with %s for %s", Utils.join(argTypes, ", "), className), e); - } catch (InstantiationException e) { - throw new ClassNotFoundException(String.format("Failed to instantiate " + - "%s", className), e); - } catch (IllegalAccessException e) { - throw new ClassNotFoundException(String.format("Unable to access " + - "constructor of %s", className), e); - } catch (InvocationTargetException e) { - throw new ClassNotFoundException(String.format("Unable to invoke " + - "constructor of %s", className), e); - } - } - - /** - * Generates 32 bit murmur2 hash from byte array - * @param data byte array to hash - * @return 32 bit hash of the given array - */ - public static int murmur2(final byte[] data) { - int length = data.length; - int seed = 0x9747b28c; - // 'm' and 'r' are mixing constants generated offline. - // They're not really 'magic', they just happen to work well. - final int m = 0x5bd1e995; - final int r = 24; - - // Initialize the hash to a random value - int h = seed ^ length; - int length4 = length / 4; - - for (int i = 0; i < length4; i++) { - final int i4 = i * 4; - int k = (data[i4 + 0] & 0xff) + ((data[i4 + 1] & 0xff) << 8) + ((data[i4 + 2] & 0xff) << 16) + ((data[i4 + 3] & 0xff) << 24); - k *= m; - k ^= k >>> r; - k *= m; - h *= m; - h ^= k; - } - - // Handle the last few bytes of the input array - switch (length % 4) { - case 3: - h ^= (data[(length & ~3) + 2] & 0xff) << 16; - case 2: - h ^= (data[(length & ~3) + 1] & 0xff) << 8; - case 1: - h ^= data[length & ~3] & 0xff; - h *= m; - } - - h ^= h >>> 13; - h *= m; - h ^= h >>> 15; - - return h; - } - - /** - * Extracts the hostname from a "host:port" address string. - * @param address address string to parse - * @return hostname or null if the given address is incorrect - */ - public static String getHost(String address) { - Matcher matcher = HOST_PORT_PATTERN.matcher(address); - return matcher.matches() ? matcher.group(1) : null; - } - - /** - * Extracts the port number from a "host:port" address string. - * @param address address string to parse - * @return port number or null if the given address is incorrect - */ - public static Integer getPort(String address) { - Matcher matcher = HOST_PORT_PATTERN.matcher(address); - return matcher.matches() ? Integer.parseInt(matcher.group(2)) : null; - } - - /** - * Formats hostname and port number as a "host:port" address string, - * surrounding IPv6 addresses with braces '[', ']' - * @param host hostname - * @param port port number - * @return address string - */ - public static String formatAddress(String host, Integer port) { - return host.contains(":") - ? 
"[" + host + "]:" + port // IPv6 - : host + ":" + port; - } - - /** - * Formats a byte number as a human readable String ("3.2 MB") - * @param bytes some size in bytes - * @return - */ - public static String formatBytes(long bytes) { - if (bytes < 0) { - return String.valueOf(bytes); - } - double asDouble = (double) bytes; - int ordinal = (int) Math.floor(Math.log(asDouble) / Math.log(1024.0)); - double scale = Math.pow(1024.0, ordinal); - double scaled = asDouble / scale; - String formatted = TWO_DIGIT_FORMAT.format(scaled); - try { - return formatted + " " + BYTE_SCALE_SUFFIXES[ordinal]; - } catch (IndexOutOfBoundsException e) { - //huge number? - return String.valueOf(asDouble); - } - } - - /** - * Create a string representation of an array joined by the given separator - * @param strs The array of items - * @param separator The separator - * @return The string representation. - */ - public static String join(T[] strs, String separator) { - return join(Arrays.asList(strs), separator); - } - - /** - * Create a string representation of a list joined by the given separator - * @param list The list of items - * @param separator The separator - * @return The string representation. - */ - public static String join(Collection list, String separator) { - Objects.requireNonNull(list); - StringBuilder sb = new StringBuilder(); - Iterator iter = list.iterator(); - while (iter.hasNext()) { - sb.append(iter.next()); - if (iter.hasNext()) - sb.append(separator); - } - return sb.toString(); - } - - public static String mkString(Map map, String begin, String end, - String keyValueSeparator, String elementSeparator) { - StringBuilder bld = new StringBuilder(); - bld.append(begin); - String prefix = ""; - for (Map.Entry entry : map.entrySet()) { - bld.append(prefix).append(entry.getKey()). - append(keyValueSeparator).append(entry.getValue()); - prefix = elementSeparator; - } - bld.append(end); - return bld.toString(); - } - - public static Map parseMap(String mapStr, String keyValueSeparator, String elementSeparator) { - Map map = new HashMap<>(); - - if (!mapStr.isEmpty()) { - String[] attrvals = mapStr.split(elementSeparator); - for (String attrval : attrvals) { - String[] array = attrval.split(keyValueSeparator, 2); - map.put(array[0], array[1]); - } - } - return map; - } - - /** - * Read a properties file from the given path - * @param filename The path of the file to read - */ - public static Properties loadProps(String filename) throws IOException { - Properties props = new Properties(); - - if (filename != null) { - try (InputStream propStream = new FileInputStream(filename)) { - props.load(propStream); - } - } else { - System.out.println("Did not load any properties since the property file is not specified"); - } - - return props; - } - - /** - * Converts a Properties object to a Map, calling {@link #toString} to ensure all keys and values - * are Strings. 
- */ - public static Map propsToStringMap(Properties props) { - Map result = new HashMap<>(); - for (Map.Entry entry : props.entrySet()) - result.put(entry.getKey().toString(), entry.getValue().toString()); - return result; - } - - /** - * Get the stack trace from an exception as a string - */ - public static String stackTrace(Throwable e) { - StringWriter sw = new StringWriter(); - PrintWriter pw = new PrintWriter(sw); - e.printStackTrace(pw); - return sw.toString(); - } - - /** - * Print an error message and shutdown the JVM - * @param message The error message - */ - public static void croak(String message) { - System.err.println(message); - Exit.exit(1); - } - - /** - * Read a buffer into a Byte array for the given offset and length - */ - public static byte[] readBytes(ByteBuffer buffer, int offset, int length) { - byte[] dest = new byte[length]; - if (buffer.hasArray()) { - System.arraycopy(buffer.array(), buffer.arrayOffset() + offset, dest, 0, length); - } else { - buffer.mark(); - buffer.position(offset); - buffer.get(dest, 0, length); - buffer.reset(); - } - return dest; - } - - /** - * Read the given byte buffer into a Byte array - */ - public static byte[] readBytes(ByteBuffer buffer) { - return Utils.readBytes(buffer, 0, buffer.limit()); - } - - /** - * Attempt to read a file as a string - * @throws IOException - */ - public static String readFileAsString(String path, Charset charset) throws IOException { - if (charset == null) charset = Charset.defaultCharset(); - - try (FileInputStream stream = new FileInputStream(new File(path))) { - FileChannel fc = stream.getChannel(); - MappedByteBuffer bb = fc.map(FileChannel.MapMode.READ_ONLY, 0, fc.size()); - return charset.decode(bb).toString(); - } - - } - - public static String readFileAsString(String path) throws IOException { - return Utils.readFileAsString(path, Charset.defaultCharset()); - } - - /** - * Check if the given ByteBuffer capacity - * @param existingBuffer ByteBuffer capacity to check - * @param newLength new length for the ByteBuffer. - * returns ByteBuffer - */ - public static ByteBuffer ensureCapacity(ByteBuffer existingBuffer, int newLength) { - if (newLength > existingBuffer.capacity()) { - ByteBuffer newBuffer = ByteBuffer.allocate(newLength); - existingBuffer.flip(); - newBuffer.put(existingBuffer); - return newBuffer; - } - return existingBuffer; - } - - /* - * Creates a set - * @param elems the elements - * @param the type of element - * @return Set - */ - @SafeVarargs - public static Set mkSet(T... elems) { - return new HashSet<>(Arrays.asList(elems)); - } - - /* - * Creates a list - * @param elems the elements - * @param the type of element - * @return List - */ - @SafeVarargs - public static List mkList(T... elems) { - return Arrays.asList(elems); - } - - /** - * Creates a map entry (for use with {@link Utils#mkMap(java.util.Map.Entry[])}) - * - * @param k The key - * @param v The value - * @param The key type - * @param The value type - * @return An entry - */ - public static Map.Entry mkEntry(final K k, final V v) { - return new Map.Entry() { - @Override - public K getKey() { - return k; - } - - @Override - public V getValue() { - return v; - } - - @Override - public V setValue(final V value) { - throw new UnsupportedOperationException(); - } - }; - } - - /** - * Creates a map from a sequence of entries - * - * @param entries The entries to map - * @param The key type - * @param The value type - * @return A map - */ - @SafeVarargs - public static Map mkMap(final Map.Entry... 
entries) { - final LinkedHashMap result = new LinkedHashMap<>(); - for (final Map.Entry entry : entries) { - result.put(entry.getKey(), entry.getValue()); - } - return result; - } - - /** - * Creates a {@link Properties} from a map - * - * @param properties A map of properties to add - * @return The properties object - */ - public static Properties mkProperties(final Map properties) { - final Properties result = new Properties(); - for (final Map.Entry entry : properties.entrySet()) { - result.setProperty(entry.getKey(), entry.getValue()); - } - return result; - } - - /** - * Recursively delete the given file/directory and any subfiles (if any exist) - * - * @param file The root file at which to begin deleting - */ - public static void delete(final File file) throws IOException { - if (file == null) - return; - Files.walkFileTree(file.toPath(), new SimpleFileVisitor() { - @Override - public FileVisitResult visitFileFailed(Path path, IOException exc) throws IOException { - // If the root path did not exist, ignore the error; otherwise throw it. - if (exc instanceof NoSuchFileException && path.toFile().equals(file)) - return FileVisitResult.TERMINATE; - throw exc; - } - - @Override - public FileVisitResult visitFile(Path path, BasicFileAttributes attrs) throws IOException { - Files.delete(path); - return FileVisitResult.CONTINUE; - } - - @Override - public FileVisitResult postVisitDirectory(Path path, IOException exc) throws IOException { - Files.delete(path); - return FileVisitResult.CONTINUE; - } - }); - } - - /** - * Returns an empty collection if this list is null - * @param other - * @return - */ - public static List safe(List other) { - return other == null ? Collections.emptyList() : other; - } - - /** - * Get the ClassLoader which loaded Kafka. - */ - public static ClassLoader getAQClassLoader() { - return Utils.class.getClassLoader(); - } - - /** - * Get the Context ClassLoader on this thread or, if not present, the ClassLoader that - * loaded Kafka. - * - * This should be used whenever passing a ClassLoader to Class.forName - */ - public static ClassLoader getContextOrAQClassLoader() { - ClassLoader cl = Thread.currentThread().getContextClassLoader(); - if (cl == null) - return getAQClassLoader(); - else - return cl; - } - - /** - * Attempts to move source to target atomically and falls back to a non-atomic move if it fails. - * - * @throws IOException if both atomic and non-atomic moves fail - */ - public static void atomicMoveWithFallback(Path source, Path target) throws IOException { - try { - Files.move(source, target, StandardCopyOption.ATOMIC_MOVE); - } catch (IOException outer) { - try { - Files.move(source, target, StandardCopyOption.REPLACE_EXISTING); - log.debug("Non-atomic move of {} to {} succeeded after atomic move failed due to {}", source, target, - outer.getMessage()); - } catch (IOException inner) { - inner.addSuppressed(outer); - throw inner; - } - } - } - - /** - * Closes all the provided closeables. - * @throws IOException if any of the close methods throws an IOException. - * The first IOException is thrown with subsequent exceptions - * added as suppressed exceptions. - */ - public static void closeAll(Closeable... 
closeables) throws IOException { - IOException exception = null; - for (Closeable closeable : closeables) { - try { - if (closeable != null) - closeable.close(); - } catch (IOException e) { - if (exception != null) - exception.addSuppressed(e); - else - exception = e; - } - } - if (exception != null) - throw exception; - } - - /** - * Closes {@code closeable} and if an exception is thrown, it is logged at the WARN level. - */ - public static void closeQuietly(AutoCloseable closeable, String name) { - if (closeable != null) { - try { - closeable.close(); - } catch (Throwable t) { - log.warn("Failed to close {} with type {}", name, closeable.getClass().getName(), t); - } - } - } - - /** - * A cheap way to deterministically convert a number to a positive value. When the input is - * positive, the original value is returned. When the input number is negative, the returned - * positive value is the original value bit AND against 0x7fffffff which is not its absolutely - * value. - * - * Note: changing this method in the future will possibly cause partition selection not to be - * compatible with the existing messages already placed on a partition since it is used - * in producer's {@link org.oracle.okafka.clients.producer.internals.DefaultPartitioner} - * - * @param number a given number - * @return a positive number. - */ - public static int toPositive(int number) { - return number & 0x7fffffff; - } - - public static int longHashcode(long value) { - return (int) (value ^ (value >>> 32)); - } - - /** - * Read a size-delimited byte buffer starting at the given offset. - * @param buffer Buffer containing the size and data - * @param start Offset in the buffer to read from - * @return A slice of the buffer containing only the delimited data (excluding the size) - */ - public static ByteBuffer sizeDelimited(ByteBuffer buffer, int start) { - int size = buffer.getInt(start); - if (size < 0) { - return null; - } else { - ByteBuffer b = buffer.duplicate(); - b.position(start + 4); - b = b.slice(); - b.limit(size); - b.rewind(); - return b; - } - } - - /** - * Read data from the channel to the given byte buffer until there are no bytes remaining in the buffer. If the end - * of the file is reached while there are bytes remaining in the buffer, an EOFException is thrown. - * - * @param channel File channel containing the data to read from - * @param destinationBuffer The buffer into which bytes are to be transferred - * @param position The file position at which the transfer is to begin; it must be non-negative - * @param description A description of what is being read, this will be included in the EOFException if it is thrown - * - * @throws IllegalArgumentException If position is negative - * @throws EOFException If the end of the file is reached while there are remaining bytes in the destination buffer - * @throws IOException If an I/O error occurs, see {@link FileChannel#read(ByteBuffer, long)} for details on the - * possible exceptions - */ - public static void readFullyOrFail(FileChannel channel, ByteBuffer destinationBuffer, long position, - String description) throws IOException { - if (position < 0) { - throw new IllegalArgumentException("The file channel position cannot be negative, but it is " + position); - } - int expectedReadBytes = destinationBuffer.remaining(); - readFully(channel, destinationBuffer, position); - if (destinationBuffer.hasRemaining()) { - throw new EOFException(String.format("Failed to read `%s` from file channel `%s`. 
Expected to read %d bytes, " + - "but reached end of file after reading %d bytes. Started read from position %d.", - description, channel, expectedReadBytes, expectedReadBytes - destinationBuffer.remaining(), position)); - } - } - - /** - * Read data from the channel to the given byte buffer until there are no bytes remaining in the buffer or the end - * of the file has been reached. - * - * @param channel File channel containing the data to read from - * @param destinationBuffer The buffer into which bytes are to be transferred - * @param position The file position at which the transfer is to begin; it must be non-negative - * - * @throws IllegalArgumentException If position is negative - * @throws IOException If an I/O error occurs, see {@link FileChannel#read(ByteBuffer, long)} for details on the - * possible exceptions - */ - public static void readFully(FileChannel channel, ByteBuffer destinationBuffer, long position) throws IOException { - if (position < 0) { - throw new IllegalArgumentException("The file channel position cannot be negative, but it is " + position); - } - long currentPosition = position; - int bytesRead; - do { - bytesRead = channel.read(destinationBuffer, currentPosition); - currentPosition += bytesRead; - } while (bytesRead != -1 && destinationBuffer.hasRemaining()); - } - - /** - * Read data from the input stream to the given byte buffer until there are no bytes remaining in the buffer or the - * end of the stream has been reached. - * - * @param inputStream Input stream to read from - * @param destinationBuffer The buffer into which bytes are to be transferred (it must be backed by an array) - * - * @throws IOException If an I/O error occurs - */ - public static final void readFully(InputStream inputStream, ByteBuffer destinationBuffer) throws IOException { - if (!destinationBuffer.hasArray()) - throw new IllegalArgumentException("destinationBuffer must be backed by an array"); - int initialOffset = destinationBuffer.arrayOffset() + destinationBuffer.position(); - byte[] array = destinationBuffer.array(); - int length = destinationBuffer.remaining(); - int totalBytesRead = 0; - do { - int bytesRead = inputStream.read(array, initialOffset + totalBytesRead, length - totalBytesRead); - if (bytesRead == -1) - break; - totalBytesRead += bytesRead; - } while (length > totalBytesRead); - destinationBuffer.position(destinationBuffer.position() + totalBytesRead); - } - - public static void writeFully(FileChannel channel, ByteBuffer sourceBuffer) throws IOException { - while (sourceBuffer.hasRemaining()) - channel.write(sourceBuffer); - } - - /** - * Write the contents of a buffer to an output stream. The bytes are copied from the current position - * in the buffer. 
- * @param out The output to write to - * @param buffer The buffer to write from - * @param length The number of bytes to write - * @throws IOException For any errors writing to the output - */ - public static void writeTo(DataOutput out, ByteBuffer buffer, int length) throws IOException { - if (buffer.hasArray()) { - out.write(buffer.array(), buffer.position() + buffer.arrayOffset(), length); - } else { - int pos = buffer.position(); - for (int i = pos; i < length + pos; i++) - out.writeByte(buffer.get(i)); - } - } - - public static List toList(Iterator iterator) { - List res = new ArrayList<>(); - while (iterator.hasNext()) - res.add(iterator.next()); - return res; - } - - public static String enquote(String name) throws IllegalArgumentException{ - if( !name.contains("'")) { - if(!name.contains("\"")) return "\"" + name + "\""; - if(name.indexOf("\"") == 0 && name.indexOf("\"", 1) == name.length() -1 ) - return name; - - } - throw new IllegalArgumentException("Invalid argument provided: " + name); - } - -} diff --git a/clients/src/test/java/ojdbc.properties b/clients/src/test/java/ojdbc.properties new file mode 100644 index 0000000..b59575a --- /dev/null +++ b/clients/src/test/java/ojdbc.properties @@ -0,0 +1,2 @@ +user=OKafka +password=Welcome_123# \ No newline at end of file diff --git a/clients/src/test/java/org/oracle/okafka/clients/MockClient.java b/clients/src/test/java/org/oracle/okafka/clients/MockClient.java deleted file mode 100644 index e379b96..0000000 --- a/clients/src/test/java/org/oracle/okafka/clients/MockClient.java +++ /dev/null @@ -1,354 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * 04/20/2020: This file is modified to support Kafka Java Client compatability to Oracle Transactional Event Queues. 
- * - */ - -package org.oracle.okafka.clients; - -import org.oracle.okafka.clients.producer.internals.ProduceResponse; -import org.oracle.okafka.clients.producer.internals.ProduceRequest; -import org.oracle.okafka.common.Cluster; -import org.oracle.okafka.common.Node; -import org.oracle.okafka.common.errors.AuthenticationException; -import org.oracle.okafka.common.errors.InvalidTopicException; -import org.oracle.okafka.common.protocol.ApiKeys; -import org.oracle.okafka.common.requests.AbstractRequest; -import org.oracle.okafka.common.requests.AbstractResponse; -import org.oracle.okafka.common.utils.Time; -import org.oracle.okafka.test.TestCondition; -import org.oracle.okafka.test.TestUtils; - -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Map; -import java.util.Queue; -import java.util.Set; -import java.util.concurrent.ConcurrentLinkedDeque; - -/** - * A mock network client for use testing code - */ -public class MockClient implements KafkaClient { - - private int correlation; - private final Time time; - private final Metadata metadata; - private Set unavailableTopics; - private Cluster cluster; - private Node node = null; - private final Set ready = new HashSet<>(); - - // Nodes awaiting reconnect backoff, will not be chosen by leastLoadedNode - private final TransientSet blackedOut; - // Nodes which will always fail to connect, but can be chosen by leastLoadedNode - private final TransientSet unreachable; - // Nodes which have a delay before ultimately succeeding to connect - private final TransientSet delayedReady; - - private long requests = 0; - private final Map pendingAuthenticationErrors = new HashMap<>(); - private final Map authenticationErrors = new HashMap<>(); - private final Queue metadataUpdates = new ConcurrentLinkedDeque<>(); - - - public MockClient(Time time) { - this(time, null); - } - - public MockClient(Time time, Metadata metadata) { - this.time = time; - this.metadata = metadata; - this.unavailableTopics = Collections.emptySet(); - this.blackedOut = new TransientSet<>(time); - this.unreachable = new TransientSet<>(time); - this.delayedReady = new TransientSet<>(time); - } - - @Override - public boolean isReady(Node node, long now) { - return ready.contains(node); - } - - @Override - public boolean ready(Node node, long now) { - if (blackedOut.contains(node, now)) - return false; - - if (unreachable.contains(node, now)) { - blackout(node, 100); - return false; - } - - if (delayedReady.contains(node, now)) - return false; - - ready.add(node); - return true; - } - - @Override - public long connectionDelay(Node node, long now) { - return blackedOut.expirationDelayMs(node, now); - } - - @Override - public long pollDelayMs(Node node, long now) { - return connectionDelay(node, now); - } - - public void blackout(Node node, long durationMs) { - blackedOut.add(node, durationMs); - } - - public void setUnreachable(Node node, long durationMs) { - disconnect(node); - unreachable.add(node, durationMs); - } - - public void delayReady(Node node, long durationMs) { - delayedReady.add(node, durationMs); - } - - public void authenticationFailed(Node node, long blackoutMs) { - pendingAuthenticationErrors.remove(node); - authenticationErrors.put(node, new AuthenticationException("Authentication failed")); - disconnect(node); - blackout(node, blackoutMs); - } - - public void createPendingAuthenticationError(Node node, long blackoutMs) { - pendingAuthenticationErrors.put(node, blackoutMs); - } - - @Override - public boolean 
connectionFailed(Node node) { - return blackedOut.contains(node); - } - - @Override - public AuthenticationException authenticationException(Node node) { - return authenticationErrors.get(node); - } - - @Override - public void disconnected(Node node, long now) { - return ; - } - @Override - public void disconnect(Node node) { - ready.remove(node); - } - - @Override - public ClientResponse send(ClientRequest request, long now) { - requests++; - ClientResponse response = dummyResponse(request); - requests--; - return response; - } - - private ClientResponse dummyResponse(ClientRequest request) { - try { - Thread.sleep(30000); - } catch(Exception e) { - - } - if(request.apiKey() == ApiKeys.PRODUCE ) { - ProduceRequest.Builder builder = (ProduceRequest.Builder)request.requestBuilder(); - ProduceRequest produceRequest = builder.build(); - return new ClientResponse(request.makeHeader(), request.callback(), request.destination(), - request.createdTimeMs(), time.milliseconds(), true, - new ProduceResponse(produceRequest.getTopicpartition(), new ProduceResponse.PartitionResponse(new InvalidTopicException("This exception can be retried")))); - } - return null; - } - - @Override - public long maybeUpdateMetadata(long now) { - - if (metadata != null && metadata.updateRequested()) { - MetadataUpdate metadataUpdate = metadataUpdates.poll(); - if (cluster != null) - metadata.update(cluster, this.unavailableTopics, time.milliseconds()); - if (metadataUpdate == null) - metadata.update(metadata.fetch(), this.unavailableTopics, time.milliseconds()); - else { - if (metadataUpdate.expectMatchRefreshTopics - && !metadata.topics().equals(metadataUpdate.cluster.topics())) { - throw new IllegalStateException("The metadata topics does not match expectation. " - + "Expected topics: " + metadataUpdate.cluster.topics() - + ", asked topics: " + metadata.topics()); - } - this.unavailableTopics = metadataUpdate.unavailableTopics; - metadata.update(metadataUpdate.cluster, metadataUpdate.unavailableTopics, time.milliseconds()); - } - } - return 0; - } - - public void waitForRequests(final int minRequests, long maxWaitMs) throws InterruptedException { - TestUtils.waitForCondition(new TestCondition() { - @Override - public boolean conditionMet() { - return requests >= minRequests; - } - }, maxWaitMs, "Expected requests have not been sent"); - } - - public void reset() { - ready.clear(); - blackedOut.clear(); - unreachable.clear(); - requests = 0; - metadataUpdates.clear(); - authenticationErrors.clear(); - } - - public boolean hasPendingMetadataUpdates() { - return !metadataUpdates.isEmpty(); - } - - public void prepareMetadataUpdate(Cluster cluster, Set unavailableTopics) { - metadataUpdates.add(new MetadataUpdate(cluster, unavailableTopics, false)); - } - - public void prepareMetadataUpdate(Cluster cluster, - Set unavailableTopics, - boolean expectMatchMetadataTopics) { - metadataUpdates.add(new MetadataUpdate(cluster, unavailableTopics, expectMatchMetadataTopics)); - } - - public void setNode(Node node) { - this.node = node; - } - - public void cluster(Cluster cluster) { - this.cluster = cluster; - } - - @Override - public boolean hasReadyNodes(long now) { - return !ready.isEmpty(); - } - - @Override - public ClientRequest newClientRequest(Node node, AbstractRequest.Builder requestBuilder, long createdTimeMs, - boolean expectResponse) { - return newClientRequest(node, requestBuilder, createdTimeMs, expectResponse, 5000, null); - } - - @Override - public ClientRequest newClientRequest(Node node, - AbstractRequest.Builder 
requestBuilder, - long createdTimeMs, - boolean expectResponse, - int requestTimeoutMs, - RequestCompletionHandler callback) { - return new ClientRequest(node, requestBuilder, correlation++, "mockClientId", createdTimeMs, - expectResponse, requestTimeoutMs, callback); - } - - @Override - public void close() { - metadata.close(); - } - - @Override - public void close(Node node) { - ready.remove(node); - } - - @Override - public Node leastLoadedNode(long now) { - // Consistent with NetworkClient, we do not return nodes awaiting reconnect backoff - if (blackedOut.contains(node, now)) - return null; - return this.node; - } - - /** - * The RequestMatcher provides a way to match a particular request to a response prepared - * through {@link #prepareResponse(RequestMatcher, AbstractResponse)}. Basically this allows testers - * to inspect the request body for the type of the request or for specific fields that should be set, - * and to fail the test if it doesn't match. - */ - public interface RequestMatcher { - boolean matches(AbstractRequest body); - } - - private static class MetadataUpdate { - final Cluster cluster; - final Set unavailableTopics; - final boolean expectMatchRefreshTopics; - MetadataUpdate(Cluster cluster, Set unavailableTopics, boolean expectMatchRefreshTopics) { - this.cluster = cluster; - this.unavailableTopics = unavailableTopics; - this.expectMatchRefreshTopics = expectMatchRefreshTopics; - } - } - - private static class TransientSet { - // The elements in the set mapped to their expiration timestamps - private final Map elements = new HashMap<>(); - private final Time time; - - private TransientSet(Time time) { - this.time = time; - } - - boolean contains(T element) { - return contains(element, time.milliseconds()); - } - - boolean contains(T element, long now) { - return expirationDelayMs(element, now) > 0; - } - - void add(T element, long durationMs) { - elements.put(element, time.milliseconds() + durationMs); - } - - long expirationDelayMs(T element, long now) { - Long expirationTimeMs = elements.get(element); - if (expirationTimeMs == null) { - return 0; - } else if (now > expirationTimeMs) { - elements.remove(element); - return 0; - } else { - return expirationTimeMs - now; - } - } - - void clear() { - elements.clear(); - } - - } - -} diff --git a/clients/src/test/java/org/oracle/okafka/clients/consumer/ConsumerConfigTest.java b/clients/src/test/java/org/oracle/okafka/clients/consumer/ConsumerConfigTest.java deleted file mode 100644 index 17b13e7..0000000 --- a/clients/src/test/java/org/oracle/okafka/clients/consumer/ConsumerConfigTest.java +++ /dev/null @@ -1,105 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * 04/20/2020: This file is modified to support Kafka Java Client compatability to Oracle Transactional Event Queues. - * - */ - -package org.oracle.okafka.clients.consumer; - -import org.oracle.okafka.common.serialization.ByteArrayDeserializer; -import org.oracle.okafka.common.serialization.Deserializer; -import org.oracle.okafka.common.serialization.StringDeserializer; -import org.junit.Test; - -import java.util.HashMap; -import java.util.Map; -import java.util.Properties; - -import static org.junit.Assert.assertEquals; - -public class ConsumerConfigTest { - - private final Deserializer keyDeserializer = new ByteArrayDeserializer(); - private final Deserializer valueDeserializer = new StringDeserializer(); - private final String keyDeserializerClassName = keyDeserializer.getClass().getName(); - private final String valueDeserializerClassName = valueDeserializer.getClass().getName(); - private final Object keyDeserializerClass = keyDeserializer.getClass(); - private final Object valueDeserializerClass = valueDeserializer.getClass(); - - @Test - public void testDeserializerToPropertyConfig() { - Properties properties = new Properties(); - properties.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, keyDeserializerClassName); - properties.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializerClassName); - Properties newProperties = ConsumerConfig.addDeserializerToConfig(properties, null, null); - assertEquals(newProperties.get(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG), keyDeserializerClassName); - assertEquals(newProperties.get(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG), valueDeserializerClassName); - - properties.clear(); - properties.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializerClassName); - newProperties = ConsumerConfig.addDeserializerToConfig(properties, keyDeserializer, null); - assertEquals(newProperties.get(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG), keyDeserializerClassName); - assertEquals(newProperties.get(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG), valueDeserializerClassName); - - properties.clear(); - properties.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, keyDeserializerClassName); - newProperties = ConsumerConfig.addDeserializerToConfig(properties, null, valueDeserializer); - assertEquals(newProperties.get(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG), keyDeserializerClassName); - assertEquals(newProperties.get(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG), valueDeserializerClassName); - - properties.clear(); - newProperties = ConsumerConfig.addDeserializerToConfig(properties, keyDeserializer, valueDeserializer); - assertEquals(newProperties.get(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG), keyDeserializerClassName); - assertEquals(newProperties.get(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG), valueDeserializerClassName); - } - - @Test - public void testDeserializerToMapConfig() { - Map configs = new HashMap<>(); - configs.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, keyDeserializerClass); - 
configs.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializerClass); - Map newConfigs = ConsumerConfig.addDeserializerToConfig(configs, null, null); - assertEquals(newConfigs.get(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG), keyDeserializerClass); - assertEquals(newConfigs.get(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG), valueDeserializerClass); - - configs.clear(); - configs.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializerClass); - newConfigs = ConsumerConfig.addDeserializerToConfig(configs, keyDeserializer, null); - assertEquals(newConfigs.get(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG), keyDeserializerClass); - assertEquals(newConfigs.get(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG), valueDeserializerClass); - - configs.clear(); - configs.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, keyDeserializerClass); - newConfigs = ConsumerConfig.addDeserializerToConfig(configs, null, valueDeserializer); - assertEquals(newConfigs.get(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG), keyDeserializerClass); - assertEquals(newConfigs.get(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG), valueDeserializerClass); - - configs.clear(); - newConfigs = ConsumerConfig.addDeserializerToConfig(configs, keyDeserializer, valueDeserializer); - assertEquals(newConfigs.get(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG), keyDeserializerClass); - assertEquals(newConfigs.get(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG), valueDeserializerClass); - } -} diff --git a/clients/src/test/java/org/oracle/okafka/clients/consumer/ConsumerRecordTest.java b/clients/src/test/java/org/oracle/okafka/clients/consumer/ConsumerRecordTest.java deleted file mode 100644 index 79abc1e..0000000 --- a/clients/src/test/java/org/oracle/okafka/clients/consumer/ConsumerRecordTest.java +++ /dev/null @@ -1,63 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.oracle.oorg/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * 04/20/2020: This file is modified to support Kafka Java Client compatability to Oracle Transactional Event Queues. 
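The removed `ConsumerConfigTest` above exercised `ConsumerConfig.addDeserializerToConfig`, which accepts deserializers either as class names already present in the configuration or as instances passed alongside it. Below is a minimal sketch of the two routes, using only names that appear in the removed test; it assumes the 0.8-era `org.oracle.okafka` packages that this diff deletes.

```java
import java.util.Properties;

import org.oracle.okafka.clients.consumer.ConsumerConfig;
import org.oracle.okafka.common.serialization.StringDeserializer;

public class DeserializerConfigSketch {
    public static void main(String[] args) {
        // Route 1: name the deserializer classes directly in the configuration.
        Properties byName = new Properties();
        byName.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        byName.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());

        // Route 2: pass instances and let addDeserializerToConfig fill in the same keys,
        // which is what the removed test asserted.
        Properties byInstance = ConsumerConfig.addDeserializerToConfig(
                new Properties(), new StringDeserializer(), new StringDeserializer());

        System.out.println(byInstance.getProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG));
    }
}
```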
- * - */ - -package org.oracle.okafka.clients.consumer; - -import org.oracle.okafka.common.header.internals.RecordHeaders; -import org.oracle.okafka.common.record.TimestampType; -import org.junit.Test; - -import static org.junit.Assert.assertEquals; - -public class ConsumerRecordTest { - - @Test - @SuppressWarnings("deprecation") - public void testOldConstructor() { - String topic = "topic"; - int partition = 0; - long offset = 23; - String key = "key"; - String value = "value"; - - ConsumerRecord record = new ConsumerRecord<>(topic, partition, offset, key, value); - assertEquals(topic, record.topic()); - assertEquals(partition, record.partition()); - assertEquals(offset, record.offset()); - assertEquals(key, record.key()); - assertEquals(value, record.value()); - assertEquals(TimestampType.NO_TIMESTAMP_TYPE, record.timestampType()); - assertEquals(ConsumerRecord.NO_TIMESTAMP, record.timestamp()); - assertEquals(ConsumerRecord.NULL_CHECKSUM, record.checksum()); - assertEquals(ConsumerRecord.NULL_SIZE, record.serializedKeySize()); - assertEquals(ConsumerRecord.NULL_SIZE, record.serializedValueSize()); - assertEquals(new RecordHeaders(), record.headers()); - } - -} diff --git a/clients/src/test/java/org/oracle/okafka/clients/consumer/ConsumerRecordsTest.java b/clients/src/test/java/org/oracle/okafka/clients/consumer/ConsumerRecordsTest.java deleted file mode 100644 index 7505ac2..0000000 --- a/clients/src/test/java/org/oracle/okafka/clients/consumer/ConsumerRecordsTest.java +++ /dev/null @@ -1,70 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.oracle.oorg/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * 04/20/2020: This file is modified to support Kafka Java Client compatability to Oracle Transactional Event Queues. 
- * - */ - -package org.oracle.okafka.clients.consumer; - -import static org.junit.Assert.assertEquals; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Iterator; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import org.oracle.okafka.common.TopicPartition; -import org.oracle.okafka.common.record.TimestampType; -import org.junit.Test; - -public class ConsumerRecordsTest { - - @Test - public void iterator() throws Exception { - - Map>> records = new LinkedHashMap<>(); - - String topic = "topic"; - records.put(new TopicPartition(topic, 0), new ArrayList>()); - ConsumerRecord record1 = new ConsumerRecord<>(topic, 1, 0, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, 1, "value1"); - ConsumerRecord record2 = new ConsumerRecord<>(topic, 1, 1, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, 2, "value2"); - records.put(new TopicPartition(topic, 1), Arrays.asList(record1, record2)); - records.put(new TopicPartition(topic, 2), new ArrayList>()); - - ConsumerRecords consumerRecords = new ConsumerRecords<>(records); - Iterator> iter = consumerRecords.iterator(); - - int c = 0; - for (; iter.hasNext(); c++) { - ConsumerRecord record = iter.next(); - assertEquals(1, record.partition()); - assertEquals(topic, record.topic()); - assertEquals(c, record.offset()); - } - assertEquals(2, c); - } -} diff --git a/clients/src/test/java/org/oracle/okafka/clients/consumer/SerializeCompatibilityOffsetAndMetadataTest.java b/clients/src/test/java/org/oracle/okafka/clients/consumer/SerializeCompatibilityOffsetAndMetadataTest.java deleted file mode 100644 index 24ac5b4..0000000 --- a/clients/src/test/java/org/oracle/okafka/clients/consumer/SerializeCompatibilityOffsetAndMetadataTest.java +++ /dev/null @@ -1,77 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.oracle.oorg/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * 04/20/2020: This file is modified to support Kafka Java Client compatability to Oracle Transactional Event Queues. - * - */ - -package org.oracle.okafka.clients.consumer; - -import org.oracle.okafka.common.utils.Serializer; -import org.junit.Test; - -import java.io.IOException; - - -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.assertEquals; - -/** - * This test case ensures OffsetAndMetadata class is serializable and is serialization compatible. - * Note: this ensures that the current code can deserialize data serialized with older versions of the code, but not the reverse. - * That is, older code won't necessarily be able to deserialize data serialized with newer code. 
- */ -public class SerializeCompatibilityOffsetAndMetadataTest { - private String metadata = "test commit metadata"; - private String fileName = "serializedData/offsetAndMetadataSerializedfile"; - private long offset = 10; - - private void checkValues(OffsetAndMetadata deSerOAM) { - //assert deserialized values are same as original - assertEquals("Offset should be " + offset + " but got " + deSerOAM.offset(), offset, deSerOAM.offset()); - assertEquals("metadata should be " + metadata + " but got " + deSerOAM.metadata(), metadata, deSerOAM.metadata()); - } - - @Test - public void testSerializationRoundtrip() throws IOException, ClassNotFoundException { - //assert OffsetAndMetadata is serializable - OffsetAndMetadata origOAM = new OffsetAndMetadata(offset, metadata); - byte[] byteArray = Serializer.serialize(origOAM); - - //deserialize the byteArray and check if the values are same as original - Object deserializedObject = Serializer.deserialize(byteArray); - assertTrue(deserializedObject instanceof OffsetAndMetadata); - checkValues((OffsetAndMetadata) deserializedObject); - } - - /*@Test - public void testOffsetMetadataSerializationCompatibility() throws IOException, ClassNotFoundException { - // assert serialized OffsetAndMetadata object in file (oamserializedfile under resources folder) is - // deserializable into OffsetAndMetadata and is compatible - Object deserializedObject = Serializer.deserialize(fileName); - assertTrue(deserializedObject instanceof OffsetAndMetadata); - checkValues((OffsetAndMetadata) deserializedObject); - }*/ -} diff --git a/clients/src/test/java/org/oracle/okafka/clients/producer/KafkaProducerTest.java b/clients/src/test/java/org/oracle/okafka/clients/producer/KafkaProducerTest.java deleted file mode 100644 index 9a64fe9..0000000 --- a/clients/src/test/java/org/oracle/okafka/clients/producer/KafkaProducerTest.java +++ /dev/null @@ -1,560 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.oracle.oorg/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * 04/20/2020: This file is modified to support Kafka Java Client compatability to Oracle Transactional Event Queues. 
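The removed `SerializeCompatibilityOffsetAndMetadataTest` above round-trips an `OffsetAndMetadata` through the project's `Serializer` helper. As a point of reference, the sketch below shows the equivalent round trip with plain JDK object streams; it illustrates the general mechanism only and is not the project's own `Serializer` implementation.

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;

public class SerializationRoundTripSketch {
    static byte[] toBytes(Serializable object) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        try (ObjectOutputStream out = new ObjectOutputStream(bytes)) {
            out.writeObject(object);
        }
        return bytes.toByteArray();
    }

    static Object fromBytes(byte[] data) throws IOException, ClassNotFoundException {
        try (ObjectInputStream in = new ObjectInputStream(new ByteArrayInputStream(data))) {
            return in.readObject();
        }
    }

    public static void main(String[] args) throws Exception {
        // Any Serializable value works for the illustration; the removed test used OffsetAndMetadata.
        String original = "test commit metadata";
        Object copy = fromBytes(toBytes(original));
        System.out.println(original.equals(copy));   // true
    }
}
```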
- * - */ - -package org.oracle.okafka.clients.producer; - -import org.oracle.okafka.clients.CommonClientConfigs; -import org.oracle.okafka.clients.Metadata; -//import org.oracle.okafka.clients.MockClient; -import org.oracle.okafka.clients.producer.internals.ProducerInterceptors; -import org.oracle.okafka.common.Cluster; -import org.oracle.okafka.common.KafkaException; -import org.oracle.okafka.common.Node; -import org.oracle.okafka.common.PartitionInfo; -import org.oracle.okafka.common.TopicPartition; -import org.oracle.okafka.common.config.ConfigException; -import org.oracle.okafka.common.errors.InterruptException; -import org.oracle.okafka.common.errors.TimeoutException; -import org.oracle.okafka.common.header.internals.RecordHeader; -import org.oracle.okafka.common.internals.ClusterResourceListeners; -import org.oracle.okafka.common.metrics.Sensor; -import org.oracle.okafka.common.serialization.ByteArraySerializer; -import org.oracle.okafka.common.serialization.ExtendedSerializer; -import org.oracle.okafka.common.serialization.StringSerializer; -import org.oracle.okafka.common.utils.MockTime; -import org.oracle.okafka.common.utils.Time; -import org.oracle.okafka.test.MockMetricsReporter; -import org.oracle.okafka.test.MockPartitioner; -import org.oracle.okafka.test.MockProducerInterceptor; -import org.oracle.okafka.test.MockSerializer; -import org.oracle.okafka.test.TestCondition; -import org.oracle.okafka.test.TestUtils; -import org.oracle.okafka.clients.MockClient; -import org.easymock.EasyMock; -import org.junit.Assert; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.powermock.api.easymock.PowerMock; -import org.powermock.api.support.membermodification.MemberModifier; -import org.powermock.core.classloader.annotations.PowerMockIgnore; -import org.powermock.core.classloader.annotations.PrepareOnlyThisForTest; -import org.powermock.modules.junit4.PowerMockRunner; - -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.Map; -import java.util.Properties; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.Future; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicReference; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -@RunWith(PowerMockRunner.class) -@PowerMockIgnore("javax.management.*") -public class KafkaProducerTest { - - @Test - public void testConstructorWithSerializers() { - Properties producerProps = new Properties(); - producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9000"); - producerProps.put(ProducerConfig.ORACLE_INSTANCE_NAME, "instancename"); - producerProps.put(ProducerConfig.ORACLE_SERVICE_NAME, "servicename"); - producerProps.put(ProducerConfig.ORACLE_NET_TNS_ADMIN, "/temp"); - new KafkaProducer<>(producerProps, new ByteArraySerializer(), new ByteArraySerializer()).close(); - } - - @Test(expected = ConfigException.class) - public void testNoSerializerProvided() { - Properties producerProps = new Properties(); - producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9000"); - producerProps.put(ProducerConfig.ORACLE_INSTANCE_NAME, "instancename"); - producerProps.put(ProducerConfig.ORACLE_SERVICE_NAME, "servicename"); - producerProps.put(ProducerConfig.ORACLE_NET_TNS_ADMIN, "/temp"); - new KafkaProducer(producerProps); - } - - - @Test - public void 
testConstructorFailureCloseResource() { - Properties props = new Properties(); - props.setProperty(ProducerConfig.CLIENT_ID_CONFIG, "testConstructorClose"); - props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "some.invalid.hostname.foo.bar.local:9999"); - props.put(ProducerConfig.ORACLE_INSTANCE_NAME, "instancename"); - props.put(ProducerConfig.ORACLE_SERVICE_NAME, "servicename"); - props.put(ProducerConfig.ORACLE_NET_TNS_ADMIN, "/temp"); - props.setProperty(ProducerConfig.METRIC_REPORTER_CLASSES_CONFIG, MockMetricsReporter.class.getName()); - - final int oldInitCount = MockMetricsReporter.INIT_COUNT.get(); - final int oldCloseCount = MockMetricsReporter.CLOSE_COUNT.get(); - try (KafkaProducer producer = new KafkaProducer<>(props, new ByteArraySerializer(), new ByteArraySerializer())) { - fail("should have caught an exception and returned"); - } catch (KafkaException e) { - assertEquals(oldInitCount + 1, MockMetricsReporter.INIT_COUNT.get()); - assertEquals(oldCloseCount + 1, MockMetricsReporter.CLOSE_COUNT.get()); - assertEquals("Failed to construct kafka producer", e.getMessage()); - } - } - - @Test - public void testSerializerClose() throws Exception { - Map configs = new HashMap<>(); - configs.put(ProducerConfig.CLIENT_ID_CONFIG, "testConstructorClose"); - configs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); - configs.put(ProducerConfig.ORACLE_INSTANCE_NAME, "instancename"); - configs.put(ProducerConfig.ORACLE_SERVICE_NAME, "servicename"); - configs.put(ProducerConfig.ORACLE_NET_TNS_ADMIN, "/tmp"); - configs.put(ProducerConfig.METRIC_REPORTER_CLASSES_CONFIG, MockMetricsReporter.class.getName()); - final int oldInitCount = MockSerializer.INIT_COUNT.get(); - final int oldCloseCount = MockSerializer.CLOSE_COUNT.get(); - - KafkaProducer producer = new KafkaProducer( - configs, new MockSerializer(), new MockSerializer()); - assertEquals(oldInitCount + 2, MockSerializer.INIT_COUNT.get()); - assertEquals(oldCloseCount, MockSerializer.CLOSE_COUNT.get()); - - producer.close(); - assertEquals(oldInitCount + 2, MockSerializer.INIT_COUNT.get()); - assertEquals(oldCloseCount + 2, MockSerializer.CLOSE_COUNT.get()); - } - - @Test - public void testInterceptorConstructClose() throws Exception { - try { - Properties props = new Properties(); - // test with client ID assigned by KafkaProducer - props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); - props.put(ProducerConfig.ORACLE_INSTANCE_NAME, "instancename"); - props.put(ProducerConfig.ORACLE_SERVICE_NAME, "servicename"); - props.put(ProducerConfig.ORACLE_NET_TNS_ADMIN, "/temp"); - props.setProperty(ProducerConfig.INTERCEPTOR_CLASSES_CONFIG, MockProducerInterceptor.class.getName()); - props.setProperty(MockProducerInterceptor.APPEND_STRING_PROP, "something"); - - KafkaProducer producer = new KafkaProducer( - props, new StringSerializer(), new StringSerializer()); - assertEquals(1, MockProducerInterceptor.INIT_COUNT.get()); - assertEquals(0, MockProducerInterceptor.CLOSE_COUNT.get()); - - // Cluster metadata will only be updated on calling onSend. 
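The constructor tests above all build a `KafkaProducer` from the same handful of settings: the standard Kafka properties plus the Oracle-specific `ORACLE_INSTANCE_NAME`, `ORACLE_SERVICE_NAME`, and `ORACLE_NET_TNS_ADMIN` entries. The sketch below is a minimal construction example using only those names as they appear in the deleted tests; the endpoint and directory values are placeholders, and against a real database the bootstrap address would have to point at the listener of the target instance.

```java
import java.util.Properties;

import org.oracle.okafka.clients.producer.KafkaProducer;
import org.oracle.okafka.clients.producer.ProducerConfig;
import org.oracle.okafka.common.serialization.StringSerializer;

public class ProducerConstructionSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        // Placeholder connection details; real values depend on the target database listener.
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:1521");
        props.put(ProducerConfig.ORACLE_INSTANCE_NAME, "instancename");
        props.put(ProducerConfig.ORACLE_SERVICE_NAME, "servicename");
        props.put(ProducerConfig.ORACLE_NET_TNS_ADMIN, "/tmp");

        // Construction mirrors the removed tests; close() releases serializers and metric reporters.
        try (KafkaProducer<String, String> producer =
                     new KafkaProducer<>(props, new StringSerializer(), new StringSerializer())) {
            // producer.send(...) would enqueue records into the configured TxEventQ topic.
        }
    }
}
```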
- Assert.assertNull(MockProducerInterceptor.CLUSTER_META.get()); - - producer.close(); - assertEquals(1, MockProducerInterceptor.INIT_COUNT.get()); - assertEquals(1, MockProducerInterceptor.CLOSE_COUNT.get()); - } finally { - // cleanup since we are using mutable static variables in MockProducerInterceptor - MockProducerInterceptor.resetCounters(); - } - } - - @Test - public void testPartitionerClose() throws Exception { - try { - Properties props = new Properties(); - props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); - props.put(ProducerConfig.ORACLE_INSTANCE_NAME, "instancename"); - props.put(ProducerConfig.ORACLE_SERVICE_NAME, "servicename"); - props.put(ProducerConfig.ORACLE_NET_TNS_ADMIN, "/temp"); - MockPartitioner.resetCounters(); - props.setProperty(ProducerConfig.PARTITIONER_CLASS_CONFIG, MockPartitioner.class.getName()); - - KafkaProducer producer = new KafkaProducer( - props, new StringSerializer(), new StringSerializer()); - assertEquals(1, MockPartitioner.INIT_COUNT.get()); - assertEquals(0, MockPartitioner.CLOSE_COUNT.get()); - - producer.close(); - assertEquals(1, MockPartitioner.INIT_COUNT.get()); - assertEquals(1, MockPartitioner.CLOSE_COUNT.get()); - } finally { - // cleanup since we are using mutable static variables in MockPartitioner - MockPartitioner.resetCounters(); - } - } - - @Test - public void shouldCloseProperlyAndThrowIfInterrupted() throws Exception { - Properties props = new Properties(); - props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); - props.put(ProducerConfig.ORACLE_INSTANCE_NAME, "instancename"); - props.put(ProducerConfig.ORACLE_SERVICE_NAME, "servicename"); - props.put(ProducerConfig.ORACLE_NET_TNS_ADMIN, "/temp"); - props.setProperty(ProducerConfig.PARTITIONER_CLASS_CONFIG, MockPartitioner.class.getName()); - props.setProperty(ProducerConfig.BATCH_SIZE_CONFIG, "1"); - - Time time = new MockTime(); - Cluster cluster = TestUtils.singletonCluster("topic", 1); - Node node = cluster.nodes().get(0); - - Metadata metadata = new Metadata(0, Long.MAX_VALUE, true, null); - metadata.update(cluster, Collections.emptySet(), time.milliseconds()); - - MockClient client = new MockClient(time, metadata); - client.setNode(node); - - final Producer producer = new KafkaProducer<>( - new ProducerConfig(ProducerConfig.addSerializerToConfig(props, new StringSerializer(), new StringSerializer())), - new StringSerializer(), new StringSerializer(), metadata, client); - - ExecutorService executor = Executors.newSingleThreadExecutor(); - final AtomicReference closeException = new AtomicReference<>(); - try { - Future future = executor.submit(new Runnable() { - @Override - public void run() { - producer.send(new ProducerRecord<>("topic", "key", "value")); - try { - producer.close(); - fail("Close should block and throw."); - } catch (Exception e) { - closeException.set(e); - } - } - }); - - // Close producer should not complete until send succeeds - try { - future.get(100, TimeUnit.MILLISECONDS); - fail("Close completed without waiting for send"); - } catch (java.util.concurrent.TimeoutException expected) { } - - // Ensure send has started - //client.waitForRequests(1, 1000); - - assertTrue("Close terminated prematurely", future.cancel(true)); - - TestUtils.waitForCondition(new TestCondition() { - @Override - public boolean conditionMet() { - return closeException.get() != null; - } - }, "InterruptException did not occur within timeout."); - - assertTrue("Expected exception not thrown " + closeException, 
closeException.get() instanceof InterruptException); - } finally { - executor.shutdownNow(); - } - - } -/* - @PrepareOnlyThisForTest(Metadata.class) - @Test - public void testMetadataFetch() throws Exception { - Properties props = new Properties(); - props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:1521"); - props.put(ProducerConfig.ORACLE_INSTANCE_NAME, "instancename"); - props.put(ProducerConfig.ORACLE_SERVICE_NAME, "serviceid.regress.rdbms.dev.us.oracle.com"); - KafkaProducer producer = new KafkaProducer<>(props, new StringSerializer(), new StringSerializer()); - Metadata metadata = PowerMock.createNiceMock(Metadata.class); - MemberModifier.field(KafkaProducer.class, "metadata").set(producer, metadata); - - String topic = "topic"; - ProducerRecord record = new ProducerRecord<>(topic, "value"); - Collection nodes = Collections.singletonList(new Node(0, "host1", 1000, "", "")); - final Cluster emptyCluster = new Cluster(null, nodes, - Collections.emptySet(), - Collections.emptySet(), - Collections.emptySet(), null); - final Cluster cluster = new Cluster( - "dummy", - Collections.singletonList(new Node(0, "host1", 1000, "", "")), - Arrays.asList(new PartitionInfo(topic, 0, null, null, null)), - Collections.emptySet(), - Collections.emptySet(), null); - - // Expect exactly one fetch for each attempt to refresh while topic metadata is not available - final int refreshAttempts = 5; - EasyMock.expect(metadata.fetch()).andReturn(emptyCluster).times(refreshAttempts - 1); - EasyMock.expect(metadata.fetch()).andReturn(cluster).once(); - EasyMock.expect(metadata.fetch()).andThrow(new IllegalStateException("Unexpected call to metadata.fetch()")).anyTimes(); - PowerMock.replay(metadata); - producer.send(record); - PowerMock.verify(metadata); - - // Expect exactly one fetch if topic metadata is available - PowerMock.reset(metadata); - EasyMock.expect(metadata.fetch()).andReturn(cluster).once(); - EasyMock.expect(metadata.fetch()).andThrow(new IllegalStateException("Unexpected call to metadata.fetch()")).anyTimes(); - PowerMock.replay(metadata); - producer.send(record, null); - PowerMock.verify(metadata); - - // Expect exactly one fetch if topic metadata is available - PowerMock.reset(metadata); - EasyMock.expect(metadata.fetch()).andReturn(cluster).once(); - EasyMock.expect(metadata.fetch()).andThrow(new IllegalStateException("Unexpected call to metadata.fetch()")).anyTimes(); - PowerMock.replay(metadata); - producer.partitionsFor(topic); - PowerMock.verify(metadata); - } - - @PrepareOnlyThisForTest(Metadata.class) - @Test - public void testMetadataFetchOnStaleMetadata() throws Exception { - Properties props = new Properties(); - props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); - props.put(ProducerConfig.ORACLE_INSTANCE_NAME, "instancename"); - props.put(ProducerConfig.ORACLE_SERVICE_NAME, "servicename"); - props.put(ProducerConfig.ORACLE_NET_TNS_ADMIN, "/temp"); - KafkaProducer producer = new KafkaProducer<>(props, new StringSerializer(), new StringSerializer()); - Metadata metadata = PowerMock.createNiceMock(Metadata.class); - MemberModifier.field(KafkaProducer.class, "metadata").set(producer, metadata); - - String topic = "topic"; - ProducerRecord initialRecord = new ProducerRecord<>(topic, "value"); - // Create a record with a partition higher than the initial (outdated) partition range - ProducerRecord extendedRecord = new ProducerRecord<>(topic, 2, null, "value"); - Collection nodes = Collections.singletonList(new Node(0, "host1", 1000, "", "")); 
- final Cluster emptyCluster = new Cluster(null, nodes, - Collections.emptySet(), - Collections.emptySet(), - Collections.emptySet(), null); - final Cluster initialCluster = new Cluster( - "dummy", - Collections.singletonList(new Node(0, "host1", 1000, "", "")), - Arrays.asList(new PartitionInfo(topic, 0, null, null, null)), - Collections.emptySet(), - Collections.emptySet(), null); - final Cluster extendedCluster = new Cluster( - "dummy", - Collections.singletonList(new Node(0, "host1", 1000, "", "")), - Arrays.asList( - new PartitionInfo(topic, 0, null, null, null), - new PartitionInfo(topic, 1, null, null, null), - new PartitionInfo(topic, 2, null, null, null)), - Collections.emptySet(), - Collections.emptySet(), null); - - // Expect exactly one fetch for each attempt to refresh while topic metadata is not available - final int refreshAttempts = 5; - EasyMock.expect(metadata.fetch()).andReturn(emptyCluster).times(refreshAttempts - 1); - EasyMock.expect(metadata.fetch()).andReturn(initialCluster).once(); - EasyMock.expect(metadata.fetch()).andThrow(new IllegalStateException("Unexpected call to metadata.fetch()")).anyTimes(); - PowerMock.replay(metadata); - producer.send(initialRecord); - PowerMock.verify(metadata); - - // Expect exactly one fetch if topic metadata is available and records are still within range - PowerMock.reset(metadata); - EasyMock.expect(metadata.fetch()).andReturn(initialCluster).once(); - EasyMock.expect(metadata.fetch()).andThrow(new IllegalStateException("Unexpected call to metadata.fetch()")).anyTimes(); - PowerMock.replay(metadata); - producer.send(initialRecord, null); - PowerMock.verify(metadata); - - // Expect exactly two fetches if topic metadata is available but metadata response still returns - // the same partition size (either because metadata are still stale at the broker too or because - // there weren't any partitions added in the first place). 
- PowerMock.reset(metadata); - EasyMock.expect(metadata.fetch()).andReturn(initialCluster).once(); - EasyMock.expect(metadata.fetch()).andReturn(initialCluster).once(); - EasyMock.expect(metadata.fetch()).andThrow(new IllegalStateException("Unexpected call to metadata.fetch()")).anyTimes(); - PowerMock.replay(metadata); - try { - producer.send(extendedRecord, null); - fail("Expected KafkaException to be raised"); - } catch (KafkaException e) { - // expected - } - PowerMock.verify(metadata); - - // Expect exactly two fetches if topic metadata is available but outdated for the given record - PowerMock.reset(metadata); - EasyMock.expect(metadata.fetch()).andReturn(initialCluster).once(); - EasyMock.expect(metadata.fetch()).andReturn(extendedCluster).once(); - EasyMock.expect(metadata.fetch()).andThrow(new IllegalStateException("Unexpected call to metadata.fetch()")).anyTimes(); - PowerMock.replay(metadata); - producer.send(extendedRecord, null); - PowerMock.verify(metadata); - } -*/ - @Test - public void testTopicRefreshInMetadata() throws Exception { - Properties props = new Properties(); - props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); - props.put(ProducerConfig.ORACLE_INSTANCE_NAME, "instancename"); - props.put(ProducerConfig.ORACLE_SERVICE_NAME, "servicename"); - props.put(ProducerConfig.ORACLE_NET_TNS_ADMIN, "/temp"); - props.setProperty(ProducerConfig.MAX_BLOCK_MS_CONFIG, "600000"); - KafkaProducer producer = new KafkaProducer<>(props, new StringSerializer(), new StringSerializer()); - long refreshBackoffMs = 500L; - long metadataExpireMs = 60000L; - final Metadata metadata = new Metadata(refreshBackoffMs, metadataExpireMs, true, - true, new ClusterResourceListeners(), null); - final Time time = new MockTime(); - MemberModifier.field(KafkaProducer.class, "metadata").set(producer, metadata); - MemberModifier.field(KafkaProducer.class, "time").set(producer, time); - final String topic = "topic"; - - Thread t = new Thread() { - @Override - public void run() { - long startTimeMs = System.currentTimeMillis(); - for (int i = 0; i < 10; i++) { - while (!metadata.updateRequested() && System.currentTimeMillis() - startTimeMs < 1000) { - //TODO return to this issue. 
- //syield(); - } - metadata.update(Cluster.empty(), Collections.singleton(topic), time.milliseconds()); - time.sleep(60 * 1000L); - } - } - }; - t.start(); - try { - producer.partitionsFor(topic); - fail("Expect TimeoutException"); - } catch (TimeoutException e) { - // skip - } - Assert.assertTrue("Topic should still exist in metadata", metadata.containsTopic(topic)); - } - - @Test - public void closeShouldBeIdempotent() { - Properties producerProps = new Properties(); - producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9000"); - producerProps.put(ProducerConfig.ORACLE_INSTANCE_NAME, "instancename"); - producerProps.put(ProducerConfig.ORACLE_SERVICE_NAME, "servicename"); - producerProps.put(ProducerConfig.ORACLE_NET_TNS_ADMIN, "/temp"); - Producer producer = new KafkaProducer<>(producerProps, new ByteArraySerializer(), new ByteArraySerializer()); - producer.close(); - producer.close(); - } - - /* @PrepareOnlyThisForTest(Metadata.class) - @Test - public void testInterceptorPartitionSetOnTooLargeRecord() throws Exception { - Properties props = new Properties(); - props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); - props.put(ProducerConfig.ORACLE_INSTANCE_NAME, "instancename"); - props.put(ProducerConfig.ORACLE_SERVICE_NAME, "servicename"); - props.put(ProducerConfig.ORACLE_NET_TNS_ADMIN, "/temp"); - props.setProperty(ProducerConfig.MAX_REQUEST_SIZE_CONFIG, "1"); - String topic = "topic"; - ProducerRecord record = new ProducerRecord<>(topic, "value"); - - KafkaProducer producer = new KafkaProducer<>(props, new StringSerializer(), - new StringSerializer()); - Metadata metadata = PowerMock.createNiceMock(Metadata.class); - MemberModifier.field(KafkaProducer.class, "metadata").set(producer, metadata); - final Cluster cluster = new Cluster( - "dummy", - Collections.singletonList(new Node(0, "host1", 1000, "", "")), - Arrays.asList(new PartitionInfo(topic, 0, null, null, null)), - Collections.emptySet(), - Collections.emptySet(), null); - EasyMock.expect(metadata.fetch()).andReturn(cluster).once(); - - // Mock interceptors field - ProducerInterceptors interceptors = PowerMock.createMock(ProducerInterceptors.class); - EasyMock.expect(interceptors.onSend(record)).andReturn(record); - interceptors.onSendError(EasyMock.eq(record), EasyMock.notNull(), EasyMock.notNull()); - EasyMock.expectLastCall(); - MemberModifier.field(KafkaProducer.class, "interceptors").set(producer, interceptors); - - PowerMock.replay(metadata); - EasyMock.replay(interceptors); - producer.send(record); - - EasyMock.verify(interceptors); - } -*/ - @Test - public void testPartitionsForWithNullTopic() { - Properties props = new Properties(); - props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9000"); - props.put(ProducerConfig.ORACLE_INSTANCE_NAME, "instancename"); - props.put(ProducerConfig.ORACLE_SERVICE_NAME, "servicename"); - props.put(ProducerConfig.ORACLE_NET_TNS_ADMIN, "/temp"); - try (KafkaProducer producer = new KafkaProducer<>(props, new ByteArraySerializer(), new ByteArraySerializer())) { - producer.partitionsFor(null); - fail("Expected NullPointerException to be raised"); - } catch (NullPointerException e) { - // expected - } - } - - @Test - public void testCloseWhenWaitingForMetadataUpdate() throws InterruptedException { - Properties props = new Properties(); - props.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, Long.MAX_VALUE); - props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9000"); - props.put(ProducerConfig.ORACLE_INSTANCE_NAME, 
"instancename"); - props.put(ProducerConfig.ORACLE_SERVICE_NAME, "servicename"); - props.put(ProducerConfig.ORACLE_NET_TNS_ADMIN, "/temp"); - - // Simulate a case where metadata for a particular topic is not available. This will cause KafkaProducer#send to - // block in Metadata#awaitUpdate for the configured max.block.ms. When close() is invoked, KafkaProducer#send should - // return with a KafkaException. - String topicName = "test"; - Time time = new MockTime(); - Cluster cluster = TestUtils.singletonCluster(); - Node node = cluster.nodes().get(0); - Metadata metadata = new Metadata(0, Long.MAX_VALUE, false, null); - metadata.update(cluster, Collections.emptySet(), time.milliseconds()); - - MockClient client = new MockClient(time, metadata); - client.setNode(node); - - Producer producer = new KafkaProducer<>( - new ProducerConfig(ProducerConfig.addSerializerToConfig(props, new StringSerializer(), new StringSerializer())), - new StringSerializer(), new StringSerializer(), metadata, client); - - ExecutorService executor = Executors.newSingleThreadExecutor(); - final AtomicReference sendException = new AtomicReference<>(); - try { - executor.submit(() -> { - try { - // Metadata for topic "test" will not be available which will cause us to block indefinitely until - // KafkaProducer#close is invoked. - producer.send(new ProducerRecord<>(topicName, "key", "value")); - fail(); - } catch (Exception e) { - sendException.set(e); - } - }); - - // Wait until metadata update for the topic has been requested - TestUtils.waitForCondition(() -> metadata.containsTopic(topicName), "Timeout when waiting for topic to be added to metadata"); - producer.close(0, TimeUnit.MILLISECONDS); - TestUtils.waitForCondition(() -> sendException.get() != null, "No producer exception within timeout"); - assertEquals(KafkaException.class, sendException.get().getClass()); - } finally { - executor.shutdownNow(); - } - } - -} diff --git a/clients/src/test/java/org/oracle/okafka/clients/producer/ProducerRecordTest.java b/clients/src/test/java/org/oracle/okafka/clients/producer/ProducerRecordTest.java deleted file mode 100644 index d1de07c..0000000 --- a/clients/src/test/java/org/oracle/okafka/clients/producer/ProducerRecordTest.java +++ /dev/null @@ -1,91 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * 04/20/2020: This file is modified to support Kafka Java Client compatability to Oracle Transactional Event Queues. 
- * - */ - -package org.oracle.okafka.clients.producer; - -import org.junit.Test; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.fail; - -public class ProducerRecordTest { - - @Test - public void testEqualsAndHashCode() { - ProducerRecord producerRecord = new ProducerRecord<>("test", 1, "key", 1); - assertEquals(producerRecord, producerRecord); - assertEquals(producerRecord.hashCode(), producerRecord.hashCode()); - - ProducerRecord equalRecord = new ProducerRecord<>("test", 1, "key", 1); - assertEquals(producerRecord, equalRecord); - assertEquals(producerRecord.hashCode(), equalRecord.hashCode()); - - ProducerRecord topicMisMatch = new ProducerRecord<>("test-1", 1, "key", 1); - assertFalse(producerRecord.equals(topicMisMatch)); - - ProducerRecord partitionMismatch = new ProducerRecord<>("test", 2, "key", 1); - assertFalse(producerRecord.equals(partitionMismatch)); - - ProducerRecord keyMisMatch = new ProducerRecord<>("test", 1, "key-1", 1); - assertFalse(producerRecord.equals(keyMisMatch)); - - ProducerRecord valueMisMatch = new ProducerRecord<>("test", 1, "key", 2); - assertFalse(producerRecord.equals(valueMisMatch)); - - ProducerRecord nullFieldsRecord = new ProducerRecord<>("topic", null, null, null, null, null); - assertEquals(nullFieldsRecord, nullFieldsRecord); - assertEquals(nullFieldsRecord.hashCode(), nullFieldsRecord.hashCode()); - } - - @Test - public void testInvalidRecords() { - try { - new ProducerRecord<>(null, 0, "key", 1); - fail("Expected IllegalArgumentException to be raised because topic is null"); - } catch (IllegalArgumentException e) { - //expected - } - - try { - new ProducerRecord<>("test", 0, -1L, "key", 1); - fail("Expected IllegalArgumentException to be raised because of negative timestamp"); - } catch (IllegalArgumentException e) { - //expected - } - - try { - new ProducerRecord<>("test", -1, "key", 1); - fail("Expected IllegalArgumentException to be raised because of negative partition"); - } catch (IllegalArgumentException e) { - //expected - } - } - -} \ No newline at end of file diff --git a/clients/src/test/java/org/oracle/okafka/clients/producer/RecordMetadataTest.java b/clients/src/test/java/org/oracle/okafka/clients/producer/RecordMetadataTest.java deleted file mode 100644 index 277b752..0000000 --- a/clients/src/test/java/org/oracle/okafka/clients/producer/RecordMetadataTest.java +++ /dev/null @@ -1,83 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -/* - * 04/20/2020: This file is modified to support Kafka Java Client compatability to Oracle Transactional Event Queues. - * - */ - -package org.oracle.okafka.clients.producer; - -import org.oracle.okafka.common.TopicPartition; -import org.oracle.okafka.common.record.DefaultRecord; -import org.junit.Test; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; - -public class RecordMetadataTest { - - @Test - @SuppressWarnings("deprecation") - public void testConstructionWithMissingRelativeOffset() { - TopicPartition tp = new TopicPartition("foo", 0); - long timestamp = 2340234L; - int keySize = 3; - int valueSize = 5; - Long checksum = -1L; - - RecordMetadata metadata = new RecordMetadata(tp, -1L, -1L, timestamp, checksum, keySize, valueSize); - assertEquals(tp.topic(), metadata.topic()); - assertEquals(tp.partition(), metadata.partition()); - assertEquals(timestamp, metadata.timestamp()); - assertFalse(metadata.hasOffset()); - assertEquals(-1L, metadata.offset()); - assertEquals(checksum.longValue(), metadata.checksum()); - assertEquals(keySize, metadata.serializedKeySize()); - assertEquals(valueSize, metadata.serializedValueSize()); - } - - @Test - @SuppressWarnings("deprecation") - public void testConstructionWithRelativeOffset() { - TopicPartition tp = new TopicPartition("foo", 0); - long timestamp = 2340234L; - int keySize = 3; - int valueSize = 5; - long baseOffset = 15L; - long relativeOffset = 3L; - Long checksum = -1L; - - RecordMetadata metadata = new RecordMetadata(tp, baseOffset, relativeOffset, timestamp, checksum, - keySize, valueSize); - assertEquals(tp.topic(), metadata.topic()); - assertEquals(tp.partition(), metadata.partition()); - assertEquals(timestamp, metadata.timestamp()); - assertEquals((baseOffset << 16) + (relativeOffset), metadata.offset()); - assertEquals(checksum.longValue(), metadata.checksum()); - assertEquals(keySize, metadata.serializedKeySize()); - assertEquals(valueSize, metadata.serializedValueSize()); - } - -} diff --git a/clients/src/test/java/org/oracle/okafka/clients/producer/RecordSendTest.java b/clients/src/test/java/org/oracle/okafka/clients/producer/RecordSendTest.java deleted file mode 100644 index a622c48..0000000 --- a/clients/src/test/java/org/oracle/okafka/clients/producer/RecordSendTest.java +++ /dev/null @@ -1,118 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * 04/20/2020: This file is modified to support Kafka Java Client compatability to Oracle Transactional Event Queues. 
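Both `RecordMetadataTest` above and `RecordSendTest` below assert the same offset arithmetic: the reported offset is the base offset shifted left by 16 bits plus the relative offset. A short worked example of that packing and its inverse follows; only the formula itself comes from the removed tests, and the unpacking assumes the relative offset fits in 16 bits.

```java
public class OffsetEncodingSketch {
    public static void main(String[] args) {
        long baseOffset = 15L;       // values used in the removed testConstructionWithRelativeOffset
        long relativeOffset = 3L;

        // Packing, as asserted by the removed tests: (baseOffset << 16) + relativeOffset
        long offset = (baseOffset << 16) + relativeOffset;   // 15 * 65536 + 3 = 983043

        // Unpacking the two components again.
        long decodedBase = offset >> 16;          // 15
        long decodedRelative = offset & 0xFFFF;   // 3

        System.out.println(offset + " -> base=" + decodedBase + ", relative=" + decodedRelative);
    }
}
```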
- * - */ - -package org.oracle.okafka.clients.producer; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; - - -import org.oracle.okafka.clients.producer.internals.FutureRecordMetadata; -import org.oracle.okafka.clients.producer.internals.ProduceRequestResult; -import org.oracle.okafka.common.TopicPartition; -import org.oracle.okafka.common.errors.CorruptRecordException; -import org.oracle.okafka.common.record.RecordBatch; -import org.junit.Test; - -public class RecordSendTest { - - private final TopicPartition topicPartition = new TopicPartition("test", 0); - private final long baseOffset = 0; - private final int relOffset = 0; - private final List msgIds = Collections.singletonList("ID:00000000000000000000000000660000"); - private final List timeStamps = Collections.singletonList(RecordBatch.NO_TIMESTAMP); - - /** - * Test that waiting on a request that never completes times out - */ - @Test - public void testTimeout() throws Exception { - ProduceRequestResult request = new ProduceRequestResult(topicPartition); - FutureRecordMetadata future = new FutureRecordMetadata(request, relOffset, - RecordBatch.NO_TIMESTAMP, 0L, 0, 0); - assertFalse("Request is not completed", future.isDone()); - try { - future.get(5, TimeUnit.MILLISECONDS); - fail("Should have thrown exception."); - } catch (TimeoutException e) { /* this is good */ - } - - - request.set(msgIds, timeStamps, null); - request.done(); - assertTrue(future.isDone()); - assertEquals((baseOffset << 16) + relOffset, future.get().offset()); - } - - /** - * Test that an asynchronous request will eventually throw the right exception - */ - @Test(expected = ExecutionException.class) - public void testError() throws Exception { - FutureRecordMetadata future = new FutureRecordMetadata(asyncRequest(msgIds, new CorruptRecordException(), 50L), - relOffset, RecordBatch.NO_TIMESTAMP, 0L, 0, 0); - future.get(); - } - - /** - * Test that an asynchronous request will eventually return the right offset - */ - @Test - public void testBlocking() throws Exception { - FutureRecordMetadata future = new FutureRecordMetadata(asyncRequest(msgIds, null, 50L), - relOffset, RecordBatch.NO_TIMESTAMP, 0L, 0, 0); - assertEquals((baseOffset << 16) + relOffset, future.get().offset()); - } - - /* create a new request result that will be completed after the given timeout */ - public ProduceRequestResult asyncRequest(final List msgIds, final RuntimeException error, final long timeout) { - final ProduceRequestResult request = new ProduceRequestResult(topicPartition); - Thread thread = new Thread() { - public void run() { - try { - sleep(timeout); - request.set(msgIds, timeStamps, error); - request.done(); - } catch (InterruptedException e) { } - } - }; - thread.start(); - return request; - } - -} diff --git a/clients/src/test/java/org/oracle/okafka/clients/producer/internals/BufferPoolTest.java b/clients/src/test/java/org/oracle/okafka/clients/producer/internals/BufferPoolTest.java deleted file mode 100644 index 283df45..0000000 --- a/clients/src/test/java/org/oracle/okafka/clients/producer/internals/BufferPoolTest.java +++ /dev/null @@ -1,376 +0,0 @@ -/* -** OKafka Java Client version 0.8. 
-** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * 04/20/2020: This file is modified to support Kafka Java Client compatability to Oracle Transactional Event Queues. - * - */ - -package org.oracle.okafka.clients.producer.internals; - -import org.oracle.okafka.common.MetricName; -import org.oracle.okafka.common.errors.TimeoutException; -import org.oracle.okafka.common.metrics.Metrics; -import org.oracle.okafka.common.metrics.Sensor; -import org.oracle.okafka.common.utils.MockTime; -import org.oracle.okafka.common.utils.Time; -import org.oracle.okafka.clients.producer.internals.BufferPool; -import org.oracle.okafka.test.TestUtils; -import org.junit.After; -import org.junit.Test; - -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.Deque; -import java.util.List; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.locks.Condition; -import org.junit.runner.RunWith; -import org.powermock.core.classloader.annotations.PrepareForTest; -import org.powermock.modules.junit4.PowerMockRunner; - -import static org.easymock.EasyMock.eq; -import static org.easymock.EasyMock.createNiceMock; -import static org.easymock.EasyMock.replay; -import static org.easymock.EasyMock.anyLong; -import static org.easymock.EasyMock.anyDouble; -import static org.easymock.EasyMock.expectLastCall; -import static org.easymock.EasyMock.expect; -import static org.easymock.EasyMock.anyString; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotEquals; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - - -@RunWith(PowerMockRunner.class) -public class BufferPoolTest { - private final MockTime time = new MockTime(); - private final long maxBlockTimeMs = 2000; - private final String metricGroup = "TestMetrics"; - - /** - * Test the simple non-blocking allocation paths - */ - @Test - public void testSimple() throws Exception { - long totalMemory = 64 * 1024; - int size = 1024; - BufferPool pool = new BufferPool(totalMemory, size, time); - ByteBuffer buffer = pool.allocate(size, maxBlockTimeMs); - assertEquals("Buffer size should equal requested size.", size, buffer.limit()); - assertEquals("Unallocated memory should have shrunk", totalMemory - size, pool.unallocatedMemory()); - assertEquals("Available memory should have shrunk", totalMemory - size, pool.availableMemory()); - buffer.putInt(1); - buffer.flip(); - pool.deallocate(buffer); - assertEquals("All 
memory should be available", totalMemory, pool.availableMemory()); - assertEquals("But now some is on the free list", totalMemory - size, pool.unallocatedMemory()); - buffer = pool.allocate(size, maxBlockTimeMs); - assertEquals("Recycled buffer should be cleared.", 0, buffer.position()); - assertEquals("Recycled buffer should be cleared.", buffer.capacity(), buffer.limit()); - pool.deallocate(buffer); - assertEquals("All memory should be available", totalMemory, pool.availableMemory()); - assertEquals("Still a single buffer on the free list", totalMemory - size, pool.unallocatedMemory()); - buffer = pool.allocate(2 * size, maxBlockTimeMs); - pool.deallocate(buffer); - assertEquals("All memory should be available", totalMemory, pool.availableMemory()); - assertEquals("Non-standard size didn't go to the free list.", totalMemory - size, pool.unallocatedMemory()); - } - - /** - * Test that we cannot try to allocate more memory then we have in the whole pool - */ - @Test(expected = IllegalArgumentException.class) - public void testCantAllocateMoreMemoryThanWeHave() throws Exception { - BufferPool pool = new BufferPool(1024, 512,time); - ByteBuffer buffer = pool.allocate(1024, maxBlockTimeMs); - assertEquals(1024, buffer.limit()); - pool.deallocate(buffer); - pool.allocate(1025, maxBlockTimeMs); - } - - /** - * Test that delayed allocation blocks - */ - @Test - public void testDelayedAllocation() throws Exception { - BufferPool pool = new BufferPool(5 * 1024, 1024, time); - ByteBuffer buffer = pool.allocate(1024, maxBlockTimeMs); - CountDownLatch doDealloc = asyncDeallocate(pool, buffer); - CountDownLatch allocation = asyncAllocate(pool, 5 * 1024); - assertEquals("Allocation shouldn't have happened yet, waiting on memory.", 1L, allocation.getCount()); - doDealloc.countDown(); // return the memory - assertTrue("Allocation should succeed soon after de-allocation", allocation.await(1, TimeUnit.SECONDS)); - } - - private CountDownLatch asyncDeallocate(final BufferPool pool, final ByteBuffer buffer) { - final CountDownLatch latch = new CountDownLatch(1); - Thread thread = new Thread() { - public void run() { - try { - latch.await(); - } catch (InterruptedException e) { - e.printStackTrace(); - } - pool.deallocate(buffer); - } - }; - thread.start(); - return latch; - } - - private void delayedDeallocate(final BufferPool pool, final ByteBuffer buffer, final long delayMs) { - Thread thread = new Thread() { - public void run() { - Time.SYSTEM.sleep(delayMs); - pool.deallocate(buffer); - } - }; - thread.start(); - } - - private CountDownLatch asyncAllocate(final BufferPool pool, final int size) { - final CountDownLatch completed = new CountDownLatch(1); - Thread thread = new Thread() { - public void run() { - try { - pool.allocate(size, maxBlockTimeMs); - } catch (InterruptedException e) { - e.printStackTrace(); - } finally { - completed.countDown(); - } - } - }; - thread.start(); - return completed; - } - - /** - * Test if Timeout exception is thrown when there is not enough memory to allocate and the elapsed time is greater than the max specified block time. - * And verify that the allocation should finish soon after the maxBlockTimeMs. 
- */ - @Test - public void testBlockTimeout() throws Exception { - BufferPool pool = new BufferPool(10, 1, Time.SYSTEM); - ByteBuffer buffer1 = pool.allocate(1, maxBlockTimeMs); - ByteBuffer buffer2 = pool.allocate(1, maxBlockTimeMs); - ByteBuffer buffer3 = pool.allocate(1, maxBlockTimeMs); - // First two buffers will be de-allocated within maxBlockTimeMs since the most recent de-allocation - delayedDeallocate(pool, buffer1, maxBlockTimeMs / 2); - delayedDeallocate(pool, buffer2, maxBlockTimeMs); - // The third buffer will be de-allocated after maxBlockTimeMs since the most recent de-allocation - delayedDeallocate(pool, buffer3, maxBlockTimeMs / 2 * 5); - - long beginTimeMs = Time.SYSTEM.milliseconds(); - try { - pool.allocate(10, maxBlockTimeMs); - fail("The buffer allocated more memory than its maximum value 10"); - } catch (TimeoutException e) { - // this is good - } - assertTrue("available memory" + pool.availableMemory(), pool.availableMemory() >= 9 && pool.availableMemory() <= 10); - long endTimeMs = Time.SYSTEM.milliseconds(); - assertTrue("Allocation should finish not much later than maxBlockTimeMs", endTimeMs - beginTimeMs < maxBlockTimeMs + 1000); - } - - /** - * Test if the waiter that is waiting on availability of more memory is cleaned up when a timeout occurs - */ - @Test - public void testCleanupMemoryAvailabilityWaiterOnBlockTimeout() throws Exception { - BufferPool pool = new BufferPool(2, 1, time); - pool.allocate(1, maxBlockTimeMs); - try { - pool.allocate(2, maxBlockTimeMs); - fail("The buffer allocated more memory than its maximum value 2"); - } catch (TimeoutException e) { - // this is good - } - assertTrue(pool.queued() == 0); - } - - /** - * Test if the waiter that is waiting on availability of more memory is cleaned up when an interruption occurs - */ - @Test - public void testCleanupMemoryAvailabilityWaiterOnInterruption() throws Exception { - BufferPool pool = new BufferPool(2, 1, time); - long blockTime = 5000; - pool.allocate(1, maxBlockTimeMs); - Thread t1 = new Thread(new BufferPoolAllocator(pool, blockTime)); - Thread t2 = new Thread(new BufferPoolAllocator(pool, blockTime)); - // start thread t1 which will try to allocate more memory on to the Buffer pool - t1.start(); - // sleep for 500ms. Condition variable c1 associated with pool.allocate() by thread t1 will be inserted in the waiters queue. - Thread.sleep(500); - Deque waiters = pool.waiters(); - // get the condition object associated with pool.allocate() by thread t1 - Condition c1 = waiters.getFirst(); - // start thread t2 which will try to allocate more memory on to the Buffer pool - t2.start(); - // sleep for 500ms. Condition variable c2 associated with pool.allocate() by thread t2 will be inserted in the waiters queue. The waiters queue will have 2 entries c1 and c2. - Thread.sleep(500); - t1.interrupt(); - // sleep for 500ms. 
- Thread.sleep(500); - // get the condition object associated with allocate() by thread t2 - Condition c2 = waiters.getLast(); - t2.interrupt(); - assertNotEquals(c1, c2); - t1.join(); - t2.join(); - // both the allocate() called by threads t1 and t2 should have been interrupted and the waiters queue should be empty - assertEquals(pool.queued(), 0); - } - - private static class BufferPoolAllocator implements Runnable { - BufferPool pool; - long maxBlockTimeMs; - - BufferPoolAllocator(BufferPool pool, long maxBlockTimeMs) { - this.pool = pool; - this.maxBlockTimeMs = maxBlockTimeMs; - } - - @Override - public void run() { - try { - pool.allocate(2, maxBlockTimeMs); - fail("The buffer allocated more memory than its maximum value 2"); - } catch (TimeoutException e) { - // this is good - } catch (InterruptedException e) { - // this can be neglected - } - } - } - - /** - * This test creates lots of threads that hammer on the pool - */ - @Test - public void testStressfulSituation() throws Exception { - int numThreads = 10; - final int iterations = 50000; - final int poolableSize = 1024; - final long totalMemory = numThreads / 2 * poolableSize; - final BufferPool pool = new BufferPool(totalMemory, poolableSize, time); - List threads = new ArrayList(); - for (int i = 0; i < numThreads; i++) - threads.add(new StressTestThread(pool, iterations)); - for (StressTestThread thread : threads) - thread.start(); - for (StressTestThread thread : threads) - thread.join(); - for (StressTestThread thread : threads) - assertTrue("Thread should have completed all iterations successfully.", thread.success.get()); - assertEquals(totalMemory, pool.availableMemory()); - } - - @Test - public void testLargeAvailableMemory() throws Exception { - long memory = 20_000_000_000L; - int poolableSize = 2_000_000_000; - final AtomicInteger freeSize = new AtomicInteger(0); - BufferPool pool = new BufferPool(memory, poolableSize, time) { - @Override - protected ByteBuffer allocateByteBuffer(int size) { - // Ignore size to avoid OOM due to large buffers - return ByteBuffer.allocate(0); - } - - @Override - protected int freeSize() { - return freeSize.get(); - } - }; - pool.allocate(poolableSize, 0); - assertEquals(18_000_000_000L, pool.availableMemory()); - pool.allocate(poolableSize, 0); - assertEquals(16_000_000_000L, pool.availableMemory()); - - // Emulate `deallocate` by increasing `freeSize` - freeSize.incrementAndGet(); - assertEquals(18_000_000_000L, pool.availableMemory()); - freeSize.incrementAndGet(); - assertEquals(20_000_000_000L, pool.availableMemory()); - } - - @Test - public void outOfMemoryOnAllocation() { - BufferPool bufferPool = new BufferPool(1024, 1024, time) { - @Override - protected ByteBuffer allocateByteBuffer(int size) { - throw new OutOfMemoryError(); - } - }; - - try { - bufferPool.allocateByteBuffer(1024); - // should not reach here - fail("Should have thrown OutOfMemoryError"); - } catch (OutOfMemoryError ignored) { - - } - - assertEquals(bufferPool.availableMemory(), 1024); - } - - public static class StressTestThread extends Thread { - private final int iterations; - private final BufferPool pool; - private final long maxBlockTimeMs = 20_000; - public final AtomicBoolean success = new AtomicBoolean(false); - - public StressTestThread(BufferPool pool, int iterations) { - this.iterations = iterations; - this.pool = pool; - } - - public void run() { - try { - for (int i = 0; i < iterations; i++) { - int size; - if (TestUtils.RANDOM.nextBoolean()) - // allocate poolable size - size = 
pool.poolableSize(); - else - // allocate a random size - size = TestUtils.RANDOM.nextInt((int) pool.totalMemory()); - ByteBuffer buffer = pool.allocate(size, maxBlockTimeMs); - pool.deallocate(buffer); - } - success.set(true); - } catch (Exception e) { - e.printStackTrace(); - } - } - } - -} diff --git a/clients/src/test/java/org/oracle/okafka/clients/producer/internals/DefaultPartitionerTest.java b/clients/src/test/java/org/oracle/okafka/clients/producer/internals/DefaultPartitionerTest.java deleted file mode 100644 index 0e89bfe..0000000 --- a/clients/src/test/java/org/oracle/okafka/clients/producer/internals/DefaultPartitionerTest.java +++ /dev/null @@ -1,114 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * 04/20/2020: This file is modified to support Kafka Java Client compatability to Oracle Transactional Event Queues. - * - */ - -package org.oracle.okafka.clients.producer.internals; - -import org.oracle.okafka.clients.producer.Partitioner; -import org.oracle.okafka.common.Cluster; -import org.oracle.okafka.common.Node; -import org.oracle.okafka.common.PartitionInfo; -import org.junit.Test; - -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import static java.util.Arrays.asList; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; - -public class DefaultPartitionerTest { - private byte[] keyBytes = "key".getBytes(); - private Partitioner partitioner = new DefaultPartitioner(); - private Node node0 = new Node(0, "localhost", 99, "", ""); - private Node node1 = new Node(1, "localhost", 100, "", ""); - private Node node2 = new Node(2, "localhost", 101, "", ""); - private Node[] nodes = new Node[] {node0, node1, node2}; - private String topic = "test"; - // Intentionally make the partition list not in partition order to test the edge cases. 
- private List partitions = asList(new PartitionInfo(topic, 1, null, nodes, nodes), - new PartitionInfo(topic, 2, node1, nodes, nodes), - new PartitionInfo(topic, 0, node0, nodes, nodes)); - private Cluster cluster = new Cluster("clusterId", asList(node0, node1, node2), partitions, - Collections.emptySet(), Collections.emptySet(), null); - - @Test - public void testKeyPartitionIsStable() { - int partition = partitioner.partition("test", null, keyBytes, null, null, cluster); - assertEquals("Same key should yield same partition", partition, partitioner.partition("test", null, keyBytes, null, null, cluster)); - } - - @Test - public void testRoundRobinWithUnavailablePartitions() { - // When there are some unavailable partitions, we want to make sure that (1) we always pick an available partition, - // and (2) the available partitions are selected in a round robin way. - int countForPart0 = 0; - int countForPart2 = 0; - for (int i = 1; i <= 100; i++) { - int part = partitioner.partition("test", null, null, null, null, cluster); - assertTrue("We should never choose a leader-less node in round robin", part == 0 || part == 2); - if (part == 0) - countForPart0++; - else - countForPart2++; - } - assertEquals("The distribution between two available partitions should be even", countForPart0, countForPart2); - } - - @Test - public void testRoundRobin() throws InterruptedException { - final String topicA = "topicA"; - final String topicB = "topicB"; - - List allPartitions = asList(new PartitionInfo(topicA, 0, node0, nodes, nodes), - new PartitionInfo(topicA, 1, node1, nodes, nodes), - new PartitionInfo(topicA, 2, node2, nodes, nodes), - new PartitionInfo(topicB, 0, node0, nodes, nodes) - ); - Cluster testCluster = new Cluster("clusterId", asList(node0, node1, node2), allPartitions, - Collections.emptySet(), Collections.emptySet(), null); - - final Map partitionCount = new HashMap<>(); - - for (int i = 0; i < 30; ++i) { - int partition = partitioner.partition(topicA, null, null, null, null, testCluster); - Integer count = partitionCount.get(partition); - if (null == count) count = 0; - partitionCount.put(partition, count + 1); - - if (i % 5 == 0) { - partitioner.partition(topicB, null, null, null, null, testCluster); - } - } - - assertEquals(10, (int) partitionCount.get(0)); - assertEquals(10, (int) partitionCount.get(1)); - assertEquals(10, (int) partitionCount.get(2)); - } -} diff --git a/clients/src/test/java/org/oracle/okafka/clients/producer/internals/ProducerBatchTest.java b/clients/src/test/java/org/oracle/okafka/clients/producer/internals/ProducerBatchTest.java deleted file mode 100644 index 46dccf5..0000000 --- a/clients/src/test/java/org/oracle/okafka/clients/producer/internals/ProducerBatchTest.java +++ /dev/null @@ -1,287 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * 04/20/2020: This file is modified to support Kafka Java Client compatability to Oracle Transactional Event Queues. - * - */ - -package org.oracle.okafka.clients.producer.internals; - -import org.oracle.okafka.clients.producer.Callback; -import org.oracle.okafka.clients.producer.RecordMetadata; -import org.oracle.okafka.common.KafkaException; -import org.oracle.okafka.common.TopicPartition; -import org.oracle.okafka.common.header.Header; -import org.oracle.okafka.common.header.internals.RecordHeader; -import org.oracle.okafka.common.record.CompressionType; -import org.oracle.okafka.common.record.LegacyRecord; -import org.oracle.okafka.common.record.MemoryRecords; -import org.oracle.okafka.common.record.MemoryRecordsBuilder; -import org.oracle.okafka.common.record.Record; -import org.oracle.okafka.common.record.RecordBatch; -import org.oracle.okafka.common.record.TimestampType; -import org.junit.Test; - -import java.nio.ByteBuffer; -import java.util.Arrays; -import java.util.ArrayList; -import java.util.Deque; -import java.util.concurrent.ExecutionException; - -import static org.oracle.okafka.common.record.RecordBatch.MAGIC_VALUE_V0; -import static org.oracle.okafka.common.record.RecordBatch.MAGIC_VALUE_V1; -import static org.oracle.okafka.common.record.RecordBatch.MAGIC_VALUE_V2; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -public class ProducerBatchTest { - - private final long now = 1488748346917L; - - private final MemoryRecordsBuilder memoryRecordsBuilder = MemoryRecords.builder(ByteBuffer.allocate(128), - CompressionType.NONE, TimestampType.CREATE_TIME, 128); - - @Test - public void testChecksumNullForMagicV2() { - ProducerBatch batch = new ProducerBatch(new TopicPartition("topic", 1), memoryRecordsBuilder, now); - FutureRecordMetadata future = batch.tryAppend(now, null, new byte[10], Record.EMPTY_HEADERS, null, now); - assertNotNull(future); - assertNull(future.checksumOrNull()); - } - - @Test - public void testBatchAbort() throws Exception { - ProducerBatch batch = new ProducerBatch(new TopicPartition("topic", 1), memoryRecordsBuilder, now); - MockCallback callback = new MockCallback(); - FutureRecordMetadata future = batch.tryAppend(now, null, new byte[10], Record.EMPTY_HEADERS, callback, now); - - KafkaException exception = new KafkaException(); - batch.abort(exception); - assertTrue(future.isDone()); - assertEquals(1, callback.invocations); - assertEquals(exception, callback.exception); - assertNull(callback.metadata); - - // subsequent completion should be ignored - assertFalse(batch.done(new ArrayList() {{ add("ID:00000000000000000000000000660000"); } }, new ArrayList() { {add(2342342341L);} }, null)); - assertFalse(batch.done(new ArrayList() {{ add("ID:00000000000000000000000000660000"); } }, new ArrayList() { {add(2342342341L);} }, new KafkaException())); - assertEquals(1, callback.invocations); - - assertTrue(future.isDone()); - 
try { - future.get(); - fail("Future should have thrown"); - } catch (ExecutionException e) { - assertEquals(exception, e.getCause()); - } - } - - @Test - public void testBatchCannotAbortTwice() throws Exception { - ProducerBatch batch = new ProducerBatch(new TopicPartition("topic", 1), memoryRecordsBuilder, now); - MockCallback callback = new MockCallback(); - FutureRecordMetadata future = batch.tryAppend(now, null, new byte[10], Record.EMPTY_HEADERS, callback, now); - KafkaException exception = new KafkaException(); - batch.abort(exception); - assertEquals(1, callback.invocations); - assertEquals(exception, callback.exception); - assertNull(callback.metadata); - - try { - batch.abort(new KafkaException()); - fail("Expected exception from abort"); - } catch (IllegalStateException e) { - // expected - } - - assertEquals(1, callback.invocations); - assertTrue(future.isDone()); - try { - future.get(); - fail("Future should have thrown"); - } catch (ExecutionException e) { - assertEquals(exception, e.getCause()); - } - } - - @Test - public void testBatchCannotCompleteTwice() throws Exception { - ProducerBatch batch = new ProducerBatch(new TopicPartition("topic", 1), memoryRecordsBuilder, now); - MockCallback callback = new MockCallback(); - FutureRecordMetadata future = batch.tryAppend(now, null, new byte[10], Record.EMPTY_HEADERS, callback, now); - batch.done(new ArrayList() {{ add("ID:00000000000000000000000000660000"); } }, new ArrayList() { {add(10L);} }, null); - assertEquals(1, callback.invocations); - assertNull(callback.exception); - assertNotNull(callback.metadata); - - try { - batch.done(new ArrayList() {{ add("ID:00000000000000000000000000660001"); } }, new ArrayList() { {add(20L);} }, null); - fail("Expected exception from done"); - } catch (IllegalStateException e) { - // expected - } - - RecordMetadata recordMetadata = future.get(); - assertEquals(0L, recordMetadata.offset()); - assertEquals(10L, recordMetadata.timestamp()); - } - -/* - @Test - public void testSplitPreservesHeaders() { - for (CompressionType compressionType : CompressionType.values()) { - MemoryRecordsBuilder builder = MemoryRecords.builder( - ByteBuffer.allocate(1024), - MAGIC_VALUE_V2, - compressionType, - TimestampType.CREATE_TIME, - 0L); - ProducerBatch batch = new ProducerBatch(new TopicPartition("topic", 1), builder, now); - Header header = new RecordHeader("header-key", "header-value".getBytes()); - - while (true) { - FutureRecordMetadata future = batch.tryAppend( - now, "hi".getBytes(), "there".getBytes(), - new Header[]{header}, null, now); - if (future == null) { - break; - } - } - Deque batches = batch.split(200); - assertTrue("This batch should be split to multiple small batches.", batches.size() >= 2); - - for (ProducerBatch splitProducerBatch : batches) { - for (RecordBatch splitBatch : splitProducerBatch.records().batches()) { - for (Record record : splitBatch) { - assertTrue("Header size should be 1.", record.headers().length == 1); - assertTrue("Header key should be 'header-key'.", record.headers()[0].key().equals("header-key")); - assertTrue("Header value should be 'header-value'.", new String(record.headers()[0].value()).equals("header-value")); - } - } - } - } - } - - @Test - public void testSplitPreservesMagicAndCompressionType() { - for (byte magic : Arrays.asList(MAGIC_VALUE_V0, MAGIC_VALUE_V1, MAGIC_VALUE_V2)) { - for (CompressionType compressionType : CompressionType.values()) { - if (compressionType == CompressionType.NONE && magic < MAGIC_VALUE_V2) - continue; - - MemoryRecordsBuilder 
builder = MemoryRecords.builder(ByteBuffer.allocate(1024), magic, - compressionType, TimestampType.CREATE_TIME, 0L); - - ProducerBatch batch = new ProducerBatch(new TopicPartition("topic", 1), builder, now); - while (true) { - FutureRecordMetadata future = batch.tryAppend(now, "hi".getBytes(), "there".getBytes(), - Record.EMPTY_HEADERS, null, now); - if (future == null) - break; - } - - Deque batches = batch.split(512); - assertTrue(batches.size() >= 2); - - for (ProducerBatch splitProducerBatch : batches) { - assertEquals(magic, splitProducerBatch.magic()); - assertTrue(splitProducerBatch.isSplitBatch()); - - for (RecordBatch splitBatch : splitProducerBatch.records().batches()) { - assertEquals(magic, splitBatch.magic()); - assertEquals(0L, splitBatch.baseOffset()); - assertEquals(compressionType, splitBatch.compressionType()); - } - } - } - } - } -*/ - /** - * A {@link ProducerBatch} configured using a very large linger value and a timestamp preceding its create - * time is interpreted correctly as not expired when the linger time is larger than the difference - * between now and create time by {@link ProducerBatch#maybeExpire(int, long, long, long, boolean)}. - */ - @Test - public void testLargeLingerOldNowExpire() { - ProducerBatch batch = new ProducerBatch(new TopicPartition("topic", 1), memoryRecordsBuilder, now); - // Set `now` to 2ms before the create time. - assertFalse(batch.maybeExpire(10240, 100L, now - 2L, Long.MAX_VALUE, false)); - } - - /** - * A {@link ProducerBatch} configured using a very large retryBackoff value with retry = true and a timestamp - * preceding its create time is interpreted correctly as not expired when the retryBackoff time is larger than the - * difference between now and create time by {@link ProducerBatch#maybeExpire(int, long, long, long, boolean)}. - */ - @Test - public void testLargeRetryBackoffOldNowExpire() { - ProducerBatch batch = new ProducerBatch(new TopicPartition("topic", 1), memoryRecordsBuilder, now); - // Set batch.retry = true - batch.reenqueued(now); - // Set `now` to 2ms before the create time. - assertFalse(batch.maybeExpire(10240, Long.MAX_VALUE, now - 2L, 10240L, false)); - } - - /** - * A {@link ProducerBatch#maybeExpire(int, long, long, long, boolean)} call with a now value before the create - * time of the ProducerBatch is correctly recognized as not expired when invoked with parameter isFull = true. - */ - @Test - public void testLargeFullOldNowExpire() { - ProducerBatch batch = new ProducerBatch(new TopicPartition("topic", 1), memoryRecordsBuilder, now); - // Set `now` to 2ms before the create time. 
- assertFalse(batch.maybeExpire(10240, 10240L, now - 2L, 10240L, true)); - } - - @Test - public void testShouldNotAttemptAppendOnceRecordsBuilderIsClosedForAppends() { - ProducerBatch batch = new ProducerBatch(new TopicPartition("topic", 1), memoryRecordsBuilder, now); - FutureRecordMetadata result0 = batch.tryAppend(now, null, new byte[10], Record.EMPTY_HEADERS, null, now); - assertNotNull(result0); - assertTrue(memoryRecordsBuilder.hasRoomFor(now, null, new byte[10], Record.EMPTY_HEADERS)); - memoryRecordsBuilder.closeForRecordAppends(); - assertFalse(memoryRecordsBuilder.hasRoomFor(now, null, new byte[10], Record.EMPTY_HEADERS)); - assertEquals(null, batch.tryAppend(now + 1, null, new byte[10], Record.EMPTY_HEADERS, null, now + 1)); - } - - private static class MockCallback implements Callback { - private int invocations = 0; - private RecordMetadata metadata; - private Exception exception; - - @Override - public void onCompletion(RecordMetadata metadata, Exception exception) { - invocations++; - this.metadata = metadata; - this.exception = exception; - } - } - -} diff --git a/clients/src/test/java/org/oracle/okafka/clients/producer/internals/ProducerInterceptorsTest.java b/clients/src/test/java/org/oracle/okafka/clients/producer/internals/ProducerInterceptorsTest.java deleted file mode 100644 index deeb8b0..0000000 --- a/clients/src/test/java/org/oracle/okafka/clients/producer/internals/ProducerInterceptorsTest.java +++ /dev/null @@ -1,221 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * 04/20/2020: This file is modified to support Kafka Java Client compatability to Oracle Transactional Event Queues. 
- * - */ - -package org.oracle.okafka.clients.producer.internals; - - -import org.oracle.okafka.clients.producer.ProducerInterceptor; -import org.oracle.okafka.clients.producer.ProducerRecord; -import org.oracle.okafka.clients.producer.RecordMetadata; -import org.oracle.okafka.common.KafkaException; -import org.oracle.okafka.common.TopicPartition; -import org.junit.Test; - -import java.util.ArrayList; -import java.util.List; -import java.util.Map; - -import static org.junit.Assert.assertEquals; - -public class ProducerInterceptorsTest { - private final TopicPartition tp = new TopicPartition("test", 0); - private final ProducerRecord producerRecord = new ProducerRecord<>("test", 0, 1, "value"); - private int onAckCount = 0; - private int onErrorAckCount = 0; - private int onErrorAckWithTopicSetCount = 0; - private int onErrorAckWithTopicPartitionSetCount = 0; - private int onSendCount = 0; - - private class AppendProducerInterceptor implements ProducerInterceptor { - private String appendStr = ""; - private boolean throwExceptionOnSend = false; - private boolean throwExceptionOnAck = false; - - public AppendProducerInterceptor(String appendStr) { - this.appendStr = appendStr; - } - - @Override - public void configure(Map configs) { - } - - @Override - public ProducerRecord onSend(ProducerRecord record) { - onSendCount++; - if (throwExceptionOnSend) - throw new KafkaException("Injected exception in AppendProducerInterceptor.onSend"); - - return new ProducerRecord<>( - record.topic(), record.partition(), record.key(), record.value().concat(appendStr)); - } - - @Override - public void onAcknowledgement(RecordMetadata metadata, Exception exception) { - onAckCount++; - if (exception != null) { - onErrorAckCount++; - // the length check is just to call topic() method and let it throw an exception - // if RecordMetadata.TopicPartition is null - if (metadata != null && metadata.topic().length() >= 0) { - onErrorAckWithTopicSetCount++; - if (metadata.partition() >= 0) - onErrorAckWithTopicPartitionSetCount++; - } - } - if (throwExceptionOnAck) - throw new KafkaException("Injected exception in AppendProducerInterceptor.onAcknowledgement"); - } - - @Override - public void close() { - } - - // if 'on' is true, onSend will always throw an exception - public void injectOnSendError(boolean on) { - throwExceptionOnSend = on; - } - - // if 'on' is true, onAcknowledgement will always throw an exception - public void injectOnAcknowledgementError(boolean on) { - throwExceptionOnAck = on; - } - } - - @Test - public void testOnSendChain() { - List> interceptorList = new ArrayList<>(); - // we are testing two different interceptors by configuring the same interceptor differently, which is not - // how it would be done in KafkaProducer, but ok for testing interceptor callbacks - AppendProducerInterceptor interceptor1 = new AppendProducerInterceptor("One"); - AppendProducerInterceptor interceptor2 = new AppendProducerInterceptor("Two"); - interceptorList.add(interceptor1); - interceptorList.add(interceptor2); - ProducerInterceptors interceptors = new ProducerInterceptors<>(interceptorList); - - // verify that onSend() mutates the record as expected - ProducerRecord interceptedRecord = interceptors.onSend(producerRecord); - assertEquals(2, onSendCount); - assertEquals(producerRecord.topic(), interceptedRecord.topic()); - assertEquals(producerRecord.partition(), interceptedRecord.partition()); - assertEquals(producerRecord.key(), interceptedRecord.key()); - assertEquals(interceptedRecord.value(), 
producerRecord.value().concat("One").concat("Two")); - - // onSend() mutates the same record the same way - ProducerRecord anotherRecord = interceptors.onSend(producerRecord); - assertEquals(4, onSendCount); - assertEquals(interceptedRecord, anotherRecord); - - // verify that if one of the interceptors throws an exception, other interceptors' callbacks are still called - interceptor1.injectOnSendError(true); - ProducerRecord partInterceptRecord = interceptors.onSend(producerRecord); - assertEquals(6, onSendCount); - assertEquals(partInterceptRecord.value(), producerRecord.value().concat("Two")); - - // verify the record remains valid if all onSend throws an exception - interceptor2.injectOnSendError(true); - ProducerRecord noInterceptRecord = interceptors.onSend(producerRecord); - assertEquals(producerRecord, noInterceptRecord); - - interceptors.close(); - } - - @Test - public void testOnAcknowledgementChain() { - List> interceptorList = new ArrayList<>(); - // we are testing two different interceptors by configuring the same interceptor differently, which is not - // how it would be done in KafkaProducer, but ok for testing interceptor callbacks - AppendProducerInterceptor interceptor1 = new AppendProducerInterceptor("One"); - AppendProducerInterceptor interceptor2 = new AppendProducerInterceptor("Two"); - interceptorList.add(interceptor1); - interceptorList.add(interceptor2); - ProducerInterceptors interceptors = new ProducerInterceptors<>(interceptorList); - - // verify onAck is called on all interceptors - RecordMetadata meta = new RecordMetadata(tp, 0, 0, 0, Long.valueOf(0L), 0, 0); - interceptors.onAcknowledgement(meta, null); - assertEquals(2, onAckCount); - - // verify that onAcknowledgement exceptions do not propagate - interceptor1.injectOnAcknowledgementError(true); - interceptors.onAcknowledgement(meta, null); - assertEquals(4, onAckCount); - - interceptor2.injectOnAcknowledgementError(true); - interceptors.onAcknowledgement(meta, null); - assertEquals(6, onAckCount); - - interceptors.close(); - } - - @Test - public void testOnAcknowledgementWithErrorChain() { - List> interceptorList = new ArrayList<>(); - AppendProducerInterceptor interceptor1 = new AppendProducerInterceptor("One"); - interceptorList.add(interceptor1); - ProducerInterceptors interceptors = new ProducerInterceptors<>(interceptorList); - - // verify that metadata contains both topic and partition - interceptors.onSendError(producerRecord, - new TopicPartition(producerRecord.topic(), producerRecord.partition()), - new KafkaException("Test")); - assertEquals(1, onErrorAckCount); - assertEquals(1, onErrorAckWithTopicPartitionSetCount); - - // verify that metadata contains both topic and partition (because record already contains partition) - interceptors.onSendError(producerRecord, null, new KafkaException("Test")); - assertEquals(2, onErrorAckCount); - assertEquals(2, onErrorAckWithTopicPartitionSetCount); - - // if producer record does not contain partition, interceptor should get partition == -1 - ProducerRecord record2 = new ProducerRecord<>("test2", null, 1, "value"); - interceptors.onSendError(record2, null, new KafkaException("Test")); - assertEquals(3, onErrorAckCount); - assertEquals(3, onErrorAckWithTopicSetCount); - assertEquals(2, onErrorAckWithTopicPartitionSetCount); - - // if producer record does not contain partition, but topic/partition is passed to - // onSendError, then interceptor should get valid partition - int reassignedPartition = producerRecord.partition() + 1; - 
interceptors.onSendError(record2, - new TopicPartition(record2.topic(), reassignedPartition), - new KafkaException("Test")); - assertEquals(4, onErrorAckCount); - assertEquals(4, onErrorAckWithTopicSetCount); - assertEquals(3, onErrorAckWithTopicPartitionSetCount); - - // if both record and topic/partition are null, interceptor should not receive metadata - interceptors.onSendError(null, null, new KafkaException("Test")); - assertEquals(5, onErrorAckCount); - assertEquals(4, onErrorAckWithTopicSetCount); - assertEquals(3, onErrorAckWithTopicPartitionSetCount); - - interceptors.close(); - } -} - diff --git a/clients/src/test/java/org/oracle/okafka/clients/producer/internals/RecordAccumulatorTest.java b/clients/src/test/java/org/oracle/okafka/clients/producer/internals/RecordAccumulatorTest.java deleted file mode 100644 index b08a334..0000000 --- a/clients/src/test/java/org/oracle/okafka/clients/producer/internals/RecordAccumulatorTest.java +++ /dev/null @@ -1,842 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * 04/20/2020: This file is modified to support Kafka Java Client compatability to Oracle Transactional Event Queues. 
- * - */ - -package org.oracle.okafka.clients.producer.internals; - -import org.oracle.okafka.clients.producer.Callback; -import org.oracle.okafka.clients.producer.RecordMetadata; -import org.oracle.okafka.common.Cluster; -import org.oracle.okafka.common.KafkaException; -import org.oracle.okafka.common.Node; -import org.oracle.okafka.common.PartitionInfo; -import org.oracle.okafka.common.TopicPartition; -import org.oracle.okafka.common.metrics.Metrics; -import org.oracle.okafka.common.protocol.ApiKeys; -import org.oracle.okafka.common.record.CompressionRatioEstimator; -import org.oracle.okafka.common.record.CompressionType; -import org.oracle.okafka.common.record.DefaultRecord; -import org.oracle.okafka.common.record.DefaultRecordBatch; -import org.oracle.okafka.common.record.MemoryRecords; -import org.oracle.okafka.common.record.MemoryRecordsBuilder; -import org.oracle.okafka.common.record.MutableRecordBatch; -import org.oracle.okafka.common.record.Record; -import org.oracle.okafka.common.record.TimestampType; -import org.oracle.okafka.common.utils.LogContext; -import org.oracle.okafka.common.utils.MockTime; -import org.oracle.okafka.common.utils.Time; -import org.oracle.okafka.test.TestUtils; -import org.junit.After; -import org.junit.Test; - -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.Deque; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Random; -import java.util.Set; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.Future; -import java.util.concurrent.atomic.AtomicInteger; - -import static java.util.Arrays.asList; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; - -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -public class RecordAccumulatorTest { - - private String topic = "test"; - private int partition1 = 0; - private int partition2 = 1; - private int partition3 = 2; - private Node node1 = new Node(0, "localhost", 1111, "", ""); - private Node node2 = new Node(1, "localhost", 1112, "", ""); - private TopicPartition tp1 = new TopicPartition(topic, partition1); - private TopicPartition tp2 = new TopicPartition(topic, partition2); - private TopicPartition tp3 = new TopicPartition(topic, partition3); - private PartitionInfo part1 = new PartitionInfo(topic, partition1, node1, null, null); - private PartitionInfo part2 = new PartitionInfo(topic, partition2, node1, null, null); - private PartitionInfo part3 = new PartitionInfo(topic, partition3, node2, null, null); - private MockTime time = new MockTime(); - private byte[] key = "key".getBytes(); - private byte[] value = "value".getBytes(); - private int msgSize = DefaultRecord.sizeInBytes(0, 0, key.length, value.length, - Record.EMPTY_HEADERS); - private Cluster cluster = new Cluster(null, Arrays.asList(node1, node2), Arrays.asList(part1, part2, part3), - Collections.emptySet(), Collections.emptySet(), null); - private Metrics metrics = new Metrics(time); - private final long maxBlockTimeMs = 1000; - private final LogContext logContext = new LogContext(); - - @After - public void teardown() { - this.metrics.close(); - } - - @Test - public void testFull() throws Exception { - long now = time.milliseconds(); - - // test case assumes that the records do not fill the batch completely - int batchSize = 1025; - - RecordAccumulator accum = createTestRecordAccumulator( - batchSize + 
DefaultRecordBatch.RECORD_BATCH_OVERHEAD, 10L * batchSize, CompressionType.NONE, 10L); - int appends = expectedNumAppends(batchSize); - for (int i = 0; i < appends; i++) { - // append to the first batch - accum.append(tp1, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs); - Deque partitionBatches = accum.batches().get(tp1); - assertEquals(1, partitionBatches.size()); - - ProducerBatch batch = partitionBatches.peekFirst(); - assertTrue(batch.isWritable()); - assertEquals("No partitions should be ready.", 0, accum.ready(cluster, now).readyNodes.size()); - } - - // this append doesn't fit in the first batch, so a new batch is created and the first batch is closed - - accum.append(tp1, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs); - Deque partitionBatches = accum.batches().get(tp1); - assertEquals(2, partitionBatches.size()); - Iterator partitionBatchesIterator = partitionBatches.iterator(); - assertTrue(partitionBatchesIterator.next().isWritable()); - assertEquals("Our partition's leader should be ready", Collections.singleton(node1), accum.ready(cluster, time.milliseconds()).readyNodes); - - List batches = accum.drain(cluster, Collections.singleton(node1), Integer.MAX_VALUE, 0).get(node1); - assertEquals(1, batches.size()); - ProducerBatch batch = batches.get(0); - - Iterator iter = batch.records().records().iterator(); - for (int i = 0; i < appends; i++) { - Record record = iter.next(); - assertEquals("Keys should match", ByteBuffer.wrap(key), record.key()); - assertEquals("Values should match", ByteBuffer.wrap(value), record.value()); - } - assertFalse("No more records", iter.hasNext()); - } - - @Test - public void testAppendLargeNonCompressed() throws Exception { - testAppendLarge(CompressionType.NONE); - } - - private void testAppendLarge(CompressionType compressionType) throws Exception { - int batchSize = 512; - byte[] value = new byte[2 * batchSize]; - RecordAccumulator accum = createTestRecordAccumulator( - batchSize + DefaultRecordBatch.RECORD_BATCH_OVERHEAD, 10 * 1024, compressionType, 0L); - accum.append(tp1, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs); - assertEquals("Our partition's leader should be ready", Collections.singleton(node1), accum.ready(cluster, time.milliseconds()).readyNodes); - - Deque batches = accum.batches().get(tp1); - assertEquals(1, batches.size()); - ProducerBatch producerBatch = batches.peek(); - List recordBatches = TestUtils.toList(producerBatch.records().batches()); - assertEquals(1, recordBatches.size()); - MutableRecordBatch recordBatch = recordBatches.get(0); - assertEquals(0L, recordBatch.baseOffset()); - List records = TestUtils.toList(recordBatch); - assertEquals(1, records.size()); - Record record = records.get(0); - assertEquals(0L, record.offset()); - assertEquals(ByteBuffer.wrap(key), record.key()); - assertEquals(ByteBuffer.wrap(value), record.value()); - assertEquals(0L, record.timestamp()); - } - - @Test - public void testLinger() throws Exception { - long lingerMs = 10L; - RecordAccumulator accum = createTestRecordAccumulator( - 1024 + DefaultRecordBatch.RECORD_BATCH_OVERHEAD, 10 * 1024, CompressionType.NONE, lingerMs); - accum.append(tp1, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs); - assertEquals("No partitions should be ready", 0, accum.ready(cluster, time.milliseconds()).readyNodes.size()); - time.sleep(10); - assertEquals("Our partition's leader should be ready", Collections.singleton(node1), accum.ready(cluster, time.milliseconds()).readyNodes); - List batches = 
accum.drain(cluster, Collections.singleton(node1), Integer.MAX_VALUE, time.milliseconds() + lingerMs + 1).get(node1); - assertEquals(1, batches.size()); - ProducerBatch batch = batches.get(0); - - Iterator iter = batch.records().records().iterator(); - Record record = iter.next(); - assertEquals("Keys should match", ByteBuffer.wrap(key), record.key()); - assertEquals("Values should match", ByteBuffer.wrap(value), record.value()); - assertFalse("No more records", iter.hasNext()); - } - - @Test - public void testPartialDrain() throws Exception { - RecordAccumulator accum = createTestRecordAccumulator( - 1024 + DefaultRecordBatch.RECORD_BATCH_OVERHEAD, 10 * 1024, CompressionType.NONE, 10L); - int appends = 1024 / msgSize + 1; - List partitions = asList(tp1, tp2); - for (TopicPartition tp : partitions) { - for (int i = 0; i < appends; i++) - accum.append(tp, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs); - } - assertEquals("Partition's leader should be ready", Collections.singleton(node1), accum.ready(cluster, time.milliseconds()).readyNodes); - - List batches = accum.drain(cluster, Collections.singleton(node1), 1024, 0).get(node1); - assertEquals("But due to size bound only one partition should have been retrieved", 1, batches.size()); - } - - @SuppressWarnings("unused") - @Test - public void testStressfulSituation() throws Exception { - final int numThreads = 5; - final int msgs = 10000; - final int numParts = 2; - final RecordAccumulator accum = createTestRecordAccumulator( - 1024 + DefaultRecordBatch.RECORD_BATCH_OVERHEAD, 10 * 1024, CompressionType.NONE, 0L); - List threads = new ArrayList<>(); - for (int i = 0; i < numThreads; i++) { - threads.add(new Thread() { - public void run() { - for (int i = 0; i < msgs; i++) { - try { - accum.append(new TopicPartition(topic, i % numParts), 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs); - } catch (Exception e) { - e.printStackTrace(); - } - } - } - }); - } - for (Thread t : threads) - t.start(); - int read = 0; - long now = time.milliseconds(); - while (read < numThreads * msgs) { - Set nodes = accum.ready(cluster, now).readyNodes; - List batches = accum.drain(cluster, nodes, 5 * 1024, 0).get(node1); - if (batches != null) { - for (ProducerBatch batch : batches) { - for (Record record : batch.records().records()) - read++; - accum.deallocate(batch); - } - } - } - - for (Thread t : threads) - t.join(); - } - - - @Test - public void testNextReadyCheckDelay() throws Exception { - // Next check time will use lingerMs since this test won't trigger any retries/backoff - long lingerMs = 10L; - - // test case assumes that the records do not fill the batch completely - int batchSize = 1025; - - RecordAccumulator accum = createTestRecordAccumulator( - batchSize + DefaultRecordBatch.RECORD_BATCH_OVERHEAD, 10 * batchSize, CompressionType.NONE, lingerMs); - // Just short of going over the limit so we trigger linger time - int appends = expectedNumAppends(batchSize); - - // Partition on node1 only - for (int i = 0; i < appends; i++) - accum.append(tp1, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs); - RecordAccumulator.ReadyCheckResult result = accum.ready(cluster, time.milliseconds()); - assertEquals("No nodes should be ready.", 0, result.readyNodes.size()); - assertEquals("Next check time should be the linger time", lingerMs, result.nextReadyCheckDelayMs); - - time.sleep(lingerMs / 2); - - // Add partition on node2 only - for (int i = 0; i < appends; i++) - accum.append(tp3, 0L, key, value, Record.EMPTY_HEADERS, null, 
maxBlockTimeMs); - result = accum.ready(cluster, time.milliseconds()); - assertEquals("No nodes should be ready.", 0, result.readyNodes.size()); - assertEquals("Next check time should be defined by node1, half remaining linger time", lingerMs / 2, result.nextReadyCheckDelayMs); - - // Add data for another partition on node1, enough to make data sendable immediately - for (int i = 0; i < appends + 1; i++) - accum.append(tp2, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs); - result = accum.ready(cluster, time.milliseconds()); - assertEquals("Node1 should be ready", Collections.singleton(node1), result.readyNodes); - // Note this can actually be < linger time because it may use delays from partitions that aren't sendable - // but have leaders with other sendable data. - assertTrue("Next check time should be defined by node2, at most linger time", result.nextReadyCheckDelayMs <= lingerMs); - } - - @Test - public void testRetryBackoff() throws Exception { - long lingerMs = Long.MAX_VALUE / 4; - long retryBackoffMs = Long.MAX_VALUE / 2; - final RecordAccumulator accum = new RecordAccumulator(logContext, 1024 + DefaultRecordBatch.RECORD_BATCH_OVERHEAD, 10 * 1024, - CompressionType.NONE, lingerMs, retryBackoffMs, metrics, time); - - long now = time.milliseconds(); - accum.append(tp1, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs); - RecordAccumulator.ReadyCheckResult result = accum.ready(cluster, now + lingerMs + 1); - assertEquals("Node1 should be ready", Collections.singleton(node1), result.readyNodes); - Map> batches = accum.drain(cluster, result.readyNodes, Integer.MAX_VALUE, now + lingerMs + 1); - assertEquals("Node1 should be the only ready node.", 1, batches.size()); - assertEquals("Partition 0 should only have one batch drained.", 1, batches.get(node1).size()); - - // Reenqueue the batch - now = time.milliseconds(); - accum.reenqueue(batches.get(node1).get(0), now); - - // Put message for partition 1 into accumulator - accum.append(tp2, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs); - result = accum.ready(cluster, now + lingerMs + 1); - assertEquals("Node1 should be ready", Collections.singleton(node1), result.readyNodes); - - // tp1 should backoff while tp2 should not - batches = accum.drain(cluster, result.readyNodes, Integer.MAX_VALUE, now + lingerMs + 1); - assertEquals("Node1 should be the only ready node.", 1, batches.size()); - assertEquals("Node1 should only have one batch drained.", 1, batches.get(node1).size()); - assertEquals("Node1 should only have one batch for partition 1.", tp2, batches.get(node1).get(0).topicPartition); - - // Partition 0 can be drained after retry backoff - result = accum.ready(cluster, now + retryBackoffMs + 1); - assertEquals("Node1 should be ready", Collections.singleton(node1), result.readyNodes); - batches = accum.drain(cluster, result.readyNodes, Integer.MAX_VALUE, now + retryBackoffMs + 1); - assertEquals("Node1 should be the only ready node.", 1, batches.size()); - assertEquals("Node1 should only have one batch drained.", 1, batches.get(node1).size()); - assertEquals("Node1 should only have one batch for partition 0.", tp1, batches.get(node1).get(0).topicPartition); - } - - @Test - public void testFlush() throws Exception { - long lingerMs = Long.MAX_VALUE; - final RecordAccumulator accum = createTestRecordAccumulator( - 4 * 1024 + DefaultRecordBatch.RECORD_BATCH_OVERHEAD, 64 * 1024, CompressionType.NONE, lingerMs); - - for (int i = 0; i < 100; i++) { - accum.append(new TopicPartition(topic, i % 3), 0L, key, 
value, Record.EMPTY_HEADERS, null, maxBlockTimeMs); - assertTrue(accum.hasIncomplete()); - } - RecordAccumulator.ReadyCheckResult result = accum.ready(cluster, time.milliseconds()); - assertEquals("No nodes should be ready.", 0, result.readyNodes.size()); - - accum.beginFlush(); - result = accum.ready(cluster, time.milliseconds()); - - // drain and deallocate all batches - Map> results = accum.drain(cluster, result.readyNodes, Integer.MAX_VALUE, time.milliseconds()); - assertTrue(accum.hasIncomplete()); - - for (List batches: results.values()) - for (ProducerBatch batch: batches) - accum.deallocate(batch); - - // should be complete with no unsent records. - accum.awaitFlushCompletion(); - assertFalse(accum.hasUndrained()); - assertFalse(accum.hasIncomplete()); - } - - - private void delayedInterrupt(final Thread thread, final long delayMs) { - Thread t = new Thread() { - public void run() { - Time.SYSTEM.sleep(delayMs); - thread.interrupt(); - } - }; - t.start(); - } - - @Test - public void testAwaitFlushComplete() throws Exception { - RecordAccumulator accum = createTestRecordAccumulator( - 4 * 1024 + DefaultRecordBatch.RECORD_BATCH_OVERHEAD, 64 * 1024, CompressionType.NONE, Long.MAX_VALUE); - accum.append(new TopicPartition(topic, 0), 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs); - - accum.beginFlush(); - assertTrue(accum.flushInProgress()); - delayedInterrupt(Thread.currentThread(), 1000L); - try { - accum.awaitFlushCompletion(); - fail("awaitFlushCompletion should throw InterruptException"); - } catch (InterruptedException e) { - assertFalse("flushInProgress count should be decremented even if thread is interrupted", accum.flushInProgress()); - } - } - - @Test - public void testAbortIncompleteBatches() throws Exception { - long lingerMs = Long.MAX_VALUE; - int numRecords = 100; - - final AtomicInteger numExceptionReceivedInCallback = new AtomicInteger(0); - final RecordAccumulator accum = createTestRecordAccumulator( - 128 + DefaultRecordBatch.RECORD_BATCH_OVERHEAD, 64 * 1024, CompressionType.NONE, lingerMs); - class TestCallback implements Callback { - @Override - public void onCompletion(RecordMetadata metadata, Exception exception) { - assertTrue(exception.getMessage().equals("Producer is closed forcefully.")); - numExceptionReceivedInCallback.incrementAndGet(); - } - } - for (int i = 0; i < numRecords; i++) - accum.append(new TopicPartition(topic, i % 3), 0L, key, value, null, new TestCallback(), maxBlockTimeMs); - RecordAccumulator.ReadyCheckResult result = accum.ready(cluster, time.milliseconds()); - assertFalse(result.readyNodes.isEmpty()); - Map> drained = accum.drain(cluster, result.readyNodes, Integer.MAX_VALUE, time.milliseconds()); - assertTrue(accum.hasUndrained()); - assertTrue(accum.hasIncomplete()); - - int numDrainedRecords = 0; - for (Map.Entry> drainedEntry : drained.entrySet()) { - for (ProducerBatch batch : drainedEntry.getValue()) { - assertTrue(batch.isClosed()); - assertFalse(batch.produceFuture.completed()); - numDrainedRecords += batch.recordCount; - } - } - - assertTrue(numDrainedRecords > 0 && numDrainedRecords < numRecords); - accum.abortIncompleteBatches(); - assertEquals(numRecords, numExceptionReceivedInCallback.get()); - assertFalse(accum.hasUndrained()); - assertFalse(accum.hasIncomplete()); - } - - @Test - public void testAbortUnsentBatches() throws Exception { - long lingerMs = Long.MAX_VALUE; - int numRecords = 100; - - final AtomicInteger numExceptionReceivedInCallback = new AtomicInteger(0); - final RecordAccumulator accum = 
createTestRecordAccumulator( - 128 + DefaultRecordBatch.RECORD_BATCH_OVERHEAD, 64 * 1024, CompressionType.NONE, lingerMs); - final KafkaException cause = new KafkaException(); - - class TestCallback implements Callback { - @Override - public void onCompletion(RecordMetadata metadata, Exception exception) { - assertEquals(cause, exception); - numExceptionReceivedInCallback.incrementAndGet(); - } - } - for (int i = 0; i < numRecords; i++) - accum.append(new TopicPartition(topic, i % 3), 0L, key, value, null, new TestCallback(), maxBlockTimeMs); - RecordAccumulator.ReadyCheckResult result = accum.ready(cluster, time.milliseconds()); - assertFalse(result.readyNodes.isEmpty()); - Map> drained = accum.drain(cluster, result.readyNodes, Integer.MAX_VALUE, - time.milliseconds()); - assertTrue(accum.hasUndrained()); - assertTrue(accum.hasIncomplete()); - - accum.abortUndrainedBatches(cause); - int numDrainedRecords = 0; - for (Map.Entry> drainedEntry : drained.entrySet()) { - for (ProducerBatch batch : drainedEntry.getValue()) { - assertTrue(batch.isClosed()); - assertFalse(batch.produceFuture.completed()); - numDrainedRecords += batch.recordCount; - } - } - - assertTrue(numDrainedRecords > 0); - assertTrue(numExceptionReceivedInCallback.get() > 0); - assertEquals(numRecords, numExceptionReceivedInCallback.get() + numDrainedRecords); - assertFalse(accum.hasUndrained()); - assertTrue(accum.hasIncomplete()); - } - - @Test - public void testExpiredBatches() throws InterruptedException { - long retryBackoffMs = 100L; - long lingerMs = 3000L; - int requestTimeout = 60; - - // test case assumes that the records do not fill the batch completely - int batchSize = 1025; - - RecordAccumulator accum = createTestRecordAccumulator( - batchSize + DefaultRecordBatch.RECORD_BATCH_OVERHEAD, 10 * batchSize, CompressionType.NONE, lingerMs); - int appends = expectedNumAppends(batchSize); - - // Test batches not in retry - for (int i = 0; i < appends; i++) { - accum.append(tp1, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs); - assertEquals("No partitions should be ready.", 0, accum.ready(cluster, time.milliseconds()).readyNodes.size()); - } - // Make the batches ready due to batch full - accum.append(tp1, 0L, key, value, Record.EMPTY_HEADERS, null, 0); - Set readyNodes = accum.ready(cluster, time.milliseconds()).readyNodes; - assertEquals("Our partition's leader should be ready", Collections.singleton(node1), readyNodes); - // Advance the clock to expire the batch. 
- time.sleep(requestTimeout + 1); - accum.mutePartition(tp1); - List expiredBatches = accum.expiredBatches(requestTimeout, time.milliseconds()); - assertEquals("The batch should not be expired when the partition is muted", 0, expiredBatches.size()); - - accum.unmutePartition(tp1, 0L); - expiredBatches = accum.expiredBatches(requestTimeout, time.milliseconds()); - assertEquals("The batch should be expired", 1, expiredBatches.size()); - assertEquals("No partitions should be ready.", 0, accum.ready(cluster, time.milliseconds()).readyNodes.size()); - - // Advance the clock to make the next batch ready due to linger.ms - time.sleep(lingerMs); - assertEquals("Our partition's leader should be ready", Collections.singleton(node1), readyNodes); - time.sleep(requestTimeout + 1); - - accum.mutePartition(tp1); - expiredBatches = accum.expiredBatches(requestTimeout, time.milliseconds()); - assertEquals("The batch should not be expired when metadata is still available and partition is muted", 0, expiredBatches.size()); - - accum.unmutePartition(tp1, 0L); - expiredBatches = accum.expiredBatches(requestTimeout, time.milliseconds()); - assertEquals("The batch should be expired when the partition is not muted", 1, expiredBatches.size()); - assertEquals("No partitions should be ready.", 0, accum.ready(cluster, time.milliseconds()).readyNodes.size()); - - // Test batches in retry. - // Create a retried batch - accum.append(tp1, 0L, key, value, Record.EMPTY_HEADERS, null, 0); - time.sleep(lingerMs); - readyNodes = accum.ready(cluster, time.milliseconds()).readyNodes; - assertEquals("Our partition's leader should be ready", Collections.singleton(node1), readyNodes); - Map> drained = accum.drain(cluster, readyNodes, Integer.MAX_VALUE, time.milliseconds()); - assertEquals("There should be only one batch.", drained.get(node1).size(), 1); - time.sleep(1000L); - accum.reenqueue(drained.get(node1).get(0), time.milliseconds()); - - // test expiration. - time.sleep(requestTimeout + retryBackoffMs); - expiredBatches = accum.expiredBatches(requestTimeout, time.milliseconds()); - assertEquals("The batch should not be expired.", 0, expiredBatches.size()); - time.sleep(1L); - - accum.mutePartition(tp1); - expiredBatches = accum.expiredBatches(requestTimeout, time.milliseconds()); - assertEquals("The batch should not be expired when the partition is muted", 0, expiredBatches.size()); - - accum.unmutePartition(tp1, 0L); - expiredBatches = accum.expiredBatches(requestTimeout, time.milliseconds()); - assertEquals("The batch should be expired when the partition is not muted.", 1, expiredBatches.size()); - - // Test that when being throttled muted batches are expired before the throttle time is over. - accum.append(tp1, 0L, key, value, Record.EMPTY_HEADERS, null, 0); - time.sleep(lingerMs); - readyNodes = accum.ready(cluster, time.milliseconds()).readyNodes; - assertEquals("Our partition's leader should be ready", Collections.singleton(node1), readyNodes); - // Advance the clock to expire the batch. - time.sleep(requestTimeout + 1); - accum.mutePartition(tp1); - expiredBatches = accum.expiredBatches(requestTimeout, time.milliseconds()); - assertEquals("The batch should not be expired when the partition is muted", 0, expiredBatches.size()); - - long throttleTimeMs = 100L; - accum.unmutePartition(tp1, time.milliseconds() + throttleTimeMs); - // The batch shouldn't be expired yet. 
- expiredBatches = accum.expiredBatches(requestTimeout, time.milliseconds()); - assertEquals("The batch should not be expired when the partition is muted", 0, expiredBatches.size()); - - // Once the throttle time is over, the batch can be expired. - time.sleep(throttleTimeMs); - expiredBatches = accum.expiredBatches(requestTimeout, time.milliseconds()); - assertEquals("The batch should be expired", 1, expiredBatches.size()); - assertEquals("No partitions should be ready.", 0, accum.ready(cluster, time.milliseconds()).readyNodes.size()); - } - - @Test - public void testMutedPartitions() throws InterruptedException { - long now = time.milliseconds(); - // test case assumes that the records do not fill the batch completely - int batchSize = 1025; - - RecordAccumulator accum = createTestRecordAccumulator( - batchSize + DefaultRecordBatch.RECORD_BATCH_OVERHEAD, 10 * batchSize, CompressionType.NONE, 10); - int appends = expectedNumAppends(batchSize); - for (int i = 0; i < appends; i++) { - accum.append(tp1, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs); - assertEquals("No partitions should be ready.", 0, accum.ready(cluster, now).readyNodes.size()); - } - time.sleep(2000); - - // Test ready with muted partition - accum.mutePartition(tp1); - RecordAccumulator.ReadyCheckResult result = accum.ready(cluster, time.milliseconds()); - assertEquals("No node should be ready", 0, result.readyNodes.size()); - - // Test ready without muted partition - accum.unmutePartition(tp1, 0L); - result = accum.ready(cluster, time.milliseconds()); - assertTrue("The batch should be ready", result.readyNodes.size() > 0); - - // Test drain with muted partition - accum.mutePartition(tp1); - Map> drained = accum.drain(cluster, result.readyNodes, Integer.MAX_VALUE, time.milliseconds()); - assertEquals("No batch should have been drained", 0, drained.get(node1).size()); - - // Test drain without muted partition. - accum.unmutePartition(tp1, 0L); - drained = accum.drain(cluster, result.readyNodes, Integer.MAX_VALUE, time.milliseconds()); - assertTrue("The batch should have been drained.", drained.get(node1).size() > 0); - } -/* - @Test(expected = UnsupportedVersionException.class) - public void testIdempotenceWithOldMagic() throws InterruptedException { - // Simulate talking to an older broker, ie. one which supports a lower magic. 
- ApiVersions apiVersions = new ApiVersions(); - int batchSize = 1025; - apiVersions.update("foobar", NodeApiVersions.create(Arrays.asList(new ApiVersionsResponse.ApiVersion(ApiKeys.PRODUCE.id, - (short) 0, (short) 2)))); - RecordAccumulator accum = new RecordAccumulator(logContext, batchSize + DefaultRecordBatch.RECORD_BATCH_OVERHEAD, 10 * batchSize, - CompressionType.NONE, 10, 100L, metrics, time, apiVersions, new TransactionManager()); - accum.append(tp1, 0L, key, value, Record.EMPTY_HEADERS, null, 0); - } - - @Test - public void testSplitAndReenqueue() throws ExecutionException, InterruptedException { - long now = time.milliseconds(); - RecordAccumulator accum = createTestRecordAccumulator(1024, 10 * 1024, CompressionType.GZIP, 10); - - // Create a big batch - ByteBuffer buffer = ByteBuffer.allocate(4096); - MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, CompressionType.NONE, TimestampType.CREATE_TIME, 0L); - ProducerBatch batch = new ProducerBatch(tp1, builder, now, true); - - byte[] value = new byte[1024]; - final AtomicInteger acked = new AtomicInteger(0); - Callback cb = new Callback() { - @Override - public void onCompletion(RecordMetadata metadata, Exception exception) { - acked.incrementAndGet(); - } - }; - // Append two messages so the batch is too big. - Future future1 = batch.tryAppend(now, null, value, Record.EMPTY_HEADERS, cb, now); - Future future2 = batch.tryAppend(now, null, value, Record.EMPTY_HEADERS, cb, now); - assertNotNull(future1); - assertNotNull(future2); - batch.close(); - // Enqueue the batch to the accumulator as if the batch was created by the accumulator. - accum.reenqueue(batch, now); - time.sleep(101L); - // Drain the batch. - RecordAccumulator.ReadyCheckResult result = accum.ready(cluster, time.milliseconds()); - assertTrue("The batch should be ready", result.readyNodes.size() > 0); - Map> drained = accum.drain(cluster, result.readyNodes, Integer.MAX_VALUE, time.milliseconds()); - assertEquals("Only node1 should be drained", 1, drained.size()); - assertEquals("Only one batch should be drained", 1, drained.get(node1.id()).size()); - // Split and reenqueue the batch. - accum.splitAndReenqueue(drained.get(node1.id()).get(0)); - time.sleep(101L); - - drained = accum.drain(cluster, result.readyNodes, Integer.MAX_VALUE, time.milliseconds()); - assertFalse(drained.isEmpty()); - assertFalse(drained.get(node1.id()).isEmpty()); - drained.get(node1.id()).get(0).done(acked.get(), 100L, null); - assertEquals("The first message should have been acked.", 1, acked.get()); - assertTrue(future1.isDone()); - assertEquals(0, future1.get().offset()); - - drained = accum.drain(cluster, result.readyNodes, Integer.MAX_VALUE, time.milliseconds()); - assertFalse(drained.isEmpty()); - assertFalse(drained.get(node1.id()).isEmpty()); - drained.get(node1.id()).get(0).done(acked.get(), 100L, null); - assertEquals("Both message should have been acked.", 2, acked.get()); - assertTrue(future2.isDone()); - assertEquals(1, future2.get().offset()); - } - - @Test - public void testSplitBatchOffAccumulator() throws InterruptedException { - long seed = System.currentTimeMillis(); - final int batchSize = 1024; - final int bufferCapacity = 3 * 1024; - - // First set the compression ratio estimation to be good. 
- CompressionRatioEstimator.setEstimation(tp1.topic(), CompressionType.GZIP, 0.1f); - RecordAccumulator accum = createTestRecordAccumulator(batchSize, bufferCapacity, CompressionType.GZIP, 0L); - int numSplitBatches = prepareSplitBatches(accum, seed, 100, 20); - assertTrue("There should be some split batches", numSplitBatches > 0); - // Drain all the split batches. - RecordAccumulator.ReadyCheckResult result = accum.ready(cluster, time.milliseconds()); - for (int i = 0; i < numSplitBatches; i++) { - Map> drained = - accum.drain(cluster, result.readyNodes, Integer.MAX_VALUE, time.milliseconds()); - assertFalse(drained.isEmpty()); - assertFalse(drained.get(node1.id()).isEmpty()); - } - assertTrue("All the batches should have been drained.", - accum.ready(cluster, time.milliseconds()).readyNodes.isEmpty()); - assertEquals("The split batches should be allocated off the accumulator", - bufferCapacity, accum.bufferPoolAvailableMemory()); - } - - @Test - public void testSplitFrequency() throws InterruptedException { - long seed = System.currentTimeMillis(); - Random random = new Random(); - random.setSeed(seed); - final int batchSize = 1024; - final int numMessages = 1000; - - RecordAccumulator accum = createTestRecordAccumulator(batchSize, 3 * 1024, CompressionType.GZIP, 10); - // Adjust the high and low compression ratio message percentage - for (int goodCompRatioPercentage = 1; goodCompRatioPercentage < 100; goodCompRatioPercentage++) { - int numSplit = 0; - int numBatches = 0; - CompressionRatioEstimator.resetEstimation(topic); - for (int i = 0; i < numMessages; i++) { - int dice = random.nextInt(100); - byte[] value = (dice < goodCompRatioPercentage) ? - bytesWithGoodCompression(random) : bytesWithPoorCompression(random, 100); - accum.append(tp1, 0L, null, value, Record.EMPTY_HEADERS, null, 0); - BatchDrainedResult result = completeOrSplitBatches(accum, batchSize); - numSplit += result.numSplit; - numBatches += result.numBatches; - } - time.sleep(10); - BatchDrainedResult result = completeOrSplitBatches(accum, batchSize); - numSplit += result.numSplit; - numBatches += result.numBatches; - assertTrue(String.format("Total num batches = %d, split batches = %d, more than 10%% of the batch splits. " - + "Random seed is " + seed, - numBatches, numSplit), (double) numSplit / numBatches < 0.1f); - } - } - - private int prepareSplitBatches(RecordAccumulator accum, long seed, int recordSize, int numRecords) - throws InterruptedException { - Random random = new Random(); - random.setSeed(seed); - - // First set the compression ratio estimation to be good. - CompressionRatioEstimator.setEstimation(tp1.topic(), CompressionType.GZIP, 0.1f); - // Append 20 records of 100 bytes size with poor compression ratio should make the batch too big. 
- for (int i = 0; i < numRecords; i++) { - accum.append(tp1, 0L, null, bytesWithPoorCompression(random, recordSize), Record.EMPTY_HEADERS, null, 0); - } - - RecordAccumulator.ReadyCheckResult result = accum.ready(cluster, time.milliseconds()); - assertFalse(result.readyNodes.isEmpty()); - Map> batches = accum.drain(cluster, result.readyNodes, Integer.MAX_VALUE, time.milliseconds()); - assertEquals(1, batches.size()); - assertEquals(1, batches.values().iterator().next().size()); - ProducerBatch batch = batches.values().iterator().next().get(0); - int numSplitBatches = accum.splitAndReenqueue(batch); - accum.deallocate(batch); - - return numSplitBatches; - } - - private BatchDrainedResult completeOrSplitBatches(RecordAccumulator accum, int batchSize) { - int numSplit = 0; - int numBatches = 0; - boolean batchDrained; - do { - batchDrained = false; - RecordAccumulator.ReadyCheckResult result = accum.ready(cluster, time.milliseconds()); - Map> batches = accum.drain(cluster, result.readyNodes, Integer.MAX_VALUE, time.milliseconds()); - for (List batchList : batches.values()) { - for (ProducerBatch batch : batchList) { - batchDrained = true; - numBatches++; - if (batch.estimatedSizeInBytes() > batchSize + DefaultRecordBatch.RECORD_BATCH_OVERHEAD) { - accum.splitAndReenqueue(batch); - // release the resource of the original big batch. - numSplit++; - } else { - batch.done(0L, 0L, null); - } - accum.deallocate(batch); - } - } - } while (batchDrained); - return new BatchDrainedResult(numSplit, numBatches); - } -*/ - /** - * Generates the compression ratio at about 0.6 - */ - private byte[] bytesWithGoodCompression(Random random) { - byte[] value = new byte[100]; - ByteBuffer buffer = ByteBuffer.wrap(value); - while (buffer.remaining() > 0) - buffer.putInt(random.nextInt(1000)); - return value; - } - - /** - * Generates the compression ratio at about 0.9 - */ - private byte[] bytesWithPoorCompression(Random random, int size) { - byte[] value = new byte[size]; - random.nextBytes(value); - return value; - } - - private class BatchDrainedResult { - final int numSplit; - final int numBatches; - BatchDrainedResult(int numSplit, int numBatches) { - this.numBatches = numBatches; - this.numSplit = numSplit; - } - } - - /** - * Return the offset delta. - */ - private int expectedNumAppends(int batchSize) { - int size = 0; - int offsetDelta = 0; - while (true) { - int recordSize = DefaultRecord.sizeInBytes(offsetDelta, 0, key.length, value.length, - Record.EMPTY_HEADERS); - if (size + recordSize > batchSize) - return offsetDelta; - offsetDelta += 1; - size += recordSize; - } - } - - /** - * Return a test RecordAccumulator instance - */ - private RecordAccumulator createTestRecordAccumulator(int batchSize, long totalSize, CompressionType type, long lingerMs) { - return new RecordAccumulator( - logContext, - batchSize, - totalSize, - type, - lingerMs, - 100L, - metrics, - time); - } -} diff --git a/clients/src/test/java/org/oracle/okafka/common/utils/MockTime.java b/clients/src/test/java/org/oracle/okafka/common/utils/MockTime.java deleted file mode 100644 index d418d66..0000000 --- a/clients/src/test/java/org/oracle/okafka/common/utils/MockTime.java +++ /dev/null @@ -1,120 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * 04/20/2020: This file is modified to support Kafka Java Client compatability to Oracle Transactional Event Queues. - * - */ - -package org.oracle.okafka.common.utils; - -import java.util.concurrent.CopyOnWriteArrayList; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicLong; - -/** - * A clock that you can manually advance by calling sleep - */ -public class MockTime implements Time { - - interface MockTimeListener { - void tick(); - } - - /** - * Listeners which are waiting for time changes. - */ - private final CopyOnWriteArrayList listeners = new CopyOnWriteArrayList<>(); - - private final long autoTickMs; - - // Values from `nanoTime` and `currentTimeMillis` are not comparable, so we store them separately to allow tests - // using this class to detect bugs where this is incorrectly assumed to be true - private final AtomicLong timeMs; - private final AtomicLong highResTimeNs; - - public MockTime() { - this(0); - } - - public MockTime(long autoTickMs) { - this(autoTickMs, System.currentTimeMillis(), System.nanoTime()); - } - - public MockTime(long autoTickMs, long currentTimeMs, long currentHighResTimeNs) { - this.timeMs = new AtomicLong(currentTimeMs); - this.highResTimeNs = new AtomicLong(currentHighResTimeNs); - this.autoTickMs = autoTickMs; - } - - public void addListener(MockTimeListener listener) { - listeners.add(listener); - } - - @Override - public long milliseconds() { - maybeSleep(autoTickMs); - return timeMs.get(); - } - - @Override - public long nanoseconds() { - maybeSleep(autoTickMs); - return highResTimeNs.get(); - } - - @Override - public long hiResClockMs() { - return TimeUnit.NANOSECONDS.toMillis(nanoseconds()); - } - - private void maybeSleep(long ms) { - if (ms != 0) - sleep(ms); - } - - @Override - public void sleep(long ms) { - timeMs.addAndGet(ms); - highResTimeNs.addAndGet(TimeUnit.MILLISECONDS.toNanos(ms)); - tick(); - } - - public void setCurrentTimeMs(long newMs) { - long oldMs = timeMs.getAndSet(newMs); - - // does not allow to set to an older timestamp - if (oldMs > newMs) - throw new IllegalArgumentException("Setting the time to " + newMs + " while current time " + oldMs + " is newer; this is not allowed"); - - highResTimeNs.set(TimeUnit.MILLISECONDS.toNanos(newMs)); - tick(); - } - - private void tick() { - for (MockTimeListener listener : listeners) { - listener.tick(); - } - } -} diff --git a/clients/src/test/java/org/oracle/okafka/common/utils/Serializer.java b/clients/src/test/java/org/oracle/okafka/common/utils/Serializer.java deleted file mode 100644 index 368795c..0000000 --- a/clients/src/test/java/org/oracle/okafka/common/utils/Serializer.java +++ /dev/null @@ -1,65 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. 
-** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.oracle.oorg/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * 04/20/2020: This file is modified to support Kafka Java Client compatability to Oracle Transactional Event Queues. - * - */ - -package org.oracle.okafka.common.utils; - -import java.io.ByteArrayInputStream; -import java.io.ByteArrayOutputStream; -import java.io.ObjectOutputStream; -import java.io.IOException; -import java.io.InputStream; -import java.io.ObjectInputStream; - -public class Serializer { - - public static byte[] serialize(Object toSerialize) throws IOException { - ByteArrayOutputStream arrayOutputStream = new ByteArrayOutputStream(); - try (ObjectOutputStream ooStream = new ObjectOutputStream(arrayOutputStream)) { - ooStream.writeObject(toSerialize); - return arrayOutputStream.toByteArray(); - } - } - - public static Object deserialize(InputStream inputStream) throws IOException, ClassNotFoundException { - try (ObjectInputStream objectInputStream = new ObjectInputStream(inputStream)) { - return objectInputStream.readObject(); - } - } - - public static Object deserialize(byte[] byteArray) throws IOException, ClassNotFoundException { - ByteArrayInputStream arrayInputStream = new ByteArrayInputStream(byteArray); - return deserialize(arrayInputStream); - } - - public static Object deserialize(String fileName) throws IOException, ClassNotFoundException { - ClassLoader classLoader = Serializer.class.getClassLoader(); - InputStream fileStream = classLoader.getResourceAsStream(fileName); - return deserialize(fileStream); - } -} diff --git a/clients/src/test/java/org/oracle/okafka/test/MockMetricsReporter.java b/clients/src/test/java/org/oracle/okafka/test/MockMetricsReporter.java deleted file mode 100644 index 16d94b9..0000000 --- a/clients/src/test/java/org/oracle/okafka/test/MockMetricsReporter.java +++ /dev/null @@ -1,65 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * 04/20/2020: This file is modified to support Kafka Java Client compatability to Oracle Transactional Event Queues. - * - */ - -package org.oracle.okafka.test; - -import org.oracle.okafka.common.metrics.KafkaMetric; -import org.oracle.okafka.common.metrics.MetricsReporter; - -import java.util.List; -import java.util.Map; -import java.util.concurrent.atomic.AtomicInteger; - -public class MockMetricsReporter implements MetricsReporter { - public static final AtomicInteger INIT_COUNT = new AtomicInteger(0); - public static final AtomicInteger CLOSE_COUNT = new AtomicInteger(0); - - public MockMetricsReporter() { - } - - @Override - public void init(List metrics) { - INIT_COUNT.incrementAndGet(); - } - - @Override - public void metricChange(KafkaMetric metric) {} - - @Override - public void metricRemoval(KafkaMetric metric) {} - - @Override - public void close() { - CLOSE_COUNT.incrementAndGet(); - } - - @Override - public void configure(Map configs) { - } -} \ No newline at end of file diff --git a/clients/src/test/java/org/oracle/okafka/test/MockPartitioner.java b/clients/src/test/java/org/oracle/okafka/test/MockPartitioner.java deleted file mode 100644 index fbbb99b..0000000 --- a/clients/src/test/java/org/oracle/okafka/test/MockPartitioner.java +++ /dev/null @@ -1,64 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * 04/20/2020: This file is modified to support Kafka Java Client compatability to Oracle Transactional Event Queues. 
- * - */ - -package org.oracle.okafka.test; - -import org.oracle.okafka.common.Cluster; -import org.oracle.okafka.clients.producer.Partitioner; - -import java.util.Map; -import java.util.concurrent.atomic.AtomicInteger; - -public class MockPartitioner implements Partitioner { - public static final AtomicInteger INIT_COUNT = new AtomicInteger(0); - public static final AtomicInteger CLOSE_COUNT = new AtomicInteger(0); - - public MockPartitioner() { - INIT_COUNT.incrementAndGet(); - } - - @Override - public void configure(Map configs) { - } - - @Override - public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) { - return 0; - } - - @Override - public void close() { - CLOSE_COUNT.incrementAndGet(); - } - - public static void resetCounters() { - INIT_COUNT.set(0); - CLOSE_COUNT.set(0); - } -} diff --git a/clients/src/test/java/org/oracle/okafka/test/MockProducerInterceptor.java b/clients/src/test/java/org/oracle/okafka/test/MockProducerInterceptor.java deleted file mode 100644 index 1373d56..0000000 --- a/clients/src/test/java/org/oracle/okafka/test/MockProducerInterceptor.java +++ /dev/null @@ -1,119 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * 04/20/2020: This file is modified to support Kafka Java Client compatability to Oracle Transactional Event Queues. 
- * - */ - - -package org.oracle.okafka.test; - -import org.oracle.okafka.clients.producer.ProducerConfig; -import org.oracle.okafka.clients.producer.ProducerInterceptor; -import org.oracle.okafka.clients.producer.ProducerRecord; -import org.oracle.okafka.clients.producer.RecordMetadata; -import org.oracle.okafka.common.ClusterResourceListener; -import org.oracle.okafka.common.ClusterResource; -import org.oracle.okafka.common.config.ConfigException; - -import java.util.Map; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicReference; - -public class MockProducerInterceptor implements ClusterResourceListener, ProducerInterceptor { - public static final AtomicInteger INIT_COUNT = new AtomicInteger(0); - public static final AtomicInteger CLOSE_COUNT = new AtomicInteger(0); - public static final AtomicInteger ONSEND_COUNT = new AtomicInteger(0); - public static final AtomicInteger ON_SUCCESS_COUNT = new AtomicInteger(0); - public static final AtomicInteger ON_ERROR_COUNT = new AtomicInteger(0); - public static final AtomicInteger ON_ERROR_WITH_METADATA_COUNT = new AtomicInteger(0); - public static final AtomicReference CLUSTER_META = new AtomicReference<>(); - public static final ClusterResource NO_CLUSTER_ID = new ClusterResource("no_cluster_id"); - public static final AtomicReference CLUSTER_ID_BEFORE_ON_ACKNOWLEDGEMENT = new AtomicReference<>(NO_CLUSTER_ID); - public static final String APPEND_STRING_PROP = "mock.interceptor.append"; - private String appendStr; - - public MockProducerInterceptor() { - INIT_COUNT.incrementAndGet(); - } - - @Override - public void configure(Map configs) { - // ensure this method is called and expected configs are passed in - Object o = configs.get(APPEND_STRING_PROP); - if (o == null) - throw new ConfigException("Mock producer interceptor expects configuration " + APPEND_STRING_PROP); - if (o instanceof String) - appendStr = (String) o; - - // clientId also must be in configs - Object clientIdValue = configs.get(ProducerConfig.CLIENT_ID_CONFIG); - if (clientIdValue == null) - throw new ConfigException("Mock producer interceptor expects configuration " + ProducerConfig.CLIENT_ID_CONFIG); - } - - @Override - public ProducerRecord onSend(ProducerRecord record) { - ONSEND_COUNT.incrementAndGet(); - return new ProducerRecord<>( - record.topic(), record.partition(), record.key(), record.value().concat(appendStr)); - } - - @Override - public void onAcknowledgement(RecordMetadata metadata, Exception exception) { - // This will ensure that we get the cluster metadata when onAcknowledgement is called for the first time - // as subsequent compareAndSet operations will fail. 
- CLUSTER_ID_BEFORE_ON_ACKNOWLEDGEMENT.compareAndSet(NO_CLUSTER_ID, CLUSTER_META.get()); - - if (exception != null) { - ON_ERROR_COUNT.incrementAndGet(); - if (metadata != null) { - ON_ERROR_WITH_METADATA_COUNT.incrementAndGet(); - } - } else if (metadata != null) - ON_SUCCESS_COUNT.incrementAndGet(); - } - - @Override - public void close() { - CLOSE_COUNT.incrementAndGet(); - } - - public static void resetCounters() { - INIT_COUNT.set(0); - CLOSE_COUNT.set(0); - ONSEND_COUNT.set(0); - ON_SUCCESS_COUNT.set(0); - ON_ERROR_COUNT.set(0); - ON_ERROR_WITH_METADATA_COUNT.set(0); - CLUSTER_META.set(null); - CLUSTER_ID_BEFORE_ON_ACKNOWLEDGEMENT.set(NO_CLUSTER_ID); - } - - @Override - public void onUpdate(ClusterResource clusterResource) { - CLUSTER_META.set(clusterResource); - } -} \ No newline at end of file diff --git a/clients/src/test/java/org/oracle/okafka/test/MockSerializer.java b/clients/src/test/java/org/oracle/okafka/test/MockSerializer.java deleted file mode 100644 index 4202f45..0000000 --- a/clients/src/test/java/org/oracle/okafka/test/MockSerializer.java +++ /dev/null @@ -1,72 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * 04/20/2020: This file is modified to support Kafka Java Client compatability to Oracle Transactional Event Queues. - * - */ - -package org.oracle.okafka.test; - -import org.oracle.okafka.common.ClusterResourceListener; -import org.oracle.okafka.common.ClusterResource; -import org.oracle.okafka.common.serialization.Serializer; - -import java.util.Map; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicReference; - -public class MockSerializer implements ClusterResourceListener, Serializer { - public static final AtomicInteger INIT_COUNT = new AtomicInteger(0); - public static final AtomicInteger CLOSE_COUNT = new AtomicInteger(0); - public static final AtomicReference CLUSTER_META = new AtomicReference<>(); - public static final ClusterResource NO_CLUSTER_ID = new ClusterResource("no_cluster_id"); - public static final AtomicReference CLUSTER_ID_BEFORE_SERIALIZE = new AtomicReference<>(NO_CLUSTER_ID); - - public MockSerializer() { - INIT_COUNT.incrementAndGet(); - } - - @Override - public void configure(Map configs, boolean isKey) { - } - - @Override - public byte[] serialize(String topic, byte[] data) { - // This will ensure that we get the cluster metadata when serialize is called for the first time - // as subsequent compareAndSet operations will fail. 
- CLUSTER_ID_BEFORE_SERIALIZE.compareAndSet(NO_CLUSTER_ID, CLUSTER_META.get()); - return data; - } - - @Override - public void close() { - CLOSE_COUNT.incrementAndGet(); - } - - @Override - public void onUpdate(ClusterResource clusterResource) { - CLUSTER_META.set(clusterResource); - } -} diff --git a/clients/src/test/java/org/oracle/okafka/test/TestCondition.java b/clients/src/test/java/org/oracle/okafka/test/TestCondition.java deleted file mode 100644 index a2e84c1..0000000 --- a/clients/src/test/java/org/oracle/okafka/test/TestCondition.java +++ /dev/null @@ -1,39 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * 04/20/2020: This file is modified to support Kafka Java Client compatability to Oracle Transactional Event Queues. - * - */ - -package org.oracle.okafka.test; - -/** - * Interface to wrap actions that are required to wait until a condition is met - * for testing purposes. Note that this is not intended to do any assertions. - */ -public interface TestCondition { - - boolean conditionMet(); -} diff --git a/clients/src/test/java/org/oracle/okafka/test/TestUtils.java b/clients/src/test/java/org/oracle/okafka/test/TestUtils.java deleted file mode 100644 index 1caf461..0000000 --- a/clients/src/test/java/org/oracle/okafka/test/TestUtils.java +++ /dev/null @@ -1,353 +0,0 @@ -/* -** OKafka Java Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * 04/20/2020: This file is modified to support Kafka Java Client compatability to Oracle Transactional Event Queues. 
- * - */ - -package org.oracle.okafka.test; - -import org.oracle.okafka.clients.consumer.ConsumerConfig; -import org.oracle.okafka.clients.producer.ProducerConfig; -import org.oracle.okafka.common.Cluster; -import org.oracle.okafka.common.Node; -import org.oracle.okafka.common.PartitionInfo; -import org.oracle.okafka.common.utils.Utils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.nio.file.Files; -import java.nio.file.Path; -import java.util.ArrayList; -import java.util.Base64; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Properties; -import java.util.Random; -import java.util.Set; -import java.util.UUID; -import java.util.regex.Matcher; -import java.util.regex.Pattern; - -import static java.util.Arrays.asList; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -/** - * Helper functions for writing unit tests - */ -public class TestUtils { - private static final Logger log = LoggerFactory.getLogger(TestUtils.class); - - //public static final File IO_TMP_DIR = new File(System.getProperty("java.io.tmpdir")); - - public static final String LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"; - public static final String DIGITS = "0123456789"; - public static final String LETTERS_AND_DIGITS = LETTERS + DIGITS; - - /* A consistent random number generator to make tests repeatable */ - public static final Random SEEDED_RANDOM = new Random(192348092834L); - public static final Random RANDOM = new Random(); - public static final long DEFAULT_MAX_WAIT_MS = 15000; - - public static Cluster singletonCluster() { - return clusterWith(1); - } - - public static Cluster singletonCluster(final Map topicPartitionCounts) { - return clusterWith(1, topicPartitionCounts); - } - - public static Cluster singletonCluster(final String topic, final int partitions) { - return clusterWith(1, topic, partitions); - } - - public static Cluster clusterWith(int nodes) { - return clusterWith(nodes, new HashMap()); - } - - public static Cluster clusterWith(final int nodes, final Map topicPartitionCounts) { - final Node[] ns = new Node[nodes]; - for (int i = 0; i < nodes; i++) - ns[i] = new Node(i, "localhost", 1969, "", ""); - final List parts = new ArrayList<>(); - for (final Map.Entry topicPartition : topicPartitionCounts.entrySet()) { - final String topic = topicPartition.getKey(); - final int partitions = topicPartition.getValue(); - for (int i = 0; i < partitions; i++) - parts.add(new PartitionInfo(topic, i, ns[i % ns.length], ns, ns)); - } - return new Cluster("kafka-cluster", asList(ns), parts, Collections.emptySet(), Collections.emptySet(), null); - } - - public static Cluster clusterWith(final int nodes, final String topic, final int partitions) { - return clusterWith(nodes, Collections.singletonMap(topic, partitions)); - } - - /** - * Generate an array of random bytes - * - * @param size The size of the array - */ - public static byte[] randomBytes(final int size) { - final byte[] bytes = new byte[size]; - SEEDED_RANDOM.nextBytes(bytes); - return bytes; - } - - /** - * Generate a random string of letters and digits of the given length - * - * @param len The length of the string - * @return The random string - */ - public static String 
randomString(final int len) { - final StringBuilder b = new StringBuilder(); - for (int i = 0; i < len; i++) - b.append(LETTERS_AND_DIGITS.charAt(SEEDED_RANDOM.nextInt(LETTERS_AND_DIGITS.length()))); - return b.toString(); - } - - /** - * Create an empty file in the default temporary-file directory, using `kafka` as the prefix and `tmp` as the - * suffix to generate its name. - */ - public static File tempFile() throws IOException { - final File file = File.createTempFile("kafka", ".tmp"); - file.deleteOnExit(); - - return file; - } - - /** - * Create a temporary relative directory in the default temporary-file directory with the given prefix. - * - * @param prefix The prefix of the temporary directory, if null using "kafka-" as default prefix - */ - public static File tempDirectory(final String prefix) { - return tempDirectory(null, prefix); - } - - /** - * Create a temporary relative directory in the default temporary-file directory with a - * prefix of "kafka-" - * - * @return the temporary directory just created. - */ - public static File tempDirectory() { - return tempDirectory(null); - } - - /** - * Create a temporary relative directory in the specified parent directory with the given prefix. - * - * @param parent The parent folder path name, if null using the default temporary-file directory - * @param prefix The prefix of the temporary directory, if null using "kafka-" as default prefix - */ - public static File tempDirectory(final Path parent, String prefix) { - final File file; - prefix = prefix == null ? "kafka-" : prefix; - try { - file = parent == null ? - Files.createTempDirectory(prefix).toFile() : Files.createTempDirectory(parent, prefix).toFile(); - } catch (final IOException ex) { - throw new RuntimeException("Failed to create a temp dir", ex); - } - file.deleteOnExit(); - - Runtime.getRuntime().addShutdownHook(new Thread() { - @Override - public void run() { - try { - Utils.delete(file); - } catch (IOException e) { - log.error("Error deleting {}", file.getAbsolutePath(), e); - } - } - }); - - return file; - } - - public static Properties producerConfig(final String bootstrapServers, - final Class keySerializer, - final Class valueSerializer, - final Properties additional) { - final Properties properties = new Properties(); - properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); - properties.put(ProducerConfig.ACKS_CONFIG, "all"); - properties.put(ProducerConfig.RETRIES_CONFIG, 0); - properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, keySerializer); - properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, valueSerializer); - properties.putAll(additional); - return properties; - } - - public static Properties producerConfig(final String bootstrapServers, final Class keySerializer, final Class valueSerializer) { - return producerConfig(bootstrapServers, keySerializer, valueSerializer, new Properties()); - } - - public static Properties consumerConfig(final String bootstrapServers, - final String groupId, - final Class keyDeserializer, - final Class valueDeserializer, - final Properties additional) { - - final Properties consumerConfig = new Properties(); - consumerConfig.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); - consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, groupId); - consumerConfig.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); - consumerConfig.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, keyDeserializer); - consumerConfig.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializer); 
- consumerConfig.putAll(additional); - return consumerConfig; - } - - public static Properties consumerConfig(final String bootstrapServers, - final String groupId, - final Class keyDeserializer, - final Class valueDeserializer) { - return consumerConfig(bootstrapServers, - groupId, - keyDeserializer, - valueDeserializer, - new Properties()); - } - - /** - * returns consumer config with random UUID for the Group ID - */ - public static Properties consumerConfig(final String bootstrapServers, final Class keyDeserializer, final Class valueDeserializer) { - return consumerConfig(bootstrapServers, - UUID.randomUUID().toString(), - keyDeserializer, - valueDeserializer, - new Properties()); - } - - /** - * uses default value of 15 seconds for timeout - */ - public static void waitForCondition(final TestCondition testCondition, final String conditionDetails) throws InterruptedException { - waitForCondition(testCondition, DEFAULT_MAX_WAIT_MS, conditionDetails); - } - - /** - * Wait for condition to be met for at most {@code maxWaitMs} and throw assertion failure otherwise. - * This should be used instead of {@code Thread.sleep} whenever possible as it allows a longer timeout to be used - * without unnecessarily increasing test time (as the condition is checked frequently). The longer timeout is needed to - * avoid transient failures due to slow or overloaded machines. - */ - public static void waitForCondition(final TestCondition testCondition, final long maxWaitMs, String conditionDetails) throws InterruptedException { - final long startTime = System.currentTimeMillis(); - - boolean testConditionMet; - while (!(testConditionMet = testCondition.conditionMet()) && ((System.currentTimeMillis() - startTime) < maxWaitMs)) { - Thread.sleep(Math.min(maxWaitMs, 100L)); - } - - // don't re-evaluate testCondition.conditionMet() because this might slow down some tests significantly (this - // could be avoided by making the implementations more robust, but we have a large number of such implementations - // and it's easier to simply avoid the issue altogether) - if (!testConditionMet) { - conditionDetails = conditionDetails != null ? conditionDetails : ""; - throw new AssertionError("Condition not met within timeout " + maxWaitMs + ". " + conditionDetails); - } - } - - /** - * Checks if a cluster id is valid. - * @param clusterId - */ - public static void isValidClusterId(String clusterId) { - assertNotNull(clusterId); - - // Base 64 encoded value is 22 characters - assertEquals(clusterId.length(), 22); - - Pattern clusterIdPattern = Pattern.compile("[a-zA-Z0-9_\\-]+"); - Matcher matcher = clusterIdPattern.matcher(clusterId); - assertTrue(matcher.matches()); - - // Convert into normal variant and add padding at the end. - String originalClusterId = String.format("%s==", clusterId.replace("_", "/").replace("-", "+")); - byte[] decodedUuid = Base64.getDecoder().decode(originalClusterId); - - // We expect 16 bytes, same as the input UUID. - assertEquals(decodedUuid.length, 16); - - //Check if it can be converted back to a UUID. - try { - ByteBuffer uuidBuffer = ByteBuffer.wrap(decodedUuid); - new UUID(uuidBuffer.getLong(), uuidBuffer.getLong()).toString(); - } catch (Exception e) { - fail(clusterId + " cannot be converted back to UUID."); - } - } - - /** - * Checks the two iterables for equality by first converting both to a list. 
-     */
-    public static void checkEquals(Iterable it1, Iterable it2) {
-        assertEquals(toList(it1), toList(it2));
-    }
-
-    public static void checkEquals(Iterator it1, Iterator it2) {
-        assertEquals(Utils.toList(it1), Utils.toList(it2));
-    }
-
-    public static void checkEquals(Set c1, Set c2, String firstDesc, String secondDesc) {
-        if (!c1.equals(c2)) {
-            Set missing1 = new HashSet<>(c2);
-            missing1.removeAll(c1);
-            Set missing2 = new HashSet<>(c1);
-            missing2.removeAll(c2);
-            fail(String.format("Sets not equal, missing %s=%s, missing %s=%s", firstDesc, missing1, secondDesc, missing2));
-        }
-    }
-
-    public static List toList(Iterable iterable) {
-        List list = new ArrayList<>();
-        for (T item : iterable)
-            list.add(item);
-        return list;
-    }
-
-    /* public static ByteBuffer toBuffer(Struct struct) {
-        ByteBuffer buffer = ByteBuffer.allocate(struct.sizeOf());
-        struct.writeTo(buffer);
-        buffer.rewind();
-        return buffer;
-    }*/
-}
diff --git a/clients/src/test/java/org/oracle/okafka/tests/ConsumerMetricsTest.java b/clients/src/test/java/org/oracle/okafka/tests/ConsumerMetricsTest.java
new file mode 100644
index 0000000..0c3e74b
--- /dev/null
+++ b/clients/src/test/java/org/oracle/okafka/tests/ConsumerMetricsTest.java
@@ -0,0 +1,137 @@
+package org.oracle.okafka.tests;
+
+import java.io.File;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.time.Duration;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Map;
+import java.util.Properties;
+
+import org.apache.kafka.clients.consumer.Consumer;
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.kafka.clients.consumer.ConsumerRecords;
+import org.apache.kafka.common.Metric;
+import org.apache.kafka.common.MetricName;
+import org.apache.kafka.common.TopicPartition;
+import org.junit.Test;
+import org.oracle.okafka.clients.consumer.KafkaConsumer;
+
+public class ConsumerMetricsTest {
+    public static void getMetricData(Consumer consumer,String fileName) {
+        try {
+            Map metricData = consumer.metrics();
+            File csvFile = new File(System.getProperty("user.dir") +fileName+ ".csv");
+            FileWriter fileWriter = new FileWriter(csvFile);
+            StringBuilder headLine = new StringBuilder();
+            headLine.append("Name");
+            headLine.append(',');
+            headLine.append("Group");
+            headLine.append(',');
+            headLine.append("Description");
+            headLine.append(',');
+            headLine.append("Tags");
+            headLine.append(',');
+            headLine.append("Value");
+            headLine.append("\n");
+            fileWriter.write(headLine.toString());
+            metricData.forEach((a, b) -> {
+                try {
+
+                    StringBuilder line = new StringBuilder();
+
+                    line.append(a.name());
+                    line.append(',');
+                    line.append(a.group());
+                    line.append(',');
+                    line.append(a.description());
+                    if(a.tags().containsKey("node-id") || a.tags().containsKey("topic")) {
+                        if(a.tags().containsKey("node-id")) {
+                            line.append(',');
+                            line.append(a.tags().get("node-id"));
+                        }
+                        if(a.tags().containsKey("topic")) {
+                            line.append(',');
+                            line.append("topic-"+a.tags().get("topic"));
+                        }
+                    }else{
+                        line.append(',');
+                        line.append("");
+
+                    }
+                    line.append(',');
+                    line.append(b.metricValue().toString());
+                    line.append("\n");
+                    fileWriter.write(line.toString());
+
+                } catch (IOException e) {
+                    e.printStackTrace();
+                }
+
+            });
+            fileWriter.close();
+        } catch(IOException e) {
+            e.printStackTrace();
+        }
+    }
+
+
+
+    @Test
+    public void ConsumingTest() {
+        Properties prop = new Properties();
+        prop = OkafkaSetup.setup();
+        prop.put("group.id" , "S1");
+        prop.put("max.poll.records", 1000);
+        prop.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
+        prop.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
+        Consumer consumer = new KafkaConsumer(prop);
+        consumer.subscribe(Arrays.asList("TEQ"));
+        int expectedMsgCnt =1000;
+        int retryCount=1;
+        int msgCnt = 0;
+        try {
+            while(true) {
+                try {
+                    ConsumerRecords records = consumer.poll(Duration.ofMillis(10000));
+
+                    Collection partitions = records.partitions();
+                    for (ConsumerRecord record : records)
+                        System.out.printf("partition = %d, offset = %d, key = %s, value =%s\n ", record.partition(), record.offset(), record.key(), record.value());
+
+                    if(records != null && records.count() > 0) {
+                        msgCnt += records.count();
+                        System.out.println("Committing records " + records.count());
+                        consumer.commitSync();
+
+                        if(msgCnt >= expectedMsgCnt )
+                        {
+                            System.out.println("Received " + msgCnt + " Expected " + expectedMsgCnt +". Exiting Now.");
+                            break;
+                        }
+                    }
+                    else {
+                        if(retryCount>3)
+                            break;
+                        System.out.println("No Record Fetched. Retrying in 1 second");
+                        Thread.sleep(1000);
+                        retryCount++;
+                    }
+                }catch(Exception e)
+                {
+                    throw e;
+                }
+            }
+
+        }catch(Exception e)
+        {
+            System.out.println("Exception from consumer " + e);
+            e.printStackTrace();
+        }finally {
+            ConsumerMetricsTest.getMetricData(consumer, "afterConsumingOkafka");
+            System.out.println("Closing Consumer");
+            consumer.close();
+        }
+    }
+}
diff --git a/clients/src/test/java/org/oracle/okafka/tests/OkafkaAutoOffsetReset.java b/clients/src/test/java/org/oracle/okafka/tests/OkafkaAutoOffsetReset.java
new file mode 100644
index 0000000..391f639
--- /dev/null
+++ b/clients/src/test/java/org/oracle/okafka/tests/OkafkaAutoOffsetReset.java
@@ -0,0 +1,69 @@
+package org.oracle.okafka.tests;
+
+
+import java.io.IOException;
+import java.time.Duration;
+import java.util.Arrays;
+import java.util.Properties;
+import org.apache.kafka.clients.consumer.Consumer;
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.kafka.clients.consumer.ConsumerRecords;
+import org.junit.Test;
+import org.oracle.okafka.clients.consumer.KafkaConsumer;
+
+public class OkafkaAutoOffsetReset {
+
+    @Test
+    public void autoOffsetSeekTest() throws IOException {
+        Properties prop = new Properties();
+        prop = OkafkaSetup.setup();
+        prop.put("group.id" , "S1");
+        prop.put("max.poll.records", 1000);
+        prop.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
+        prop.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
+        prop.put("auto.offset.reset", "earliest");
+
+        Consumer consumer = new KafkaConsumer(prop);
+
+        consumer.subscribe(Arrays.asList("TEQ"));
+
+        int expectedMsgCnt = 1000;
+        int msgCnt = 0;
+        try {
+            while(true) {
+                try {
+                    ConsumerRecords records = consumer.poll(Duration.ofMillis(10000));
+
+                    for (ConsumerRecord record : records)
+                        System.out.printf("partition = %d, offset = %d, key = %s, value =%s\n ", record.partition(), record.offset(), record.key(), record.value());
+
+                    if(records != null && records.count() > 0) {
+                        msgCnt += records.count();
+                        System.out.println("Committing records " + records.count());
+                        consumer.commitSync();
+
+                        if(msgCnt >= expectedMsgCnt )
+                        {
+                            System.out.println("Received " + msgCnt + " Expected " + expectedMsgCnt +". Exiting Now.");
+                            break;
+                        }
+                    }
+                    else {
+                        System.out.println("No Record Fetched. Retrying in 1 second");
+                        Thread.sleep(1000);
+                    }
+                }catch(Exception e)
+                {
+                    throw e;
+                }
+            }
+        }catch(Exception e)
+        {
+            System.out.println("Exception from consumer " + e);
+            e.printStackTrace();
+        }finally {
+            System.out.println("Closing Consumer");
+            consumer.close();
+        }
+    }
+}
diff --git a/clients/src/test/java/org/oracle/okafka/tests/OkafkaDeleteTopic.java b/clients/src/test/java/org/oracle/okafka/tests/OkafkaDeleteTopic.java
new file mode 100644
index 0000000..4828ed7
--- /dev/null
+++ b/clients/src/test/java/org/oracle/okafka/tests/OkafkaDeleteTopic.java
@@ -0,0 +1,39 @@
+package org.oracle.okafka.tests;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Properties;
+import java.util.concurrent.ExecutionException;
+import org.apache.kafka.clients.admin.Admin;
+import org.apache.kafka.clients.admin.CreateTopicsResult;
+import org.apache.kafka.clients.admin.NewTopic;
+import org.apache.kafka.common.KafkaFuture;
+import org.junit.Test;
+import org.oracle.okafka.clients.admin.AdminClient;
+import org.oracle.okafka.clients.admin.DeleteTopicsResult;
+import org.oracle.okafka.clients.admin.KafkaAdminClient;
+
+public class OkafkaDeleteTopic {
+
+    @Test
+    public void DeleteTopicTest() {
+        try (Admin admin = AdminClient.create(OkafkaSetup.setup())) {
+            KafkaAdminClient kAdminClient = (((org.oracle.okafka.clients.admin.KafkaAdminClient)admin));
+            DeleteTopicsResult delResult = kAdminClient.deleteTopics(Collections.singletonList("TEQ"), new org.oracle.okafka.clients.admin.DeleteTopicsOptions());
+            try {
+                KafkaFuture ftr = delResult.all();
+                ftr.get();
+                System.out.println("Main Thread Out of wait now");
+            } catch ( InterruptedException | ExecutionException e ) {
+
+                throw new IllegalStateException(e);
+            }
+            System.out.println("Auto Closing admin now");
+        }
+        catch(Exception e)
+        {
+            System.out.println("Exception while creating topic " + e);
+            e.printStackTrace();
+        }
+        System.out.println("Main thread completed ");
+    }
+}
\ No newline at end of file
diff --git a/clients/src/test/java/org/oracle/okafka/tests/OkafkaSeekToBeginning.java b/clients/src/test/java/org/oracle/okafka/tests/OkafkaSeekToBeginning.java
new file mode 100644
index 0000000..2c8aeec
--- /dev/null
+++ b/clients/src/test/java/org/oracle/okafka/tests/OkafkaSeekToBeginning.java
@@ -0,0 +1,97 @@
+package org.oracle.okafka.tests;
+
+import java.io.IOException;
+import java.time.Duration;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Properties;
+import org.apache.kafka.clients.consumer.Consumer;
+import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.kafka.clients.consumer.ConsumerRecords;
+import org.apache.kafka.common.TopicPartition;
+import org.junit.Test;
+import org.oracle.okafka.clients.consumer.KafkaConsumer;
+
+public class OkafkaSeekToBeginning {
+    @Test
+    public void SeekBeginningTest() throws IOException {
+        Properties prop = new Properties();
+        prop = OkafkaSetup.setup();
+        prop.put("group.id" , "S1");
+        prop.put("max.poll.records", 1000);
+        prop.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
+        prop.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
+
+        Consumer consumer = new KafkaConsumer(prop);
+        try {
+
+            consumer.subscribe(Arrays.asList("TEQ"), new ConsumerRebalanceListener() {
+                @Override
+                public synchronized void onPartitionsRevoked(Collection partitions) {
+                    System.out.println("Partitions revoked for rebalance.");
+                }
+                @Override
+                public synchronized void onPartitionsAssigned(Collection partitions) {
+                    System.out.println("New Partitions assigned after rebalance");
+                    try {
+                        consumer.seekToBeginning(partitions);
+                    }
+                    catch (Exception e) {
+                        e.printStackTrace();
+                    }
+                }
+            });
+        }
+        catch(Exception e) {
+            System.out.println(e);
+            e.printStackTrace();
+        }
+        int expectedMsgCnt = 1000;
+        int msgCnt = 0;
+        try {
+
+            while(true) {
+                try {
+                    ConsumerRecords records = consumer.poll(Duration.ofMillis(10000));
+
+                    for (ConsumerRecord record : records)
+                        System.out.printf("partition = %d, offset = %d, key = %s, value =%s\n ", record.partition(), record.offset(), record.key(), record.value());
+
+                    if(records != null && records.count() > 0) {
+                        msgCnt += records.count();
+                        System.out.println("Committing records " + records.count());
+                        consumer.commitSync();
+
+                        if(msgCnt >= expectedMsgCnt )
+                        {
+                            System.out.println("Received " + msgCnt + " Expected " + expectedMsgCnt +". Exiting Now.");
+                            break;
+                        }
+                    }
+                    else {
+                        System.out.println("No Record Fetched. Retrying in 1 second");
+                        Thread.sleep(1000);
+                    }
+
+                }catch(Exception e)
+                {
+                    throw e;
+                }
+            }
+
+        }catch(Exception e)
+        {
+            System.out.println("Exception from consumer " + e);
+            e.printStackTrace();
+        }finally {
+            System.out.println("Closing Consumer");
+            consumer.close();
+        }
+    }
+
+}
+
+
+
diff --git a/clients/src/test/java/org/oracle/okafka/tests/OkafkaSeekToEnd.java b/clients/src/test/java/org/oracle/okafka/tests/OkafkaSeekToEnd.java
new file mode 100644
index 0000000..6f8a9a8
--- /dev/null
+++ b/clients/src/test/java/org/oracle/okafka/tests/OkafkaSeekToEnd.java
@@ -0,0 +1,99 @@
+package org.oracle.okafka.tests;
+
+import java.io.IOException;
+import java.time.Duration;
+import java.time.Instant;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Properties;
+import org.apache.kafka.clients.consumer.Consumer;
+import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.kafka.clients.consumer.ConsumerRecords;
+import org.apache.kafka.common.TopicPartition;
+import org.junit.Test;
+import org.oracle.okafka.clients.consumer.KafkaConsumer;
+
+public class OkafkaSeekToEnd {
+    @Test
+    public void SeekEndTest() throws IOException {
+        Properties prop = new Properties();
+        prop = OkafkaSetup.setup();
+        prop.put("group.id" , "S1");
+        prop.put("max.poll.records", 1000);
+        prop.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
+        prop.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
+
+        Consumer consumer = new KafkaConsumer(prop);
+        try {
+            consumer.subscribe(Arrays.asList("TEQ"), new ConsumerRebalanceListener() {
+                @Override
+                public synchronized void onPartitionsRevoked(Collection partitions) {
+                    System.out.println("Partitions revoked for rebalance.");
+                }
+                @Override
+                public synchronized void onPartitionsAssigned(Collection partitions) {
+                    System.out.println("New Partitions assigned after rebalance");
+                    try {
+                        consumer.seekToEnd(partitions);
+                    }
+                    catch (Exception e) {
+                        e.printStackTrace();
+                    }
+                }
+            });
+        }
+        catch(Exception e) {
+            System.out.println(e);
+            e.printStackTrace();
+        }
+        int expectedMsgCnt = 1000;
+        int msgCnt = 0;
+        try {
+            Instant starttime = Instant.now();
+            long runtime =0;
+            while(true && runtime <=120) {
+                try {
+                    ConsumerRecords records = consumer.poll(Duration.ofMillis(10000));
+
+                    for (ConsumerRecord record : records)
+
System.out.printf("partition = %d, offset = %d, key = %s, value =%s\n ", record.partition(), record.offset(), record.key(), record.value()); + + if(records != null && records.count() > 0) { + msgCnt += records.count(); + System.out.println("Committing records " + records.count()); + consumer.commitSync(); + + if(msgCnt >= expectedMsgCnt ) + { + System.out.println("Received " + msgCnt + " Expected " + expectedMsgCnt +". Exiting Now."); + break; + } + } + else { + System.out.println("No Record Fetched. Retrying in 1 second"); + Thread.sleep(1000); + } + runtime = Duration.between(starttime, Instant.now()).toSeconds(); + + }catch(Exception e) + { + throw e; + } + } + + }catch(Exception e) + { + System.out.println("Exception from consumer " + e); + e.printStackTrace(); + }finally { + System.out.println("Closing Consumer"); + consumer.close(); + } + } + + } + + + + diff --git a/clients/src/test/java/org/oracle/okafka/tests/OkafkaSetup.java b/clients/src/test/java/org/oracle/okafka/tests/OkafkaSetup.java new file mode 100644 index 0000000..d9385b8 --- /dev/null +++ b/clients/src/test/java/org/oracle/okafka/tests/OkafkaSetup.java @@ -0,0 +1,24 @@ +package org.oracle.okafka.tests; + +import java.io.FileInputStream; +import java.io.InputStream; +import java.util.Properties; +import org.junit.BeforeClass; + +public class OkafkaSetup { + + @BeforeClass + public static Properties setup(){ + + final Properties BaseProperties = new Properties(); + InputStream input; + try { + input = new FileInputStream("src/test/java/test.config"); + BaseProperties.load(input); + } catch (Exception e) { + System.out.println("Exception whlie loading config.properties file. " + e); + e.printStackTrace(); + } + return BaseProperties; + } +} diff --git a/clients/src/test/java/org/oracle/okafka/tests/OkafkaUnsubscribe.java b/clients/src/test/java/org/oracle/okafka/tests/OkafkaUnsubscribe.java new file mode 100644 index 0000000..88cddd3 --- /dev/null +++ b/clients/src/test/java/org/oracle/okafka/tests/OkafkaUnsubscribe.java @@ -0,0 +1,106 @@ +package org.oracle.okafka.tests; + +import org.junit.Test; +import java.io.FileInputStream; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.InputStream; +import java.time.Duration; +import java.util.*; +import org.apache.kafka.clients.consumer.Consumer; +import org.apache.kafka.clients.consumer.ConsumerRebalanceListener; +import org.apache.kafka.clients.consumer.ConsumerRecord; +import org.apache.kafka.clients.consumer.ConsumerRecords; +import org.apache.kafka.common.TopicPartition; +import org.oracle.okafka.clients.consumer.KafkaConsumer; +import org.oracle.okafka.clients.consumer.internals.SubscriptionState; + +public class OkafkaUnsubscribe{ + + @Test + public void UnsubscribeTest() throws IOException { + Properties prop = new Properties(); + prop = OkafkaSetup.setup(); + prop.put("group.id" , "S1"); + prop.put("max.poll.records", 1000); + prop.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer"); + prop.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer"); + Consumer consumer = new KafkaConsumer(prop); + consumer.subscribe(Arrays.asList("TEQ")); + int expectedMsgCnt = 1000; + int msgCnt = 0; + try { + while(true) { + try { + ConsumerRecords records = consumer.poll(Duration.ofMillis(10000)); + Collection partitions = records.partitions(); + for (ConsumerRecord record : records) + System.out.printf("partition = %d, offset = %d, key = %s, value =%s\n ", record.partition(), 
record.offset(), record.key(), record.value()); + + if(records != null && records.count() > 0) { + msgCnt += records.count(); + System.out.println("Committing records " + records.count()); + consumer.commitSync(); + + if(msgCnt >= expectedMsgCnt ) + { + System.out.println("Received " + msgCnt + " Expected " + expectedMsgCnt +". Exiting Now."); + break; + } + } + else { + System.out.println("No Record Fetched. Retrying in 1 second"); + Thread.sleep(1000); + } + }catch(Exception e) + { + throw e; + } + } + try { + consumer.unsubscribe(); + } + catch(Exception e) { + System.out.println("Exception while unsubscribe" + e); + e.printStackTrace(); + } + + while(true) { + try { + ConsumerRecords records = consumer.poll(Duration.ofMillis(10000)); + Collection partitions = records.partitions(); + for (ConsumerRecord record : records) + System.out.printf("partition = %d, offset = %d, key = %s, value =%s\n ", record.partition(), record.offset(), record.key(), record.value()); + + if(records != null && records.count() > 0) { + msgCnt += records.count(); + System.out.println("Committing records " + records.count()); + consumer.commitSync(); + + if(msgCnt >= expectedMsgCnt ) + { + System.out.println("Received " + msgCnt + " Expected " + expectedMsgCnt +". Exiting Now."); + break; + } + } + else { + System.out.println("No Record Fetched. Retrying in 1 second"); + Thread.sleep(1000); + } + }catch(Exception e) + { + throw e; + } + } + + + }catch(Exception e) + { + System.out.println("Exception from consumer " + e); + e.printStackTrace(); + }finally { + System.out.println("Closing Consumer"); + consumer.close(); + } + } + } \ No newline at end of file diff --git a/clients/src/test/java/org/oracle/okafka/tests/ProducerMetricsTest.java b/clients/src/test/java/org/oracle/okafka/tests/ProducerMetricsTest.java new file mode 100644 index 0000000..12abf6b --- /dev/null +++ b/clients/src/test/java/org/oracle/okafka/tests/ProducerMetricsTest.java @@ -0,0 +1,112 @@ +package org.oracle.okafka.tests; + +import org.junit.Test; +import org.oracle.okafka.clients.producer.KafkaProducer; + + +import java.lang.Thread; +import java.io.File; +import java.io.FileWriter; +import java.io.IOException; +import java.util.Map; +import java.util.Properties; +import java.util.concurrent.Future; +import org.apache.kafka.clients.producer.Producer; +import org.apache.kafka.clients.producer.ProducerRecord; +import org.apache.kafka.clients.producer.RecordMetadata; +import org.apache.kafka.common.Metric; +import org.apache.kafka.common.MetricName; + +public class ProducerMetricsTest{ + + public static void getMetricData(Producer producer,String fileName) { + try { + Map metricData = producer.metrics(); + File csvFile = new File(System.getProperty("user.dir")+fileName+ ".csv"); + FileWriter fileWriter = new FileWriter(csvFile); + StringBuilder headLine = new StringBuilder(); + headLine.append("Name"); + headLine.append(','); + headLine.append("Group"); + headLine.append(','); + headLine.append("Description"); + headLine.append(','); + headLine.append("Tags"); + headLine.append(','); + headLine.append("Value"); + headLine.append("\n"); + fileWriter.write(headLine.toString()); + metricData.forEach((a, b) -> { + try { + + StringBuilder line = new StringBuilder(); + + line.append(a.name()); + line.append(','); + line.append(a.group()); + line.append(','); + line.append(a.description()); + if(a.tags().containsKey("node-id") || a.tags().containsKey("topic")) { + if(a.tags().containsKey("node-id")) { + line.append(','); + 
line.append(a.tags().get("node-id")); + } + if(a.tags().containsKey("topic")) { + line.append(','); + line.append("topic-"+a.tags().get("topic")); + } + }else{ + line.append(','); + line.append(""); + + } + + + + line.append(','); + line.append(b.metricValue().toString()); + + line.append("\n"); + fileWriter.write(line.toString()); + + } catch (IOException e) { + e.printStackTrace(); + } + + }); + fileWriter.close(); + } catch(IOException e) { + e.printStackTrace(); + } + } + + @Test + public void ProducerTest() { + try { + + Properties prop = new Properties(); + prop = OkafkaSetup.setup(); + prop.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer"); + prop.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer"); + Producer producer = new KafkaProducer(prop); + + int msgCnt = 100; + for(int i=0;i producerRecord = new ProducerRecord("TEQ", i+"", "Test message # " + i); + producer.send(producerRecord); + } + System.out.println("Produced "+ msgCnt +" messages."); + + Thread.sleep(9000); + ProducerMetricsTest.getMetricData(producer,"afterProducingOkafka"); + + producer.close(); + System.out.println("producer closed"); + } + catch(Exception e) + { + System.out.println("Exception in Main " + e ); + e.printStackTrace(); + } + } +} diff --git a/clients/src/test/java/org/oracle/okafka/tests/SimpleOkafkaAdmin.java b/clients/src/test/java/org/oracle/okafka/tests/SimpleOkafkaAdmin.java new file mode 100644 index 0000000..1d5b588 --- /dev/null +++ b/clients/src/test/java/org/oracle/okafka/tests/SimpleOkafkaAdmin.java @@ -0,0 +1,41 @@ +package org.oracle.okafka.tests; + +import java.util.Arrays; +import java.util.Properties; +import java.util.concurrent.ExecutionException; + +import org.apache.kafka.clients.admin.Admin; +import org.apache.kafka.clients.admin.CreateTopicsResult; +import org.apache.kafka.clients.admin.CreateTopicsResult.TopicMetadataAndConfig; +import org.apache.kafka.clients.admin.NewTopic; +import org.apache.kafka.common.KafkaFuture; +import org.junit.Test; +import org.oracle.okafka.clients.admin.AdminClient; + +public class SimpleOkafkaAdmin { + + @Test + public void AdminTest() { + try (Admin admin = AdminClient.create(OkafkaSetup.setup())) { + CreateTopicsResult result = admin.createTopics(Arrays.asList( + new NewTopic("TEQ",5, (short)1))); + try { + KafkaFuture ftr = result.all(); + ftr.get(); + System.out.println("Main Thread Out of wait now"); + } catch ( InterruptedException | ExecutionException e ) { + + throw new IllegalStateException(e); + } + System.out.println("Auto Closing admin now"); + } + catch(Exception e) + { + System.out.println("Exception while creating topic " + e); + e.printStackTrace(); + } + + System.out.println("Main thread complete "); + + } +} \ No newline at end of file diff --git a/clients/src/test/java/org/oracle/okafka/tests/SimpleOkafkaConsumer.java b/clients/src/test/java/org/oracle/okafka/tests/SimpleOkafkaConsumer.java new file mode 100644 index 0000000..f3c50e5 --- /dev/null +++ b/clients/src/test/java/org/oracle/okafka/tests/SimpleOkafkaConsumer.java @@ -0,0 +1,69 @@ +package org.oracle.okafka.tests; + +import org.junit.Test; +import java.io.FileInputStream; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.InputStream; +import java.time.Duration; +import java.util.*; +import org.apache.kafka.clients.consumer.Consumer; +import org.apache.kafka.clients.consumer.ConsumerRebalanceListener; +import org.apache.kafka.clients.consumer.ConsumerRecord; +import 
org.apache.kafka.clients.consumer.ConsumerRecords; +import org.apache.kafka.common.TopicPartition; +import org.oracle.okafka.clients.consumer.KafkaConsumer; +import org.oracle.okafka.clients.consumer.internals.SubscriptionState; + +public class SimpleOkafkaConsumer{ + + @Test + public void ConsumerTest() { + Properties prop = new Properties(); + prop = OkafkaSetup.setup(); + prop.put("group.id" , "S1"); + prop.put("max.poll.records", 1000); + prop.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer"); + prop.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer"); + Consumer consumer = new KafkaConsumer(prop); + consumer.subscribe(Arrays.asList("TEQ")); + int expectedMsgCnt = 1000; + int msgCnt = 0; + try { + while(true) { + try { + ConsumerRecords records = consumer.poll(Duration.ofMillis(10000)); + Collection partitions = records.partitions(); + for (ConsumerRecord record : records) + System.out.printf("partition = %d, offset = %d, key = %s, value =%s\n ", record.partition(), record.offset(), record.key(), record.value()); + + if(records != null && records.count() > 0) { + msgCnt += records.count(); + System.out.println("Committing records " + records.count()); + consumer.commitSync(); + + if(msgCnt >= expectedMsgCnt ) + { + System.out.println("Received " + msgCnt + " Expected " + expectedMsgCnt +". Exiting Now."); + break; + } + } + else { + System.out.println("No Record Fetched. Retrying in 1 second"); + Thread.sleep(1000); + } + }catch(Exception e) + { + throw e; + } + } + }catch(Exception e) + { + System.out.println("Exception from consumer " + e); + e.printStackTrace(); + }finally { + System.out.println("Closing Consumer"); + consumer.close(); + } + } + } diff --git a/clients/src/test/java/org/oracle/okafka/tests/SimpleOkafkaProducer.java b/clients/src/test/java/org/oracle/okafka/tests/SimpleOkafkaProducer.java new file mode 100644 index 0000000..c66a73e --- /dev/null +++ b/clients/src/test/java/org/oracle/okafka/tests/SimpleOkafkaProducer.java @@ -0,0 +1,42 @@ +package org.oracle.okafka.tests; + +import org.junit.BeforeClass; +import org.junit.Test; +import org.oracle.okafka.clients.producer.KafkaProducer; +import java.io.FileInputStream; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.InputStream; +import java.util.Properties; +import java.util.concurrent.Future; +import org.apache.kafka.clients.producer.Producer; +import org.apache.kafka.clients.producer.ProducerConfig; +import org.apache.kafka.clients.producer.ProducerRecord; +import org.apache.kafka.clients.producer.RecordMetadata; + +public class SimpleOkafkaProducer{ + + @Test + public void ProducerTest() { + try { + Properties prop = OkafkaSetup.setup(); + prop.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer"); + prop.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer"); + Producer producer = new KafkaProducer(prop); + Future lastFuture = null; + int msgCnt = 1000; + for(int i=0;i producerRecord = new ProducerRecord("TEQ", i+"", "Test message # " + i); + lastFuture =producer.send(producerRecord); + } + System.out.println("Produced "+ msgCnt +" messages."); + lastFuture.get(); + producer.close(); + } + catch(Exception e) + { + System.out.println("Exception in Main " + e ); + e.printStackTrace(); + } + } +} diff --git a/clients/src/test/java/org/oracle/okafka/tests/TestRunner.java b/clients/src/test/java/org/oracle/okafka/tests/TestRunner.java new file mode 100644 index 
0000000..f45c711 --- /dev/null +++ b/clients/src/test/java/org/oracle/okafka/tests/TestRunner.java @@ -0,0 +1,24 @@ +package org.oracle.okafka.tests; + +import org.junit.runner.JUnitCore; +import org.junit.runner.Result; +import org.junit.runner.notification.Failure; + + +class TestRunner { + + public static void main(String[] args) { + + Result result = new Result(); + + result = JUnitCore.runClasses(SimpleOkafkaAdmin.class, SimpleOkafkaProducer.class, OkafkaAutoOffsetReset.class, + SimpleOkafkaProducer.class, OkafkaSeekToEnd.class, OkafkaSeekToBeginning.class, SimpleOkafkaProducer.class, + OkafkaUnsubscribe.class, OkafkaDeleteTopic.class,ProducerMetricsTest.class, ConsumerMetricsTest.class); + + for (Failure failure : result.getFailures()) { + System.out.println("Test failure : "+ failure.toString()); + } + System.out.println("Tests ran succesfully: " + result.wasSuccessful()); + } + } + diff --git a/clients/src/test/java/test.config b/clients/src/test/java/test.config new file mode 100644 index 0000000..989410a --- /dev/null +++ b/clients/src/test/java/test.config @@ -0,0 +1,14 @@ +#OKafka common properties for common examples + +#Properties to connect to Oracle Database +#Option 1: Connect to Oracle database using plaintext + +security.protocol=PLAINTEXT +bootstrap.servers=localhost:1521 +oracle.service.name=FREEPDB1 +oracle.net.tns_admin=./src/test/java + +#Option 2: Connect to Oracle Database deployed in Oracle Autonomous Cloud using Wallet +#security.protocol=SSL +#oracle.net.tns_admin= +#tns.alias= diff --git a/examples/consumer/pom.xml b/examples/consumer/pom.xml deleted file mode 100644 index 66fbb71..0000000 --- a/examples/consumer/pom.xml +++ /dev/null @@ -1,82 +0,0 @@ - - - - 4.0.0 - - org.oracle.okafka.examples - consumer - 0.8 - - consumer - https://github.com/oracle/okafka - - - UTF-8 - UTF-8 - ${java.version} - ${java.version} - 11 - 0.8 - - - - - org.oracle.okafka - okafka - 0.8 - - - junit - junit - 4.13.2 - test - - - - - - - - - maven-clean-plugin - 3.1.0 - - - - maven-resources-plugin - 3.0.2 - - - maven-compiler-plugin - 3.8.0 - - - maven-surefire-plugin - 2.22.1 - - - maven-jar-plugin - 3.0.2 - - - maven-install-plugin - 2.5.2 - - - maven-deploy-plugin - 2.8.2 - - - - maven-site-plugin - 3.7.1 - - - maven-project-info-reports-plugin - 3.0.0 - - - - - diff --git a/examples/consumer/src/main/java/org/oracle/okafka/examples/Consumer.java b/examples/consumer/src/main/java/org/oracle/okafka/examples/Consumer.java deleted file mode 100644 index 4c05727..0000000 --- a/examples/consumer/src/main/java/org/oracle/okafka/examples/Consumer.java +++ /dev/null @@ -1,135 +0,0 @@ -/* - ** OKafka Java Client version 0.8. - ** - ** Copyright (c) 2019, 2020 Oracle and/or its affiliates. - ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
- */ -package org.oracle.okafka.examples; - -import java.io.FileNotFoundException; -import java.io.IOException; -import java.io.InputStream; -import java.security.Provider; -import java.security.Security; -import java.time.Duration; -import java.util.Arrays; -import java.util.Properties; - -import org.oracle.okafka.clients.consumer.ConsumerRecord; -import org.oracle.okafka.clients.consumer.ConsumerRecords; -import org.oracle.okafka.clients.consumer.KafkaConsumer; - -public class Consumer { - - public static void main(String[] args) { - System.setProperty("org.slf4j.simpleLogger.defaultLogLevel", "DEBUG"); - - // Get application properties - Properties appProperties = null; - try { - appProperties = getProperties(); - if (appProperties == null) { - System.out.println("Application properties not found!"); - System.exit(-1); - } - } catch (Exception e) { - System.out.println("Application properties not found!"); - System.out.println("Exception: " + e); - System.exit(-1); - } - - Properties props = new Properties(); - - String topic = appProperties.getProperty("topic.name", "topic"); - - // Get Oracle Database Service Name, ex: "serviceid.regress.rdbms.dev.us.oracle.com" - props.put("oracle.service.name", appProperties.getProperty("oracle.service.name")); - - // Get Oracle Database Instance, ex: "instancename" - props.put("oracle.instance.name", appProperties.getProperty("oracle.instance.name")); - - // Get location of tnsnames.ora/ojdbc.properties file eg: "/user/home" if ojdbc.properies file is in home - props.put("oracle.net.tns_admin", appProperties.getProperty("oracle.net.tns_admin")); // - - //SSL communication with ADB - props.put("security.protocol", appProperties.getProperty("security.protocol", "SSL")); - if ("SSL".equals(appProperties.getProperty("security.protocol"))) { - // Add dynamically Oracle PKI Provider required to SSL/Wallet - addOraclePKIProvider(); - props.put("tns.alias", appProperties.getProperty("tns.alias")); - } - - // Get Oracle Database address, eg: "host:port" - props.put("bootstrap.servers", appProperties.getProperty("bootstrap.servers")); - - // Get Oracle TEQ Subscriber ID - props.put("group.id", appProperties.getProperty("group.id")); - - props.put("enable.auto.commit", appProperties.getProperty("enable.auto.commit", "true")); - props.put("auto.commit.interval.ms", appProperties.getProperty("auto.commit.interval.ms", "10000")); - - props.put("key.deserializer", appProperties.getProperty("key.deserializer", - "org.oracle.okafka.common.serialization.StringDeserializer")); - props.put("value.deserializer", appProperties.getProperty("value.deserializer", - "org.oracle.okafka.common.serialization.StringDeserializer")); - props.put("max.poll.records", Integer.parseInt(appProperties.getProperty("max.poll.records", "100"))); - - KafkaConsumer consumer = null; - - consumer = new KafkaConsumer(props); - - consumer.subscribe(Arrays.asList(topic)); - - ConsumerRecords records = null; - - try { - - records = consumer.poll(Duration.ofMillis(15000)); - for (ConsumerRecord record : records) { - System.out.println("topic = , partition= ,key= , value = \n"+ - record.topic()+ " "+record.partition()+ " "+record.key()+" "+ record.value()); - System.out.println("......."); - } - - consumer.commitSync(); - - }catch(Exception ex) { - ex.printStackTrace(); - - } finally { - consumer.close(); - } - } - - private static java.util.Properties getProperties() throws IOException { - InputStream inputStream = null; - Properties appProperties = null; - - try { - Properties prop = new 
Properties(); - String propFileName = "config.properties"; - - inputStream = Consumer.class.getClassLoader().getResourceAsStream(propFileName); - if (inputStream != null) { - prop.load(inputStream); - } else { - throw new FileNotFoundException("property file '" + propFileName + "' not found in the classpath"); - } - - appProperties = prop; - - } catch (Exception e) { - System.out.println("Exception: " + e); - } finally { - inputStream.close(); - } - return appProperties; - } - - private static void addOraclePKIProvider() { - System.out.println("Installing Oracle PKI provider."); - Provider oraclePKI = new oracle.security.pki.OraclePKIProvider(); - Security.insertProviderAt(oraclePKI,3); - } -} - diff --git a/examples/consumer/src/main/java/org/oracle/okafka/examples/ConsumerOKafka.java b/examples/consumer/src/main/java/org/oracle/okafka/examples/ConsumerOKafka.java new file mode 100644 index 0000000..2e4ac09 --- /dev/null +++ b/examples/consumer/src/main/java/org/oracle/okafka/examples/ConsumerOKafka.java @@ -0,0 +1,98 @@ +/* + ** OKafka Java Client version 23.4. + ** + ** Copyright (c) 2019, 2024 Oracle and/or its affiliates. + ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. + */ + +package org.oracle.okafka.examples; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.InputStream; +import java.util.Properties; +import java.time.Duration; +import java.util.Arrays; + +import org.oracle.okafka.clients.consumer.KafkaConsumer; + +import org.apache.kafka.clients.consumer.Consumer; +import org.apache.kafka.clients.consumer.ConsumerRecords; +import org.apache.kafka.clients.consumer.ConsumerRecord; +import org.apache.kafka.common.TopicPartition; + +public class ConsumerOKafka { + public static void main(String[] args) { + System.setProperty("org.slf4j.simpleLogger.defaultLogLevel", "DEBUG"); + + // Get application properties + Properties appProperties = null; + try { + appProperties = getProperties(); + if (appProperties == null) { + System.out.println("Application properties not found!"); + System.exit(-1); + } + } catch (Exception e) { + System.out.println("Application properties not found!"); + System.out.println("Exception: " + e); + System.exit(-1); + } + + String topic = appProperties.getProperty("topic.name", "TXEQ"); + appProperties.remove("topic.name"); // Pass props to build OKafkaProducer + + KafkaConsumer consumer = new KafkaConsumer<>(appProperties); + consumer.subscribe(Arrays.asList(topic)); + + + try { + while(true) { + ConsumerRecords records = consumer.poll(Duration.ofMillis(10000)); + + for (ConsumerRecord record : records) + System.out.printf("partition = %d, offset = %d, key = %s, value =%s\n ", record.partition(), record.offset(), record.key(), record.value()); + + if (records != null && records.count() > 0) { + System.out.println("Committing records" + records.count()); + consumer.commitSync(); + } else { + System.out.println("No Record Fetched. 
Retrying in 1 second"); + Thread.sleep(1000); + } + } + }catch(Exception e) + { + System.out.println("Exception from consumer " + e); + e.printStackTrace(); + } + finally { + consumer.close(); + } + + } + + private static java.util.Properties getProperties() throws IOException { + InputStream inputStream = null; + Properties appProperties = null; + + try { + Properties prop = new Properties(); + String propFileName = "config.properties"; + inputStream = ConsumerOKafka.class.getClassLoader().getResourceAsStream(propFileName); + if (inputStream != null) { + prop.load(inputStream); + } else { + throw new FileNotFoundException("property file '" + propFileName + "' not found."); + } + appProperties = prop; + + } catch (Exception e) { + System.out.println("Exception: " + e); + throw e; + } finally { + inputStream.close(); + } + return appProperties; + } +} \ No newline at end of file diff --git a/examples/consumer/src/main/resources/config.properties b/examples/consumer/src/main/resources/config.properties index ef0f503..65ae47b 100644 --- a/examples/consumer/src/main/resources/config.properties +++ b/examples/consumer/src/main/resources/config.properties @@ -1,19 +1,25 @@ -# okafka consumer example properties -oracle.service.name= -oracle.instance.name= - -oracle.net.tns_admin= -security.protocol=SSL -tns.alias= +# OKafka Consumer example properties +#Properties to connect to Oracle Database +#Option 1: Connect to Oracle database using plaintext bootstrap.servers= +oracle.service.name= +oracle.net.tns_admin= + + +#Option 2: Connect to Oracle Database deployed in Oracle Autonomous Cloud using Wallet +#security.protocol=SSL +#oracle.net.tns_admin= +#tns.alias= + +# Application specific OKafka consumer properties +topic.name= group.id= enable.auto.commit=true -auto.commit.interval.ms=10000 -max.poll.records=100 +max.poll.records=1000 +default.api.timeout.ms=180000 -key.deserializer=org.oracle.okafka.common.serialization.StringDeserializer -value.deserializer=org.oracle.okafka.common.serialization.StringDeserializer +key.deserializer=org.apache.kafka.common.serialization.StringDeserializer +value.deserializer=org.apache.kafka.common.serialization.StringDeserializer -topic.name= diff --git a/examples/ojdbc.properties b/examples/ojdbc.properties new file mode 100644 index 0000000..70bd7ff --- /dev/null +++ b/examples/ojdbc.properties @@ -0,0 +1,2 @@ +user= +password= \ No newline at end of file diff --git a/examples/producer/pom.xml b/examples/producer/pom.xml deleted file mode 100644 index 1182667..0000000 --- a/examples/producer/pom.xml +++ /dev/null @@ -1,82 +0,0 @@ - - - - 4.0.0 - - org.oracle.okafka.examples - producer - 0.8 - - producer - https://github.com/oracle/okafka - - - UTF-8 - UTF-8 - ${java.version} - ${java.version} - 11 - 0.8 - - - - - org.oracle.okafka - okafka - 0.8 - - - junit - junit - 4.13.2 - test - - - - - - - - - - maven-clean-plugin - 3.1.0 - - - - maven-resources-plugin - 3.0.2 - - - maven-compiler-plugin - 3.8.0 - - - maven-surefire-plugin - 2.22.1 - - - maven-jar-plugin - 3.0.2 - - - maven-install-plugin - 2.5.2 - - - maven-deploy-plugin - 2.8.2 - - - - maven-site-plugin - 3.7.1 - - - maven-project-info-reports-plugin - 3.0.0 - - - - - diff --git a/examples/producer/src/main/java/org/oracle/okafka/examples/Producer.java b/examples/producer/src/main/java/org/oracle/okafka/examples/Producer.java deleted file mode 100644 index 1880e5e..0000000 --- a/examples/producer/src/main/java/org/oracle/okafka/examples/Producer.java +++ /dev/null @@ -1,127 +0,0 @@ -/* -** OKafka Java 
Client version 0.8. -** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. -** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -*/ - -package org.oracle.okafka.examples; - -import java.io.FileNotFoundException; -import java.io.IOException; -import java.io.InputStream; -import java.security.Provider; -import java.security.Security; -import java.util.Properties; - -import org.oracle.okafka.clients.producer.KafkaProducer; -import org.oracle.okafka.clients.producer.ProducerRecord; - - -public class Producer { - - public static void main(String[] args) { - System.setProperty("org.slf4j.simpleLogger.defaultLogLevel", "INFO"); - - // Get application properties - Properties appProperties = null; - try { - appProperties = getProperties(); - if (appProperties == null) { - System.out.println("Application properties not found!"); - System.exit(-1); - } - } catch (Exception e) { - System.out.println("Application properties not found!"); - System.out.println("Exception: " + e); - System.exit(-1); - } - - String topic = appProperties.getProperty("topic.name", "topic"); - - KafkaProducer prod = null; - Properties props = new Properties(); - - // Get Oracle Database Service Name, ex: "serviceid.regress.rdbms.dev.us.oracle.com" - props.put("oracle.service.name", appProperties.getProperty("oracle.service.name")); - - // Get Oracle Database Instance, ex: "instancename" - props.put("oracle.instance.name", appProperties.getProperty("oracle.instance.name")); - - // Get location of tnsnames.ora/ojdbc.properties file eg: "/user/home" if ojdbc.properies file is in home - props.put("oracle.net.tns_admin", appProperties.getProperty("oracle.net.tns_admin")); // - - //SSL communication with ADB - props.put("security.protocol", appProperties.getProperty("security.protocol", "SSL")); - if ("SSL".equals(appProperties.getProperty("security.protocol"))) { - // Add dynamically Oracle PKI Provider required to SSL/Wallet - addOraclePKIProvider(); - props.put("tns.alias", appProperties.getProperty("tns.alias")); - } - - // Get Oracle Database address, eg: "host:port" - props.put("bootstrap.servers", appProperties.getProperty("bootstrap.servers")); - - //props.put("batch.size", Integer.parseInt(appProperties.getProperty("batch.size", "200"))); - props.put("linger.ms", Integer.parseInt(appProperties.getProperty("linger.ms", "100"))); - //props.put("buffer.memory", Integer.parseInt(appProperties.getProperty("buffer.memory", "335544"))); - - props.put("key.serializer", appProperties.getProperty("key.serializer", - "org.oracle.okafka.common.serialization.StringSerializer")); - props.put("value.serializer", appProperties.getProperty("value.serializer", - "org.oracle.okafka.common.serialization.StringSerializer")); - - System.out.println("Creating producer now 1 2 3.."); - - prod=new KafkaProducer(props); - - System.out.println("Producer created."); - - try { - int i; - for(i = 0; i < 10; i++) - prod.send(new ProducerRecord(topic ,0, i+"000","This is new message"+i)); - - System.out.println("Sent "+ i + "messages"); - } catch(Exception ex) { - - System.out.println("Failed to send messages:"); - ex.printStackTrace(); - } - finally { - prod.close(); - } - } - - private static java.util.Properties getProperties() throws IOException { - InputStream inputStream = null; - Properties appProperties = null; - - try { - Properties prop = new Properties(); - String propFileName = "config.properties"; - - inputStream = Producer.class.getClassLoader().getResourceAsStream(propFileName); 
- if (inputStream != null) { - prop.load(inputStream); - } else { - throw new FileNotFoundException("property file '" + propFileName + "' not found in the classpath"); - } - - appProperties = prop; - - } catch (Exception e) { - System.out.println("Exception: " + e); - } finally { - inputStream.close(); - } - return appProperties; - } - - private static void addOraclePKIProvider() { - System.out.println("Installing Oracle PKI provider."); - Provider oraclePKI = new oracle.security.pki.OraclePKIProvider(); - Security.insertProviderAt(oraclePKI,3); - } -} - diff --git a/examples/producer/src/main/java/org/oracle/okafka/examples/ProducerOKafka.java b/examples/producer/src/main/java/org/oracle/okafka/examples/ProducerOKafka.java new file mode 100644 index 0000000..7e45867 --- /dev/null +++ b/examples/producer/src/main/java/org/oracle/okafka/examples/ProducerOKafka.java @@ -0,0 +1,105 @@ +/* +** OKafka Java Client version 23.4. +** +** Copyright (c) 2019, 2024 Oracle and/or its affiliates. +** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +*/ + +package org.oracle.okafka.examples; + +import org.oracle.okafka.clients.producer.KafkaProducer; + +import org.apache.kafka.common.header.internals.RecordHeader; +import org.apache.kafka.clients.producer.Producer; +import org.apache.kafka.clients.producer.ProducerRecord; +import org.apache.kafka.clients.producer.RecordMetadata; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.InputStream; +import java.util.ArrayList; +import java.util.Properties; +import java.util.concurrent.Future; + +public class ProducerOKafka { + + public static void main(String[] args) { + System.setProperty("org.slf4j.simpleLogger.defaultLogLevel", "DEBUG"); + + // Get application properties + Properties appProperties = null; + try { + appProperties = getProperties(); + if (appProperties == null) { + System.out.println("Application properties not found!"); + System.exit(-1); + } + } catch (Exception e) { + System.out.println("Application properties not found!"); + System.out.println("Exception: " + e); + System.exit(-1); + } + + String topic = appProperties.getProperty("topic.name", "TXEQ"); + appProperties.remove("topic.name"); // Pass props to build OKafkaProducer + + Producer producer = new KafkaProducer<>(appProperties); + + String baseMsg = "This is test with 128 characters Payload used to test Oracle Kafka. 
"+ + "Read https://github.com/oracle/okafka/blob/master/README.md"; + + Future lastFuture = null; + int msgCnt = 10; + String key = "Just some key for OKafka"; + ArrayList> metadataList = new ArrayList<>(); + + try { + for(int i=0;i producerRecord = + new ProducerRecord<>(topic, key+i, i+ baseMsg); + producerRecord.headers().add(rH1).add(rH2); + lastFuture = producer.send(producerRecord); + metadataList.add(lastFuture); + } + RecordMetadata rd = lastFuture.get(); + System.out.println("Last record placed in " + rd.partition() + " Offset " + rd.offset()); + } + catch(Exception e) { + System.out.println("Failed to send messages:"); + e.printStackTrace(); + } + finally { + System.out.println("Initiating close"); + producer.close(); + } + + } + + private static java.util.Properties getProperties() throws IOException { + InputStream inputStream = null; + Properties appProperties; + + try { + Properties prop = new Properties(); + String propFileName = "config.properties"; + inputStream = ProducerOKafka.class.getClassLoader().getResourceAsStream(propFileName); + if (inputStream != null) { + prop.load(inputStream); + } else { + throw new FileNotFoundException("property file '" + propFileName + "' not found."); + } + appProperties = prop; + + } catch (Exception e) { + System.out.println("Exception: " + e); + throw e; + } finally { + if (inputStream != null) + inputStream.close(); + } + return appProperties; + } + +} diff --git a/examples/producer/src/main/resources/config.properties b/examples/producer/src/main/resources/config.properties index a36fc7d..3111efc 100644 --- a/examples/producer/src/main/resources/config.properties +++ b/examples/producer/src/main/resources/config.properties @@ -1,18 +1,25 @@ -# okafka producer example properties -oracle.service.name= -oracle.instance.name= - -oracle.net.tns_admin= -security.protocol=SSL -tns.alias= +# OKafka Producer example properties +#Properties to connect to Oracle Database +#Option 1: Connect to Oracle database using plaintext bootstrap.servers= +oracle.service.name= +oracle.net.tns_admin= + + +#Option 2: Connect to Oracle Database deployed in Oracle Autonomous Cloud using Wallet +#security.protocol=SSL +#oracle.net.tns_admin= +#tns.alias= + +#Appliction specific OKafka Producer properties +topic.name= batch.size=200 linger.ms=100 buffer.memory=335544 -key.serializer=org.oracle.okafka.common.serialization.StringSerializer -value.serializer=org.oracle.okafka.common.serialization.StringSerializer +enable.idempotence=true +key.serializer=org.apache.kafka.common.serialization.StringSerializer +value.serializer=org.apache.kafka.common.serialization.StringSerializer -topic.name= \ No newline at end of file diff --git a/settings.gradle b/settings.gradle index 2c7edff..0d4ce19 100644 --- a/settings.gradle +++ b/settings.gradle @@ -1,9 +1,10 @@ /* -** OKafka Java Client version 0.8. +** OKafka Java Client version 23.4. ** -** Copyright (c) 2019, 2020 Oracle and/or its affiliates. +** Copyright (c) 2019, 2024 Oracle and/or its affiliates. ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. */ rootProject.name = 'okafka' -include(':clients', 'examples:consumer', 'examples:producer') \ No newline at end of file +include(':clients', 'examples:consumer', 'examples:producer') +