Mirror of https://github.com/didi/KnowStreaming.git (synced 2025-12-24 20:22:12 +08:00)
Add km module kafka
This commit is contained in:
1  connect/api/.gitignore  vendored  Normal file
@@ -0,0 +1 @@
/bin/
@@ -0,0 +1,30 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.kafka.connect.components;

/**
 * Connect requires some components implement this interface to define a version string.
 */
public interface Versioned {
    /**
     * Get the version of this component.
     *
     * @return the version, formatted as a String. The version may not be {@code null} or empty.
     */
    String version();
}
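A minimal sketch of a component satisfying this interface; the class name and version string below are illustrative and not part of this commit:

// Hypothetical example: a component that reports a fixed version string.
public class ExampleComponent implements org.apache.kafka.connect.components.Versioned {
    private static final String VERSION = "1.0.0"; // assumed version constant

    @Override
    public String version() {
        return VERSION; // never null or empty, as the contract requires
    }
}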
@@ -0,0 +1,182 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.connect.connector;

import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.header.ConnectHeaders;
import org.apache.kafka.connect.header.Header;
import org.apache.kafka.connect.header.Headers;

import java.util.Objects;

/**
 * <p>
 * Base class for records containing data to be copied to/from Kafka. This corresponds closely to
 * Kafka's {@link org.apache.kafka.clients.producer.ProducerRecord ProducerRecord} and {@link org.apache.kafka.clients.consumer.ConsumerRecord ConsumerRecord} classes, and holds the data that may be used by both
 * sources and sinks (topic, kafkaPartition, key, value). Although both implementations include a
 * notion of offset, it is not included here because they differ in type.
 * </p>
 */
public abstract class ConnectRecord<R extends ConnectRecord<R>> {
    private final String topic;
    private final Integer kafkaPartition;
    private final Schema keySchema;
    private final Object key;
    private final Schema valueSchema;
    private final Object value;
    private final Long timestamp;
    private final Headers headers;

    public ConnectRecord(String topic, Integer kafkaPartition,
                         Schema keySchema, Object key,
                         Schema valueSchema, Object value,
                         Long timestamp) {
        this(topic, kafkaPartition, keySchema, key, valueSchema, value, timestamp, new ConnectHeaders());
    }

    public ConnectRecord(String topic, Integer kafkaPartition,
                         Schema keySchema, Object key,
                         Schema valueSchema, Object value,
                         Long timestamp, Iterable<Header> headers) {
        this.topic = topic;
        this.kafkaPartition = kafkaPartition;
        this.keySchema = keySchema;
        this.key = key;
        this.valueSchema = valueSchema;
        this.value = value;
        this.timestamp = timestamp;
        if (headers instanceof ConnectHeaders) {
            this.headers = (ConnectHeaders) headers;
        } else {
            this.headers = new ConnectHeaders(headers);
        }
    }

    public String topic() {
        return topic;
    }

    public Integer kafkaPartition() {
        return kafkaPartition;
    }

    public Object key() {
        return key;
    }

    public Schema keySchema() {
        return keySchema;
    }

    public Object value() {
        return value;
    }

    public Schema valueSchema() {
        return valueSchema;
    }

    public Long timestamp() {
        return timestamp;
    }

    /**
     * Get the headers for this record.
     *
     * @return the headers; never null
     */
    public Headers headers() {
        return headers;
    }

    /**
     * Create a new record of the same type as itself, with the specified parameter values. All other fields in this record will be copied
     * over to the new record. Since the headers are mutable, the resulting record will have a copy of this record's headers.
     *
     * @param topic the name of the topic; may be null
     * @param kafkaPartition the partition number for the Kafka topic; may be null
     * @param keySchema the schema for the key; may be null
     * @param key the key; may be null
     * @param valueSchema the schema for the value; may be null
     * @param value the value; may be null
     * @param timestamp the timestamp; may be null
     * @return the new record
     */
    public abstract R newRecord(String topic, Integer kafkaPartition, Schema keySchema, Object key, Schema valueSchema, Object value, Long timestamp);

    /**
     * Create a new record of the same type as itself, with the specified parameter values. All other fields in this record will be copied
     * over to the new record.
     *
     * @param topic the name of the topic; may be null
     * @param kafkaPartition the partition number for the Kafka topic; may be null
     * @param keySchema the schema for the key; may be null
     * @param key the key; may be null
     * @param valueSchema the schema for the value; may be null
     * @param value the value; may be null
     * @param timestamp the timestamp; may be null
     * @param headers the headers; may be null or empty
     * @return the new record
     */
    public abstract R newRecord(String topic, Integer kafkaPartition, Schema keySchema, Object key, Schema valueSchema, Object value, Long timestamp, Iterable<Header> headers);

    @Override
    public String toString() {
        return "ConnectRecord{" +
                "topic='" + topic + '\'' +
                ", kafkaPartition=" + kafkaPartition +
                ", key=" + key +
                ", keySchema=" + keySchema +
                ", value=" + value +
                ", valueSchema=" + valueSchema +
                ", timestamp=" + timestamp +
                ", headers=" + headers +
                '}';
    }

    @Override
    public boolean equals(Object o) {
        if (this == o)
            return true;
        if (o == null || getClass() != o.getClass())
            return false;

        ConnectRecord that = (ConnectRecord) o;

        return Objects.equals(kafkaPartition, that.kafkaPartition)
                && Objects.equals(topic, that.topic)
                && Objects.equals(keySchema, that.keySchema)
                && Objects.equals(key, that.key)
                && Objects.equals(valueSchema, that.valueSchema)
                && Objects.equals(value, that.value)
                && Objects.equals(timestamp, that.timestamp)
                && Objects.equals(headers, that.headers);
    }

    @Override
    public int hashCode() {
        int result = topic != null ? topic.hashCode() : 0;
        result = 31 * result + (kafkaPartition != null ? kafkaPartition.hashCode() : 0);
        result = 31 * result + (keySchema != null ? keySchema.hashCode() : 0);
        result = 31 * result + (key != null ? key.hashCode() : 0);
        result = 31 * result + (valueSchema != null ? valueSchema.hashCode() : 0);
        result = 31 * result + (value != null ? value.hashCode() : 0);
        result = 31 * result + (timestamp != null ? timestamp.hashCode() : 0);
        result = 31 * result + headers.hashCode();
        return result;
    }
}
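Since ConnectRecord is abstract (the concrete SourceRecord/SinkRecord subclasses are not part of this hunk), a small sketch of how newRecord() is typically used to copy a record with a replaced value while preserving topic, partition, key, timestamp and headers; the helper class name is hypothetical:

import org.apache.kafka.connect.connector.ConnectRecord;
import org.apache.kafka.connect.data.Schema;

// Hypothetical helper: copy any ConnectRecord with its value replaced.
public class RecordValues {
    public static <R extends ConnectRecord<R>> R withValue(R record, Schema newSchema, Object newValue) {
        return record.newRecord(
                record.topic(),
                record.kafkaPartition(),
                record.keySchema(),
                record.key(),
                newSchema,
                newValue,
                record.timestamp());
    }
}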
@@ -0,0 +1,144 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.connect.connector;

import org.apache.kafka.common.config.Config;
import org.apache.kafka.common.config.ConfigDef;
import org.apache.kafka.common.config.ConfigValue;
import org.apache.kafka.connect.errors.ConnectException;
import org.apache.kafka.connect.components.Versioned;

import java.util.List;
import java.util.Map;

/**
 * <p>
 * Connectors manage integration of Kafka Connect with another system, either as an input that ingests
 * data into Kafka or an output that passes data to an external system. Implementations should
 * not use this class directly; they should inherit from {@link org.apache.kafka.connect.source.SourceConnector SourceConnector}
 * or {@link org.apache.kafka.connect.sink.SinkConnector SinkConnector}.
 * </p>
 * <p>
 * Connectors have two primary tasks. First, given some configuration, they are responsible for
 * creating configurations for a set of {@link Task}s that split up the data processing. For
 * example, a database Connector might create Tasks by dividing the set of tables evenly among
 * tasks. Second, they are responsible for monitoring inputs for changes that require
 * reconfiguration and notifying the Kafka Connect runtime via the {@link ConnectorContext}. Continuing the
 * previous example, the connector might periodically check for new tables and notify Kafka Connect of
 * additions and deletions. Kafka Connect will then request new configurations and update the running
 * Tasks.
 * </p>
 */
public abstract class Connector implements Versioned {

    protected ConnectorContext context;

    /**
     * Initialize this connector, using the provided ConnectorContext to notify the runtime of
     * input configuration changes.
     * @param ctx context object used to interact with the Kafka Connect runtime
     */
    public void initialize(ConnectorContext ctx) {
        context = ctx;
    }

    /**
     * <p>
     * Initialize this connector, using the provided ConnectorContext to notify the runtime of
     * input configuration changes and using the provided set of Task configurations.
     * This version is only used to recover from failures.
     * </p>
     * <p>
     * The default implementation ignores the provided Task configurations. During recovery, Kafka Connect will request
     * an updated set of configurations and update the running Tasks appropriately. However, Connectors should
     * implement special handling of this case if it will avoid unnecessary changes to running Tasks.
     * </p>
     *
     * @param ctx context object used to interact with the Kafka Connect runtime
     * @param taskConfigs existing task configurations, which may be used when generating new task configs to avoid
     *                    churn in partition to task assignments
     */
    public void initialize(ConnectorContext ctx, List<Map<String, String>> taskConfigs) {
        context = ctx;
        // Ignore taskConfigs. May result in more churn of tasks during recovery if updated configs
        // are very different, but reduces the difficulty of implementing a Connector
    }

    /**
     * Start this Connector. This method will only be called on a clean Connector, i.e. it has
     * either just been instantiated and initialized or {@link #stop()} has been invoked.
     *
     * @param props configuration settings
     */
    public abstract void start(Map<String, String> props);

    /**
     * Reconfigure this Connector. Most implementations will not override this, using the default
     * implementation that calls {@link #stop()} followed by {@link #start(Map)}.
     * Implementations only need to override this if they want to handle this process more
     * efficiently, e.g. without shutting down network connections to the external system.
     *
     * @param props new configuration settings
     */
    public void reconfigure(Map<String, String> props) {
        stop();
        start(props);
    }

    /**
     * Returns the Task implementation for this Connector.
     */
    public abstract Class<? extends Task> taskClass();

    /**
     * Returns a set of configurations for Tasks based on the current configuration,
     * producing at most count configurations.
     *
     * @param maxTasks maximum number of configurations to generate
     * @return configurations for Tasks
     */
    public abstract List<Map<String, String>> taskConfigs(int maxTasks);

    /**
     * Stop this connector.
     */
    public abstract void stop();

    /**
     * Validate the connector configuration values against configuration definitions.
     * @param connectorConfigs the provided configuration values
     * @return List of Config, each Config contains the updated configuration information given
     * the current configuration values.
     */
    public Config validate(Map<String, String> connectorConfigs) {
        ConfigDef configDef = config();
        if (null == configDef) {
            throw new ConnectException(
                String.format("%s.config() must return a ConfigDef that is not null.", this.getClass().getName())
            );
        }
        List<ConfigValue> configValues = configDef.validate(connectorConfigs);
        return new Config(configValues);
    }

    /**
     * Define the configuration for the connector.
     * @return The ConfigDef for this connector; may not be null.
     */
    public abstract ConfigDef config();
}
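A minimal sketch of a connector that splits work across tasks via taskConfigs(). Real connectors extend SourceConnector or SinkConnector as the javadoc above notes; this extends Connector directly only to keep the sketch self-contained, and the class name, "example.setting" key, and "task.id" key are hypothetical. It references ExampleTask, sketched after the Task interface below.

import org.apache.kafka.common.config.ConfigDef;
import org.apache.kafka.connect.connector.Connector;
import org.apache.kafka.connect.connector.Task;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class ExampleConnector extends Connector {
    private Map<String, String> props;

    @Override
    public String version() {
        return "0.1.0";
    }

    @Override
    public void start(Map<String, String> props) {
        this.props = props;
    }

    @Override
    public Class<? extends Task> taskClass() {
        return ExampleTask.class; // hypothetical Task implementation, sketched below
    }

    @Override
    public List<Map<String, String>> taskConfigs(int maxTasks) {
        // Hand each task the connector config plus an id; a real connector would
        // divide tables, files, or partitions among the tasks here.
        List<Map<String, String>> configs = new ArrayList<>();
        for (int i = 0; i < maxTasks; i++) {
            Map<String, String> taskConfig = new HashMap<>(props);
            taskConfig.put("task.id", Integer.toString(i));
            configs.add(taskConfig);
        }
        return configs;
    }

    @Override
    public void stop() {
        // Nothing to clean up in this sketch.
    }

    @Override
    public ConfigDef config() {
        return new ConfigDef()
                .define("example.setting", ConfigDef.Type.STRING, "default",
                        ConfigDef.Importance.MEDIUM, "A hypothetical setting.");
    }
}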
@@ -0,0 +1,36 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.connect.connector;

/**
 * ConnectorContext allows Connectors to proactively interact with the Kafka Connect runtime.
 */
public interface ConnectorContext {
    /**
     * Requests that the runtime reconfigure the Tasks for this source. This should be used to
     * indicate to the runtime that something about the input/output has changed (e.g. partitions
     * added/removed) and the running Tasks will need to be modified.
     */
    void requestTaskReconfiguration();

    /**
     * Raise an unrecoverable exception to the Connect framework. This will cause the status of the
     * connector to transition to FAILED.
     * @param e Exception to be raised.
     */
    void raiseError(Exception e);
}
@@ -0,0 +1,52 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.connect.connector;

import java.util.Map;

/**
 * <p>
 * Tasks contain the code that actually copies data to/from another system. They receive
 * a configuration from their parent Connector, assigning them a fraction of a Kafka Connect job's work.
 * The Kafka Connect framework then pushes/pulls data from the Task. The Task must also be able to
 * respond to reconfiguration requests.
 * </p>
 * <p>
 * Task only contains the minimal shared functionality between
 * {@link org.apache.kafka.connect.source.SourceTask} and
 * {@link org.apache.kafka.connect.sink.SinkTask}.
 * </p>
 */
public interface Task {
    /**
     * Get the version of this task. Usually this should be the same as the corresponding {@link Connector} class's version.
     *
     * @return the version, formatted as a String
     */
    String version();

    /**
     * Start the Task
     * @param props initial configuration
     */
    void start(Map<String, String> props);

    /**
     * Stop this task.
     */
    void stop();
}
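The ExampleTask referenced by the connector sketch above, showing only the shared lifecycle defined by this interface; real tasks implement SourceTask or SinkTask, and the class name is hypothetical.

import org.apache.kafka.connect.connector.Task;

import java.util.Map;

public class ExampleTask implements Task {
    private volatile Map<String, String> config;

    @Override
    public String version() {
        return "0.1.0"; // conventionally the same as the connector's version
    }

    @Override
    public void start(Map<String, String> props) {
        this.config = props;
    }

    @Override
    public void stop() {
        this.config = null;
    }
}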
@@ -0,0 +1,47 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.kafka.connect.connector.policy;

import org.apache.kafka.common.Configurable;
import org.apache.kafka.common.config.ConfigValue;

import java.util.List;

/**
 * <p>An interface for enforcing a policy on overriding of client configs via the connector configs.
 *
 * <p>Common use cases are ability to provide principal per connector, <code>sasl.jaas.config</code>
 * and/or enforcing that the producer/consumer configurations for optimizations are within acceptable ranges.
 */
public interface ConnectorClientConfigOverridePolicy extends Configurable, AutoCloseable {

    /**
     * Worker will invoke this while constructing the producer for the SourceConnectors, DLQ for SinkConnectors and the consumer for the
     * SinkConnectors to validate if all of the overridden client configurations are allowed per the
     * policy implementation. This would also be invoked during the validate of connector configs via the Rest API.
     *
     * If there are any policy violations, the connector will not be started.
     *
     * @param connectorClientConfigRequest an instance of {@code ConnectorClientConfigRequest} that provides the configs to be overridden and
     *                                     its context; never {@code null}
     * @return list of {@link ConfigValue} instances that describe each client configuration in the request and includes an
     *         {@link ConfigValue#errorMessages error} if the configuration is not allowed by the policy; never null
     */
    List<ConfigValue> validate(ConnectorClientConfigRequest connectorClientConfigRequest);
}
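A sketch of a policy that only permits a small allow-list of client overrides; the class name and allowed keys are hypothetical, and a rejected override is reported through the ConfigValue error messages as the contract above describes.

import org.apache.kafka.common.config.ConfigValue;
import org.apache.kafka.connect.connector.policy.ConnectorClientConfigOverridePolicy;
import org.apache.kafka.connect.connector.policy.ConnectorClientConfigRequest;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

public class AllowListOverridePolicy implements ConnectorClientConfigOverridePolicy {
    private static final Set<String> ALLOWED =
            new HashSet<>(Arrays.asList("client.id", "batch.size", "max.poll.records"));

    @Override
    public void configure(Map<String, ?> configs) {
        // No policy-level configuration in this sketch.
    }

    @Override
    public List<ConfigValue> validate(ConnectorClientConfigRequest request) {
        List<ConfigValue> results = new ArrayList<>();
        for (Map.Entry<String, Object> override : request.clientProps().entrySet()) {
            ConfigValue value = new ConfigValue(override.getKey());
            value.value(override.getValue());
            if (!ALLOWED.contains(override.getKey())) {
                value.addErrorMessage("Override of " + override.getKey()
                        + " is not permitted for connector " + request.connectorName());
            }
            results.add(value);
        }
        return results;
    }

    @Override
    public void close() {
        // Nothing to release.
    }
}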
@@ -0,0 +1,101 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.kafka.connect.connector.policy;

import org.apache.kafka.connect.connector.Connector;
import org.apache.kafka.connect.health.ConnectorType;

import java.util.Map;

public class ConnectorClientConfigRequest {

    private Map<String, Object> clientProps;
    private ClientType clientType;
    private String connectorName;
    private ConnectorType connectorType;
    private Class<? extends Connector> connectorClass;

    public ConnectorClientConfigRequest(
        String connectorName,
        ConnectorType connectorType,
        Class<? extends Connector> connectorClass,
        Map<String, Object> clientProps,
        ClientType clientType) {
        this.clientProps = clientProps;
        this.clientType = clientType;
        this.connectorName = connectorName;
        this.connectorType = connectorType;
        this.connectorClass = connectorClass;
    }

    /**
     * Provides Config with prefix {@code producer.override.} for {@link ConnectorType#SOURCE}.
     * Provides Config with prefix {@code consumer.override.} for {@link ConnectorType#SINK}.
     * Provides Config with prefix {@code producer.override.} for {@link ConnectorType#SINK} for DLQ.
     * Provides Config with prefix {@code admin.override.} for {@link ConnectorType#SINK} for DLQ.
     *
     * @return The client properties specified in the Connector Config with prefix {@code producer.override.},
     * {@code consumer.override.} and {@code admin.override.}. The configs don't include the prefixes.
     */
    public Map<String, Object> clientProps() {
        return clientProps;
    }

    /**
     * {@link ClientType#PRODUCER} for {@link ConnectorType#SOURCE}
     * {@link ClientType#CONSUMER} for {@link ConnectorType#SINK}
     * {@link ClientType#PRODUCER} for DLQ in {@link ConnectorType#SINK}
     * {@link ClientType#ADMIN} for DLQ Topic Creation in {@link ConnectorType#SINK}
     *
     * @return enumeration specifying the client type that is being overridden by the worker; never null.
     */
    public ClientType clientType() {
        return clientType;
    }

    /**
     * Name of the connector specified in the connector config.
     *
     * @return name of the connector; never null.
     */
    public String connectorName() {
        return connectorName;
    }

    /**
     * Type of the Connector.
     *
     * @return enumeration specifying the type of the connector {@link ConnectorType#SINK} or {@link ConnectorType#SOURCE}.
     */
    public ConnectorType connectorType() {
        return connectorType;
    }

    /**
     * The class of the Connector.
     *
     * @return the class of the Connector being created; never null
     */
    public Class<? extends Connector> connectorClass() {
        return connectorClass;
    }

    public enum ClientType {
        PRODUCER, CONSUMER, ADMIN;
    }
}
@@ -0,0 +1,353 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.connect.data;

import org.apache.kafka.connect.errors.DataException;

import java.math.BigDecimal;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.Collections;
import java.util.EnumMap;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;

public class ConnectSchema implements Schema {
    /**
     * Maps Schema.Types to a list of Java classes that can be used to represent them.
     */
    private static final Map<Type, List<Class>> SCHEMA_TYPE_CLASSES = new EnumMap<>(Type.class);
    /**
     * Maps known logical types to a list of Java classes that can be used to represent them.
     */
    private static final Map<String, List<Class>> LOGICAL_TYPE_CLASSES = new HashMap<>();

    /**
     * Maps the Java classes to the corresponding Schema.Type.
     */
    private static final Map<Class<?>, Type> JAVA_CLASS_SCHEMA_TYPES = new HashMap<>();

    static {
        SCHEMA_TYPE_CLASSES.put(Type.INT8, Collections.singletonList((Class) Byte.class));
        SCHEMA_TYPE_CLASSES.put(Type.INT16, Collections.singletonList((Class) Short.class));
        SCHEMA_TYPE_CLASSES.put(Type.INT32, Collections.singletonList((Class) Integer.class));
        SCHEMA_TYPE_CLASSES.put(Type.INT64, Collections.singletonList((Class) Long.class));
        SCHEMA_TYPE_CLASSES.put(Type.FLOAT32, Collections.singletonList((Class) Float.class));
        SCHEMA_TYPE_CLASSES.put(Type.FLOAT64, Collections.singletonList((Class) Double.class));
        SCHEMA_TYPE_CLASSES.put(Type.BOOLEAN, Collections.singletonList((Class) Boolean.class));
        SCHEMA_TYPE_CLASSES.put(Type.STRING, Collections.singletonList((Class) String.class));
        // Bytes are special and have 2 representations. byte[] causes problems because it doesn't handle equals() and
        // hashCode() like we want objects to, so we support both byte[] and ByteBuffer. Using plain byte[] can cause
        // those methods to fail, so ByteBuffers are recommended
        SCHEMA_TYPE_CLASSES.put(Type.BYTES, Arrays.asList((Class) byte[].class, (Class) ByteBuffer.class));
        SCHEMA_TYPE_CLASSES.put(Type.ARRAY, Collections.singletonList((Class) List.class));
        SCHEMA_TYPE_CLASSES.put(Type.MAP, Collections.singletonList((Class) Map.class));
        SCHEMA_TYPE_CLASSES.put(Type.STRUCT, Collections.singletonList((Class) Struct.class));

        for (Map.Entry<Type, List<Class>> schemaClasses : SCHEMA_TYPE_CLASSES.entrySet()) {
            for (Class<?> schemaClass : schemaClasses.getValue())
                JAVA_CLASS_SCHEMA_TYPES.put(schemaClass, schemaClasses.getKey());
        }

        LOGICAL_TYPE_CLASSES.put(Decimal.LOGICAL_NAME, Collections.singletonList((Class) BigDecimal.class));
        LOGICAL_TYPE_CLASSES.put(Date.LOGICAL_NAME, Collections.singletonList((Class) java.util.Date.class));
        LOGICAL_TYPE_CLASSES.put(Time.LOGICAL_NAME, Collections.singletonList((Class) java.util.Date.class));
        LOGICAL_TYPE_CLASSES.put(Timestamp.LOGICAL_NAME, Collections.singletonList((Class) java.util.Date.class));
        // We don't need to put these into JAVA_CLASS_SCHEMA_TYPES since that's only used to determine schemas for
        // schemaless data and logical types will have ambiguous schemas (e.g. many of them use the same Java class) so
        // they should not be used without schemas.
    }

    // The type of the field
    private final Type type;
    private final boolean optional;
    private final Object defaultValue;

    private final List<Field> fields;
    private final Map<String, Field> fieldsByName;

    private final Schema keySchema;
    private final Schema valueSchema;

    // Optional name and version provide a built-in way to indicate what type of data is included. Most
    // useful for structs to indicate the semantics of the struct and map it to some existing underlying
    // serializer-specific schema. However, can also be useful in specifying other logical types (e.g. a set is an array
    // with additional constraints).
    private final String name;
    private final Integer version;
    // Optional human readable documentation describing this schema.
    private final String doc;
    private final Map<String, String> parameters;
    // precomputed hash code. There is no need to re-compute every time hashCode() is called.
    private Integer hash = null;

    /**
     * Construct a Schema. Most users should not construct schemas manually, preferring {@link SchemaBuilder} instead.
     */
    public ConnectSchema(Type type, boolean optional, Object defaultValue, String name, Integer version, String doc, Map<String, String> parameters, List<Field> fields, Schema keySchema, Schema valueSchema) {
        this.type = type;
        this.optional = optional;
        this.defaultValue = defaultValue;
        this.name = name;
        this.version = version;
        this.doc = doc;
        this.parameters = parameters;

        if (this.type == Type.STRUCT) {
            this.fields = fields == null ? Collections.<Field>emptyList() : fields;
            this.fieldsByName = new HashMap<>(this.fields.size());
            for (Field field : this.fields)
                fieldsByName.put(field.name(), field);
        } else {
            this.fields = null;
            this.fieldsByName = null;
        }

        this.keySchema = keySchema;
        this.valueSchema = valueSchema;
    }

    /**
     * Construct a Schema for a primitive type, setting schema parameters, struct fields, and key and value schemas to null.
     */
    public ConnectSchema(Type type, boolean optional, Object defaultValue, String name, Integer version, String doc) {
        this(type, optional, defaultValue, name, version, doc, null, null, null, null);
    }

    /**
     * Construct a default schema for a primitive type. The schema is required, has no default value, name, version,
     * or documentation.
     */
    public ConnectSchema(Type type) {
        this(type, false, null, null, null, null);
    }

    @Override
    public Type type() {
        return type;
    }

    @Override
    public boolean isOptional() {
        return optional;
    }

    @Override
    public Object defaultValue() {
        return defaultValue;
    }

    @Override
    public String name() {
        return name;
    }

    @Override
    public Integer version() {
        return version;
    }

    @Override
    public String doc() {
        return doc;
    }

    @Override
    public Map<String, String> parameters() {
        return parameters;
    }

    @Override
    public List<Field> fields() {
        if (type != Type.STRUCT)
            throw new DataException("Cannot list fields on non-struct type");
        return fields;
    }

    @Override
    public Field field(String fieldName) {
        if (type != Type.STRUCT)
            throw new DataException("Cannot look up fields on non-struct type");
        return fieldsByName.get(fieldName);
    }

    @Override
    public Schema keySchema() {
        if (type != Type.MAP)
            throw new DataException("Cannot look up key schema on non-map type");
        return keySchema;
    }

    @Override
    public Schema valueSchema() {
        if (type != Type.MAP && type != Type.ARRAY)
            throw new DataException("Cannot look up value schema on non-array and non-map type");
        return valueSchema;
    }

    /**
     * Validate that the value can be used with the schema, i.e. that its type matches the schema type and nullability
     * requirements. Throws a DataException if the value is invalid.
     * @param schema Schema to test
     * @param value value to test
     */
    public static void validateValue(Schema schema, Object value) {
        validateValue(null, schema, value);
    }

    public static void validateValue(String name, Schema schema, Object value) {
        if (value == null) {
            if (!schema.isOptional())
                throw new DataException("Invalid value: null used for required field: \"" + name
                        + "\", schema type: " + schema.type());
            return;
        }

        List<Class> expectedClasses = expectedClassesFor(schema);

        if (expectedClasses == null)
            throw new DataException("Invalid Java object for schema type " + schema.type()
                    + ": " + value.getClass()
                    + " for field: \"" + name + "\"");

        boolean foundMatch = false;
        if (expectedClasses.size() == 1) {
            foundMatch = expectedClasses.get(0).isInstance(value);
        } else {
            for (Class<?> expectedClass : expectedClasses) {
                if (expectedClass.isInstance(value)) {
                    foundMatch = true;
                    break;
                }
            }
        }

        if (!foundMatch)
            throw new DataException("Invalid Java object for schema type " + schema.type()
                    + ": " + value.getClass()
                    + " for field: \"" + name + "\"");

        switch (schema.type()) {
            case STRUCT:
                Struct struct = (Struct) value;
                if (!struct.schema().equals(schema))
                    throw new DataException("Struct schemas do not match.");
                struct.validate();
                break;
            case ARRAY:
                List<?> array = (List<?>) value;
                for (Object entry : array)
                    validateValue(schema.valueSchema(), entry);
                break;
            case MAP:
                Map<?, ?> map = (Map<?, ?>) value;
                for (Map.Entry<?, ?> entry : map.entrySet()) {
                    validateValue(schema.keySchema(), entry.getKey());
                    validateValue(schema.valueSchema(), entry.getValue());
                }
                break;
        }
    }

    private static List<Class> expectedClassesFor(Schema schema) {
        List<Class> expectedClasses = LOGICAL_TYPE_CLASSES.get(schema.name());
        if (expectedClasses == null)
            expectedClasses = SCHEMA_TYPE_CLASSES.get(schema.type());
        return expectedClasses;
    }

    /**
     * Validate that the value can be used for this schema, i.e. that its type matches the schema type and optional
     * requirements. Throws a DataException if the value is invalid.
     * @param value the value to validate
     */
    public void validateValue(Object value) {
        validateValue(this, value);
    }

    @Override
    public ConnectSchema schema() {
        return this;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        ConnectSchema schema = (ConnectSchema) o;
        return Objects.equals(optional, schema.optional) &&
                Objects.equals(version, schema.version) &&
                Objects.equals(name, schema.name) &&
                Objects.equals(doc, schema.doc) &&
                Objects.equals(type, schema.type) &&
                Objects.deepEquals(defaultValue, schema.defaultValue) &&
                Objects.equals(fields, schema.fields) &&
                Objects.equals(keySchema, schema.keySchema) &&
                Objects.equals(valueSchema, schema.valueSchema) &&
                Objects.equals(parameters, schema.parameters);
    }

    @Override
    public int hashCode() {
        if (this.hash == null) {
            this.hash = Objects.hash(type, optional, defaultValue, fields, keySchema, valueSchema, name, version, doc,
                    parameters);
        }
        return this.hash;
    }

    @Override
    public String toString() {
        if (name != null)
            return "Schema{" + name + ":" + type + "}";
        else
            return "Schema{" + type + "}";
    }

    /**
     * Get the {@link Schema.Type} associated with the given class.
     *
     * @param klass the Class to look up
     * @return the corresponding type, or null if there is no matching type
     */
    public static Type schemaType(Class<?> klass) {
        synchronized (JAVA_CLASS_SCHEMA_TYPES) {
            Type schemaType = JAVA_CLASS_SCHEMA_TYPES.get(klass);
            if (schemaType != null)
                return schemaType;

            // Since the lookup only checks the class, we need to also try each registered class
            // to see whether klass is a subclass of it
            for (Map.Entry<Class<?>, Type> entry : JAVA_CLASS_SCHEMA_TYPES.entrySet()) {
                try {
                    klass.asSubclass(entry.getKey());
                    // Cache this for subsequent lookups
                    JAVA_CLASS_SCHEMA_TYPES.put(klass, entry.getValue());
                    return entry.getValue();
                } catch (ClassCastException e) {
                    // Expected, ignore
                }
            }
        }
        return null;
    }
}
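A minimal sketch of value validation against a primitive schema, using only the constructors and static validateValue shown above; the class name is hypothetical.

import org.apache.kafka.connect.data.ConnectSchema;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.errors.DataException;

public class ValidateExample {
    public static void main(String[] args) {
        Schema required = new ConnectSchema(Schema.Type.INT32);

        ConnectSchema.validateValue(required, 42);       // passes: Integer matches INT32

        try {
            ConnectSchema.validateValue(required, "42"); // String does not match INT32
        } catch (DataException e) {
            System.out.println("Rejected: " + e.getMessage());
        }

        try {
            ConnectSchema.validateValue(required, null); // null rejected: schema is not optional
        } catch (DataException e) {
            System.out.println("Rejected: " + e.getMessage());
        }
    }
}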
@@ -0,0 +1,75 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.connect.data;

import org.apache.kafka.connect.errors.DataException;

import java.util.Calendar;
import java.util.TimeZone;

/**
 * <p>
 * A date representing a calendar day with no time of day or timezone. The corresponding Java type is a java.util.Date
 * with hours, minutes, seconds, milliseconds set to 0. The underlying representation is an integer representing the
 * number of standardized days (based on a number of milliseconds with 24 hours/day, 60 minutes/hour, 60 seconds/minute,
 * 1000 milliseconds/second) since Unix epoch.
 * </p>
 */
public class Date {
    public static final String LOGICAL_NAME = "org.apache.kafka.connect.data.Date";

    private static final long MILLIS_PER_DAY = 24 * 60 * 60 * 1000;

    private static final TimeZone UTC = TimeZone.getTimeZone("UTC");

    /**
     * Returns a SchemaBuilder for a Date. By returning a SchemaBuilder you can override additional schema settings such
     * as required/optional, default value, and documentation.
     * @return a SchemaBuilder
     */
    public static SchemaBuilder builder() {
        return SchemaBuilder.int32()
                .name(LOGICAL_NAME)
                .version(1);
    }

    public static final Schema SCHEMA = builder().schema();

    /**
     * Convert a value from its logical format (Date) to its encoded format.
     * @param value the logical value
     * @return the encoded value
     */
    public static int fromLogical(Schema schema, java.util.Date value) {
        if (!(LOGICAL_NAME.equals(schema.name())))
            throw new DataException("Requested conversion of Date object but the schema does not match.");
        Calendar calendar = Calendar.getInstance(UTC);
        calendar.setTime(value);
        if (calendar.get(Calendar.HOUR_OF_DAY) != 0 || calendar.get(Calendar.MINUTE) != 0 ||
                calendar.get(Calendar.SECOND) != 0 || calendar.get(Calendar.MILLISECOND) != 0) {
            throw new DataException("Kafka Connect Date type should not have any time fields set to non-zero values.");
        }
        long unixMillis = calendar.getTimeInMillis();
        return (int) (unixMillis / MILLIS_PER_DAY);
    }

    public static java.util.Date toLogical(Schema schema, int value) {
        if (!(LOGICAL_NAME.equals(schema.name())))
            throw new DataException("Requested conversion of Date object but the schema does not match.");
        return new java.util.Date(value * MILLIS_PER_DAY);
    }
}
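A short round-trip sketch of the Date logical type: midnight UTC is encoded as a count of days since the Unix epoch and decoded back; the class name is hypothetical.

import org.apache.kafka.connect.data.Date;
import org.apache.kafka.connect.data.Schema;

import java.util.Calendar;
import java.util.TimeZone;

public class DateExample {
    public static void main(String[] args) {
        Schema schema = Date.SCHEMA;

        Calendar day = Calendar.getInstance(TimeZone.getTimeZone("UTC"));
        day.clear();
        day.set(1970, Calendar.JANUARY, 11);   // midnight UTC, ten days after the epoch

        int encoded = Date.fromLogical(schema, day.getTime());
        System.out.println(encoded);           // 10

        java.util.Date decoded = Date.toLogical(schema, encoded);
        System.out.println(decoded.equals(day.getTime())); // true
    }
}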
@@ -0,0 +1,86 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.connect.data;

import org.apache.kafka.connect.errors.DataException;

import java.math.BigDecimal;
import java.math.BigInteger;

/**
 * <p>
 * An arbitrary-precision signed decimal number. The value is unscaled * 10 ^ -scale where:
 * <ul>
 *     <li>unscaled is an integer</li>
 *     <li>scale is an integer representing how many digits the decimal point should be shifted on the unscaled value</li>
 * </ul>
 * </p>
 * <p>
 * Decimal does not provide a fixed schema because it is parameterized by the scale, which is fixed on the schema
 * rather than being part of the value.
 * </p>
 * <p>
 * The underlying representation of this type is bytes containing a two's complement integer
 * </p>
 */
public class Decimal {
    public static final String LOGICAL_NAME = "org.apache.kafka.connect.data.Decimal";
    public static final String SCALE_FIELD = "scale";

    /**
     * Returns a SchemaBuilder for a Decimal with the given scale factor. By returning a SchemaBuilder you can override
     * additional schema settings such as required/optional, default value, and documentation.
     * @param scale the scale factor to apply to unscaled values
     * @return a SchemaBuilder
     */
    public static SchemaBuilder builder(int scale) {
        return SchemaBuilder.bytes()
                .name(LOGICAL_NAME)
                .parameter(SCALE_FIELD, Integer.toString(scale))
                .version(1);
    }

    public static Schema schema(int scale) {
        return builder(scale).build();
    }

    /**
     * Convert a value from its logical format (BigDecimal) to its encoded format.
     * @param value the logical value
     * @return the encoded value
     */
    public static byte[] fromLogical(Schema schema, BigDecimal value) {
        if (value.scale() != scale(schema))
            throw new DataException("BigDecimal has mismatching scale value for given Decimal schema");
        return value.unscaledValue().toByteArray();
    }

    public static BigDecimal toLogical(Schema schema, byte[] value) {
        return new BigDecimal(new BigInteger(value), scale(schema));
    }

    private static int scale(Schema schema) {
        String scaleString = schema.parameters().get(SCALE_FIELD);
        if (scaleString == null)
            throw new DataException("Invalid Decimal schema: scale parameter not found.");
        try {
            return Integer.parseInt(scaleString);
        } catch (NumberFormatException e) {
            throw new DataException("Invalid scale parameter found in Decimal schema: ", e);
        }
    }
}
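A sketch of the Decimal logical type in use: the scale lives in the schema parameters while the bytes carry only the unscaled two's complement integer; the class name and price value are illustrative.

import org.apache.kafka.connect.data.Decimal;
import org.apache.kafka.connect.data.Schema;

import java.math.BigDecimal;

public class DecimalExample {
    public static void main(String[] args) {
        Schema priceSchema = Decimal.schema(2);          // scale fixed at 2 decimal places

        BigDecimal price = new BigDecimal("19.99");      // unscaled value 1999, scale 2
        byte[] encoded = Decimal.fromLogical(priceSchema, price);

        BigDecimal decoded = Decimal.toLogical(priceSchema, encoded);
        System.out.println(decoded);                     // 19.99
        System.out.println(price.equals(decoded));       // true
    }
}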
@@ -0,0 +1,85 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.connect.data;

import java.util.Objects;

/**
 * <p>
 * A field in a {@link Struct}, consisting of a field name, index, and {@link Schema} for the field value.
 * </p>
 */
public class Field {
    private final String name;
    private final int index;
    private final Schema schema;

    public Field(String name, int index, Schema schema) {
        this.name = name;
        this.index = index;
        this.schema = schema;
    }

    /**
     * Get the name of this field.
     * @return the name of this field
     */
    public String name() {
        return name;
    }

    /**
     * Get the index of this field within the struct.
     * @return the index of this field
     */
    public int index() {
        return index;
    }

    /**
     * Get the schema of this field
     * @return the schema of values of this field
     */
    public Schema schema() {
        return schema;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        Field field = (Field) o;
        return Objects.equals(index, field.index) &&
                Objects.equals(name, field.name) &&
                Objects.equals(schema, field.schema);
    }

    @Override
    public int hashCode() {
        return Objects.hash(name, index, schema);
    }

    @Override
    public String toString() {
        return "Field{" +
                "name=" + name +
                ", index=" + index +
                ", schema=" + schema +
                "}";
    }
}
@@ -0,0 +1,222 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.data;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.Locale;
|
||||
import java.util.Map;
|
||||
|
||||
/**
|
||||
* <p>
|
||||
* Definition of an abstract data type. Data types can be primitive types (integer types, floating point types,
|
||||
* boolean, strings, and bytes) or complex types (typed arrays, maps with one key schema and value schema,
|
||||
* and structs that have a fixed set of field names each with an associated value schema). Any type can be specified
|
||||
* as optional, allowing it to be omitted (resulting in null values when it is missing) and can specify a default
|
||||
* value.
|
||||
* </p>
|
||||
* <p>
|
||||
* All schemas may have some associated metadata: a name, version, and documentation. These are all considered part
|
||||
* of the schema itself and included when comparing schemas. Besides adding important metadata, these fields enable
|
||||
* the specification of logical types that specify additional constraints and semantics (e.g. UNIX timestamps are
|
||||
* just an int64, but the user needs the know about the additional semantics to interpret it properly).
|
||||
* </p>
|
||||
* <p>
|
||||
* Schemas can be created directly, but in most cases using {@link SchemaBuilder} will be simpler.
|
||||
* </p>
|
||||
*/
|
||||
public interface Schema {
|
||||
/**
|
||||
* The type of a schema. These only include the core types; logical types must be determined by checking the schema name.
|
||||
*/
|
||||
enum Type {
|
||||
/**
|
||||
* 8-bit signed integer
|
||||
*
|
||||
* Note that if you have an unsigned 8-bit data source, {@link Type#INT16} will be required to safely capture all valid values
|
||||
*/
|
||||
INT8,
|
||||
/**
|
||||
* 16-bit signed integer
|
||||
*
|
||||
* Note that if you have an unsigned 16-bit data source, {@link Type#INT32} will be required to safely capture all valid values
|
||||
*/
|
||||
INT16,
|
||||
/**
|
||||
* 32-bit signed integer
|
||||
*
|
||||
* Note that if you have an unsigned 32-bit data source, {@link Type#INT64} will be required to safely capture all valid values
|
||||
*/
|
||||
INT32,
|
||||
/**
|
||||
* 64-bit signed integer
|
||||
*
|
||||
* Note that if you have an unsigned 64-bit data source, the {@link Decimal} logical type (encoded as {@link Type#BYTES})
|
||||
* will be required to safely capture all valid values
|
||||
*/
|
||||
INT64,
|
||||
/**
|
||||
* 32-bit IEEE 754 floating point number
|
||||
*/
|
||||
FLOAT32,
|
||||
/**
|
||||
* 64-bit IEEE 754 floating point number
|
||||
*/
|
||||
FLOAT64,
|
||||
/**
|
||||
* Boolean value (true or false)
|
||||
*/
|
||||
BOOLEAN,
|
||||
/**
|
||||
* Character string that supports all Unicode characters.
|
||||
*
|
||||
* Note that this does not imply any specific encoding (e.g. UTF-8) as this is an in-memory representation.
|
||||
*/
|
||||
STRING,
|
||||
/**
|
||||
* Sequence of unsigned 8-bit bytes
|
||||
*/
|
||||
BYTES,
|
||||
/**
|
||||
* An ordered sequence of elements, each of which shares the same type.
|
||||
*/
|
||||
ARRAY,
|
||||
/**
|
||||
* A mapping from keys to values. Both keys and values can be arbitrarily complex types, including complex types
|
||||
* such as {@link Struct}.
|
||||
*/
|
||||
MAP,
|
||||
/**
|
||||
* A structured record containing a set of named fields, each field using a fixed, independent {@link Schema}.
|
||||
*/
|
||||
STRUCT;
|
||||
|
||||
private String name;
|
||||
|
||||
Type() {
|
||||
this.name = this.name().toLowerCase(Locale.ROOT);
|
||||
}
|
||||
|
||||
public String getName() {
|
||||
return name;
|
||||
}
|
||||
|
||||
public boolean isPrimitive() {
|
||||
switch (this) {
|
||||
case INT8:
|
||||
case INT16:
|
||||
case INT32:
|
||||
case INT64:
|
||||
case FLOAT32:
|
||||
case FLOAT64:
|
||||
case BOOLEAN:
|
||||
case STRING:
|
||||
case BYTES:
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
Schema INT8_SCHEMA = SchemaBuilder.int8().build();
|
||||
Schema INT16_SCHEMA = SchemaBuilder.int16().build();
|
||||
Schema INT32_SCHEMA = SchemaBuilder.int32().build();
|
||||
Schema INT64_SCHEMA = SchemaBuilder.int64().build();
|
||||
Schema FLOAT32_SCHEMA = SchemaBuilder.float32().build();
|
||||
Schema FLOAT64_SCHEMA = SchemaBuilder.float64().build();
|
||||
Schema BOOLEAN_SCHEMA = SchemaBuilder.bool().build();
|
||||
Schema STRING_SCHEMA = SchemaBuilder.string().build();
|
||||
Schema BYTES_SCHEMA = SchemaBuilder.bytes().build();
|
||||
|
||||
Schema OPTIONAL_INT8_SCHEMA = SchemaBuilder.int8().optional().build();
|
||||
Schema OPTIONAL_INT16_SCHEMA = SchemaBuilder.int16().optional().build();
|
||||
Schema OPTIONAL_INT32_SCHEMA = SchemaBuilder.int32().optional().build();
|
||||
Schema OPTIONAL_INT64_SCHEMA = SchemaBuilder.int64().optional().build();
|
||||
Schema OPTIONAL_FLOAT32_SCHEMA = SchemaBuilder.float32().optional().build();
|
||||
Schema OPTIONAL_FLOAT64_SCHEMA = SchemaBuilder.float64().optional().build();
|
||||
Schema OPTIONAL_BOOLEAN_SCHEMA = SchemaBuilder.bool().optional().build();
|
||||
Schema OPTIONAL_STRING_SCHEMA = SchemaBuilder.string().optional().build();
|
||||
Schema OPTIONAL_BYTES_SCHEMA = SchemaBuilder.bytes().optional().build();
|
||||
|
||||
/**
|
||||
* @return the type of this schema
|
||||
*/
|
||||
Type type();
|
||||
|
||||
/**
|
||||
* @return true if this field is optional, false otherwise
|
||||
*/
|
||||
boolean isOptional();
|
||||
|
||||
/**
|
||||
* @return the default value for this schema
|
||||
*/
|
||||
Object defaultValue();
|
||||
|
||||
/**
|
||||
* @return the name of this schema
|
||||
*/
|
||||
String name();
|
||||
|
||||
/**
|
||||
* Get the optional version of the schema. If a version is included, newer versions *must* be larger than older ones.
|
||||
* @return the version of this schema
|
||||
*/
|
||||
Integer version();
|
||||
|
||||
/**
|
||||
* @return the documentation for this schema
|
||||
*/
|
||||
String doc();
|
||||
|
||||
/**
|
||||
* Get a map of schema parameters.
|
||||
* @return Map containing parameters for this schema, or null if there are no parameters
|
||||
*/
|
||||
Map<String, String> parameters();
|
||||
|
||||
/**
|
||||
* Get the key schema for this map schema. Throws a DataException if this schema is not a map.
|
||||
* @return the key schema
|
||||
*/
|
||||
Schema keySchema();
|
||||
|
||||
/**
|
||||
* Get the value schema for this map or array schema. Throws a DataException if this schema is not a map or array.
|
||||
* @return the value schema
|
||||
*/
|
||||
Schema valueSchema();
|
||||
|
||||
/**
|
||||
* Get the list of fields for this Schema. Throws a DataException if this schema is not a struct.
|
||||
* @return the list of fields for this Schema
|
||||
*/
|
||||
List<Field> fields();
|
||||
|
||||
/**
|
||||
* Get a field for this Schema by name. Throws a DataException if this schema is not a struct.
|
||||
* @param fieldName the name of the field to look up
|
||||
* @return the Field object for the specified field, or null if there is no field with the given name
|
||||
*/
|
||||
Field field(String fieldName);
|
||||
|
||||
/**
|
||||
* Return a concrete instance of the {@link Schema}
|
||||
* @return the {@link Schema}
|
||||
*/
|
||||
Schema schema();
|
||||
}
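The accessors above are easiest to see in use. The following is an illustrative sketch (not part of the patch; the class name and values are arbitrary) that builds a map schema and then walks it with the methods and constants defined by this interface:

import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder;

public class SchemaIntrospectionExample {
    public static void main(String[] args) {
        // A map from string keys to optional 64-bit integer values.
        Schema countsSchema = SchemaBuilder.map(Schema.STRING_SCHEMA, Schema.OPTIONAL_INT64_SCHEMA).build();

        System.out.println(countsSchema.type());                     // MAP
        System.out.println(countsSchema.type().isPrimitive());       // false
        System.out.println(countsSchema.keySchema().type());         // STRING
        System.out.println(countsSchema.valueSchema().isOptional()); // true
    }
}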
|
||||
@@ -0,0 +1,61 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.data;
|
||||
|
||||
import java.util.Objects;
|
||||
|
||||
public class SchemaAndValue {
|
||||
private final Schema schema;
|
||||
private final Object value;
|
||||
|
||||
public static final SchemaAndValue NULL = new SchemaAndValue(null, null);
|
||||
|
||||
public SchemaAndValue(Schema schema, Object value) {
|
||||
this.value = value;
|
||||
this.schema = schema;
|
||||
}
|
||||
|
||||
public Schema schema() {
|
||||
return schema;
|
||||
}
|
||||
|
||||
public Object value() {
|
||||
return value;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (this == o) return true;
|
||||
if (o == null || getClass() != o.getClass()) return false;
|
||||
SchemaAndValue that = (SchemaAndValue) o;
|
||||
return Objects.equals(schema, that.schema) &&
|
||||
Objects.equals(value, that.value);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return Objects.hash(schema, value);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "SchemaAndValue{" +
|
||||
"schema=" + schema +
|
||||
", value=" + value +
|
||||
'}';
|
||||
}
|
||||
}
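SchemaAndValue is the small carrier pairing a value with the schema that describes it (converters, for example, return one). A minimal sketch, not part of the patch:

import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaAndValue;

public class SchemaAndValueExample {
    public static void main(String[] args) {
        SchemaAndValue sv = new SchemaAndValue(Schema.INT32_SCHEMA, 42);
        System.out.println(sv.schema().type()); // INT32
        System.out.println(sv.value());         // 42

        // The shared NULL constant carries neither a schema nor a value.
        System.out.println(SchemaAndValue.NULL.value()); // null
    }
}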
|
||||
@@ -0,0 +1,444 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.data;
|
||||
|
||||
import org.apache.kafka.connect.errors.DataException;
|
||||
import org.apache.kafka.connect.errors.SchemaBuilderException;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.LinkedHashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
/**
|
||||
* <p>
|
||||
* SchemaBuilder provides a fluent API for constructing {@link Schema} objects. It allows you to set each of the
|
||||
* properties for the schema and each call returns the SchemaBuilder so the calls can be chained. When nested types
|
||||
* are required, use one of the predefined schemas from {@link Schema} or use a second SchemaBuilder inline.
|
||||
* </p>
|
||||
* <p>
|
||||
* Here is an example of building a struct schema:
|
||||
* <pre>
|
||||
* Schema dateSchema = SchemaBuilder.struct()
|
||||
* .name("com.example.CalendarDate").version(2).doc("A calendar date including month, day, and year.")
|
||||
* .field("month", Schema.STRING_SCHEMA)
|
||||
* .field("day", Schema.INT8_SCHEMA)
|
||||
* .field("year", Schema.INT16_SCHEMA)
|
||||
* .build();
|
||||
* </pre>
|
||||
* </p>
|
||||
* <p>
|
||||
* Here is an example of using a second SchemaBuilder to construct complex, nested types:
|
||||
* <pre>
|
||||
* Schema userListSchema = SchemaBuilder.array(
|
||||
* SchemaBuilder.struct().name("com.example.User").field("username", Schema.STRING_SCHEMA).field("id", Schema.INT64_SCHEMA).build()
|
||||
* ).build();
|
||||
* </pre>
|
||||
* </p>
|
||||
*/
|
||||
public class SchemaBuilder implements Schema {
|
||||
private static final String TYPE_FIELD = "type";
|
||||
private static final String OPTIONAL_FIELD = "optional";
|
||||
private static final String DEFAULT_FIELD = "default";
|
||||
private static final String NAME_FIELD = "name";
|
||||
private static final String VERSION_FIELD = "version";
|
||||
private static final String DOC_FIELD = "doc";
|
||||
|
||||
|
||||
private final Type type;
|
||||
private Boolean optional = null;
|
||||
private Object defaultValue = null;
|
||||
|
||||
private Map<String, Field> fields = null;
|
||||
private Schema keySchema = null;
|
||||
private Schema valueSchema = null;
|
||||
|
||||
private String name;
|
||||
private Integer version;
|
||||
// Optional human-readable documentation describing this schema.
|
||||
private String doc;
|
||||
// Additional parameters for logical types.
|
||||
private Map<String, String> parameters;
|
||||
|
||||
public SchemaBuilder(Type type) {
|
||||
if (null == type)
|
||||
throw new SchemaBuilderException("type cannot be null");
|
||||
this.type = type;
|
||||
if (type == Type.STRUCT) {
|
||||
fields = new LinkedHashMap<>();
|
||||
}
|
||||
}
|
||||
|
||||
// Common/metadata fields
|
||||
|
||||
@Override
|
||||
public boolean isOptional() {
|
||||
return optional == null ? false : optional;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set this schema as optional.
|
||||
* @return the SchemaBuilder
|
||||
*/
|
||||
public SchemaBuilder optional() {
|
||||
checkCanSet(OPTIONAL_FIELD, optional, true);
|
||||
optional = true;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set this schema as required. This is the default, but this method can be used to make this choice explicit.
|
||||
* @return the SchemaBuilder
|
||||
*/
|
||||
public SchemaBuilder required() {
|
||||
checkCanSet(OPTIONAL_FIELD, optional, false);
|
||||
optional = false;
|
||||
return this;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Object defaultValue() {
|
||||
return defaultValue;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the default value for this schema. The value is validated against the schema type, throwing a
|
||||
* {@link SchemaBuilderException} if it does not match.
|
||||
* @param value the default value
|
||||
* @return the SchemaBuilder
|
||||
*/
|
||||
public SchemaBuilder defaultValue(Object value) {
|
||||
checkCanSet(DEFAULT_FIELD, defaultValue, value);
|
||||
checkNotNull(TYPE_FIELD, type, DEFAULT_FIELD);
|
||||
try {
|
||||
ConnectSchema.validateValue(this, value);
|
||||
} catch (DataException e) {
|
||||
throw new SchemaBuilderException("Invalid default value", e);
|
||||
}
|
||||
defaultValue = value;
|
||||
return this;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String name() {
|
||||
return name;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the name of this schema.
|
||||
* @param name the schema name
|
||||
* @return the SchemaBuilder
|
||||
*/
|
||||
public SchemaBuilder name(String name) {
|
||||
checkCanSet(NAME_FIELD, this.name, name);
|
||||
this.name = name;
|
||||
return this;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Integer version() {
|
||||
return version;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the version of this schema. Schema versions are integers which, if provided, must indicate which schema is
|
||||
* newer and which is older by their ordering.
|
||||
* @param version the schema version
|
||||
* @return the SchemaBuilder
|
||||
*/
|
||||
public SchemaBuilder version(Integer version) {
|
||||
checkCanSet(VERSION_FIELD, this.version, version);
|
||||
this.version = version;
|
||||
return this;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String doc() {
|
||||
return doc;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the documentation for this schema.
|
||||
* @param doc the documentation
|
||||
* @return the SchemaBuilder
|
||||
*/
|
||||
public SchemaBuilder doc(String doc) {
|
||||
checkCanSet(DOC_FIELD, this.doc, doc);
|
||||
this.doc = doc;
|
||||
return this;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Map<String, String> parameters() {
|
||||
return parameters == null ? null : Collections.unmodifiableMap(parameters);
|
||||
}
|
||||
|
||||
/**
|
||||
* Set a schema parameter.
|
||||
* @param propertyName name of the schema property to define
|
||||
* @param propertyValue value of the schema property to define, as a String
|
||||
* @return the SchemaBuilder
|
||||
*/
|
||||
public SchemaBuilder parameter(String propertyName, String propertyValue) {
|
||||
// Preserve order of insertion with a LinkedHashMap. This isn't strictly necessary, but is nice if logical types
|
||||
// can print their properties in a consistent order.
|
||||
if (parameters == null)
|
||||
parameters = new LinkedHashMap<>();
|
||||
parameters.put(propertyName, propertyValue);
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set schema parameters. This operation is additive; it does not remove existing parameters that do not appear in
* the set of properties passed to this method.
|
||||
* @param props Map of properties to set
|
||||
* @return the SchemaBuilder
|
||||
*/
|
||||
public SchemaBuilder parameters(Map<String, String> props) {
|
||||
// Avoid creating an empty set of properties so we never have an empty map
|
||||
if (props.isEmpty())
|
||||
return this;
|
||||
if (parameters == null)
|
||||
parameters = new LinkedHashMap<>();
|
||||
parameters.putAll(props);
|
||||
return this;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Type type() {
|
||||
return type;
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a SchemaBuilder for the specified type.
|
||||
*
|
||||
* Usually it will be simpler to use one of the variants like {@link #string()} or {@link #struct()}, but this form
|
||||
* can be useful when generating schemas dynamically.
|
||||
*
|
||||
* @param type the schema type
|
||||
* @return a new SchemaBuilder
|
||||
*/
|
||||
public static SchemaBuilder type(Type type) {
|
||||
return new SchemaBuilder(type);
|
||||
}
|
||||
|
||||
// Primitive types
|
||||
|
||||
/**
|
||||
* @return a new {@link Schema.Type#INT8} SchemaBuilder
|
||||
*/
|
||||
public static SchemaBuilder int8() {
|
||||
return new SchemaBuilder(Type.INT8);
|
||||
}
|
||||
|
||||
/**
|
||||
* @return a new {@link Schema.Type#INT16} SchemaBuilder
|
||||
*/
|
||||
public static SchemaBuilder int16() {
|
||||
return new SchemaBuilder(Type.INT16);
|
||||
}
|
||||
|
||||
/**
|
||||
* @return a new {@link Schema.Type#INT32} SchemaBuilder
|
||||
*/
|
||||
public static SchemaBuilder int32() {
|
||||
return new SchemaBuilder(Type.INT32);
|
||||
}
|
||||
|
||||
/**
|
||||
* @return a new {@link Schema.Type#INT64} SchemaBuilder
|
||||
*/
|
||||
public static SchemaBuilder int64() {
|
||||
return new SchemaBuilder(Type.INT64);
|
||||
}
|
||||
|
||||
/**
|
||||
* @return a new {@link Schema.Type#FLOAT32} SchemaBuilder
|
||||
*/
|
||||
public static SchemaBuilder float32() {
|
||||
return new SchemaBuilder(Type.FLOAT32);
|
||||
}
|
||||
|
||||
/**
|
||||
* @return a new {@link Schema.Type#FLOAT64} SchemaBuilder
|
||||
*/
|
||||
public static SchemaBuilder float64() {
|
||||
return new SchemaBuilder(Type.FLOAT64);
|
||||
}
|
||||
|
||||
/**
|
||||
* @return a new {@link Schema.Type#BOOLEAN} SchemaBuilder
|
||||
*/
|
||||
public static SchemaBuilder bool() {
|
||||
return new SchemaBuilder(Type.BOOLEAN);
|
||||
}
|
||||
|
||||
/**
|
||||
* @return a new {@link Schema.Type#STRING} SchemaBuilder
|
||||
*/
|
||||
public static SchemaBuilder string() {
|
||||
return new SchemaBuilder(Type.STRING);
|
||||
}
|
||||
|
||||
/**
|
||||
* @return a new {@link Schema.Type#BYTES} SchemaBuilder
|
||||
*/
|
||||
public static SchemaBuilder bytes() {
|
||||
return new SchemaBuilder(Type.BYTES);
|
||||
}
|
||||
|
||||
|
||||
// Structs
|
||||
|
||||
/**
|
||||
* @return a new {@link Schema.Type#STRUCT} SchemaBuilder
|
||||
*/
|
||||
public static SchemaBuilder struct() {
|
||||
return new SchemaBuilder(Type.STRUCT);
|
||||
}
|
||||
|
||||
/**
|
||||
* Add a field to this struct schema. Throws a SchemaBuilderException if this is not a struct schema.
|
||||
* @param fieldName the name of the field to add
|
||||
* @param fieldSchema the Schema for the field's value
|
||||
* @return the SchemaBuilder
|
||||
*/
|
||||
public SchemaBuilder field(String fieldName, Schema fieldSchema) {
|
||||
if (type != Type.STRUCT)
|
||||
throw new SchemaBuilderException("Cannot create fields on type " + type);
|
||||
if (null == fieldName || fieldName.isEmpty())
|
||||
throw new SchemaBuilderException("fieldName cannot be null.");
|
||||
if (null == fieldSchema)
|
||||
throw new SchemaBuilderException("fieldSchema for field " + fieldName + " cannot be null.");
|
||||
int fieldIndex = fields.size();
|
||||
if (fields.containsKey(fieldName))
|
||||
throw new SchemaBuilderException("Cannot create field because of field name duplication " + fieldName);
|
||||
fields.put(fieldName, new Field(fieldName, fieldIndex, fieldSchema));
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the list of fields for this Schema. Throws a DataException if this schema is not a struct.
|
||||
* @return the list of fields for this Schema
|
||||
*/
|
||||
@Override
|
||||
public List<Field> fields() {
|
||||
if (type != Type.STRUCT)
|
||||
throw new DataException("Cannot list fields on non-struct type");
|
||||
return new ArrayList<>(fields.values());
|
||||
}
|
||||
|
||||
@Override
|
||||
public Field field(String fieldName) {
|
||||
if (type != Type.STRUCT)
|
||||
throw new DataException("Cannot look up fields on non-struct type");
|
||||
return fields.get(fieldName);
|
||||
}
|
||||
|
||||
|
||||
|
||||
// Maps & Arrays
|
||||
|
||||
/**
|
||||
* @param valueSchema the schema for elements of the array
|
||||
* @return a new {@link Schema.Type#ARRAY} SchemaBuilder
|
||||
*/
|
||||
public static SchemaBuilder array(Schema valueSchema) {
|
||||
if (null == valueSchema)
|
||||
throw new SchemaBuilderException("valueSchema cannot be null.");
|
||||
SchemaBuilder builder = new SchemaBuilder(Type.ARRAY);
|
||||
builder.valueSchema = valueSchema;
|
||||
return builder;
|
||||
}
|
||||
|
||||
/**
|
||||
* @param keySchema the schema for keys in the map
|
||||
* @param valueSchema the schema for values in the map
|
||||
* @return a new {@link Schema.Type#MAP} SchemaBuilder
|
||||
*/
|
||||
public static SchemaBuilder map(Schema keySchema, Schema valueSchema) {
|
||||
if (null == keySchema)
|
||||
throw new SchemaBuilderException("keySchema cannot be null.");
|
||||
if (null == valueSchema)
|
||||
throw new SchemaBuilderException("valueSchema cannot be null.");
|
||||
SchemaBuilder builder = new SchemaBuilder(Type.MAP);
|
||||
builder.keySchema = keySchema;
|
||||
builder.valueSchema = valueSchema;
|
||||
return builder;
|
||||
}
|
||||
|
||||
static SchemaBuilder arrayOfNull() {
|
||||
return new SchemaBuilder(Type.ARRAY);
|
||||
}
|
||||
|
||||
static SchemaBuilder mapOfNull() {
|
||||
return new SchemaBuilder(Type.MAP);
|
||||
}
|
||||
|
||||
static SchemaBuilder mapWithNullKeys(Schema valueSchema) {
|
||||
SchemaBuilder result = new SchemaBuilder(Type.MAP);
|
||||
result.valueSchema = valueSchema;
|
||||
return result;
|
||||
}
|
||||
|
||||
static SchemaBuilder mapWithNullValues(Schema keySchema) {
|
||||
SchemaBuilder result = new SchemaBuilder(Type.MAP);
|
||||
result.keySchema = keySchema;
|
||||
return result;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Schema keySchema() {
|
||||
return keySchema;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Schema valueSchema() {
|
||||
return valueSchema;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Build the Schema using the current settings
|
||||
* @return the {@link Schema}
|
||||
*/
|
||||
public Schema build() {
|
||||
return new ConnectSchema(type, isOptional(), defaultValue, name, version, doc,
|
||||
parameters == null ? null : Collections.unmodifiableMap(parameters),
|
||||
fields == null ? null : Collections.unmodifiableList(new ArrayList<>(fields.values())), keySchema, valueSchema);
|
||||
}
|
||||
|
||||
/**
|
||||
* Return a concrete instance of the {@link Schema} specified by this builder
|
||||
* @return the {@link Schema}
|
||||
*/
|
||||
@Override
|
||||
public Schema schema() {
|
||||
return build();
|
||||
}
|
||||
|
||||
private static void checkCanSet(String fieldName, Object fieldVal, Object val) {
|
||||
if (fieldVal != null && fieldVal != val)
|
||||
throw new SchemaBuilderException("Invalid SchemaBuilder call: " + fieldName + " has already been set.");
|
||||
}
|
||||
|
||||
private static void checkNotNull(String fieldName, Object val, String fieldToSet) {
|
||||
if (val == null)
|
||||
throw new SchemaBuilderException("Invalid SchemaBuilder call: " + fieldName + " must be specified to set " + fieldToSet);
|
||||
}
|
||||
}
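To complement the struct example in the class Javadoc, here is a hedged sketch (not part of the patch) showing the metadata setters and the write-once rule enforced by checkCanSet:

import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder;
import org.apache.kafka.connect.errors.SchemaBuilderException;

public class SchemaBuilderExample {
    public static void main(String[] args) {
        // An optional int32 with a default value, documentation and a free-form parameter.
        Schema retries = SchemaBuilder.int32()
                .optional()
                .defaultValue(3)                    // validated against the INT32 type
                .doc("Number of retries before giving up")
                .parameter("units", "attempts")
                .build();

        System.out.println(retries.isOptional());   // true
        System.out.println(retries.defaultValue()); // 3
        System.out.println(retries.parameters());   // {units=attempts}

        // Each metadata field may only be set once; a second, different value is rejected.
        SchemaBuilder builder = SchemaBuilder.string().name("com.example.Id");
        try {
            builder.name("com.example.Other");
        } catch (SchemaBuilderException e) {
            System.out.println("rejected: " + e.getMessage());
        }
    }
}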
|
||||
@@ -0,0 +1,196 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.data;
|
||||
|
||||
import org.apache.kafka.connect.data.Schema.Type;
|
||||
import org.apache.kafka.connect.errors.SchemaProjectorException;
|
||||
|
||||
import java.util.AbstractMap;
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Objects;
|
||||
import java.util.Set;
|
||||
|
||||
/**
|
||||
* <p>
|
||||
* SchemaProjector is a utility to project a value between compatible schemas, throwing an exception
* when incompatible schemas are provided.
|
||||
* </p>
|
||||
*/
|
||||
|
||||
public class SchemaProjector {
|
||||
|
||||
private static Set<AbstractMap.SimpleImmutableEntry<Type, Type>> promotable = new HashSet<>();
|
||||
|
||||
static {
|
||||
Type[] promotableTypes = {Type.INT8, Type.INT16, Type.INT32, Type.INT64, Type.FLOAT32, Type.FLOAT64};
|
||||
for (int i = 0; i < promotableTypes.length; ++i) {
|
||||
for (int j = i; j < promotableTypes.length; ++j) {
|
||||
promotable.add(new AbstractMap.SimpleImmutableEntry<>(promotableTypes[i], promotableTypes[j]));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Projects a value between compatible schemas and throws an exception when incompatible schemas are provided.
|
||||
* @param source the schema used to construct the record
|
||||
* @param record the value to project from source schema to target schema
|
||||
* @param target the schema to project the record to
|
||||
* @return the projected value with target schema
|
||||
* @throws SchemaProjectorException if the source and target schemas are not compatible
|
||||
*/
|
||||
public static Object project(Schema source, Object record, Schema target) throws SchemaProjectorException {
|
||||
checkMaybeCompatible(source, target);
|
||||
if (source.isOptional() && !target.isOptional()) {
|
||||
if (target.defaultValue() != null) {
|
||||
if (record != null) {
|
||||
return projectRequiredSchema(source, record, target);
|
||||
} else {
|
||||
return target.defaultValue();
|
||||
}
|
||||
} else {
|
||||
throw new SchemaProjectorException("Writer schema is optional, however, target schema does not provide a default value.");
|
||||
}
|
||||
} else {
|
||||
if (record != null) {
|
||||
return projectRequiredSchema(source, record, target);
|
||||
} else {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private static Object projectRequiredSchema(Schema source, Object record, Schema target) throws SchemaProjectorException {
|
||||
switch (target.type()) {
|
||||
case INT8:
|
||||
case INT16:
|
||||
case INT32:
|
||||
case INT64:
|
||||
case FLOAT32:
|
||||
case FLOAT64:
|
||||
case BOOLEAN:
|
||||
case BYTES:
|
||||
case STRING:
|
||||
return projectPrimitive(source, record, target);
|
||||
case STRUCT:
|
||||
return projectStruct(source, (Struct) record, target);
|
||||
case ARRAY:
|
||||
return projectArray(source, record, target);
|
||||
case MAP:
|
||||
return projectMap(source, record, target);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
private static Object projectStruct(Schema source, Struct sourceStruct, Schema target) throws SchemaProjectorException {
|
||||
Struct targetStruct = new Struct(target);
|
||||
for (Field targetField : target.fields()) {
|
||||
String fieldName = targetField.name();
|
||||
Field sourceField = source.field(fieldName);
|
||||
if (sourceField != null) {
|
||||
Object sourceFieldValue = sourceStruct.get(fieldName);
|
||||
try {
|
||||
Object targetFieldValue = project(sourceField.schema(), sourceFieldValue, targetField.schema());
|
||||
targetStruct.put(fieldName, targetFieldValue);
|
||||
} catch (SchemaProjectorException e) {
|
||||
throw new SchemaProjectorException("Error projecting " + sourceField.name(), e);
|
||||
}
|
||||
} else if (targetField.schema().isOptional()) {
|
||||
// Ignore missing field
|
||||
} else if (targetField.schema().defaultValue() != null) {
|
||||
targetStruct.put(fieldName, targetField.schema().defaultValue());
|
||||
} else {
|
||||
throw new SchemaProjectorException("Required field `" + fieldName + "` is missing from source schema: " + source);
|
||||
}
|
||||
}
|
||||
return targetStruct;
|
||||
}
|
||||
|
||||
|
||||
private static void checkMaybeCompatible(Schema source, Schema target) {
|
||||
if (source.type() != target.type() && !isPromotable(source.type(), target.type())) {
|
||||
throw new SchemaProjectorException("Schema type mismatch. source type: " + source.type() + " and target type: " + target.type());
|
||||
} else if (!Objects.equals(source.name(), target.name())) {
|
||||
throw new SchemaProjectorException("Schema name mismatch. source name: " + source.name() + " and target name: " + target.name());
|
||||
} else if (!Objects.equals(source.parameters(), target.parameters())) {
|
||||
throw new SchemaProjectorException("Schema parameters not equal. source parameters: " + source.parameters() + " and target parameters: " + target.parameters());
|
||||
}
|
||||
}
|
||||
|
||||
private static Object projectArray(Schema source, Object record, Schema target) throws SchemaProjectorException {
|
||||
List<?> array = (List<?>) record;
|
||||
List<Object> retArray = new ArrayList<>();
|
||||
for (Object entry : array) {
|
||||
retArray.add(project(source.valueSchema(), entry, target.valueSchema()));
|
||||
}
|
||||
return retArray;
|
||||
}
|
||||
|
||||
private static Object projectMap(Schema source, Object record, Schema target) throws SchemaProjectorException {
|
||||
Map<?, ?> map = (Map<?, ?>) record;
|
||||
Map<Object, Object> retMap = new HashMap<>();
|
||||
for (Map.Entry<?, ?> entry : map.entrySet()) {
|
||||
Object key = entry.getKey();
|
||||
Object value = entry.getValue();
|
||||
Object retKey = project(source.keySchema(), key, target.keySchema());
|
||||
Object retValue = project(source.valueSchema(), value, target.valueSchema());
|
||||
retMap.put(retKey, retValue);
|
||||
}
|
||||
return retMap;
|
||||
}
|
||||
|
||||
private static Object projectPrimitive(Schema source, Object record, Schema target) throws SchemaProjectorException {
|
||||
assert source.type().isPrimitive();
|
||||
assert target.type().isPrimitive();
|
||||
Object result;
|
||||
if (isPromotable(source.type(), target.type()) && record instanceof Number) {
|
||||
Number numberRecord = (Number) record;
|
||||
switch (target.type()) {
|
||||
case INT8:
|
||||
result = numberRecord.byteValue();
|
||||
break;
|
||||
case INT16:
|
||||
result = numberRecord.shortValue();
|
||||
break;
|
||||
case INT32:
|
||||
result = numberRecord.intValue();
|
||||
break;
|
||||
case INT64:
|
||||
result = numberRecord.longValue();
|
||||
break;
|
||||
case FLOAT32:
|
||||
result = numberRecord.floatValue();
|
||||
break;
|
||||
case FLOAT64:
|
||||
result = numberRecord.doubleValue();
|
||||
break;
|
||||
default:
|
||||
throw new SchemaProjectorException("Not promotable type.");
|
||||
}
|
||||
} else {
|
||||
result = record;
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
private static boolean isPromotable(Type sourceType, Type targetType) {
|
||||
return promotable.contains(new AbstractMap.SimpleImmutableEntry<>(sourceType, targetType));
|
||||
}
|
||||
}
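A hedged usage sketch for the projector (not part of the patch), exercising both numeric promotion and the optional-to-required rule described in project():

import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder;
import org.apache.kafka.connect.data.SchemaProjector;

public class SchemaProjectorExample {
    public static void main(String[] args) {
        Schema source = Schema.OPTIONAL_INT32_SCHEMA;
        // The target is required, so it must carry a default value for null source records.
        Schema target = SchemaBuilder.int64().defaultValue(0L).build();

        // INT32 -> INT64 is a promotable pair, so the value is widened.
        System.out.println(SchemaProjector.project(source, 7, target));    // 7 (as a Long)

        // A null record falls back to the target schema's default value.
        System.out.println(SchemaProjector.project(source, null, target)); // 0
    }
}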
|
||||
@@ -0,0 +1,287 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.data;
|
||||
|
||||
import org.apache.kafka.connect.errors.DataException;
|
||||
|
||||
import java.nio.ByteBuffer;
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Objects;
|
||||
|
||||
/**
|
||||
* <p>
|
||||
* A structured record containing a set of named fields with values, each field using an independent {@link Schema}.
|
||||
* Struct objects must specify a complete {@link Schema} up front, and only fields specified in the Schema may be set.
|
||||
* </p>
|
||||
* <p>
|
||||
* The Struct's {@link #put(String, Object)} method returns the Struct itself to provide a fluent API for constructing
|
||||
* complete objects:
|
||||
* <pre>
|
||||
* Schema schema = SchemaBuilder.struct().name("com.example.Person")
|
||||
* .field("name", Schema.STRING_SCHEMA).field("age", Schema.INT32_SCHEMA).build()
|
||||
* Struct struct = new Struct(schema).put("name", "Bobby McGee").put("age", 21)
|
||||
* </pre>
|
||||
* </p>
|
||||
*/
|
||||
public class Struct {
|
||||
|
||||
private final Schema schema;
|
||||
private final Object[] values;
|
||||
|
||||
/**
|
||||
* Create a new Struct for this {@link Schema}
|
||||
* @param schema the {@link Schema} for the Struct
|
||||
*/
|
||||
public Struct(Schema schema) {
|
||||
if (schema.type() != Schema.Type.STRUCT)
|
||||
throw new DataException("Not a struct schema: " + schema);
|
||||
this.schema = schema;
|
||||
this.values = new Object[schema.fields().size()];
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the schema for this Struct.
|
||||
* @return the Struct's schema
|
||||
*/
|
||||
public Schema schema() {
|
||||
return schema;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the value of a field, returning the default value if no value has been set yet and a default value is specified
|
||||
* in the field's schema. Because this handles fields of all types, the value is returned as an {@link Object} and
|
||||
* must be cast to a more specific type.
|
||||
* @param fieldName the field name to lookup
|
||||
* @return the value for the field
|
||||
*/
|
||||
public Object get(String fieldName) {
|
||||
Field field = lookupField(fieldName);
|
||||
return get(field);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the value of a field, returning the default value if no value has been set yet and a default value is specified
|
||||
* in the field's schema. Because this handles fields of all types, the value is returned as an {@link Object} and
|
||||
* must be cast to a more specific type.
|
||||
* @param field the field to lookup
|
||||
* @return the value for the field
|
||||
*/
|
||||
public Object get(Field field) {
|
||||
Object val = values[field.index()];
|
||||
if (val == null && field.schema().defaultValue() != null) {
|
||||
val = field.schema().defaultValue();
|
||||
}
|
||||
return val;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the underlying raw value for the field without accounting for default values.
|
||||
* @param fieldName the field to get the value of
|
||||
* @return the raw value
|
||||
*/
|
||||
public Object getWithoutDefault(String fieldName) {
|
||||
Field field = lookupField(fieldName);
|
||||
return values[field.index()];
|
||||
}
|
||||
|
||||
// Note that all getters have to have boxed return types since the fields might be optional
|
||||
|
||||
/**
|
||||
* Equivalent to calling {@link #get(String)} and casting the result to a Byte.
|
||||
*/
|
||||
public Byte getInt8(String fieldName) {
|
||||
return (Byte) getCheckType(fieldName, Schema.Type.INT8);
|
||||
}
|
||||
|
||||
/**
|
||||
* Equivalent to calling {@link #get(String)} and casting the result to a Short.
|
||||
*/
|
||||
public Short getInt16(String fieldName) {
|
||||
return (Short) getCheckType(fieldName, Schema.Type.INT16);
|
||||
}
|
||||
|
||||
/**
|
||||
* Equivalent to calling {@link #get(String)} and casting the result to an Integer.
|
||||
*/
|
||||
public Integer getInt32(String fieldName) {
|
||||
return (Integer) getCheckType(fieldName, Schema.Type.INT32);
|
||||
}
|
||||
|
||||
/**
|
||||
* Equivalent to calling {@link #get(String)} and casting the result to a Long.
|
||||
*/
|
||||
public Long getInt64(String fieldName) {
|
||||
return (Long) getCheckType(fieldName, Schema.Type.INT64);
|
||||
}
|
||||
|
||||
/**
|
||||
* Equivalent to calling {@link #get(String)} and casting the result to a Float.
|
||||
*/
|
||||
public Float getFloat32(String fieldName) {
|
||||
return (Float) getCheckType(fieldName, Schema.Type.FLOAT32);
|
||||
}
|
||||
|
||||
/**
|
||||
* Equivalent to calling {@link #get(String)} and casting the result to a Double.
|
||||
*/
|
||||
public Double getFloat64(String fieldName) {
|
||||
return (Double) getCheckType(fieldName, Schema.Type.FLOAT64);
|
||||
}
|
||||
|
||||
/**
|
||||
* Equivalent to calling {@link #get(String)} and casting the result to a Boolean.
|
||||
*/
|
||||
public Boolean getBoolean(String fieldName) {
|
||||
return (Boolean) getCheckType(fieldName, Schema.Type.BOOLEAN);
|
||||
}
|
||||
|
||||
/**
|
||||
* Equivalent to calling {@link #get(String)} and casting the result to a String.
|
||||
*/
|
||||
public String getString(String fieldName) {
|
||||
return (String) getCheckType(fieldName, Schema.Type.STRING);
|
||||
}
|
||||
|
||||
/**
|
||||
* Equivalent to calling {@link #get(String)} and casting the result to a byte[].
|
||||
*/
|
||||
public byte[] getBytes(String fieldName) {
|
||||
Object bytes = getCheckType(fieldName, Schema.Type.BYTES);
|
||||
if (bytes instanceof ByteBuffer)
|
||||
return ((ByteBuffer) bytes).array();
|
||||
return (byte[]) bytes;
|
||||
}
|
||||
|
||||
/**
|
||||
* Equivalent to calling {@link #get(String)} and casting the result to a List.
|
||||
*/
|
||||
@SuppressWarnings("unchecked")
|
||||
public <T> List<T> getArray(String fieldName) {
|
||||
return (List<T>) getCheckType(fieldName, Schema.Type.ARRAY);
|
||||
}
|
||||
|
||||
/**
|
||||
* Equivalent to calling {@link #get(String)} and casting the result to a Map.
|
||||
*/
|
||||
@SuppressWarnings("unchecked")
|
||||
public <K, V> Map<K, V> getMap(String fieldName) {
|
||||
return (Map<K, V>) getCheckType(fieldName, Schema.Type.MAP);
|
||||
}
|
||||
|
||||
/**
|
||||
* Equivalent to calling {@link #get(String)} and casting the result to a Struct.
|
||||
*/
|
||||
public Struct getStruct(String fieldName) {
|
||||
return (Struct) getCheckType(fieldName, Schema.Type.STRUCT);
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the value of a field. Validates the value, throwing a {@link DataException} if it does not match the field's
|
||||
* {@link Schema}.
|
||||
* @param fieldName the name of the field to set
|
||||
* @param value the value of the field
|
||||
* @return the Struct, to allow chaining of {@link #put(String, Object)} calls
|
||||
*/
|
||||
public Struct put(String fieldName, Object value) {
|
||||
Field field = lookupField(fieldName);
|
||||
return put(field, value);
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the value of a field. Validates the value, throwing a {@link DataException} if it does not match the field's
|
||||
* {@link Schema}.
|
||||
* @param field the field to set
|
||||
* @param value the value of the field
|
||||
* @return the Struct, to allow chaining of {@link #put(String, Object)} calls
|
||||
*/
|
||||
public Struct put(Field field, Object value) {
|
||||
if (null == field)
|
||||
throw new DataException("field cannot be null.");
|
||||
ConnectSchema.validateValue(field.name(), field.schema(), value);
|
||||
values[field.index()] = value;
|
||||
return this;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Validates that this struct has filled in all the necessary data with valid values. For required fields
|
||||
* without defaults, this validates that a value has been set and has matching types/schemas. If any validation
|
||||
* fails, throws a DataException.
|
||||
*/
|
||||
public void validate() {
|
||||
for (Field field : schema.fields()) {
|
||||
Schema fieldSchema = field.schema();
|
||||
Object value = values[field.index()];
|
||||
if (value == null && (fieldSchema.isOptional() || fieldSchema.defaultValue() != null))
|
||||
continue;
|
||||
ConnectSchema.validateValue(field.name(), fieldSchema, value);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (this == o) return true;
|
||||
if (o == null || getClass() != o.getClass()) return false;
|
||||
Struct struct = (Struct) o;
|
||||
return Objects.equals(schema, struct.schema) &&
|
||||
Arrays.deepEquals(values, struct.values);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return Objects.hash(schema, Arrays.deepHashCode(values));
|
||||
}
|
||||
|
||||
private Field lookupField(String fieldName) {
|
||||
Field field = schema.field(fieldName);
|
||||
if (field == null)
|
||||
throw new DataException(fieldName + " is not a valid field name");
|
||||
return field;
|
||||
}
|
||||
|
||||
// Get the field's value, but also check that the field matches the specified type, throwing an exception if it doesn't.
|
||||
// Used to implement the get*() methods that return typed data instead of Object
|
||||
private Object getCheckType(String fieldName, Schema.Type type) {
|
||||
Field field = lookupField(fieldName);
|
||||
if (field.schema().type() != type)
|
||||
throw new DataException("Field '" + fieldName + "' is not of type " + type);
|
||||
return values[field.index()];
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
final StringBuilder sb = new StringBuilder("Struct{");
|
||||
boolean first = true;
|
||||
for (int i = 0; i < values.length; i++) {
|
||||
final Object value = values[i];
|
||||
if (value != null) {
|
||||
final Field field = schema.fields().get(i);
|
||||
if (first) {
|
||||
first = false;
|
||||
} else {
|
||||
sb.append(",");
|
||||
}
|
||||
sb.append(field.name()).append("=").append(value);
|
||||
}
|
||||
}
|
||||
return sb.append("}").toString();
|
||||
}
|
||||
|
||||
}
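A hedged sketch (not part of the patch) adding the typed getters and validate() to the fluent put() example from the class Javadoc:

import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder;
import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.errors.DataException;

public class StructExample {
    public static void main(String[] args) {
        Schema schema = SchemaBuilder.struct().name("com.example.Person")
                .field("name", Schema.STRING_SCHEMA)
                .field("age", Schema.OPTIONAL_INT32_SCHEMA)
                .build();

        Struct person = new Struct(schema).put("name", "Bobby McGee");

        person.validate();                             // passes: "age" is optional and may stay unset
        System.out.println(person.getString("name"));  // Bobby McGee
        System.out.println(person.getInt32("age"));    // null (no value set, no default)

        // Typed getters check the field's schema type and reject mismatches.
        try {
            person.getInt64("name");
        } catch (DataException e) {
            System.out.println("rejected: " + e.getMessage());
        }
    }
}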
|
||||
|
||||
@@ -0,0 +1,76 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.data;
|
||||
|
||||
import org.apache.kafka.connect.errors.DataException;
|
||||
|
||||
import java.util.Calendar;
|
||||
import java.util.TimeZone;
|
||||
|
||||
/**
|
||||
* <p>
|
||||
* A time representing a specific point in a day, not tied to any specific date. The corresponding Java type is a
|
||||
* java.util.Date where only hours, minutes, seconds, and milliseconds can be non-zero. This effectively makes it a
|
||||
* point in time during the first day after the Unix epoch. The underlying representation is an integer
|
||||
* representing the number of milliseconds after midnight.
|
||||
* </p>
|
||||
*/
|
||||
public class Time {
|
||||
public static final String LOGICAL_NAME = "org.apache.kafka.connect.data.Time";
|
||||
|
||||
private static final long MILLIS_PER_DAY = 24 * 60 * 60 * 1000;
|
||||
|
||||
private static final TimeZone UTC = TimeZone.getTimeZone("UTC");
|
||||
|
||||
/**
|
||||
* Returns a SchemaBuilder for a Time. By returning a SchemaBuilder you can override additional schema settings such
|
||||
* as required/optional, default value, and documentation.
|
||||
* @return a SchemaBuilder
|
||||
*/
|
||||
public static SchemaBuilder builder() {
|
||||
return SchemaBuilder.int32()
|
||||
.name(LOGICAL_NAME)
|
||||
.version(1);
|
||||
}
|
||||
|
||||
public static final Schema SCHEMA = builder().schema();
|
||||
|
||||
/**
|
||||
* Convert a value from its logical format (Time) to its encoded format.
|
||||
* @param value the logical value
|
||||
* @return the encoded value
|
||||
*/
|
||||
public static int fromLogical(Schema schema, java.util.Date value) {
|
||||
if (!(LOGICAL_NAME.equals(schema.name())))
|
||||
throw new DataException("Requested conversion of Time object but the schema does not match.");
|
||||
Calendar calendar = Calendar.getInstance(UTC);
|
||||
calendar.setTime(value);
|
||||
long unixMillis = calendar.getTimeInMillis();
|
||||
if (unixMillis < 0 || unixMillis > MILLIS_PER_DAY) {
|
||||
throw new DataException("Kafka Connect Time type should not have any date fields set to non-zero values.");
|
||||
}
|
||||
return (int) unixMillis;
|
||||
}
|
||||
|
||||
public static java.util.Date toLogical(Schema schema, int value) {
|
||||
if (!(LOGICAL_NAME.equals(schema.name())))
|
||||
throw new DataException("Requested conversion of Date object but the schema does not match.");
|
||||
if (value < 0 || value > MILLIS_PER_DAY)
|
||||
throw new DataException("Time values must use number of milliseconds greater than 0 and less than 86400000");
|
||||
return new java.util.Date(value);
|
||||
}
|
||||
}
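A hedged round-trip sketch for the Time logical type (not part of the patch):

import org.apache.kafka.connect.data.Time;

import java.util.Calendar;
import java.util.TimeZone;

public class TimeExample {
    public static void main(String[] args) {
        // 10:15:30 UTC on the epoch day (1970-01-01), the only date Time permits.
        Calendar cal = Calendar.getInstance(TimeZone.getTimeZone("UTC"));
        cal.clear();
        cal.set(1970, Calendar.JANUARY, 1, 10, 15, 30);

        int encoded = Time.fromLogical(Time.SCHEMA, cal.getTime());
        System.out.println(encoded);                                                     // 36930000 ms past midnight
        System.out.println(Time.toLogical(Time.SCHEMA, encoded).equals(cal.getTime()));  // true
    }
}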
|
||||
@@ -0,0 +1,59 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.data;
|
||||
|
||||
import org.apache.kafka.connect.errors.DataException;
|
||||
|
||||
/**
|
||||
* <p>
|
||||
* A timestamp representing an absolute time, without timezone information. The corresponding Java type is a
|
||||
* java.util.Date. The underlying representation is a long representing the number of milliseconds since Unix epoch.
|
||||
* </p>
|
||||
*/
|
||||
public class Timestamp {
|
||||
public static final String LOGICAL_NAME = "org.apache.kafka.connect.data.Timestamp";
|
||||
|
||||
/**
|
||||
* Returns a SchemaBuilder for a Timestamp. By returning a SchemaBuilder you can override additional schema settings such
|
||||
* as required/optional, default value, and documentation.
|
||||
* @return a SchemaBuilder
|
||||
*/
|
||||
public static SchemaBuilder builder() {
|
||||
return SchemaBuilder.int64()
|
||||
.name(LOGICAL_NAME)
|
||||
.version(1);
|
||||
}
|
||||
|
||||
public static final Schema SCHEMA = builder().schema();
|
||||
|
||||
/**
|
||||
* Convert a value from its logical format (Date) to its encoded format.
|
||||
* @param value the logical value
|
||||
* @return the encoded value
|
||||
*/
|
||||
public static long fromLogical(Schema schema, java.util.Date value) {
|
||||
if (!(LOGICAL_NAME.equals(schema.name())))
|
||||
throw new DataException("Requested conversion of Timestamp object but the schema does not match.");
|
||||
return value.getTime();
|
||||
}
|
||||
|
||||
public static java.util.Date toLogical(Schema schema, long value) {
|
||||
if (!(LOGICAL_NAME.equals(schema.name())))
|
||||
throw new DataException("Requested conversion of Timestamp object but the schema does not match.");
|
||||
return new java.util.Date(value);
|
||||
}
|
||||
}
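And the corresponding sketch for Timestamp (not part of the patch), which simply carries epoch milliseconds:

import org.apache.kafka.connect.data.Timestamp;

import java.util.Date;

public class TimestampExample {
    public static void main(String[] args) {
        Date now = new Date();
        long encoded = Timestamp.fromLogical(Timestamp.SCHEMA, now);
        System.out.println(encoded == now.getTime());                                    // true
        System.out.println(Timestamp.toLogical(Timestamp.SCHEMA, encoded).equals(now));  // true
    }
}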
|
||||
1259
connect/api/src/main/java/org/apache/kafka/connect/data/Values.java
Normal file
File diff suppressed because it is too large
@@ -0,0 +1,34 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.errors;
|
||||
|
||||
/**
|
||||
* Indicates the operation tried to create an entity that already exists.
|
||||
*/
|
||||
public class AlreadyExistsException extends ConnectException {
|
||||
public AlreadyExistsException(String s) {
|
||||
super(s);
|
||||
}
|
||||
|
||||
public AlreadyExistsException(String s, Throwable throwable) {
|
||||
super(s, throwable);
|
||||
}
|
||||
|
||||
public AlreadyExistsException(Throwable throwable) {
|
||||
super(throwable);
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,37 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.errors;
|
||||
|
||||
import org.apache.kafka.common.KafkaException;
|
||||
|
||||
/**
|
||||
* ConnectException is the top-level exception type generated by Kafka Connect and connector implementations.
|
||||
*/
|
||||
public class ConnectException extends KafkaException {
|
||||
|
||||
public ConnectException(String s) {
|
||||
super(s);
|
||||
}
|
||||
|
||||
public ConnectException(String s, Throwable throwable) {
|
||||
super(s, throwable);
|
||||
}
|
||||
|
||||
public ConnectException(Throwable throwable) {
|
||||
super(throwable);
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,34 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.errors;
|
||||
|
||||
/**
|
||||
* Base class for all Kafka Connect data API exceptions.
|
||||
*/
|
||||
public class DataException extends ConnectException {
|
||||
public DataException(String s) {
|
||||
super(s);
|
||||
}
|
||||
|
||||
public DataException(String s, Throwable throwable) {
|
||||
super(s, throwable);
|
||||
}
|
||||
|
||||
public DataException(Throwable throwable) {
|
||||
super(throwable);
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,34 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.errors;
|
||||
|
||||
/**
|
||||
* Indicates that a method has been invoked illegally or at an invalid time by a connector or task.
|
||||
*/
|
||||
public class IllegalWorkerStateException extends ConnectException {
|
||||
public IllegalWorkerStateException(String s) {
|
||||
super(s);
|
||||
}
|
||||
|
||||
public IllegalWorkerStateException(String s, Throwable throwable) {
|
||||
super(s, throwable);
|
||||
}
|
||||
|
||||
public IllegalWorkerStateException(Throwable throwable) {
|
||||
super(throwable);
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,34 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.errors;
|
||||
|
||||
/**
|
||||
* Indicates that an operation attempted to modify or delete a connector or task that is not present on the worker.
|
||||
*/
|
||||
public class NotFoundException extends ConnectException {
|
||||
public NotFoundException(String s) {
|
||||
super(s);
|
||||
}
|
||||
|
||||
public NotFoundException(String s, Throwable throwable) {
|
||||
super(s, throwable);
|
||||
}
|
||||
|
||||
public NotFoundException(Throwable throwable) {
|
||||
super(throwable);
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,34 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.errors;
|
||||
|
||||
/**
|
||||
* An exception that indicates the operation can be reattempted.
|
||||
*/
|
||||
public class RetriableException extends ConnectException {
|
||||
public RetriableException(String s) {
|
||||
super(s);
|
||||
}
|
||||
|
||||
public RetriableException(String s, Throwable throwable) {
|
||||
super(s, throwable);
|
||||
}
|
||||
|
||||
public RetriableException(Throwable throwable) {
|
||||
super(throwable);
|
||||
}
|
||||
}
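The retriable/non-retriable split matters most inside connector tasks. A hedged sketch of the intended usage (not part of the patch; flushToExternalSystem is a hypothetical helper):

import org.apache.kafka.connect.errors.ConnectException;
import org.apache.kafka.connect.errors.RetriableException;

import java.net.SocketTimeoutException;
import java.util.Collection;

public class RetryExample {
    void flush(Collection<Object> records) {
        try {
            flushToExternalSystem(records);  // hypothetical external write
        } catch (SocketTimeoutException e) {
            // Transient: signal that the framework may retry the operation later.
            throw new RetriableException("Timed out flushing records", e);
        } catch (Exception e) {
            // Permanent: fail the task.
            throw new ConnectException("Unrecoverable error while flushing records", e);
        }
    }

    private void flushToExternalSystem(Collection<Object> records) throws SocketTimeoutException {
        // hypothetical external write, elided
    }
}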
|
||||
@@ -0,0 +1,31 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.errors;
|
||||
|
||||
public class SchemaBuilderException extends DataException {
|
||||
public SchemaBuilderException(String s) {
|
||||
super(s);
|
||||
}
|
||||
|
||||
public SchemaBuilderException(String s, Throwable throwable) {
|
||||
super(s, throwable);
|
||||
}
|
||||
|
||||
public SchemaBuilderException(Throwable throwable) {
|
||||
super(throwable);
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,31 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.errors;
|
||||
|
||||
public class SchemaProjectorException extends DataException {
|
||||
public SchemaProjectorException(String s) {
|
||||
super(s);
|
||||
}
|
||||
|
||||
public SchemaProjectorException(String s, Throwable throwable) {
|
||||
super(s, throwable);
|
||||
}
|
||||
|
||||
public SchemaProjectorException(Throwable throwable) {
|
||||
super(throwable);
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,97 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.header;
|
||||
|
||||
import org.apache.kafka.connect.data.Schema;
|
||||
import org.apache.kafka.connect.data.SchemaAndValue;
|
||||
import org.apache.kafka.connect.data.Struct;
|
||||
|
||||
import java.util.Objects;
|
||||
|
||||
/**
|
||||
* A {@link Header} implementation.
|
||||
*/
|
||||
class ConnectHeader implements Header {
|
||||
|
||||
private static final SchemaAndValue NULL_SCHEMA_AND_VALUE = new SchemaAndValue(null, null);
|
||||
|
||||
private final String key;
|
||||
private final SchemaAndValue schemaAndValue;
|
||||
|
||||
protected ConnectHeader(String key, SchemaAndValue schemaAndValue) {
|
||||
Objects.requireNonNull(key, "Null header keys are not permitted");
|
||||
this.key = key;
|
||||
this.schemaAndValue = schemaAndValue != null ? schemaAndValue : NULL_SCHEMA_AND_VALUE;
|
||||
assert this.schemaAndValue != null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String key() {
|
||||
return key;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Object value() {
|
||||
return schemaAndValue.value();
|
||||
}
|
||||
|
||||
@Override
|
||||
public Schema schema() {
|
||||
Schema schema = schemaAndValue.schema();
|
||||
if (schema == null && value() instanceof Struct) {
|
||||
schema = ((Struct) value()).schema();
|
||||
}
|
||||
return schema;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Header rename(String key) {
|
||||
Objects.requireNonNull(key, "Null header keys are not permitted");
|
||||
if (this.key.equals(key)) {
|
||||
return this;
|
||||
}
|
||||
return new ConnectHeader(key, schemaAndValue);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Header with(Schema schema, Object value) {
|
||||
return new ConnectHeader(key, new SchemaAndValue(schema, value));
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return Objects.hash(key, schemaAndValue);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object obj) {
|
||||
if (obj == this) {
|
||||
return true;
|
||||
}
|
||||
if (obj instanceof Header) {
|
||||
Header that = (Header) obj;
|
||||
return Objects.equals(this.key, that.key()) && Objects.equals(this.schema(), that.schema()) && Objects.equals(this.value(),
|
||||
that.value());
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "ConnectHeader(key=" + key + ", value=" + value() + ", schema=" + schema() + ")";
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,496 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.header;
|
||||
|
||||
import org.apache.kafka.common.utils.AbstractIterator;
|
||||
import org.apache.kafka.connect.data.Date;
|
||||
import org.apache.kafka.connect.data.Decimal;
|
||||
import org.apache.kafka.connect.data.Schema;
|
||||
import org.apache.kafka.connect.data.Schema.Type;
|
||||
import org.apache.kafka.connect.data.SchemaAndValue;
|
||||
import org.apache.kafka.connect.data.Struct;
|
||||
import org.apache.kafka.connect.data.Time;
|
||||
import org.apache.kafka.connect.data.Timestamp;
|
||||
import org.apache.kafka.connect.errors.DataException;
|
||||
|
||||
import java.math.BigDecimal;
|
||||
import java.nio.ByteBuffer;
|
||||
import java.util.Collections;
|
||||
import java.util.HashSet;
|
||||
import java.util.Iterator;
|
||||
import java.util.LinkedList;
|
||||
import java.util.List;
|
||||
import java.util.ListIterator;
|
||||
import java.util.Map;
|
||||
import java.util.Objects;
|
||||
import java.util.Set;
|
||||
|
||||
/**
|
||||
* A basic {@link Headers} implementation.
|
||||
*/
|
||||
public class ConnectHeaders implements Headers {
|
||||
|
||||
private static final int EMPTY_HASH = Objects.hash(new LinkedList<>());
|
||||
|
||||
private LinkedList<Header> headers;
|
||||
|
||||
public ConnectHeaders() {
|
||||
}
|
||||
|
||||
public ConnectHeaders(Iterable<Header> original) {
|
||||
if (original == null) {
|
||||
return;
|
||||
}
|
||||
if (original instanceof ConnectHeaders) {
|
||||
ConnectHeaders originalHeaders = (ConnectHeaders) original;
|
||||
if (!originalHeaders.isEmpty()) {
|
||||
headers = new LinkedList<>(originalHeaders.headers);
|
||||
}
|
||||
} else {
|
||||
headers = new LinkedList<>();
|
||||
for (Header header : original) {
|
||||
headers.add(header);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public int size() {
|
||||
return headers == null ? 0 : headers.size();
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isEmpty() {
|
||||
return headers == null || headers.isEmpty();
|
||||
}
|
||||
|
||||
@Override
|
||||
public Headers clear() {
|
||||
if (headers != null) {
|
||||
headers.clear();
|
||||
}
|
||||
return this;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Headers add(Header header) {
|
||||
Objects.requireNonNull(header, "Unable to add a null header.");
|
||||
if (headers == null) {
|
||||
headers = new LinkedList<>();
|
||||
}
|
||||
headers.add(header);
|
||||
return this;
|
||||
}
|
||||
|
||||
protected Headers addWithoutValidating(String key, Object value, Schema schema) {
|
||||
return add(new ConnectHeader(key, new SchemaAndValue(schema, value)));
|
||||
}
|
||||
|
||||
@Override
|
||||
public Headers add(String key, SchemaAndValue schemaAndValue) {
|
||||
checkSchemaMatches(schemaAndValue);
|
||||
return add(new ConnectHeader(key, schemaAndValue != null ? schemaAndValue : SchemaAndValue.NULL));
|
||||
}
|
||||
|
||||
@Override
|
||||
public Headers add(String key, Object value, Schema schema) {
|
||||
return add(key, value != null || schema != null ? new SchemaAndValue(schema, value) : SchemaAndValue.NULL);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Headers addString(String key, String value) {
|
||||
return addWithoutValidating(key, value, value != null ? Schema.STRING_SCHEMA : Schema.OPTIONAL_STRING_SCHEMA);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Headers addBytes(String key, byte[] value) {
|
||||
return addWithoutValidating(key, value, value != null ? Schema.BYTES_SCHEMA : Schema.OPTIONAL_BYTES_SCHEMA);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Headers addBoolean(String key, boolean value) {
|
||||
return addWithoutValidating(key, value, Schema.BOOLEAN_SCHEMA);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Headers addByte(String key, byte value) {
|
||||
return addWithoutValidating(key, value, Schema.INT8_SCHEMA);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Headers addShort(String key, short value) {
|
||||
return addWithoutValidating(key, value, Schema.INT16_SCHEMA);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Headers addInt(String key, int value) {
|
||||
return addWithoutValidating(key, value, Schema.INT32_SCHEMA);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Headers addLong(String key, long value) {
|
||||
return addWithoutValidating(key, value, Schema.INT64_SCHEMA);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Headers addFloat(String key, float value) {
|
||||
return addWithoutValidating(key, value, Schema.FLOAT32_SCHEMA);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Headers addDouble(String key, double value) {
|
||||
return addWithoutValidating(key, value, Schema.FLOAT64_SCHEMA);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Headers addList(String key, List<?> value, Schema schema) {
|
||||
if (value == null) {
|
||||
return add(key, null, null);
|
||||
}
|
||||
checkSchemaType(schema, Type.ARRAY);
|
||||
return addWithoutValidating(key, value, schema);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Headers addMap(String key, Map<?, ?> value, Schema schema) {
|
||||
if (value == null) {
|
||||
return add(key, null, null);
|
||||
}
|
||||
checkSchemaType(schema, Type.MAP);
|
||||
return addWithoutValidating(key, value, schema);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Headers addStruct(String key, Struct value) {
|
||||
if (value == null) {
|
||||
return add(key, null, null);
|
||||
}
|
||||
checkSchemaType(value.schema(), Type.STRUCT);
|
||||
return addWithoutValidating(key, value, value.schema());
|
||||
}
|
||||
|
||||
@Override
|
||||
public Headers addDecimal(String key, BigDecimal value) {
|
||||
if (value == null) {
|
||||
return add(key, null, null);
|
||||
}
|
||||
// Check that this is a decimal ...
|
||||
Schema schema = Decimal.schema(value.scale());
|
||||
Decimal.fromLogical(schema, value);
|
||||
return addWithoutValidating(key, value, schema);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Headers addDate(String key, java.util.Date value) {
|
||||
if (value != null) {
|
||||
// Check that this is a date ...
|
||||
Date.fromLogical(Date.SCHEMA, value);
|
||||
}
|
||||
return addWithoutValidating(key, value, Date.SCHEMA);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Headers addTime(String key, java.util.Date value) {
|
||||
if (value != null) {
|
||||
// Check that this is a time ...
|
||||
Time.fromLogical(Time.SCHEMA, value);
|
||||
}
|
||||
return addWithoutValidating(key, value, Time.SCHEMA);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Headers addTimestamp(String key, java.util.Date value) {
|
||||
if (value != null) {
|
||||
// Check that this is a timestamp ...
|
||||
Timestamp.fromLogical(Timestamp.SCHEMA, value);
|
||||
}
|
||||
return addWithoutValidating(key, value, Timestamp.SCHEMA);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Header lastWithName(String key) {
|
||||
checkKey(key);
|
||||
if (headers != null) {
|
||||
ListIterator<Header> iter = headers.listIterator(headers.size());
|
||||
while (iter.hasPrevious()) {
|
||||
Header header = iter.previous();
|
||||
if (key.equals(header.key())) {
|
||||
return header;
|
||||
}
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Iterator<Header> allWithName(String key) {
|
||||
return new FilterByKeyIterator(iterator(), key);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Iterator<Header> iterator() {
|
||||
return headers == null ? Collections.emptyIterator() :
|
||||
headers.iterator();
|
||||
}
|
||||
|
||||
@Override
|
||||
public Headers remove(String key) {
|
||||
checkKey(key);
|
||||
if (!isEmpty()) {
|
||||
Iterator<Header> iterator = iterator();
|
||||
while (iterator.hasNext()) {
|
||||
if (iterator.next().key().equals(key)) {
|
||||
iterator.remove();
|
||||
}
|
||||
}
|
||||
}
|
||||
return this;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Headers retainLatest() {
|
||||
if (!isEmpty()) {
|
||||
Set<String> keys = new HashSet<>();
|
||||
ListIterator<Header> iter = headers.listIterator(headers.size());
|
||||
while (iter.hasPrevious()) {
|
||||
Header header = iter.previous();
|
||||
String key = header.key();
|
||||
if (!keys.add(key)) {
|
||||
iter.remove();
|
||||
}
|
||||
}
|
||||
}
|
||||
return this;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Headers retainLatest(String key) {
|
||||
checkKey(key);
|
||||
if (!isEmpty()) {
|
||||
boolean found = false;
|
||||
ListIterator<Header> iter = headers.listIterator(headers.size());
|
||||
while (iter.hasPrevious()) {
|
||||
String headerKey = iter.previous().key();
|
||||
if (key.equals(headerKey)) {
|
||||
if (found)
|
||||
iter.remove();
|
||||
found = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
return this;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Headers apply(String key, HeaderTransform transform) {
|
||||
checkKey(key);
|
||||
if (!isEmpty()) {
|
||||
ListIterator<Header> iter = headers.listIterator();
|
||||
while (iter.hasNext()) {
|
||||
Header orig = iter.next();
|
||||
if (orig.key().equals(key)) {
|
||||
Header updated = transform.apply(orig);
|
||||
if (updated != null) {
|
||||
iter.set(updated);
|
||||
} else {
|
||||
iter.remove();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return this;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Headers apply(HeaderTransform transform) {
|
||||
if (!isEmpty()) {
|
||||
ListIterator<Header> iter = headers.listIterator();
|
||||
while (iter.hasNext()) {
|
||||
Header orig = iter.next();
|
||||
Header updated = transform.apply(orig);
|
||||
if (updated != null) {
|
||||
iter.set(updated);
|
||||
} else {
|
||||
iter.remove();
|
||||
}
|
||||
}
|
||||
}
|
||||
return this;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return isEmpty() ? EMPTY_HASH : Objects.hash(headers);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object obj) {
|
||||
if (obj == this) {
|
||||
return true;
|
||||
}
|
||||
if (obj instanceof Headers) {
|
||||
Headers that = (Headers) obj;
|
||||
Iterator<Header> thisIter = this.iterator();
|
||||
Iterator<Header> thatIter = that.iterator();
|
||||
while (thisIter.hasNext() && thatIter.hasNext()) {
|
||||
if (!Objects.equals(thisIter.next(), thatIter.next()))
|
||||
return false;
|
||||
}
|
||||
return !thisIter.hasNext() && !thatIter.hasNext();
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "ConnectHeaders(headers=" + (headers != null ? headers : "") + ")";
|
||||
}
|
||||
|
||||
@Override
|
||||
public ConnectHeaders duplicate() {
|
||||
return new ConnectHeaders(this);
|
||||
}
|
||||
|
||||
/**
|
||||
* Check that the key is not null
|
||||
*
|
||||
* @param key the key; may not be null
|
||||
* @throws NullPointerException if the supplied key is null
|
||||
*/
|
||||
private void checkKey(String key) {
|
||||
Objects.requireNonNull(key, "Header key cannot be null");
|
||||
}
|
||||
|
||||
/**
|
||||
* Check the {@link Schema#type() schema's type} matches the specified type.
|
||||
*
|
||||
* @param schema the schema; never null
|
||||
* @param type the expected type
|
||||
* @throws DataException if the schema's type does not match the expected type
|
||||
*/
|
||||
private void checkSchemaType(Schema schema, Type type) {
|
||||
if (schema.type() != type) {
|
||||
throw new DataException("Expecting " + type + " but instead found " + schema.type());
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Check that the value and its schema are compatible.
|
||||
*
|
||||
* @param schemaAndValue the schema and value pair
|
||||
* @throws DataException if the schema is not compatible with the value
|
||||
*/
|
||||
// visible for testing
|
||||
void checkSchemaMatches(SchemaAndValue schemaAndValue) {
|
||||
if (schemaAndValue != null) {
|
||||
Schema schema = schemaAndValue.schema();
|
||||
if (schema == null)
|
||||
return;
|
||||
schema = schema.schema(); // in case a SchemaBuilder is used
|
||||
Object value = schemaAndValue.value();
|
||||
if (value == null && !schema.isOptional()) {
|
||||
throw new DataException("A null value requires an optional schema but was " + schema);
|
||||
}
|
||||
if (value != null) {
|
||||
switch (schema.type()) {
|
||||
case BYTES:
|
||||
if (value instanceof ByteBuffer)
|
||||
return;
|
||||
if (value instanceof byte[])
|
||||
return;
|
||||
if (value instanceof BigDecimal && Decimal.LOGICAL_NAME.equals(schema.name()))
|
||||
return;
|
||||
break;
|
||||
case STRING:
|
||||
if (value instanceof String)
|
||||
return;
|
||||
break;
|
||||
case BOOLEAN:
|
||||
if (value instanceof Boolean)
|
||||
return;
|
||||
break;
|
||||
case INT8:
|
||||
if (value instanceof Byte)
|
||||
return;
|
||||
break;
|
||||
case INT16:
|
||||
if (value instanceof Short)
|
||||
return;
|
||||
break;
|
||||
case INT32:
|
||||
if (value instanceof Integer)
|
||||
return;
|
||||
if (value instanceof java.util.Date && Date.LOGICAL_NAME.equals(schema.name()))
|
||||
return;
|
||||
if (value instanceof java.util.Date && Time.LOGICAL_NAME.equals(schema.name()))
|
||||
return;
|
||||
break;
|
||||
case INT64:
|
||||
if (value instanceof Long)
|
||||
return;
|
||||
if (value instanceof java.util.Date && Timestamp.LOGICAL_NAME.equals(schema.name()))
|
||||
return;
|
||||
break;
|
||||
case FLOAT32:
|
||||
if (value instanceof Float)
|
||||
return;
|
||||
break;
|
||||
case FLOAT64:
|
||||
if (value instanceof Double)
|
||||
return;
|
||||
break;
|
||||
case ARRAY:
|
||||
if (value instanceof List)
|
||||
return;
|
||||
break;
|
||||
case MAP:
|
||||
if (value instanceof Map)
|
||||
return;
|
||||
break;
|
||||
case STRUCT:
|
||||
if (value instanceof Struct)
|
||||
return;
|
||||
break;
|
||||
}
|
||||
throw new DataException("The value " + value + " is not compatible with the schema " + schema);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private static final class FilterByKeyIterator extends AbstractIterator<Header> {
|
||||
|
||||
private final Iterator<Header> original;
|
||||
private final String key;
|
||||
|
||||
private FilterByKeyIterator(Iterator<Header> original, String key) {
|
||||
this.original = original;
|
||||
this.key = key;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected Header makeNext() {
|
||||
while (original.hasNext()) {
|
||||
Header header = original.next();
|
||||
if (!header.key().equals(key)) {
|
||||
continue;
|
||||
}
|
||||
return header;
|
||||
}
|
||||
return this.allDone();
|
||||
}
|
||||
}
|
||||
}
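For reference, a minimal usage sketch of the ConnectHeaders class above (not part of this patch); the header keys and values are invented for illustration, and only the API shown in this file is used.

import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaAndValue;
import org.apache.kafka.connect.header.ConnectHeaders;
import org.apache.kafka.connect.header.Header;
import org.apache.kafka.connect.header.Headers;

public class ConnectHeadersUsageSketch {
    public static void main(String[] args) {
        Headers headers = new ConnectHeaders();

        // The typed add methods pick the matching primitive schema automatically.
        headers.addString("source", "orders-db")
               .addInt("attempt", 1)
               .addInt("attempt", 2); // duplicate keys are allowed

        // A null value must be paired with an optional (or null) schema,
        // otherwise checkSchemaMatches throws a DataException.
        headers.add("shard", new SchemaAndValue(Schema.OPTIONAL_STRING_SCHEMA, null));

        // lastWithName returns the most recently added header for the key.
        Header lastAttempt = headers.lastWithName("attempt");
        System.out.println(lastAttempt.value()); // prints 2

        // retainLatest drops all but the last header for each key.
        headers.retainLatest();
        System.out.println(headers.size()); // prints 3 (source, attempt, shard)
    }
}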
@@ -0,0 +1,66 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.header;
|
||||
|
||||
import org.apache.kafka.connect.data.Schema;
|
||||
|
||||
/**
|
||||
* A {@link Header} is a key-value pair, and multiple headers can be included with the key, value, and timestamp in each Kafka message.
|
||||
* If the value contains schema information, then the header will have a non-null {@link #schema() schema}.
|
||||
* <p>
|
||||
* This is an immutable interface.
|
||||
*/
|
||||
public interface Header {
|
||||
|
||||
/**
|
||||
* The header's key, which is not necessarily unique within the set of headers on a Kafka message.
|
||||
*
|
||||
* @return the header's key; never null
|
||||
*/
|
||||
String key();
|
||||
|
||||
/**
|
||||
* Return the {@link Schema} associated with this header, if there is one. Not all headers will have schemas.
|
||||
*
|
||||
* @return the header's schema, or null if no schema is associated with this header
|
||||
*/
|
||||
Schema schema();
|
||||
|
||||
/**
|
||||
* Get the header's value as deserialized by Connect's header converter.
|
||||
*
|
||||
* @return the deserialized object representation of the header's value; may be null
|
||||
*/
|
||||
Object value();
|
||||
|
||||
/**
|
||||
* Return a new {@link Header} object that has the same key but with the supplied value.
|
||||
*
|
||||
* @param schema the schema for the new value; may be null
|
||||
* @param value the new value
|
||||
* @return the new {@link Header}; never null
|
||||
*/
|
||||
Header with(Schema schema, Object value);
|
||||
|
||||
/**
|
||||
* Return a new {@link Header} object that has the same schema and value but with the supplied key.
|
||||
*
|
||||
* @param key the key for the new header; may not be null
|
||||
* @return the new {@link Header}; never null
|
||||
*/
|
||||
Header rename(String key);
|
||||
}
|
||||
@@ -0,0 +1,308 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.header;
|
||||
|
||||
import org.apache.kafka.connect.data.Schema;
|
||||
import org.apache.kafka.connect.data.SchemaAndValue;
|
||||
import org.apache.kafka.connect.data.Struct;
|
||||
import org.apache.kafka.connect.errors.DataException;
|
||||
|
||||
import java.math.BigDecimal;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
/**
|
||||
* A mutable ordered collection of {@link Header} objects. Note that multiple headers may have the same {@link Header#key() key}.
|
||||
*/
|
||||
public interface Headers extends Iterable<Header> {
|
||||
|
||||
/**
|
||||
* Get the number of headers in this object.
|
||||
*
|
||||
* @return the number of headers; never negative
|
||||
*/
|
||||
int size();
|
||||
|
||||
/**
|
||||
* Determine whether this object has no headers.
|
||||
*
|
||||
* @return true if there are no headers, or false if there is at least one header
|
||||
*/
|
||||
boolean isEmpty();
|
||||
|
||||
/**
|
||||
* Get the collection of {@link Header} objects whose {@link Header#key() keys} all match the specified key.
|
||||
*
|
||||
* @param key the key; may not be null
|
||||
* @return the iterator over headers with the specified key; may be null if there are no headers with the specified key
|
||||
*/
|
||||
Iterator<Header> allWithName(String key);
|
||||
|
||||
/**
|
||||
* Return the last {@link Header} with the specified key.
|
||||
*
|
||||
* @param key the key for the header; may not be null
|
||||
* @return the last Header, or null if there are no headers with the specified key
|
||||
*/
|
||||
Header lastWithName(String key);
|
||||
|
||||
/**
|
||||
* Add the given {@link Header} to this collection.
|
||||
*
|
||||
* @param header the header; may not be null
|
||||
* @return this object to facilitate chaining multiple methods; never null
|
||||
*/
|
||||
Headers add(Header header);
|
||||
|
||||
/**
|
||||
* Add to this collection a {@link Header} with the given key and value.
|
||||
*
|
||||
* @param key the header's key; may not be null
|
||||
* @param schemaAndValue the {@link SchemaAndValue} for the header; may be null
|
||||
* @return this object to facilitate chaining multiple methods; never null
|
||||
*/
|
||||
Headers add(String key, SchemaAndValue schemaAndValue);
|
||||
|
||||
/**
|
||||
* Add to this collection a {@link Header} with the given key and value.
|
||||
*
|
||||
* @param key the header's key; may not be null
|
||||
* @param value the header's value; may be null
|
||||
* @param schema the schema for the header's value; may not be null if the value is not null
|
||||
* @return this object to facilitate chaining multiple methods; never null
|
||||
*/
|
||||
Headers add(String key, Object value, Schema schema);
|
||||
|
||||
/**
|
||||
* Add to this collection a {@link Header} with the given key and value.
|
||||
*
|
||||
* @param key the header's key; may not be null
|
||||
* @param value the header's value; may be null
|
||||
* @return this object to facilitate chaining multiple methods; never null
|
||||
*/
|
||||
Headers addString(String key, String value);
|
||||
|
||||
/**
|
||||
* Add to this collection a {@link Header} with the given key and value.
|
||||
*
|
||||
* @param key the header's key; may not be null
|
||||
* @param value the header's value; may be null
|
||||
* @return this object to facilitate chaining multiple methods; never null
|
||||
*/
|
||||
Headers addBoolean(String key, boolean value);
|
||||
|
||||
/**
|
||||
* Add to this collection a {@link Header} with the given key and value.
|
||||
*
|
||||
* @param key the header's key; may not be null
|
||||
* @param value the header's value; may be null
|
||||
* @return this object to facilitate chaining multiple methods; never null
|
||||
*/
|
||||
Headers addByte(String key, byte value);
|
||||
|
||||
/**
|
||||
* Add to this collection a {@link Header} with the given key and value.
|
||||
*
|
||||
* @param key the header's key; may not be null
|
||||
* @param value the header's value; may be null
|
||||
* @return this object to facilitate chaining multiple methods; never null
|
||||
*/
|
||||
Headers addShort(String key, short value);
|
||||
|
||||
/**
|
||||
* Add to this collection a {@link Header} with the given key and value.
|
||||
*
|
||||
* @param key the header's key; may not be null
|
||||
* @param value the header's value; may be null
|
||||
* @return this object to facilitate chaining multiple methods; never null
|
||||
*/
|
||||
Headers addInt(String key, int value);
|
||||
|
||||
/**
|
||||
* Add to this collection a {@link Header} with the given key and value.
|
||||
*
|
||||
* @param key the header's key; may not be null
|
||||
* @param value the header's value; may be null
|
||||
* @return this object to facilitate chaining multiple methods; never null
|
||||
*/
|
||||
Headers addLong(String key, long value);
|
||||
|
||||
/**
|
||||
* Add to this collection a {@link Header} with the given key and value.
|
||||
*
|
||||
* @param key the header's key; may not be null
|
||||
* @param value the header's value; may be null
|
||||
* @return this object to facilitate chaining multiple methods; never null
|
||||
*/
|
||||
Headers addFloat(String key, float value);
|
||||
|
||||
/**
|
||||
* Add to this collection a {@link Header} with the given key and value.
|
||||
*
|
||||
* @param key the header's key; may not be null
|
||||
* @param value the header's value; may be null
|
||||
* @return this object to facilitate chaining multiple methods; never null
|
||||
*/
|
||||
Headers addDouble(String key, double value);
|
||||
|
||||
/**
|
||||
* Add to this collection a {@link Header} with the given key and value.
|
||||
*
|
||||
* @param key the header's key; may not be null
|
||||
* @param value the header's value; may be null
|
||||
* @return this object to facilitate chaining multiple methods; never null
|
||||
*/
|
||||
Headers addBytes(String key, byte[] value);
|
||||
|
||||
/**
|
||||
* Add to this collection a {@link Header} with the given key and value.
|
||||
*
|
||||
* @param key the header's key; may not be null
|
||||
* @param value the header's value; may be null
|
||||
* @param schema the schema describing the list value; may not be null
|
||||
* @return this object to facilitate chaining multiple methods; never null
|
||||
* @throws DataException if the header's value is invalid
|
||||
*/
|
||||
Headers addList(String key, List<?> value, Schema schema);
|
||||
|
||||
/**
|
||||
* Add to this collection a {@link Header} with the given key and value.
|
||||
*
|
||||
* @param key the header's key; may not be null
|
||||
* @param value the header's value; may be null
|
||||
* @param schema the schema describing the map value; may not be null
|
||||
* @return this object to facilitate chaining multiple methods; never null
|
||||
* @throws DataException if the header's value is invalid
|
||||
*/
|
||||
Headers addMap(String key, Map<?, ?> value, Schema schema);
|
||||
|
||||
/**
|
||||
* Add to this collection a {@link Header} with the given key and value.
|
||||
*
|
||||
* @param key the header's key; may not be null
|
||||
* @param value the header's value; may be null
|
||||
* @return this object to facilitate chaining multiple methods; never null
|
||||
* @throws DataException if the header's value is invalid
|
||||
*/
|
||||
Headers addStruct(String key, Struct value);
|
||||
|
||||
/**
|
||||
* Add to this collection a {@link Header} with the given key and {@link org.apache.kafka.connect.data.Decimal} value.
|
||||
*
|
||||
* @param key the header's key; may not be null
|
||||
* @param value the header's {@link org.apache.kafka.connect.data.Decimal} value; may be null
|
||||
* @return this object to facilitate chaining multiple methods; never null
|
||||
*/
|
||||
Headers addDecimal(String key, BigDecimal value);
|
||||
|
||||
/**
|
||||
* Add to this collection a {@link Header} with the given key and {@link org.apache.kafka.connect.data.Date} value.
|
||||
*
|
||||
* @param key the header's key; may not be null
|
||||
* @param value the header's {@link org.apache.kafka.connect.data.Date} value; may be null
|
||||
* @return this object to facilitate chaining multiple methods; never null
|
||||
*/
|
||||
Headers addDate(String key, java.util.Date value);
|
||||
|
||||
/**
|
||||
* Add to this collection a {@link Header} with the given key and {@link org.apache.kafka.connect.data.Time} value.
|
||||
*
|
||||
* @param key the header's key; may not be null
|
||||
* @param value the header's {@link org.apache.kafka.connect.data.Time} value; may be null
|
||||
* @return this object to facilitate chaining multiple methods; never null
|
||||
*/
|
||||
Headers addTime(String key, java.util.Date value);
|
||||
|
||||
/**
|
||||
* Add to this collection a {@link Header} with the given key and {@link org.apache.kafka.connect.data.Timestamp} value.
|
||||
*
|
||||
* @param key the header's key; may not be null
|
||||
* @param value the header's {@link org.apache.kafka.connect.data.Timestamp} value; may be null
|
||||
* @return this object to facilitate chaining multiple methods; never null
|
||||
*/
|
||||
Headers addTimestamp(String key, java.util.Date value);
|
||||
|
||||
/**
|
||||
* Removes all {@link Header} objects whose {@link Header#key() key} matches the specified key.
|
||||
*
|
||||
* @param key the key; may not be null
|
||||
* @return this object to facilitate chaining multiple methods; never null
|
||||
*/
|
||||
Headers remove(String key);
|
||||
|
||||
/**
|
||||
* Removes all but the latest {@link Header} objects whose {@link Header#key() key} matches the specified key.
|
||||
*
|
||||
* @param key the key; may not be null
|
||||
* @return this object to facilitate chaining multiple methods; never null
|
||||
*/
|
||||
Headers retainLatest(String key);
|
||||
|
||||
/**
|
||||
* Removes all but the last {@link Header} object with each key.
|
||||
*
|
||||
* @return this object to facilitate chaining multiple methods; never null
|
||||
*/
|
||||
Headers retainLatest();
|
||||
|
||||
/**
|
||||
* Removes all headers from this object.
|
||||
*
|
||||
* @return this object to facilitate chaining multiple methods; never null
|
||||
*/
|
||||
Headers clear();
|
||||
|
||||
/**
|
||||
* Create a copy of this {@link Headers} object. The new copy will contain all of the same {@link Header} objects as this object.
|
||||
* @return the copy; never null
|
||||
*/
|
||||
Headers duplicate();
|
||||
|
||||
/**
|
||||
* Get all {@link Header}s, apply the transform to each and store the result in place of the original.
|
||||
*
|
||||
* @param transform the transform to apply; may not be null
|
||||
* @return this object to facilitate chaining multiple methods; never null
|
||||
* @throws DataException if the header's value is invalid
|
||||
*/
|
||||
Headers apply(HeaderTransform transform);
|
||||
|
||||
/**
|
||||
* Get all {@link Header}s with the given key, apply the transform to each and store the result in place of the original.
|
||||
*
|
||||
* @param key the header's key; may not be null
|
||||
* @param transform the transform to apply; may not be null
|
||||
* @return this object to facilitate chaining multiple methods; never null
|
||||
* @throws DataException if the header's value is invalid
|
||||
*/
|
||||
Headers apply(String key, HeaderTransform transform);
|
||||
|
||||
/**
|
||||
* A function to transform the supplied {@link Header}. Implementations will likely need to use {@link Header#with(Schema, Object)}
|
||||
* to create the new instance.
|
||||
*/
|
||||
interface HeaderTransform {
|
||||
/**
|
||||
* Transform the given {@link Header} and return the updated {@link Header}.
|
||||
*
|
||||
* @param header the input header; never null
|
||||
* @return the new header, or null if the supplied {@link Header} is to be removed
|
||||
*/
|
||||
Header apply(Header header);
|
||||
}
|
||||
}
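To illustrate the HeaderTransform contract defined above, a hedged sketch (not part of this patch) that masks one header value in place; the header keys and the mask string are made up.

import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.header.ConnectHeaders;
import org.apache.kafka.connect.header.Headers;
import org.apache.kafka.connect.header.Headers.HeaderTransform;

public class MaskHeaderSketch {
    public static void main(String[] args) {
        Headers headers = new ConnectHeaders()
                .addString("auth-token", "secret-value")
                .addString("trace-id", "abc-123");

        // Header.with(...) returns a new immutable Header with the same key,
        // which is exactly what apply(...) expects back; returning null instead
        // would remove the header.
        HeaderTransform mask = header -> header.with(Schema.STRING_SCHEMA, "****");

        // Only the sensitive key is transformed; other headers are untouched.
        headers.apply("auth-token", mask);

        System.out.println(headers.lastWithName("auth-token").value()); // ****
        System.out.println(headers.lastWithName("trace-id").value());   // abc-123
    }
}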
@@ -0,0 +1,93 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.kafka.connect.health;
|
||||
|
||||
import java.util.Objects;
|
||||
|
||||
/**
|
||||
* Provides the current status and worker identifier for a connector or one of its tasks.
|
||||
*/
|
||||
public abstract class AbstractState {
|
||||
|
||||
private final String state;
|
||||
private final String traceMessage;
|
||||
private final String workerId;
|
||||
|
||||
/**
|
||||
* Construct a state for connector or task.
|
||||
*
|
||||
* @param state the status of connector or task; may not be null or empty
|
||||
* @param workerId the workerId associated with the connector or the task; may not be null or empty
|
||||
* @param traceMessage any error trace message associated with the connector or the task; may be null or empty
|
||||
*/
|
||||
public AbstractState(String state, String workerId, String traceMessage) {
|
||||
if (state == null || state.trim().isEmpty()) {
|
||||
throw new IllegalArgumentException("State must not be null or empty");
|
||||
}
|
||||
if (workerId == null || workerId.trim().isEmpty()) {
|
||||
throw new IllegalArgumentException("Worker ID must not be null or empty");
|
||||
}
|
||||
this.state = state;
|
||||
this.workerId = workerId;
|
||||
this.traceMessage = traceMessage;
|
||||
}
|
||||
|
||||
/**
|
||||
* Provides the current state of the connector or task.
|
||||
*
|
||||
* @return state, never {@code null} or empty
|
||||
*/
|
||||
public String state() {
|
||||
return state;
|
||||
}
|
||||
|
||||
/**
|
||||
* The identifier of the worker associated with the connector or the task.
|
||||
*
|
||||
* @return workerId, never {@code null} or empty.
|
||||
*/
|
||||
public String workerId() {
|
||||
return workerId;
|
||||
}
|
||||
|
||||
/**
|
||||
* The error message associated with the connector or task.
|
||||
*
|
||||
* @return traceMessage, can be {@code null} or empty.
|
||||
*/
|
||||
public String traceMessage() {
|
||||
return traceMessage;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (this == o)
|
||||
return true;
|
||||
if (o == null || getClass() != o.getClass())
|
||||
return false;
|
||||
AbstractState that = (AbstractState) o;
|
||||
return state.equals(that.state)
|
||||
&& Objects.equals(traceMessage, that.traceMessage)
|
||||
&& workerId.equals(that.workerId);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return Objects.hash(state, traceMessage, workerId);
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,32 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.kafka.connect.health;
|
||||
|
||||
/**
|
||||
* Provides immutable Connect cluster information, such as the ID of the backing Kafka cluster. The
|
||||
* Connect framework provides the implementation for this interface.
|
||||
*/
|
||||
public interface ConnectClusterDetails {
|
||||
|
||||
/**
|
||||
* Get the cluster ID of the Kafka cluster backing this Connect cluster.
|
||||
*
|
||||
* @return the cluster ID of the Kafka cluster backing this Connect cluster
|
||||
**/
|
||||
String kafkaClusterId();
|
||||
}
|
||||
@@ -0,0 +1,72 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.kafka.connect.health;
|
||||
|
||||
import java.util.Collection;
|
||||
import java.util.Map;
|
||||
|
||||
/**
|
||||
* Provides the ability to look up connector metadata, including status and configurations, as well
|
||||
* as immutable cluster information such as Kafka cluster ID. This is made available to
|
||||
* {@link org.apache.kafka.connect.rest.ConnectRestExtension} implementations. The Connect framework
|
||||
* provides the implementation for this interface.
|
||||
*/
|
||||
public interface ConnectClusterState {
|
||||
|
||||
/**
|
||||
* Get the names of the connectors currently deployed in this cluster. This is a full list of connectors in the cluster gathered from
|
||||
* the current configuration, which may change over time.
|
||||
*
|
||||
* @return collection of connector names, never {@code null}
|
||||
*/
|
||||
Collection<String> connectors();
|
||||
|
||||
/**
|
||||
* Look up the current health of a connector and its tasks. This provides the current snapshot of health by querying the underlying
|
||||
* herder. A connector returned by previous invocation of {@link #connectors()} may no longer be available and could result in {@link
|
||||
* org.apache.kafka.connect.errors.NotFoundException}.
|
||||
*
|
||||
* @param connName name of the connector
|
||||
* @return the health of the connector for the connector name
|
||||
* @throws org.apache.kafka.connect.errors.NotFoundException if the requested connector can't be found
|
||||
*/
|
||||
ConnectorHealth connectorHealth(String connName);
|
||||
|
||||
/**
|
||||
* Look up the current configuration of a connector. This provides the current snapshot of configuration by querying the underlying
|
||||
* herder. A connector returned by previous invocation of {@link #connectors()} may no longer be available and could result in {@link
|
||||
* org.apache.kafka.connect.errors.NotFoundException}.
|
||||
*
|
||||
* @param connName name of the connector
|
||||
* @return the configuration of the connector for the connector name
|
||||
* @throws org.apache.kafka.connect.errors.NotFoundException if the requested connector can't be found
|
||||
* @throws java.lang.UnsupportedOperationException if the default implementation has not been overridden
|
||||
*/
|
||||
default Map<String, String> connectorConfig(String connName) {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
/**
|
||||
* Get details about the setup of the Connect cluster.
|
||||
* @return a {@link ConnectClusterDetails} object containing information about the cluster
|
||||
* @throws java.lang.UnsupportedOperationException if the default implementation has not been overridden
|
||||
**/
|
||||
default ConnectClusterDetails clusterDetails() {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
}
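As a purely hypothetical sketch (not part of this patch) of how an extension might consume ConnectClusterState: the "FAILED" state string and the console reporting below are assumptions, not contracts of the interface.

import org.apache.kafka.connect.errors.NotFoundException;
import org.apache.kafka.connect.health.ConnectClusterState;
import org.apache.kafka.connect.health.ConnectorHealth;

public class ClusterStateProbe {

    // The Connect framework supplies the ConnectClusterState instance,
    // for example through a REST extension context.
    static void reportFailedConnectors(ConnectClusterState clusterState) {
        for (String name : clusterState.connectors()) {
            try {
                ConnectorHealth health = clusterState.connectorHealth(name);
                if ("FAILED".equals(health.connectorState().state())) {
                    System.out.println("Connector " + name + " failed on worker "
                            + health.connectorState().workerId());
                }
            } catch (NotFoundException e) {
                // The connector may have been removed between connectors()
                // and connectorHealth(), as the javadoc above warns.
            }
        }
    }
}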
@@ -0,0 +1,113 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.health;
|
||||
|
||||
|
||||
import java.util.Map;
|
||||
import java.util.Objects;
|
||||
|
||||
/**
|
||||
* Provides basic health information about the connector and its tasks.
|
||||
*/
|
||||
public class ConnectorHealth {
|
||||
|
||||
private final String name;
|
||||
private final ConnectorState connectorState;
|
||||
private final Map<Integer, TaskState> tasks;
|
||||
private final ConnectorType type;
|
||||
|
||||
|
||||
public ConnectorHealth(String name,
|
||||
ConnectorState connectorState,
|
||||
Map<Integer, TaskState> tasks,
|
||||
ConnectorType type) {
|
||||
if (name == null || name.trim().isEmpty()) {
|
||||
throw new IllegalArgumentException("Connector name is required");
|
||||
}
|
||||
Objects.requireNonNull(connectorState, "connectorState can't be null");
|
||||
Objects.requireNonNull(tasks, "tasks can't be null");
|
||||
Objects.requireNonNull(type, "type can't be null");
|
||||
this.name = name;
|
||||
this.connectorState = connectorState;
|
||||
this.tasks = tasks;
|
||||
this.type = type;
|
||||
}
|
||||
|
||||
/**
|
||||
* Provides the name of the connector.
|
||||
*
|
||||
* @return name, never {@code null} or empty
|
||||
*/
|
||||
public String name() {
|
||||
return name;
|
||||
}
|
||||
|
||||
/**
|
||||
* Provides the current state of the connector.
|
||||
*
|
||||
* @return the connector state, never {@code null}
|
||||
*/
|
||||
public ConnectorState connectorState() {
|
||||
return connectorState;
|
||||
}
|
||||
|
||||
/**
|
||||
* Provides the current state of the connector tasks.
|
||||
*
|
||||
* @return the state for each task ID; never {@code null}
|
||||
*/
|
||||
public Map<Integer, TaskState> tasksState() {
|
||||
return tasks;
|
||||
}
|
||||
|
||||
/**
|
||||
* Provides the type of the connector.
|
||||
*
|
||||
* @return type, never {@code null}
|
||||
*/
|
||||
public ConnectorType type() {
|
||||
return type;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (this == o)
|
||||
return true;
|
||||
if (o == null || getClass() != o.getClass())
|
||||
return false;
|
||||
ConnectorHealth that = (ConnectorHealth) o;
|
||||
return name.equals(that.name)
|
||||
&& connectorState.equals(that.connectorState)
|
||||
&& tasks.equals(that.tasks)
|
||||
&& type == that.type;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return Objects.hash(name, connectorState, tasks, type);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "ConnectorHealth{"
|
||||
+ "name='" + name + '\''
|
||||
+ ", connectorState=" + connectorState
|
||||
+ ", tasks=" + tasks
|
||||
+ ", type=" + type
|
||||
+ '}';
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,44 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.kafka.connect.health;
|
||||
|
||||
/**
|
||||
* Describes the status, worker ID, and any errors associated with a connector.
|
||||
*/
|
||||
public class ConnectorState extends AbstractState {
|
||||
|
||||
/**
|
||||
* Provides an instance of the ConnectorState.
|
||||
*
|
||||
* @param state - the status of connector, may not be {@code null} or empty
|
||||
* @param workerId - the workerId associated with the connector, may not be {@code null} or empty
|
||||
* @param traceMessage - any error message associated with the connector, may be {@code null} or empty
|
||||
*/
|
||||
public ConnectorState(String state, String workerId, String traceMessage) {
|
||||
super(state, workerId, traceMessage);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "ConnectorState{"
|
||||
+ "state='" + state() + '\''
|
||||
+ ", traceMessage='" + traceMessage() + '\''
|
||||
+ ", workerId='" + workerId() + '\''
|
||||
+ '}';
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,43 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.health;
|
||||
|
||||
import java.util.Locale;
|
||||
|
||||
/**
|
||||
* Enum definition that identifies the type of the connector.
|
||||
*/
|
||||
public enum ConnectorType {
|
||||
/**
|
||||
* Identifies a source connector
|
||||
*/
|
||||
SOURCE,
|
||||
/**
|
||||
* Identifies a sink connector
|
||||
*/
|
||||
SINK,
|
||||
/**
|
||||
* Identifies a connector whose type could not be inferred
|
||||
*/
|
||||
UNKNOWN;
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return super.toString().toLowerCase(Locale.ROOT);
|
||||
}
|
||||
|
||||
}
|
||||
@@ -0,0 +1,77 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.kafka.connect.health;
|
||||
|
||||
import java.util.Objects;
|
||||
|
||||
/**
|
||||
* Describes the state, IDs, and any errors of a connector task.
|
||||
*/
|
||||
public class TaskState extends AbstractState {
|
||||
|
||||
private final int taskId;
|
||||
|
||||
/**
|
||||
* Provides an instance of {@link TaskState}.
|
||||
*
|
||||
* @param taskId the id associated with the connector task
|
||||
* @param state the status of the task, may not be {@code null} or empty
|
||||
* @param workerId id of the worker the task is associated with, may not be {@code null} or empty
|
||||
* @param trace error message if that task had failed or errored out, may be {@code null} or empty
|
||||
*/
|
||||
public TaskState(int taskId, String state, String workerId, String trace) {
|
||||
super(state, workerId, trace);
|
||||
this.taskId = taskId;
|
||||
}
|
||||
|
||||
/**
|
||||
* Provides the ID of the task.
|
||||
*
|
||||
* @return the task ID
|
||||
*/
|
||||
public int taskId() {
|
||||
return taskId;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (this == o)
|
||||
return true;
|
||||
if (o == null || getClass() != o.getClass())
|
||||
return false;
|
||||
if (!super.equals(o))
|
||||
return false;
|
||||
TaskState taskState = (TaskState) o;
|
||||
return taskId == taskState.taskId;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return Objects.hash(super.hashCode(), taskId);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "TaskState{"
|
||||
+ "taskId='" + taskId + '\''
|
||||
+ "state='" + state() + '\''
|
||||
+ ", traceMessage='" + traceMessage() + '\''
|
||||
+ ", workerId='" + workerId() + '\''
|
||||
+ '}';
|
||||
}
|
||||
}
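For illustration only (not part of this patch), a sketch assembling the health classes defined above; the connector name, state strings, and worker ids are sample data.

import org.apache.kafka.connect.health.ConnectorHealth;
import org.apache.kafka.connect.health.ConnectorState;
import org.apache.kafka.connect.health.ConnectorType;
import org.apache.kafka.connect.health.TaskState;

import java.util.HashMap;
import java.util.Map;

public class ConnectorHealthSketch {
    public static void main(String[] args) {
        ConnectorState connectorState = new ConnectorState("RUNNING", "worker-1:8083", null);

        Map<Integer, TaskState> tasks = new HashMap<>();
        tasks.put(0, new TaskState(0, "RUNNING", "worker-1:8083", null));
        tasks.put(1, new TaskState(1, "FAILED", "worker-2:8083", "example trace"));

        ConnectorHealth health =
                new ConnectorHealth("file-sink", connectorState, tasks, ConnectorType.SINK);

        System.out.println(health);                                    // uses the toString() defined above
        System.out.println(health.tasksState().get(1).traceMessage()); // example trace
    }
}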
@@ -0,0 +1,58 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.kafka.connect.rest;
|
||||
|
||||
import org.apache.kafka.common.Configurable;
|
||||
import org.apache.kafka.connect.components.Versioned;
|
||||
import org.apache.kafka.connect.health.ConnectClusterState;
|
||||
|
||||
import java.io.Closeable;
|
||||
import java.util.Map;
|
||||
|
||||
/**
|
||||
* A plugin interface to allow registration of new JAX-RS resources like Filters, REST endpoints, providers, etc. The implementations will
|
||||
* be discovered using the standard Java {@link java.util.ServiceLoader} mechanism by Connect's plugin class loading mechanism.
|
||||
*
|
||||
* <p>The extension class(es) must be packaged as a plugin, with one JAR containing the implementation classes and a {@code
|
||||
* META-INF/services/org.apache.kafka.connect.rest.extension.ConnectRestExtension} file that contains the fully qualified name of the
|
||||
* class(es) that implement the ConnectRestExtension interface. The plugin should also include the JARs of all dependencies except those
|
||||
* already provided by the Connect framework.
|
||||
*
|
||||
* <p>To install into a Connect installation, add a directory named for the plugin and containing the plugin's JARs into a directory that is
|
||||
* on Connect's {@code plugin.path}, and (re)start the Connect worker.
|
||||
*
|
||||
* <p>When the Connect worker process starts up, it will read its configuration and instantiate all of the REST extension implementation
|
||||
* classes that are specified in the `rest.extension.classes` configuration property. Connect will then pass its configuration to each
|
||||
* extension via the {@link Configurable#configure(Map)} method, and will then call {@link #register} with a provided context.
|
||||
*
|
||||
* <p>When the Connect worker shuts down, it will call the extension's {@link #close} method to allow the implementation to release all of
|
||||
* its resources.
|
||||
*/
|
||||
public interface ConnectRestExtension extends Configurable, Versioned, Closeable {
|
||||
|
||||
/**
|
||||
* ConnectRestExtension implementations can register custom JAX-RS resources via the {@link #register(ConnectRestExtensionContext)}
|
||||
* method. The Connect framework will invoke this method after registering the default Connect resources. If the implementations attempt
|
||||
* to re-register any of the Connect resources, it will be ignored and logged.
|
||||
*
|
||||
* @param restPluginContext The context provides access to JAX-RS {@link javax.ws.rs.core.Configurable} and {@link
|
||||
* ConnectClusterState}.The custom JAX-RS resources can be registered via the {@link
|
||||
* ConnectRestExtensionContext#configurable()}
|
||||
*/
|
||||
void register(ConnectRestExtensionContext restPluginContext);
|
||||
}
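A hedged sketch of one possible ConnectRestExtension implementation (not part of this patch); the resource path, class names, and version string are placeholders. To activate such an extension, the packaged plugin would be placed on plugin.path and the class listed in the worker's rest.extension.classes property, as the javadoc above describes.

import org.apache.kafka.connect.health.ConnectClusterState;
import org.apache.kafka.connect.rest.ConnectRestExtension;
import org.apache.kafka.connect.rest.ConnectRestExtensionContext;

import javax.ws.rs.GET;
import javax.ws.rs.Path;

import java.io.IOException;
import java.util.Map;

public class ConnectorNamesRestExtension implements ConnectRestExtension {

    /** A tiny JAX-RS resource; the path and payload format are illustrative. */
    @Path("/connector-names")
    public static class ConnectorNamesResource {
        private final ConnectClusterState clusterState;

        public ConnectorNamesResource(ConnectClusterState clusterState) {
            this.clusterState = clusterState;
        }

        @GET
        public String names() {
            return String.join(",", clusterState.connectors());
        }
    }

    @Override
    public void configure(Map<String, ?> configs) {
        // The worker passes its configuration here before calling register(...).
    }

    @Override
    public void register(ConnectRestExtensionContext restPluginContext) {
        restPluginContext.configurable()
                .register(new ConnectorNamesResource(restPluginContext.clusterState()));
    }

    @Override
    public void close() throws IOException {
        // Release any resources acquired in configure(...) or register(...).
    }

    @Override
    public String version() {
        return "1.0.0"; // placeholder version string
    }
}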
@@ -0,0 +1,44 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.kafka.connect.rest;
|
||||
|
||||
import org.apache.kafka.connect.health.ConnectClusterState;
|
||||
|
||||
import javax.ws.rs.core.Configurable;
|
||||
|
||||
/**
|
||||
* The interface provides the ability for {@link ConnectRestExtension} implementations to access the JAX-RS
|
||||
* {@link javax.ws.rs.core.Configurable} and cluster state {@link ConnectClusterState}. The implementation for the interface is provided
|
||||
* by the Connect framework.
|
||||
*/
|
||||
public interface ConnectRestExtensionContext {
|
||||
|
||||
/**
|
||||
* Provides an implementation of {@link javax.ws.rs.core.Configurable} that can be used to register JAX-RS resources.
|
||||
*
|
||||
* @return the JAX-RS {@link javax.ws.rs.core.Configurable}; never {@code null}
|
||||
*/
|
||||
Configurable<? extends Configurable> configurable();
|
||||
|
||||
/**
|
||||
* Provides the cluster state and health information about the connectors and tasks.
|
||||
*
|
||||
* @return the cluster state information; never {@code null}
|
||||
*/
|
||||
ConnectClusterState clusterState();
|
||||
}
|
||||
@@ -0,0 +1,37 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.sink;
|
||||
|
||||
import org.apache.kafka.connect.connector.Connector;
|
||||
|
||||
/**
|
||||
* SinkConnectors implement the Connector interface to send Kafka data to another system.
|
||||
*/
|
||||
public abstract class SinkConnector extends Connector {
|
||||
|
||||
/**
|
||||
* <p>
|
||||
* Configuration key for the list of input topics for this connector.
|
||||
* </p>
|
||||
* <p>
|
||||
* Usually this setting is only relevant to the Kafka Connect framework, but is provided here for
|
||||
* the convenience of Connector developers if they also need to know the set of topics.
|
||||
* </p>
|
||||
*/
|
||||
public static final String TOPICS_CONFIG = "topics";
|
||||
|
||||
}
|
||||
@@ -0,0 +1,103 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.sink;
|
||||
|
||||
import org.apache.kafka.common.record.TimestampType;
|
||||
import org.apache.kafka.connect.connector.ConnectRecord;
|
||||
import org.apache.kafka.connect.data.Schema;
|
||||
import org.apache.kafka.connect.header.Header;
|
||||
|
||||
/**
|
||||
* SinkRecord is a {@link ConnectRecord} that has been read from Kafka and includes the kafkaOffset of
|
||||
* the record in the Kafka topic-partition in addition to the standard fields. This information
|
||||
* should be used by the SinkTask to coordinate kafkaOffset commits.
|
||||
*
|
||||
* It also includes the {@link TimestampType}, which may be {@link TimestampType#NO_TIMESTAMP_TYPE}, and the relevant
|
||||
* timestamp, which may be {@code null}.
|
||||
*/
|
||||
public class SinkRecord extends ConnectRecord<SinkRecord> {
|
||||
private final long kafkaOffset;
|
||||
private final TimestampType timestampType;
|
||||
|
||||
public SinkRecord(String topic, int partition, Schema keySchema, Object key, Schema valueSchema, Object value, long kafkaOffset) {
|
||||
this(topic, partition, keySchema, key, valueSchema, value, kafkaOffset, null, TimestampType.NO_TIMESTAMP_TYPE);
|
||||
}
|
||||
|
||||
public SinkRecord(String topic, int partition, Schema keySchema, Object key, Schema valueSchema, Object value, long kafkaOffset,
|
||||
Long timestamp, TimestampType timestampType) {
|
||||
this(topic, partition, keySchema, key, valueSchema, value, kafkaOffset, timestamp, timestampType, null);
|
||||
}
|
||||
|
||||
public SinkRecord(String topic, int partition, Schema keySchema, Object key, Schema valueSchema, Object value, long kafkaOffset,
|
||||
Long timestamp, TimestampType timestampType, Iterable<Header> headers) {
|
||||
super(topic, partition, keySchema, key, valueSchema, value, timestamp, headers);
|
||||
this.kafkaOffset = kafkaOffset;
|
||||
this.timestampType = timestampType;
|
||||
}
|
||||
|
||||
public long kafkaOffset() {
|
||||
return kafkaOffset;
|
||||
}
|
||||
|
||||
public TimestampType timestampType() {
|
||||
return timestampType;
|
||||
}
|
||||
|
||||
@Override
|
||||
public SinkRecord newRecord(String topic, Integer kafkaPartition, Schema keySchema, Object key, Schema valueSchema, Object value, Long timestamp) {
|
||||
return newRecord(topic, kafkaPartition, keySchema, key, valueSchema, value, timestamp, headers().duplicate());
|
||||
}
|
||||
|
||||
@Override
|
||||
public SinkRecord newRecord(String topic, Integer kafkaPartition, Schema keySchema, Object key, Schema valueSchema, Object value,
|
||||
Long timestamp, Iterable<Header> headers) {
|
||||
return new SinkRecord(topic, kafkaPartition, keySchema, key, valueSchema, value, kafkaOffset(), timestamp, timestampType, headers);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (this == o)
|
||||
return true;
|
||||
if (o == null || getClass() != o.getClass())
|
||||
return false;
|
||||
if (!super.equals(o))
|
||||
return false;
|
||||
|
||||
SinkRecord that = (SinkRecord) o;
|
||||
|
||||
if (kafkaOffset != that.kafkaOffset)
|
||||
return false;
|
||||
|
||||
return timestampType == that.timestampType;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
int result = super.hashCode();
|
||||
result = 31 * result + Long.hashCode(kafkaOffset);
|
||||
result = 31 * result + timestampType.hashCode();
|
||||
return result;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "SinkRecord{" +
|
||||
"kafkaOffset=" + kafkaOffset +
|
||||
", timestampType=" + timestampType +
|
||||
"} " + super.toString();
|
||||
}
|
||||
}
|
||||
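As a rough, made-up illustration, the framework would build a record like the following for a message read from partition 0 of a "metrics" topic at offset 42 (Schema and TimestampType imports assumed):

SinkRecord record = new SinkRecord(
        "metrics",                        // topic
        0,                                // kafkaPartition
        Schema.STRING_SCHEMA, "host-1",   // key schema and key
        Schema.INT64_SCHEMA, 99L,         // value schema and value
        42L,                              // kafkaOffset
        1609459200000L,                   // timestamp
        TimestampType.CREATE_TIME);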
@@ -0,0 +1,174 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.sink;
|
||||
|
||||
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
|
||||
import org.apache.kafka.common.TopicPartition;
|
||||
import org.apache.kafka.connect.connector.Task;
|
||||
|
||||
import java.util.Collection;
|
||||
import java.util.Map;
|
||||
|
||||
/**
|
||||
* SinkTask is a Task that takes records loaded from Kafka and sends them to another system. Each task
|
||||
* instance is assigned a set of partitions by the Connect framework and will handle all records received
|
||||
* from those partitions. As records are fetched from Kafka, they will be passed to the sink task using the
|
||||
* {@link #put(Collection)} API, which should either write them to the downstream system or batch them for
|
||||
* later writing. Periodically, Connect will call {@link #flush(Map)} to ensure that batched records are
|
||||
 * actually pushed to the downstream system.
|
||||
*
|
||||
* Below we describe the lifecycle of a SinkTask.
|
||||
*
|
||||
* <ol>
|
||||
* <li><b>Initialization:</b> SinkTasks are first initialized using {@link #initialize(SinkTaskContext)}
|
||||
* to prepare the task's context and {@link #start(Map)} to accept configuration and start any services
|
||||
* needed for processing.</li>
|
||||
* <li><b>Partition Assignment:</b> After initialization, Connect will assign the task a set of partitions
|
||||
* using {@link #open(Collection)}. These partitions are owned exclusively by this task until they
|
||||
* have been closed with {@link #close(Collection)}.</li>
|
||||
* <li><b>Record Processing:</b> Once partitions have been opened for writing, Connect will begin forwarding
|
||||
* records from Kafka using the {@link #put(Collection)} API. Periodically, Connect will ask the task
|
||||
* to flush records using {@link #flush(Map)} as described above.</li>
|
||||
* <li><b>Partition Rebalancing:</b> Occasionally, Connect will need to change the assignment of this task.
|
||||
* When this happens, the currently assigned partitions will be closed with {@link #close(Collection)} and
|
||||
* the new assignment will be opened using {@link #open(Collection)}.</li>
|
||||
* <li><b>Shutdown:</b> When the task needs to be shutdown, Connect will close active partitions (if there
|
||||
* are any) and stop the task using {@link #stop()}</li>
|
||||
* </ol>
|
||||
*
|
||||
*/
|
||||
public abstract class SinkTask implements Task {
|
||||
|
||||
/**
|
||||
* <p>
|
||||
* The configuration key that provides the list of topics that are inputs for this
|
||||
* SinkTask.
|
||||
* </p>
|
||||
*/
|
||||
public static final String TOPICS_CONFIG = "topics";
|
||||
|
||||
/**
|
||||
* <p>
|
||||
* The configuration key that provides a regex specifying which topics to include as inputs
|
||||
* for this SinkTask.
|
||||
* </p>
|
||||
*/
|
||||
public static final String TOPICS_REGEX_CONFIG = "topics.regex";
|
||||
|
||||
protected SinkTaskContext context;
|
||||
|
||||
/**
|
||||
* Initialize the context of this task. Note that the partition assignment will be empty until
|
||||
* Connect has opened the partitions for writing with {@link #open(Collection)}.
|
||||
* @param context The sink task's context
|
||||
*/
|
||||
public void initialize(SinkTaskContext context) {
|
||||
this.context = context;
|
||||
}
|
||||
|
||||
/**
|
||||
* Start the Task. This should handle any configuration parsing and one-time setup of the task.
|
||||
* @param props initial configuration
|
||||
*/
|
||||
@Override
|
||||
public abstract void start(Map<String, String> props);
|
||||
|
||||
/**
|
||||
* Put the records in the sink. Usually this should send the records to the sink asynchronously
|
||||
* and immediately return.
|
||||
*
|
||||
* If this operation fails, the SinkTask may throw a {@link org.apache.kafka.connect.errors.RetriableException} to
|
||||
* indicate that the framework should attempt to retry the same call again. Other exceptions will cause the task to
|
||||
* be stopped immediately. {@link SinkTaskContext#timeout(long)} can be used to set the maximum time before the
|
||||
* batch will be retried.
|
||||
*
|
||||
* @param records the set of records to send
|
||||
*/
|
||||
public abstract void put(Collection<SinkRecord> records);
|
||||
|
||||
/**
|
||||
* Flush all records that have been {@link #put(Collection)} for the specified topic-partitions.
|
||||
*
|
||||
 * @param currentOffsets the current offset state as of the last call to {@link #put(Collection)},
|
||||
* provided for convenience but could also be determined by tracking all offsets included in the {@link SinkRecord}s
|
||||
* passed to {@link #put}.
|
||||
*/
|
||||
public void flush(Map<TopicPartition, OffsetAndMetadata> currentOffsets) {
|
||||
}
|
||||
|
||||
/**
|
||||
* Pre-commit hook invoked prior to an offset commit.
|
||||
*
|
||||
* The default implementation simply invokes {@link #flush(Map)} and is thus able to assume all {@code currentOffsets} are safe to commit.
|
||||
*
|
||||
 * @param currentOffsets the current offset state as of the last call to {@link #put(Collection)},
|
||||
* provided for convenience but could also be determined by tracking all offsets included in the {@link SinkRecord}s
|
||||
* passed to {@link #put}.
|
||||
*
|
||||
* @return an empty map if Connect-managed offset commit is not desired, otherwise a map of offsets by topic-partition that are safe to commit.
|
||||
*/
|
||||
public Map<TopicPartition, OffsetAndMetadata> preCommit(Map<TopicPartition, OffsetAndMetadata> currentOffsets) {
|
||||
flush(currentOffsets);
|
||||
return currentOffsets;
|
||||
}
|
||||
|
||||
/**
|
||||
 * The SinkTask uses this method to create writers for newly assigned partitions in case of partition
|
||||
* rebalance. This method will be called after partition re-assignment completes and before the SinkTask starts
|
||||
* fetching data. Note that any errors raised from this method will cause the task to stop.
|
||||
* @param partitions The list of partitions that are now assigned to the task (may include
|
||||
* partitions previously assigned to the task)
|
||||
*/
|
||||
public void open(Collection<TopicPartition> partitions) {
|
||||
this.onPartitionsAssigned(partitions);
|
||||
}
|
||||
|
||||
/**
|
||||
* @deprecated Use {@link #open(Collection)} for partition initialization.
|
||||
*/
|
||||
@Deprecated
|
||||
public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
|
||||
}
|
||||
|
||||
/**
|
||||
 * The SinkTask uses this method to close writers for partitions that are no
|
||||
* longer assigned to the SinkTask. This method will be called before a rebalance operation starts
|
||||
* and after the SinkTask stops fetching data. After being closed, Connect will not write
|
||||
* any records to the task until a new set of partitions has been opened. Note that any errors raised
|
||||
* from this method will cause the task to stop.
|
||||
* @param partitions The list of partitions that should be closed
|
||||
*/
|
||||
public void close(Collection<TopicPartition> partitions) {
|
||||
this.onPartitionsRevoked(partitions);
|
||||
}
|
||||
|
||||
/**
|
||||
* @deprecated Use {@link #close(Collection)} instead for partition cleanup.
|
||||
*/
|
||||
@Deprecated
|
||||
public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
|
||||
}
|
||||
|
||||
/**
|
||||
* Perform any cleanup to stop this task. In SinkTasks, this method is invoked only once outstanding calls to other
|
||||
* methods have completed (e.g., {@link #put(Collection)} has returned) and a final {@link #flush(Map)} and offset
|
||||
* commit has completed. Implementations of this method should only need to perform final cleanup operations, such
|
||||
* as closing network connections to the sink system.
|
||||
*/
|
||||
@Override
|
||||
public abstract void stop();
|
||||
}
|
||||
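To make the lifecycle above concrete, here is a minimal hypothetical SinkTask that buffers records in put() and writes them out in flush(); printing to stdout stands in for a real external system.

package org.apache.kafka.connect.sink.example; // hypothetical package for this sketch

import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.connect.sink.SinkRecord;
import org.apache.kafka.connect.sink.SinkTask;

import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;

public class LoggingSinkTask extends SinkTask {

    private final List<SinkRecord> buffer = new ArrayList<>();

    @Override
    public String version() {
        return "1.0.0";
    }

    @Override
    public void start(Map<String, String> props) {
        // parse configuration and open connections to the external system here
    }

    @Override
    public void put(Collection<SinkRecord> records) {
        // batch records; a real task could throw RetriableException on transient failures
        buffer.addAll(records);
    }

    @Override
    public void flush(Map<TopicPartition, OffsetAndMetadata> currentOffsets) {
        // push batched records to the external system before offsets are committed
        for (SinkRecord record : buffer) {
            System.out.println(record.topic() + "-" + record.kafkaPartition()
                    + "@" + record.kafkaOffset() + ": " + record.value());
        }
        buffer.clear();
    }

    @Override
    public void stop() {
        // final cleanup, e.g. closing network connections
        buffer.clear();
    }
}

A matching SinkConnector would return this class from its taskClass() method so that the framework can instantiate the tasks.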
@@ -0,0 +1,98 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.sink;
|
||||
|
||||
import org.apache.kafka.common.TopicPartition;
|
||||
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
/**
|
||||
* Context passed to SinkTasks, allowing them to access utilities in the Kafka Connect runtime.
|
||||
*/
|
||||
public interface SinkTaskContext {
|
||||
|
||||
/**
|
||||
* Get the Task configuration. This is the latest configuration and may differ from that passed on startup.
|
||||
*
|
||||
* For example, this method can be used to obtain the latest configuration if an external secret has changed,
|
||||
* and the configuration is using variable references such as those compatible with
|
||||
* {@link org.apache.kafka.common.config.ConfigTransformer}.
|
||||
*/
|
||||
public Map<String, String> configs();
|
||||
|
||||
/**
|
||||
* Reset the consumer offsets for the given topic partitions. SinkTasks should use this if they manage offsets
|
||||
* in the sink data store rather than using Kafka consumer offsets. For example, an HDFS connector might record
|
||||
* offsets in HDFS to provide exactly once delivery. When the SinkTask is started or a rebalance occurs, the task
|
||||
* would reload offsets from HDFS and use this method to reset the consumer to those offsets.
|
||||
*
|
||||
* SinkTasks that do not manage their own offsets do not need to use this method.
|
||||
*
|
||||
* @param offsets map of offsets for topic partitions
|
||||
*/
|
||||
void offset(Map<TopicPartition, Long> offsets);
|
||||
|
||||
/**
|
||||
 * Reset the consumer offsets for the given topic partition. SinkTasks should use this if they manage offsets
|
||||
* in the sink data store rather than using Kafka consumer offsets. For example, an HDFS connector might record
|
||||
* offsets in HDFS to provide exactly once delivery. When the topic partition is recovered the task
|
||||
* would reload offsets from HDFS and use this method to reset the consumer to the offset.
|
||||
*
|
||||
* SinkTasks that do not manage their own offsets do not need to use this method.
|
||||
*
|
||||
 * @param tp the topic partition whose offset should be reset.
|
||||
* @param offset the offset to reset to.
|
||||
*/
|
||||
void offset(TopicPartition tp, long offset);
|
||||
|
||||
/**
|
||||
* Set the timeout in milliseconds. SinkTasks should use this to indicate that they need to retry certain
|
||||
* operations after the timeout. SinkTasks may have certain operations on external systems that may need
|
||||
 * to be retried in case of failures. For example, appending a record to an HDFS file may fail due to temporary network
|
||||
* issues. SinkTasks use this method to set how long to wait before retrying.
|
||||
* @param timeoutMs the backoff timeout in milliseconds.
|
||||
*/
|
||||
void timeout(long timeoutMs);
|
||||
|
||||
/**
|
||||
* Get the current set of assigned TopicPartitions for this task.
|
||||
* @return the set of currently assigned TopicPartitions
|
||||
*/
|
||||
Set<TopicPartition> assignment();
|
||||
|
||||
/**
|
||||
* Pause consumption of messages from the specified TopicPartitions.
|
||||
* @param partitions the partitions which should be paused
|
||||
*/
|
||||
void pause(TopicPartition... partitions);
|
||||
|
||||
/**
|
||||
* Resume consumption of messages from previously paused TopicPartitions.
|
||||
* @param partitions the partitions to resume
|
||||
*/
|
||||
void resume(TopicPartition... partitions);
|
||||
|
||||
/**
|
||||
* Request an offset commit. Sink tasks can use this to minimize the potential for redelivery
|
||||
* by requesting an offset commit as soon as they flush data to the destination system.
|
||||
*
|
||||
* It is only a hint to the runtime and no timing guarantee should be assumed.
|
||||
*/
|
||||
void requestCommit();
|
||||
|
||||
}
|
||||
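A sketch of how a task that keeps offsets in its own store might use this context when partitions are opened; readOffsetsFromStore() is a hypothetical helper, and the fragment assumes a SinkTask subclass like the one sketched earlier.

@Override
public void open(Collection<TopicPartition> partitions) {
    // Rewind the consumer to offsets recorded in the sink system so delivery resumes where it left off.
    Map<TopicPartition, Long> stored = readOffsetsFromStore(partitions);
    context.offset(stored);
}

private Map<TopicPartition, Long> readOffsetsFromStore(Collection<TopicPartition> partitions) {
    // Placeholder: a real implementation would query the sink data store.
    return java.util.Collections.emptyMap();
}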
@@ -0,0 +1,27 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.source;
|
||||
|
||||
import org.apache.kafka.connect.connector.Connector;
|
||||
|
||||
/**
|
||||
* SourceConnectors implement the connector interface to pull data from another system and send
|
||||
* it to Kafka.
|
||||
*/
|
||||
public abstract class SourceConnector extends Connector {
|
||||
|
||||
}
|
||||
@@ -0,0 +1,136 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.source;
|
||||
|
||||
import org.apache.kafka.connect.connector.ConnectRecord;
|
||||
import org.apache.kafka.connect.data.Schema;
|
||||
import org.apache.kafka.connect.header.Header;
|
||||
|
||||
import java.util.Map;
|
||||
import java.util.Objects;
|
||||
|
||||
/**
|
||||
* <p>
|
||||
* SourceRecords are generated by SourceTasks and passed to Kafka Connect for storage in
|
||||
* Kafka. In addition to the standard fields in {@link ConnectRecord} which specify where data is stored
|
||||
* in Kafka, they also include a sourcePartition and sourceOffset.
|
||||
* </p>
|
||||
* <p>
|
||||
* The sourcePartition represents a single input sourcePartition that the record came from (e.g. a filename, table
|
||||
* name, or topic-partition). The sourceOffset represents a position in that sourcePartition which can be used
|
||||
* to resume consumption of data.
|
||||
* </p>
|
||||
* <p>
|
||||
* These values can have arbitrary structure and should be represented using
|
||||
* org.apache.kafka.connect.data objects (or primitive values). For example, a database connector
|
||||
* might specify the sourcePartition as a record containing { "db": "database_name", "table":
|
||||
* "table_name"} and the sourceOffset as a Long containing the timestamp of the row.
|
||||
* </p>
|
||||
*/
|
||||
public class SourceRecord extends ConnectRecord<SourceRecord> {
|
||||
private final Map<String, ?> sourcePartition;
|
||||
private final Map<String, ?> sourceOffset;
|
||||
|
||||
public SourceRecord(Map<String, ?> sourcePartition, Map<String, ?> sourceOffset,
|
||||
String topic, Integer partition, Schema valueSchema, Object value) {
|
||||
this(sourcePartition, sourceOffset, topic, partition, null, null, valueSchema, value);
|
||||
}
|
||||
|
||||
public SourceRecord(Map<String, ?> sourcePartition, Map<String, ?> sourceOffset,
|
||||
String topic, Schema valueSchema, Object value) {
|
||||
this(sourcePartition, sourceOffset, topic, null, null, null, valueSchema, value);
|
||||
}
|
||||
|
||||
public SourceRecord(Map<String, ?> sourcePartition, Map<String, ?> sourceOffset,
|
||||
String topic, Schema keySchema, Object key, Schema valueSchema, Object value) {
|
||||
this(sourcePartition, sourceOffset, topic, null, keySchema, key, valueSchema, value);
|
||||
}
|
||||
|
||||
public SourceRecord(Map<String, ?> sourcePartition, Map<String, ?> sourceOffset,
|
||||
String topic, Integer partition,
|
||||
Schema keySchema, Object key, Schema valueSchema, Object value) {
|
||||
this(sourcePartition, sourceOffset, topic, partition, keySchema, key, valueSchema, value, null);
|
||||
}
|
||||
|
||||
public SourceRecord(Map<String, ?> sourcePartition, Map<String, ?> sourceOffset,
|
||||
String topic, Integer partition,
|
||||
Schema keySchema, Object key,
|
||||
Schema valueSchema, Object value,
|
||||
Long timestamp) {
|
||||
this(sourcePartition, sourceOffset, topic, partition, keySchema, key, valueSchema, value, timestamp, null);
|
||||
}
|
||||
|
||||
public SourceRecord(Map<String, ?> sourcePartition, Map<String, ?> sourceOffset,
|
||||
String topic, Integer partition,
|
||||
Schema keySchema, Object key,
|
||||
Schema valueSchema, Object value,
|
||||
Long timestamp, Iterable<Header> headers) {
|
||||
super(topic, partition, keySchema, key, valueSchema, value, timestamp, headers);
|
||||
this.sourcePartition = sourcePartition;
|
||||
this.sourceOffset = sourceOffset;
|
||||
}
|
||||
|
||||
public Map<String, ?> sourcePartition() {
|
||||
return sourcePartition;
|
||||
}
|
||||
|
||||
public Map<String, ?> sourceOffset() {
|
||||
return sourceOffset;
|
||||
}
|
||||
|
||||
@Override
|
||||
public SourceRecord newRecord(String topic, Integer kafkaPartition, Schema keySchema, Object key, Schema valueSchema, Object value, Long timestamp) {
|
||||
return newRecord(topic, kafkaPartition, keySchema, key, valueSchema, value, timestamp, headers().duplicate());
|
||||
}
|
||||
|
||||
@Override
|
||||
public SourceRecord newRecord(String topic, Integer kafkaPartition, Schema keySchema, Object key, Schema valueSchema, Object value,
|
||||
Long timestamp, Iterable<Header> headers) {
|
||||
return new SourceRecord(sourcePartition, sourceOffset, topic, kafkaPartition, keySchema, key, valueSchema, value, timestamp, headers);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (this == o)
|
||||
return true;
|
||||
if (o == null || getClass() != o.getClass())
|
||||
return false;
|
||||
if (!super.equals(o))
|
||||
return false;
|
||||
|
||||
SourceRecord that = (SourceRecord) o;
|
||||
|
||||
return Objects.equals(sourcePartition, that.sourcePartition) &&
|
||||
Objects.equals(sourceOffset, that.sourceOffset);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
int result = super.hashCode();
|
||||
result = 31 * result + (sourcePartition != null ? sourcePartition.hashCode() : 0);
|
||||
result = 31 * result + (sourceOffset != null ? sourceOffset.hashCode() : 0);
|
||||
return result;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "SourceRecord{" +
|
||||
"sourcePartition=" + sourcePartition +
|
||||
", sourceOffset=" + sourceOffset +
|
||||
"} " + super.toString();
|
||||
}
|
||||
}
|
||||
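Following the database example in the class documentation, a hypothetical table-polling task might build records like this (Collections, Schema, and SourceRecord imports assumed):

Map<String, ?> sourcePartition = Collections.singletonMap("table", "orders");
Map<String, ?> sourceOffset = Collections.singletonMap("timestamp", 1609459200000L);

SourceRecord record = new SourceRecord(
        sourcePartition, sourceOffset,
        "orders-topic",                            // Kafka topic to write to
        null,                                      // let Kafka pick the partition
        Schema.STRING_SCHEMA, "order-1001",        // key schema and key
        Schema.STRING_SCHEMA, "{\"amount\": 42}"); // value schema and value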
@@ -0,0 +1,139 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.source;
|
||||
|
||||
import org.apache.kafka.connect.connector.Task;
|
||||
import org.apache.kafka.clients.producer.RecordMetadata;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
/**
|
||||
* SourceTask is a Task that pulls records from another system for storage in Kafka.
|
||||
*/
|
||||
public abstract class SourceTask implements Task {
|
||||
|
||||
protected SourceTaskContext context;
|
||||
|
||||
/**
|
||||
* Initialize this SourceTask with the specified context object.
|
||||
*/
|
||||
public void initialize(SourceTaskContext context) {
|
||||
this.context = context;
|
||||
}
|
||||
|
||||
/**
|
||||
* Start the Task. This should handle any configuration parsing and one-time setup of the task.
|
||||
* @param props initial configuration
|
||||
*/
|
||||
@Override
|
||||
public abstract void start(Map<String, String> props);
|
||||
|
||||
/**
|
||||
* <p>
|
||||
* Poll this source task for new records. If no data is currently available, this method
|
||||
* should block but return control to the caller regularly (by returning {@code null}) in
|
||||
* order for the task to transition to the {@code PAUSED} state if requested to do so.
|
||||
* </p>
|
||||
* <p>
|
||||
* The task will be {@link #stop() stopped} on a separate thread, and when that happens
|
||||
* this method is expected to unblock, quickly finish up any remaining processing, and
|
||||
* return.
|
||||
* </p>
|
||||
*
|
||||
* @return a list of source records
|
||||
*/
|
||||
public abstract List<SourceRecord> poll() throws InterruptedException;
|
||||
|
||||
/**
|
||||
* <p>
|
||||
* Commit the offsets, up to the offsets that have been returned by {@link #poll()}. This
|
||||
* method should block until the commit is complete.
|
||||
* </p>
|
||||
* <p>
|
||||
* SourceTasks are not required to implement this functionality; Kafka Connect will record offsets
|
||||
* automatically. This hook is provided for systems that also need to store offsets internally
|
||||
* in their own system.
|
||||
* </p>
|
||||
*/
|
||||
public void commit() throws InterruptedException {
|
||||
// This space intentionally left blank.
|
||||
}
|
||||
|
||||
/**
|
||||
* Signal this SourceTask to stop. In SourceTasks, this method only needs to signal to the task that it should stop
|
||||
* trying to poll for new data and interrupt any outstanding poll() requests. It is not required that the task has
|
||||
* fully stopped. Note that this method necessarily may be invoked from a different thread than {@link #poll()} and
|
||||
* {@link #commit()}.
|
||||
*
|
||||
* For example, if a task uses a {@link java.nio.channels.Selector} to receive data over the network, this method
|
||||
* could set a flag that will force {@link #poll()} to exit immediately and invoke
|
||||
* {@link java.nio.channels.Selector#wakeup() wakeup()} to interrupt any ongoing requests.
|
||||
*/
|
||||
@Override
|
||||
public abstract void stop();
|
||||
|
||||
/**
|
||||
* <p>
|
||||
* Commit an individual {@link SourceRecord} when the callback from the producer client is received. This method is
|
||||
* also called when a record is filtered by a transformation, and thus will never be ACK'd by a broker.
|
||||
* </p>
|
||||
* <p>
|
||||
 * This is an alias for {@link #commitRecord(SourceRecord, RecordMetadata)} for backwards compatibility. The default
 * implementation of {@link #commitRecord(SourceRecord, RecordMetadata)} just calls this method. It is not necessary
|
||||
* to override both methods.
|
||||
* </p>
|
||||
* <p>
|
||||
* SourceTasks are not required to implement this functionality; Kafka Connect will record offsets
|
||||
* automatically. This hook is provided for systems that also need to store offsets internally
|
||||
* in their own system.
|
||||
* </p>
|
||||
*
|
||||
* @param record {@link SourceRecord} that was successfully sent via the producer or filtered by a transformation
|
||||
* @throws InterruptedException
|
||||
 * @see #commitRecord(SourceRecord, RecordMetadata)
|
||||
*/
|
||||
public void commitRecord(SourceRecord record) throws InterruptedException {
|
||||
// This space intentionally left blank.
|
||||
}
|
||||
|
||||
/**
|
||||
* <p>
|
||||
* Commit an individual {@link SourceRecord} when the callback from the producer client is received. This method is
|
||||
* also called when a record is filtered by a transformation, and thus will never be ACK'd by a broker. In this case
|
||||
* {@code metadata} will be null.
|
||||
* </p>
|
||||
* <p>
|
||||
* SourceTasks are not required to implement this functionality; Kafka Connect will record offsets
|
||||
* automatically. This hook is provided for systems that also need to store offsets internally
|
||||
* in their own system.
|
||||
* </p>
|
||||
* <p>
|
||||
 * The default implementation just calls {@link #commitRecord(SourceRecord)}, which is a no-op by default. It is
|
||||
* not necessary to implement both methods.
|
||||
* </p>
|
||||
*
|
||||
* @param record {@link SourceRecord} that was successfully sent via the producer or filtered by a transformation
|
||||
* @param metadata {@link RecordMetadata} record metadata returned from the broker, or null if the record was filtered
|
||||
* @throws InterruptedException
|
||||
*/
|
||||
public void commitRecord(SourceRecord record, RecordMetadata metadata)
|
||||
throws InterruptedException {
|
||||
// by default, just call the other method for backwards compatibility
|
||||
commitRecord(record);
|
||||
}
|
||||
}
|
||||
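A minimal hypothetical SourceTask sketch showing the poll/stop contract, and how a stored offset can be read back in start() and carried on each emitted record:

package org.apache.kafka.connect.source.example; // hypothetical package for this sketch

import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.source.SourceRecord;
import org.apache.kafka.connect.source.SourceTask;

import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicBoolean;

public class CounterSourceTask extends SourceTask {

    private final AtomicBoolean stopped = new AtomicBoolean(false);
    private long sequence = 0L;

    @Override
    public String version() {
        return "1.0.0";
    }

    @Override
    public void start(Map<String, String> props) {
        // resume from a previously committed offset, if one exists
        Map<String, Object> offset = context.offsetStorageReader()
                .offset(Collections.singletonMap("source", "counter"));
        if (offset != null && offset.get("sequence") != null) {
            sequence = ((Number) offset.get("sequence")).longValue();
        }
    }

    @Override
    public List<SourceRecord> poll() throws InterruptedException {
        if (stopped.get()) {
            return null;
        }
        Thread.sleep(1000); // block briefly, then return control to the caller
        sequence++;
        return Collections.singletonList(new SourceRecord(
                Collections.singletonMap("source", "counter"),
                Collections.singletonMap("sequence", sequence),
                "counter-topic", Schema.INT64_SCHEMA, sequence));
    }

    @Override
    public void stop() {
        stopped.set(true);
    }
}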
@@ -0,0 +1,41 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.source;
|
||||
|
||||
import org.apache.kafka.connect.storage.OffsetStorageReader;
|
||||
|
||||
import java.util.Map;
|
||||
|
||||
/**
|
||||
* SourceTaskContext is provided to SourceTasks to allow them to interact with the underlying
|
||||
* runtime.
|
||||
*/
|
||||
public interface SourceTaskContext {
|
||||
/**
|
||||
* Get the Task configuration. This is the latest configuration and may differ from that passed on startup.
|
||||
*
|
||||
* For example, this method can be used to obtain the latest configuration if an external secret has changed,
|
||||
* and the configuration is using variable references such as those compatible with
|
||||
* {@link org.apache.kafka.common.config.ConfigTransformer}.
|
||||
*/
|
||||
public Map<String, String> configs();
|
||||
|
||||
/**
|
||||
* Get the OffsetStorageReader for this SourceTask.
|
||||
*/
|
||||
OffsetStorageReader offsetStorageReader();
|
||||
}
|
||||
@@ -0,0 +1,89 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.storage;
|
||||
|
||||
import org.apache.kafka.common.header.Headers;
|
||||
import org.apache.kafka.connect.data.Schema;
|
||||
import org.apache.kafka.connect.data.SchemaAndValue;
|
||||
|
||||
import java.util.Map;
|
||||
|
||||
/**
|
||||
* The Converter interface provides support for translating between Kafka Connect's runtime data format
|
||||
* and byte[]. Internally, this likely includes an intermediate step to the format used by the serialization
|
||||
* layer (e.g. JsonNode, GenericRecord, Message).
|
||||
*/
|
||||
public interface Converter {
|
||||
|
||||
/**
|
||||
* Configure this class.
|
||||
* @param configs configs in key/value pairs
|
||||
 * @param isKey whether the converter is being applied to a record key or a record value
|
||||
*/
|
||||
void configure(Map<String, ?> configs, boolean isKey);
|
||||
|
||||
/**
|
||||
* Convert a Kafka Connect data object to a native object for serialization.
|
||||
* @param topic the topic associated with the data
|
||||
* @param schema the schema for the value
|
||||
* @param value the value to convert
|
||||
* @return the serialized value
|
||||
*/
|
||||
byte[] fromConnectData(String topic, Schema schema, Object value);
|
||||
|
||||
/**
|
||||
* Convert a Kafka Connect data object to a native object for serialization,
|
||||
* potentially using the supplied topic and headers in the record as necessary.
|
||||
*
|
||||
* <p>Connect uses this method directly, and for backward compatibility reasons this method
|
||||
* by default will call the {@link #fromConnectData(String, Schema, Object)} method.
|
||||
* Override this method to make use of the supplied headers.</p>
|
||||
* @param topic the topic associated with the data
|
||||
* @param headers the headers associated with the data; any changes done to the headers
|
||||
* are applied to the message sent to the broker
|
||||
* @param schema the schema for the value
|
||||
* @param value the value to convert
|
||||
* @return the serialized value
|
||||
*/
|
||||
default byte[] fromConnectData(String topic, Headers headers, Schema schema, Object value) {
|
||||
return fromConnectData(topic, schema, value);
|
||||
}
|
||||
|
||||
/**
|
||||
* Convert a native object to a Kafka Connect data object.
|
||||
* @param topic the topic associated with the data
|
||||
* @param value the value to convert
|
||||
* @return an object containing the {@link Schema} and the converted value
|
||||
*/
|
||||
SchemaAndValue toConnectData(String topic, byte[] value);
|
||||
|
||||
/**
|
||||
* Convert a native object to a Kafka Connect data object,
|
||||
* potentially using the supplied topic and headers in the record as necessary.
|
||||
*
|
||||
* <p>Connect uses this method directly, and for backward compatibility reasons this method
|
||||
* by default will call the {@link #toConnectData(String, byte[])} method.
|
||||
* Override this method to make use of the supplied headers.</p>
|
||||
* @param topic the topic associated with the data
|
||||
* @param headers the headers associated with the data
|
||||
* @param value the value to convert
|
||||
* @return an object containing the {@link Schema} and the converted value
|
||||
*/
|
||||
default SchemaAndValue toConnectData(String topic, Headers headers, byte[] value) {
|
||||
return toConnectData(topic, value);
|
||||
}
|
||||
}
|
||||
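As a sketch, the smallest useful Converter simply passes raw bytes through unchanged; the class below is hypothetical and only handles byte[] values.

package org.apache.kafka.connect.storage.example; // hypothetical package for this sketch

import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaAndValue;
import org.apache.kafka.connect.storage.Converter;

import java.util.Map;

public class PassThroughConverter implements Converter {

    @Override
    public void configure(Map<String, ?> configs, boolean isKey) {
        // nothing to configure for this trivial converter
    }

    @Override
    public byte[] fromConnectData(String topic, Schema schema, Object value) {
        // only byte[] values are supported here; the schema is ignored
        return (byte[]) value;
    }

    @Override
    public SchemaAndValue toConnectData(String topic, byte[] value) {
        return new SchemaAndValue(Schema.OPTIONAL_BYTES_SCHEMA, value);
    }
}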
@@ -0,0 +1,58 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.storage;
|
||||
|
||||
import org.apache.kafka.common.config.AbstractConfig;
|
||||
import org.apache.kafka.common.config.ConfigDef;
|
||||
import org.apache.kafka.common.config.ConfigDef.Importance;
|
||||
import org.apache.kafka.common.config.ConfigDef.Type;
|
||||
|
||||
import java.util.Map;
|
||||
|
||||
import static org.apache.kafka.common.config.ConfigDef.ValidString.in;
|
||||
|
||||
/**
|
||||
* Abstract class that defines the configuration options for {@link Converter} and {@link HeaderConverter} instances.
|
||||
*/
|
||||
public abstract class ConverterConfig extends AbstractConfig {
|
||||
|
||||
public static final String TYPE_CONFIG = "converter.type";
|
||||
private static final String TYPE_DOC = "How this converter will be used.";
|
||||
|
||||
/**
|
||||
* Create a new {@link ConfigDef} instance containing the configurations defined by ConverterConfig. This can be called by subclasses.
|
||||
*
|
||||
* @return the ConfigDef; never null
|
||||
*/
|
||||
public static ConfigDef newConfigDef() {
|
||||
return new ConfigDef().define(TYPE_CONFIG, Type.STRING, ConfigDef.NO_DEFAULT_VALUE,
|
||||
in(ConverterType.KEY.getName(), ConverterType.VALUE.getName(), ConverterType.HEADER.getName()),
|
||||
Importance.LOW, TYPE_DOC);
|
||||
}
|
||||
|
||||
protected ConverterConfig(ConfigDef configDef, Map<String, ?> props) {
|
||||
super(configDef, props, true);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the type of converter as defined by the {@link #TYPE_CONFIG} configuration.
|
||||
* @return the converter type; never null
|
||||
*/
|
||||
public ConverterType type() {
|
||||
return ConverterType.withName(getString(TYPE_CONFIG));
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,64 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.storage;
|
||||
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.Locale;
|
||||
import java.util.Map;
|
||||
|
||||
/**
|
||||
* The type of {@link Converter} and {@link HeaderConverter}.
|
||||
*/
|
||||
public enum ConverterType {
|
||||
KEY,
|
||||
VALUE,
|
||||
HEADER;
|
||||
|
||||
private static final Map<String, ConverterType> NAME_TO_TYPE;
|
||||
|
||||
static {
|
||||
ConverterType[] types = ConverterType.values();
|
||||
Map<String, ConverterType> nameToType = new HashMap<>(types.length);
|
||||
for (ConverterType type : types) {
|
||||
nameToType.put(type.name, type);
|
||||
}
|
||||
NAME_TO_TYPE = Collections.unmodifiableMap(nameToType);
|
||||
}
|
||||
|
||||
/**
|
||||
* Find the ConverterType with the given name, using a case-insensitive match.
|
||||
* @param name the name of the converter type; may be null
|
||||
* @return the matching converter type, or null if the supplied name is null or does not match the name of the known types
|
||||
*/
|
||||
public static ConverterType withName(String name) {
|
||||
if (name == null) {
|
||||
return null;
|
||||
}
|
||||
return NAME_TO_TYPE.get(name.toLowerCase(Locale.getDefault()));
|
||||
}
|
||||
|
||||
private String name;
|
||||
|
||||
ConverterType() {
|
||||
this.name = this.name().toLowerCase(Locale.ROOT);
|
||||
}
|
||||
|
||||
public String getName() {
|
||||
return name;
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,53 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.storage;
|
||||
|
||||
import org.apache.kafka.common.Configurable;
|
||||
import org.apache.kafka.common.config.ConfigDef;
|
||||
import org.apache.kafka.connect.data.Schema;
|
||||
import org.apache.kafka.connect.data.SchemaAndValue;
|
||||
import org.apache.kafka.connect.header.Header;
|
||||
|
||||
import java.io.Closeable;
|
||||
|
||||
public interface HeaderConverter extends Configurable, Closeable {
|
||||
|
||||
/**
|
||||
* Convert the header name and byte array value into a {@link Header} object.
|
||||
* @param topic the name of the topic for the record containing the header
|
||||
* @param headerKey the header's key; may not be null
|
||||
* @param value the header's raw value; may be null
|
||||
* @return the {@link SchemaAndValue}; may not be null
|
||||
*/
|
||||
SchemaAndValue toConnectHeader(String topic, String headerKey, byte[] value);
|
||||
|
||||
/**
|
||||
* Convert the {@link Header}'s {@link Header#value() value} into its byte array representation.
|
||||
* @param topic the name of the topic for the record containing the header
|
||||
* @param headerKey the header's key; may not be null
|
||||
* @param schema the schema for the header's value; may be null
|
||||
* @param value the header's value to convert; may be null
|
||||
* @return the byte array form of the Header's value; may be null if the value is null
|
||||
*/
|
||||
byte[] fromConnectHeader(String topic, String headerKey, Schema schema, Object value);
|
||||
|
||||
/**
|
||||
* Configuration specification for this set of header converters.
|
||||
* @return the configuration specification; may not be null
|
||||
*/
|
||||
ConfigDef config();
|
||||
}
|
||||
@@ -0,0 +1,61 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.storage;
|
||||
|
||||
import java.util.Collection;
|
||||
import java.util.Map;
|
||||
|
||||
/**
|
||||
* <p>
|
||||
* OffsetStorageReader provides access to the offset storage used by sources. This can be used by
|
||||
* connectors to determine offsets to start consuming data from. This is most commonly used during
|
||||
* initialization of a task, but can also be used during runtime, e.g. when reconfiguring a task.
|
||||
* </p>
|
||||
* <p>
|
||||
* Offsets are always defined as Maps of Strings to primitive types, i.e. all types supported by
|
||||
* {@link org.apache.kafka.connect.data.Schema} other than Array, Map, and Struct.
|
||||
* </p>
|
||||
*/
|
||||
public interface OffsetStorageReader {
|
||||
/**
|
||||
* Get the offset for the specified partition. If the data isn't already available locally, this
|
||||
* gets it from the backing store, which may require some network round trips.
|
||||
*
|
||||
* @param partition object uniquely identifying the partition of data
|
||||
* @return object uniquely identifying the offset in the partition of data
|
||||
*/
|
||||
<T> Map<String, Object> offset(Map<String, T> partition);
|
||||
|
||||
/**
|
||||
* <p>
|
||||
* Get a set of offsets for the specified partition identifiers. This may be more efficient
|
||||
* than calling {@link #offset(Map)} repeatedly.
|
||||
* </p>
|
||||
* <p>
|
||||
* Note that when errors occur, this method omits the associated data and tries to return as
|
||||
* many of the requested values as possible. This allows a task that's managing many partitions to
|
||||
* still proceed with any available data. Therefore, implementations should take care to check
|
||||
* that the data is actually available in the returned response. The only case when an
|
||||
* exception will be thrown is if the entire request failed, e.g. because the underlying
|
||||
* storage was unavailable.
|
||||
* </p>
|
||||
*
|
||||
* @param partitions set of identifiers for partitions of data
|
||||
* @return a map of partition identifiers to decoded offsets
|
||||
*/
|
||||
<T> Map<Map<String, T>, Map<String, Object>> offsets(Collection<Map<String, T>> partitions);
|
||||
}
|
||||
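For instance, a source task that handles several tables could look up all of its stored offsets in one call; the partition maps below are illustrative, and the fragment assumes it runs inside a SourceTask with access to its context.

// Illustrative partition identifiers for two tables handled by the same task.
List<Map<String, String>> partitions = Arrays.asList(
        Collections.singletonMap("table", "orders"),
        Collections.singletonMap("table", "customers"));

Map<Map<String, String>, Map<String, Object>> offsets =
        context.offsetStorageReader().offsets(partitions);

// An entry may be missing if its offset could not be read, so check before using it.
Map<String, Object> ordersOffset = offsets.get(Collections.singletonMap("table", "orders"));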
@@ -0,0 +1,85 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.storage;
|
||||
|
||||
import org.apache.kafka.common.config.ConfigDef;
|
||||
import org.apache.kafka.connect.data.Schema;
|
||||
import org.apache.kafka.connect.data.SchemaAndValue;
|
||||
import org.apache.kafka.connect.data.Values;
|
||||
import org.apache.kafka.connect.errors.DataException;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.nio.charset.Charset;
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import java.util.Map;
|
||||
import java.util.NoSuchElementException;
|
||||
|
||||
/**
|
||||
* A {@link HeaderConverter} that serializes header values as strings and that deserializes header values to the most appropriate
|
||||
* numeric, boolean, array, or map representation. Schemas are not serialized, but are inferred upon deserialization when possible.
|
||||
*/
|
||||
public class SimpleHeaderConverter implements HeaderConverter {
|
||||
|
||||
private static final Logger LOG = LoggerFactory.getLogger(SimpleHeaderConverter.class);
|
||||
private static final ConfigDef CONFIG_DEF = new ConfigDef();
|
||||
private static final SchemaAndValue NULL_SCHEMA_AND_VALUE = new SchemaAndValue(null, null);
|
||||
private static final Charset UTF_8 = StandardCharsets.UTF_8;
|
||||
|
||||
@Override
|
||||
public ConfigDef config() {
|
||||
return CONFIG_DEF;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void configure(Map<String, ?> configs) {
|
||||
// do nothing
|
||||
}
|
||||
|
||||
@Override
|
||||
public SchemaAndValue toConnectHeader(String topic, String headerKey, byte[] value) {
|
||||
if (value == null) {
|
||||
return NULL_SCHEMA_AND_VALUE;
|
||||
}
|
||||
try {
|
||||
String str = new String(value, UTF_8);
|
||||
if (str.isEmpty()) {
|
||||
return new SchemaAndValue(Schema.STRING_SCHEMA, str);
|
||||
}
|
||||
return Values.parseString(str);
|
||||
} catch (NoSuchElementException e) {
|
||||
throw new DataException("Failed to deserialize value for header '" + headerKey + "' on topic '" + topic + "'", e);
|
||||
} catch (Throwable t) {
|
||||
LOG.warn("Failed to deserialize value for header '{}' on topic '{}', so using byte array", headerKey, topic, t);
|
||||
return new SchemaAndValue(Schema.BYTES_SCHEMA, value);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public byte[] fromConnectHeader(String topic, String headerKey, Schema schema, Object value) {
|
||||
if (value == null) {
|
||||
return null;
|
||||
}
|
||||
return Values.convertToString(schema, value).getBytes(UTF_8);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void close() throws IOException {
|
||||
// do nothing
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,109 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.connect.storage;

import org.apache.kafka.common.config.ConfigDef;
import org.apache.kafka.common.errors.SerializationException;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaAndValue;
import org.apache.kafka.connect.errors.DataException;

import java.util.HashMap;
import java.util.Map;

/**
 * {@link Converter} and {@link HeaderConverter} implementation that only supports serializing to strings. When converting Kafka Connect
 * data to bytes, the schema will be ignored and {@link Object#toString()} will always be invoked to convert the data to a String.
 * When converting from bytes to Kafka Connect format, the converter will only ever return an optional string schema and
 * a string or null.
 *
 * Encoding configuration is identical to {@link StringSerializer} and {@link StringDeserializer}, but for convenience
 * this class can also be configured to use the same encoding for both encoding and decoding with the
 * {@link StringConverterConfig#ENCODING_CONFIG converter.encoding} setting.
 *
 * This implementation currently does nothing with the topic names or header names.
 */
public class StringConverter implements Converter, HeaderConverter {

    private final StringSerializer serializer = new StringSerializer();
    private final StringDeserializer deserializer = new StringDeserializer();

    public StringConverter() {
    }

    @Override
    public ConfigDef config() {
        return StringConverterConfig.configDef();
    }

    @Override
    public void configure(Map<String, ?> configs) {
        StringConverterConfig conf = new StringConverterConfig(configs);
        String encoding = conf.encoding();

        Map<String, Object> serializerConfigs = new HashMap<>(configs);
        Map<String, Object> deserializerConfigs = new HashMap<>(configs);
        serializerConfigs.put("serializer.encoding", encoding);
        deserializerConfigs.put("deserializer.encoding", encoding);

        boolean isKey = conf.type() == ConverterType.KEY;
        serializer.configure(serializerConfigs, isKey);
        deserializer.configure(deserializerConfigs, isKey);
    }

    @Override
    public void configure(Map<String, ?> configs, boolean isKey) {
        Map<String, Object> conf = new HashMap<>(configs);
        conf.put(StringConverterConfig.TYPE_CONFIG, isKey ? ConverterType.KEY.getName() : ConverterType.VALUE.getName());
        configure(conf);
    }

    @Override
    public byte[] fromConnectData(String topic, Schema schema, Object value) {
        try {
            return serializer.serialize(topic, value == null ? null : value.toString());
        } catch (SerializationException e) {
            throw new DataException("Failed to serialize to a string: ", e);
        }
    }

    @Override
    public SchemaAndValue toConnectData(String topic, byte[] value) {
        try {
            return new SchemaAndValue(Schema.OPTIONAL_STRING_SCHEMA, deserializer.deserialize(topic, value));
        } catch (SerializationException e) {
            throw new DataException("Failed to deserialize string: ", e);
        }
    }

    @Override
    public byte[] fromConnectHeader(String topic, String headerKey, Schema schema, Object value) {
        return fromConnectData(topic, schema, value);
    }

    @Override
    public SchemaAndValue toConnectHeader(String topic, String headerKey, byte[] value) {
        return toConnectData(topic, value);
    }

    @Override
    public void close() {
        // do nothing
    }
}
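As an editorial illustration (not part of the original commit), the converter above can be exercised roughly as follows; the topic name is hypothetical and the default UTF-8 encoding is assumed:

    // Minimal round-trip sketch using StringConverter as a value converter.
    StringConverter converter = new StringConverter();
    converter.configure(new HashMap<String, Object>(), false);

    byte[] bytes = converter.fromConnectData("demo-topic", Schema.STRING_SCHEMA, "hello");
    SchemaAndValue restored = converter.toConnectData("demo-topic", bytes);
    // restored.schema() is Schema.OPTIONAL_STRING_SCHEMA and restored.value() is "hello";
    // a non-string value would have been rendered with toString() before serialization.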
@@ -0,0 +1,60 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.connect.storage;

import org.apache.kafka.common.config.ConfigDef;
import org.apache.kafka.common.config.ConfigDef.Importance;
import org.apache.kafka.common.config.ConfigDef.Type;
import org.apache.kafka.common.config.ConfigDef.Width;

import java.util.Map;

/**
 * Configuration options for {@link StringConverter} instances.
 */
public class StringConverterConfig extends ConverterConfig {

    public static final String ENCODING_CONFIG = "converter.encoding";
    public static final String ENCODING_DEFAULT = "UTF8";
    private static final String ENCODING_DOC = "The name of the Java character set to use for encoding strings as byte arrays.";
    private static final String ENCODING_DISPLAY = "Encoding";

    private final static ConfigDef CONFIG;

    static {
        CONFIG = ConverterConfig.newConfigDef();
        CONFIG.define(ENCODING_CONFIG, Type.STRING, ENCODING_DEFAULT, Importance.HIGH, ENCODING_DOC, null, -1, Width.MEDIUM,
                      ENCODING_DISPLAY);
    }

    public static ConfigDef configDef() {
        return CONFIG;
    }

    public StringConverterConfig(Map<String, ?> props) {
        super(CONFIG, props);
    }

    /**
     * Get the string encoding.
     *
     * @return the encoding; never null
     */
    public String encoding() {
        return getString(ENCODING_CONFIG);
    }
}
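A brief editorial aside (not in the original commit): the converter.encoding setting defined above can be supplied when configuring the converter directly; assuming the JVM supports the named charset, a sketch looks like:

    // Hypothetical example of overriding the default "UTF8" encoding.
    Map<String, Object> props = new HashMap<>();
    props.put(StringConverterConfig.ENCODING_CONFIG, "ISO-8859-1");

    StringConverter converter = new StringConverter();
    converter.configure(props, false);  // serializer.encoding and deserializer.encoding both pick up this value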
@@ -0,0 +1,47 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.connect.transforms;

import org.apache.kafka.common.Configurable;
import org.apache.kafka.common.config.ConfigDef;
import org.apache.kafka.connect.connector.ConnectRecord;

import java.io.Closeable;

/**
 * Single message transformation for Kafka Connect record types.
 *
 * Connectors can be configured with transformations to make lightweight message-at-a-time modifications.
 */
public interface Transformation<R extends ConnectRecord<R>> extends Configurable, Closeable {

    /**
     * Apply transformation to the {@code record} and return another record object (which may be {@code record} itself) or {@code null},
     * corresponding to a map or filter operation respectively.
     *
     * The implementation must be thread-safe.
     */
    R apply(R record);

    /** Configuration specification for this transformation. **/
    ConfigDef config();

    /** Signal that this transformation instance will no longer be used. **/
    @Override
    void close();

}
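To make the map/filter contract above concrete, here is an editorial sketch that is not part of this commit; the class name and behavior are hypothetical:

    // Hypothetical SMT: drop records with a null value (filter), pass everything else through (map).
    public class DropNullValues<R extends ConnectRecord<R>> implements Transformation<R> {

        @Override
        public void configure(Map<String, ?> configs) {
            // no configuration needed for this sketch
        }

        @Override
        public R apply(R record) {
            return record.value() == null ? null : record;
        }

        @Override
        public ConfigDef config() {
            return new ConfigDef();
        }

        @Override
        public void close() {
            // nothing to release
        }
    }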
@@ -0,0 +1,62 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.connect.util;

import java.util.ArrayList;
import java.util.List;

/**
 * Utilities that connector implementations might find useful. Contains common building blocks
 * for writing connectors.
 */
public class ConnectorUtils {
    /**
     * Given a list of elements and a target number of groups, generates a list of groups of
     * elements to match the target number of groups, spreading them evenly among the groups.
     * This generates groups with contiguous elements, which results in intuitive ordering if
     * your elements are also ordered (e.g. alphabetical lists of table names if you sort
     * table names alphabetically to generate the raw partitions) or can result in efficient
     * partitioning if elements are sorted according to some criteria that affects performance
     * (e.g. topic partitions with the same leader).
     *
     * @param elements list of elements to partition
     * @param numGroups the number of output groups to generate
     */
    public static <T> List<List<T>> groupPartitions(List<T> elements, int numGroups) {
        if (numGroups <= 0)
            throw new IllegalArgumentException("Number of groups must be positive.");

        List<List<T>> result = new ArrayList<>(numGroups);

        // Each group has either n+1 or n raw partitions
        int perGroup = elements.size() / numGroups;
        int leftover = elements.size() - (numGroups * perGroup);

        int assigned = 0;
        for (int group = 0; group < numGroups; group++) {
            int numThisGroup = group < leftover ? perGroup + 1 : perGroup;
            List<T> groupList = new ArrayList<>(numThisGroup);
            for (int i = 0; i < numThisGroup; i++) {
                groupList.add(elements.get(assigned));
                assigned++;
            }
            result.add(groupList);
        }

        return result;
    }
}
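For example (an editorial note, not in the original commit), five elements split into two groups stay contiguous, with the earlier groups receiving the extra elements:

    // 5 / 2 = 2 per group with 1 leftover, so group 0 gets 3 elements and group 1 gets 2.
    List<List<Integer>> groups = ConnectorUtils.groupPartitions(Arrays.asList(1, 2, 3, 4, 5), 2);
    // groups is [[1, 2, 3], [4, 5]]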
@@ -0,0 +1,88 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.connect.connector;

import org.apache.kafka.common.config.ConfigDef;
import org.apache.kafka.connect.errors.ConnectException;
import org.junit.Test;

import java.util.Collections;
import java.util.List;
import java.util.Map;

import static org.junit.Assert.assertEquals;

public class ConnectorReconfigurationTest {

    @Test
    public void testDefaultReconfigure() {
        TestConnector conn = new TestConnector(false);
        conn.reconfigure(Collections.<String, String>emptyMap());
        assertEquals(conn.stopOrder, 0);
        assertEquals(conn.configureOrder, 1);
    }

    @Test(expected = ConnectException.class)
    public void testReconfigureStopException() {
        TestConnector conn = new TestConnector(true);
        conn.reconfigure(Collections.<String, String>emptyMap());
    }

    private static class TestConnector extends Connector {

        private boolean stopException;
        private int order = 0;
        public int stopOrder = -1;
        public int configureOrder = -1;

        public TestConnector(boolean stopException) {
            this.stopException = stopException;
        }

        @Override
        public String version() {
            return "1.0";
        }

        @Override
        public void start(Map<String, String> props) {
            configureOrder = order++;
        }

        @Override
        public Class<? extends Task> taskClass() {
            return null;
        }

        @Override
        public List<Map<String, String>> taskConfigs(int count) {
            return null;
        }

        @Override
        public void stop() {
            stopOrder = order++;
            if (stopException)
                throw new ConnectException("error");
        }

        @Override
        public ConfigDef config() {
            return new ConfigDef();
        }
    }
}
@@ -0,0 +1,319 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.data;
|
||||
|
||||
import org.apache.kafka.connect.errors.DataException;
|
||||
import org.junit.Test;
|
||||
|
||||
import java.math.BigDecimal;
|
||||
import java.math.BigInteger;
|
||||
import java.nio.ByteBuffer;
|
||||
import java.nio.CharBuffer;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertNotEquals;
|
||||
import static org.junit.Assert.assertNull;
|
||||
|
||||
public class ConnectSchemaTest {
|
||||
private static final Schema MAP_INT_STRING_SCHEMA = SchemaBuilder.map(Schema.INT32_SCHEMA, Schema.STRING_SCHEMA).build();
|
||||
private static final Schema FLAT_STRUCT_SCHEMA = SchemaBuilder.struct()
|
||||
.field("field", Schema.INT32_SCHEMA)
|
||||
.build();
|
||||
private static final Schema STRUCT_SCHEMA = SchemaBuilder.struct()
|
||||
.field("first", Schema.INT32_SCHEMA)
|
||||
.field("second", Schema.STRING_SCHEMA)
|
||||
.field("array", SchemaBuilder.array(Schema.INT32_SCHEMA).build())
|
||||
.field("map", SchemaBuilder.map(Schema.INT32_SCHEMA, Schema.STRING_SCHEMA).build())
|
||||
.field("nested", FLAT_STRUCT_SCHEMA)
|
||||
.build();
|
||||
private static final Schema PARENT_STRUCT_SCHEMA = SchemaBuilder.struct()
|
||||
.field("nested", FLAT_STRUCT_SCHEMA)
|
||||
.build();
|
||||
|
||||
@Test
|
||||
public void testFieldsOnStructSchema() {
|
||||
Schema schema = SchemaBuilder.struct()
|
||||
.field("foo", Schema.BOOLEAN_SCHEMA)
|
||||
.field("bar", Schema.INT32_SCHEMA)
|
||||
.build();
|
||||
|
||||
assertEquals(2, schema.fields().size());
|
||||
// Validate field lookup by name
|
||||
Field foo = schema.field("foo");
|
||||
assertEquals(0, foo.index());
|
||||
Field bar = schema.field("bar");
|
||||
assertEquals(1, bar.index());
|
||||
// Any other field name should fail
|
||||
assertNull(schema.field("other"));
|
||||
}
|
||||
|
||||
|
||||
@Test(expected = DataException.class)
|
||||
public void testFieldsOnlyValidForStructs() {
|
||||
Schema.INT8_SCHEMA.fields();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testValidateValueMatchingType() {
|
||||
ConnectSchema.validateValue(Schema.INT8_SCHEMA, (byte) 1);
|
||||
ConnectSchema.validateValue(Schema.INT16_SCHEMA, (short) 1);
|
||||
ConnectSchema.validateValue(Schema.INT32_SCHEMA, 1);
|
||||
ConnectSchema.validateValue(Schema.INT64_SCHEMA, (long) 1);
|
||||
ConnectSchema.validateValue(Schema.FLOAT32_SCHEMA, 1.f);
|
||||
ConnectSchema.validateValue(Schema.FLOAT64_SCHEMA, 1.);
|
||||
ConnectSchema.validateValue(Schema.BOOLEAN_SCHEMA, true);
|
||||
ConnectSchema.validateValue(Schema.STRING_SCHEMA, "a string");
|
||||
ConnectSchema.validateValue(Schema.BYTES_SCHEMA, "a byte array".getBytes());
|
||||
ConnectSchema.validateValue(Schema.BYTES_SCHEMA, ByteBuffer.wrap("a byte array".getBytes()));
|
||||
ConnectSchema.validateValue(SchemaBuilder.array(Schema.INT32_SCHEMA).build(), Arrays.asList(1, 2, 3));
|
||||
ConnectSchema.validateValue(
|
||||
SchemaBuilder.map(Schema.INT32_SCHEMA, Schema.STRING_SCHEMA).build(),
|
||||
Collections.singletonMap(1, "value")
|
||||
);
|
||||
// Struct tests the basic struct layout + complex field types + nested structs
|
||||
Struct structValue = new Struct(STRUCT_SCHEMA)
|
||||
.put("first", 1)
|
||||
.put("second", "foo")
|
||||
.put("array", Arrays.asList(1, 2, 3))
|
||||
.put("map", Collections.singletonMap(1, "value"))
|
||||
.put("nested", new Struct(FLAT_STRUCT_SCHEMA).put("field", 12));
|
||||
ConnectSchema.validateValue(STRUCT_SCHEMA, structValue);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testValidateValueMatchingLogicalType() {
|
||||
ConnectSchema.validateValue(Decimal.schema(2), new BigDecimal(new BigInteger("156"), 2));
|
||||
ConnectSchema.validateValue(Date.SCHEMA, new java.util.Date(0));
|
||||
ConnectSchema.validateValue(Time.SCHEMA, new java.util.Date(0));
|
||||
ConnectSchema.validateValue(Timestamp.SCHEMA, new java.util.Date(0));
|
||||
}
|
||||
|
||||
// To avoid requiring excessive numbers of tests, these checks for invalid types use a similar type where possible
|
||||
// to only include a single test for each type
|
||||
|
||||
@Test(expected = DataException.class)
|
||||
public void testValidateValueMismatchInt8() {
|
||||
ConnectSchema.validateValue(Schema.INT8_SCHEMA, 1);
|
||||
}
|
||||
|
||||
@Test(expected = DataException.class)
|
||||
public void testValidateValueMismatchInt16() {
|
||||
ConnectSchema.validateValue(Schema.INT16_SCHEMA, 1);
|
||||
}
|
||||
|
||||
@Test(expected = DataException.class)
|
||||
public void testValidateValueMismatchInt32() {
|
||||
ConnectSchema.validateValue(Schema.INT32_SCHEMA, (long) 1);
|
||||
}
|
||||
|
||||
@Test(expected = DataException.class)
|
||||
public void testValidateValueMismatchInt64() {
|
||||
ConnectSchema.validateValue(Schema.INT64_SCHEMA, 1);
|
||||
}
|
||||
|
||||
@Test(expected = DataException.class)
|
||||
public void testValidateValueMismatchFloat() {
|
||||
ConnectSchema.validateValue(Schema.FLOAT32_SCHEMA, 1.0);
|
||||
}
|
||||
|
||||
@Test(expected = DataException.class)
|
||||
public void testValidateValueMismatchDouble() {
|
||||
ConnectSchema.validateValue(Schema.FLOAT64_SCHEMA, 1.f);
|
||||
}
|
||||
|
||||
@Test(expected = DataException.class)
|
||||
public void testValidateValueMismatchBoolean() {
|
||||
ConnectSchema.validateValue(Schema.BOOLEAN_SCHEMA, 1.f);
|
||||
}
|
||||
|
||||
@Test(expected = DataException.class)
|
||||
public void testValidateValueMismatchString() {
|
||||
// CharSequence is a similar type (supertype of String), but we restrict to String.
|
||||
CharBuffer cbuf = CharBuffer.wrap("abc");
|
||||
ConnectSchema.validateValue(Schema.STRING_SCHEMA, cbuf);
|
||||
}
|
||||
|
||||
@Test(expected = DataException.class)
|
||||
public void testValidateValueMismatchBytes() {
|
||||
ConnectSchema.validateValue(Schema.BYTES_SCHEMA, new Object[]{1, "foo"});
|
||||
}
|
||||
|
||||
@Test(expected = DataException.class)
|
||||
public void testValidateValueMismatchArray() {
|
||||
ConnectSchema.validateValue(SchemaBuilder.array(Schema.INT32_SCHEMA).build(), Arrays.asList("a", "b", "c"));
|
||||
}
|
||||
|
||||
@Test(expected = DataException.class)
|
||||
public void testValidateValueMismatchArraySomeMatch() {
|
||||
// Even if some match the right type, this should fail if any mismatch. In this case, type erasure loses
|
||||
// the fact that the list is actually List<Object>, but we couldn't tell if only checking the first element
|
||||
ConnectSchema.validateValue(SchemaBuilder.array(Schema.INT32_SCHEMA).build(), Arrays.asList(1, 2, "c"));
|
||||
}
|
||||
|
||||
@Test(expected = DataException.class)
|
||||
public void testValidateValueMismatchMapKey() {
|
||||
ConnectSchema.validateValue(MAP_INT_STRING_SCHEMA, Collections.singletonMap("wrong key type", "value"));
|
||||
}
|
||||
|
||||
@Test(expected = DataException.class)
|
||||
public void testValidateValueMismatchMapValue() {
|
||||
ConnectSchema.validateValue(MAP_INT_STRING_SCHEMA, Collections.singletonMap(1, 2));
|
||||
}
|
||||
|
||||
@Test(expected = DataException.class)
|
||||
public void testValidateValueMismatchMapSomeKeys() {
|
||||
Map<Object, String> data = new HashMap<>();
|
||||
data.put(1, "abc");
|
||||
data.put("wrong", "it's as easy as one two three");
|
||||
ConnectSchema.validateValue(MAP_INT_STRING_SCHEMA, data);
|
||||
}
|
||||
|
||||
@Test(expected = DataException.class)
|
||||
public void testValidateValueMismatchMapSomeValues() {
|
||||
Map<Integer, Object> data = new HashMap<>();
|
||||
data.put(1, "abc");
|
||||
data.put(2, "wrong".getBytes());
|
||||
ConnectSchema.validateValue(MAP_INT_STRING_SCHEMA, data);
|
||||
}
|
||||
|
||||
@Test(expected = DataException.class)
|
||||
public void testValidateValueMismatchStructWrongSchema() {
|
||||
// Completely mismatching schemas
|
||||
ConnectSchema.validateValue(
|
||||
FLAT_STRUCT_SCHEMA,
|
||||
new Struct(SchemaBuilder.struct().field("x", Schema.INT32_SCHEMA).build()).put("x", 1)
|
||||
);
|
||||
}
|
||||
|
||||
@Test(expected = DataException.class)
|
||||
public void testValidateValueMismatchStructWrongNestedSchema() {
|
||||
// Top-level schema matches, but nested does not.
|
||||
ConnectSchema.validateValue(
|
||||
PARENT_STRUCT_SCHEMA,
|
||||
new Struct(PARENT_STRUCT_SCHEMA)
|
||||
.put("nested", new Struct(SchemaBuilder.struct().field("x", Schema.INT32_SCHEMA).build()).put("x", 1))
|
||||
);
|
||||
}
|
||||
|
||||
@Test(expected = DataException.class)
|
||||
public void testValidateValueMismatchDecimal() {
|
||||
ConnectSchema.validateValue(Decimal.schema(2), new BigInteger("156"));
|
||||
}
|
||||
|
||||
@Test(expected = DataException.class)
|
||||
public void testValidateValueMismatchDate() {
|
||||
ConnectSchema.validateValue(Date.SCHEMA, 1000L);
|
||||
}
|
||||
|
||||
@Test(expected = DataException.class)
|
||||
public void testValidateValueMismatchTime() {
|
||||
ConnectSchema.validateValue(Time.SCHEMA, 1000L);
|
||||
}
|
||||
|
||||
@Test(expected = DataException.class)
|
||||
public void testValidateValueMismatchTimestamp() {
|
||||
ConnectSchema.validateValue(Timestamp.SCHEMA, 1000L);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testPrimitiveEquality() {
|
||||
// Test that primitive types, which only need to consider all the type & metadata fields, handle equality correctly
|
||||
ConnectSchema s1 = new ConnectSchema(Schema.Type.INT8, false, null, "name", 2, "doc");
|
||||
ConnectSchema s2 = new ConnectSchema(Schema.Type.INT8, false, null, "name", 2, "doc");
|
||||
ConnectSchema differentType = new ConnectSchema(Schema.Type.INT16, false, null, "name", 2, "doc");
|
||||
ConnectSchema differentOptional = new ConnectSchema(Schema.Type.INT8, true, null, "name", 2, "doc");
|
||||
ConnectSchema differentDefault = new ConnectSchema(Schema.Type.INT8, false, true, "name", 2, "doc");
|
||||
ConnectSchema differentName = new ConnectSchema(Schema.Type.INT8, false, null, "otherName", 2, "doc");
|
||||
ConnectSchema differentVersion = new ConnectSchema(Schema.Type.INT8, false, null, "name", 4, "doc");
|
||||
ConnectSchema differentDoc = new ConnectSchema(Schema.Type.INT8, false, null, "name", 2, "other doc");
|
||||
ConnectSchema differentParameters = new ConnectSchema(Schema.Type.INT8, false, null, "name", 2, "doc", Collections.singletonMap("param", "value"), null, null, null);
|
||||
|
||||
assertEquals(s1, s2);
|
||||
assertNotEquals(s1, differentType);
|
||||
assertNotEquals(s1, differentOptional);
|
||||
assertNotEquals(s1, differentDefault);
|
||||
assertNotEquals(s1, differentName);
|
||||
assertNotEquals(s1, differentVersion);
|
||||
assertNotEquals(s1, differentDoc);
|
||||
assertNotEquals(s1, differentParameters);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testArrayEquality() {
|
||||
// Validate that the value type for the array is tested for equality. This test makes sure the same schema object is
|
||||
// never reused to ensure we're actually checking equality
|
||||
ConnectSchema s1 = new ConnectSchema(Schema.Type.ARRAY, false, null, null, null, null, null, null, null, SchemaBuilder.int8().build());
|
||||
ConnectSchema s2 = new ConnectSchema(Schema.Type.ARRAY, false, null, null, null, null, null, null, null, SchemaBuilder.int8().build());
|
||||
ConnectSchema differentValueSchema = new ConnectSchema(Schema.Type.ARRAY, false, null, null, null, null, null, null, null, SchemaBuilder.int16().build());
|
||||
|
||||
assertEquals(s1, s2);
|
||||
assertNotEquals(s1, differentValueSchema);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testArrayDefaultValueEquality() {
|
||||
ConnectSchema s1 = new ConnectSchema(Schema.Type.ARRAY, false, new String[] {"a", "b"}, null, null, null, null, null, null, SchemaBuilder.int8().build());
|
||||
ConnectSchema s2 = new ConnectSchema(Schema.Type.ARRAY, false, new String[] {"a", "b"}, null, null, null, null, null, null, SchemaBuilder.int8().build());
|
||||
ConnectSchema differentValueSchema = new ConnectSchema(Schema.Type.ARRAY, false, new String[] {"b", "c"}, null, null, null, null, null, null, SchemaBuilder.int8().build());
|
||||
|
||||
assertEquals(s1, s2);
|
||||
assertNotEquals(s1, differentValueSchema);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testMapEquality() {
|
||||
// Same as testArrayEquality, but for both key and value schemas
|
||||
ConnectSchema s1 = new ConnectSchema(Schema.Type.MAP, false, null, null, null, null, null, null, SchemaBuilder.int8().build(), SchemaBuilder.int16().build());
|
||||
ConnectSchema s2 = new ConnectSchema(Schema.Type.MAP, false, null, null, null, null, null, null, SchemaBuilder.int8().build(), SchemaBuilder.int16().build());
|
||||
ConnectSchema differentKeySchema = new ConnectSchema(Schema.Type.MAP, false, null, null, null, null, null, null, SchemaBuilder.string().build(), SchemaBuilder.int16().build());
|
||||
ConnectSchema differentValueSchema = new ConnectSchema(Schema.Type.MAP, false, null, null, null, null, null, null, SchemaBuilder.int8().build(), SchemaBuilder.string().build());
|
||||
|
||||
assertEquals(s1, s2);
|
||||
assertNotEquals(s1, differentKeySchema);
|
||||
assertNotEquals(s1, differentValueSchema);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testStructEquality() {
|
||||
// Same as testArrayEquality, but checks differences in fields. Only does a simple check, relying on tests of
|
||||
// Field's equals() method to validate all variations in the list of fields will be checked
|
||||
ConnectSchema s1 = new ConnectSchema(Schema.Type.STRUCT, false, null, null, null, null, null,
|
||||
Arrays.asList(new Field("field", 0, SchemaBuilder.int8().build()),
|
||||
new Field("field2", 1, SchemaBuilder.int16().build())), null, null);
|
||||
ConnectSchema s2 = new ConnectSchema(Schema.Type.STRUCT, false, null, null, null, null, null,
|
||||
Arrays.asList(new Field("field", 0, SchemaBuilder.int8().build()),
|
||||
new Field("field2", 1, SchemaBuilder.int16().build())), null, null);
|
||||
ConnectSchema differentField = new ConnectSchema(Schema.Type.STRUCT, false, null, null, null, null, null,
|
||||
Arrays.asList(new Field("field", 0, SchemaBuilder.int8().build()),
|
||||
new Field("different field name", 1, SchemaBuilder.int16().build())), null, null);
|
||||
|
||||
assertEquals(s1, s2);
|
||||
assertNotEquals(s1, differentField);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testEmptyStruct() {
|
||||
final ConnectSchema emptyStruct = new ConnectSchema(Schema.Type.STRUCT, false, null, null, null, null);
|
||||
assertEquals(0, emptyStruct.fields().size());
|
||||
new Struct(emptyStruct);
|
||||
}
|
||||
|
||||
}
|
||||
@@ -0,0 +1,77 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.data;
|
||||
|
||||
import org.apache.kafka.connect.errors.DataException;
|
||||
import org.junit.Test;
|
||||
|
||||
import java.util.Calendar;
|
||||
import java.util.GregorianCalendar;
|
||||
import java.util.TimeZone;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
|
||||
public class DateTest {
|
||||
private static final GregorianCalendar EPOCH;
|
||||
private static final GregorianCalendar EPOCH_PLUS_TEN_THOUSAND_DAYS;
|
||||
private static final GregorianCalendar EPOCH_PLUS_TIME_COMPONENT;
|
||||
static {
|
||||
EPOCH = new GregorianCalendar(1970, Calendar.JANUARY, 1, 0, 0, 0);
|
||||
EPOCH.setTimeZone(TimeZone.getTimeZone("UTC"));
|
||||
|
||||
EPOCH_PLUS_TIME_COMPONENT = new GregorianCalendar(1970, Calendar.JANUARY, 1, 0, 0, 1);
|
||||
EPOCH_PLUS_TIME_COMPONENT.setTimeZone(TimeZone.getTimeZone("UTC"));
|
||||
|
||||
EPOCH_PLUS_TEN_THOUSAND_DAYS = new GregorianCalendar(1970, Calendar.JANUARY, 1, 0, 0, 0);
|
||||
EPOCH_PLUS_TEN_THOUSAND_DAYS.setTimeZone(TimeZone.getTimeZone("UTC"));
|
||||
EPOCH_PLUS_TEN_THOUSAND_DAYS.add(Calendar.DATE, 10000);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testBuilder() {
|
||||
Schema plain = Date.SCHEMA;
|
||||
assertEquals(Date.LOGICAL_NAME, plain.name());
|
||||
assertEquals(1, (Object) plain.version());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testFromLogical() {
|
||||
assertEquals(0, Date.fromLogical(Date.SCHEMA, EPOCH.getTime()));
|
||||
assertEquals(10000, Date.fromLogical(Date.SCHEMA, EPOCH_PLUS_TEN_THOUSAND_DAYS.getTime()));
|
||||
}
|
||||
|
||||
@Test(expected = DataException.class)
|
||||
public void testFromLogicalInvalidSchema() {
|
||||
Date.fromLogical(Date.builder().name("invalid").build(), EPOCH.getTime());
|
||||
}
|
||||
|
||||
@Test(expected = DataException.class)
|
||||
public void testFromLogicalInvalidHasTimeComponents() {
|
||||
Date.fromLogical(Date.SCHEMA, EPOCH_PLUS_TIME_COMPONENT.getTime());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testToLogical() {
|
||||
assertEquals(EPOCH.getTime(), Date.toLogical(Date.SCHEMA, 0));
|
||||
assertEquals(EPOCH_PLUS_TEN_THOUSAND_DAYS.getTime(), Date.toLogical(Date.SCHEMA, 10000));
|
||||
}
|
||||
|
||||
@Test(expected = DataException.class)
|
||||
public void testToLogicalInvalidSchema() {
|
||||
Date.toLogical(Date.builder().name("invalid").build(), 0);
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,62 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.data;
|
||||
|
||||
import org.junit.Test;
|
||||
|
||||
import java.math.BigDecimal;
|
||||
import java.math.BigInteger;
|
||||
import java.util.Collections;
|
||||
|
||||
import static org.junit.Assert.assertArrayEquals;
|
||||
import static org.junit.Assert.assertEquals;
|
||||
|
||||
public class DecimalTest {
|
||||
private static final int TEST_SCALE = 2;
|
||||
private static final BigDecimal TEST_DECIMAL = new BigDecimal(new BigInteger("156"), TEST_SCALE);
|
||||
private static final BigDecimal TEST_DECIMAL_NEGATIVE = new BigDecimal(new BigInteger("-156"), TEST_SCALE);
|
||||
private static final byte[] TEST_BYTES = new byte[]{0, -100};
|
||||
private static final byte[] TEST_BYTES_NEGATIVE = new byte[]{-1, 100};
|
||||
|
||||
@Test
|
||||
public void testBuilder() {
|
||||
Schema plain = Decimal.builder(2).build();
|
||||
assertEquals(Decimal.LOGICAL_NAME, plain.name());
|
||||
assertEquals(Collections.singletonMap(Decimal.SCALE_FIELD, "2"), plain.parameters());
|
||||
assertEquals(1, (Object) plain.version());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testFromLogical() {
|
||||
Schema schema = Decimal.schema(TEST_SCALE);
|
||||
byte[] encoded = Decimal.fromLogical(schema, TEST_DECIMAL);
|
||||
assertArrayEquals(TEST_BYTES, encoded);
|
||||
|
||||
encoded = Decimal.fromLogical(schema, TEST_DECIMAL_NEGATIVE);
|
||||
assertArrayEquals(TEST_BYTES_NEGATIVE, encoded);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testToLogical() {
|
||||
Schema schema = Decimal.schema(2);
|
||||
BigDecimal converted = Decimal.toLogical(schema, TEST_BYTES);
|
||||
assertEquals(TEST_DECIMAL, converted);
|
||||
|
||||
converted = Decimal.toLogical(schema, TEST_BYTES_NEGATIVE);
|
||||
assertEquals(TEST_DECIMAL_NEGATIVE, converted);
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,82 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.data;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
public class FakeSchema implements Schema {
|
||||
@Override
|
||||
public Type type() {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isOptional() {
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Object defaultValue() {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String name() {
|
||||
return "fake";
|
||||
}
|
||||
|
||||
@Override
|
||||
public Integer version() {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String doc() {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Map<String, String> parameters() {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Schema keySchema() {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Schema valueSchema() {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<Field> fields() {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Field field(String fieldName) {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Schema schema() {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,39 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.data;
|
||||
|
||||
import org.junit.Test;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertNotEquals;
|
||||
|
||||
public class FieldTest {
|
||||
|
||||
@Test
|
||||
public void testEquality() {
|
||||
Field field1 = new Field("name", 0, Schema.INT8_SCHEMA);
|
||||
Field field2 = new Field("name", 0, Schema.INT8_SCHEMA);
|
||||
Field differentName = new Field("name2", 0, Schema.INT8_SCHEMA);
|
||||
Field differentIndex = new Field("name", 1, Schema.INT8_SCHEMA);
|
||||
Field differentSchema = new Field("name", 0, Schema.INT16_SCHEMA);
|
||||
|
||||
assertEquals(field1, field2);
|
||||
assertNotEquals(field1, differentName);
|
||||
assertNotEquals(field1, differentIndex);
|
||||
assertNotEquals(field1, differentSchema);
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,387 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.data;
|
||||
|
||||
import org.apache.kafka.connect.errors.SchemaBuilderException;
|
||||
import org.junit.Test;
|
||||
|
||||
import java.nio.ByteBuffer;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertNull;
|
||||
|
||||
public class SchemaBuilderTest {
|
||||
private static final String NAME = "name";
|
||||
private static final Integer VERSION = 2;
|
||||
private static final String DOC = "doc";
|
||||
private static final Map<String, String> NO_PARAMS = null;
|
||||
|
||||
@Test
|
||||
public void testInt8Builder() {
|
||||
Schema schema = SchemaBuilder.int8().build();
|
||||
assertTypeAndDefault(schema, Schema.Type.INT8, false, null);
|
||||
assertNoMetadata(schema);
|
||||
|
||||
schema = SchemaBuilder.int8().name(NAME).optional().defaultValue((byte) 12)
|
||||
.version(VERSION).doc(DOC).build();
|
||||
assertTypeAndDefault(schema, Schema.Type.INT8, true, (byte) 12);
|
||||
assertMetadata(schema, NAME, VERSION, DOC, NO_PARAMS);
|
||||
}
|
||||
|
||||
@Test(expected = SchemaBuilderException.class)
|
||||
public void testInt8BuilderInvalidDefault() {
|
||||
SchemaBuilder.int8().defaultValue("invalid");
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testInt16Builder() {
|
||||
Schema schema = SchemaBuilder.int16().build();
|
||||
assertTypeAndDefault(schema, Schema.Type.INT16, false, null);
|
||||
assertNoMetadata(schema);
|
||||
|
||||
schema = SchemaBuilder.int16().name(NAME).optional().defaultValue((short) 12)
|
||||
.version(VERSION).doc(DOC).build();
|
||||
assertTypeAndDefault(schema, Schema.Type.INT16, true, (short) 12);
|
||||
assertMetadata(schema, NAME, VERSION, DOC, NO_PARAMS);
|
||||
}
|
||||
|
||||
@Test(expected = SchemaBuilderException.class)
|
||||
public void testInt16BuilderInvalidDefault() {
|
||||
SchemaBuilder.int16().defaultValue("invalid");
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testInt32Builder() {
|
||||
Schema schema = SchemaBuilder.int32().build();
|
||||
assertTypeAndDefault(schema, Schema.Type.INT32, false, null);
|
||||
assertNoMetadata(schema);
|
||||
|
||||
schema = SchemaBuilder.int32().name(NAME).optional().defaultValue(12)
|
||||
.version(VERSION).doc(DOC).build();
|
||||
assertTypeAndDefault(schema, Schema.Type.INT32, true, 12);
|
||||
assertMetadata(schema, NAME, VERSION, DOC, NO_PARAMS);
|
||||
}
|
||||
|
||||
@Test(expected = SchemaBuilderException.class)
|
||||
public void testInt32BuilderInvalidDefault() {
|
||||
SchemaBuilder.int32().defaultValue("invalid");
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testInt64Builder() {
|
||||
Schema schema = SchemaBuilder.int64().build();
|
||||
assertTypeAndDefault(schema, Schema.Type.INT64, false, null);
|
||||
assertNoMetadata(schema);
|
||||
|
||||
schema = SchemaBuilder.int64().name(NAME).optional().defaultValue((long) 12)
|
||||
.version(VERSION).doc(DOC).build();
|
||||
assertTypeAndDefault(schema, Schema.Type.INT64, true, (long) 12);
|
||||
assertMetadata(schema, NAME, VERSION, DOC, NO_PARAMS);
|
||||
}
|
||||
|
||||
@Test(expected = SchemaBuilderException.class)
|
||||
public void testInt64BuilderInvalidDefault() {
|
||||
SchemaBuilder.int64().defaultValue("invalid");
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testFloatBuilder() {
|
||||
Schema schema = SchemaBuilder.float32().build();
|
||||
assertTypeAndDefault(schema, Schema.Type.FLOAT32, false, null);
|
||||
assertNoMetadata(schema);
|
||||
|
||||
schema = SchemaBuilder.float32().name(NAME).optional().defaultValue(12.f)
|
||||
.version(VERSION).doc(DOC).build();
|
||||
assertTypeAndDefault(schema, Schema.Type.FLOAT32, true, 12.f);
|
||||
assertMetadata(schema, NAME, VERSION, DOC, NO_PARAMS);
|
||||
}
|
||||
|
||||
@Test(expected = SchemaBuilderException.class)
|
||||
public void testFloatBuilderInvalidDefault() {
|
||||
SchemaBuilder.float32().defaultValue("invalid");
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testDoubleBuilder() {
|
||||
Schema schema = SchemaBuilder.float64().build();
|
||||
assertTypeAndDefault(schema, Schema.Type.FLOAT64, false, null);
|
||||
assertNoMetadata(schema);
|
||||
|
||||
schema = SchemaBuilder.float64().name(NAME).optional().defaultValue(12.0)
|
||||
.version(VERSION).doc(DOC).build();
|
||||
assertTypeAndDefault(schema, Schema.Type.FLOAT64, true, 12.0);
|
||||
assertMetadata(schema, NAME, VERSION, DOC, NO_PARAMS);
|
||||
}
|
||||
|
||||
@Test(expected = SchemaBuilderException.class)
|
||||
public void testDoubleBuilderInvalidDefault() {
|
||||
SchemaBuilder.float64().defaultValue("invalid");
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testBooleanBuilder() {
|
||||
Schema schema = SchemaBuilder.bool().build();
|
||||
assertTypeAndDefault(schema, Schema.Type.BOOLEAN, false, null);
|
||||
assertNoMetadata(schema);
|
||||
|
||||
schema = SchemaBuilder.bool().name(NAME).optional().defaultValue(true)
|
||||
.version(VERSION).doc(DOC).build();
|
||||
assertTypeAndDefault(schema, Schema.Type.BOOLEAN, true, true);
|
||||
assertMetadata(schema, NAME, VERSION, DOC, NO_PARAMS);
|
||||
}
|
||||
|
||||
@Test(expected = SchemaBuilderException.class)
|
||||
public void testBooleanBuilderInvalidDefault() {
|
||||
SchemaBuilder.bool().defaultValue("invalid");
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testStringBuilder() {
|
||||
Schema schema = SchemaBuilder.string().build();
|
||||
assertTypeAndDefault(schema, Schema.Type.STRING, false, null);
|
||||
assertNoMetadata(schema);
|
||||
|
||||
schema = SchemaBuilder.string().name(NAME).optional().defaultValue("a default string")
|
||||
.version(VERSION).doc(DOC).build();
|
||||
assertTypeAndDefault(schema, Schema.Type.STRING, true, "a default string");
|
||||
assertMetadata(schema, NAME, VERSION, DOC, NO_PARAMS);
|
||||
}
|
||||
|
||||
@Test(expected = SchemaBuilderException.class)
|
||||
public void testStringBuilderInvalidDefault() {
|
||||
SchemaBuilder.string().defaultValue(true);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testBytesBuilder() {
|
||||
Schema schema = SchemaBuilder.bytes().build();
|
||||
assertTypeAndDefault(schema, Schema.Type.BYTES, false, null);
|
||||
assertNoMetadata(schema);
|
||||
|
||||
schema = SchemaBuilder.bytes().name(NAME).optional().defaultValue("a default byte array".getBytes())
|
||||
.version(VERSION).doc(DOC).build();
|
||||
assertTypeAndDefault(schema, Schema.Type.BYTES, true, "a default byte array".getBytes());
|
||||
assertMetadata(schema, NAME, VERSION, DOC, NO_PARAMS);
|
||||
}
|
||||
|
||||
@Test(expected = SchemaBuilderException.class)
|
||||
public void testBytesBuilderInvalidDefault() {
|
||||
SchemaBuilder.bytes().defaultValue("a string, not bytes");
|
||||
}
|
||||
|
||||
|
||||
@Test
|
||||
public void testParameters() {
|
||||
Map<String, String> expectedParameters = new HashMap<>();
|
||||
expectedParameters.put("foo", "val");
|
||||
expectedParameters.put("bar", "baz");
|
||||
|
||||
Schema schema = SchemaBuilder.string().parameter("foo", "val").parameter("bar", "baz").build();
|
||||
assertTypeAndDefault(schema, Schema.Type.STRING, false, null);
|
||||
assertMetadata(schema, null, null, null, expectedParameters);
|
||||
|
||||
schema = SchemaBuilder.string().parameters(expectedParameters).build();
|
||||
assertTypeAndDefault(schema, Schema.Type.STRING, false, null);
|
||||
assertMetadata(schema, null, null, null, expectedParameters);
|
||||
}
|
||||
|
||||
|
||||
@Test
|
||||
public void testStructBuilder() {
|
||||
Schema schema = SchemaBuilder.struct()
|
||||
.field("field1", Schema.INT8_SCHEMA)
|
||||
.field("field2", Schema.INT8_SCHEMA)
|
||||
.build();
|
||||
assertTypeAndDefault(schema, Schema.Type.STRUCT, false, null);
|
||||
assertEquals(2, schema.fields().size());
|
||||
assertEquals("field1", schema.fields().get(0).name());
|
||||
assertEquals(0, schema.fields().get(0).index());
|
||||
assertEquals(Schema.INT8_SCHEMA, schema.fields().get(0).schema());
|
||||
assertEquals("field2", schema.fields().get(1).name());
|
||||
assertEquals(1, schema.fields().get(1).index());
|
||||
assertEquals(Schema.INT8_SCHEMA, schema.fields().get(1).schema());
|
||||
assertNoMetadata(schema);
|
||||
}
|
||||
|
||||
@Test(expected = SchemaBuilderException.class)
|
||||
public void testNonStructCantHaveFields() {
|
||||
SchemaBuilder.int8().field("field", SchemaBuilder.int8().build());
|
||||
}
|
||||
|
||||
|
||||
@Test
|
||||
public void testArrayBuilder() {
|
||||
Schema schema = SchemaBuilder.array(Schema.INT8_SCHEMA).build();
|
||||
assertTypeAndDefault(schema, Schema.Type.ARRAY, false, null);
|
||||
assertEquals(schema.valueSchema(), Schema.INT8_SCHEMA);
|
||||
assertNoMetadata(schema);
|
||||
|
||||
// Default value
|
||||
List<Byte> defArray = Arrays.asList((byte) 1, (byte) 2);
|
||||
schema = SchemaBuilder.array(Schema.INT8_SCHEMA).defaultValue(defArray).build();
|
||||
assertTypeAndDefault(schema, Schema.Type.ARRAY, false, defArray);
|
||||
assertEquals(schema.valueSchema(), Schema.INT8_SCHEMA);
|
||||
assertNoMetadata(schema);
|
||||
}
|
||||
|
||||
@Test(expected = SchemaBuilderException.class)
|
||||
public void testArrayBuilderInvalidDefault() {
|
||||
// Array, but wrong embedded type
|
||||
SchemaBuilder.array(Schema.INT8_SCHEMA).defaultValue(Arrays.asList("string")).build();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testMapBuilder() {
|
||||
// SchemaBuilder should also pass the check
|
||||
Schema schema = SchemaBuilder.map(Schema.INT8_SCHEMA, Schema.INT8_SCHEMA);
|
||||
assertTypeAndDefault(schema, Schema.Type.MAP, false, null);
|
||||
assertEquals(schema.keySchema(), Schema.INT8_SCHEMA);
|
||||
assertEquals(schema.valueSchema(), Schema.INT8_SCHEMA);
|
||||
assertNoMetadata(schema);
|
||||
|
||||
schema = SchemaBuilder.map(Schema.INT8_SCHEMA, Schema.INT8_SCHEMA).build();
|
||||
assertTypeAndDefault(schema, Schema.Type.MAP, false, null);
|
||||
assertEquals(schema.keySchema(), Schema.INT8_SCHEMA);
|
||||
assertEquals(schema.valueSchema(), Schema.INT8_SCHEMA);
|
||||
assertNoMetadata(schema);
|
||||
|
||||
// Default value
|
||||
Map<Byte, Byte> defMap = Collections.singletonMap((byte) 5, (byte) 10);
|
||||
schema = SchemaBuilder.map(Schema.INT8_SCHEMA, Schema.INT8_SCHEMA)
|
||||
.defaultValue(defMap).build();
|
||||
assertTypeAndDefault(schema, Schema.Type.MAP, false, defMap);
|
||||
assertEquals(schema.keySchema(), Schema.INT8_SCHEMA);
|
||||
assertEquals(schema.valueSchema(), Schema.INT8_SCHEMA);
|
||||
assertNoMetadata(schema);
|
||||
}
|
||||
|
||||
@Test(expected = SchemaBuilderException.class)
|
||||
public void testMapBuilderInvalidDefault() {
|
||||
// Map, but wrong embedded type
|
||||
Map<Byte, String> defMap = Collections.singletonMap((byte) 5, "foo");
|
||||
SchemaBuilder.map(Schema.INT8_SCHEMA, Schema.INT8_SCHEMA)
|
||||
.defaultValue(defMap).build();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testEmptyStruct() {
|
||||
final SchemaBuilder emptyStructSchemaBuilder = SchemaBuilder.struct();
|
||||
assertEquals(0, emptyStructSchemaBuilder.fields().size());
|
||||
new Struct(emptyStructSchemaBuilder);
|
||||
|
||||
final Schema emptyStructSchema = emptyStructSchemaBuilder.build();
|
||||
assertEquals(0, emptyStructSchema.fields().size());
|
||||
new Struct(emptyStructSchema);
|
||||
}
|
||||
|
||||
@Test(expected = SchemaBuilderException.class)
|
||||
public void testDuplicateFields() {
|
||||
final Schema schema = SchemaBuilder.struct()
|
||||
.name("testing")
|
||||
.field("id", SchemaBuilder.string().doc("").build())
|
||||
.field("id", SchemaBuilder.string().doc("").build())
|
||||
.build();
|
||||
final Struct struct = new Struct(schema)
|
||||
.put("id", "testing");
|
||||
struct.validate();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testDefaultFieldsSameValueOverwriting() {
|
||||
final SchemaBuilder schemaBuilder = SchemaBuilder.string().name("testing").version(123);
|
||||
|
||||
schemaBuilder.name("testing");
|
||||
schemaBuilder.version(123);
|
||||
|
||||
assertEquals("testing", schemaBuilder.name());
|
||||
}
|
||||
|
||||
@Test(expected = SchemaBuilderException.class)
|
||||
public void testDefaultFieldsDifferentValueOverwriting() {
|
||||
final SchemaBuilder schemaBuilder = SchemaBuilder.string().name("testing").version(123);
|
||||
|
||||
schemaBuilder.name("testing");
|
||||
schemaBuilder.version(456);
|
||||
}
|
||||
|
||||
@Test(expected = SchemaBuilderException.class)
|
||||
public void testFieldNameNull() {
|
||||
Schema schema = SchemaBuilder.struct()
|
||||
.field(null, Schema.STRING_SCHEMA)
|
||||
.build();
|
||||
}
|
||||
|
||||
@Test(expected = SchemaBuilderException.class)
|
||||
public void testFieldSchemaNull() {
|
||||
Schema schema = SchemaBuilder.struct()
|
||||
.field("fieldName", null)
|
||||
.build();
|
||||
}
|
||||
|
||||
@Test(expected = SchemaBuilderException.class)
|
||||
public void testArraySchemaNull() {
|
||||
Schema schema = SchemaBuilder.array(null)
|
||||
.build();
|
||||
}
|
||||
|
||||
@Test(expected = SchemaBuilderException.class)
|
||||
public void testMapKeySchemaNull() {
|
||||
Schema schema = SchemaBuilder.map(null, Schema.STRING_SCHEMA)
|
||||
.build();
|
||||
}
|
||||
|
||||
@Test(expected = SchemaBuilderException.class)
|
||||
public void testMapValueSchemaNull() {
|
||||
Schema schema = SchemaBuilder.map(Schema.STRING_SCHEMA, null)
|
||||
.build();
|
||||
}
|
||||
|
||||
@Test(expected = SchemaBuilderException.class)
|
||||
public void testTypeNotNull() {
|
||||
SchemaBuilder.type(null);
|
||||
}
|
||||
|
||||
private void assertTypeAndDefault(Schema schema, Schema.Type type, boolean optional, Object defaultValue) {
|
||||
assertEquals(type, schema.type());
|
||||
assertEquals(optional, schema.isOptional());
|
||||
if (type == Schema.Type.BYTES) {
|
||||
// byte[] is not comparable, need to wrap to check correctly
|
||||
if (defaultValue == null)
|
||||
assertNull(schema.defaultValue());
|
||||
else
|
||||
assertEquals(ByteBuffer.wrap((byte[]) defaultValue), ByteBuffer.wrap((byte[]) schema.defaultValue()));
|
||||
} else {
|
||||
assertEquals(defaultValue, schema.defaultValue());
|
||||
}
|
||||
}
|
||||
|
||||
private void assertMetadata(Schema schema, String name, Integer version, String doc, Map<String, String> parameters) {
|
||||
assertEquals(name, schema.name());
|
||||
assertEquals(version, schema.version());
|
||||
assertEquals(doc, schema.doc());
|
||||
assertEquals(parameters, schema.parameters());
|
||||
}
|
||||
|
||||
private void assertNoMetadata(Schema schema) {
|
||||
assertMetadata(schema, null, null, null, null);
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,528 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.data;
|
||||
|
||||
import org.apache.kafka.connect.data.Schema.Type;
|
||||
import org.apache.kafka.connect.errors.DataException;
|
||||
import org.apache.kafka.connect.errors.SchemaProjectorException;
|
||||
import org.junit.Test;
|
||||
|
||||
import java.math.BigDecimal;
|
||||
import java.math.BigInteger;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.fail;
|
||||
|
||||
public class SchemaProjectorTest {
|
||||
|
||||
@Test
|
||||
public void testPrimitiveTypeProjection() {
|
||||
Object projected;
|
||||
projected = SchemaProjector.project(Schema.BOOLEAN_SCHEMA, false, Schema.BOOLEAN_SCHEMA);
|
||||
assertEquals(false, projected);
|
||||
|
||||
byte[] bytes = {(byte) 1, (byte) 2};
|
||||
projected = SchemaProjector.project(Schema.BYTES_SCHEMA, bytes, Schema.BYTES_SCHEMA);
|
||||
assertEquals(bytes, projected);
|
||||
|
||||
projected = SchemaProjector.project(Schema.STRING_SCHEMA, "abc", Schema.STRING_SCHEMA);
|
||||
assertEquals("abc", projected);
|
||||
|
||||
projected = SchemaProjector.project(Schema.BOOLEAN_SCHEMA, false, Schema.OPTIONAL_BOOLEAN_SCHEMA);
|
||||
assertEquals(false, projected);
|
||||
|
||||
projected = SchemaProjector.project(Schema.BYTES_SCHEMA, bytes, Schema.OPTIONAL_BYTES_SCHEMA);
|
||||
assertEquals(bytes, projected);
|
||||
|
||||
projected = SchemaProjector.project(Schema.STRING_SCHEMA, "abc", Schema.OPTIONAL_STRING_SCHEMA);
|
||||
assertEquals("abc", projected);
|
||||
|
||||
try {
|
||||
SchemaProjector.project(Schema.OPTIONAL_BOOLEAN_SCHEMA, false, Schema.BOOLEAN_SCHEMA);
|
||||
fail("Cannot project optional schema to schema with no default value.");
|
||||
} catch (DataException e) {
|
||||
// expected
|
||||
}
|
||||
|
||||
try {
|
||||
SchemaProjector.project(Schema.OPTIONAL_BYTES_SCHEMA, bytes, Schema.BYTES_SCHEMA);
|
||||
fail("Cannot project optional schema to schema with no default value.");
|
||||
} catch (DataException e) {
|
||||
// expected
|
||||
}
|
||||
|
||||
try {
|
||||
SchemaProjector.project(Schema.OPTIONAL_STRING_SCHEMA, "abc", Schema.STRING_SCHEMA);
|
||||
fail("Cannot project optional schema to schema with no default value.");
|
||||
} catch (DataException e) {
|
||||
// expected
|
||||
}
|
||||
}
|
||||
|
||||
    @Test
    public void testNumericTypeProjection() {
        Schema[] promotableSchemas = {Schema.INT8_SCHEMA, Schema.INT16_SCHEMA, Schema.INT32_SCHEMA, Schema.INT64_SCHEMA, Schema.FLOAT32_SCHEMA, Schema.FLOAT64_SCHEMA};
        Schema[] promotableOptionalSchemas = {Schema.OPTIONAL_INT8_SCHEMA, Schema.OPTIONAL_INT16_SCHEMA, Schema.OPTIONAL_INT32_SCHEMA, Schema.OPTIONAL_INT64_SCHEMA,
                Schema.OPTIONAL_FLOAT32_SCHEMA, Schema.OPTIONAL_FLOAT64_SCHEMA};

        Object[] values = {(byte) 127, (short) 255, 32767, 327890L, 1.2F, 1.2345};
        Map<Object, List<?>> expectedProjected = new HashMap<>();
        expectedProjected.put(values[0], Arrays.asList((byte) 127, (short) 127, 127, 127L, 127.F, 127.));
        expectedProjected.put(values[1], Arrays.asList((short) 255, 255, 255L, 255.F, 255.));
        expectedProjected.put(values[2], Arrays.asList(32767, 32767L, 32767.F, 32767.));
        expectedProjected.put(values[3], Arrays.asList(327890L, 327890.F, 327890.));
        expectedProjected.put(values[4], Arrays.asList(1.2F, 1.2));
        expectedProjected.put(values[5], Arrays.asList(1.2345));

        Object promoted;
        for (int i = 0; i < promotableSchemas.length; ++i) {
            Schema source = promotableSchemas[i];
            List<?> expected = expectedProjected.get(values[i]);
            for (int j = i; j < promotableSchemas.length; ++j) {
                Schema target = promotableSchemas[j];
                promoted = SchemaProjector.project(source, values[i], target);
                if (target.type() == Type.FLOAT64) {
                    assertEquals((Double) (expected.get(j - i)), (double) promoted, 1e-6);
                } else {
                    assertEquals(expected.get(j - i), promoted);
                }
            }
            for (int j = i; j < promotableOptionalSchemas.length; ++j) {
                Schema target = promotableOptionalSchemas[j];
                promoted = SchemaProjector.project(source, values[i], target);
                if (target.type() == Type.FLOAT64) {
                    assertEquals((Double) (expected.get(j - i)), (double) promoted, 1e-6);
                } else {
                    assertEquals(expected.get(j - i), promoted);
                }
            }
        }

        for (int i = 0; i < promotableOptionalSchemas.length; ++i) {
            Schema source = promotableSchemas[i];
            List<?> expected = expectedProjected.get(values[i]);
            for (int j = i; j < promotableOptionalSchemas.length; ++j) {
                Schema target = promotableOptionalSchemas[j];
                promoted = SchemaProjector.project(source, values[i], target);
                if (target.type() == Type.FLOAT64) {
                    assertEquals((Double) (expected.get(j - i)), (double) promoted, 1e-6);
                } else {
                    assertEquals(expected.get(j - i), promoted);
                }
            }
        }

        Schema[] nonPromotableSchemas = {Schema.BOOLEAN_SCHEMA, Schema.BYTES_SCHEMA, Schema.STRING_SCHEMA};
        for (Schema promotableSchema: promotableSchemas) {
            for (Schema nonPromotableSchema: nonPromotableSchemas) {
                Object dummy = new Object();
                try {
                    SchemaProjector.project(promotableSchema, dummy, nonPromotableSchema);
                    fail("Cannot promote " + promotableSchema.type() + " to " + nonPromotableSchema.type());
                } catch (DataException e) {
                    // expected
                }
            }
        }
    }

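    // The checks below use the verifyOptionalProjection helper defined at the bottom of this class:
    // a non-null value must survive the projection, while a null value projects to null for optional
    // targets and to the target's default value otherwise.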
@Test
|
||||
public void testPrimitiveOptionalProjection() {
|
||||
verifyOptionalProjection(Schema.OPTIONAL_BOOLEAN_SCHEMA, Type.BOOLEAN, false, true, false, true);
|
||||
verifyOptionalProjection(Schema.OPTIONAL_BOOLEAN_SCHEMA, Type.BOOLEAN, false, true, false, false);
|
||||
|
||||
byte[] bytes = {(byte) 1, (byte) 2};
|
||||
byte[] defaultBytes = {(byte) 3, (byte) 4};
|
||||
verifyOptionalProjection(Schema.OPTIONAL_BYTES_SCHEMA, Type.BYTES, bytes, defaultBytes, bytes, true);
|
||||
verifyOptionalProjection(Schema.OPTIONAL_BYTES_SCHEMA, Type.BYTES, bytes, defaultBytes, bytes, false);
|
||||
|
||||
verifyOptionalProjection(Schema.OPTIONAL_STRING_SCHEMA, Type.STRING, "abc", "def", "abc", true);
|
||||
verifyOptionalProjection(Schema.OPTIONAL_STRING_SCHEMA, Type.STRING, "abc", "def", "abc", false);
|
||||
|
||||
verifyOptionalProjection(Schema.OPTIONAL_INT8_SCHEMA, Type.INT8, (byte) 12, (byte) 127, (byte) 12, true);
|
||||
verifyOptionalProjection(Schema.OPTIONAL_INT8_SCHEMA, Type.INT8, (byte) 12, (byte) 127, (byte) 12, false);
|
||||
verifyOptionalProjection(Schema.OPTIONAL_INT8_SCHEMA, Type.INT16, (byte) 12, (short) 127, (short) 12, true);
|
||||
verifyOptionalProjection(Schema.OPTIONAL_INT8_SCHEMA, Type.INT16, (byte) 12, (short) 127, (short) 12, false);
|
||||
verifyOptionalProjection(Schema.OPTIONAL_INT8_SCHEMA, Type.INT32, (byte) 12, 12789, 12, true);
|
||||
verifyOptionalProjection(Schema.OPTIONAL_INT8_SCHEMA, Type.INT32, (byte) 12, 12789, 12, false);
|
||||
verifyOptionalProjection(Schema.OPTIONAL_INT8_SCHEMA, Type.INT64, (byte) 12, 127890L, 12L, true);
|
||||
verifyOptionalProjection(Schema.OPTIONAL_INT8_SCHEMA, Type.INT64, (byte) 12, 127890L, 12L, false);
|
||||
verifyOptionalProjection(Schema.OPTIONAL_INT8_SCHEMA, Type.FLOAT32, (byte) 12, 3.45F, 12.F, true);
|
||||
verifyOptionalProjection(Schema.OPTIONAL_INT8_SCHEMA, Type.FLOAT32, (byte) 12, 3.45F, 12.F, false);
|
||||
verifyOptionalProjection(Schema.OPTIONAL_INT8_SCHEMA, Type.FLOAT64, (byte) 12, 3.4567, 12., true);
|
||||
verifyOptionalProjection(Schema.OPTIONAL_INT8_SCHEMA, Type.FLOAT64, (byte) 12, 3.4567, 12., false);
|
||||
|
||||
verifyOptionalProjection(Schema.OPTIONAL_INT16_SCHEMA, Type.INT16, (short) 12, (short) 127, (short) 12, true);
|
||||
verifyOptionalProjection(Schema.OPTIONAL_INT16_SCHEMA, Type.INT16, (short) 12, (short) 127, (short) 12, false);
|
||||
verifyOptionalProjection(Schema.OPTIONAL_INT16_SCHEMA, Type.INT32, (short) 12, 12789, 12, true);
|
||||
verifyOptionalProjection(Schema.OPTIONAL_INT16_SCHEMA, Type.INT32, (short) 12, 12789, 12, false);
|
||||
verifyOptionalProjection(Schema.OPTIONAL_INT16_SCHEMA, Type.INT64, (short) 12, 127890L, 12L, true);
|
||||
verifyOptionalProjection(Schema.OPTIONAL_INT16_SCHEMA, Type.INT64, (short) 12, 127890L, 12L, false);
|
||||
verifyOptionalProjection(Schema.OPTIONAL_INT16_SCHEMA, Type.FLOAT32, (short) 12, 3.45F, 12.F, true);
|
||||
verifyOptionalProjection(Schema.OPTIONAL_INT16_SCHEMA, Type.FLOAT32, (short) 12, 3.45F, 12.F, false);
|
||||
verifyOptionalProjection(Schema.OPTIONAL_INT16_SCHEMA, Type.FLOAT64, (short) 12, 3.4567, 12., true);
|
||||
verifyOptionalProjection(Schema.OPTIONAL_INT16_SCHEMA, Type.FLOAT64, (short) 12, 3.4567, 12., false);
|
||||
|
||||
verifyOptionalProjection(Schema.OPTIONAL_INT32_SCHEMA, Type.INT32, 12, 12789, 12, true);
|
||||
verifyOptionalProjection(Schema.OPTIONAL_INT32_SCHEMA, Type.INT32, 12, 12789, 12, false);
|
||||
verifyOptionalProjection(Schema.OPTIONAL_INT32_SCHEMA, Type.INT64, 12, 127890L, 12L, true);
|
||||
verifyOptionalProjection(Schema.OPTIONAL_INT32_SCHEMA, Type.INT64, 12, 127890L, 12L, false);
|
||||
verifyOptionalProjection(Schema.OPTIONAL_INT32_SCHEMA, Type.FLOAT32, 12, 3.45F, 12.F, true);
|
||||
verifyOptionalProjection(Schema.OPTIONAL_INT32_SCHEMA, Type.FLOAT32, 12, 3.45F, 12.F, false);
|
||||
verifyOptionalProjection(Schema.OPTIONAL_INT32_SCHEMA, Type.FLOAT64, 12, 3.4567, 12., true);
|
||||
verifyOptionalProjection(Schema.OPTIONAL_INT32_SCHEMA, Type.FLOAT64, 12, 3.4567, 12., false);
|
||||
|
||||
verifyOptionalProjection(Schema.OPTIONAL_INT64_SCHEMA, Type.INT64, 12L, 127890L, 12L, true);
|
||||
verifyOptionalProjection(Schema.OPTIONAL_INT64_SCHEMA, Type.INT64, 12L, 127890L, 12L, false);
|
||||
verifyOptionalProjection(Schema.OPTIONAL_INT64_SCHEMA, Type.FLOAT32, 12L, 3.45F, 12.F, true);
|
||||
verifyOptionalProjection(Schema.OPTIONAL_INT64_SCHEMA, Type.FLOAT32, 12L, 3.45F, 12.F, false);
|
||||
verifyOptionalProjection(Schema.OPTIONAL_INT64_SCHEMA, Type.FLOAT64, 12L, 3.4567, 12., true);
|
||||
verifyOptionalProjection(Schema.OPTIONAL_INT64_SCHEMA, Type.FLOAT64, 12L, 3.4567, 12., false);
|
||||
|
||||
verifyOptionalProjection(Schema.OPTIONAL_FLOAT32_SCHEMA, Type.FLOAT32, 12.345F, 3.45F, 12.345F, true);
|
||||
verifyOptionalProjection(Schema.OPTIONAL_FLOAT32_SCHEMA, Type.FLOAT32, 12.345F, 3.45F, 12.345F, false);
|
||||
verifyOptionalProjection(Schema.OPTIONAL_FLOAT32_SCHEMA, Type.FLOAT64, 12.345F, 3.4567, 12.345, true);
|
||||
verifyOptionalProjection(Schema.OPTIONAL_FLOAT32_SCHEMA, Type.FLOAT64, 12.345F, 3.4567, 12.345, false);
|
||||
|
||||
verifyOptionalProjection(Schema.OPTIONAL_FLOAT32_SCHEMA, Type.FLOAT64, 12.345, 3.4567, 12.345, true);
|
||||
verifyOptionalProjection(Schema.OPTIONAL_FLOAT32_SCHEMA, Type.FLOAT64, 12.345, 3.4567, 12.345, false);
|
||||
}
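    // The struct projection tests below verify that a field added by the target schema is filled from
    // its default value, while a field dropped by the target schema becomes inaccessible.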
|
||||
|
||||
@Test
|
||||
public void testStructAddField() {
|
||||
Schema source = SchemaBuilder.struct()
|
||||
.field("field", Schema.INT32_SCHEMA)
|
||||
.build();
|
||||
Struct sourceStruct = new Struct(source);
|
||||
sourceStruct.put("field", 1);
|
||||
|
||||
Schema target = SchemaBuilder.struct()
|
||||
.field("field", Schema.INT32_SCHEMA)
|
||||
.field("field2", SchemaBuilder.int32().defaultValue(123).build())
|
||||
.build();
|
||||
|
||||
Struct targetStruct = (Struct) SchemaProjector.project(source, sourceStruct, target);
|
||||
|
||||
|
||||
assertEquals(1, (int) targetStruct.getInt32("field"));
|
||||
assertEquals(123, (int) targetStruct.getInt32("field2"));
|
||||
|
||||
Schema incompatibleTargetSchema = SchemaBuilder.struct()
|
||||
.field("field", Schema.INT32_SCHEMA)
|
||||
.field("field2", Schema.INT32_SCHEMA)
|
||||
.build();
|
||||
|
||||
try {
|
||||
SchemaProjector.project(source, sourceStruct, incompatibleTargetSchema);
|
||||
fail("Incompatible schema.");
|
||||
} catch (DataException e) {
|
||||
// expected
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testStructRemoveField() {
|
||||
Schema source = SchemaBuilder.struct()
|
||||
.field("field", Schema.INT32_SCHEMA)
|
||||
.field("field2", Schema.INT32_SCHEMA)
|
||||
.build();
|
||||
Struct sourceStruct = new Struct(source);
|
||||
sourceStruct.put("field", 1);
|
||||
sourceStruct.put("field2", 234);
|
||||
|
||||
Schema target = SchemaBuilder.struct()
|
||||
.field("field", Schema.INT32_SCHEMA)
|
||||
.build();
|
||||
Struct targetStruct = (Struct) SchemaProjector.project(source, sourceStruct, target);
|
||||
|
||||
assertEquals(1, targetStruct.get("field"));
|
||||
try {
|
||||
targetStruct.get("field2");
|
||||
fail("field2 is not part of the projected struct");
|
||||
} catch (DataException e) {
|
||||
// expected
|
||||
}
|
||||
}
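    // Projecting a null value from an optional source struct schema must yield the target schema's default struct.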
|
||||
|
||||
@Test
|
||||
public void testStructDefaultValue() {
|
||||
Schema source = SchemaBuilder.struct().optional()
|
||||
.field("field", Schema.INT32_SCHEMA)
|
||||
.field("field2", Schema.INT32_SCHEMA)
|
||||
.build();
|
||||
|
||||
SchemaBuilder builder = SchemaBuilder.struct()
|
||||
.field("field", Schema.INT32_SCHEMA)
|
||||
.field("field2", Schema.INT32_SCHEMA);
|
||||
|
||||
Struct defaultStruct = new Struct(builder).put("field", 12).put("field2", 345);
|
||||
builder.defaultValue(defaultStruct);
|
||||
Schema target = builder.build();
|
||||
|
||||
Object projected = SchemaProjector.project(source, null, target);
|
||||
assertEquals(defaultStruct, projected);
|
||||
|
||||
Struct sourceStruct = new Struct(source).put("field", 45).put("field2", 678);
|
||||
Struct targetStruct = (Struct) SchemaProjector.project(source, sourceStruct, target);
|
||||
|
||||
assertEquals(sourceStruct.get("field"), targetStruct.get("field"));
|
||||
assertEquals(sourceStruct.get("field2"), targetStruct.get("field2"));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testNestedSchemaProjection() {
|
||||
Schema sourceFlatSchema = SchemaBuilder.struct()
|
||||
.field("field", Schema.INT32_SCHEMA)
|
||||
.build();
|
||||
Schema targetFlatSchema = SchemaBuilder.struct()
|
||||
.field("field", Schema.INT32_SCHEMA)
|
||||
.field("field2", SchemaBuilder.int32().defaultValue(123).build())
|
||||
.build();
|
||||
Schema sourceNestedSchema = SchemaBuilder.struct()
|
||||
.field("first", Schema.INT32_SCHEMA)
|
||||
.field("second", Schema.STRING_SCHEMA)
|
||||
.field("array", SchemaBuilder.array(Schema.INT32_SCHEMA).build())
|
||||
.field("map", SchemaBuilder.map(Schema.INT32_SCHEMA, Schema.STRING_SCHEMA).build())
|
||||
.field("nested", sourceFlatSchema)
|
||||
.build();
|
||||
Schema targetNestedSchema = SchemaBuilder.struct()
|
||||
.field("first", Schema.INT32_SCHEMA)
|
||||
.field("second", Schema.STRING_SCHEMA)
|
||||
.field("array", SchemaBuilder.array(Schema.INT32_SCHEMA).build())
|
||||
.field("map", SchemaBuilder.map(Schema.INT32_SCHEMA, Schema.STRING_SCHEMA).build())
|
||||
.field("nested", targetFlatSchema)
|
||||
.build();
|
||||
|
||||
Struct sourceFlatStruct = new Struct(sourceFlatSchema);
|
||||
sourceFlatStruct.put("field", 113);
|
||||
|
||||
Struct sourceNestedStruct = new Struct(sourceNestedSchema);
|
||||
sourceNestedStruct.put("first", 1);
|
||||
sourceNestedStruct.put("second", "abc");
|
||||
sourceNestedStruct.put("array", Arrays.asList(1, 2));
|
||||
sourceNestedStruct.put("map", Collections.singletonMap(5, "def"));
|
||||
sourceNestedStruct.put("nested", sourceFlatStruct);
|
||||
|
||||
Struct targetNestedStruct = (Struct) SchemaProjector.project(sourceNestedSchema, sourceNestedStruct,
|
||||
targetNestedSchema);
|
||||
assertEquals(1, targetNestedStruct.get("first"));
|
||||
assertEquals("abc", targetNestedStruct.get("second"));
|
||||
assertEquals(Arrays.asList(1, 2), targetNestedStruct.get("array"));
|
||||
assertEquals(Collections.singletonMap(5, "def"), targetNestedStruct.get("map"));
|
||||
|
||||
Struct projectedStruct = (Struct) targetNestedStruct.get("nested");
|
||||
assertEquals(113, projectedStruct.get("field"));
|
||||
assertEquals(123, projectedStruct.get("field2"));
|
||||
}
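    // Logical types (Decimal, Date, Time, Timestamp) only project onto the same logical type;
    // projecting to or from plain types, or to a differently named schema, must fail.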
|
||||
|
||||
@Test
|
||||
public void testLogicalTypeProjection() {
|
||||
Schema[] logicalTypeSchemas = {Decimal.schema(2), Date.SCHEMA, Time.SCHEMA, Timestamp.SCHEMA};
|
||||
Object projected;
|
||||
|
||||
BigDecimal testDecimal = new BigDecimal(new BigInteger("156"), 2);
|
||||
projected = SchemaProjector.project(Decimal.schema(2), testDecimal, Decimal.schema(2));
|
||||
assertEquals(testDecimal, projected);
|
||||
|
||||
projected = SchemaProjector.project(Date.SCHEMA, 1000, Date.SCHEMA);
|
||||
assertEquals(1000, projected);
|
||||
|
||||
projected = SchemaProjector.project(Time.SCHEMA, 231, Time.SCHEMA);
|
||||
assertEquals(231, projected);
|
||||
|
||||
projected = SchemaProjector.project(Timestamp.SCHEMA, 34567L, Timestamp.SCHEMA);
|
||||
assertEquals(34567L, projected);
|
||||
|
||||
java.util.Date date = new java.util.Date();
|
||||
|
||||
projected = SchemaProjector.project(Date.SCHEMA, date, Date.SCHEMA);
|
||||
assertEquals(date, projected);
|
||||
|
||||
projected = SchemaProjector.project(Time.SCHEMA, date, Time.SCHEMA);
|
||||
assertEquals(date, projected);
|
||||
|
||||
projected = SchemaProjector.project(Timestamp.SCHEMA, date, Timestamp.SCHEMA);
|
||||
assertEquals(date, projected);
|
||||
|
||||
Schema namedSchema = SchemaBuilder.int32().name("invalidLogicalTypeName").build();
|
||||
for (Schema logicalTypeSchema: logicalTypeSchemas) {
|
||||
try {
|
||||
SchemaProjector.project(logicalTypeSchema, null, Schema.BOOLEAN_SCHEMA);
|
||||
fail("Cannot project logical types to non-logical types.");
|
||||
} catch (SchemaProjectorException e) {
|
||||
// expected
|
||||
}
|
||||
|
||||
try {
|
||||
SchemaProjector.project(logicalTypeSchema, null, namedSchema);
|
||||
fail("Reader name is not a valid logical type name.");
|
||||
} catch (SchemaProjectorException e) {
|
||||
// expected
|
||||
}
|
||||
|
||||
try {
|
||||
SchemaProjector.project(Schema.BOOLEAN_SCHEMA, null, logicalTypeSchema);
|
||||
fail("Cannot project non-logical types to logical types.");
|
||||
} catch (SchemaProjectorException e) {
|
||||
// expected
|
||||
}
|
||||
}
|
||||
}
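    // Array and map projections apply the same element-level promotion rules, and a null source
    // falls back to the target's default value when one is defined.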
|
||||
|
||||
@Test
|
||||
public void testArrayProjection() {
|
||||
Schema source = SchemaBuilder.array(Schema.INT32_SCHEMA).build();
|
||||
|
||||
Object projected = SchemaProjector.project(source, Arrays.asList(1, 2, 3), source);
|
||||
assertEquals(Arrays.asList(1, 2, 3), projected);
|
||||
|
||||
Schema optionalSource = SchemaBuilder.array(Schema.INT32_SCHEMA).optional().build();
|
||||
Schema target = SchemaBuilder.array(Schema.INT32_SCHEMA).defaultValue(Arrays.asList(1, 2, 3)).build();
|
||||
projected = SchemaProjector.project(optionalSource, Arrays.asList(4, 5), target);
|
||||
assertEquals(Arrays.asList(4, 5), projected);
|
||||
projected = SchemaProjector.project(optionalSource, null, target);
|
||||
assertEquals(Arrays.asList(1, 2, 3), projected);
|
||||
|
||||
Schema promotedTarget = SchemaBuilder.array(Schema.INT64_SCHEMA).defaultValue(Arrays.asList(1L, 2L, 3L)).build();
|
||||
projected = SchemaProjector.project(optionalSource, Arrays.asList(4, 5), promotedTarget);
|
||||
List<Long> expectedProjected = Arrays.asList(4L, 5L);
|
||||
assertEquals(expectedProjected, projected);
|
||||
projected = SchemaProjector.project(optionalSource, null, promotedTarget);
|
||||
assertEquals(Arrays.asList(1L, 2L, 3L), projected);
|
||||
|
||||
Schema noDefaultValueTarget = SchemaBuilder.array(Schema.INT32_SCHEMA).build();
|
||||
try {
|
||||
SchemaProjector.project(optionalSource, null, noDefaultValueTarget);
|
||||
fail("Target schema does not provide a default value.");
|
||||
} catch (SchemaProjectorException e) {
|
||||
// expected
|
||||
}
|
||||
|
||||
Schema nonPromotableTarget = SchemaBuilder.array(Schema.BOOLEAN_SCHEMA).build();
|
||||
try {
|
||||
SchemaProjector.project(optionalSource, null, nonPromotableTarget);
|
||||
fail("Neither source type matches target type nor source type can be promoted to target type");
|
||||
} catch (SchemaProjectorException e) {
|
||||
// expected
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testMapProjection() {
|
||||
Schema source = SchemaBuilder.map(Schema.INT32_SCHEMA, Schema.INT32_SCHEMA).optional().build();
|
||||
|
||||
Schema target = SchemaBuilder.map(Schema.INT32_SCHEMA, Schema.INT32_SCHEMA).defaultValue(Collections.singletonMap(1, 2)).build();
|
||||
Object projected = SchemaProjector.project(source, Collections.singletonMap(3, 4), target);
|
||||
assertEquals(Collections.singletonMap(3, 4), projected);
|
||||
projected = SchemaProjector.project(source, null, target);
|
||||
assertEquals(Collections.singletonMap(1, 2), projected);
|
||||
|
||||
Schema promotedTarget = SchemaBuilder.map(Schema.INT64_SCHEMA, Schema.FLOAT32_SCHEMA).defaultValue(
|
||||
Collections.singletonMap(3L, 4.5F)).build();
|
||||
projected = SchemaProjector.project(source, Collections.singletonMap(3, 4), promotedTarget);
|
||||
assertEquals(Collections.singletonMap(3L, 4.F), projected);
|
||||
projected = SchemaProjector.project(source, null, promotedTarget);
|
||||
assertEquals(Collections.singletonMap(3L, 4.5F), projected);
|
||||
|
||||
Schema noDefaultValueTarget = SchemaBuilder.map(Schema.INT32_SCHEMA, Schema.INT32_SCHEMA).build();
|
||||
try {
|
||||
SchemaProjector.project(source, null, noDefaultValueTarget);
|
||||
fail("Reader does not provide a default value.");
|
||||
} catch (SchemaProjectorException e) {
|
||||
// expected
|
||||
}
|
||||
|
||||
Schema nonPromotableTarget = SchemaBuilder.map(Schema.BOOLEAN_SCHEMA, Schema.STRING_SCHEMA).build();
|
||||
try {
|
||||
SchemaProjector.project(source, null, nonPromotableTarget);
|
||||
fail("Neither source type matches target type nor source type can be promoted to target type");
|
||||
} catch (SchemaProjectorException e) {
|
||||
// expected
|
||||
}
|
||||
}
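    // Even when the underlying types match, projection must fail if the schema names or parameters differ.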
|
||||
|
||||
@Test
|
||||
public void testMaybeCompatible() {
|
||||
Schema source = SchemaBuilder.int32().name("source").build();
|
||||
Schema target = SchemaBuilder.int32().name("target").build();
|
||||
|
||||
try {
|
||||
SchemaProjector.project(source, 12, target);
|
||||
fail("Source name and target name mismatch.");
|
||||
} catch (SchemaProjectorException e) {
|
||||
// expected
|
||||
}
|
||||
|
||||
Schema targetWithParameters = SchemaBuilder.int32().parameters(Collections.singletonMap("key", "value"));
|
||||
try {
|
||||
SchemaProjector.project(source, 34, targetWithParameters);
|
||||
fail("Source parameters and target parameters mismatch.");
|
||||
} catch (SchemaProjectorException e) {
|
||||
// expected
|
||||
}
|
||||
}
|
||||
|
||||
    @Test
    public void testProjectMissingDefaultValuedStructField() {
        final Schema source = SchemaBuilder.struct().build();
        final Schema target = SchemaBuilder.struct().field("id", SchemaBuilder.int64().defaultValue(42L).build()).build();
        assertEquals(42L, (long) ((Struct) SchemaProjector.project(source, new Struct(source), target)).getInt64("id"));
    }

    @Test
    public void testProjectMissingOptionalStructField() {
        final Schema source = SchemaBuilder.struct().build();
        final Schema target = SchemaBuilder.struct().field("id", SchemaBuilder.OPTIONAL_INT64_SCHEMA).build();
        assertEquals(null, ((Struct) SchemaProjector.project(source, new Struct(source), target)).getInt64("id"));
    }

    @Test(expected = SchemaProjectorException.class)
    public void testProjectMissingRequiredField() {
        final Schema source = SchemaBuilder.struct().build();
        final Schema target = SchemaBuilder.struct().field("id", SchemaBuilder.INT64_SCHEMA).build();
        SchemaProjector.project(source, new Struct(source), target);
    }

    private void verifyOptionalProjection(Schema source, Type targetType, Object value, Object defaultValue, Object expectedProjected, boolean optional) {
        Schema target;
        assert source.isOptional();
        assert value != null;
        if (optional) {
            target = SchemaBuilder.type(targetType).optional().defaultValue(defaultValue).build();
        } else {
            target = SchemaBuilder.type(targetType).defaultValue(defaultValue).build();
        }
        Object projected = SchemaProjector.project(source, value, target);
        if (targetType == Type.FLOAT64) {
            assertEquals((double) expectedProjected, (double) projected, 1e-6);
        } else {
            assertEquals(expectedProjected, projected);
        }

        projected = SchemaProjector.project(source, null, target);
        if (optional) {
            assertEquals(null, projected);
        } else {
            assertEquals(defaultValue, projected);
        }
    }
}
@@ -0,0 +1,338 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.connect.data;

import org.apache.kafka.connect.errors.DataException;
import org.junit.Test;

import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Map;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertThrows;


public class StructTest {

    private static final Schema FLAT_STRUCT_SCHEMA = SchemaBuilder.struct()
            .field("int8", Schema.INT8_SCHEMA)
            .field("int16", Schema.INT16_SCHEMA)
            .field("int32", Schema.INT32_SCHEMA)
            .field("int64", Schema.INT64_SCHEMA)
            .field("float32", Schema.FLOAT32_SCHEMA)
            .field("float64", Schema.FLOAT64_SCHEMA)
            .field("boolean", Schema.BOOLEAN_SCHEMA)
            .field("string", Schema.STRING_SCHEMA)
            .field("bytes", Schema.BYTES_SCHEMA)
            .build();

    private static final Schema ARRAY_SCHEMA = SchemaBuilder.array(Schema.INT8_SCHEMA).build();
    private static final Schema MAP_SCHEMA = SchemaBuilder.map(
            Schema.INT32_SCHEMA,
            Schema.STRING_SCHEMA
    ).build();
    private static final Schema NESTED_CHILD_SCHEMA = SchemaBuilder.struct()
            .field("int8", Schema.INT8_SCHEMA)
            .build();
    private static final Schema NESTED_SCHEMA = SchemaBuilder.struct()
            .field("array", ARRAY_SCHEMA)
            .field("map", MAP_SCHEMA)
            .field("nested", NESTED_CHILD_SCHEMA)
            .build();

    private static final Schema REQUIRED_FIELD_SCHEMA = Schema.INT8_SCHEMA;
    private static final Schema OPTIONAL_FIELD_SCHEMA = SchemaBuilder.int8().optional().build();
    private static final Schema DEFAULT_FIELD_SCHEMA = SchemaBuilder.int8().defaultValue((byte) 0).build();

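    // Populates every primitive field of the flat schema, reads each back through its typed getter, and validates the struct.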
@Test
|
||||
public void testFlatStruct() {
|
||||
Struct struct = new Struct(FLAT_STRUCT_SCHEMA)
|
||||
.put("int8", (byte) 12)
|
||||
.put("int16", (short) 12)
|
||||
.put("int32", 12)
|
||||
.put("int64", (long) 12)
|
||||
.put("float32", 12.f)
|
||||
.put("float64", 12.)
|
||||
.put("boolean", true)
|
||||
.put("string", "foobar")
|
||||
.put("bytes", "foobar".getBytes());
|
||||
|
||||
// Test equality, and also the type-specific getters
|
||||
assertEquals((byte) 12, (byte) struct.getInt8("int8"));
|
||||
assertEquals((short) 12, (short) struct.getInt16("int16"));
|
||||
assertEquals(12, (int) struct.getInt32("int32"));
|
||||
assertEquals((long) 12, (long) struct.getInt64("int64"));
|
||||
assertEquals((Float) 12.f, struct.getFloat32("float32"));
|
||||
assertEquals((Double) 12., struct.getFloat64("float64"));
|
||||
assertEquals(true, struct.getBoolean("boolean"));
|
||||
assertEquals("foobar", struct.getString("string"));
|
||||
assertEquals(ByteBuffer.wrap("foobar".getBytes()), ByteBuffer.wrap(struct.getBytes("bytes")));
|
||||
|
||||
struct.validate();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testComplexStruct() {
|
||||
List<Byte> array = Arrays.asList((byte) 1, (byte) 2);
|
||||
Map<Integer, String> map = Collections.singletonMap(1, "string");
|
||||
Struct struct = new Struct(NESTED_SCHEMA)
|
||||
.put("array", array)
|
||||
.put("map", map)
|
||||
.put("nested", new Struct(NESTED_CHILD_SCHEMA).put("int8", (byte) 12));
|
||||
|
||||
// Separate the call to get the array and map to validate the typed get methods work properly
|
||||
List<Byte> arrayExtracted = struct.getArray("array");
|
||||
assertEquals(array, arrayExtracted);
|
||||
Map<Byte, Byte> mapExtracted = struct.getMap("map");
|
||||
assertEquals(map, mapExtracted);
|
||||
assertEquals((byte) 12, struct.getStruct("nested").get("int8"));
|
||||
|
||||
struct.validate();
|
||||
}
|
||||
|
||||
|
||||
// These don't test all the ways validation can fail, just one for each element. See more extensive validation
|
||||
// tests in SchemaTest. These are meant to ensure that we are invoking the same code path and that we do deeper
|
||||
// inspection than just checking the class of the object
|
||||
|
||||
@Test(expected = DataException.class)
|
||||
public void testInvalidFieldType() {
|
||||
new Struct(FLAT_STRUCT_SCHEMA).put("int8", "should fail because this is a string, not int8");
|
||||
}
|
||||
|
||||
@Test(expected = DataException.class)
|
||||
public void testInvalidArrayFieldElements() {
|
||||
new Struct(NESTED_SCHEMA).put("array", Arrays.asList("should fail since elements should be int8s"));
|
||||
}
|
||||
|
||||
@Test(expected = DataException.class)
|
||||
public void testInvalidMapKeyElements() {
|
||||
new Struct(NESTED_SCHEMA).put("map", Collections.singletonMap("should fail because keys should be int8s", (byte) 12));
|
||||
}
|
||||
|
||||
@Test(expected = DataException.class)
|
||||
public void testInvalidStructFieldSchema() {
|
||||
new Struct(NESTED_SCHEMA).put("nested", new Struct(MAP_SCHEMA));
|
||||
}
|
||||
|
||||
@Test(expected = DataException.class)
|
||||
public void testInvalidStructFieldValue() {
|
||||
new Struct(NESTED_SCHEMA).put("nested", new Struct(NESTED_CHILD_SCHEMA));
|
||||
}
|
||||
|
||||
|
||||
@Test(expected = DataException.class)
|
||||
public void testMissingFieldValidation() {
|
||||
// Required int8 field
|
||||
Schema schema = SchemaBuilder.struct().field("field", REQUIRED_FIELD_SCHEMA).build();
|
||||
Struct struct = new Struct(schema);
|
||||
struct.validate();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testMissingOptionalFieldValidation() {
|
||||
Schema schema = SchemaBuilder.struct().field("field", OPTIONAL_FIELD_SCHEMA).build();
|
||||
Struct struct = new Struct(schema);
|
||||
struct.validate();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testMissingFieldWithDefaultValidation() {
|
||||
Schema schema = SchemaBuilder.struct().field("field", DEFAULT_FIELD_SCHEMA).build();
|
||||
Struct struct = new Struct(schema);
|
||||
struct.validate();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testMissingFieldWithDefaultValue() {
|
||||
Schema schema = SchemaBuilder.struct().field("field", DEFAULT_FIELD_SCHEMA).build();
|
||||
Struct struct = new Struct(schema);
|
||||
assertEquals((byte) 0, struct.get("field"));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testMissingFieldWithoutDefaultValue() {
|
||||
Schema schema = SchemaBuilder.struct().field("field", REQUIRED_FIELD_SCHEMA).build();
|
||||
Struct struct = new Struct(schema);
|
||||
assertNull(struct.get("field"));
|
||||
}
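    // equals and hashCode must be driven by field values, including array, map, and nested struct contents, not by instance identity.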
|
||||
|
||||
|
||||
@Test
|
||||
public void testEquals() {
|
||||
Struct struct1 = new Struct(FLAT_STRUCT_SCHEMA)
|
||||
.put("int8", (byte) 12)
|
||||
.put("int16", (short) 12)
|
||||
.put("int32", 12)
|
||||
.put("int64", (long) 12)
|
||||
.put("float32", 12.f)
|
||||
.put("float64", 12.)
|
||||
.put("boolean", true)
|
||||
.put("string", "foobar")
|
||||
.put("bytes", ByteBuffer.wrap("foobar".getBytes()));
|
||||
Struct struct2 = new Struct(FLAT_STRUCT_SCHEMA)
|
||||
.put("int8", (byte) 12)
|
||||
.put("int16", (short) 12)
|
||||
.put("int32", 12)
|
||||
.put("int64", (long) 12)
|
||||
.put("float32", 12.f)
|
||||
.put("float64", 12.)
|
||||
.put("boolean", true)
|
||||
.put("string", "foobar")
|
||||
.put("bytes", ByteBuffer.wrap("foobar".getBytes()));
|
||||
Struct struct3 = new Struct(FLAT_STRUCT_SCHEMA)
|
||||
.put("int8", (byte) 12)
|
||||
.put("int16", (short) 12)
|
||||
.put("int32", 12)
|
||||
.put("int64", (long) 12)
|
||||
.put("float32", 12.f)
|
||||
.put("float64", 12.)
|
||||
.put("boolean", true)
|
||||
.put("string", "mismatching string")
|
||||
.put("bytes", ByteBuffer.wrap("foobar".getBytes()));
|
||||
|
||||
assertEquals(struct1, struct2);
|
||||
assertNotEquals(struct1, struct3);
|
||||
|
||||
List<Byte> array = Arrays.asList((byte) 1, (byte) 2);
|
||||
Map<Integer, String> map = Collections.singletonMap(1, "string");
|
||||
struct1 = new Struct(NESTED_SCHEMA)
|
||||
.put("array", array)
|
||||
.put("map", map)
|
||||
.put("nested", new Struct(NESTED_CHILD_SCHEMA).put("int8", (byte) 12));
|
||||
List<Byte> array2 = Arrays.asList((byte) 1, (byte) 2);
|
||||
Map<Integer, String> map2 = Collections.singletonMap(1, "string");
|
||||
struct2 = new Struct(NESTED_SCHEMA)
|
||||
.put("array", array2)
|
||||
.put("map", map2)
|
||||
.put("nested", new Struct(NESTED_CHILD_SCHEMA).put("int8", (byte) 12));
|
||||
List<Byte> array3 = Arrays.asList((byte) 1, (byte) 2, (byte) 3);
|
||||
Map<Integer, String> map3 = Collections.singletonMap(2, "string");
|
||||
struct3 = new Struct(NESTED_SCHEMA)
|
||||
.put("array", array3)
|
||||
.put("map", map3)
|
||||
.put("nested", new Struct(NESTED_CHILD_SCHEMA).put("int8", (byte) 13));
|
||||
|
||||
assertEquals(struct1, struct2);
|
||||
assertNotEquals(struct1, struct3);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testEqualsAndHashCodeWithByteArrayValue() {
|
||||
Struct struct1 = new Struct(FLAT_STRUCT_SCHEMA)
|
||||
.put("int8", (byte) 12)
|
||||
.put("int16", (short) 12)
|
||||
.put("int32", 12)
|
||||
.put("int64", (long) 12)
|
||||
.put("float32", 12.f)
|
||||
.put("float64", 12.)
|
||||
.put("boolean", true)
|
||||
.put("string", "foobar")
|
||||
.put("bytes", "foobar".getBytes());
|
||||
|
||||
Struct struct2 = new Struct(FLAT_STRUCT_SCHEMA)
|
||||
.put("int8", (byte) 12)
|
||||
.put("int16", (short) 12)
|
||||
.put("int32", 12)
|
||||
.put("int64", (long) 12)
|
||||
.put("float32", 12.f)
|
||||
.put("float64", 12.)
|
||||
.put("boolean", true)
|
||||
.put("string", "foobar")
|
||||
.put("bytes", "foobar".getBytes());
|
||||
|
||||
Struct struct3 = new Struct(FLAT_STRUCT_SCHEMA)
|
||||
.put("int8", (byte) 12)
|
||||
.put("int16", (short) 12)
|
||||
.put("int32", 12)
|
||||
.put("int64", (long) 12)
|
||||
.put("float32", 12.f)
|
||||
.put("float64", 12.)
|
||||
.put("boolean", true)
|
||||
.put("string", "foobar")
|
||||
.put("bytes", "mismatching_string".getBytes());
|
||||
|
||||
// Verify contract for equals: method must be reflexive and transitive
|
||||
assertEquals(struct1, struct2);
|
||||
assertEquals(struct2, struct1);
|
||||
assertNotEquals(struct1, struct3);
|
||||
assertNotEquals(struct2, struct3);
|
||||
// Testing hashCode against a hardcoded value here would be incorrect: hashCode values need not be equal for any
|
||||
// two distinct executions. However, based on the general contract for hashCode, if two objects are equal, their
|
||||
// hashCodes must be equal. If they are not equal, their hashCodes should not be equal for performance reasons.
|
||||
assertEquals(struct1.hashCode(), struct2.hashCode());
|
||||
assertNotEquals(struct1.hashCode(), struct3.hashCode());
|
||||
assertNotEquals(struct2.hashCode(), struct3.hashCode());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testValidateStructWithNullValue() {
|
||||
Schema schema = SchemaBuilder.struct()
|
||||
.field("one", Schema.STRING_SCHEMA)
|
||||
.field("two", Schema.STRING_SCHEMA)
|
||||
.field("three", Schema.STRING_SCHEMA)
|
||||
.build();
|
||||
|
||||
Struct struct = new Struct(schema);
|
||||
Exception e = assertThrows(DataException.class, struct::validate);
|
||||
assertEquals("Invalid value: null used for required field: \"one\", schema type: STRING",
|
||||
e.getMessage());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testValidateFieldWithInvalidValueType() {
|
||||
String fieldName = "field";
|
||||
FakeSchema fakeSchema = new FakeSchema();
|
||||
|
||||
Exception e = assertThrows(DataException.class, () -> ConnectSchema.validateValue(fieldName,
|
||||
fakeSchema, new Object()));
|
||||
assertEquals("Invalid Java object for schema type null: class java.lang.Object for field: \"field\"",
|
||||
e.getMessage());
|
||||
|
||||
e = assertThrows(DataException.class, () -> ConnectSchema.validateValue(fieldName,
|
||||
Schema.INT8_SCHEMA, new Object()));
|
||||
assertEquals("Invalid Java object for schema type INT8: class java.lang.Object for field: \"field\"",
|
||||
e.getMessage());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testPutNullField() {
|
||||
final String fieldName = "fieldName";
|
||||
Schema testSchema = SchemaBuilder.struct()
|
||||
.field(fieldName, Schema.STRING_SCHEMA);
|
||||
Struct struct = new Struct(testSchema);
|
||||
|
||||
assertThrows(DataException.class, () -> struct.put((Field) null, "valid"));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testInvalidPutIncludesFieldName() {
|
||||
final String fieldName = "fieldName";
|
||||
Schema testSchema = SchemaBuilder.struct()
|
||||
.field(fieldName, Schema.STRING_SCHEMA);
|
||||
Struct struct = new Struct(testSchema);
|
||||
|
||||
Exception e = assertThrows(DataException.class, () -> struct.put(fieldName, null));
|
||||
assertEquals("Invalid value: null used for required field: \"fieldName\", schema type: STRING",
|
||||
e.getMessage());
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,79 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.connect.data;

import org.apache.kafka.connect.errors.DataException;
import org.junit.Test;

import java.util.Calendar;
import java.util.GregorianCalendar;
import java.util.TimeZone;

import static org.junit.Assert.assertEquals;

public class TimeTest {
    private static final GregorianCalendar EPOCH;
    private static final GregorianCalendar EPOCH_PLUS_DATE_COMPONENT;
    private static final GregorianCalendar EPOCH_PLUS_TEN_THOUSAND_MILLIS;
    static {
        EPOCH = new GregorianCalendar(1970, Calendar.JANUARY, 1, 0, 0, 0);
        EPOCH.setTimeZone(TimeZone.getTimeZone("UTC"));

        EPOCH_PLUS_TEN_THOUSAND_MILLIS = new GregorianCalendar(1970, Calendar.JANUARY, 1, 0, 0, 0);
        EPOCH_PLUS_TEN_THOUSAND_MILLIS.setTimeZone(TimeZone.getTimeZone("UTC"));
        EPOCH_PLUS_TEN_THOUSAND_MILLIS.add(Calendar.MILLISECOND, 10000);


        EPOCH_PLUS_DATE_COMPONENT = new GregorianCalendar(1970, Calendar.JANUARY, 1, 0, 0, 0);
        EPOCH_PLUS_DATE_COMPONENT.setTimeZone(TimeZone.getTimeZone("UTC"));
        EPOCH_PLUS_DATE_COMPONENT.add(Calendar.DATE, 10000);
    }

    @Test
    public void testBuilder() {
        Schema plain = Time.SCHEMA;
        assertEquals(Time.LOGICAL_NAME, plain.name());
        assertEquals(1, (Object) plain.version());
    }

    @Test
    public void testFromLogical() {
        assertEquals(0, Time.fromLogical(Time.SCHEMA, EPOCH.getTime()));
        assertEquals(10000, Time.fromLogical(Time.SCHEMA, EPOCH_PLUS_TEN_THOUSAND_MILLIS.getTime()));
    }

    @Test(expected = DataException.class)
    public void testFromLogicalInvalidSchema() {
        Time.fromLogical(Time.builder().name("invalid").build(), EPOCH.getTime());
    }

    @Test(expected = DataException.class)
    public void testFromLogicalInvalidHasDateComponents() {
        Time.fromLogical(Time.SCHEMA, EPOCH_PLUS_DATE_COMPONENT.getTime());
    }

    @Test
    public void testToLogical() {
        assertEquals(EPOCH.getTime(), Time.toLogical(Time.SCHEMA, 0));
        assertEquals(EPOCH_PLUS_TEN_THOUSAND_MILLIS.getTime(), Time.toLogical(Time.SCHEMA, 10000));
    }

    @Test(expected = DataException.class)
    public void testToLogicalInvalidSchema() {
        Time.toLogical(Time.builder().name("invalid").build(), 0);
    }
}
@@ -0,0 +1,74 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.connect.data;

import org.apache.kafka.connect.errors.DataException;
import org.junit.Test;

import java.util.Calendar;
import java.util.GregorianCalendar;
import java.util.TimeZone;

import static org.junit.Assert.assertEquals;

public class TimestampTest {
    private static final GregorianCalendar EPOCH;
    private static final GregorianCalendar EPOCH_PLUS_MILLIS;

    private static final int NUM_MILLIS = 2000000000;
    private static final long TOTAL_MILLIS = ((long) NUM_MILLIS) * 2;

    static {
        EPOCH = new GregorianCalendar(1970, Calendar.JANUARY, 1, 0, 0, 0);
        EPOCH.setTimeZone(TimeZone.getTimeZone("UTC"));


        EPOCH_PLUS_MILLIS = new GregorianCalendar(1970, Calendar.JANUARY, 1, 0, 0, 0);
        EPOCH_PLUS_MILLIS.setTimeZone(TimeZone.getTimeZone("UTC"));
        EPOCH_PLUS_MILLIS.add(Calendar.MILLISECOND, NUM_MILLIS);
        EPOCH_PLUS_MILLIS.add(Calendar.MILLISECOND, NUM_MILLIS);
    }

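    // EPOCH_PLUS_MILLIS is advanced by NUM_MILLIS twice in the static block above, so TOTAL_MILLIS
    // (2 * NUM_MILLIS) is its expected offset from the epoch in the tests below.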
    @Test
    public void testBuilder() {
        Schema plain = Timestamp.SCHEMA;
        assertEquals(Timestamp.LOGICAL_NAME, plain.name());
        assertEquals(1, (Object) plain.version());
    }

    @Test
    public void testFromLogical() {
        assertEquals(0L, Timestamp.fromLogical(Timestamp.SCHEMA, EPOCH.getTime()));
        assertEquals(TOTAL_MILLIS, Timestamp.fromLogical(Timestamp.SCHEMA, EPOCH_PLUS_MILLIS.getTime()));
    }

    @Test(expected = DataException.class)
    public void testFromLogicalInvalidSchema() {
        Timestamp.fromLogical(Timestamp.builder().name("invalid").build(), EPOCH.getTime());
    }

    @Test
    public void testToLogical() {
        assertEquals(EPOCH.getTime(), Timestamp.toLogical(Timestamp.SCHEMA, 0L));
        assertEquals(EPOCH_PLUS_MILLIS.getTime(), Timestamp.toLogical(Timestamp.SCHEMA, TOTAL_MILLIS));
    }

    @Test(expected = DataException.class)
    public void testToLogicalInvalidSchema() {
        Timestamp.toLogical(Timestamp.builder().name("invalid").build(), 0);
    }
}
@@ -0,0 +1,855 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.connect.data;

import org.apache.kafka.connect.data.Schema.Type;
import org.apache.kafka.connect.data.Values.Parser;
import org.apache.kafka.connect.errors.DataException;
import org.junit.Test;

import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

public class ValuesTest {

    private static final String WHITESPACE = "\n \t \t\n";

    private static final long MILLIS_PER_DAY = 24 * 60 * 60 * 1000;

    private static final Map<String, String> STRING_MAP = new LinkedHashMap<>();
    private static final Schema STRING_MAP_SCHEMA = SchemaBuilder.map(Schema.STRING_SCHEMA, Schema.STRING_SCHEMA).schema();

    private static final Map<String, Short> STRING_SHORT_MAP = new LinkedHashMap<>();
    private static final Schema STRING_SHORT_MAP_SCHEMA = SchemaBuilder.map(Schema.STRING_SCHEMA, Schema.INT16_SCHEMA).schema();

    private static final Map<String, Integer> STRING_INT_MAP = new LinkedHashMap<>();
    private static final Schema STRING_INT_MAP_SCHEMA = SchemaBuilder.map(Schema.STRING_SCHEMA, Schema.INT32_SCHEMA).schema();

    private static final List<Integer> INT_LIST = new ArrayList<>();
    private static final Schema INT_LIST_SCHEMA = SchemaBuilder.array(Schema.INT32_SCHEMA).schema();

    private static final List<String> STRING_LIST = new ArrayList<>();
    private static final Schema STRING_LIST_SCHEMA = SchemaBuilder.array(Schema.STRING_SCHEMA).schema();

    static {
        STRING_MAP.put("foo", "123");
        STRING_MAP.put("bar", "baz");
        STRING_SHORT_MAP.put("foo", (short) 12345);
        STRING_SHORT_MAP.put("bar", (short) 0);
        STRING_SHORT_MAP.put("baz", (short) -4321);
        STRING_INT_MAP.put("foo", 1234567890);
        STRING_INT_MAP.put("bar", 0);
        STRING_INT_MAP.put("baz", -987654321);
        STRING_LIST.add("foo");
        STRING_LIST.add("bar");
        INT_LIST.add(1234567890);
        INT_LIST.add(-987654321);
    }

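    // Values.parseString infers maps, arrays, numbers, booleans, and null where the whole input parses cleanly;
    // otherwise the tests below expect it to fall back to a STRING schema holding the original text.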
@Test
|
||||
public void shouldNotParseUnquotedEmbeddedMapKeysAsStrings() {
|
||||
SchemaAndValue schemaAndValue = Values.parseString("{foo: 3}");
|
||||
assertEquals(Type.STRING, schemaAndValue.schema().type());
|
||||
assertEquals("{foo: 3}", schemaAndValue.value());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldNotParseUnquotedEmbeddedMapValuesAsStrings() {
|
||||
SchemaAndValue schemaAndValue = Values.parseString("{3: foo}");
|
||||
assertEquals(Type.STRING, schemaAndValue.schema().type());
|
||||
assertEquals("{3: foo}", schemaAndValue.value());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldNotParseUnquotedArrayElementsAsStrings() {
|
||||
SchemaAndValue schemaAndValue = Values.parseString("[foo]");
|
||||
assertEquals(Type.STRING, schemaAndValue.schema().type());
|
||||
assertEquals("[foo]", schemaAndValue.value());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldNotParseStringsBeginningWithNullAsStrings() {
|
||||
SchemaAndValue schemaAndValue = Values.parseString("null=");
|
||||
assertEquals(Type.STRING, schemaAndValue.schema().type());
|
||||
assertEquals("null=", schemaAndValue.value());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldParseStringsBeginningWithTrueAsStrings() {
|
||||
SchemaAndValue schemaAndValue = Values.parseString("true}");
|
||||
assertEquals(Type.STRING, schemaAndValue.schema().type());
|
||||
assertEquals("true}", schemaAndValue.value());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldParseStringsBeginningWithFalseAsStrings() {
|
||||
SchemaAndValue schemaAndValue = Values.parseString("false]");
|
||||
assertEquals(Type.STRING, schemaAndValue.schema().type());
|
||||
assertEquals("false]", schemaAndValue.value());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldParseTrueAsBooleanIfSurroundedByWhitespace() {
|
||||
SchemaAndValue schemaAndValue = Values.parseString(WHITESPACE + "true" + WHITESPACE);
|
||||
assertEquals(Type.BOOLEAN, schemaAndValue.schema().type());
|
||||
assertEquals(true, schemaAndValue.value());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldParseFalseAsBooleanIfSurroundedByWhitespace() {
|
||||
SchemaAndValue schemaAndValue = Values.parseString(WHITESPACE + "false" + WHITESPACE);
|
||||
assertEquals(Type.BOOLEAN, schemaAndValue.schema().type());
|
||||
assertEquals(false, schemaAndValue.value());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldParseNullAsNullIfSurroundedByWhitespace() {
|
||||
SchemaAndValue schemaAndValue = Values.parseString(WHITESPACE + "null" + WHITESPACE);
|
||||
assertNull(schemaAndValue);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldParseBooleanLiteralsEmbeddedInArray() {
|
||||
SchemaAndValue schemaAndValue = Values.parseString("[true, false]");
|
||||
assertEquals(Type.ARRAY, schemaAndValue.schema().type());
|
||||
assertEquals(Type.BOOLEAN, schemaAndValue.schema().valueSchema().type());
|
||||
assertEquals(Arrays.asList(true, false), schemaAndValue.value());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldParseBooleanLiteralsEmbeddedInMap() {
|
||||
SchemaAndValue schemaAndValue = Values.parseString("{true: false, false: true}");
|
||||
assertEquals(Type.MAP, schemaAndValue.schema().type());
|
||||
assertEquals(Type.BOOLEAN, schemaAndValue.schema().keySchema().type());
|
||||
assertEquals(Type.BOOLEAN, schemaAndValue.schema().valueSchema().type());
|
||||
Map<Boolean, Boolean> expectedValue = new HashMap<>();
|
||||
expectedValue.put(true, false);
|
||||
expectedValue.put(false, true);
|
||||
assertEquals(expectedValue, schemaAndValue.value());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldNotParseAsMapWithoutCommas() {
|
||||
SchemaAndValue schemaAndValue = Values.parseString("{6:9 4:20}");
|
||||
assertEquals(Type.STRING, schemaAndValue.schema().type());
|
||||
assertEquals("{6:9 4:20}", schemaAndValue.value());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldNotParseAsArrayWithoutCommas() {
|
||||
SchemaAndValue schemaAndValue = Values.parseString("[0 1 2]");
|
||||
assertEquals(Type.STRING, schemaAndValue.schema().type());
|
||||
assertEquals("[0 1 2]", schemaAndValue.value());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldParseEmptyMap() {
|
||||
SchemaAndValue schemaAndValue = Values.parseString("{}");
|
||||
assertEquals(Type.MAP, schemaAndValue.schema().type());
|
||||
assertEquals(Collections.emptyMap(), schemaAndValue.value());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldParseEmptyArray() {
|
||||
SchemaAndValue schemaAndValue = Values.parseString("[]");
|
||||
assertEquals(Type.ARRAY, schemaAndValue.schema().type());
|
||||
assertEquals(Collections.emptyList(), schemaAndValue.value());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldNotParseAsMapWithNullKeys() {
|
||||
SchemaAndValue schemaAndValue = Values.parseString("{null: 3}");
|
||||
assertEquals(Type.STRING, schemaAndValue.schema().type());
|
||||
assertEquals("{null: 3}", schemaAndValue.value());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldParseNull() {
|
||||
SchemaAndValue schemaAndValue = Values.parseString("null");
|
||||
assertNull(schemaAndValue);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldConvertStringOfNull() {
|
||||
assertRoundTrip(Schema.STRING_SCHEMA, "null");
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldParseNullMapValues() {
|
||||
SchemaAndValue schemaAndValue = Values.parseString("{3: null}");
|
||||
assertEquals(Type.MAP, schemaAndValue.schema().type());
|
||||
assertEquals(Type.INT8, schemaAndValue.schema().keySchema().type());
|
||||
assertEquals(Collections.singletonMap((byte) 3, null), schemaAndValue.value());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldParseNullArrayElements() {
|
||||
SchemaAndValue schemaAndValue = Values.parseString("[null]");
|
||||
assertEquals(Type.ARRAY, schemaAndValue.schema().type());
|
||||
assertEquals(Collections.singletonList(null), schemaAndValue.value());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldEscapeStringsWithEmbeddedQuotesAndBackslashes() {
|
||||
String original = "three\"blind\\\"mice";
|
||||
String expected = "three\\\"blind\\\\\\\"mice";
|
||||
assertEquals(expected, Values.escape(original));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldConvertNullValue() {
|
||||
assertRoundTrip(Schema.STRING_SCHEMA, Schema.STRING_SCHEMA, null);
|
||||
assertRoundTrip(Schema.OPTIONAL_STRING_SCHEMA, Schema.STRING_SCHEMA, null);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldConvertBooleanValues() {
|
||||
assertRoundTrip(Schema.BOOLEAN_SCHEMA, Schema.BOOLEAN_SCHEMA, Boolean.FALSE);
|
||||
SchemaAndValue resultFalse = roundTrip(Schema.BOOLEAN_SCHEMA, "false");
|
||||
assertEquals(Schema.BOOLEAN_SCHEMA, resultFalse.schema());
|
||||
assertEquals(Boolean.FALSE, resultFalse.value());
|
||||
|
||||
assertRoundTrip(Schema.BOOLEAN_SCHEMA, Schema.BOOLEAN_SCHEMA, Boolean.TRUE);
|
||||
SchemaAndValue resultTrue = roundTrip(Schema.BOOLEAN_SCHEMA, "true");
|
||||
assertEquals(Schema.BOOLEAN_SCHEMA, resultTrue.schema());
|
||||
assertEquals(Boolean.TRUE, resultTrue.value());
|
||||
}
|
||||
|
||||
@Test(expected = DataException.class)
|
||||
public void shouldFailToParseInvalidBooleanValueString() {
|
||||
Values.convertToBoolean(Schema.STRING_SCHEMA, "\"green\"");
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldConvertSimpleString() {
|
||||
assertRoundTrip(Schema.STRING_SCHEMA, "simple");
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldConvertEmptyString() {
|
||||
assertRoundTrip(Schema.STRING_SCHEMA, "");
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldConvertStringWithQuotesAndOtherDelimiterCharacters() {
|
||||
assertRoundTrip(Schema.STRING_SCHEMA, Schema.STRING_SCHEMA, "three\"blind\\\"mice");
|
||||
assertRoundTrip(Schema.STRING_SCHEMA, Schema.STRING_SCHEMA, "string with delimiters: <>?,./\\=+-!@#$%^&*(){}[]|;':");
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldConvertMapWithStringKeys() {
|
||||
assertRoundTrip(STRING_MAP_SCHEMA, STRING_MAP_SCHEMA, STRING_MAP);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldParseStringOfMapWithStringValuesWithoutWhitespaceAsMap() {
|
||||
SchemaAndValue result = roundTrip(STRING_MAP_SCHEMA, "{\"foo\":\"123\",\"bar\":\"baz\"}");
|
||||
assertEquals(STRING_MAP_SCHEMA, result.schema());
|
||||
assertEquals(STRING_MAP, result.value());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldParseStringOfMapWithStringValuesWithWhitespaceAsMap() {
|
||||
SchemaAndValue result = roundTrip(STRING_MAP_SCHEMA, "{ \"foo\" : \"123\", \n\"bar\" : \"baz\" } ");
|
||||
assertEquals(STRING_MAP_SCHEMA, result.schema());
|
||||
assertEquals(STRING_MAP, result.value());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldConvertMapWithStringKeysAndShortValues() {
|
||||
assertRoundTrip(STRING_SHORT_MAP_SCHEMA, STRING_SHORT_MAP_SCHEMA, STRING_SHORT_MAP);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldParseStringOfMapWithShortValuesWithoutWhitespaceAsMap() {
|
||||
SchemaAndValue result = roundTrip(STRING_SHORT_MAP_SCHEMA, "{\"foo\":12345,\"bar\":0,\"baz\":-4321}");
|
||||
assertEquals(STRING_SHORT_MAP_SCHEMA, result.schema());
|
||||
assertEquals(STRING_SHORT_MAP, result.value());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldParseStringOfMapWithShortValuesWithWhitespaceAsMap() {
|
||||
SchemaAndValue result = roundTrip(STRING_SHORT_MAP_SCHEMA, " { \"foo\" : 12345 , \"bar\" : 0, \"baz\" : -4321 } ");
|
||||
assertEquals(STRING_SHORT_MAP_SCHEMA, result.schema());
|
||||
assertEquals(STRING_SHORT_MAP, result.value());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldConvertMapWithStringKeysAndIntegerValues() {
|
||||
assertRoundTrip(STRING_INT_MAP_SCHEMA, STRING_INT_MAP_SCHEMA, STRING_INT_MAP);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldParseStringOfMapWithIntValuesWithoutWhitespaceAsMap() {
|
||||
SchemaAndValue result = roundTrip(STRING_INT_MAP_SCHEMA, "{\"foo\":1234567890,\"bar\":0,\"baz\":-987654321}");
|
||||
assertEquals(STRING_INT_MAP_SCHEMA, result.schema());
|
||||
assertEquals(STRING_INT_MAP, result.value());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldParseStringOfMapWithIntValuesWithWhitespaceAsMap() {
|
||||
SchemaAndValue result = roundTrip(STRING_INT_MAP_SCHEMA, " { \"foo\" : 1234567890 , \"bar\" : 0, \"baz\" : -987654321 } ");
|
||||
assertEquals(STRING_INT_MAP_SCHEMA, result.schema());
|
||||
assertEquals(STRING_INT_MAP, result.value());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldConvertListWithStringValues() {
|
||||
assertRoundTrip(STRING_LIST_SCHEMA, STRING_LIST_SCHEMA, STRING_LIST);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldConvertListWithIntegerValues() {
|
||||
assertRoundTrip(INT_LIST_SCHEMA, INT_LIST_SCHEMA, INT_LIST);
|
||||
}
|
||||
|
||||
/**
|
||||
* The parsed array has byte values and one int value, so we should return list with single unified type of integers.
|
||||
*/
|
||||
@Test
|
||||
public void shouldConvertStringOfListWithOnlyNumericElementTypesIntoListOfLargestNumericType() {
|
||||
int thirdValue = Short.MAX_VALUE + 1;
|
||||
List<?> list = Values.convertToList(Schema.STRING_SCHEMA, "[1, 2, " + thirdValue + "]");
|
||||
assertEquals(3, list.size());
|
||||
assertEquals(1, ((Number) list.get(0)).intValue());
|
||||
assertEquals(2, ((Number) list.get(1)).intValue());
|
||||
assertEquals(thirdValue, ((Number) list.get(2)).intValue());
|
||||
}
|
||||
|
||||
/**
|
||||
 * The parsed array has two numeric values and one string value, so no single element type can be inferred and the resulting list keeps the individual element types.
|
||||
*/
|
||||
@Test
|
||||
public void shouldConvertStringOfListWithMixedElementTypesIntoListWithDifferentElementTypes() {
|
||||
String str = "[1, 2, \"three\"]";
|
||||
List<?> list = Values.convertToList(Schema.STRING_SCHEMA, str);
|
||||
assertEquals(3, list.size());
|
||||
assertEquals(1, ((Number) list.get(0)).intValue());
|
||||
assertEquals(2, ((Number) list.get(1)).intValue());
|
||||
assertEquals("three", list.get(2));
|
||||
}
|
||||
|
||||
/**
|
||||
* We parse into different element types, but cannot infer a common element schema.
|
||||
*/
|
||||
@Test
|
||||
public void shouldParseStringListWithMultipleElementTypesAndReturnListWithNoSchema() {
|
||||
String str = "[1, 2, 3, \"four\"]";
|
||||
SchemaAndValue result = Values.parseString(str);
|
||||
assertEquals(Type.ARRAY, result.schema().type());
|
||||
assertNull(result.schema().valueSchema());
|
||||
List<?> list = (List<?>) result.value();
|
||||
assertEquals(4, list.size());
|
||||
assertEquals(1, ((Number) list.get(0)).intValue());
|
||||
assertEquals(2, ((Number) list.get(1)).intValue());
|
||||
assertEquals(3, ((Number) list.get(2)).intValue());
|
||||
assertEquals("four", list.get(3));
|
||||
}
|
||||
|
||||
/**
|
||||
* We can't infer or successfully parse into a different type, so this returns the same string.
|
||||
*/
|
||||
@Test
|
||||
public void shouldParseStringListWithExtraDelimitersAndReturnString() {
|
||||
String str = "[1, 2, 3,,,]";
|
||||
SchemaAndValue result = Values.parseString(str);
|
||||
assertEquals(Type.STRING, result.schema().type());
|
||||
assertEquals(str, result.value());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldParseTimestampStringAsTimestamp() throws Exception {
|
||||
String str = "2019-08-23T14:34:54.346Z";
|
||||
SchemaAndValue result = Values.parseString(str);
|
||||
assertEquals(Type.INT64, result.schema().type());
|
||||
assertEquals(Timestamp.LOGICAL_NAME, result.schema().name());
|
||||
java.util.Date expected = new SimpleDateFormat(Values.ISO_8601_TIMESTAMP_FORMAT_PATTERN).parse(str);
|
||||
assertEquals(expected, result.value());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldParseDateStringAsDate() throws Exception {
|
||||
String str = "2019-08-23";
|
||||
SchemaAndValue result = Values.parseString(str);
|
||||
assertEquals(Type.INT32, result.schema().type());
|
||||
assertEquals(Date.LOGICAL_NAME, result.schema().name());
|
||||
java.util.Date expected = new SimpleDateFormat(Values.ISO_8601_DATE_FORMAT_PATTERN).parse(str);
|
||||
assertEquals(expected, result.value());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldParseTimeStringAsDate() throws Exception {
|
||||
String str = "14:34:54.346Z";
|
||||
SchemaAndValue result = Values.parseString(str);
|
||||
assertEquals(Type.INT32, result.schema().type());
|
||||
assertEquals(Time.LOGICAL_NAME, result.schema().name());
|
||||
java.util.Date expected = new SimpleDateFormat(Values.ISO_8601_TIME_FORMAT_PATTERN).parse(str);
|
||||
assertEquals(expected, result.value());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldParseTimestampStringWithEscapedColonsAsTimestamp() throws Exception {
|
||||
String str = "2019-08-23T14\\:34\\:54.346Z";
|
||||
SchemaAndValue result = Values.parseString(str);
|
||||
assertEquals(Type.INT64, result.schema().type());
|
||||
assertEquals(Timestamp.LOGICAL_NAME, result.schema().name());
|
||||
String expectedStr = "2019-08-23T14:34:54.346Z";
|
||||
java.util.Date expected = new SimpleDateFormat(Values.ISO_8601_TIMESTAMP_FORMAT_PATTERN).parse(expectedStr);
|
||||
assertEquals(expected, result.value());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldParseTimeStringWithEscapedColonsAsDate() throws Exception {
|
||||
String str = "14\\:34\\:54.346Z";
|
||||
SchemaAndValue result = Values.parseString(str);
|
||||
assertEquals(Type.INT32, result.schema().type());
|
||||
assertEquals(Time.LOGICAL_NAME, result.schema().name());
|
||||
String expectedStr = "14:34:54.346Z";
|
||||
java.util.Date expected = new SimpleDateFormat(Values.ISO_8601_TIME_FORMAT_PATTERN).parse(expectedStr);
|
||||
assertEquals(expected, result.value());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldParseDateStringAsDateInArray() throws Exception {
|
||||
String dateStr = "2019-08-23";
|
||||
String arrayStr = "[" + dateStr + "]";
|
||||
SchemaAndValue result = Values.parseString(arrayStr);
|
||||
assertEquals(Type.ARRAY, result.schema().type());
|
||||
Schema elementSchema = result.schema().valueSchema();
|
||||
assertEquals(Type.INT32, elementSchema.type());
|
||||
assertEquals(Date.LOGICAL_NAME, elementSchema.name());
|
||||
java.util.Date expected = new SimpleDateFormat(Values.ISO_8601_DATE_FORMAT_PATTERN).parse(dateStr);
|
||||
assertEquals(Collections.singletonList(expected), result.value());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldParseTimeStringAsTimeInArray() throws Exception {
|
||||
String timeStr = "14:34:54.346Z";
|
||||
String arrayStr = "[" + timeStr + "]";
|
||||
SchemaAndValue result = Values.parseString(arrayStr);
|
||||
assertEquals(Type.ARRAY, result.schema().type());
|
||||
Schema elementSchema = result.schema().valueSchema();
|
||||
assertEquals(Type.INT32, elementSchema.type());
|
||||
assertEquals(Time.LOGICAL_NAME, elementSchema.name());
|
||||
java.util.Date expected = new SimpleDateFormat(Values.ISO_8601_TIME_FORMAT_PATTERN).parse(timeStr);
|
||||
assertEquals(Collections.singletonList(expected), result.value());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldParseTimestampStringAsTimestampInArray() throws Exception {
|
||||
String tsStr = "2019-08-23T14:34:54.346Z";
|
||||
String arrayStr = "[" + tsStr + "]";
|
||||
SchemaAndValue result = Values.parseString(arrayStr);
|
||||
assertEquals(Type.ARRAY, result.schema().type());
|
||||
Schema elementSchema = result.schema().valueSchema();
|
||||
assertEquals(Type.INT64, elementSchema.type());
|
||||
assertEquals(Timestamp.LOGICAL_NAME, elementSchema.name());
|
||||
java.util.Date expected = new SimpleDateFormat(Values.ISO_8601_TIMESTAMP_FORMAT_PATTERN).parse(tsStr);
|
||||
assertEquals(Collections.singletonList(expected), result.value());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldParseMultipleTimestampStringAsTimestampInArray() throws Exception {
|
||||
String tsStr1 = "2019-08-23T14:34:54.346Z";
|
||||
String tsStr2 = "2019-01-23T15:12:34.567Z";
|
||||
String tsStr3 = "2019-04-23T19:12:34.567Z";
|
||||
String arrayStr = "[" + tsStr1 + "," + tsStr2 + ", " + tsStr3 + "]";
|
||||
SchemaAndValue result = Values.parseString(arrayStr);
|
||||
assertEquals(Type.ARRAY, result.schema().type());
|
||||
Schema elementSchema = result.schema().valueSchema();
|
||||
assertEquals(Type.INT64, elementSchema.type());
|
||||
assertEquals(Timestamp.LOGICAL_NAME, elementSchema.name());
|
||||
java.util.Date expected1 = new SimpleDateFormat(Values.ISO_8601_TIMESTAMP_FORMAT_PATTERN).parse(tsStr1);
|
||||
java.util.Date expected2 = new SimpleDateFormat(Values.ISO_8601_TIMESTAMP_FORMAT_PATTERN).parse(tsStr2);
|
||||
java.util.Date expected3 = new SimpleDateFormat(Values.ISO_8601_TIMESTAMP_FORMAT_PATTERN).parse(tsStr3);
|
||||
assertEquals(Arrays.asList(expected1, expected2, expected3), result.value());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldParseQuotedTimeStringAsTimeInMap() throws Exception {
|
||||
String keyStr = "k1";
|
||||
String timeStr = "14:34:54.346Z";
|
||||
String mapStr = "{\"" + keyStr + "\":\"" + timeStr + "\"}";
|
||||
SchemaAndValue result = Values.parseString(mapStr);
|
||||
assertEquals(Type.MAP, result.schema().type());
|
||||
Schema keySchema = result.schema().keySchema();
|
||||
Schema valueSchema = result.schema().valueSchema();
|
||||
assertEquals(Type.STRING, keySchema.type());
|
||||
assertEquals(Type.INT32, valueSchema.type());
|
||||
assertEquals(Time.LOGICAL_NAME, valueSchema.name());
|
||||
java.util.Date expected = new SimpleDateFormat(Values.ISO_8601_TIME_FORMAT_PATTERN).parse(timeStr);
|
||||
assertEquals(Collections.singletonMap(keyStr, expected), result.value());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldParseTimeStringAsTimeInMap() throws Exception {
|
||||
String keyStr = "k1";
|
||||
String timeStr = "14:34:54.346Z";
|
||||
String mapStr = "{\"" + keyStr + "\":" + timeStr + "}";
|
||||
SchemaAndValue result = Values.parseString(mapStr);
|
||||
assertEquals(Type.MAP, result.schema().type());
|
||||
Schema keySchema = result.schema().keySchema();
|
||||
Schema valueSchema = result.schema().valueSchema();
|
||||
assertEquals(Type.STRING, keySchema.type());
|
||||
assertEquals(Type.INT32, valueSchema.type());
|
||||
assertEquals(Time.LOGICAL_NAME, valueSchema.name());
|
||||
java.util.Date expected = new SimpleDateFormat(Values.ISO_8601_TIME_FORMAT_PATTERN).parse(timeStr);
|
||||
assertEquals(Collections.singletonMap(keyStr, expected), result.value());
|
||||
}
|
||||
|
||||
/**
|
||||
* This is technically invalid JSON, and we don't want to simply ignore the blank elements.
|
||||
*/
|
||||
@Test(expected = DataException.class)
|
||||
public void shouldFailToConvertToListFromStringWithExtraDelimiters() {
|
||||
Values.convertToList(Schema.STRING_SCHEMA, "[1, 2, 3,,,]");
|
||||
}
|
||||
|
||||
/**
|
||||
* Schema of type ARRAY requires a schema for the values, but Connect has no union or "any" schema type.
|
||||
* Therefore, we can't represent this.
|
||||
*/
|
||||
@Test(expected = DataException.class)
|
||||
public void shouldFailToConvertToListFromStringWithNonCommonElementTypeAndBlankElement() {
|
||||
Values.convertToList(Schema.STRING_SCHEMA, "[1, 2, 3, \"four\",,,]");
|
||||
}
|
||||
|
||||
/**
|
||||
* This is technically invalid JSON, and we don't want to simply ignore the blank entry.
|
||||
*/
|
||||
@Test(expected = DataException.class)
|
||||
public void shouldFailToParseStringOfMapWithIntValuesWithBlankEntry() {
|
||||
Values.convertToMap(Schema.STRING_SCHEMA, " { \"foo\" : 1234567890 ,, \"bar\" : 0, \"baz\" : -987654321 } ");
|
||||
}
|
||||
|
||||
/**
|
||||
* This is technically invalid JSON, and we don't want to simply ignore the malformed entry.
|
||||
*/
|
||||
@Test(expected = DataException.class)
|
||||
public void shouldFailToParseStringOfMalformedMap() {
|
||||
Values.convertToMap(Schema.STRING_SCHEMA, " { \"foo\" : 1234567890 , \"a\", \"bar\" : 0, \"baz\" : -987654321 } ");
|
||||
}
|
||||
|
||||
/**
|
||||
* This is technically invalid JSON, and we don't want to simply ignore the blank entries.
|
||||
*/
|
||||
@Test(expected = DataException.class)
|
||||
public void shouldFailToParseStringOfMapWithIntValuesWithOnlyBlankEntries() {
|
||||
Values.convertToMap(Schema.STRING_SCHEMA, " { ,, , , } ");
|
||||
}
|
||||
|
||||
/**
|
||||
* This is technically invalid JSON, and we don't want to simply ignore the blank entry.
|
||||
*/
|
||||
@Test(expected = DataException.class)
|
||||
public void shouldFailToParseStringOfMapWithIntValuesWithBlankEntries() {
|
||||
Values.convertToMap(Schema.STRING_SCHEMA, " { \"foo\" : \"1234567890\" ,, \"bar\" : \"0\", \"baz\" : \"boz\" } ");
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldConsumeMultipleTokens() {
|
||||
String value = "a:b:c:d:e:f:g:h";
|
||||
Parser parser = new Parser(value);
|
||||
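// next(n) concatenates the next n tokens ("a", ":", "b", ":", "c" here); when fewer than n tokens remain it returns null without consuming anything.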
String firstFive = parser.next(5);
|
||||
assertEquals("a:b:c", firstFive);
|
||||
assertEquals(":", parser.next());
|
||||
assertEquals("d", parser.next());
|
||||
assertEquals(":", parser.next());
|
||||
String lastEight = parser.next(8); // only 7 remain
|
||||
assertNull(lastEight);
|
||||
assertEquals("e", parser.next());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldParseStringsWithoutDelimiters() {
|
||||
//assertParsed("");
|
||||
assertParsed(" ");
|
||||
assertParsed("simple");
|
||||
assertParsed("simple string");
|
||||
assertParsed("simple \n\t\bstring");
|
||||
assertParsed("'simple' string");
|
||||
assertParsed("si\\mple");
|
||||
assertParsed("si\\\\mple");
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldParseStringsWithEscapedDelimiters() {
|
||||
assertParsed("si\\\"mple");
|
||||
assertParsed("si\\{mple");
|
||||
assertParsed("si\\}mple");
|
||||
assertParsed("si\\]mple");
|
||||
assertParsed("si\\[mple");
|
||||
assertParsed("si\\:mple");
|
||||
assertParsed("si\\,mple");
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldParseStringsWithSingleDelimiter() {
|
||||
assertParsed("a{b", "a", "{", "b");
|
||||
assertParsed("a}b", "a", "}", "b");
|
||||
assertParsed("a[b", "a", "[", "b");
|
||||
assertParsed("a]b", "a", "]", "b");
|
||||
assertParsed("a:b", "a", ":", "b");
|
||||
assertParsed("a,b", "a", ",", "b");
|
||||
assertParsed("a\"b", "a", "\"", "b");
|
||||
assertParsed("{b", "{", "b");
|
||||
assertParsed("}b", "}", "b");
|
||||
assertParsed("[b", "[", "b");
|
||||
assertParsed("]b", "]", "b");
|
||||
assertParsed(":b", ":", "b");
|
||||
assertParsed(",b", ",", "b");
|
||||
assertParsed("\"b", "\"", "b");
|
||||
assertParsed("{", "{");
|
||||
assertParsed("}", "}");
|
||||
assertParsed("[", "[");
|
||||
assertParsed("]", "]");
|
||||
assertParsed(":", ":");
|
||||
assertParsed(",", ",");
|
||||
assertParsed("\"", "\"");
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldParseStringsWithMultipleDelimiters() {
|
||||
assertParsed("\"simple\" string", "\"", "simple", "\"", " string");
|
||||
assertParsed("a{bc}d", "a", "{", "bc", "}", "d");
|
||||
assertParsed("a { b c } d", "a ", "{", " b c ", "}", " d");
|
||||
assertParsed("a { b c } d", "a ", "{", " b c ", "}", " d");
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldConvertTimeValues() {
|
||||
java.util.Date current = new java.util.Date();
|
||||
long currentMillis = current.getTime() % MILLIS_PER_DAY;
|
||||
|
||||
// java.util.Date - just copy
|
||||
java.util.Date t1 = Values.convertToTime(Time.SCHEMA, current);
|
||||
assertEquals(current, t1);
|
||||
|
||||
// java.util.Date as a Timestamp - discard the date and keep just day's milliseconds
|
||||
t1 = Values.convertToTime(Timestamp.SCHEMA, current);
|
||||
assertEquals(new java.util.Date(currentMillis), t1);
|
||||
|
||||
// ISO8601 strings - currently broken because tokenization breaks at colon
|
||||
|
||||
// Millis as string
|
||||
java.util.Date t3 = Values.convertToTime(Time.SCHEMA, Long.toString(currentMillis));
|
||||
assertEquals(currentMillis, t3.getTime());
|
||||
|
||||
// Millis as long
|
||||
java.util.Date t4 = Values.convertToTime(Time.SCHEMA, currentMillis);
|
||||
assertEquals(currentMillis, t4.getTime());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldConvertDateValues() {
|
||||
java.util.Date current = new java.util.Date();
|
||||
long currentMillis = current.getTime() % MILLIS_PER_DAY;
|
||||
long days = current.getTime() / MILLIS_PER_DAY;
|
||||
|
||||
// java.util.Date - just copy
|
||||
java.util.Date d1 = Values.convertToDate(Date.SCHEMA, current);
|
||||
assertEquals(current, d1);
|
||||
|
||||
// java.util.Date as a Timestamp - discard the day's milliseconds and keep the date
|
||||
java.util.Date currentDate = new java.util.Date(current.getTime() - currentMillis);
|
||||
d1 = Values.convertToDate(Timestamp.SCHEMA, currentDate);
|
||||
assertEquals(currentDate, d1);
|
||||
|
||||
// ISO8601 strings - currently broken because tokenization breaks at colon
|
||||
|
||||
// Days as string
|
||||
java.util.Date d3 = Values.convertToDate(Date.SCHEMA, Long.toString(days));
|
||||
assertEquals(currentDate, d3);
|
||||
|
||||
// Days as long
|
||||
java.util.Date d4 = Values.convertToDate(Date.SCHEMA, days);
|
||||
assertEquals(currentDate, d4);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldConvertTimestampValues() {
|
||||
java.util.Date current = new java.util.Date();
|
||||
long currentMillis = current.getTime() % MILLIS_PER_DAY;
|
||||
|
||||
// java.util.Date - just copy
|
||||
java.util.Date ts1 = Values.convertToTimestamp(Timestamp.SCHEMA, current);
|
||||
assertEquals(current, ts1);
|
||||
|
||||
// java.util.Date as a Timestamp - discard the day's milliseconds and keep the date
|
||||
java.util.Date currentDate = new java.util.Date(current.getTime() - currentMillis);
|
||||
ts1 = Values.convertToTimestamp(Date.SCHEMA, currentDate);
|
||||
assertEquals(currentDate, ts1);
|
||||
|
||||
// java.util.Date as a Time - discard the date and keep the day's milliseconds
|
||||
ts1 = Values.convertToTimestamp(Time.SCHEMA, currentMillis);
|
||||
assertEquals(new java.util.Date(currentMillis), ts1);
|
||||
|
||||
// ISO8601 strings - currently broken because tokenization breaks at colon
|
||||
|
||||
// Millis as string
|
||||
java.util.Date ts3 = Values.convertToTimestamp(Timestamp.SCHEMA, Long.toString(current.getTime()));
|
||||
assertEquals(current, ts3);
|
||||
|
||||
// Millis as long
|
||||
java.util.Date ts4 = Values.convertToTimestamp(Timestamp.SCHEMA, current.getTime());
|
||||
assertEquals(current, ts4);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void canConsume() {
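// No direct assertions here; canConsume(...) is exercised through assertConsumable(Parser, String...) below.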
|
||||
}
|
||||
|
||||
protected void assertParsed(String input) {
|
||||
assertParsed(input, input);
|
||||
}
|
||||
|
||||
protected void assertParsed(String input, String... expectedTokens) {
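// Walk the expected tokens, checking next()/previous() and the mark()/position()/rewindTo() bookkeeping, then re-check the same tokens via canConsume() after a rewind and again with a fresh Parser.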
|
||||
Parser parser = new Parser(input);
|
||||
if (!parser.hasNext()) {
|
||||
assertEquals(1, expectedTokens.length);
|
||||
assertTrue(expectedTokens[0].isEmpty());
|
||||
return;
|
||||
}
|
||||
|
||||
for (String expectedToken : expectedTokens) {
|
||||
assertTrue(parser.hasNext());
|
||||
int position = parser.mark();
|
||||
assertEquals(expectedToken, parser.next());
|
||||
assertEquals(position + expectedToken.length(), parser.position());
|
||||
assertEquals(expectedToken, parser.previous());
|
||||
parser.rewindTo(position);
|
||||
assertEquals(position, parser.position());
|
||||
assertEquals(expectedToken, parser.next());
|
||||
int newPosition = parser.mark();
|
||||
assertEquals(position + expectedToken.length(), newPosition);
|
||||
assertEquals(expectedToken, parser.previous());
|
||||
}
|
||||
assertFalse(parser.hasNext());
|
||||
|
||||
// Rewind and try consuming expected tokens ...
|
||||
parser.rewindTo(0);
|
||||
assertConsumable(parser, expectedTokens);
|
||||
|
||||
// Parse again and try consuming expected tokens ...
|
||||
parser = new Parser(input);
|
||||
assertConsumable(parser, expectedTokens);
|
||||
}
|
||||
|
||||
protected void assertConsumable(Parser parser, String... expectedTokens) {
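// Each non-blank token should be consumable from the same position, both trimmed and untrimmed.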
|
||||
for (String expectedToken : expectedTokens) {
|
||||
if (!expectedToken.trim().isEmpty()) {
|
||||
int position = parser.mark();
|
||||
assertTrue(parser.canConsume(expectedToken.trim()));
|
||||
parser.rewindTo(position);
|
||||
assertTrue(parser.canConsume(expectedToken.trim(), true));
|
||||
parser.rewindTo(position);
|
||||
assertTrue(parser.canConsume(expectedToken, false));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
protected SchemaAndValue roundTrip(Schema desiredSchema, String currentValue) {
|
||||
return roundTrip(desiredSchema, new SchemaAndValue(Schema.STRING_SCHEMA, currentValue));
|
||||
}
|
||||
|
||||
protected SchemaAndValue roundTrip(Schema desiredSchema, SchemaAndValue input) {
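// Serialize the value to its string form, convert that string back according to the desired schema's type, and return the re-parsed value with its re-inferred schema.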
|
||||
String serialized = Values.convertToString(input.schema(), input.value());
|
||||
if (input != null && input.value() != null) {
|
||||
assertNotNull(serialized);
|
||||
}
|
||||
if (desiredSchema == null) {
|
||||
desiredSchema = Values.inferSchema(input);
|
||||
assertNotNull(desiredSchema);
|
||||
}
|
||||
Object newValue = null;
|
||||
Schema newSchema = null;
|
||||
switch (desiredSchema.type()) {
|
||||
case STRING:
|
||||
newValue = Values.convertToString(Schema.STRING_SCHEMA, serialized);
|
||||
break;
|
||||
case INT8:
|
||||
newValue = Values.convertToByte(Schema.STRING_SCHEMA, serialized);
|
||||
break;
|
||||
case INT16:
|
||||
newValue = Values.convertToShort(Schema.STRING_SCHEMA, serialized);
|
||||
break;
|
||||
case INT32:
|
||||
newValue = Values.convertToInteger(Schema.STRING_SCHEMA, serialized);
|
||||
break;
|
||||
case INT64:
|
||||
newValue = Values.convertToLong(Schema.STRING_SCHEMA, serialized);
|
||||
break;
|
||||
case FLOAT32:
|
||||
newValue = Values.convertToFloat(Schema.STRING_SCHEMA, serialized);
|
||||
break;
|
||||
case FLOAT64:
|
||||
newValue = Values.convertToDouble(Schema.STRING_SCHEMA, serialized);
|
||||
break;
|
||||
case BOOLEAN:
|
||||
newValue = Values.convertToBoolean(Schema.STRING_SCHEMA, serialized);
|
||||
break;
|
||||
case ARRAY:
|
||||
newValue = Values.convertToList(Schema.STRING_SCHEMA, serialized);
|
||||
break;
|
||||
case MAP:
|
||||
newValue = Values.convertToMap(Schema.STRING_SCHEMA, serialized);
|
||||
break;
|
||||
case STRUCT:
|
||||
newValue = Values.convertToStruct(Schema.STRING_SCHEMA, serialized);
|
||||
break;
|
||||
case BYTES:
|
||||
fail("unexpected schema type");
|
||||
break;
|
||||
}
|
||||
newSchema = Values.inferSchema(newValue);
|
||||
return new SchemaAndValue(newSchema, newValue);
|
||||
}
|
||||
|
||||
protected void assertRoundTrip(Schema schema, String value) {
|
||||
assertRoundTrip(schema, Schema.STRING_SCHEMA, value);
|
||||
}
|
||||
|
||||
protected void assertRoundTrip(Schema schema, Schema currentSchema, Object value) {
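// Round-trip once and compare against the expected schema and value, then round-trip the result again to verify the conversion is stable.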
|
||||
SchemaAndValue result = roundTrip(schema, new SchemaAndValue(currentSchema, value));
|
||||
|
||||
if (value == null) {
|
||||
assertNull(result.schema());
|
||||
assertNull(result.value());
|
||||
} else {
|
||||
assertEquals(value, result.value());
|
||||
assertEquals(schema, result.schema());
|
||||
|
||||
SchemaAndValue result2 = roundTrip(result.schema(), result);
|
||||
assertEquals(schema, result2.schema());
|
||||
assertEquals(value, result2.value());
|
||||
assertEquals(result, result2);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,108 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.header;
|
||||
|
||||
import org.apache.kafka.connect.data.Schema;
|
||||
import org.apache.kafka.connect.data.SchemaAndValue;
|
||||
import org.apache.kafka.connect.data.SchemaBuilder;
|
||||
import org.apache.kafka.connect.data.Struct;
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertNotEquals;
|
||||
import static org.junit.Assert.assertNull;
|
||||
import static org.junit.Assert.assertSame;
|
||||
|
||||
public class ConnectHeaderTest {
|
||||
|
||||
private String key;
|
||||
private ConnectHeader header;
|
||||
|
||||
@Before
|
||||
public void beforeEach() {
|
||||
key = "key";
|
||||
withString("value");
|
||||
}
|
||||
|
||||
protected Header withValue(Schema schema, Object value) {
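// Replace the header under test with one built from the given schema and value.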
|
||||
header = new ConnectHeader(key, new SchemaAndValue(schema, value));
|
||||
return header;
|
||||
}
|
||||
|
||||
protected Header withString(String value) {
|
||||
return withValue(Schema.STRING_SCHEMA, value);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldAllowNullValues() {
|
||||
withValue(Schema.OPTIONAL_STRING_SCHEMA, null);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldAllowNullSchema() {
|
||||
withValue(null, null);
|
||||
assertNull(header.schema());
|
||||
assertNull(header.value());
|
||||
|
||||
String value = "non-null value";
|
||||
withValue(null, value);
|
||||
assertNull(header.schema());
|
||||
assertSame(value, header.value());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldAllowNonNullValue() {
|
||||
String value = "non-null value";
|
||||
withValue(Schema.STRING_SCHEMA, value);
|
||||
assertSame(Schema.STRING_SCHEMA, header.schema());
|
||||
assertEquals(value, header.value());
|
||||
|
||||
withValue(Schema.BOOLEAN_SCHEMA, true);
|
||||
assertSame(Schema.BOOLEAN_SCHEMA, header.schema());
|
||||
assertEquals(true, header.value());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldGetSchemaFromStruct() {
|
||||
Schema schema = SchemaBuilder.struct()
|
||||
.field("foo", Schema.STRING_SCHEMA)
|
||||
.field("bar", Schema.INT32_SCHEMA)
|
||||
.build();
|
||||
Struct value = new Struct(schema);
|
||||
value.put("foo", "value");
|
||||
value.put("bar", 100);
|
||||
withValue(null, value);
|
||||
assertSame(schema, header.schema());
|
||||
assertSame(value, header.value());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldSatisfyEquals() {
|
||||
String value = "non-null value";
|
||||
Header h1 = withValue(Schema.STRING_SCHEMA, value);
|
||||
assertSame(Schema.STRING_SCHEMA, header.schema());
|
||||
assertEquals(value, header.value());
|
||||
|
||||
Header h2 = withValue(Schema.STRING_SCHEMA, value);
|
||||
assertEquals(h1, h2);
|
||||
assertEquals(h1.hashCode(), h2.hashCode());
|
||||
|
||||
Header h3 = withValue(Schema.INT8_SCHEMA, 100);
|
||||
assertNotEquals(h3, h2);
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,568 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.header;
|
||||
|
||||
import org.apache.kafka.connect.data.Date;
|
||||
import org.apache.kafka.connect.data.Decimal;
|
||||
import org.apache.kafka.connect.data.Schema;
|
||||
import org.apache.kafka.connect.data.Schema.Type;
|
||||
import org.apache.kafka.connect.data.SchemaAndValue;
|
||||
import org.apache.kafka.connect.data.SchemaBuilder;
|
||||
import org.apache.kafka.connect.data.Struct;
|
||||
import org.apache.kafka.connect.data.Time;
|
||||
import org.apache.kafka.connect.data.Timestamp;
|
||||
import org.apache.kafka.connect.data.Values;
|
||||
import org.apache.kafka.connect.errors.DataException;
|
||||
import org.apache.kafka.connect.header.Headers.HeaderTransform;
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
|
||||
import java.math.BigDecimal;
|
||||
import java.math.RoundingMode;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Calendar;
|
||||
import java.util.Collections;
|
||||
import java.util.GregorianCalendar;
|
||||
import java.util.HashMap;
|
||||
import java.util.Iterator;
|
||||
import java.util.TimeZone;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertFalse;
|
||||
import static org.junit.Assert.assertNotEquals;
|
||||
import static org.junit.Assert.assertNotNull;
|
||||
import static org.junit.Assert.assertNotSame;
|
||||
import static org.junit.Assert.assertSame;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
import static org.junit.Assert.fail;
|
||||
|
||||
public class ConnectHeadersTest {
|
||||
|
||||
private static final GregorianCalendar EPOCH_PLUS_TEN_THOUSAND_DAYS;
|
||||
private static final GregorianCalendar EPOCH_PLUS_TEN_THOUSAND_MILLIS;
|
||||
|
||||
static {
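// 1970-01-01 advanced by 10,000 days, and by 10,000 milliseconds, both with the UTC time zone.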
|
||||
EPOCH_PLUS_TEN_THOUSAND_DAYS = new GregorianCalendar(1970, Calendar.JANUARY, 1, 0, 0, 0);
|
||||
EPOCH_PLUS_TEN_THOUSAND_DAYS.setTimeZone(TimeZone.getTimeZone("UTC"));
|
||||
EPOCH_PLUS_TEN_THOUSAND_DAYS.add(Calendar.DATE, 10000);
|
||||
|
||||
EPOCH_PLUS_TEN_THOUSAND_MILLIS = new GregorianCalendar(1970, Calendar.JANUARY, 1, 0, 0, 0);
|
||||
EPOCH_PLUS_TEN_THOUSAND_MILLIS.setTimeZone(TimeZone.getTimeZone("UTC"));
|
||||
EPOCH_PLUS_TEN_THOUSAND_MILLIS.add(Calendar.MILLISECOND, 10000);
|
||||
}
|
||||
|
||||
private ConnectHeaders headers;
|
||||
private Iterator<Header> iter;
|
||||
private String key;
|
||||
private String other;
|
||||
|
||||
@Before
|
||||
public void beforeEach() {
|
||||
headers = new ConnectHeaders();
|
||||
key = "k1";
|
||||
other = "other key";
|
||||
}
|
||||
|
||||
@Test(expected = NullPointerException.class)
|
||||
public void shouldNotAllowNullKey() {
|
||||
headers.add(null, "value", Schema.STRING_SCHEMA);
|
||||
}
|
||||
|
||||
protected void populate(Headers headers) {
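// Adds four headers under 'key' (boolean true, int 0, a null string, and "third") plus one under 'other'.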
|
||||
headers.addBoolean(key, true);
|
||||
headers.addInt(key, 0);
|
||||
headers.addString(other, "other value");
|
||||
headers.addString(key, null);
|
||||
headers.addString(key, "third");
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldBeEquals() {
|
||||
Headers other = new ConnectHeaders();
|
||||
assertEquals(headers, other);
|
||||
assertEquals(headers.hashCode(), other.hashCode());
|
||||
|
||||
populate(headers);
|
||||
assertNotEquals(headers, other);
|
||||
assertNotEquals(headers.hashCode(), other.hashCode());
|
||||
|
||||
populate(other);
|
||||
assertEquals(headers, other);
|
||||
assertEquals(headers.hashCode(), other.hashCode());
|
||||
|
||||
headers.addString("wow", "some value");
|
||||
assertNotEquals(headers, other);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldHaveToString() {
|
||||
// empty
|
||||
assertNotNull(headers.toString());
|
||||
|
||||
// not empty
|
||||
populate(headers);
|
||||
assertNotNull(headers.toString());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldRetainLatestWhenEmpty() {
|
||||
headers.retainLatest(other);
|
||||
headers.retainLatest(key);
|
||||
headers.retainLatest();
|
||||
assertTrue(headers.isEmpty());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldAddMultipleHeadersWithSameKeyAndRetainLatest() {
|
||||
populate(headers);
|
||||
|
||||
Header header = headers.lastWithName(key);
|
||||
assertHeader(header, key, Schema.STRING_SCHEMA, "third");
|
||||
|
||||
iter = headers.allWithName(key);
|
||||
assertNextHeader(iter, key, Schema.BOOLEAN_SCHEMA, true);
|
||||
assertNextHeader(iter, key, Schema.INT32_SCHEMA, 0);
|
||||
assertNextHeader(iter, key, Schema.OPTIONAL_STRING_SCHEMA, null);
|
||||
assertNextHeader(iter, key, Schema.STRING_SCHEMA, "third");
|
||||
assertNoNextHeader(iter);
|
||||
|
||||
iter = headers.allWithName(other);
|
||||
assertOnlyNextHeader(iter, other, Schema.STRING_SCHEMA, "other value");
|
||||
|
||||
headers.retainLatest(other);
|
||||
assertOnlySingleHeader(other, Schema.STRING_SCHEMA, "other value");
|
||||
|
||||
headers.retainLatest(key);
|
||||
assertOnlySingleHeader(key, Schema.STRING_SCHEMA, "third");
|
||||
|
||||
headers.retainLatest();
|
||||
assertOnlySingleHeader(other, Schema.STRING_SCHEMA, "other value");
|
||||
assertOnlySingleHeader(key, Schema.STRING_SCHEMA, "third");
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldAddHeadersWithPrimitiveValues() {
|
||||
String key = "k1";
|
||||
headers.addBoolean(key, true);
|
||||
headers.addByte(key, (byte) 0);
|
||||
headers.addShort(key, (short) 0);
|
||||
headers.addInt(key, 0);
|
||||
headers.addLong(key, 0);
|
||||
headers.addFloat(key, 1.0f);
|
||||
headers.addDouble(key, 1.0d);
|
||||
headers.addString(key, null);
|
||||
headers.addString(key, "third");
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldAddHeadersWithNullObjectValuesWithOptionalSchema() {
|
||||
addHeader("k1", Schema.BOOLEAN_SCHEMA, true);
|
||||
addHeader("k2", Schema.STRING_SCHEMA, "hello");
|
||||
addHeader("k3", Schema.OPTIONAL_STRING_SCHEMA, null);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldNotAddHeadersWithNullObjectValuesWithNonOptionalSchema() {
|
||||
attemptAndFailToAddHeader("k1", Schema.BOOLEAN_SCHEMA, null);
|
||||
attemptAndFailToAddHeader("k2", Schema.STRING_SCHEMA, null);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldNotAddHeadersWithObjectValuesAndMismatchedSchema() {
|
||||
attemptAndFailToAddHeader("k1", Schema.BOOLEAN_SCHEMA, "wrong");
|
||||
attemptAndFailToAddHeader("k2", Schema.OPTIONAL_STRING_SCHEMA, 0L);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldRemoveAllHeadersWithSameKeyWhenEmpty() {
|
||||
headers.remove(key);
|
||||
assertNoHeaderWithKey(key);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldRemoveAllHeadersWithSameKey() {
|
||||
populate(headers);
|
||||
|
||||
iter = headers.allWithName(key);
|
||||
assertContainsHeader(key, Schema.BOOLEAN_SCHEMA, true);
|
||||
assertContainsHeader(key, Schema.INT32_SCHEMA, 0);
|
||||
assertContainsHeader(key, Schema.STRING_SCHEMA, "third");
|
||||
assertOnlySingleHeader(other, Schema.STRING_SCHEMA, "other value");
|
||||
|
||||
headers.remove(key);
|
||||
assertNoHeaderWithKey(key);
|
||||
assertOnlySingleHeader(other, Schema.STRING_SCHEMA, "other value");
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldRemoveAllHeaders() {
|
||||
populate(headers);
|
||||
|
||||
iter = headers.allWithName(key);
|
||||
assertContainsHeader(key, Schema.BOOLEAN_SCHEMA, true);
|
||||
assertContainsHeader(key, Schema.INT32_SCHEMA, 0);
|
||||
assertContainsHeader(key, Schema.STRING_SCHEMA, "third");
|
||||
assertOnlySingleHeader(other, Schema.STRING_SCHEMA, "other value");
|
||||
|
||||
headers.clear();
|
||||
assertNoHeaderWithKey(key);
|
||||
assertNoHeaderWithKey(other);
|
||||
assertEquals(0, headers.size());
|
||||
assertTrue(headers.isEmpty());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldTransformHeadersWhenEmpty() {
|
||||
headers.apply(appendToKey("-suffix"));
|
||||
headers.apply(key, appendToKey("-suffix"));
|
||||
assertTrue(headers.isEmpty());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldTransformHeaders() {
|
||||
populate(headers);
|
||||
|
||||
iter = headers.allWithName(key);
|
||||
assertNextHeader(iter, key, Schema.BOOLEAN_SCHEMA, true);
|
||||
assertNextHeader(iter, key, Schema.INT32_SCHEMA, 0);
|
||||
assertNextHeader(iter, key, Schema.OPTIONAL_STRING_SCHEMA, null);
|
||||
assertNextHeader(iter, key, Schema.STRING_SCHEMA, "third");
|
||||
assertNoNextHeader(iter);
|
||||
|
||||
iter = headers.allWithName(other);
|
||||
assertOnlyNextHeader(iter, other, Schema.STRING_SCHEMA, "other value");
|
||||
|
||||
// Transform the headers
|
||||
assertEquals(5, headers.size());
|
||||
headers.apply(appendToKey("-suffix"));
|
||||
assertEquals(5, headers.size());
|
||||
|
||||
assertNoHeaderWithKey(key);
|
||||
assertNoHeaderWithKey(other);
|
||||
|
||||
String altKey = key + "-suffix";
|
||||
iter = headers.allWithName(altKey);
|
||||
assertNextHeader(iter, altKey, Schema.BOOLEAN_SCHEMA, true);
|
||||
assertNextHeader(iter, altKey, Schema.INT32_SCHEMA, 0);
|
||||
assertNextHeader(iter, altKey, Schema.OPTIONAL_STRING_SCHEMA, null);
|
||||
assertNextHeader(iter, altKey, Schema.STRING_SCHEMA, "third");
|
||||
assertNoNextHeader(iter);
|
||||
|
||||
iter = headers.allWithName(other + "-suffix");
|
||||
assertOnlyNextHeader(iter, other + "-suffix", Schema.STRING_SCHEMA, "other value");
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldTransformHeadersWithKey() {
|
||||
populate(headers);
|
||||
|
||||
iter = headers.allWithName(key);
|
||||
assertNextHeader(iter, key, Schema.BOOLEAN_SCHEMA, true);
|
||||
assertNextHeader(iter, key, Schema.INT32_SCHEMA, 0);
|
||||
assertNextHeader(iter, key, Schema.OPTIONAL_STRING_SCHEMA, null);
|
||||
assertNextHeader(iter, key, Schema.STRING_SCHEMA, "third");
|
||||
assertNoNextHeader(iter);
|
||||
|
||||
iter = headers.allWithName(other);
|
||||
assertOnlyNextHeader(iter, other, Schema.STRING_SCHEMA, "other value");
|
||||
|
||||
// Transform the headers
|
||||
assertEquals(5, headers.size());
|
||||
headers.apply(key, appendToKey("-suffix"));
|
||||
assertEquals(5, headers.size());
|
||||
|
||||
assertNoHeaderWithKey(key);
|
||||
|
||||
String altKey = key + "-suffix";
|
||||
iter = headers.allWithName(altKey);
|
||||
assertNextHeader(iter, altKey, Schema.BOOLEAN_SCHEMA, true);
|
||||
assertNextHeader(iter, altKey, Schema.INT32_SCHEMA, 0);
|
||||
assertNextHeader(iter, altKey, Schema.OPTIONAL_STRING_SCHEMA, null);
|
||||
assertNextHeader(iter, altKey, Schema.STRING_SCHEMA, "third");
|
||||
assertNoNextHeader(iter);
|
||||
|
||||
iter = headers.allWithName(other);
|
||||
assertOnlyNextHeader(iter, other, Schema.STRING_SCHEMA, "other value");
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldTransformAndRemoveHeaders() {
|
||||
populate(headers);
|
||||
|
||||
iter = headers.allWithName(key);
|
||||
assertNextHeader(iter, key, Schema.BOOLEAN_SCHEMA, true);
|
||||
assertNextHeader(iter, key, Schema.INT32_SCHEMA, 0);
|
||||
assertNextHeader(iter, key, Schema.OPTIONAL_STRING_SCHEMA, null);
|
||||
assertNextHeader(iter, key, Schema.STRING_SCHEMA, "third");
|
||||
assertNoNextHeader(iter);
|
||||
|
||||
iter = headers.allWithName(other);
|
||||
assertOnlyNextHeader(iter, other, Schema.STRING_SCHEMA, "other value");
|
||||
|
||||
// Transform the headers
|
||||
assertEquals(5, headers.size());
|
||||
headers.apply(key, removeHeadersOfType(Type.STRING));
|
||||
assertEquals(3, headers.size());
|
||||
|
||||
iter = headers.allWithName(key);
|
||||
assertNextHeader(iter, key, Schema.BOOLEAN_SCHEMA, true);
|
||||
assertNextHeader(iter, key, Schema.INT32_SCHEMA, 0);
|
||||
assertNoNextHeader(iter);
|
||||
|
||||
assertHeader(headers.lastWithName(key), key, Schema.INT32_SCHEMA, 0);
|
||||
|
||||
iter = headers.allWithName(other);
|
||||
assertOnlyNextHeader(iter, other, Schema.STRING_SCHEMA, "other value");
|
||||
|
||||
// Transform the headers
|
||||
assertEquals(3, headers.size());
|
||||
headers.apply(removeHeadersOfType(Type.STRING));
|
||||
assertEquals(2, headers.size());
|
||||
|
||||
assertNoHeaderWithKey(other);
|
||||
|
||||
iter = headers.allWithName(key);
|
||||
assertNextHeader(iter, key, Schema.BOOLEAN_SCHEMA, true);
|
||||
assertNextHeader(iter, key, Schema.INT32_SCHEMA, 0);
|
||||
assertNoNextHeader(iter);
|
||||
}
|
||||
|
||||
protected HeaderTransform appendToKey(final String suffix) {
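// Transform that renames every header by appending the given suffix to its key.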
|
||||
return new HeaderTransform() {
|
||||
@Override
|
||||
public Header apply(Header header) {
|
||||
return header.rename(header.key() + suffix);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
protected HeaderTransform removeHeadersOfType(final Type type) {
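// Transform that removes headers whose schema has the given type by mapping them to null.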
|
||||
return new HeaderTransform() {
|
||||
@Override
|
||||
public Header apply(Header header) {
|
||||
Schema schema = header.schema();
|
||||
if (schema != null && schema.type() == type) {
|
||||
return null;
|
||||
}
|
||||
return header;
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldValidateBuiltInTypes() {
|
||||
assertSchemaMatches(Schema.OPTIONAL_BOOLEAN_SCHEMA, null);
|
||||
assertSchemaMatches(Schema.OPTIONAL_BYTES_SCHEMA, null);
|
||||
assertSchemaMatches(Schema.OPTIONAL_INT8_SCHEMA, null);
|
||||
assertSchemaMatches(Schema.OPTIONAL_INT16_SCHEMA, null);
|
||||
assertSchemaMatches(Schema.OPTIONAL_INT32_SCHEMA, null);
|
||||
assertSchemaMatches(Schema.OPTIONAL_INT64_SCHEMA, null);
|
||||
assertSchemaMatches(Schema.OPTIONAL_FLOAT32_SCHEMA, null);
|
||||
assertSchemaMatches(Schema.OPTIONAL_FLOAT64_SCHEMA, null);
|
||||
assertSchemaMatches(Schema.OPTIONAL_STRING_SCHEMA, null);
|
||||
assertSchemaMatches(Schema.BOOLEAN_SCHEMA, true);
|
||||
assertSchemaMatches(Schema.BYTES_SCHEMA, new byte[]{});
|
||||
assertSchemaMatches(Schema.INT8_SCHEMA, (byte) 0);
|
||||
assertSchemaMatches(Schema.INT16_SCHEMA, (short) 0);
|
||||
assertSchemaMatches(Schema.INT32_SCHEMA, 0);
|
||||
assertSchemaMatches(Schema.INT64_SCHEMA, 0L);
|
||||
assertSchemaMatches(Schema.FLOAT32_SCHEMA, 1.0f);
|
||||
assertSchemaMatches(Schema.FLOAT64_SCHEMA, 1.0d);
|
||||
assertSchemaMatches(Schema.STRING_SCHEMA, "value");
|
||||
assertSchemaMatches(SchemaBuilder.array(Schema.STRING_SCHEMA), new ArrayList<String>());
|
||||
assertSchemaMatches(SchemaBuilder.array(Schema.STRING_SCHEMA), Collections.singletonList("value"));
|
||||
assertSchemaMatches(SchemaBuilder.map(Schema.STRING_SCHEMA, Schema.INT32_SCHEMA), new HashMap<String, Integer>());
|
||||
assertSchemaMatches(SchemaBuilder.map(Schema.STRING_SCHEMA, Schema.INT32_SCHEMA), Collections.singletonMap("a", 0));
|
||||
Schema emptyStructSchema = SchemaBuilder.struct();
|
||||
assertSchemaMatches(emptyStructSchema, new Struct(emptyStructSchema));
|
||||
Schema structSchema = SchemaBuilder.struct().field("foo", Schema.OPTIONAL_BOOLEAN_SCHEMA).field("bar", Schema.STRING_SCHEMA)
|
||||
.schema();
|
||||
assertSchemaMatches(structSchema, new Struct(structSchema).put("foo", true).put("bar", "v"));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldValidateLogicalTypes() {
|
||||
assertSchemaMatches(Decimal.schema(3), new BigDecimal(100.00));
|
||||
assertSchemaMatches(Time.SCHEMA, new java.util.Date());
|
||||
assertSchemaMatches(Date.SCHEMA, new java.util.Date());
|
||||
assertSchemaMatches(Timestamp.SCHEMA, new java.util.Date());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldNotValidateNullValuesWithBuiltInTypes() {
|
||||
assertSchemaDoesNotMatch(Schema.BOOLEAN_SCHEMA, null);
|
||||
assertSchemaDoesNotMatch(Schema.BYTES_SCHEMA, null);
|
||||
assertSchemaDoesNotMatch(Schema.INT8_SCHEMA, null);
|
||||
assertSchemaDoesNotMatch(Schema.INT16_SCHEMA, null);
|
||||
assertSchemaDoesNotMatch(Schema.INT32_SCHEMA, null);
|
||||
assertSchemaDoesNotMatch(Schema.INT64_SCHEMA, null);
|
||||
assertSchemaDoesNotMatch(Schema.FLOAT32_SCHEMA, null);
|
||||
assertSchemaDoesNotMatch(Schema.FLOAT64_SCHEMA, null);
|
||||
assertSchemaDoesNotMatch(Schema.STRING_SCHEMA, null);
|
||||
assertSchemaDoesNotMatch(SchemaBuilder.array(Schema.STRING_SCHEMA), null);
|
||||
assertSchemaDoesNotMatch(SchemaBuilder.map(Schema.STRING_SCHEMA, Schema.INT32_SCHEMA), null);
|
||||
assertSchemaDoesNotMatch(SchemaBuilder.struct(), null);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldNotValidateMismatchedValuesWithBuiltInTypes() {
|
||||
assertSchemaDoesNotMatch(Schema.BOOLEAN_SCHEMA, 0L);
|
||||
assertSchemaDoesNotMatch(Schema.BYTES_SCHEMA, "oops");
|
||||
assertSchemaDoesNotMatch(Schema.INT8_SCHEMA, 1.0f);
|
||||
assertSchemaDoesNotMatch(Schema.INT16_SCHEMA, 1.0f);
|
||||
assertSchemaDoesNotMatch(Schema.INT32_SCHEMA, 0L);
|
||||
assertSchemaDoesNotMatch(Schema.INT64_SCHEMA, 1.0f);
|
||||
assertSchemaDoesNotMatch(Schema.FLOAT32_SCHEMA, 1L);
|
||||
assertSchemaDoesNotMatch(Schema.FLOAT64_SCHEMA, 1L);
|
||||
assertSchemaDoesNotMatch(Schema.STRING_SCHEMA, true);
|
||||
assertSchemaDoesNotMatch(SchemaBuilder.array(Schema.STRING_SCHEMA), "value");
|
||||
assertSchemaDoesNotMatch(SchemaBuilder.map(Schema.STRING_SCHEMA, Schema.INT32_SCHEMA), "value");
|
||||
assertSchemaDoesNotMatch(SchemaBuilder.struct(), new ArrayList<String>());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldAddDate() {
|
||||
java.util.Date dateObj = EPOCH_PLUS_TEN_THOUSAND_DAYS.getTime();
|
||||
int days = Date.fromLogical(Date.SCHEMA, dateObj);
|
||||
headers.addDate(key, dateObj);
|
||||
Header header = headers.lastWithName(key);
|
||||
assertEquals(days, (int) Values.convertToInteger(header.schema(), header.value()));
|
||||
assertSame(dateObj, Values.convertToDate(header.schema(), header.value()));
|
||||
|
||||
headers.addInt(other, days);
|
||||
header = headers.lastWithName(other);
|
||||
assertEquals(days, (int) Values.convertToInteger(header.schema(), header.value()));
|
||||
assertEquals(dateObj, Values.convertToDate(header.schema(), header.value()));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldAddTime() {
|
||||
java.util.Date dateObj = EPOCH_PLUS_TEN_THOUSAND_MILLIS.getTime();
|
||||
long millis = Time.fromLogical(Time.SCHEMA, dateObj);
|
||||
headers.addTime(key, dateObj);
|
||||
Header header = headers.lastWithName(key);
|
||||
assertEquals(millis, (long) Values.convertToLong(header.schema(), header.value()));
|
||||
assertSame(dateObj, Values.convertToTime(header.schema(), header.value()));
|
||||
|
||||
headers.addLong(other, millis);
|
||||
header = headers.lastWithName(other);
|
||||
assertEquals(millis, (long) Values.convertToLong(header.schema(), header.value()));
|
||||
assertEquals(dateObj, Values.convertToTime(header.schema(), header.value()));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldAddTimestamp() {
|
||||
java.util.Date dateObj = EPOCH_PLUS_TEN_THOUSAND_MILLIS.getTime();
|
||||
long millis = Timestamp.fromLogical(Timestamp.SCHEMA, dateObj);
|
||||
headers.addTimestamp(key, dateObj);
|
||||
Header header = headers.lastWithName(key);
|
||||
assertEquals(millis, (long) Values.convertToLong(header.schema(), header.value()));
|
||||
assertSame(dateObj, Values.convertToTimestamp(header.schema(), header.value()));
|
||||
|
||||
headers.addLong(other, millis);
|
||||
header = headers.lastWithName(other);
|
||||
assertEquals(millis, (long) Values.convertToLong(header.schema(), header.value()));
|
||||
assertEquals(dateObj, Values.convertToTimestamp(header.schema(), header.value()));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldAddDecimal() {
|
||||
BigDecimal value = new BigDecimal("3.038573478e+3");
|
||||
headers.addDecimal(key, value);
|
||||
Header header = headers.lastWithName(key);
|
||||
assertEquals(value.doubleValue(), Values.convertToDouble(header.schema(), header.value()), 0.00001d);
|
||||
assertEquals(value, Values.convertToDecimal(header.schema(), header.value(), value.scale()));
|
||||
|
||||
value = value.setScale(3, RoundingMode.DOWN);
|
||||
BigDecimal decimal = Values.convertToDecimal(header.schema(), header.value(), value.scale());
|
||||
assertEquals(value, decimal.setScale(value.scale(), RoundingMode.DOWN));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldDuplicateAndAlwaysReturnEquivalentButDifferentObject() {
|
||||
assertEquals(headers, headers.duplicate());
|
||||
assertNotSame(headers, headers.duplicate());
|
||||
}
|
||||
|
||||
protected void assertSchemaMatches(Schema schema, Object value) {
|
||||
headers.checkSchemaMatches(new SchemaAndValue(schema.schema(), value));
|
||||
}
|
||||
|
||||
protected void assertSchemaDoesNotMatch(Schema schema, Object value) {
|
||||
try {
|
||||
assertSchemaMatches(schema, value);
|
||||
fail("Should have failed to validate value '" + value + "' and schema: " + schema);
|
||||
} catch (DataException e) {
|
||||
// expected
|
||||
}
|
||||
}
|
||||
|
||||
protected void attemptAndFailToAddHeader(String key, Schema schema, Object value) {
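// The add is expected to be rejected with a DataException, e.g. a null value for a non-optional schema or a value that does not match the schema type.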
|
||||
try {
|
||||
headers.add(key, value, schema);
|
||||
fail("Should have failed to add header with key '" + key + "', value '" + value + "', and schema: " + schema);
|
||||
} catch (DataException e) {
|
||||
// expected
|
||||
}
|
||||
}
|
||||
|
||||
protected void addHeader(String key, Schema schema, Object value) {
|
||||
headers.add(key, value, schema);
|
||||
Header header = headers.lastWithName(key);
|
||||
assertNotNull(header);
|
||||
assertHeader(header, key, schema, value);
|
||||
}
|
||||
|
||||
protected void assertNoHeaderWithKey(String key) {
|
||||
assertNoNextHeader(headers.allWithName(key));
|
||||
}
|
||||
|
||||
protected void assertContainsHeader(String key, Schema schema, Object value) {
|
||||
Header expected = new ConnectHeader(key, new SchemaAndValue(schema, value));
|
||||
Iterator<Header> iter = headers.allWithName(key);
|
||||
while (iter.hasNext()) {
|
||||
Header header = iter.next();
|
||||
if (header.equals(expected))
|
||||
return;
|
||||
}
|
||||
fail("Should have found header " + expected);
|
||||
}
|
||||
|
||||
protected void assertOnlySingleHeader(String key, Schema schema, Object value) {
|
||||
assertOnlyNextHeader(headers.allWithName(key), key, schema, value);
|
||||
}
|
||||
|
||||
protected void assertOnlyNextHeader(Iterator<Header> iter, String key, Schema schema, Object value) {
|
||||
assertNextHeader(iter, key, schema, value);
|
||||
assertNoNextHeader(iter);
|
||||
}
|
||||
|
||||
protected void assertNextHeader(Iterator<Header> iter, String key, Schema schema, Object value) {
|
||||
Header header = iter.next();
|
||||
assertHeader(header, key, schema, value);
|
||||
}
|
||||
|
||||
protected void assertNoNextHeader(Iterator<Header> iter) {
|
||||
assertFalse(iter.hasNext());
|
||||
}
|
||||
|
||||
protected void assertHeader(Header header, String key, Schema schema, Object value) {
|
||||
assertNotNull(header);
|
||||
assertSame(schema, header.schema());
|
||||
assertSame(value, header.value());
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,128 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.sink;
|
||||
|
||||
import org.apache.kafka.common.record.TimestampType;
|
||||
import org.apache.kafka.connect.data.Schema;
|
||||
import org.apache.kafka.connect.data.Values;
|
||||
import org.apache.kafka.connect.header.ConnectHeaders;
|
||||
import org.apache.kafka.connect.header.Header;
|
||||
import org.apache.kafka.connect.header.Headers;
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertFalse;
|
||||
import static org.junit.Assert.assertNotEquals;
|
||||
import static org.junit.Assert.assertNotNull;
|
||||
import static org.junit.Assert.assertNotSame;
|
||||
import static org.junit.Assert.assertSame;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
|
||||
public class SinkRecordTest {
|
||||
|
||||
private static final String TOPIC_NAME = "myTopic";
|
||||
private static final Integer PARTITION_NUMBER = 0;
|
||||
private static final long KAFKA_OFFSET = 0L;
|
||||
private static final Long KAFKA_TIMESTAMP = 0L;
|
||||
private static final TimestampType TS_TYPE = TimestampType.CREATE_TIME;
|
||||
|
||||
private SinkRecord record;
|
||||
|
||||
@Before
|
||||
public void beforeEach() {
|
||||
record = new SinkRecord(TOPIC_NAME, PARTITION_NUMBER, Schema.STRING_SCHEMA, "key", Schema.BOOLEAN_SCHEMA, false, KAFKA_OFFSET,
|
||||
KAFKA_TIMESTAMP, TS_TYPE, null);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldCreateSinkRecordWithHeaders() {
|
||||
Headers headers = new ConnectHeaders().addString("h1", "hv1").addBoolean("h2", true);
|
||||
record = new SinkRecord(TOPIC_NAME, PARTITION_NUMBER, Schema.STRING_SCHEMA, "key", Schema.BOOLEAN_SCHEMA, false, KAFKA_OFFSET,
|
||||
KAFKA_TIMESTAMP, TS_TYPE, headers);
|
||||
assertNotNull(record.headers());
|
||||
assertSame(headers, record.headers());
|
||||
assertFalse(record.headers().isEmpty());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldCreateSinkRecordWithEmptyHeaders() {
|
||||
assertEquals(TOPIC_NAME, record.topic());
|
||||
assertEquals(PARTITION_NUMBER, record.kafkaPartition());
|
||||
assertEquals(Schema.STRING_SCHEMA, record.keySchema());
|
||||
assertEquals("key", record.key());
|
||||
assertEquals(Schema.BOOLEAN_SCHEMA, record.valueSchema());
|
||||
assertEquals(false, record.value());
|
||||
assertEquals(KAFKA_OFFSET, record.kafkaOffset());
|
||||
assertEquals(KAFKA_TIMESTAMP, record.timestamp());
|
||||
assertEquals(TS_TYPE, record.timestampType());
|
||||
assertNotNull(record.headers());
|
||||
assertTrue(record.headers().isEmpty());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldDuplicateRecordAndCloneHeaders() {
|
||||
SinkRecord duplicate = record.newRecord(TOPIC_NAME, PARTITION_NUMBER, Schema.STRING_SCHEMA, "key", Schema.BOOLEAN_SCHEMA, false,
|
||||
KAFKA_TIMESTAMP);
|
||||
|
||||
assertEquals(TOPIC_NAME, duplicate.topic());
|
||||
assertEquals(PARTITION_NUMBER, duplicate.kafkaPartition());
|
||||
assertEquals(Schema.STRING_SCHEMA, duplicate.keySchema());
|
||||
assertEquals("key", duplicate.key());
|
||||
assertEquals(Schema.BOOLEAN_SCHEMA, duplicate.valueSchema());
|
||||
assertEquals(false, duplicate.value());
|
||||
assertEquals(KAFKA_OFFSET, duplicate.kafkaOffset());
|
||||
assertEquals(KAFKA_TIMESTAMP, duplicate.timestamp());
|
||||
assertEquals(TS_TYPE, duplicate.timestampType());
|
||||
assertNotNull(duplicate.headers());
|
||||
assertTrue(duplicate.headers().isEmpty());
|
||||
assertNotSame(record.headers(), duplicate.headers());
|
||||
assertEquals(record.headers(), duplicate.headers());
|
||||
}
|
||||
|
||||
|
||||
@Test
|
||||
public void shouldDuplicateRecordUsingNewHeaders() {
|
||||
Headers newHeaders = new ConnectHeaders().addString("h3", "hv3");
|
||||
SinkRecord duplicate = record.newRecord(TOPIC_NAME, PARTITION_NUMBER, Schema.STRING_SCHEMA, "key", Schema.BOOLEAN_SCHEMA, false,
|
||||
KAFKA_TIMESTAMP, newHeaders);
|
||||
|
||||
assertEquals(TOPIC_NAME, duplicate.topic());
|
||||
assertEquals(PARTITION_NUMBER, duplicate.kafkaPartition());
|
||||
assertEquals(Schema.STRING_SCHEMA, duplicate.keySchema());
|
||||
assertEquals("key", duplicate.key());
|
||||
assertEquals(Schema.BOOLEAN_SCHEMA, duplicate.valueSchema());
|
||||
assertEquals(false, duplicate.value());
|
||||
assertEquals(KAFKA_OFFSET, duplicate.kafkaOffset());
|
||||
assertEquals(KAFKA_TIMESTAMP, duplicate.timestamp());
|
||||
assertEquals(TS_TYPE, duplicate.timestampType());
|
||||
assertNotNull(duplicate.headers());
|
||||
assertEquals(newHeaders, duplicate.headers());
|
||||
assertSame(newHeaders, duplicate.headers());
|
||||
assertNotSame(record.headers(), duplicate.headers());
|
||||
assertNotEquals(record.headers(), duplicate.headers());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldModifyRecordHeader() {
|
||||
assertTrue(record.headers().isEmpty());
|
||||
record.headers().addInt("intHeader", 100);
|
||||
assertEquals(1, record.headers().size());
|
||||
Header header = record.headers().lastWithName("intHeader");
|
||||
assertEquals(100, (int) Values.convertToInteger(header.schema(), header.value()));
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,129 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.connect.source;

import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.Values;
import org.apache.kafka.connect.header.ConnectHeaders;
import org.apache.kafka.connect.header.Header;
import org.apache.kafka.connect.header.Headers;
import org.junit.Before;
import org.junit.Test;

import java.util.Collections;
import java.util.Map;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNotSame;
import static org.junit.Assert.assertSame;
import static org.junit.Assert.assertTrue;

public class SourceRecordTest {

private static final Map<String, ?> SOURCE_PARTITION = Collections.singletonMap("src", "abc");
private static final Map<String, ?> SOURCE_OFFSET = Collections.singletonMap("offset", "1");
private static final String TOPIC_NAME = "myTopic";
private static final Integer PARTITION_NUMBER = 0;
private static final Long KAFKA_TIMESTAMP = 0L;

private SourceRecord record;

@Before
public void beforeEach() {
record = new SourceRecord(SOURCE_PARTITION, SOURCE_OFFSET, TOPIC_NAME, PARTITION_NUMBER, Schema.STRING_SCHEMA, "key",
Schema.BOOLEAN_SCHEMA, false, KAFKA_TIMESTAMP, null);
}

@Test
public void shouldCreateSourceRecordWithHeaders() {
Headers headers = new ConnectHeaders().addString("h1", "hv1").addBoolean("h2", true);
record = new SourceRecord(SOURCE_PARTITION, SOURCE_OFFSET, TOPIC_NAME, PARTITION_NUMBER, Schema.STRING_SCHEMA, "key",
Schema.BOOLEAN_SCHEMA, false, KAFKA_TIMESTAMP, headers);
assertNotNull(record.headers());
assertSame(headers, record.headers());
assertFalse(record.headers().isEmpty());
}

@Test
public void shouldCreateSourceRecordWithEmptyHeaders() {
assertEquals(SOURCE_PARTITION, record.sourcePartition());
assertEquals(SOURCE_OFFSET, record.sourceOffset());
assertEquals(TOPIC_NAME, record.topic());
assertEquals(PARTITION_NUMBER, record.kafkaPartition());
assertEquals(Schema.STRING_SCHEMA, record.keySchema());
assertEquals("key", record.key());
assertEquals(Schema.BOOLEAN_SCHEMA, record.valueSchema());
assertEquals(false, record.value());
assertEquals(KAFKA_TIMESTAMP, record.timestamp());
assertNotNull(record.headers());
assertTrue(record.headers().isEmpty());
}

@Test
public void shouldDuplicateRecordAndCloneHeaders() {
SourceRecord duplicate = record.newRecord(TOPIC_NAME, PARTITION_NUMBER, Schema.STRING_SCHEMA, "key", Schema.BOOLEAN_SCHEMA, false,
KAFKA_TIMESTAMP);

assertEquals(SOURCE_PARTITION, duplicate.sourcePartition());
assertEquals(SOURCE_OFFSET, duplicate.sourceOffset());
assertEquals(TOPIC_NAME, duplicate.topic());
assertEquals(PARTITION_NUMBER, duplicate.kafkaPartition());
assertEquals(Schema.STRING_SCHEMA, duplicate.keySchema());
assertEquals("key", duplicate.key());
assertEquals(Schema.BOOLEAN_SCHEMA, duplicate.valueSchema());
assertEquals(false, duplicate.value());
assertEquals(KAFKA_TIMESTAMP, duplicate.timestamp());
assertNotNull(duplicate.headers());
assertTrue(duplicate.headers().isEmpty());
assertNotSame(record.headers(), duplicate.headers());
assertEquals(record.headers(), duplicate.headers());
}

@Test
public void shouldDuplicateRecordUsingNewHeaders() {
Headers newHeaders = new ConnectHeaders().addString("h3", "hv3");
SourceRecord duplicate = record.newRecord(TOPIC_NAME, PARTITION_NUMBER, Schema.STRING_SCHEMA, "key", Schema.BOOLEAN_SCHEMA, false,
KAFKA_TIMESTAMP, newHeaders);

assertEquals(SOURCE_PARTITION, duplicate.sourcePartition());
assertEquals(SOURCE_OFFSET, duplicate.sourceOffset());
assertEquals(TOPIC_NAME, duplicate.topic());
assertEquals(PARTITION_NUMBER, duplicate.kafkaPartition());
assertEquals(Schema.STRING_SCHEMA, duplicate.keySchema());
assertEquals("key", duplicate.key());
assertEquals(Schema.BOOLEAN_SCHEMA, duplicate.valueSchema());
assertEquals(false, duplicate.value());
assertEquals(KAFKA_TIMESTAMP, duplicate.timestamp());
assertNotNull(duplicate.headers());
assertEquals(newHeaders, duplicate.headers());
assertSame(newHeaders, duplicate.headers());
assertNotSame(record.headers(), duplicate.headers());
assertNotEquals(record.headers(), duplicate.headers());
}

@Test
public void shouldModifyRecordHeader() {
assertTrue(record.headers().isEmpty());
record.headers().addInt("intHeader", 100);
assertEquals(1, record.headers().size());
Header header = record.headers().lastWithName("intHeader");
assertEquals(100, (int) Values.convertToInteger(header.schema(), header.value()));
}
}
@@ -0,0 +1,31 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.connect.storage;

import org.junit.Test;

import static org.junit.Assert.assertEquals;

public class ConverterTypeTest {

@Test
public void shouldFindByName() {
for (ConverterType type : ConverterType.values()) {
assertEquals(type, ConverterType.withName(type.getName()));
}
}
}
@@ -0,0 +1,236 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.connect.storage;

import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaAndValue;
import org.apache.kafka.connect.data.SchemaBuilder;
import org.junit.Before;
import org.junit.Test;

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;

public class SimpleHeaderConverterTest {

private static final String TOPIC = "topic";
private static final String HEADER = "header";

private static final Map<String, String> STRING_MAP = new LinkedHashMap<>();
private static final Schema STRING_MAP_SCHEMA = SchemaBuilder.map(Schema.STRING_SCHEMA, Schema.STRING_SCHEMA).schema();

private static final Map<String, Short> STRING_SHORT_MAP = new LinkedHashMap<>();
private static final Schema STRING_SHORT_MAP_SCHEMA = SchemaBuilder.map(Schema.STRING_SCHEMA, Schema.INT16_SCHEMA).schema();

private static final Map<String, Integer> STRING_INT_MAP = new LinkedHashMap<>();
private static final Schema STRING_INT_MAP_SCHEMA = SchemaBuilder.map(Schema.STRING_SCHEMA, Schema.INT32_SCHEMA).schema();

private static final List<Integer> INT_LIST = new ArrayList<>();
private static final Schema INT_LIST_SCHEMA = SchemaBuilder.array(Schema.INT32_SCHEMA).schema();

private static final List<String> STRING_LIST = new ArrayList<>();
private static final Schema STRING_LIST_SCHEMA = SchemaBuilder.array(Schema.STRING_SCHEMA).schema();

static {
STRING_MAP.put("foo", "123");
STRING_MAP.put("bar", "baz");
STRING_SHORT_MAP.put("foo", (short) 12345);
STRING_SHORT_MAP.put("bar", (short) 0);
STRING_SHORT_MAP.put("baz", (short) -4321);
STRING_INT_MAP.put("foo", 1234567890);
STRING_INT_MAP.put("bar", 0);
STRING_INT_MAP.put("baz", -987654321);
STRING_LIST.add("foo");
STRING_LIST.add("bar");
INT_LIST.add(1234567890);
INT_LIST.add(-987654321);
}

private SimpleHeaderConverter converter;

@Before
public void beforeEach() {
converter = new SimpleHeaderConverter();
}

@Test
public void shouldConvertNullValue() {
assertRoundTrip(Schema.STRING_SCHEMA, null);
assertRoundTrip(Schema.OPTIONAL_STRING_SCHEMA, null);
}

@Test
public void shouldConvertSimpleString() {
assertRoundTrip(Schema.STRING_SCHEMA, "simple");
}

@Test
public void shouldConvertEmptyString() {
assertRoundTrip(Schema.STRING_SCHEMA, "");
}

@Test
public void shouldConvertStringWithQuotesAndOtherDelimiterCharacters() {
assertRoundTrip(Schema.STRING_SCHEMA, "three\"blind\\\"mice");
assertRoundTrip(Schema.STRING_SCHEMA, "string with delimiters: <>?,./\\=+-!@#$%^&*(){}[]|;':");
}

@Test
public void shouldConvertMapWithStringKeys() {
assertRoundTrip(STRING_MAP_SCHEMA, STRING_MAP);
}

@Test
public void shouldParseStringOfMapWithStringValuesWithoutWhitespaceAsMap() {
SchemaAndValue result = roundTrip(Schema.STRING_SCHEMA, "{\"foo\":\"123\",\"bar\":\"baz\"}");
assertEquals(STRING_MAP_SCHEMA, result.schema());
assertEquals(STRING_MAP, result.value());
}

@Test
public void shouldParseStringOfMapWithStringValuesWithWhitespaceAsMap() {
SchemaAndValue result = roundTrip(Schema.STRING_SCHEMA, "{ \"foo\" : \"123\", \n\"bar\" : \"baz\" } ");
assertEquals(STRING_MAP_SCHEMA, result.schema());
assertEquals(STRING_MAP, result.value());
}

@Test
public void shouldConvertMapWithStringKeysAndShortValues() {
assertRoundTrip(STRING_SHORT_MAP_SCHEMA, STRING_SHORT_MAP);
}

@Test
public void shouldParseStringOfMapWithShortValuesWithoutWhitespaceAsMap() {
SchemaAndValue result = roundTrip(Schema.STRING_SCHEMA, "{\"foo\":12345,\"bar\":0,\"baz\":-4321}");
assertEquals(STRING_SHORT_MAP_SCHEMA, result.schema());
assertEquals(STRING_SHORT_MAP, result.value());
}

@Test
public void shouldParseStringOfMapWithShortValuesWithWhitespaceAsMap() {
SchemaAndValue result = roundTrip(Schema.STRING_SCHEMA, " { \"foo\" : 12345 , \"bar\" : 0, \"baz\" : -4321 } ");
assertEquals(STRING_SHORT_MAP_SCHEMA, result.schema());
assertEquals(STRING_SHORT_MAP, result.value());
}

@Test
public void shouldConvertMapWithStringKeysAndIntegerValues() {
assertRoundTrip(STRING_INT_MAP_SCHEMA, STRING_INT_MAP);
}

@Test
public void shouldParseStringOfMapWithIntValuesWithoutWhitespaceAsMap() {
SchemaAndValue result = roundTrip(Schema.STRING_SCHEMA, "{\"foo\":1234567890,\"bar\":0,\"baz\":-987654321}");
assertEquals(STRING_INT_MAP_SCHEMA, result.schema());
assertEquals(STRING_INT_MAP, result.value());
}

@Test
public void shouldParseStringOfMapWithIntValuesWithWhitespaceAsMap() {
SchemaAndValue result = roundTrip(Schema.STRING_SCHEMA, " { \"foo\" : 1234567890 , \"bar\" : 0, \"baz\" : -987654321 } ");
assertEquals(STRING_INT_MAP_SCHEMA, result.schema());
assertEquals(STRING_INT_MAP, result.value());
}

@Test
public void shouldConvertListWithStringValues() {
assertRoundTrip(STRING_LIST_SCHEMA, STRING_LIST);
}

@Test
public void shouldConvertListWithIntegerValues() {
assertRoundTrip(INT_LIST_SCHEMA, INT_LIST);
}

@Test
public void shouldConvertMapWithStringKeysAndMixedValuesToMap() {
Map<String, Object> map = new LinkedHashMap<>();
map.put("foo", "bar");
map.put("baz", (short) 3456);
SchemaAndValue result = roundTrip(null, map);
assertEquals(Schema.Type.MAP, result.schema().type());
assertEquals(Schema.Type.STRING, result.schema().keySchema().type());
assertNull(result.schema().valueSchema());
assertEquals(map, result.value());
}

@Test
public void shouldConvertListWithMixedValuesToListWithoutSchema() {
List<Object> list = new ArrayList<>();
list.add("foo");
list.add((short) 13344);
SchemaAndValue result = roundTrip(null, list);
assertEquals(Schema.Type.ARRAY, result.schema().type());
assertNull(result.schema().valueSchema());
assertEquals(list, result.value());
}

@Test
public void shouldConvertEmptyMapToMap() {
Map<Object, Object> map = new LinkedHashMap<>();
SchemaAndValue result = roundTrip(null, map);
assertEquals(Schema.Type.MAP, result.schema().type());
assertNull(result.schema().keySchema());
assertNull(result.schema().valueSchema());
assertEquals(map, result.value());
}

@Test
public void shouldConvertEmptyListToList() {
List<Object> list = new ArrayList<>();
SchemaAndValue result = roundTrip(null, list);
assertEquals(Schema.Type.ARRAY, result.schema().type());
assertNull(result.schema().valueSchema());
assertEquals(list, result.value());
}

protected SchemaAndValue roundTrip(Schema schema, Object input) {
byte[] serialized = converter.fromConnectHeader(TOPIC, HEADER, schema, input);
return converter.toConnectHeader(TOPIC, HEADER, serialized);
}

protected void assertRoundTrip(Schema schema, Object value) {
byte[] serialized = converter.fromConnectHeader(TOPIC, HEADER, schema, value);
SchemaAndValue result = converter.toConnectHeader(TOPIC, HEADER, serialized);

if (value == null) {
assertNull(serialized);
assertNull(result.schema());
assertNull(result.value());
} else {
assertNotNull(serialized);
assertEquals(value, result.value());
assertEquals(schema, result.schema());

byte[] serialized2 = converter.fromConnectHeader(TOPIC, HEADER, result.schema(), result.value());
SchemaAndValue result2 = converter.toConnectHeader(TOPIC, HEADER, serialized2);
assertNotNull(serialized2);
assertEquals(schema, result2.schema());
assertEquals(value, result2.value());
assertEquals(result, result2);
assertArrayEquals(serialized, serialized2);
}
}

}
@@ -0,0 +1,100 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.connect.storage;

import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaAndValue;
import org.junit.Test;

import java.io.UnsupportedEncodingException;
import java.util.Collections;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertArrayEquals;

public class StringConverterTest {
private static final String TOPIC = "topic";
private static final String SAMPLE_STRING = "a string";

private StringConverter converter = new StringConverter();

@Test
public void testStringToBytes() throws UnsupportedEncodingException {
assertArrayEquals(SAMPLE_STRING.getBytes("UTF8"), converter.fromConnectData(TOPIC, Schema.STRING_SCHEMA, SAMPLE_STRING));
}

@Test
public void testNonStringToBytes() throws UnsupportedEncodingException {
assertArrayEquals("true".getBytes("UTF8"), converter.fromConnectData(TOPIC, Schema.BOOLEAN_SCHEMA, true));
}

@Test
public void testNullToBytes() {
assertEquals(null, converter.fromConnectData(TOPIC, Schema.OPTIONAL_STRING_SCHEMA, null));
}

@Test
public void testToBytesIgnoresSchema() throws UnsupportedEncodingException {
assertArrayEquals("true".getBytes("UTF8"), converter.fromConnectData(TOPIC, null, true));
}

@Test
public void testToBytesNonUtf8Encoding() throws UnsupportedEncodingException {
converter.configure(Collections.singletonMap("converter.encoding", "UTF-16"), true);
assertArrayEquals(SAMPLE_STRING.getBytes("UTF-16"), converter.fromConnectData(TOPIC, Schema.STRING_SCHEMA, SAMPLE_STRING));
}

@Test
public void testBytesToString() {
SchemaAndValue data = converter.toConnectData(TOPIC, SAMPLE_STRING.getBytes());
assertEquals(Schema.OPTIONAL_STRING_SCHEMA, data.schema());
assertEquals(SAMPLE_STRING, data.value());
}

@Test
public void testBytesNullToString() {
SchemaAndValue data = converter.toConnectData(TOPIC, null);
assertEquals(Schema.OPTIONAL_STRING_SCHEMA, data.schema());
assertEquals(null, data.value());
}

@Test
public void testBytesToStringNonUtf8Encoding() throws UnsupportedEncodingException {
converter.configure(Collections.singletonMap("converter.encoding", "UTF-16"), true);
SchemaAndValue data = converter.toConnectData(TOPIC, SAMPLE_STRING.getBytes("UTF-16"));
assertEquals(Schema.OPTIONAL_STRING_SCHEMA, data.schema());
assertEquals(SAMPLE_STRING, data.value());
}

// Note: the header conversion methods delegate to the data conversion methods, which are tested above.
// The following simply verify that the delegation works.

@Test
public void testStringHeaderValueToBytes() throws UnsupportedEncodingException {
assertArrayEquals(SAMPLE_STRING.getBytes("UTF8"), converter.fromConnectHeader(TOPIC, "hdr", Schema.STRING_SCHEMA, SAMPLE_STRING));
}

@Test
public void testNonStringHeaderValueToBytes() throws UnsupportedEncodingException {
assertArrayEquals("true".getBytes("UTF8"), converter.fromConnectHeader(TOPIC, "hdr", Schema.BOOLEAN_SCHEMA, true));
}

@Test
public void testNullHeaderValueToBytes() {
assertEquals(null, converter.fromConnectHeader(TOPIC, "hdr", Schema.OPTIONAL_STRING_SCHEMA, null));
}
}
@@ -0,0 +1,66 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.connect.util;

import org.junit.Test;

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

import static org.junit.Assert.assertEquals;

public class ConnectorUtilsTest {

private static final List<Integer> FIVE_ELEMENTS = Arrays.asList(1, 2, 3, 4, 5);

@Test
public void testGroupPartitions() {

List<List<Integer>> grouped = ConnectorUtils.groupPartitions(FIVE_ELEMENTS, 1);
assertEquals(Arrays.asList(FIVE_ELEMENTS), grouped);

grouped = ConnectorUtils.groupPartitions(FIVE_ELEMENTS, 2);
assertEquals(Arrays.asList(Arrays.asList(1, 2, 3), Arrays.asList(4, 5)), grouped);

grouped = ConnectorUtils.groupPartitions(FIVE_ELEMENTS, 3);
assertEquals(Arrays.asList(Arrays.asList(1, 2),
Arrays.asList(3, 4),
Arrays.asList(5)), grouped);

grouped = ConnectorUtils.groupPartitions(FIVE_ELEMENTS, 5);
assertEquals(Arrays.asList(Arrays.asList(1),
Arrays.asList(2),
Arrays.asList(3),
Arrays.asList(4),
Arrays.asList(5)), grouped);

grouped = ConnectorUtils.groupPartitions(FIVE_ELEMENTS, 7);
assertEquals(Arrays.asList(Arrays.asList(1),
Arrays.asList(2),
Arrays.asList(3),
Arrays.asList(4),
Arrays.asList(5),
Collections.emptyList(),
Collections.emptyList()), grouped);
}

@Test(expected = IllegalArgumentException.class)
public void testGroupPartitionsInvalidCount() {
ConnectorUtils.groupPartitions(FIVE_ELEMENTS, 0);
}
}
@@ -0,0 +1,79 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.kafka.connect.rest.basic.auth.extension;

import org.apache.kafka.common.utils.AppInfoParser;
import org.apache.kafka.connect.rest.ConnectRestExtension;
import org.apache.kafka.connect.rest.ConnectRestExtensionContext;

import java.io.IOException;
import java.util.Map;

/**
* Provides the ability to authenticate incoming BasicAuth credentials using the configured JAAS {@link
* javax.security.auth.spi.LoginModule}. An entry with the name {@code KafkaConnect} is expected in the JAAS config file configured in the
* JVM. An implementation of {@link javax.security.auth.spi.LoginModule} needs to be provided in the JAAS config file. The {@code
* LoginModule} implementation should configure the {@link javax.security.auth.callback.CallbackHandler} with only {@link
* javax.security.auth.callback.NameCallback} and {@link javax.security.auth.callback.PasswordCallback}.
*
* <p>To use this extension, one needs to add the following config in the {@code worker.properties}
* <pre>
* rest.extension.classes = org.apache.kafka.connect.rest.basic.auth.extension.BasicAuthSecurityRestExtension
* </pre>
*
* <p>An example JAAS config would look as below
* <pre>
* KafkaConnect {
* org.apache.kafka.connect.rest.basic.auth.extension.PropertyFileLoginModule required
* file="/mnt/secret/credentials.properties";
* };
* </pre>
*
* <p>This is a reference implementation of the {@link ConnectRestExtension} interface. It registers an implementation of {@link
* javax.ws.rs.container.ContainerRequestFilter} that does JAAS based authentication of incoming Basic Auth credentials. {@link
* ConnectRestExtension} implementations are loaded via the plugin class loader using the {@link java.util.ServiceLoader} mechanism and hence
* the packaged jar includes {@code META-INF/services/org.apache.kafka.connect.rest.extension.ConnectRestExtension} with the entry
* {@code org.apache.kafka.connect.extension.auth.jaas.BasicAuthSecurityRestExtension}
*
* <p><b>NOTE: The implementation ships with a default {@link PropertyFileLoginModule} that helps authenticate the request against a
* property file. {@link PropertyFileLoginModule} is NOT intended to be used in production since the credentials are stored in PLAINTEXT. One can use
* this extension in production by using their own implementation of {@link javax.security.auth.spi.LoginModule} that authenticates against
* stores like LDAP, DB, etc.</b>
*/
public class BasicAuthSecurityRestExtension implements ConnectRestExtension {

@Override
public void register(ConnectRestExtensionContext restPluginContext) {
restPluginContext.configurable().register(JaasBasicAuthFilter.class);
}

@Override
public void close() throws IOException {

}

@Override
public void configure(Map<String, ?> configs) {

}

@Override
public String version() {
return AppInfoParser.getVersion();
}
}
@@ -0,0 +1,102 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.kafka.connect.rest.basic.auth.extension;

import java.util.regex.Pattern;
import javax.ws.rs.HttpMethod;
import org.apache.kafka.common.config.ConfigException;

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.Base64;

import javax.security.auth.callback.Callback;
import javax.security.auth.callback.CallbackHandler;
import javax.security.auth.callback.NameCallback;
import javax.security.auth.callback.PasswordCallback;
import javax.security.auth.callback.UnsupportedCallbackException;
import javax.security.auth.login.LoginContext;
import javax.security.auth.login.LoginException;
import javax.ws.rs.container.ContainerRequestContext;
import javax.ws.rs.container.ContainerRequestFilter;
import javax.ws.rs.core.Response;

public class JaasBasicAuthFilter implements ContainerRequestFilter {
private static final String CONNECT_LOGIN_MODULE = "KafkaConnect";
static final String AUTHORIZATION = "Authorization";
private static final Pattern TASK_REQUEST_PATTERN = Pattern.compile("/?connectors/([^/]+)/tasks/?");
@Override
public void filter(ContainerRequestContext requestContext) throws IOException {
try {
if (!(requestContext.getMethod().equals(HttpMethod.POST) && TASK_REQUEST_PATTERN.matcher(requestContext.getUriInfo().getPath()).matches())) {
LoginContext loginContext =
new LoginContext(CONNECT_LOGIN_MODULE, new BasicAuthCallBackHandler(
requestContext.getHeaderString(AUTHORIZATION)));
loginContext.login();
}
} catch (LoginException | ConfigException e) {
requestContext.abortWith(
Response.status(Response.Status.UNAUTHORIZED)
.entity("User cannot access the resource.")
.build());
}
}


public static class BasicAuthCallBackHandler implements CallbackHandler {

private static final String BASIC = "basic";
private static final char COLON = ':';
private static final char SPACE = ' ';
private String username;
private String password;

public BasicAuthCallBackHandler(String credentials) {
if (credentials != null) {
int space = credentials.indexOf(SPACE);
if (space > 0) {
String method = credentials.substring(0, space);
if (BASIC.equalsIgnoreCase(method)) {
credentials = credentials.substring(space + 1);
credentials = new String(Base64.getDecoder().decode(credentials),
StandardCharsets.UTF_8);
int i = credentials.indexOf(COLON);
if (i > 0) {
username = credentials.substring(0, i);
password = credentials.substring(i + 1);
}
}
}
}
}

@Override
public void handle(Callback[] callbacks) throws UnsupportedCallbackException {
for (Callback callback : callbacks) {
if (callback instanceof NameCallback) {
((NameCallback) callback).setName(username);
} else if (callback instanceof PasswordCallback) {
((PasswordCallback) callback).setPassword(password.toCharArray());
} else {
throw new UnsupportedCallbackException(callback, "Supports only NameCallback "
+ "and PasswordCallback");
}
}
}
}
}
@@ -0,0 +1,119 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.kafka.connect.rest.basic.auth.extension;

import org.apache.kafka.common.config.ConfigException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ConcurrentHashMap;

import javax.security.auth.Subject;
import javax.security.auth.callback.Callback;
import javax.security.auth.callback.CallbackHandler;
import javax.security.auth.callback.NameCallback;
import javax.security.auth.callback.PasswordCallback;
import javax.security.auth.login.LoginException;
import javax.security.auth.spi.LoginModule;

/**
* {@link PropertyFileLoginModule} authenticates against a properties file.
* The credentials should be stored in the format {username}={password} in the properties file.
* The absolute path of the file needs to be specified using the option <b>file</b>.
*
* <p><b>NOTE: This implementation is NOT intended to be used in production since the credentials are stored in PLAINTEXT in the
* properties file.</b>
*/
public class PropertyFileLoginModule implements LoginModule {
private static final Logger log = LoggerFactory.getLogger(PropertyFileLoginModule.class);

private CallbackHandler callbackHandler;
private static final String FILE_OPTIONS = "file";
private String fileName;
private boolean authenticated;

private static Map<String, Properties> credentialPropertiesMap = new ConcurrentHashMap<>();

@Override
public void initialize(Subject subject, CallbackHandler callbackHandler, Map<String, ?> sharedState, Map<String, ?> options) {
this.callbackHandler = callbackHandler;
fileName = (String) options.get(FILE_OPTIONS);
if (fileName == null || fileName.trim().isEmpty()) {
throw new ConfigException("Property Credentials file must be specified");
}
if (!credentialPropertiesMap.containsKey(fileName)) {
Properties credentialProperties = new Properties();
try {
try (InputStream inputStream = Files.newInputStream(Paths.get(fileName))) {
credentialProperties.load(inputStream);
}
credentialPropertiesMap.putIfAbsent(fileName, credentialProperties);
} catch (IOException e) {
log.error("Error loading credentials file ", e);
throw new ConfigException("Error loading Property Credentials file");
}
}
}

@Override
public boolean login() throws LoginException {
Callback[] callbacks = configureCallbacks();
try {
callbackHandler.handle(callbacks);
} catch (Exception e) {
throw new LoginException(e.getMessage());
}

String username = ((NameCallback) callbacks[0]).getName();
char[] passwordChars = ((PasswordCallback) callbacks[1]).getPassword();
String password = passwordChars != null ? new String(passwordChars) : null;
Properties credentialProperties = credentialPropertiesMap.get(fileName);
authenticated = credentialProperties.isEmpty() ||
(password != null && password.equals(credentialProperties.get(username)));
return authenticated;
}

@Override
public boolean commit() throws LoginException {
return authenticated;
}

@Override
public boolean abort() throws LoginException {
return true;
}

@Override
public boolean logout() throws LoginException {
return true;
}

private Callback[] configureCallbacks() {

Callback[] callbacks = new Callback[2];
callbacks[0] = new NameCallback("Enter user name");
callbacks[1] = new PasswordCallback("Enter password", false);
return callbacks;
}
}
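To make the wiring above concrete, the following is a minimal sketch of the two files PropertyFileLoginModule expects; the path and the user entries are illustrative assumptions, only the JAAS entry name KafkaConnect, the login module class, the file option and the {username}={password} format come from the javadocs above.

# /mnt/secret/credentials.properties (assumed path) - one {username}={password} entry per line
user=password
admin=admin-secret

# JAAS configuration referenced via -Djava.security.auth.login.config on the worker JVM
KafkaConnect {
org.apache.kafka.connect.rest.basic.auth.extension.PropertyFileLoginModule required
file="/mnt/secret/credentials.properties";
};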
@@ -0,0 +1,16 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

org.apache.kafka.connect.rest.basic.auth.extension.BasicAuthSecurityRestExtension
@@ -0,0 +1,200 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.kafka.connect.rest.basic.auth.extension;

import javax.ws.rs.HttpMethod;
import javax.ws.rs.core.UriInfo;
import org.apache.kafka.common.security.JaasUtils;
import org.easymock.EasyMock;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.powermock.api.easymock.PowerMock;
import org.powermock.api.easymock.annotation.MockStrict;
import org.powermock.core.classloader.annotations.PowerMockIgnore;
import org.powermock.modules.junit4.PowerMockRunner;

import java.io.File;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.util.ArrayList;
import java.util.Base64;
import java.util.List;

import javax.security.auth.login.Configuration;
import javax.ws.rs.container.ContainerRequestContext;
import javax.ws.rs.core.Response;

import static org.powermock.api.easymock.PowerMock.replayAll;

@RunWith(PowerMockRunner.class)
@PowerMockIgnore("javax.*")
public class JaasBasicAuthFilterTest {

@MockStrict
private ContainerRequestContext requestContext;

private JaasBasicAuthFilter jaasBasicAuthFilter = new JaasBasicAuthFilter();
private String previousJaasConfig;
private Configuration previousConfiguration;

@MockStrict
private UriInfo uriInfo;

@Before
public void setup() {
EasyMock.reset(requestContext);
}

@After
public void tearDown() {
if (previousJaasConfig != null) {
System.setProperty(JaasUtils.JAVA_LOGIN_CONFIG_PARAM, previousJaasConfig);
}
Configuration.setConfiguration(previousConfiguration);
}

@Test
public void testSuccess() throws IOException {
File credentialFile = File.createTempFile("credential", ".properties");
credentialFile.deleteOnExit();
List<String> lines = new ArrayList<>();
lines.add("user=password");
lines.add("user1=password1");
Files.write(credentialFile.toPath(), lines, StandardCharsets.UTF_8);

setupJaasConfig("KafkaConnect", credentialFile.getPath(), true);
setMock("Basic", "user", "password", false);

jaasBasicAuthFilter.filter(requestContext);
}


@Test
public void testBadCredential() throws IOException {
setMock("Basic", "user1", "password", true);
jaasBasicAuthFilter.filter(requestContext);
}

@Test
public void testBadPassword() throws IOException {
setMock("Basic", "user", "password1", true);
jaasBasicAuthFilter.filter(requestContext);
}

@Test
public void testUnknownBearer() throws IOException {
setMock("Unknown", "user", "password", true);
jaasBasicAuthFilter.filter(requestContext);
}

@Test
public void testUnknownLoginModule() throws IOException {
setupJaasConfig("KafkaConnect1", "/tmp/testcredential", true);
Configuration.setConfiguration(null);
setMock("Basic", "user", "password", true);
jaasBasicAuthFilter.filter(requestContext);
}

@Test
public void testUnknownCredentialsFile() throws IOException {
setupJaasConfig("KafkaConnect", "/tmp/testcredential", true);
Configuration.setConfiguration(null);
setMock("Basic", "user", "password", true);
jaasBasicAuthFilter.filter(requestContext);
}

@Test
public void testEmptyCredentialsFile() throws IOException {
File jaasConfigFile = File.createTempFile("ks-jaas-", ".conf");
jaasConfigFile.deleteOnExit();
System.setProperty(JaasUtils.JAVA_LOGIN_CONFIG_PARAM, jaasConfigFile.getPath());
setupJaasConfig("KafkaConnect", "", true);
Configuration.setConfiguration(null);
setMock("Basic", "user", "password", true);
jaasBasicAuthFilter.filter(requestContext);
}

@Test
public void testNoFileOption() throws IOException {
File jaasConfigFile = File.createTempFile("ks-jaas-", ".conf");
jaasConfigFile.deleteOnExit();
System.setProperty(JaasUtils.JAVA_LOGIN_CONFIG_PARAM, jaasConfigFile.getPath());
setupJaasConfig("KafkaConnect", "", false);
Configuration.setConfiguration(null);
setMock("Basic", "user", "password", true);
jaasBasicAuthFilter.filter(requestContext);
}

@Test
public void testPostWithoutAppropriateCredential() throws IOException {
EasyMock.expect(requestContext.getMethod()).andReturn(HttpMethod.POST);
EasyMock.expect(requestContext.getUriInfo()).andReturn(uriInfo);
EasyMock.expect(uriInfo.getPath()).andReturn("connectors/connName/tasks");

PowerMock.replayAll();
jaasBasicAuthFilter.filter(requestContext);
EasyMock.verify(requestContext);
}

@Test
public void testPostNotChangingConnectorTask() throws IOException {
EasyMock.expect(requestContext.getMethod()).andReturn(HttpMethod.POST);
EasyMock.expect(requestContext.getUriInfo()).andReturn(uriInfo);
EasyMock.expect(uriInfo.getPath()).andReturn("local:randomport/connectors/connName");
String authHeader = "Basic" + Base64.getEncoder().encodeToString(("user" + ":" + "password").getBytes());
EasyMock.expect(requestContext.getHeaderString(JaasBasicAuthFilter.AUTHORIZATION))
.andReturn(authHeader);
requestContext.abortWith(EasyMock.anyObject(Response.class));
EasyMock.expectLastCall();
PowerMock.replayAll();
jaasBasicAuthFilter.filter(requestContext);
EasyMock.verify(requestContext);
}

private void setMock(String authorization, String username, String password, boolean exceptionCase) {
EasyMock.expect(requestContext.getMethod()).andReturn(HttpMethod.GET);
String authHeader = authorization + " " + Base64.getEncoder().encodeToString((username + ":" + password).getBytes());
EasyMock.expect(requestContext.getHeaderString(JaasBasicAuthFilter.AUTHORIZATION))
.andReturn(authHeader);
if (exceptionCase) {
requestContext.abortWith(EasyMock.anyObject(Response.class));
EasyMock.expectLastCall();
}
replayAll();
}

private void setupJaasConfig(String loginModule, String credentialFilePath, boolean includeFileOptions) throws IOException {
File jaasConfigFile = File.createTempFile("ks-jaas-", ".conf");
jaasConfigFile.deleteOnExit();
previousJaasConfig = System.setProperty(JaasUtils.JAVA_LOGIN_CONFIG_PARAM, jaasConfigFile.getPath());
List<String> lines;
lines = new ArrayList<>();
lines.add(loginModule + " { org.apache.kafka.connect.rest.basic.auth.extension.PropertyFileLoginModule required ");
if (includeFileOptions) {
lines.add("file=\"" + credentialFilePath + "\"");
}
lines.add(";};");
Files.write(jaasConfigFile.toPath(), lines, StandardCharsets.UTF_8);
previousConfiguration = Configuration.getConfiguration();
Configuration.setConfiguration(null);
}

}
@@ -0,0 +1,81 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.connect.file;

import org.apache.kafka.common.config.AbstractConfig;
import org.apache.kafka.common.config.ConfigDef;
import org.apache.kafka.common.config.ConfigDef.Importance;
import org.apache.kafka.common.config.ConfigDef.Type;
import org.apache.kafka.common.utils.AppInfoParser;
import org.apache.kafka.connect.connector.Task;
import org.apache.kafka.connect.sink.SinkConnector;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/**
* Very simple sink connector that writes records to a file or, if no file is specified, to the console.
*/
public class FileStreamSinkConnector extends SinkConnector {

public static final String FILE_CONFIG = "file";
private static final ConfigDef CONFIG_DEF = new ConfigDef()
.define(FILE_CONFIG, Type.STRING, null, Importance.HIGH, "Destination filename. If not specified, the standard output will be used");

private String filename;

@Override
public String version() {
return AppInfoParser.getVersion();
}

@Override
public void start(Map<String, String> props) {
AbstractConfig parsedConfig = new AbstractConfig(CONFIG_DEF, props);
filename = parsedConfig.getString(FILE_CONFIG);
}

@Override
public Class<? extends Task> taskClass() {
return FileStreamSinkTask.class;
}

@Override
public List<Map<String, String>> taskConfigs(int maxTasks) {
ArrayList<Map<String, String>> configs = new ArrayList<>();
for (int i = 0; i < maxTasks; i++) {
Map<String, String> config = new HashMap<>();
if (filename != null)
config.put(FILE_CONFIG, filename);
configs.add(config);
}
return configs;
}

@Override
public void stop() {
// Nothing to do since FileStreamSinkConnector has no background monitoring.
}

@Override
public ConfigDef config() {
return CONFIG_DEF;
}
}
@@ -0,0 +1,99 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.connect.file;

import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.connect.errors.ConnectException;
import org.apache.kafka.connect.sink.SinkRecord;
import org.apache.kafka.connect.sink.SinkTask;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.io.PrintStream;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;
import java.util.Collection;
import java.util.Map;

/**
* FileStreamSinkTask writes records to stdout or a file.
*/
public class FileStreamSinkTask extends SinkTask {
private static final Logger log = LoggerFactory.getLogger(FileStreamSinkTask.class);

private String filename;
private PrintStream outputStream;

public FileStreamSinkTask() {
}

// for testing
public FileStreamSinkTask(PrintStream outputStream) {
filename = null;
this.outputStream = outputStream;
}

@Override
public String version() {
return new FileStreamSinkConnector().version();
}

@Override
public void start(Map<String, String> props) {
filename = props.get(FileStreamSinkConnector.FILE_CONFIG);
if (filename == null) {
outputStream = System.out;
} else {
try {
outputStream = new PrintStream(
Files.newOutputStream(Paths.get(filename), StandardOpenOption.CREATE, StandardOpenOption.APPEND),
false,
StandardCharsets.UTF_8.name());
} catch (IOException e) {
throw new ConnectException("Couldn't find or create file '" + filename + "' for FileStreamSinkTask", e);
}
}
}

@Override
public void put(Collection<SinkRecord> sinkRecords) {
for (SinkRecord record : sinkRecords) {
log.trace("Writing line to {}: {}", logFilename(), record.value());
outputStream.println(record.value());
}
}

@Override
public void flush(Map<TopicPartition, OffsetAndMetadata> offsets) {
log.trace("Flushing output stream for {}", logFilename());
outputStream.flush();
}

@Override
public void stop() {
if (outputStream != null && outputStream != System.out)
outputStream.close();
}

private String logFilename() {
return filename == null ? "stdout" : filename;
}
}
@@ -0,0 +1,98 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.file;
|
||||
|
||||
import org.apache.kafka.common.config.AbstractConfig;
|
||||
import org.apache.kafka.common.config.ConfigDef;
|
||||
import org.apache.kafka.common.config.ConfigDef.Importance;
|
||||
import org.apache.kafka.common.config.ConfigDef.Type;
|
||||
import org.apache.kafka.common.config.ConfigException;
|
||||
import org.apache.kafka.common.utils.AppInfoParser;
|
||||
import org.apache.kafka.connect.connector.Task;
|
||||
import org.apache.kafka.connect.source.SourceConnector;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
/**
|
||||
* Very simple connector that works with the console. This connector supports both source and
|
||||
* sink modes via its 'mode' setting.
|
||||
*/
|
||||
public class FileStreamSourceConnector extends SourceConnector {
|
||||
public static final String TOPIC_CONFIG = "topic";
|
||||
public static final String FILE_CONFIG = "file";
|
||||
public static final String TASK_BATCH_SIZE_CONFIG = "batch.size";
|
||||
|
||||
public static final int DEFAULT_TASK_BATCH_SIZE = 2000;
|
||||
|
||||
private static final ConfigDef CONFIG_DEF = new ConfigDef()
|
||||
.define(FILE_CONFIG, Type.STRING, null, Importance.HIGH, "Source filename. If not specified, the standard input will be used")
|
||||
.define(TOPIC_CONFIG, Type.LIST, Importance.HIGH, "The topic to publish data to")
|
||||
.define(TASK_BATCH_SIZE_CONFIG, Type.INT, DEFAULT_TASK_BATCH_SIZE, Importance.LOW,
|
||||
"The maximum number of records the Source task can read from file one time");
|
||||
|
||||
private String filename;
|
||||
private String topic;
|
||||
private int batchSize;
|
||||
|
||||
@Override
|
||||
public String version() {
|
||||
return AppInfoParser.getVersion();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void start(Map<String, String> props) {
|
||||
AbstractConfig parsedConfig = new AbstractConfig(CONFIG_DEF, props);
|
||||
filename = parsedConfig.getString(FILE_CONFIG);
|
||||
List<String> topics = parsedConfig.getList(TOPIC_CONFIG);
|
||||
if (topics.size() != 1) {
|
||||
throw new ConfigException("'topic' in FileStreamSourceConnector configuration requires definition of a single topic");
|
||||
}
|
||||
topic = topics.get(0);
|
||||
batchSize = parsedConfig.getInt(TASK_BATCH_SIZE_CONFIG);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Class<? extends Task> taskClass() {
|
||||
return FileStreamSourceTask.class;
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<Map<String, String>> taskConfigs(int maxTasks) {
|
||||
ArrayList<Map<String, String>> configs = new ArrayList<>();
|
||||
// Only one input stream makes sense.
|
||||
Map<String, String> config = new HashMap<>();
|
||||
if (filename != null)
|
||||
config.put(FILE_CONFIG, filename);
|
||||
config.put(TOPIC_CONFIG, topic);
|
||||
config.put(TASK_BATCH_SIZE_CONFIG, String.valueOf(batchSize));
|
||||
configs.add(config);
|
||||
return configs;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void stop() {
|
||||
// Nothing to do since FileStreamSourceConnector has no background monitoring.
|
||||
}
|
||||
|
||||
@Override
|
||||
public ConfigDef config() {
|
||||
return CONFIG_DEF;
|
||||
}
|
||||
}
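// A minimal usage sketch (not part of the original file) of how a Connect worker is expected to
// drive this connector; the property values below are hypothetical examples, not defaults:
//
//   FileStreamSourceConnector connector = new FileStreamSourceConnector();
//   Map<String, String> props = new HashMap<>();
//   props.put(FileStreamSourceConnector.FILE_CONFIG, "/tmp/input.txt"); // omit to read from stdin
//   props.put(FileStreamSourceConnector.TOPIC_CONFIG, "connect-test");  // exactly one topic is required
//   connector.start(props);
//   // Only one input stream makes sense, so even taskConfigs(3) returns a single task config.
//   List<Map<String, String>> taskConfigs = connector.taskConfigs(3);
//   connector.stop();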
|
||||
@@ -0,0 +1,234 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.file;
|
||||
|
||||
import java.io.BufferedReader;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.io.InputStreamReader;
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.NoSuchFileException;
|
||||
import java.nio.file.Paths;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import org.apache.kafka.connect.data.Schema;
|
||||
import org.apache.kafka.connect.errors.ConnectException;
|
||||
import org.apache.kafka.connect.source.SourceRecord;
|
||||
import org.apache.kafka.connect.source.SourceTask;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
/**
|
||||
* FileStreamSourceTask reads from stdin or a file.
|
||||
*/
|
||||
public class FileStreamSourceTask extends SourceTask {
|
||||
private static final Logger log = LoggerFactory.getLogger(FileStreamSourceTask.class);
|
||||
public static final String FILENAME_FIELD = "filename";
|
||||
public static final String POSITION_FIELD = "position";
|
||||
private static final Schema VALUE_SCHEMA = Schema.STRING_SCHEMA;
|
||||
|
||||
private String filename;
|
||||
private InputStream stream;
|
||||
private BufferedReader reader = null;
|
||||
private char[] buffer = new char[1024];
|
||||
private int offset = 0;
|
||||
private String topic = null;
|
||||
private int batchSize = FileStreamSourceConnector.DEFAULT_TASK_BATCH_SIZE;
|
||||
|
||||
private Long streamOffset;
|
||||
|
||||
@Override
|
||||
public String version() {
|
||||
return new FileStreamSourceConnector().version();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void start(Map<String, String> props) {
|
||||
filename = props.get(FileStreamSourceConnector.FILE_CONFIG);
|
||||
if (filename == null || filename.isEmpty()) {
|
||||
stream = System.in;
|
||||
// Tracking offset for stdin doesn't make sense
|
||||
streamOffset = null;
|
||||
reader = new BufferedReader(new InputStreamReader(stream, StandardCharsets.UTF_8));
|
||||
}
|
||||
// A missing topic or a parsing error is not possible here because the config has already been
// parsed and validated in the Connector.
|
||||
topic = props.get(FileStreamSourceConnector.TOPIC_CONFIG);
|
||||
batchSize = Integer.parseInt(props.get(FileStreamSourceConnector.TASK_BATCH_SIZE_CONFIG));
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<SourceRecord> poll() throws InterruptedException {
|
||||
if (stream == null) {
|
||||
try {
|
||||
stream = Files.newInputStream(Paths.get(filename));
|
||||
Map<String, Object> offset = context.offsetStorageReader().offset(Collections.singletonMap(FILENAME_FIELD, filename));
|
||||
if (offset != null) {
|
||||
Object lastRecordedOffset = offset.get(POSITION_FIELD);
|
||||
if (lastRecordedOffset != null && !(lastRecordedOffset instanceof Long))
|
||||
throw new ConnectException("Offset position is the incorrect type");
|
||||
if (lastRecordedOffset != null) {
|
||||
log.debug("Found previous offset, trying to skip to file offset {}", lastRecordedOffset);
|
||||
long skipLeft = (Long) lastRecordedOffset;
|
||||
while (skipLeft > 0) {
|
||||
try {
|
||||
long skipped = stream.skip(skipLeft);
|
||||
skipLeft -= skipped;
|
||||
} catch (IOException e) {
|
||||
log.error("Error while trying to seek to previous offset in file {}: ", filename, e);
|
||||
throw new ConnectException(e);
|
||||
}
|
||||
}
|
||||
log.debug("Skipped to offset {}", lastRecordedOffset);
|
||||
}
|
||||
streamOffset = (lastRecordedOffset != null) ? (Long) lastRecordedOffset : 0L;
|
||||
} else {
|
||||
streamOffset = 0L;
|
||||
}
|
||||
reader = new BufferedReader(new InputStreamReader(stream, StandardCharsets.UTF_8));
|
||||
log.debug("Opened {} for reading", logFilename());
|
||||
} catch (NoSuchFileException e) {
|
||||
log.warn("Couldn't find file {} for FileStreamSourceTask, sleeping to wait for it to be created", logFilename());
|
||||
synchronized (this) {
|
||||
this.wait(1000);
|
||||
}
|
||||
return null;
|
||||
} catch (IOException e) {
|
||||
log.error("Error while trying to open file {}: ", filename, e);
|
||||
throw new ConnectException(e);
|
||||
}
|
||||
}
|
||||
|
||||
// Unfortunately we can't just use readLine() because it blocks in an uninterruptible way.
|
||||
// Instead we have to manage splitting lines ourselves, using simple backoff when no new data
|
||||
// is available.
|
||||
try {
|
||||
final BufferedReader readerCopy;
|
||||
synchronized (this) {
|
||||
readerCopy = reader;
|
||||
}
|
||||
if (readerCopy == null)
|
||||
return null;
|
||||
|
||||
ArrayList<SourceRecord> records = null;
|
||||
|
||||
int nread = 0;
|
||||
while (readerCopy.ready()) {
|
||||
nread = readerCopy.read(buffer, offset, buffer.length - offset);
|
||||
log.trace("Read {} bytes from {}", nread, logFilename());
|
||||
|
||||
if (nread > 0) {
|
||||
offset += nread;
|
||||
if (offset == buffer.length) {
|
||||
char[] newbuf = new char[buffer.length * 2];
|
||||
System.arraycopy(buffer, 0, newbuf, 0, buffer.length);
|
||||
buffer = newbuf;
|
||||
}
|
||||
|
||||
String line;
|
||||
do {
|
||||
line = extractLine();
|
||||
if (line != null) {
|
||||
log.trace("Read a line from {}", logFilename());
|
||||
if (records == null)
|
||||
records = new ArrayList<>();
|
||||
records.add(new SourceRecord(offsetKey(filename), offsetValue(streamOffset), topic, null,
|
||||
null, null, VALUE_SCHEMA, line, System.currentTimeMillis()));
|
||||
|
||||
if (records.size() >= batchSize) {
|
||||
return records;
|
||||
}
|
||||
}
|
||||
} while (line != null);
|
||||
}
|
||||
}
|
||||
|
||||
if (nread <= 0)
|
||||
synchronized (this) {
|
||||
this.wait(1000);
|
||||
}
|
||||
|
||||
return records;
|
||||
} catch (IOException e) {
|
||||
// Underlying stream was killed, probably as a result of calling stop. Allow poll() to return
// null; the driving thread will handle any shutdown if necessary.
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
private String extractLine() {
|
||||
int until = -1, newStart = -1;
|
||||
for (int i = 0; i < offset; i++) {
|
||||
if (buffer[i] == '\n') {
|
||||
until = i;
|
||||
newStart = i + 1;
|
||||
break;
|
||||
} else if (buffer[i] == '\r') {
|
||||
// We need to check for \r\n, so we must skip this if we can't check the next char
|
||||
if (i + 1 >= offset)
|
||||
return null;
|
||||
|
||||
until = i;
|
||||
newStart = (buffer[i + 1] == '\n') ? i + 2 : i + 1;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (until != -1) {
|
||||
String result = new String(buffer, 0, until);
|
||||
System.arraycopy(buffer, newStart, buffer, 0, buffer.length - newStart);
|
||||
offset = offset - newStart;
|
||||
if (streamOffset != null)
|
||||
streamOffset += newStart;
|
||||
return result;
|
||||
} else {
|
||||
return null;
|
||||
}
|
||||
}
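// Worked example (illustrative only): if the buffer holds the characters "ab\r\ncd" and offset is 6,
// extractLine() returns "ab", copies "cd" to the front of the buffer, sets offset to 2, and, when a
// file rather than stdin is being read, advances streamOffset by 4 to account for "ab\r\n".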
|
||||
|
||||
@Override
|
||||
public void stop() {
|
||||
log.trace("Stopping");
|
||||
synchronized (this) {
|
||||
try {
|
||||
if (stream != null && stream != System.in) {
|
||||
stream.close();
|
||||
log.trace("Closed input stream");
|
||||
}
|
||||
} catch (IOException e) {
|
||||
log.error("Failed to close FileStreamSourceTask stream: ", e);
|
||||
}
|
||||
this.notify();
|
||||
}
|
||||
}
|
||||
|
||||
private Map<String, String> offsetKey(String filename) {
|
||||
return Collections.singletonMap(FILENAME_FIELD, filename);
|
||||
}
|
||||
|
||||
private Map<String, Long> offsetValue(Long pos) {
|
||||
return Collections.singletonMap(POSITION_FIELD, pos);
|
||||
}
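// Illustrative offset shape (hypothetical path): for a task that has emitted 42 bytes of
// "/tmp/input.txt", the source partition is {"filename": "/tmp/input.txt"} and the source offset is
// {"position": 42}; poll() reads these back through context.offsetStorageReader() after a restart.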
|
||||
|
||||
private String logFilename() {
|
||||
return filename == null ? "stdin" : filename;
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,103 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.file;
|
||||
|
||||
import org.apache.kafka.common.config.ConfigValue;
|
||||
import org.apache.kafka.connect.connector.ConnectorContext;
|
||||
import org.apache.kafka.connect.sink.SinkConnector;
|
||||
import org.easymock.EasyMockSupport;
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertNull;
|
||||
|
||||
public class FileStreamSinkConnectorTest extends EasyMockSupport {
|
||||
|
||||
private static final String MULTIPLE_TOPICS = "test1,test2";
|
||||
private static final String FILENAME = "/afilename";
|
||||
|
||||
private FileStreamSinkConnector connector;
|
||||
private ConnectorContext ctx;
|
||||
private Map<String, String> sinkProperties;
|
||||
|
||||
@Before
|
||||
public void setup() {
|
||||
connector = new FileStreamSinkConnector();
|
||||
ctx = createMock(ConnectorContext.class);
|
||||
connector.initialize(ctx);
|
||||
|
||||
sinkProperties = new HashMap<>();
|
||||
sinkProperties.put(SinkConnector.TOPICS_CONFIG, MULTIPLE_TOPICS);
|
||||
sinkProperties.put(FileStreamSinkConnector.FILE_CONFIG, FILENAME);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testConnectorConfigValidation() {
|
||||
replayAll();
|
||||
List<ConfigValue> configValues = connector.config().validate(sinkProperties);
|
||||
for (ConfigValue val : configValues) {
|
||||
assertEquals("Config property errors: " + val.errorMessages(), 0, val.errorMessages().size());
|
||||
}
|
||||
verifyAll();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testSinkTasks() {
|
||||
replayAll();
|
||||
|
||||
connector.start(sinkProperties);
|
||||
List<Map<String, String>> taskConfigs = connector.taskConfigs(1);
|
||||
assertEquals(1, taskConfigs.size());
|
||||
assertEquals(FILENAME, taskConfigs.get(0).get(FileStreamSinkConnector.FILE_CONFIG));
|
||||
|
||||
taskConfigs = connector.taskConfigs(2);
|
||||
assertEquals(2, taskConfigs.size());
|
||||
for (int i = 0; i < 2; i++) {
|
||||
assertEquals(FILENAME, taskConfigs.get(i).get(FileStreamSinkConnector.FILE_CONFIG));
|
||||
}
|
||||
|
||||
verifyAll();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testSinkTasksStdout() {
|
||||
replayAll();
|
||||
|
||||
sinkProperties.remove(FileStreamSourceConnector.FILE_CONFIG);
|
||||
connector.start(sinkProperties);
|
||||
List<Map<String, String>> taskConfigs = connector.taskConfigs(1);
|
||||
assertEquals(1, taskConfigs.size());
|
||||
assertNull(taskConfigs.get(0).get(FileStreamSourceConnector.FILE_CONFIG));
|
||||
|
||||
verifyAll();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testTaskClass() {
|
||||
replayAll();
|
||||
|
||||
connector.start(sinkProperties);
|
||||
assertEquals(FileStreamSinkTask.class, connector.taskClass());
|
||||
|
||||
verifyAll();
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,119 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.file;
|
||||
|
||||
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
|
||||
import org.apache.kafka.common.TopicPartition;
|
||||
import org.apache.kafka.connect.data.Schema;
|
||||
import org.apache.kafka.connect.sink.SinkRecord;
|
||||
import org.junit.Before;
|
||||
import org.junit.Rule;
|
||||
import org.junit.Test;
|
||||
import org.junit.rules.TemporaryFolder;
|
||||
|
||||
import java.io.BufferedReader;
|
||||
import java.io.ByteArrayOutputStream;
|
||||
import java.io.File;
|
||||
import java.io.IOException;
|
||||
import java.io.PrintStream;
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.Paths;
|
||||
import java.util.Arrays;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
import java.util.UUID;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
|
||||
public class FileStreamSinkTaskTest {
|
||||
|
||||
private FileStreamSinkTask task;
|
||||
private ByteArrayOutputStream os;
|
||||
private PrintStream printStream;
|
||||
|
||||
@Rule
|
||||
public TemporaryFolder topDir = new TemporaryFolder();
|
||||
private String outputFile;
|
||||
|
||||
@Before
|
||||
public void setup() throws Exception {
|
||||
os = new ByteArrayOutputStream();
|
||||
printStream = new PrintStream(os);
|
||||
task = new FileStreamSinkTask(printStream);
|
||||
File outputDir = topDir.newFolder("file-stream-sink-" + UUID.randomUUID().toString());
|
||||
outputFile = outputDir.getCanonicalPath() + "/connect.output";
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testPutFlush() {
|
||||
HashMap<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
|
||||
final String newLine = System.getProperty("line.separator");
|
||||
|
||||
// We do not call task.start() since it would override the output stream
|
||||
|
||||
task.put(Arrays.asList(
|
||||
new SinkRecord("topic1", 0, null, null, Schema.STRING_SCHEMA, "line1", 1)
|
||||
));
|
||||
offsets.put(new TopicPartition("topic1", 0), new OffsetAndMetadata(1L));
|
||||
task.flush(offsets);
|
||||
assertEquals("line1" + newLine, os.toString());
|
||||
|
||||
task.put(Arrays.asList(
|
||||
new SinkRecord("topic1", 0, null, null, Schema.STRING_SCHEMA, "line2", 2),
|
||||
new SinkRecord("topic2", 0, null, null, Schema.STRING_SCHEMA, "line3", 1)
|
||||
));
|
||||
offsets.put(new TopicPartition("topic1", 0), new OffsetAndMetadata(2L));
|
||||
offsets.put(new TopicPartition("topic2", 0), new OffsetAndMetadata(1L));
|
||||
task.flush(offsets);
|
||||
assertEquals("line1" + newLine + "line2" + newLine + "line3" + newLine, os.toString());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testStart() throws IOException {
|
||||
task = new FileStreamSinkTask();
|
||||
Map<String, String> props = new HashMap<>();
|
||||
props.put(FileStreamSinkConnector.FILE_CONFIG, outputFile);
|
||||
task.start(props);
|
||||
|
||||
HashMap<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
|
||||
task.put(Arrays.asList(
|
||||
new SinkRecord("topic1", 0, null, null, Schema.STRING_SCHEMA, "line0", 1)
|
||||
));
|
||||
offsets.put(new TopicPartition("topic1", 0), new OffsetAndMetadata(1L));
|
||||
task.flush(offsets);
|
||||
|
||||
int numLines = 3;
|
||||
String[] lines = new String[numLines];
|
||||
int i = 0;
|
||||
try (BufferedReader reader = Files.newBufferedReader(Paths.get(outputFile))) {
|
||||
lines[i++] = reader.readLine();
|
||||
task.put(Arrays.asList(
|
||||
new SinkRecord("topic1", 0, null, null, Schema.STRING_SCHEMA, "line1", 2),
|
||||
new SinkRecord("topic2", 0, null, null, Schema.STRING_SCHEMA, "line2", 1)
|
||||
));
|
||||
offsets.put(new TopicPartition("topic1", 0), new OffsetAndMetadata(2L));
|
||||
offsets.put(new TopicPartition("topic2", 0), new OffsetAndMetadata(1L));
|
||||
task.flush(offsets);
|
||||
lines[i++] = reader.readLine();
|
||||
lines[i++] = reader.readLine();
|
||||
}
|
||||
|
||||
while (--i >= 0) {
|
||||
assertEquals("line" + i, lines[i]);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,135 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.file;
|
||||
|
||||
import org.apache.kafka.common.config.ConfigException;
|
||||
import org.apache.kafka.common.config.ConfigValue;
|
||||
import org.apache.kafka.connect.connector.ConnectorContext;
|
||||
import org.easymock.EasyMock;
|
||||
import org.easymock.EasyMockSupport;
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertNull;
|
||||
|
||||
public class FileStreamSourceConnectorTest extends EasyMockSupport {
|
||||
|
||||
private static final String SINGLE_TOPIC = "test";
|
||||
private static final String MULTIPLE_TOPICS = "test1,test2";
|
||||
private static final String FILENAME = "/somefilename";
|
||||
|
||||
private FileStreamSourceConnector connector;
|
||||
private ConnectorContext ctx;
|
||||
private Map<String, String> sourceProperties;
|
||||
|
||||
@Before
|
||||
public void setup() {
|
||||
connector = new FileStreamSourceConnector();
|
||||
ctx = createMock(ConnectorContext.class);
|
||||
connector.initialize(ctx);
|
||||
|
||||
sourceProperties = new HashMap<>();
|
||||
sourceProperties.put(FileStreamSourceConnector.TOPIC_CONFIG, SINGLE_TOPIC);
|
||||
sourceProperties.put(FileStreamSourceConnector.FILE_CONFIG, FILENAME);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testConnectorConfigValidation() {
|
||||
replayAll();
|
||||
List<ConfigValue> configValues = connector.config().validate(sourceProperties);
|
||||
for (ConfigValue val : configValues) {
|
||||
assertEquals("Config property errors: " + val.errorMessages(), 0, val.errorMessages().size());
|
||||
}
|
||||
verifyAll();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testSourceTasks() {
|
||||
replayAll();
|
||||
|
||||
connector.start(sourceProperties);
|
||||
List<Map<String, String>> taskConfigs = connector.taskConfigs(1);
|
||||
assertEquals(1, taskConfigs.size());
|
||||
assertEquals(FILENAME,
|
||||
taskConfigs.get(0).get(FileStreamSourceConnector.FILE_CONFIG));
|
||||
assertEquals(SINGLE_TOPIC,
|
||||
taskConfigs.get(0).get(FileStreamSourceConnector.TOPIC_CONFIG));
|
||||
|
||||
// Should be able to return fewer than requested #
|
||||
taskConfigs = connector.taskConfigs(2);
|
||||
assertEquals(1, taskConfigs.size());
|
||||
assertEquals(FILENAME,
|
||||
taskConfigs.get(0).get(FileStreamSourceConnector.FILE_CONFIG));
|
||||
assertEquals(SINGLE_TOPIC,
|
||||
taskConfigs.get(0).get(FileStreamSourceConnector.TOPIC_CONFIG));
|
||||
|
||||
verifyAll();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testSourceTasksStdin() {
|
||||
EasyMock.replay(ctx);
|
||||
|
||||
sourceProperties.remove(FileStreamSourceConnector.FILE_CONFIG);
|
||||
connector.start(sourceProperties);
|
||||
List<Map<String, String>> taskConfigs = connector.taskConfigs(1);
|
||||
assertEquals(1, taskConfigs.size());
|
||||
assertNull(taskConfigs.get(0).get(FileStreamSourceConnector.FILE_CONFIG));
|
||||
|
||||
EasyMock.verify(ctx);
|
||||
}
|
||||
|
||||
@Test(expected = ConfigException.class)
|
||||
public void testMultipleSourcesInvalid() {
|
||||
sourceProperties.put(FileStreamSourceConnector.TOPIC_CONFIG, MULTIPLE_TOPICS);
|
||||
connector.start(sourceProperties);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testTaskClass() {
|
||||
EasyMock.replay(ctx);
|
||||
|
||||
connector.start(sourceProperties);
|
||||
assertEquals(FileStreamSourceTask.class, connector.taskClass());
|
||||
|
||||
EasyMock.verify(ctx);
|
||||
}
|
||||
|
||||
@Test(expected = ConfigException.class)
|
||||
public void testMissingTopic() {
|
||||
sourceProperties.remove(FileStreamSourceConnector.TOPIC_CONFIG);
|
||||
connector.start(sourceProperties);
|
||||
}
|
||||
|
||||
@Test(expected = ConfigException.class)
|
||||
public void testBlankTopic() {
|
||||
// Because of trimming, this test is the same as testing for the empty string.
|
||||
sourceProperties.put(FileStreamSourceConnector.TOPIC_CONFIG, " ");
|
||||
connector.start(sourceProperties);
|
||||
}
|
||||
|
||||
@Test(expected = ConfigException.class)
|
||||
public void testInvalidBatchSize() {
|
||||
sourceProperties.put(FileStreamSourceConnector.TASK_BATCH_SIZE_CONFIG, "abcd");
|
||||
connector.start(sourceProperties);
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,186 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.file;
|
||||
|
||||
import org.apache.kafka.connect.source.SourceRecord;
|
||||
import org.apache.kafka.connect.source.SourceTaskContext;
|
||||
import org.apache.kafka.connect.storage.OffsetStorageReader;
|
||||
import org.easymock.EasyMock;
|
||||
import org.easymock.EasyMockSupport;
|
||||
import org.junit.After;
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
|
||||
import java.io.ByteArrayInputStream;
|
||||
import java.io.File;
|
||||
import java.io.IOException;
|
||||
import java.io.OutputStream;
|
||||
import java.nio.file.Files;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
|
||||
public class FileStreamSourceTaskTest extends EasyMockSupport {
|
||||
|
||||
private static final String TOPIC = "test";
|
||||
|
||||
private File tempFile;
|
||||
private Map<String, String> config;
|
||||
private OffsetStorageReader offsetStorageReader;
|
||||
private SourceTaskContext context;
|
||||
private FileStreamSourceTask task;
|
||||
|
||||
private boolean verifyMocks = false;
|
||||
|
||||
@Before
|
||||
public void setup() throws IOException {
|
||||
tempFile = File.createTempFile("file-stream-source-task-test", null);
|
||||
config = new HashMap<>();
|
||||
config.put(FileStreamSourceConnector.FILE_CONFIG, tempFile.getAbsolutePath());
|
||||
config.put(FileStreamSourceConnector.TOPIC_CONFIG, TOPIC);
|
||||
config.put(FileStreamSourceConnector.TASK_BATCH_SIZE_CONFIG, String.valueOf(FileStreamSourceConnector.DEFAULT_TASK_BATCH_SIZE));
|
||||
task = new FileStreamSourceTask();
|
||||
offsetStorageReader = createMock(OffsetStorageReader.class);
|
||||
context = createMock(SourceTaskContext.class);
|
||||
task.initialize(context);
|
||||
}
|
||||
|
||||
@After
|
||||
public void teardown() {
|
||||
tempFile.delete();
|
||||
|
||||
if (verifyMocks)
|
||||
verifyAll();
|
||||
}
|
||||
|
||||
private void replay() {
|
||||
replayAll();
|
||||
verifyMocks = true;
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testNormalLifecycle() throws InterruptedException, IOException {
|
||||
expectOffsetLookupReturnNone();
|
||||
replay();
|
||||
|
||||
task.start(config);
|
||||
|
||||
OutputStream os = Files.newOutputStream(tempFile.toPath());
|
||||
assertEquals(null, task.poll());
|
||||
os.write("partial line".getBytes());
|
||||
os.flush();
|
||||
assertEquals(null, task.poll());
|
||||
os.write(" finished\n".getBytes());
|
||||
os.flush();
|
||||
List<SourceRecord> records = task.poll();
|
||||
assertEquals(1, records.size());
|
||||
assertEquals(TOPIC, records.get(0).topic());
|
||||
assertEquals("partial line finished", records.get(0).value());
|
||||
assertEquals(Collections.singletonMap(FileStreamSourceTask.FILENAME_FIELD, tempFile.getAbsolutePath()), records.get(0).sourcePartition());
|
||||
assertEquals(Collections.singletonMap(FileStreamSourceTask.POSITION_FIELD, 22L), records.get(0).sourceOffset());
|
||||
assertEquals(null, task.poll());
|
||||
|
||||
// Different line endings, and make sure the final \r doesn't result in a line until we can
|
||||
// read the subsequent byte.
|
||||
os.write("line1\rline2\r\nline3\nline4\n\r".getBytes());
|
||||
os.flush();
|
||||
records = task.poll();
|
||||
assertEquals(4, records.size());
|
||||
assertEquals("line1", records.get(0).value());
|
||||
assertEquals(Collections.singletonMap(FileStreamSourceTask.FILENAME_FIELD, tempFile.getAbsolutePath()), records.get(0).sourcePartition());
|
||||
assertEquals(Collections.singletonMap(FileStreamSourceTask.POSITION_FIELD, 28L), records.get(0).sourceOffset());
|
||||
assertEquals("line2", records.get(1).value());
|
||||
assertEquals(Collections.singletonMap(FileStreamSourceTask.FILENAME_FIELD, tempFile.getAbsolutePath()), records.get(1).sourcePartition());
|
||||
assertEquals(Collections.singletonMap(FileStreamSourceTask.POSITION_FIELD, 35L), records.get(1).sourceOffset());
|
||||
assertEquals("line3", records.get(2).value());
|
||||
assertEquals(Collections.singletonMap(FileStreamSourceTask.FILENAME_FIELD, tempFile.getAbsolutePath()), records.get(2).sourcePartition());
|
||||
assertEquals(Collections.singletonMap(FileStreamSourceTask.POSITION_FIELD, 41L), records.get(2).sourceOffset());
|
||||
assertEquals("line4", records.get(3).value());
|
||||
assertEquals(Collections.singletonMap(FileStreamSourceTask.FILENAME_FIELD, tempFile.getAbsolutePath()), records.get(3).sourcePartition());
|
||||
assertEquals(Collections.singletonMap(FileStreamSourceTask.POSITION_FIELD, 47L), records.get(3).sourceOffset());
|
||||
|
||||
os.write("subsequent text".getBytes());
|
||||
os.flush();
|
||||
records = task.poll();
|
||||
assertEquals(1, records.size());
|
||||
assertEquals("", records.get(0).value());
|
||||
assertEquals(Collections.singletonMap(FileStreamSourceTask.FILENAME_FIELD, tempFile.getAbsolutePath()), records.get(0).sourcePartition());
|
||||
assertEquals(Collections.singletonMap(FileStreamSourceTask.POSITION_FIELD, 48L), records.get(0).sourceOffset());
|
||||
|
||||
os.close();
|
||||
task.stop();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testBatchSize() throws IOException, InterruptedException {
|
||||
expectOffsetLookupReturnNone();
|
||||
replay();
|
||||
|
||||
config.put(FileStreamSourceConnector.TASK_BATCH_SIZE_CONFIG, "5000");
|
||||
task.start(config);
|
||||
|
||||
OutputStream os = Files.newOutputStream(tempFile.toPath());
|
||||
for (int i = 0; i < 10_000; i++) {
|
||||
os.write("Neque porro quisquam est qui dolorem ipsum quia dolor sit amet, consectetur, adipisci velit...\n".getBytes());
|
||||
}
|
||||
os.flush();
|
||||
|
||||
List<SourceRecord> records = task.poll();
|
||||
assertEquals(5000, records.size());
|
||||
|
||||
records = task.poll();
|
||||
assertEquals(5000, records.size());
|
||||
|
||||
os.close();
|
||||
task.stop();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testMissingFile() throws InterruptedException {
|
||||
replay();
|
||||
|
||||
String data = "line\n";
|
||||
System.setIn(new ByteArrayInputStream(data.getBytes()));
|
||||
|
||||
config.remove(FileStreamSourceConnector.FILE_CONFIG);
|
||||
task.start(config);
|
||||
|
||||
List<SourceRecord> records = task.poll();
|
||||
assertEquals(1, records.size());
|
||||
assertEquals(TOPIC, records.get(0).topic());
|
||||
assertEquals("line", records.get(0).value());
|
||||
|
||||
task.stop();
|
||||
}
|
||||
|
||||
public void testInvalidFile() throws InterruptedException {
|
||||
config.put(FileStreamSourceConnector.FILE_CONFIG, "bogusfilename");
|
||||
task.start(config);
|
||||
// Currently the task retries indefinitely if the file isn't found, but shouldn't return any data.
|
||||
for (int i = 0; i < 100; i++)
|
||||
assertEquals(null, task.poll());
|
||||
}
|
||||
|
||||
|
||||
private void expectOffsetLookupReturnNone() {
|
||||
EasyMock.expect(context.offsetStorageReader()).andReturn(offsetStorageReader);
|
||||
EasyMock.expect(offsetStorageReader.offset(EasyMock.<Map<String, String>>anyObject())).andReturn(null);
|
||||
}
|
||||
}
|
||||
1
connect/json/.gitignore
vendored
Normal file
@@ -0,0 +1 @@
|
||||
/bin/
|
||||
@@ -0,0 +1,36 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.json;
|
||||
|
||||
/**
|
||||
* Represents the valid {@link org.apache.kafka.connect.data.Decimal} serialization formats
|
||||
* in a {@link JsonConverter}.
|
||||
*/
|
||||
public enum DecimalFormat {
|
||||
|
||||
/**
|
||||
* Serializes the JSON Decimal as a base-64 string. For example, serializing the value
|
||||
* `10.2345` with the BASE64 setting will result in `"D3J5"`.
|
||||
*/
|
||||
BASE64,
|
||||
|
||||
/**
|
||||
* Serializes the JSON Decimal as a JSON number. For example, serializing the value
|
||||
* `10.2345` with the NUMERIC setting will result in `10.2345`.
|
||||
*/
|
||||
NUMERIC
|
||||
}
|
||||
@@ -0,0 +1,783 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.json;
|
||||
|
||||
import com.fasterxml.jackson.databind.DeserializationFeature;
|
||||
import com.fasterxml.jackson.databind.JsonNode;
|
||||
import com.fasterxml.jackson.databind.node.ArrayNode;
|
||||
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
|
||||
import com.fasterxml.jackson.databind.node.ObjectNode;
|
||||
import org.apache.kafka.common.cache.Cache;
|
||||
import org.apache.kafka.common.cache.LRUCache;
|
||||
import org.apache.kafka.common.cache.SynchronizedCache;
|
||||
import org.apache.kafka.common.config.ConfigDef;
|
||||
import org.apache.kafka.common.errors.SerializationException;
|
||||
import org.apache.kafka.connect.data.SchemaBuilder;
|
||||
import org.apache.kafka.connect.data.Schema;
|
||||
import org.apache.kafka.connect.data.Struct;
|
||||
import org.apache.kafka.connect.data.Field;
|
||||
import org.apache.kafka.connect.data.ConnectSchema;
|
||||
import org.apache.kafka.connect.data.SchemaAndValue;
|
||||
import org.apache.kafka.connect.data.Timestamp;
|
||||
import org.apache.kafka.connect.data.Time;
|
||||
import org.apache.kafka.connect.data.Decimal;
|
||||
import org.apache.kafka.connect.data.Date;
|
||||
import org.apache.kafka.connect.errors.DataException;
|
||||
import org.apache.kafka.connect.storage.Converter;
|
||||
import org.apache.kafka.connect.storage.ConverterType;
|
||||
import org.apache.kafka.connect.storage.HeaderConverter;
|
||||
import org.apache.kafka.connect.storage.StringConverterConfig;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.math.BigDecimal;
|
||||
import java.nio.ByteBuffer;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
import java.util.EnumMap;
|
||||
import java.util.HashMap;
|
||||
import java.util.Iterator;
|
||||
import java.util.Map;
|
||||
|
||||
import static org.apache.kafka.common.utils.Utils.mkSet;
|
||||
|
||||
/**
|
||||
* Implementation of Converter that uses JSON to store schemas and objects. By default this converter will serialize Connect keys, values,
* and headers with schemas, although this can be disabled with the {@link JsonConverterConfig#SCHEMAS_ENABLE_CONFIG schemas.enable}
* configuration option.
|
||||
*
|
||||
* This implementation currently does nothing with the topic names or header names.
|
||||
*/
|
||||
public class JsonConverter implements Converter, HeaderConverter {
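// Illustrative envelope (not part of the original source): with schemas.enable=true a STRING value
// "foo" is serialized as {"schema":{"type":"string","optional":false},"payload":"foo"}, while with
// schemas.enable=false only the bare JSON value "foo" is written.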
|
||||
|
||||
private static final Map<Schema.Type, JsonToConnectTypeConverter> TO_CONNECT_CONVERTERS = new EnumMap<>(Schema.Type.class);
|
||||
|
||||
static {
|
||||
TO_CONNECT_CONVERTERS.put(Schema.Type.BOOLEAN, new JsonToConnectTypeConverter() {
|
||||
@Override
|
||||
public Object convert(Schema schema, JsonNode value) {
|
||||
return value.booleanValue();
|
||||
}
|
||||
});
|
||||
TO_CONNECT_CONVERTERS.put(Schema.Type.INT8, new JsonToConnectTypeConverter() {
|
||||
@Override
|
||||
public Object convert(Schema schema, JsonNode value) {
|
||||
return (byte) value.intValue();
|
||||
}
|
||||
});
|
||||
TO_CONNECT_CONVERTERS.put(Schema.Type.INT16, new JsonToConnectTypeConverter() {
|
||||
@Override
|
||||
public Object convert(Schema schema, JsonNode value) {
|
||||
return (short) value.intValue();
|
||||
}
|
||||
});
|
||||
TO_CONNECT_CONVERTERS.put(Schema.Type.INT32, new JsonToConnectTypeConverter() {
|
||||
@Override
|
||||
public Object convert(Schema schema, JsonNode value) {
|
||||
return value.intValue();
|
||||
}
|
||||
});
|
||||
TO_CONNECT_CONVERTERS.put(Schema.Type.INT64, new JsonToConnectTypeConverter() {
|
||||
@Override
|
||||
public Object convert(Schema schema, JsonNode value) {
|
||||
return value.longValue();
|
||||
}
|
||||
});
|
||||
TO_CONNECT_CONVERTERS.put(Schema.Type.FLOAT32, new JsonToConnectTypeConverter() {
|
||||
@Override
|
||||
public Object convert(Schema schema, JsonNode value) {
|
||||
return value.floatValue();
|
||||
}
|
||||
});
|
||||
TO_CONNECT_CONVERTERS.put(Schema.Type.FLOAT64, new JsonToConnectTypeConverter() {
|
||||
@Override
|
||||
public Object convert(Schema schema, JsonNode value) {
|
||||
return value.doubleValue();
|
||||
}
|
||||
});
|
||||
TO_CONNECT_CONVERTERS.put(Schema.Type.BYTES, new JsonToConnectTypeConverter() {
|
||||
@Override
|
||||
public Object convert(Schema schema, JsonNode value) {
|
||||
try {
|
||||
return value.binaryValue();
|
||||
} catch (IOException e) {
|
||||
throw new DataException("Invalid bytes field", e);
|
||||
}
|
||||
}
|
||||
});
|
||||
TO_CONNECT_CONVERTERS.put(Schema.Type.STRING, new JsonToConnectTypeConverter() {
|
||||
@Override
|
||||
public Object convert(Schema schema, JsonNode value) {
|
||||
return value.textValue();
|
||||
}
|
||||
});
|
||||
TO_CONNECT_CONVERTERS.put(Schema.Type.ARRAY, new JsonToConnectTypeConverter() {
|
||||
@Override
|
||||
public Object convert(Schema schema, JsonNode value) {
|
||||
Schema elemSchema = schema == null ? null : schema.valueSchema();
|
||||
ArrayList<Object> result = new ArrayList<>();
|
||||
for (JsonNode elem : value) {
|
||||
result.add(convertToConnect(elemSchema, elem));
|
||||
}
|
||||
return result;
|
||||
}
|
||||
});
|
||||
TO_CONNECT_CONVERTERS.put(Schema.Type.MAP, new JsonToConnectTypeConverter() {
|
||||
@Override
|
||||
public Object convert(Schema schema, JsonNode value) {
|
||||
Schema keySchema = schema == null ? null : schema.keySchema();
|
||||
Schema valueSchema = schema == null ? null : schema.valueSchema();
|
||||
|
||||
// If the map uses strings for keys, it should be encoded in the natural JSON format. If it uses other
|
||||
// primitive types or a complex type as a key, it will be encoded as a list of pairs. If we don't have a
|
||||
// schema, we default to encoding in a Map.
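// Illustrative examples: a map with STRING keys arrives as {"a": 1, "b": 2}, whereas a map with
// INT32 keys arrives as an array of two-element arrays such as [[1, "x"], [2, "y"]].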
|
||||
Map<Object, Object> result = new HashMap<>();
|
||||
if (schema == null || keySchema.type() == Schema.Type.STRING) {
|
||||
if (!value.isObject())
|
||||
throw new DataException("Maps with string fields should be encoded as JSON objects, but found " + value.getNodeType());
|
||||
Iterator<Map.Entry<String, JsonNode>> fieldIt = value.fields();
|
||||
while (fieldIt.hasNext()) {
|
||||
Map.Entry<String, JsonNode> entry = fieldIt.next();
|
||||
result.put(entry.getKey(), convertToConnect(valueSchema, entry.getValue()));
|
||||
}
|
||||
} else {
|
||||
if (!value.isArray())
|
||||
throw new DataException("Maps with non-string fields should be encoded as JSON array of tuples, but found " + value.getNodeType());
|
||||
for (JsonNode entry : value) {
|
||||
if (!entry.isArray())
|
||||
throw new DataException("Found invalid map entry instead of array tuple: " + entry.getNodeType());
|
||||
if (entry.size() != 2)
|
||||
throw new DataException("Found invalid map entry, expected length 2 but found :" + entry.size());
|
||||
result.put(convertToConnect(keySchema, entry.get(0)),
|
||||
convertToConnect(valueSchema, entry.get(1)));
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
});
|
||||
TO_CONNECT_CONVERTERS.put(Schema.Type.STRUCT, new JsonToConnectTypeConverter() {
|
||||
@Override
|
||||
public Object convert(Schema schema, JsonNode value) {
|
||||
if (!value.isObject())
|
||||
throw new DataException("Structs should be encoded as JSON objects, but found " + value.getNodeType());
|
||||
|
||||
// We only have ISchema here but need Schema, so we need to materialize the actual schema. Using ISchema
|
||||
// avoids having to materialize the schema for non-Struct types but it cannot be avoided for Structs since
|
||||
// they require a schema to be provided at construction. However, the schema is only a SchemaBuilder during
|
||||
// translation of schemas to JSON; during the more common translation of data to JSON, the call to schema.schema()
|
||||
// just returns the schema Object and has no overhead.
|
||||
Struct result = new Struct(schema.schema());
|
||||
for (Field field : schema.fields())
|
||||
result.put(field, convertToConnect(field.schema(), value.get(field.name())));
|
||||
|
||||
return result;
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
// Convert values in Kafka Connect form into/from their logical types. These logical converters are discovered by logical type
|
||||
// names specified in the field
|
||||
private static final HashMap<String, LogicalTypeConverter> LOGICAL_CONVERTERS = new HashMap<>();
|
||||
|
||||
private static final JsonNodeFactory JSON_NODE_FACTORY = JsonNodeFactory.withExactBigDecimals(true);
|
||||
|
||||
static {
|
||||
LOGICAL_CONVERTERS.put(Decimal.LOGICAL_NAME, new LogicalTypeConverter() {
|
||||
@Override
|
||||
public JsonNode toJson(final Schema schema, final Object value, final JsonConverterConfig config) {
|
||||
if (!(value instanceof BigDecimal))
|
||||
throw new DataException("Invalid type for Decimal, expected BigDecimal but was " + value.getClass());
|
||||
|
||||
final BigDecimal decimal = (BigDecimal) value;
|
||||
switch (config.decimalFormat()) {
|
||||
case NUMERIC:
|
||||
return JSON_NODE_FACTORY.numberNode(decimal);
|
||||
case BASE64:
|
||||
return JSON_NODE_FACTORY.binaryNode(Decimal.fromLogical(schema, decimal));
|
||||
default:
|
||||
throw new DataException("Unexpected " + JsonConverterConfig.DECIMAL_FORMAT_CONFIG + ": " + config.decimalFormat());
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public Object toConnect(final Schema schema, final JsonNode value) {
|
||||
if (value.isNumber()) return value.decimalValue();
|
||||
if (value.isBinary() || value.isTextual()) {
|
||||
try {
|
||||
return Decimal.toLogical(schema, value.binaryValue());
|
||||
} catch (Exception e) {
|
||||
throw new DataException("Invalid bytes for Decimal field", e);
|
||||
}
|
||||
}
|
||||
|
||||
throw new DataException("Invalid type for Decimal, underlying representation should be numeric or bytes but was " + value.getNodeType());
|
||||
}
|
||||
});
|
||||
|
||||
LOGICAL_CONVERTERS.put(Date.LOGICAL_NAME, new LogicalTypeConverter() {
|
||||
@Override
|
||||
public JsonNode toJson(final Schema schema, final Object value, final JsonConverterConfig config) {
|
||||
if (!(value instanceof java.util.Date))
|
||||
throw new DataException("Invalid type for Date, expected Date but was " + value.getClass());
|
||||
return JSON_NODE_FACTORY.numberNode(Date.fromLogical(schema, (java.util.Date) value));
|
||||
}
|
||||
|
||||
@Override
|
||||
public Object toConnect(final Schema schema, final JsonNode value) {
|
||||
if (!(value.isInt()))
|
||||
throw new DataException("Invalid type for Date, underlying representation should be integer but was " + value.getNodeType());
|
||||
return Date.toLogical(schema, value.intValue());
|
||||
}
|
||||
});
|
||||
|
||||
LOGICAL_CONVERTERS.put(Time.LOGICAL_NAME, new LogicalTypeConverter() {
|
||||
@Override
|
||||
public JsonNode toJson(final Schema schema, final Object value, final JsonConverterConfig config) {
|
||||
if (!(value instanceof java.util.Date))
|
||||
throw new DataException("Invalid type for Time, expected Date but was " + value.getClass());
|
||||
return JSON_NODE_FACTORY.numberNode(Time.fromLogical(schema, (java.util.Date) value));
|
||||
}
|
||||
|
||||
@Override
|
||||
public Object toConnect(final Schema schema, final JsonNode value) {
|
||||
if (!(value.isInt()))
|
||||
throw new DataException("Invalid type for Time, underlying representation should be integer but was " + value.getNodeType());
|
||||
return Time.toLogical(schema, value.intValue());
|
||||
}
|
||||
});
|
||||
|
||||
LOGICAL_CONVERTERS.put(Timestamp.LOGICAL_NAME, new LogicalTypeConverter() {
|
||||
@Override
|
||||
public JsonNode toJson(final Schema schema, final Object value, final JsonConverterConfig config) {
|
||||
if (!(value instanceof java.util.Date))
|
||||
throw new DataException("Invalid type for Timestamp, expected Date but was " + value.getClass());
|
||||
return JSON_NODE_FACTORY.numberNode(Timestamp.fromLogical(schema, (java.util.Date) value));
|
||||
}
|
||||
|
||||
@Override
|
||||
public Object toConnect(final Schema schema, final JsonNode value) {
|
||||
if (!(value.isIntegralNumber()))
|
||||
throw new DataException("Invalid type for Timestamp, underlying representation should be integral but was " + value.getNodeType());
|
||||
return Timestamp.toLogical(schema, value.longValue());
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
private JsonConverterConfig config;
|
||||
private Cache<Schema, ObjectNode> fromConnectSchemaCache;
|
||||
private Cache<JsonNode, Schema> toConnectSchemaCache;
|
||||
|
||||
private final JsonSerializer serializer;
|
||||
private final JsonDeserializer deserializer;
|
||||
|
||||
public JsonConverter() {
|
||||
serializer = new JsonSerializer(
|
||||
mkSet(),
|
||||
JSON_NODE_FACTORY
|
||||
);
|
||||
|
||||
deserializer = new JsonDeserializer(
|
||||
mkSet(
|
||||
// this ensures that the JsonDeserializer maintains full precision on
|
||||
// floating point numbers that cannot fit into float64
|
||||
DeserializationFeature.USE_BIG_DECIMAL_FOR_FLOATS
|
||||
),
|
||||
JSON_NODE_FACTORY
|
||||
);
|
||||
}
|
||||
|
||||
@Override
|
||||
public ConfigDef config() {
|
||||
return JsonConverterConfig.configDef();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void configure(Map<String, ?> configs) {
|
||||
config = new JsonConverterConfig(configs);
|
||||
|
||||
serializer.configure(configs, config.type() == ConverterType.KEY);
|
||||
deserializer.configure(configs, config.type() == ConverterType.KEY);
|
||||
|
||||
fromConnectSchemaCache = new SynchronizedCache<>(new LRUCache<>(config.schemaCacheSize()));
|
||||
toConnectSchemaCache = new SynchronizedCache<>(new LRUCache<>(config.schemaCacheSize()));
|
||||
}
|
||||
|
||||
@Override
|
||||
public void configure(Map<String, ?> configs, boolean isKey) {
|
||||
Map<String, Object> conf = new HashMap<>(configs);
|
||||
conf.put(StringConverterConfig.TYPE_CONFIG, isKey ? ConverterType.KEY.getName() : ConverterType.VALUE.getName());
|
||||
configure(conf);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void close() {
|
||||
// do nothing
|
||||
}
|
||||
|
||||
@Override
|
||||
public byte[] fromConnectHeader(String topic, String headerKey, Schema schema, Object value) {
|
||||
return fromConnectData(topic, schema, value);
|
||||
}
|
||||
|
||||
@Override
|
||||
public SchemaAndValue toConnectHeader(String topic, String headerKey, byte[] value) {
|
||||
return toConnectData(topic, value);
|
||||
}
|
||||
|
||||
@Override
|
||||
public byte[] fromConnectData(String topic, Schema schema, Object value) {
|
||||
if (schema == null && value == null) {
|
||||
return null;
|
||||
}
|
||||
|
||||
JsonNode jsonValue = config.schemasEnabled() ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value);
|
||||
try {
|
||||
return serializer.serialize(topic, jsonValue);
|
||||
} catch (SerializationException e) {
|
||||
throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public SchemaAndValue toConnectData(String topic, byte[] value) {
|
||||
JsonNode jsonValue;
|
||||
|
||||
// This handles a tombstone message
|
||||
if (value == null) {
|
||||
return SchemaAndValue.NULL;
|
||||
}
|
||||
|
||||
try {
|
||||
jsonValue = deserializer.deserialize(topic, value);
|
||||
} catch (SerializationException e) {
|
||||
throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e);
|
||||
}
|
||||
|
||||
if (config.schemasEnabled() && (!jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME) || !jsonValue.has(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME)))
|
||||
throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." +
|
||||
" If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration.");
|
||||
|
||||
// The deserialized data should either be an envelope object containing the schema and the payload or the schema
|
||||
// was stripped during serialization and we need to fill in an all-encompassing schema.
|
||||
if (!config.schemasEnabled()) {
|
||||
ObjectNode envelope = JSON_NODE_FACTORY.objectNode();
|
||||
envelope.set(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME, null);
|
||||
envelope.set(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME, jsonValue);
|
||||
jsonValue = envelope;
|
||||
}
|
||||
|
||||
Schema schema = asConnectSchema(jsonValue.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME));
|
||||
return new SchemaAndValue(
|
||||
schema,
|
||||
convertToConnect(schema, jsonValue.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME))
|
||||
);
|
||||
}
|
||||
|
||||
public ObjectNode asJsonSchema(Schema schema) {
|
||||
if (schema == null)
|
||||
return null;
|
||||
|
||||
ObjectNode cached = fromConnectSchemaCache.get(schema);
|
||||
if (cached != null)
|
||||
return cached;
|
||||
|
||||
final ObjectNode jsonSchema;
|
||||
switch (schema.type()) {
|
||||
case BOOLEAN:
|
||||
jsonSchema = JsonSchema.BOOLEAN_SCHEMA.deepCopy();
|
||||
break;
|
||||
case BYTES:
|
||||
jsonSchema = JsonSchema.BYTES_SCHEMA.deepCopy();
|
||||
break;
|
||||
case FLOAT64:
|
||||
jsonSchema = JsonSchema.DOUBLE_SCHEMA.deepCopy();
|
||||
break;
|
||||
case FLOAT32:
|
||||
jsonSchema = JsonSchema.FLOAT_SCHEMA.deepCopy();
|
||||
break;
|
||||
case INT8:
|
||||
jsonSchema = JsonSchema.INT8_SCHEMA.deepCopy();
|
||||
break;
|
||||
case INT16:
|
||||
jsonSchema = JsonSchema.INT16_SCHEMA.deepCopy();
|
||||
break;
|
||||
case INT32:
|
||||
jsonSchema = JsonSchema.INT32_SCHEMA.deepCopy();
|
||||
break;
|
||||
case INT64:
|
||||
jsonSchema = JsonSchema.INT64_SCHEMA.deepCopy();
|
||||
break;
|
||||
case STRING:
|
||||
jsonSchema = JsonSchema.STRING_SCHEMA.deepCopy();
|
||||
break;
|
||||
case ARRAY:
|
||||
jsonSchema = JSON_NODE_FACTORY.objectNode().put(JsonSchema.SCHEMA_TYPE_FIELD_NAME, JsonSchema.ARRAY_TYPE_NAME);
|
||||
jsonSchema.set(JsonSchema.ARRAY_ITEMS_FIELD_NAME, asJsonSchema(schema.valueSchema()));
|
||||
break;
|
||||
case MAP:
|
||||
jsonSchema = JSON_NODE_FACTORY.objectNode().put(JsonSchema.SCHEMA_TYPE_FIELD_NAME, JsonSchema.MAP_TYPE_NAME);
|
||||
jsonSchema.set(JsonSchema.MAP_KEY_FIELD_NAME, asJsonSchema(schema.keySchema()));
|
||||
jsonSchema.set(JsonSchema.MAP_VALUE_FIELD_NAME, asJsonSchema(schema.valueSchema()));
|
||||
break;
|
||||
case STRUCT:
|
||||
jsonSchema = JSON_NODE_FACTORY.objectNode().put(JsonSchema.SCHEMA_TYPE_FIELD_NAME, JsonSchema.STRUCT_TYPE_NAME);
|
||||
ArrayNode fields = JSON_NODE_FACTORY.arrayNode();
|
||||
for (Field field : schema.fields()) {
|
||||
ObjectNode fieldJsonSchema = asJsonSchema(field.schema()).deepCopy();
|
||||
fieldJsonSchema.put(JsonSchema.STRUCT_FIELD_NAME_FIELD_NAME, field.name());
|
||||
fields.add(fieldJsonSchema);
|
||||
}
|
||||
jsonSchema.set(JsonSchema.STRUCT_FIELDS_FIELD_NAME, fields);
|
||||
break;
|
||||
default:
|
||||
throw new DataException("Couldn't translate unsupported schema type " + schema + ".");
|
||||
}
|
||||
|
||||
jsonSchema.put(JsonSchema.SCHEMA_OPTIONAL_FIELD_NAME, schema.isOptional());
|
||||
if (schema.name() != null)
|
||||
jsonSchema.put(JsonSchema.SCHEMA_NAME_FIELD_NAME, schema.name());
|
||||
if (schema.version() != null)
|
||||
jsonSchema.put(JsonSchema.SCHEMA_VERSION_FIELD_NAME, schema.version());
|
||||
if (schema.doc() != null)
|
||||
jsonSchema.put(JsonSchema.SCHEMA_DOC_FIELD_NAME, schema.doc());
|
||||
if (schema.parameters() != null) {
|
||||
ObjectNode jsonSchemaParams = JSON_NODE_FACTORY.objectNode();
|
||||
for (Map.Entry<String, String> prop : schema.parameters().entrySet())
|
||||
jsonSchemaParams.put(prop.getKey(), prop.getValue());
|
||||
jsonSchema.set(JsonSchema.SCHEMA_PARAMETERS_FIELD_NAME, jsonSchemaParams);
|
||||
}
|
||||
if (schema.defaultValue() != null)
|
||||
jsonSchema.set(JsonSchema.SCHEMA_DEFAULT_FIELD_NAME, convertToJson(schema, schema.defaultValue()));
|
||||
|
||||
fromConnectSchemaCache.put(schema, jsonSchema);
|
||||
return jsonSchema;
|
||||
}
|
||||
|
||||
|
||||
    public Schema asConnectSchema(JsonNode jsonSchema) {
        if (jsonSchema.isNull())
            return null;

        Schema cached = toConnectSchemaCache.get(jsonSchema);
        if (cached != null)
            return cached;

        JsonNode schemaTypeNode = jsonSchema.get(JsonSchema.SCHEMA_TYPE_FIELD_NAME);
        if (schemaTypeNode == null || !schemaTypeNode.isTextual())
            throw new DataException("Schema must contain 'type' field");

        final SchemaBuilder builder;
        switch (schemaTypeNode.textValue()) {
            case JsonSchema.BOOLEAN_TYPE_NAME:
                builder = SchemaBuilder.bool();
                break;
            case JsonSchema.INT8_TYPE_NAME:
                builder = SchemaBuilder.int8();
                break;
            case JsonSchema.INT16_TYPE_NAME:
                builder = SchemaBuilder.int16();
                break;
            case JsonSchema.INT32_TYPE_NAME:
                builder = SchemaBuilder.int32();
                break;
            case JsonSchema.INT64_TYPE_NAME:
                builder = SchemaBuilder.int64();
                break;
            case JsonSchema.FLOAT_TYPE_NAME:
                builder = SchemaBuilder.float32();
                break;
            case JsonSchema.DOUBLE_TYPE_NAME:
                builder = SchemaBuilder.float64();
                break;
            case JsonSchema.BYTES_TYPE_NAME:
                builder = SchemaBuilder.bytes();
                break;
            case JsonSchema.STRING_TYPE_NAME:
                builder = SchemaBuilder.string();
                break;
            case JsonSchema.ARRAY_TYPE_NAME:
                JsonNode elemSchema = jsonSchema.get(JsonSchema.ARRAY_ITEMS_FIELD_NAME);
                if (elemSchema == null || elemSchema.isNull())
                    throw new DataException("Array schema did not specify the element type");
                builder = SchemaBuilder.array(asConnectSchema(elemSchema));
                break;
            case JsonSchema.MAP_TYPE_NAME:
                JsonNode keySchema = jsonSchema.get(JsonSchema.MAP_KEY_FIELD_NAME);
                if (keySchema == null)
                    throw new DataException("Map schema did not specify the key type");
                JsonNode valueSchema = jsonSchema.get(JsonSchema.MAP_VALUE_FIELD_NAME);
                if (valueSchema == null)
                    throw new DataException("Map schema did not specify the value type");
                builder = SchemaBuilder.map(asConnectSchema(keySchema), asConnectSchema(valueSchema));
                break;
            case JsonSchema.STRUCT_TYPE_NAME:
                builder = SchemaBuilder.struct();
                JsonNode fields = jsonSchema.get(JsonSchema.STRUCT_FIELDS_FIELD_NAME);
                if (fields == null || !fields.isArray())
                    throw new DataException("Struct schema's \"fields\" argument is not an array.");
                for (JsonNode field : fields) {
                    JsonNode jsonFieldName = field.get(JsonSchema.STRUCT_FIELD_NAME_FIELD_NAME);
                    if (jsonFieldName == null || !jsonFieldName.isTextual())
                        throw new DataException("Struct schema's field name not specified properly");
                    builder.field(jsonFieldName.asText(), asConnectSchema(field));
                }
                break;
            default:
                throw new DataException("Unknown schema type: " + schemaTypeNode.textValue());
        }

        JsonNode schemaOptionalNode = jsonSchema.get(JsonSchema.SCHEMA_OPTIONAL_FIELD_NAME);
        if (schemaOptionalNode != null && schemaOptionalNode.isBoolean() && schemaOptionalNode.booleanValue())
            builder.optional();
        else
            builder.required();

        JsonNode schemaNameNode = jsonSchema.get(JsonSchema.SCHEMA_NAME_FIELD_NAME);
        if (schemaNameNode != null && schemaNameNode.isTextual())
            builder.name(schemaNameNode.textValue());

        JsonNode schemaVersionNode = jsonSchema.get(JsonSchema.SCHEMA_VERSION_FIELD_NAME);
        if (schemaVersionNode != null && schemaVersionNode.isIntegralNumber()) {
            builder.version(schemaVersionNode.intValue());
        }

        JsonNode schemaDocNode = jsonSchema.get(JsonSchema.SCHEMA_DOC_FIELD_NAME);
        if (schemaDocNode != null && schemaDocNode.isTextual())
            builder.doc(schemaDocNode.textValue());

        JsonNode schemaParamsNode = jsonSchema.get(JsonSchema.SCHEMA_PARAMETERS_FIELD_NAME);
        if (schemaParamsNode != null && schemaParamsNode.isObject()) {
            Iterator<Map.Entry<String, JsonNode>> paramsIt = schemaParamsNode.fields();
            while (paramsIt.hasNext()) {
                Map.Entry<String, JsonNode> entry = paramsIt.next();
                JsonNode paramValue = entry.getValue();
                if (!paramValue.isTextual())
                    throw new DataException("Schema parameters must have string values.");
                builder.parameter(entry.getKey(), paramValue.textValue());
            }
        }

        JsonNode schemaDefaultNode = jsonSchema.get(JsonSchema.SCHEMA_DEFAULT_FIELD_NAME);
        if (schemaDefaultNode != null)
            builder.defaultValue(convertToConnect(builder, schemaDefaultNode));

        Schema result = builder.build();
        toConnectSchemaCache.put(jsonSchema, result);
        return result;
    }

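To illustrate the schema JSON that asConnectSchema() parses, here is a minimal usage sketch. It is not part of the patch; it assumes the connect-json classes in this diff and Jackson are on the classpath, and the class/topic names are made up for the example.

// Hypothetical usage sketch for asConnectSchema(JsonNode); not part of the diff.
import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.json.JsonConverter;

import java.util.Collections;

public class AsConnectSchemaSketch {
    public static void main(String[] args) throws Exception {
        JsonConverter converter = new JsonConverter();
        // configure() builds the internal schema caches used above; isKey=false marks a value converter
        converter.configure(Collections.emptyMap(), false);

        // The "keys"/"values" sub-schemas correspond to JsonSchema.MAP_KEY_FIELD_NAME / MAP_VALUE_FIELD_NAME
        String schemaJson = "{\"type\": \"map\", \"keys\": {\"type\": \"string\"},"
                + " \"values\": {\"type\": \"int32\"}, \"optional\": true, \"name\": \"example.counts\"}";
        Schema schema = converter.asConnectSchema(new ObjectMapper().readTree(schemaJson));
        System.out.println(schema.type() + " optional=" + schema.isOptional() + " name=" + schema.name());
        // expected output: MAP optional=true name=example.counts
    }
}
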
    /**
     * Convert this object, in org.apache.kafka.connect.data format, into a JSON object with an envelope object
     * containing schema and payload fields.
     * @param schema the schema for the data
     * @param value the value
     * @return JsonNode-encoded version
     */
    private JsonNode convertToJsonWithEnvelope(Schema schema, Object value) {
        return new JsonSchema.Envelope(asJsonSchema(schema), convertToJson(schema, value)).toJsonNode();
    }

    private JsonNode convertToJsonWithoutEnvelope(Schema schema, Object value) {
        return convertToJson(schema, value);
    }

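The two helpers above differ only in whether the converted payload is wrapped in the {"schema": ..., "payload": ...} envelope built by JsonSchema.envelope; which one is used follows the schemas.enable setting defined in JsonConverterConfig further down. A hedged sketch, not part of the patch, of how the difference surfaces through the standard Converter API (the printed strings are the expected shapes, not output captured from this code):

// Illustrative sketch of envelope vs. bare-payload output; not part of the diff.
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.json.JsonConverter;

import java.nio.charset.StandardCharsets;
import java.util.Collections;

public class EnvelopeSketch {
    public static void main(String[] args) {
        // With schemas enabled (the default), serialization goes through convertToJsonWithEnvelope
        JsonConverter withSchemas = new JsonConverter();
        withSchemas.configure(Collections.singletonMap("schemas.enable", "true"), false);
        byte[] enveloped = withSchemas.fromConnectData("topic", Schema.INT32_SCHEMA, 42);
        System.out.println(new String(enveloped, StandardCharsets.UTF_8));
        // expected: {"schema":{"type":"int32","optional":false},"payload":42}

        // With schemas disabled, only the bare payload is emitted (convertToJsonWithoutEnvelope)
        JsonConverter withoutSchemas = new JsonConverter();
        withoutSchemas.configure(Collections.singletonMap("schemas.enable", "false"), false);
        byte[] bare = withoutSchemas.fromConnectData("topic", Schema.INT32_SCHEMA, 42);
        System.out.println(new String(bare, StandardCharsets.UTF_8)); // expected: 42
    }
}
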
    /**
     * Convert the given value, in org.apache.kafka.connect.data format, into its JSON representation. Only the
     * converted value is returned; the schema/payload envelope is added by {@code convertToJsonWithEnvelope}.
     */
    private JsonNode convertToJson(Schema schema, Object value) {
        if (value == null) {
            if (schema == null) // Any schema is valid and we don't have a default, so treat this as an optional schema
                return null;
            if (schema.defaultValue() != null)
                return convertToJson(schema, schema.defaultValue());
            if (schema.isOptional())
                return JSON_NODE_FACTORY.nullNode();
            throw new DataException("Conversion error: null value for field that is required and has no default value");
        }

        if (schema != null && schema.name() != null) {
            LogicalTypeConverter logicalConverter = LOGICAL_CONVERTERS.get(schema.name());
            if (logicalConverter != null)
                return logicalConverter.toJson(schema, value, config);
        }

        try {
            final Schema.Type schemaType;
            if (schema == null) {
                schemaType = ConnectSchema.schemaType(value.getClass());
                if (schemaType == null)
                    throw new DataException("Java class " + value.getClass() + " does not have corresponding schema type.");
            } else {
                schemaType = schema.type();
            }
            switch (schemaType) {
                case INT8:
                    return JSON_NODE_FACTORY.numberNode((Byte) value);
                case INT16:
                    return JSON_NODE_FACTORY.numberNode((Short) value);
                case INT32:
                    return JSON_NODE_FACTORY.numberNode((Integer) value);
                case INT64:
                    return JSON_NODE_FACTORY.numberNode((Long) value);
                case FLOAT32:
                    return JSON_NODE_FACTORY.numberNode((Float) value);
                case FLOAT64:
                    return JSON_NODE_FACTORY.numberNode((Double) value);
                case BOOLEAN:
                    return JSON_NODE_FACTORY.booleanNode((Boolean) value);
                case STRING:
                    CharSequence charSeq = (CharSequence) value;
                    return JSON_NODE_FACTORY.textNode(charSeq.toString());
                case BYTES:
                    if (value instanceof byte[])
                        return JSON_NODE_FACTORY.binaryNode((byte[]) value);
                    else if (value instanceof ByteBuffer)
                        return JSON_NODE_FACTORY.binaryNode(((ByteBuffer) value).array());
                    else
                        throw new DataException("Invalid type for bytes type: " + value.getClass());
                case ARRAY: {
                    Collection collection = (Collection) value;
                    ArrayNode list = JSON_NODE_FACTORY.arrayNode();
                    for (Object elem : collection) {
                        Schema valueSchema = schema == null ? null : schema.valueSchema();
                        JsonNode fieldValue = convertToJson(valueSchema, elem);
                        list.add(fieldValue);
                    }
                    return list;
                }
                case MAP: {
                    Map<?, ?> map = (Map<?, ?>) value;
                    // If true, using string keys and JSON object; if false, using non-string keys and Array-encoding
                    boolean objectMode;
                    if (schema == null) {
                        objectMode = true;
                        for (Map.Entry<?, ?> entry : map.entrySet()) {
                            if (!(entry.getKey() instanceof String)) {
                                objectMode = false;
                                break;
                            }
                        }
                    } else {
                        objectMode = schema.keySchema().type() == Schema.Type.STRING;
                    }
                    ObjectNode obj = null;
                    ArrayNode list = null;
                    if (objectMode)
                        obj = JSON_NODE_FACTORY.objectNode();
                    else
                        list = JSON_NODE_FACTORY.arrayNode();
                    for (Map.Entry<?, ?> entry : map.entrySet()) {
                        Schema keySchema = schema == null ? null : schema.keySchema();
                        Schema valueSchema = schema == null ? null : schema.valueSchema();
                        JsonNode mapKey = convertToJson(keySchema, entry.getKey());
                        JsonNode mapValue = convertToJson(valueSchema, entry.getValue());

                        if (objectMode)
                            obj.set(mapKey.asText(), mapValue);
                        else
                            list.add(JSON_NODE_FACTORY.arrayNode().add(mapKey).add(mapValue));
                    }
                    return objectMode ? obj : list;
                }
                case STRUCT: {
                    Struct struct = (Struct) value;
                    if (!struct.schema().equals(schema))
                        throw new DataException("Mismatching schema.");
                    ObjectNode obj = JSON_NODE_FACTORY.objectNode();
                    for (Field field : schema.fields()) {
                        obj.set(field.name(), convertToJson(field.schema(), struct.get(field)));
                    }
                    return obj;
                }
            }

            throw new DataException("Couldn't convert " + value + " to JSON.");
        } catch (ClassCastException e) {
            String schemaTypeStr = (schema != null) ? schema.type().toString() : "unknown schema";
            throw new DataException("Invalid type for " + schemaTypeStr + ": " + value.getClass());
        }
    }

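The MAP branch above is the one subtle encoding choice in this method: maps with string keys become plain JSON objects, while maps with any other key type are encoded as an array of [key, value] pairs. A small sketch, not part of the patch, showing both encodings through the public Converter API (expected output shown in comments):

// Sketch of the two map encodings produced by convertToJson; illustrative only.
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder;
import org.apache.kafka.connect.json.JsonConverter;

import java.nio.charset.StandardCharsets;
import java.util.Collections;

public class MapEncodingSketch {
    public static void main(String[] args) {
        JsonConverter converter = new JsonConverter();
        converter.configure(Collections.singletonMap("schemas.enable", "false"), false);

        // String keys -> plain JSON object
        Schema stringKeyed = SchemaBuilder.map(Schema.STRING_SCHEMA, Schema.INT32_SCHEMA).build();
        byte[] asObject = converter.fromConnectData("t", stringKeyed, Collections.singletonMap("a", 1));
        System.out.println(new String(asObject, StandardCharsets.UTF_8)); // expected: {"a":1}

        // Non-string keys -> array of [key, value] pairs
        Schema intKeyed = SchemaBuilder.map(Schema.INT32_SCHEMA, Schema.INT32_SCHEMA).build();
        byte[] asPairs = converter.fromConnectData("t", intKeyed, Collections.singletonMap(7, 1));
        System.out.println(new String(asPairs, StandardCharsets.UTF_8)); // expected: [[7,1]]
    }
}
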
    private static Object convertToConnect(Schema schema, JsonNode jsonValue) {
        final Schema.Type schemaType;
        if (schema != null) {
            schemaType = schema.type();
            if (jsonValue == null || jsonValue.isNull()) {
                if (schema.defaultValue() != null)
                    return schema.defaultValue(); // any logical type conversions should already have been applied
                if (schema.isOptional())
                    return null;
                throw new DataException("Invalid null value for required " + schemaType + " field");
            }
        } else {
            switch (jsonValue.getNodeType()) {
                case NULL:
                    // Special case. With no schema, a JSON null simply maps to a null Connect value.
                    return null;
                case BOOLEAN:
                    schemaType = Schema.Type.BOOLEAN;
                    break;
                case NUMBER:
                    if (jsonValue.isIntegralNumber())
                        schemaType = Schema.Type.INT64;
                    else
                        schemaType = Schema.Type.FLOAT64;
                    break;
                case ARRAY:
                    schemaType = Schema.Type.ARRAY;
                    break;
                case OBJECT:
                    schemaType = Schema.Type.MAP;
                    break;
                case STRING:
                    schemaType = Schema.Type.STRING;
                    break;

                case BINARY:
                case MISSING:
                case POJO:
                default:
                    schemaType = null;
                    break;
            }
        }

        final JsonToConnectTypeConverter typeConverter = TO_CONNECT_CONVERTERS.get(schemaType);
        if (typeConverter == null)
            throw new DataException("Unknown schema type: " + schemaType);

        if (schema != null && schema.name() != null) {
            LogicalTypeConverter logicalConverter = LOGICAL_CONVERTERS.get(schema.name());
            if (logicalConverter != null)
                return logicalConverter.toConnect(schema, jsonValue);
        }

        return typeConverter.convert(schema, jsonValue);
    }

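When no schema is supplied, the switch above infers a Connect type from the JSON node type alone, so every integral JSON number comes back as INT64 (a Long) and every floating-point number as FLOAT64 (a Double). A hedged sketch, not part of the patch, of how that inference surfaces through toConnectData; the printed classes are the expected results under these assumptions:

// Sketch of schemaless type inference in convertToConnect; illustrative only.
import org.apache.kafka.connect.data.SchemaAndValue;
import org.apache.kafka.connect.json.JsonConverter;

import java.nio.charset.StandardCharsets;
import java.util.Collections;

public class SchemalessInferenceSketch {
    public static void main(String[] args) {
        JsonConverter converter = new JsonConverter();
        converter.configure(Collections.singletonMap("schemas.enable", "false"), false);

        // "12" is an integral JSON number, so it is inferred as INT64 and surfaces as a Long with a null schema
        SchemaAndValue integral = converter.toConnectData("t", "12".getBytes(StandardCharsets.UTF_8));
        System.out.println(integral.schema() + " -> " + integral.value().getClass()); // expected: null -> class java.lang.Long

        // "12.5" is a floating-point JSON number, inferred as FLOAT64 (a Double)
        SchemaAndValue floating = converter.toConnectData("t", "12.5".getBytes(StandardCharsets.UTF_8));
        System.out.println(floating.value().getClass()); // expected: class java.lang.Double
    }
}
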
    private interface JsonToConnectTypeConverter {
        Object convert(Schema schema, JsonNode value);
    }

    private interface LogicalTypeConverter {
        JsonNode toJson(Schema schema, Object value, JsonConverterConfig config);
        Object toConnect(Schema schema, JsonNode value);
    }
}

@@ -0,0 +1,114 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.connect.json;

import java.util.Locale;
import org.apache.kafka.common.config.ConfigDef;
import org.apache.kafka.common.config.ConfigDef.Importance;
import org.apache.kafka.common.config.ConfigDef.Type;
import org.apache.kafka.common.config.ConfigDef.Width;
import org.apache.kafka.connect.storage.ConverterConfig;

import java.util.Map;

/**
 * Configuration options for {@link JsonConverter} instances.
 */
public class JsonConverterConfig extends ConverterConfig {

    public static final String SCHEMAS_ENABLE_CONFIG = "schemas.enable";
    public static final boolean SCHEMAS_ENABLE_DEFAULT = true;
    private static final String SCHEMAS_ENABLE_DOC = "Include schemas within each of the serialized values and keys.";
    private static final String SCHEMAS_ENABLE_DISPLAY = "Enable Schemas";

    public static final String SCHEMAS_CACHE_SIZE_CONFIG = "schemas.cache.size";
    public static final int SCHEMAS_CACHE_SIZE_DEFAULT = 1000;
    private static final String SCHEMAS_CACHE_SIZE_DOC = "The maximum number of schemas that can be cached in this converter instance.";
    private static final String SCHEMAS_CACHE_SIZE_DISPLAY = "Schema Cache Size";

    public static final String DECIMAL_FORMAT_CONFIG = "decimal.format";
    public static final String DECIMAL_FORMAT_DEFAULT = DecimalFormat.BASE64.name();
    private static final String DECIMAL_FORMAT_DOC = "Controls which format this converter will serialize decimals in."
            + " This value is case insensitive and can be either 'BASE64' (default) or 'NUMERIC'";
    private static final String DECIMAL_FORMAT_DISPLAY = "Decimal Format";

    private final static ConfigDef CONFIG;

    static {
        String group = "Schemas";
        int orderInGroup = 0;
        CONFIG = ConverterConfig.newConfigDef();
        CONFIG.define(SCHEMAS_ENABLE_CONFIG, Type.BOOLEAN, SCHEMAS_ENABLE_DEFAULT, Importance.HIGH, SCHEMAS_ENABLE_DOC, group,
                orderInGroup++, Width.MEDIUM, SCHEMAS_ENABLE_DISPLAY);
        CONFIG.define(SCHEMAS_CACHE_SIZE_CONFIG, Type.INT, SCHEMAS_CACHE_SIZE_DEFAULT, Importance.HIGH, SCHEMAS_CACHE_SIZE_DOC, group,
                orderInGroup++, Width.MEDIUM, SCHEMAS_CACHE_SIZE_DISPLAY);

        group = "Serialization";
        orderInGroup = 0;
        CONFIG.define(
            DECIMAL_FORMAT_CONFIG, Type.STRING, DECIMAL_FORMAT_DEFAULT,
            ConfigDef.CaseInsensitiveValidString.in(
                DecimalFormat.BASE64.name(),
                DecimalFormat.NUMERIC.name()),
            Importance.LOW, DECIMAL_FORMAT_DOC, group, orderInGroup++,
            Width.MEDIUM, DECIMAL_FORMAT_DISPLAY);
    }

    public static ConfigDef configDef() {
        return CONFIG;
    }

    // cached config values
    private final boolean schemasEnabled;
    private final int schemaCacheSize;
    private final DecimalFormat decimalFormat;

    public JsonConverterConfig(Map<String, ?> props) {
        super(CONFIG, props);
        this.schemasEnabled = getBoolean(SCHEMAS_ENABLE_CONFIG);
        this.schemaCacheSize = getInt(SCHEMAS_CACHE_SIZE_CONFIG);
        this.decimalFormat = DecimalFormat.valueOf(getString(DECIMAL_FORMAT_CONFIG).toUpperCase(Locale.ROOT));
    }

    /**
     * Return whether schemas are enabled.
     *
     * @return true if enabled, or false otherwise
     */
    public boolean schemasEnabled() {
        return schemasEnabled;
    }

    /**
     * Get the cache size.
     *
     * @return the cache size
     */
    public int schemaCacheSize() {
        return schemaCacheSize;
    }

    /**
     * Get the serialization format for decimal types.
     *
     * @return the decimal serialization format
     */
    public DecimalFormat decimalFormat() {
        return decimalFormat;
    }

}

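These three settings are what a worker or connector configuration passes through to the converter. A minimal sketch, not part of the patch, of supplying them programmatically; the values chosen here are arbitrary examples:

// Sketch: passing JsonConverterConfig settings to a JsonConverter; illustrative only.
import org.apache.kafka.connect.json.JsonConverter;

import java.util.HashMap;
import java.util.Map;

public class ConverterConfigSketch {
    public static void main(String[] args) {
        Map<String, Object> props = new HashMap<>();
        props.put("schemas.enable", false);        // JsonConverterConfig.SCHEMAS_ENABLE_CONFIG
        props.put("schemas.cache.size", 256);      // JsonConverterConfig.SCHEMAS_CACHE_SIZE_CONFIG
        props.put("decimal.format", "NUMERIC");    // JsonConverterConfig.DECIMAL_FORMAT_CONFIG (case-insensitive)

        JsonConverter converter = new JsonConverter();
        converter.configure(props, false);         // false -> value converter; true -> key converter
    }
}
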
@@ -0,0 +1,71 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.connect.json;

import com.fasterxml.jackson.databind.DeserializationFeature;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import java.util.Collections;
import java.util.Set;
import org.apache.kafka.common.errors.SerializationException;
import org.apache.kafka.common.serialization.Deserializer;

/**
 * JSON deserializer for Jackson's JsonNode tree model. Using the tree model allows it to work with arbitrarily
 * structured data without having associated Java classes. This deserializer also supports Connect schemas.
 */
public class JsonDeserializer implements Deserializer<JsonNode> {
    private final ObjectMapper objectMapper = new ObjectMapper();

    /**
     * Default constructor needed by Kafka
     */
    public JsonDeserializer() {
        this(Collections.emptySet(), JsonNodeFactory.withExactBigDecimals(true));
    }

    /**
     * A constructor that additionally specifies some {@link DeserializationFeature}
     * for the deserializer
     *
     * @param deserializationFeatures the specified deserialization features
     * @param jsonNodeFactory the json node factory to use.
     */
    JsonDeserializer(
        final Set<DeserializationFeature> deserializationFeatures,
        final JsonNodeFactory jsonNodeFactory
    ) {
        deserializationFeatures.forEach(objectMapper::enable);
        objectMapper.setNodeFactory(jsonNodeFactory);
    }

    @Override
    public JsonNode deserialize(String topic, byte[] bytes) {
        if (bytes == null)
            return null;

        JsonNode data;
        try {
            data = objectMapper.readTree(bytes);
        } catch (Exception e) {
            throw new SerializationException(e);
        }

        return data;
    }
}

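A short usage sketch, not part of the patch: feeding raw UTF-8 JSON bytes through the deserializer yields a Jackson tree that can be inspected without any generated classes. The topic name and payload below are made up for the example.

// Sketch of JsonDeserializer usage; illustrative only.
import com.fasterxml.jackson.databind.JsonNode;
import org.apache.kafka.connect.json.JsonDeserializer;

import java.nio.charset.StandardCharsets;

public class JsonDeserializerSketch {
    public static void main(String[] args) {
        try (JsonDeserializer deserializer = new JsonDeserializer()) {
            byte[] bytes = "{\"name\": \"broker-1\", \"port\": 9092}".getBytes(StandardCharsets.UTF_8);
            JsonNode node = deserializer.deserialize("any-topic", bytes);
            System.out.println(node.get("name").asText() + ":" + node.get("port").asInt()); // broker-1:9092
        }
    }
}
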
@@ -0,0 +1,81 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.connect.json;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.ObjectNode;

public class JsonSchema {

    static final String ENVELOPE_SCHEMA_FIELD_NAME = "schema";
    static final String ENVELOPE_PAYLOAD_FIELD_NAME = "payload";
    static final String SCHEMA_TYPE_FIELD_NAME = "type";
    static final String SCHEMA_OPTIONAL_FIELD_NAME = "optional";
    static final String SCHEMA_NAME_FIELD_NAME = "name";
    static final String SCHEMA_VERSION_FIELD_NAME = "version";
    static final String SCHEMA_DOC_FIELD_NAME = "doc";
    static final String SCHEMA_PARAMETERS_FIELD_NAME = "parameters";
    static final String SCHEMA_DEFAULT_FIELD_NAME = "default";
    static final String ARRAY_ITEMS_FIELD_NAME = "items";
    static final String MAP_KEY_FIELD_NAME = "keys";
    static final String MAP_VALUE_FIELD_NAME = "values";
    static final String STRUCT_FIELDS_FIELD_NAME = "fields";
    static final String STRUCT_FIELD_NAME_FIELD_NAME = "field";
    static final String BOOLEAN_TYPE_NAME = "boolean";
    static final ObjectNode BOOLEAN_SCHEMA = JsonNodeFactory.instance.objectNode().put(SCHEMA_TYPE_FIELD_NAME, BOOLEAN_TYPE_NAME);
    static final String INT8_TYPE_NAME = "int8";
    static final ObjectNode INT8_SCHEMA = JsonNodeFactory.instance.objectNode().put(SCHEMA_TYPE_FIELD_NAME, INT8_TYPE_NAME);
    static final String INT16_TYPE_NAME = "int16";
    static final ObjectNode INT16_SCHEMA = JsonNodeFactory.instance.objectNode().put(SCHEMA_TYPE_FIELD_NAME, INT16_TYPE_NAME);
    static final String INT32_TYPE_NAME = "int32";
    static final ObjectNode INT32_SCHEMA = JsonNodeFactory.instance.objectNode().put(SCHEMA_TYPE_FIELD_NAME, INT32_TYPE_NAME);
    static final String INT64_TYPE_NAME = "int64";
    static final ObjectNode INT64_SCHEMA = JsonNodeFactory.instance.objectNode().put(SCHEMA_TYPE_FIELD_NAME, INT64_TYPE_NAME);
    static final String FLOAT_TYPE_NAME = "float";
    static final ObjectNode FLOAT_SCHEMA = JsonNodeFactory.instance.objectNode().put(SCHEMA_TYPE_FIELD_NAME, FLOAT_TYPE_NAME);
    static final String DOUBLE_TYPE_NAME = "double";
    static final ObjectNode DOUBLE_SCHEMA = JsonNodeFactory.instance.objectNode().put(SCHEMA_TYPE_FIELD_NAME, DOUBLE_TYPE_NAME);
    static final String BYTES_TYPE_NAME = "bytes";
    static final ObjectNode BYTES_SCHEMA = JsonNodeFactory.instance.objectNode().put(SCHEMA_TYPE_FIELD_NAME, BYTES_TYPE_NAME);
    static final String STRING_TYPE_NAME = "string";
    static final ObjectNode STRING_SCHEMA = JsonNodeFactory.instance.objectNode().put(SCHEMA_TYPE_FIELD_NAME, STRING_TYPE_NAME);
    static final String ARRAY_TYPE_NAME = "array";
    static final String MAP_TYPE_NAME = "map";
    static final String STRUCT_TYPE_NAME = "struct";

    public static ObjectNode envelope(JsonNode schema, JsonNode payload) {
        ObjectNode result = JsonNodeFactory.instance.objectNode();
        result.set(ENVELOPE_SCHEMA_FIELD_NAME, schema);
        result.set(ENVELOPE_PAYLOAD_FIELD_NAME, payload);
        return result;
    }

    static class Envelope {
        public JsonNode schema;
        public JsonNode payload;

        public Envelope(JsonNode schema, JsonNode payload) {
            this.schema = schema;
            this.payload = payload;
        }

        public ObjectNode toJsonNode() {
            return envelope(schema, payload);
        }
    }
}

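The envelope helper above is what gives serialized records their two-field shape. A short sketch, not part of the patch, that builds a schema node by hand and wraps a payload in it:

// Sketch of the {"schema": ..., "payload": ...} envelope built by JsonSchema.envelope; illustrative only.
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.ObjectNode;
import org.apache.kafka.connect.json.JsonSchema;

public class EnvelopeShapeSketch {
    public static void main(String[] args) {
        ObjectNode schema = JsonNodeFactory.instance.objectNode().put("type", "string");
        ObjectNode envelope = JsonSchema.envelope(schema, JsonNodeFactory.instance.textNode("hello"));
        System.out.println(envelope); // {"schema":{"type":"string"},"payload":"hello"}
    }
}
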
@@ -0,0 +1,69 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.connect.json;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.SerializationFeature;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import org.apache.kafka.common.errors.SerializationException;
import org.apache.kafka.common.serialization.Serializer;

import java.util.Collections;
import java.util.Set;

/**
 * Serialize Jackson JsonNode tree model objects to UTF-8 JSON. Using the tree model allows handling arbitrarily
 * structured data without corresponding Java classes. This serializer also supports Connect schemas.
 */
public class JsonSerializer implements Serializer<JsonNode> {
    private final ObjectMapper objectMapper = new ObjectMapper();

    /**
     * Default constructor needed by Kafka
     */
    public JsonSerializer() {
        this(Collections.emptySet(), JsonNodeFactory.withExactBigDecimals(true));
    }

    /**
     * A constructor that additionally specifies some {@link SerializationFeature}
     * for the serializer
     *
     * @param serializationFeatures the specified serialization features
     * @param jsonNodeFactory the json node factory to use.
     */
    JsonSerializer(
        final Set<SerializationFeature> serializationFeatures,
        final JsonNodeFactory jsonNodeFactory
    ) {
        serializationFeatures.forEach(objectMapper::enable);
        objectMapper.setNodeFactory(jsonNodeFactory);
    }

    @Override
    public byte[] serialize(String topic, JsonNode data) {
        if (data == null)
            return null;

        try {
            return objectMapper.writeValueAsBytes(data);
        } catch (Exception e) {
            throw new SerializationException("Error serializing JSON message", e);
        }
    }
}

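Taken together with the deserializer above, these two classes give a symmetric byte[] to JsonNode round trip. A sketch, not part of the patch, with made-up field names:

// Sketch: JsonSerializer/JsonDeserializer round trip; illustrative only.
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.ObjectNode;
import org.apache.kafka.connect.json.JsonDeserializer;
import org.apache.kafka.connect.json.JsonSerializer;

public class JsonRoundTripSketch {
    public static void main(String[] args) {
        ObjectNode original = JsonNodeFactory.instance.objectNode();
        original.put("cluster", "km-kafka").put("brokers", 3);

        try (JsonSerializer serializer = new JsonSerializer();
             JsonDeserializer deserializer = new JsonDeserializer()) {
            byte[] bytes = serializer.serialize("any-topic", original);
            JsonNode restored = deserializer.deserialize("any-topic", bytes);
            System.out.println(original.equals(restored)); // true
        }
    }
}
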
@@ -0,0 +1,39 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.connect.json;

import static org.junit.Assert.assertEquals;

import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.connect.storage.ConverterConfig;
import org.apache.kafka.connect.storage.ConverterType;
import org.junit.Test;

public class JsonConverterConfigTest {

    @Test
    public void shouldBeCaseInsensitiveForDecimalFormatConfig() {
        final Map<String, Object> configValues = new HashMap<>();
        configValues.put(ConverterConfig.TYPE_CONFIG, ConverterType.KEY.getName());
        configValues.put(JsonConverterConfig.DECIMAL_FORMAT_CONFIG, "NuMeRiC");

        final JsonConverterConfig config = new JsonConverterConfig(configValues);
        assertEquals(config.decimalFormat(), DecimalFormat.NUMERIC);
    }

}