Add km module kafka gateway
@@ -0,0 +1,140 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.connect.cli;

import org.apache.kafka.common.utils.Exit;
import org.apache.kafka.common.utils.Time;
import org.apache.kafka.common.utils.Utils;
import org.apache.kafka.connect.connector.policy.ConnectorClientConfigOverridePolicy;
import org.apache.kafka.connect.runtime.Connect;
import org.apache.kafka.connect.runtime.Worker;
import org.apache.kafka.connect.runtime.WorkerConfig;
import org.apache.kafka.connect.runtime.WorkerConfigTransformer;
import org.apache.kafka.connect.runtime.WorkerInfo;
import org.apache.kafka.connect.runtime.distributed.DistributedConfig;
import org.apache.kafka.connect.runtime.distributed.DistributedHerder;
import org.apache.kafka.connect.runtime.isolation.Plugins;
import org.apache.kafka.connect.runtime.rest.RestServer;
import org.apache.kafka.connect.storage.ConfigBackingStore;
import org.apache.kafka.connect.storage.Converter;
import org.apache.kafka.connect.storage.KafkaConfigBackingStore;
import org.apache.kafka.connect.storage.KafkaOffsetBackingStore;
import org.apache.kafka.connect.storage.KafkaStatusBackingStore;
import org.apache.kafka.connect.storage.StatusBackingStore;
import org.apache.kafka.connect.util.ConnectUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.net.URI;
import java.util.Arrays;
import java.util.Collections;
import java.util.Map;

/**
 * <p>
 * Command line utility that runs Kafka Connect in distributed mode. In this mode, the process joins a group of other workers
 * and work is distributed among them. This is useful for running Connect as a service, where connectors can be
 * submitted to the cluster to be automatically executed in a scalable, distributed fashion. This also allows you to
 * easily scale out horizontally, elastically adding or removing capacity simply by starting or stopping worker
 * instances.
 * </p>
 */
public class ConnectDistributed {
    private static final Logger log = LoggerFactory.getLogger(ConnectDistributed.class);

    private final Time time = Time.SYSTEM;
    private final long initStart = time.hiResClockMs();

    public static void main(String[] args) {

        if (args.length < 1 || Arrays.asList(args).contains("--help")) {
            log.info("Usage: ConnectDistributed worker.properties");
            Exit.exit(1);
        }

        try {
            WorkerInfo initInfo = new WorkerInfo();
            initInfo.logAll();

            String workerPropsFile = args[0];
            Map<String, String> workerProps = !workerPropsFile.isEmpty() ?
                    Utils.propsToStringMap(Utils.loadProps(workerPropsFile)) : Collections.emptyMap();

            ConnectDistributed connectDistributed = new ConnectDistributed();
            Connect connect = connectDistributed.startConnect(workerProps);

            // Shutdown will be triggered by Ctrl-C or via HTTP shutdown request
            connect.awaitStop();

        } catch (Throwable t) {
            log.error("Stopping due to error", t);
            Exit.exit(2);
        }
    }

    public Connect startConnect(Map<String, String> workerProps) {
        log.info("Scanning for plugin classes. This might take a moment ...");
        Plugins plugins = new Plugins(workerProps);
        plugins.compareAndSwapWithDelegatingLoader();
        DistributedConfig config = new DistributedConfig(workerProps);

        String kafkaClusterId = ConnectUtils.lookupKafkaClusterId(config);
        log.debug("Kafka cluster ID: {}", kafkaClusterId);

        RestServer rest = new RestServer(config);
        rest.initializeServer();

        URI advertisedUrl = rest.advertisedUrl();
        String workerId = advertisedUrl.getHost() + ":" + advertisedUrl.getPort();

        KafkaOffsetBackingStore offsetBackingStore = new KafkaOffsetBackingStore();
        offsetBackingStore.configure(config);

        ConnectorClientConfigOverridePolicy connectorClientConfigOverridePolicy = plugins.newPlugin(
                config.getString(WorkerConfig.CONNECTOR_CLIENT_POLICY_CLASS_CONFIG),
                config, ConnectorClientConfigOverridePolicy.class);

        Worker worker = new Worker(workerId, time, plugins, config, offsetBackingStore, connectorClientConfigOverridePolicy);
        WorkerConfigTransformer configTransformer = worker.configTransformer();

        Converter internalValueConverter = worker.getInternalValueConverter();
        StatusBackingStore statusBackingStore = new KafkaStatusBackingStore(time, internalValueConverter);
        statusBackingStore.configure(config);

        ConfigBackingStore configBackingStore = new KafkaConfigBackingStore(
                internalValueConverter,
                config,
                configTransformer);

        DistributedHerder herder = new DistributedHerder(config, time, worker,
                kafkaClusterId, statusBackingStore, configBackingStore,
                advertisedUrl.toString(), connectorClientConfigOverridePolicy);

        final Connect connect = new Connect(herder, rest);
        log.info("Kafka Connect distributed worker initialization took {}ms", time.hiResClockMs() - initStart);
        try {
            connect.start();
        } catch (Exception e) {
            log.error("Failed to start Connect", e);
            connect.stop();
            Exit.exit(3);
        }

        return connect;
    }

}
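For reference, startConnect(Map) above is the reusable entry point: main() only parses the worker properties file and delegates to it. A minimal sketch of driving the same path programmatically; the properties path and class name below are illustrative assumptions, not part of this change.

package org.apache.kafka.connect.cli;

import org.apache.kafka.common.utils.Utils;
import org.apache.kafka.connect.runtime.Connect;

import java.util.Map;

public class EmbeddedConnectDistributedExample {
    public static void main(String[] args) throws Exception {
        // Load the same style of worker properties file that ConnectDistributed.main() expects.
        Map<String, String> workerProps =
                Utils.propsToStringMap(Utils.loadProps("worker.properties"));

        // Reuse the public startConnect() hook instead of going through main().
        Connect connect = new ConnectDistributed().startConnect(workerProps);

        // Block until shutdown (Ctrl-C or REST shutdown request), just as main() does.
        connect.awaitStop();
    }
}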
@@ -0,0 +1,134 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.connect.cli;

import org.apache.kafka.common.utils.Exit;
import org.apache.kafka.common.utils.Time;
import org.apache.kafka.common.utils.Utils;
import org.apache.kafka.connect.connector.policy.ConnectorClientConfigOverridePolicy;
import org.apache.kafka.connect.runtime.Connect;
import org.apache.kafka.connect.runtime.ConnectorConfig;
import org.apache.kafka.connect.runtime.Herder;
import org.apache.kafka.connect.runtime.Worker;
import org.apache.kafka.connect.runtime.WorkerConfig;
import org.apache.kafka.connect.runtime.WorkerInfo;
import org.apache.kafka.connect.runtime.isolation.Plugins;
import org.apache.kafka.connect.runtime.rest.RestServer;
import org.apache.kafka.connect.runtime.rest.entities.ConnectorInfo;
import org.apache.kafka.connect.runtime.standalone.StandaloneConfig;
import org.apache.kafka.connect.runtime.standalone.StandaloneHerder;
import org.apache.kafka.connect.storage.FileOffsetBackingStore;
import org.apache.kafka.connect.util.Callback;
import org.apache.kafka.connect.util.ConnectUtils;
import org.apache.kafka.connect.util.FutureCallback;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.net.URI;
import java.util.Arrays;
import java.util.Collections;
import java.util.Map;

/**
 * <p>
 * Command line utility that runs Kafka Connect as a standalone process. In this mode, work is not
 * distributed. Instead, all the normal Connect machinery works within a single process. This is
 * useful for ad hoc, small, or experimental jobs.
 * </p>
 * <p>
 * By default, no job configs or offset data is persistent. You can make jobs persistent and
 * fault tolerant by overriding the settings to use file storage for both.
 * </p>
 */
public class ConnectStandalone {
    private static final Logger log = LoggerFactory.getLogger(ConnectStandalone.class);

    public static void main(String[] args) {

        if (args.length < 2 || Arrays.asList(args).contains("--help")) {
            log.info("Usage: ConnectStandalone worker.properties connector1.properties [connector2.properties ...]");
            Exit.exit(1);
        }

        try {
            Time time = Time.SYSTEM;
            log.info("Kafka Connect standalone worker initializing ...");
            long initStart = time.hiResClockMs();
            WorkerInfo initInfo = new WorkerInfo();
            initInfo.logAll();

            String workerPropsFile = args[0];
            Map<String, String> workerProps = !workerPropsFile.isEmpty() ?
                    Utils.propsToStringMap(Utils.loadProps(workerPropsFile)) : Collections.<String, String>emptyMap();

            log.info("Scanning for plugin classes. This might take a moment ...");
            Plugins plugins = new Plugins(workerProps);
            plugins.compareAndSwapWithDelegatingLoader();
            StandaloneConfig config = new StandaloneConfig(workerProps);

            String kafkaClusterId = ConnectUtils.lookupKafkaClusterId(config);
            log.debug("Kafka cluster ID: {}", kafkaClusterId);

            RestServer rest = new RestServer(config);
            rest.initializeServer();

            URI advertisedUrl = rest.advertisedUrl();
            String workerId = advertisedUrl.getHost() + ":" + advertisedUrl.getPort();

            ConnectorClientConfigOverridePolicy connectorClientConfigOverridePolicy = plugins.newPlugin(
                    config.getString(WorkerConfig.CONNECTOR_CLIENT_POLICY_CLASS_CONFIG),
                    config, ConnectorClientConfigOverridePolicy.class);
            Worker worker = new Worker(workerId, time, plugins, config, new FileOffsetBackingStore(),
                    connectorClientConfigOverridePolicy);

            Herder herder = new StandaloneHerder(worker, kafkaClusterId, connectorClientConfigOverridePolicy);
            final Connect connect = new Connect(herder, rest);
            log.info("Kafka Connect standalone worker initialization took {}ms", time.hiResClockMs() - initStart);

            try {
                connect.start();
                for (final String connectorPropsFile : Arrays.copyOfRange(args, 1, args.length)) {
                    Map<String, String> connectorProps = Utils.propsToStringMap(Utils.loadProps(connectorPropsFile));
                    FutureCallback<Herder.Created<ConnectorInfo>> cb = new FutureCallback<>(new Callback<Herder.Created<ConnectorInfo>>() {
                        @Override
                        public void onCompletion(Throwable error, Herder.Created<ConnectorInfo> info) {
                            if (error != null)
                                log.error("Failed to create job for {}", connectorPropsFile);
                            else
                                log.info("Created connector {}", info.result().name());
                        }
                    });
                    herder.putConnectorConfig(
                            connectorProps.get(ConnectorConfig.NAME_CONFIG),
                            connectorProps, false, cb);
                    cb.get();
                }
            } catch (Throwable t) {
                log.error("Stopping after connector error", t);
                connect.stop();
                Exit.exit(3);
            }

            // Shutdown will be triggered by Ctrl-C or via HTTP shutdown request
            connect.awaitStop();

        } catch (Throwable t) {
            log.error("Stopping due to error", t);
            Exit.exit(2);
        }
    }
}
@@ -0,0 +1,57 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.kafka.connect.connector.policy;

import org.apache.kafka.common.config.ConfigValue;

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

public abstract class AbstractConnectorClientConfigOverridePolicy implements ConnectorClientConfigOverridePolicy {

    @Override
    public void close() throws Exception {

    }

    @Override
    public final List<ConfigValue> validate(ConnectorClientConfigRequest connectorClientConfigRequest) {
        Map<String, Object> inputConfig = connectorClientConfigRequest.clientProps();
        return inputConfig.entrySet().stream().map(this::configValue).collect(Collectors.toList());
    }

    protected ConfigValue configValue(Map.Entry<String, Object> configEntry) {
        ConfigValue configValue =
            new ConfigValue(configEntry.getKey(), configEntry.getValue(), new ArrayList<>(), new ArrayList<String>());
        validate(configValue);
        return configValue;
    }

    protected void validate(ConfigValue configValue) {
        if (!isAllowed(configValue)) {
            configValue.addErrorMessage("The '" + policyName() + "' policy does not allow '" + configValue.name()
                + "' to be overridden in the connector configuration.");
        }
    }

    protected abstract String policyName();

    protected abstract boolean isAllowed(ConfigValue configValue);
}
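The class above is a small template method: validate() maps every overridden client property to a ConfigValue and defers the allow/deny decision to isAllowed(), attaching an error message when an override is rejected. A minimal sketch of a custom policy built on it; the class name and the metrics-prefix rule are illustrative assumptions, not part of this change.

package org.apache.kafka.connect.connector.policy;

import org.apache.kafka.common.config.ConfigValue;

import java.util.Map;

// Hypothetical policy: only allow connector-level overrides of metrics.* client properties.
public class MetricsOnlyConnectorClientConfigOverridePolicy
        extends AbstractConnectorClientConfigOverridePolicy {

    @Override
    public void configure(Map<String, ?> configs) {
        // No extra configuration needed for this sketch.
    }

    @Override
    protected String policyName() {
        // Appears in the error message produced by validate(ConfigValue) for rejected overrides.
        return "MetricsOnly";
    }

    @Override
    protected boolean isAllowed(ConfigValue configValue) {
        return configValue.name().startsWith("metrics.");
    }
}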
@@ -0,0 +1,46 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.kafka.connect.connector.policy;

import org.apache.kafka.common.config.ConfigValue;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.Map;

/**
 * Allows all client configurations to be overridden via the connector configs by setting {@code connector.client.config.override.policy} to {@code All}.
 */
public class AllConnectorClientConfigOverridePolicy extends AbstractConnectorClientConfigOverridePolicy {
    private static final Logger log = LoggerFactory.getLogger(AllConnectorClientConfigOverridePolicy.class);

    @Override
    protected String policyName() {
        return "All";
    }

    @Override
    protected boolean isAllowed(ConfigValue configValue) {
        return true;
    }

    @Override
    public void configure(Map<String, ?> configs) {
        log.info("Setting up All Policy for ConnectorClientConfigOverride. This will allow all client configurations to be overridden");
    }
}
@@ -0,0 +1,47 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.kafka.connect.connector.policy;

import org.apache.kafka.common.config.ConfigValue;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.Map;

/**
 * Disallow any client configuration to be overridden via the connector configs by setting {@code connector.client.config.override.policy} to {@code None}.
 * This is the default behavior.
 */
public class NoneConnectorClientConfigOverridePolicy extends AbstractConnectorClientConfigOverridePolicy {
    private static final Logger log = LoggerFactory.getLogger(NoneConnectorClientConfigOverridePolicy.class);

    @Override
    protected String policyName() {
        return "None";
    }

    @Override
    protected boolean isAllowed(ConfigValue configValue) {
        return false;
    }

    @Override
    public void configure(Map<String, ?> configs) {
        log.info("Setting up None Policy for ConnectorClientConfigOverride. This will disallow any client configuration to be overridden");
    }
}
@@ -0,0 +1,57 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.kafka.connect.connector.policy;

import org.apache.kafka.clients.CommonClientConfigs;
import org.apache.kafka.common.config.ConfigValue;
import org.apache.kafka.common.config.SaslConfigs;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.Stream;

/**
 * Allows all {@code sasl} configurations to be overridden via the connector configs by setting {@code connector.client.config.override.policy} to
 * {@code Principal}. This allows a principal to be set per connector.
 */
public class PrincipalConnectorClientConfigOverridePolicy extends AbstractConnectorClientConfigOverridePolicy {
    private static final Logger log = LoggerFactory.getLogger(PrincipalConnectorClientConfigOverridePolicy.class);

    private static final Set<String> ALLOWED_CONFIG =
        Stream.of(SaslConfigs.SASL_JAAS_CONFIG, SaslConfigs.SASL_MECHANISM, CommonClientConfigs.SECURITY_PROTOCOL_CONFIG).
            collect(Collectors.toSet());

    @Override
    protected String policyName() {
        return "Principal";
    }

    @Override
    protected boolean isAllowed(ConfigValue configValue) {
        return ALLOWED_CONFIG.contains(configValue.name());
    }

    @Override
    public void configure(Map<String, ?> configs) {
        log.info("Setting up Principal policy for ConnectorClientConfigOverride. This will allow `sasl` client configuration to be "
            + "overridden.");
    }
}
@@ -0,0 +1,82 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.kafka.connect.converters;

import org.apache.kafka.common.config.ConfigDef;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaAndValue;
import org.apache.kafka.connect.errors.DataException;
import org.apache.kafka.connect.storage.Converter;
import org.apache.kafka.connect.storage.ConverterConfig;
import org.apache.kafka.connect.storage.HeaderConverter;

import java.util.Map;

/**
 * Pass-through converter for raw byte data.
 *
 * This implementation currently does nothing with the topic names or header names.
 */
public class ByteArrayConverter implements Converter, HeaderConverter {

    private static final ConfigDef CONFIG_DEF = ConverterConfig.newConfigDef();

    @Override
    public ConfigDef config() {
        return CONFIG_DEF;
    }

    @Override
    public void configure(Map<String, ?> configs) {
    }

    @Override
    public void configure(Map<String, ?> configs, boolean isKey) {
    }

    @Override
    public byte[] fromConnectData(String topic, Schema schema, Object value) {
        if (schema != null && schema.type() != Schema.Type.BYTES)
            throw new DataException("Invalid schema type for ByteArrayConverter: " + schema.type().toString());

        if (value != null && !(value instanceof byte[]))
            throw new DataException("ByteArrayConverter is not compatible with objects of type " + value.getClass());

        return (byte[]) value;
    }

    @Override
    public SchemaAndValue toConnectData(String topic, byte[] value) {
        return new SchemaAndValue(Schema.OPTIONAL_BYTES_SCHEMA, value);
    }

    @Override
    public byte[] fromConnectHeader(String topic, String headerKey, Schema schema, Object value) {
        return fromConnectData(topic, schema, value);
    }

    @Override
    public SchemaAndValue toConnectHeader(String topic, String headerKey, byte[] value) {
        return toConnectData(topic, value);
    }

    @Override
    public void close() {
        // do nothing
    }
}
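A short round-trip sketch of the pass-through behavior implemented above; the topic name and payload are illustrative, and non-BYTES schemas or non-byte[] values raise DataException in fromConnectData as shown in the class.

import org.apache.kafka.connect.converters.ByteArrayConverter;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaAndValue;

import java.nio.charset.StandardCharsets;
import java.util.Collections;

public class ByteArrayConverterExample {
    public static void main(String[] args) {
        ByteArrayConverter converter = new ByteArrayConverter();
        converter.configure(Collections.emptyMap(), false); // configure as a value converter

        byte[] payload = "hello".getBytes(StandardCharsets.UTF_8);

        // Serialization is a pass-through when the schema is BYTES (or null).
        byte[] serialized = converter.fromConnectData("demo-topic", Schema.BYTES_SCHEMA, payload);

        // Deserialization always reports the optional BYTES schema.
        SchemaAndValue restored = converter.toConnectData("demo-topic", serialized);
        System.out.println(restored.schema()); // optional BYTES
    }
}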
@@ -0,0 +1,37 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.connect.converters;

import org.apache.kafka.common.serialization.DoubleDeserializer;
import org.apache.kafka.common.serialization.DoubleSerializer;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.storage.Converter;
import org.apache.kafka.connect.storage.HeaderConverter;

/**
 * {@link Converter} and {@link HeaderConverter} implementation that only supports serializing to and deserializing from double values.
 * It does support handling nulls. When converting from bytes to Kafka Connect format, the converter will always return an
 * optional FLOAT64 schema.
 * <p>
 * This implementation currently does nothing with the topic names or header names.
 */
public class DoubleConverter extends NumberConverter<Double> {

    public DoubleConverter() {
        super("double", Schema.OPTIONAL_FLOAT64_SCHEMA, new DoubleSerializer(), new DoubleDeserializer());
    }
}
@@ -0,0 +1,37 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.connect.converters;

import org.apache.kafka.common.serialization.FloatDeserializer;
import org.apache.kafka.common.serialization.FloatSerializer;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.storage.Converter;
import org.apache.kafka.connect.storage.HeaderConverter;

/**
 * {@link Converter} and {@link HeaderConverter} implementation that only supports serializing to and deserializing from float values.
 * It does support handling nulls. When converting from bytes to Kafka Connect format, the converter will always return an
 * optional FLOAT32 schema.
 * <p>
 * This implementation currently does nothing with the topic names or header names.
 */
public class FloatConverter extends NumberConverter<Float> {

    public FloatConverter() {
        super("float", Schema.OPTIONAL_FLOAT32_SCHEMA, new FloatSerializer(), new FloatDeserializer());
    }
}
@@ -0,0 +1,37 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.connect.converters;

import org.apache.kafka.common.serialization.IntegerDeserializer;
import org.apache.kafka.common.serialization.IntegerSerializer;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.storage.Converter;
import org.apache.kafka.connect.storage.HeaderConverter;

/**
 * {@link Converter} and {@link HeaderConverter} implementation that only supports serializing to and deserializing from integer values.
 * It does support handling nulls. When converting from bytes to Kafka Connect format, the converter will always return an
 * optional INT32 schema.
 * <p>
 * This implementation currently does nothing with the topic names or header names.
 */
public class IntegerConverter extends NumberConverter<Integer> {

    public IntegerConverter() {
        super("integer", Schema.OPTIONAL_INT32_SCHEMA, new IntegerSerializer(), new IntegerDeserializer());
    }
}
@@ -0,0 +1,38 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.connect.converters;

import org.apache.kafka.common.serialization.LongDeserializer;
import org.apache.kafka.common.serialization.LongSerializer;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.storage.Converter;
import org.apache.kafka.connect.storage.HeaderConverter;

/**
 * {@link Converter} and {@link HeaderConverter} implementation that only supports serializing to and deserializing from long values.
 * It does support handling nulls. When converting from bytes to Kafka Connect format, the converter will always return an
 * optional INT64 schema.
 * <p>
 * This implementation currently does nothing with the topic names or header names.
 */
public class LongConverter extends NumberConverter<Long> {

    public LongConverter() {
        super("long", Schema.OPTIONAL_INT64_SCHEMA, new LongSerializer(), new LongDeserializer());
    }

}
@@ -0,0 +1,127 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.connect.converters;

import org.apache.kafka.common.config.ConfigDef;
import org.apache.kafka.common.errors.SerializationException;
import org.apache.kafka.common.serialization.Deserializer;
import org.apache.kafka.common.serialization.Serializer;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaAndValue;
import org.apache.kafka.connect.errors.DataException;
import org.apache.kafka.connect.storage.Converter;
import org.apache.kafka.connect.storage.ConverterType;
import org.apache.kafka.connect.storage.HeaderConverter;
import org.apache.kafka.connect.storage.StringConverterConfig;

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

/**
 * {@link Converter} and {@link HeaderConverter} implementation that only supports serializing to and deserializing from number values.
 * It does support handling nulls. When converting from bytes to Kafka Connect format, the converter will always return the specified
 * schema.
 * <p>
 * This implementation currently does nothing with the topic names or header names.
 */
abstract class NumberConverter<T extends Number> implements Converter, HeaderConverter {

    private final Serializer<T> serializer;
    private final Deserializer<T> deserializer;
    private final String typeName;
    private final Schema schema;

    /**
     * Create the converter.
     *
     * @param typeName the displayable name of the type; may not be null
     * @param schema the optional schema to be used for all deserialized forms; may not be null
     * @param serializer the serializer; may not be null
     * @param deserializer the deserializer; may not be null
     */
    protected NumberConverter(String typeName, Schema schema, Serializer<T> serializer, Deserializer<T> deserializer) {
        this.typeName = typeName;
        this.schema = schema;
        this.serializer = serializer;
        this.deserializer = deserializer;
        assert this.serializer != null;
        assert this.deserializer != null;
        assert this.typeName != null;
        assert this.schema != null;
    }

    @Override
    public ConfigDef config() {
        return NumberConverterConfig.configDef();
    }

    @Override
    public void configure(Map<String, ?> configs) {
        NumberConverterConfig conf = new NumberConverterConfig(configs);
        boolean isKey = conf.type() == ConverterType.KEY;
        serializer.configure(configs, isKey);
        deserializer.configure(configs, isKey);
    }

    @Override
    public void configure(Map<String, ?> configs, boolean isKey) {
        Map<String, Object> conf = new HashMap<>(configs);
        conf.put(StringConverterConfig.TYPE_CONFIG, isKey ? ConverterType.KEY.getName() : ConverterType.VALUE.getName());
        configure(conf);
    }

    @SuppressWarnings("unchecked")
    protected T cast(Object value) {
        return (T) value;
    }

    @Override
    public byte[] fromConnectData(String topic, Schema schema, Object value) {
        try {
            return serializer.serialize(topic, value == null ? null : cast(value));
        } catch (ClassCastException e) {
            throw new DataException("Failed to serialize to " + typeName + " (was " + value.getClass() + "): ", e);
        } catch (SerializationException e) {
            throw new DataException("Failed to serialize to " + typeName + ": ", e);
        }
    }

    @Override
    public SchemaAndValue toConnectData(String topic, byte[] value) {
        try {
            return new SchemaAndValue(schema, deserializer.deserialize(topic, value));
        } catch (SerializationException e) {
            throw new DataException("Failed to deserialize " + typeName + ": ", e);
        }
    }

    @Override
    public byte[] fromConnectHeader(String topic, String headerKey, Schema schema, Object value) {
        return fromConnectData(topic, schema, value);
    }

    @Override
    public SchemaAndValue toConnectHeader(String topic, String headerKey, byte[] value) {
        return toConnectData(topic, value);
    }

    @Override
    public void close() throws IOException {
    }
}
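A short sketch of the round trip the NumberConverter subclasses provide, shown here with LongConverter; the topic name is illustrative, serialization delegates to LongSerializer, and deserialization always reports the converter's fixed OPTIONAL_INT64 schema.

import org.apache.kafka.connect.converters.LongConverter;
import org.apache.kafka.connect.data.SchemaAndValue;

import java.util.Collections;

public class LongConverterExample {
    public static void main(String[] args) {
        LongConverter converter = new LongConverter();
        converter.configure(Collections.emptyMap(), true); // configure as a key converter

        // Connect value -> bytes; the schema argument is not consulted by NumberConverter.
        byte[] bytes = converter.fromConnectData("demo-topic", null, 42L);

        // bytes -> Connect value with the converter's fixed schema.
        SchemaAndValue restored = converter.toConnectData("demo-topic", bytes);
        System.out.println(restored.value()); // 42
    }
}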
@@ -0,0 +1,39 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.connect.converters;

import org.apache.kafka.common.config.ConfigDef;
import org.apache.kafka.connect.storage.ConverterConfig;

import java.util.Map;

/**
 * Configuration options for instances of {@link LongConverter}, {@link IntegerConverter}, {@link ShortConverter}, {@link DoubleConverter},
 * and {@link FloatConverter}.
 */
public class NumberConverterConfig extends ConverterConfig {

    private final static ConfigDef CONFIG = ConverterConfig.newConfigDef();

    public static ConfigDef configDef() {
        return CONFIG;
    }

    public NumberConverterConfig(Map<String, ?> props) {
        super(CONFIG, props);
    }
}
@@ -0,0 +1,37 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.connect.converters;

import org.apache.kafka.common.serialization.ShortDeserializer;
import org.apache.kafka.common.serialization.ShortSerializer;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.storage.Converter;
import org.apache.kafka.connect.storage.HeaderConverter;

/**
 * {@link Converter} and {@link HeaderConverter} implementation that only supports serializing to and deserializing from short values.
 * It does support handling nulls. When converting from bytes to Kafka Connect format, the converter will always return an
 * optional INT16 schema.
 * <p>
 * This implementation currently does nothing with the topic names or header names.
 */
public class ShortConverter extends NumberConverter<Short> {

    public ShortConverter() {
        super("short", Schema.OPTIONAL_INT16_SCHEMA, new ShortSerializer(), new ShortDeserializer());
    }
}
@@ -0,0 +1,636 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.runtime;
|
||||
|
||||
import org.apache.kafka.clients.producer.ProducerConfig;
|
||||
import org.apache.kafka.common.config.AbstractConfig;
|
||||
import org.apache.kafka.common.config.Config;
|
||||
import org.apache.kafka.common.config.ConfigDef;
|
||||
import org.apache.kafka.common.config.ConfigDef.ConfigKey;
|
||||
import org.apache.kafka.common.config.ConfigDef.Type;
|
||||
import org.apache.kafka.common.config.ConfigTransformer;
|
||||
import org.apache.kafka.common.config.ConfigValue;
|
||||
import org.apache.kafka.connect.connector.Connector;
|
||||
import org.apache.kafka.connect.connector.policy.ConnectorClientConfigOverridePolicy;
|
||||
import org.apache.kafka.connect.connector.policy.ConnectorClientConfigRequest;
|
||||
import org.apache.kafka.connect.errors.NotFoundException;
|
||||
import org.apache.kafka.connect.runtime.distributed.ClusterConfigState;
|
||||
import org.apache.kafka.connect.runtime.isolation.Plugins;
|
||||
import org.apache.kafka.connect.runtime.rest.entities.ActiveTopicsInfo;
|
||||
import org.apache.kafka.connect.runtime.rest.entities.ConfigInfo;
|
||||
import org.apache.kafka.connect.runtime.rest.entities.ConfigInfos;
|
||||
import org.apache.kafka.connect.runtime.rest.entities.ConfigKeyInfo;
|
||||
import org.apache.kafka.connect.runtime.rest.entities.ConfigValueInfo;
|
||||
import org.apache.kafka.connect.runtime.rest.entities.ConnectorInfo;
|
||||
import org.apache.kafka.connect.runtime.rest.entities.ConnectorStateInfo;
|
||||
import org.apache.kafka.connect.runtime.rest.entities.ConnectorType;
|
||||
import org.apache.kafka.connect.runtime.rest.errors.BadRequestException;
|
||||
import org.apache.kafka.connect.source.SourceConnector;
|
||||
import org.apache.kafka.connect.storage.ConfigBackingStore;
|
||||
import org.apache.kafka.connect.storage.StatusBackingStore;
|
||||
import org.apache.kafka.connect.util.Callback;
|
||||
import org.apache.kafka.connect.util.ConnectorTaskId;
|
||||
|
||||
import java.io.ByteArrayOutputStream;
|
||||
import java.io.PrintStream;
|
||||
import java.io.UnsupportedEncodingException;
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.LinkedHashMap;
|
||||
import java.util.LinkedHashSet;
|
||||
import java.util.LinkedList;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
import java.util.regex.Matcher;
|
||||
import java.util.regex.Pattern;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
/**
|
||||
* Abstract Herder implementation which handles connector/task lifecycle tracking. Extensions
|
||||
* must invoke the lifecycle hooks appropriately.
|
||||
*
|
||||
* This class takes the following approach for sending status updates to the backing store:
|
||||
*
|
||||
* 1) When the connector or task is starting, we overwrite the previous state blindly. This ensures that
|
||||
* every rebalance will reset the state of tasks to the proper state. The intuition is that there should
|
||||
* be less chance of write conflicts when the worker has just received its assignment and is starting tasks.
|
||||
* In particular, this prevents us from depending on the generation absolutely. If the group disappears
|
||||
* and the generation is reset, then we'll overwrite the status information with the older (and larger)
|
||||
* generation with the updated one. The danger of this approach is that slow starting tasks may cause the
|
||||
* status to be overwritten after a rebalance has completed.
|
||||
*
|
||||
* 2) If the connector or task fails or is shutdown, we use {@link StatusBackingStore#putSafe(ConnectorStatus)},
|
||||
* which provides a little more protection if the worker is no longer in the group (in which case the
|
||||
* task may have already been started on another worker). Obviously this is still racy. If the task has just
|
||||
* started on another worker, we may not have the updated status cached yet. In this case, we'll overwrite
|
||||
* the value which will cause the state to be inconsistent (most likely until the next rebalance). Until
|
||||
* we have proper producer groups with fenced groups, there is not much else we can do.
|
||||
*/
|
||||
public abstract class AbstractHerder implements Herder, TaskStatus.Listener, ConnectorStatus.Listener {
|
||||
|
||||
private final String workerId;
|
||||
protected final Worker worker;
|
||||
private final String kafkaClusterId;
|
||||
protected final StatusBackingStore statusBackingStore;
|
||||
protected final ConfigBackingStore configBackingStore;
|
||||
private final ConnectorClientConfigOverridePolicy connectorClientConfigOverridePolicy;
|
||||
|
||||
private Map<String, Connector> tempConnectors = new ConcurrentHashMap<>();
|
||||
|
||||
public AbstractHerder(Worker worker,
|
||||
String workerId,
|
||||
String kafkaClusterId,
|
||||
StatusBackingStore statusBackingStore,
|
||||
ConfigBackingStore configBackingStore,
|
||||
ConnectorClientConfigOverridePolicy connectorClientConfigOverridePolicy) {
|
||||
this.worker = worker;
|
||||
this.worker.herder = this;
|
||||
this.workerId = workerId;
|
||||
this.kafkaClusterId = kafkaClusterId;
|
||||
this.statusBackingStore = statusBackingStore;
|
||||
this.configBackingStore = configBackingStore;
|
||||
this.connectorClientConfigOverridePolicy = connectorClientConfigOverridePolicy;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String kafkaClusterId() {
|
||||
return kafkaClusterId;
|
||||
}
|
||||
|
||||
protected abstract int generation();
|
||||
|
||||
protected void startServices() {
|
||||
this.worker.start();
|
||||
this.statusBackingStore.start();
|
||||
this.configBackingStore.start();
|
||||
}
|
||||
|
||||
protected void stopServices() {
|
||||
this.statusBackingStore.stop();
|
||||
this.configBackingStore.stop();
|
||||
this.worker.stop();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onStartup(String connector) {
|
||||
statusBackingStore.put(new ConnectorStatus(connector, ConnectorStatus.State.RUNNING,
|
||||
workerId, generation()));
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onPause(String connector) {
|
||||
statusBackingStore.put(new ConnectorStatus(connector, ConnectorStatus.State.PAUSED,
|
||||
workerId, generation()));
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onResume(String connector) {
|
||||
statusBackingStore.put(new ConnectorStatus(connector, TaskStatus.State.RUNNING,
|
||||
workerId, generation()));
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onShutdown(String connector) {
|
||||
statusBackingStore.putSafe(new ConnectorStatus(connector, ConnectorStatus.State.UNASSIGNED,
|
||||
workerId, generation()));
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(String connector, Throwable cause) {
|
||||
statusBackingStore.putSafe(new ConnectorStatus(connector, ConnectorStatus.State.FAILED,
|
||||
trace(cause), workerId, generation()));
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onStartup(ConnectorTaskId id) {
|
||||
statusBackingStore.put(new TaskStatus(id, TaskStatus.State.RUNNING, workerId, generation()));
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(ConnectorTaskId id, Throwable cause) {
|
||||
statusBackingStore.putSafe(new TaskStatus(id, TaskStatus.State.FAILED, workerId, generation(), trace(cause)));
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onShutdown(ConnectorTaskId id) {
|
||||
statusBackingStore.putSafe(new TaskStatus(id, TaskStatus.State.UNASSIGNED, workerId, generation()));
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onResume(ConnectorTaskId id) {
|
||||
statusBackingStore.put(new TaskStatus(id, TaskStatus.State.RUNNING, workerId, generation()));
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onPause(ConnectorTaskId id) {
|
||||
statusBackingStore.put(new TaskStatus(id, TaskStatus.State.PAUSED, workerId, generation()));
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onDeletion(String connector) {
|
||||
for (TaskStatus status : statusBackingStore.getAll(connector))
|
||||
statusBackingStore.put(new TaskStatus(status.id(), TaskStatus.State.DESTROYED, workerId, generation()));
|
||||
statusBackingStore.put(new ConnectorStatus(connector, ConnectorStatus.State.DESTROYED, workerId, generation()));
|
||||
}
|
||||
|
||||
@Override
|
||||
public void pauseConnector(String connector) {
|
||||
if (!configBackingStore.contains(connector))
|
||||
throw new NotFoundException("Unknown connector " + connector);
|
||||
configBackingStore.putTargetState(connector, TargetState.PAUSED);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void resumeConnector(String connector) {
|
||||
if (!configBackingStore.contains(connector))
|
||||
throw new NotFoundException("Unknown connector " + connector);
|
||||
configBackingStore.putTargetState(connector, TargetState.STARTED);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Plugins plugins() {
|
||||
return worker.getPlugins();
|
||||
}
|
||||
|
||||
/*
|
||||
* Retrieves config map by connector name
|
||||
*/
|
||||
protected abstract Map<String, String> config(String connName);
|
||||
|
||||
@Override
|
||||
public Collection<String> connectors() {
|
||||
return configBackingStore.snapshot().connectors();
|
||||
}
|
||||
|
||||
@Override
|
||||
public ConnectorInfo connectorInfo(String connector) {
|
||||
final ClusterConfigState configState = configBackingStore.snapshot();
|
||||
|
||||
if (!configState.contains(connector))
|
||||
return null;
|
||||
Map<String, String> config = configState.rawConnectorConfig(connector);
|
||||
|
||||
return new ConnectorInfo(
|
||||
connector,
|
||||
config,
|
||||
configState.tasks(connector),
|
||||
connectorTypeForClass(config.get(ConnectorConfig.CONNECTOR_CLASS_CONFIG))
|
||||
);
|
||||
}
|
||||
|
||||
@Override
|
||||
public ConnectorStateInfo connectorStatus(String connName) {
|
||||
ConnectorStatus connector = statusBackingStore.get(connName);
|
||||
if (connector == null)
|
||||
throw new NotFoundException("No status found for connector " + connName);
|
||||
|
||||
Collection<TaskStatus> tasks = statusBackingStore.getAll(connName);
|
||||
|
||||
ConnectorStateInfo.ConnectorState connectorState = new ConnectorStateInfo.ConnectorState(
|
||||
connector.state().toString(), connector.workerId(), connector.trace());
|
||||
List<ConnectorStateInfo.TaskState> taskStates = new ArrayList<>();
|
||||
|
||||
for (TaskStatus status : tasks) {
|
||||
taskStates.add(new ConnectorStateInfo.TaskState(status.id().task(),
|
||||
status.state().toString(), status.workerId(), status.trace()));
|
||||
}
|
||||
|
||||
Collections.sort(taskStates);
|
||||
|
||||
Map<String, String> conf = config(connName);
|
||||
return new ConnectorStateInfo(connName, connectorState, taskStates,
|
||||
conf == null ? ConnectorType.UNKNOWN : connectorTypeForClass(conf.get(ConnectorConfig.CONNECTOR_CLASS_CONFIG)));
|
||||
}
|
||||
|
||||
@Override
|
||||
public ActiveTopicsInfo connectorActiveTopics(String connName) {
|
||||
Collection<String> topics = statusBackingStore.getAllTopics(connName).stream()
|
||||
.map(TopicStatus::topic)
|
||||
.collect(Collectors.toList());
|
||||
return new ActiveTopicsInfo(connName, topics);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void resetConnectorActiveTopics(String connName) {
|
||||
statusBackingStore.getAllTopics(connName).stream()
|
||||
.forEach(status -> statusBackingStore.deleteTopic(status.connector(), status.topic()));
|
||||
}
|
||||
|
||||
@Override
|
||||
public StatusBackingStore statusBackingStore() {
|
||||
return statusBackingStore;
|
||||
}
|
||||
|
||||
@Override
|
||||
public ConnectorStateInfo.TaskState taskStatus(ConnectorTaskId id) {
|
||||
TaskStatus status = statusBackingStore.get(id);
|
||||
|
||||
if (status == null)
|
||||
throw new NotFoundException("No status found for task " + id);
|
||||
|
||||
return new ConnectorStateInfo.TaskState(id.task(), status.state().toString(),
|
||||
status.workerId(), status.trace());
|
||||
}
|
||||
|
||||
protected Map<String, ConfigValue> validateBasicConnectorConfig(Connector connector,
|
||||
ConfigDef configDef,
|
||||
Map<String, String> config) {
|
||||
return configDef.validateAll(config);
|
||||
}
|
||||
|
||||
@Override
|
||||
public ConfigInfos validateConnectorConfig(Map<String, String> connectorProps) {
|
||||
if (worker.configTransformer() != null) {
|
||||
connectorProps = worker.configTransformer().transform(connectorProps);
|
||||
}
|
||||
String connType = connectorProps.get(ConnectorConfig.CONNECTOR_CLASS_CONFIG);
|
||||
if (connType == null)
|
||||
throw new BadRequestException("Connector config " + connectorProps + " contains no connector type");
|
||||
|
||||
Connector connector = getConnector(connType);
|
||||
org.apache.kafka.connect.health.ConnectorType connectorType;
|
||||
ClassLoader savedLoader = plugins().compareAndSwapLoaders(connector);
|
||||
try {
|
||||
ConfigDef baseConfigDef;
|
||||
if (connector instanceof SourceConnector) {
|
||||
baseConfigDef = SourceConnectorConfig.configDef();
|
||||
connectorType = org.apache.kafka.connect.health.ConnectorType.SOURCE;
|
||||
} else {
|
||||
baseConfigDef = SinkConnectorConfig.configDef();
|
||||
SinkConnectorConfig.validate(connectorProps);
|
||||
connectorType = org.apache.kafka.connect.health.ConnectorType.SINK;
|
||||
}
|
||||
ConfigDef enrichedConfigDef = ConnectorConfig.enrich(plugins(), baseConfigDef, connectorProps, false);
|
||||
Map<String, ConfigValue> validatedConnectorConfig = validateBasicConnectorConfig(
|
||||
connector,
|
||||
enrichedConfigDef,
|
||||
connectorProps
|
||||
);
|
||||
List<ConfigValue> configValues = new ArrayList<>(validatedConnectorConfig.values());
|
||||
Map<String, ConfigKey> configKeys = new LinkedHashMap<>(enrichedConfigDef.configKeys());
|
||||
Set<String> allGroups = new LinkedHashSet<>(enrichedConfigDef.groups());
|
||||
|
||||
// do custom connector-specific validation
|
||||
Config config = connector.validate(connectorProps);
|
||||
if (null == config) {
|
||||
throw new BadRequestException(
|
||||
String.format(
|
||||
"%s.validate() must return a Config that is not null.",
|
||||
connector.getClass().getName()
|
||||
)
|
||||
);
|
||||
}
|
||||
ConfigDef configDef = connector.config();
|
||||
if (null == configDef) {
|
||||
throw new BadRequestException(
|
||||
String.format(
|
||||
"%s.config() must return a ConfigDef that is not null.",
|
||||
connector.getClass().getName()
|
||||
)
|
||||
);
|
||||
}
|
||||
configKeys.putAll(configDef.configKeys());
|
||||
allGroups.addAll(configDef.groups());
|
||||
configValues.addAll(config.configValues());
|
||||
ConfigInfos configInfos = generateResult(connType, configKeys, configValues, new ArrayList<>(allGroups));
|
||||
|
||||
AbstractConfig connectorConfig = new AbstractConfig(new ConfigDef(), connectorProps);
|
||||
String connName = connectorProps.get(ConnectorConfig.NAME_CONFIG);
|
||||
ConfigInfos producerConfigInfos = null;
|
||||
ConfigInfos consumerConfigInfos = null;
|
||||
ConfigInfos adminConfigInfos = null;
|
||||
if (connectorType.equals(org.apache.kafka.connect.health.ConnectorType.SOURCE)) {
|
||||
producerConfigInfos = validateClientOverrides(connName,
|
||||
ConnectorConfig.CONNECTOR_CLIENT_PRODUCER_OVERRIDES_PREFIX,
|
||||
connectorConfig,
|
||||
ProducerConfig.configDef(),
|
||||
connector.getClass(),
|
||||
connectorType,
|
||||
ConnectorClientConfigRequest.ClientType.PRODUCER,
|
||||
connectorClientConfigOverridePolicy);
|
||||
return mergeConfigInfos(connType, configInfos, producerConfigInfos);
|
||||
} else {
|
||||
consumerConfigInfos = validateClientOverrides(connName,
|
||||
ConnectorConfig.CONNECTOR_CLIENT_CONSUMER_OVERRIDES_PREFIX,
|
||||
connectorConfig,
|
||||
ConsumerConfig.configDef(),   // consumer ConfigDef for consumer.override.* keys (assumes the ConsumerConfig import)
|
||||
connector.getClass(),
|
||||
connectorType,
|
||||
ConnectorClientConfigRequest.ClientType.CONSUMER,
|
||||
connectorClientConfigOverridePolicy);
|
||||
// check if topic for dead letter queue exists
|
||||
String topic = connectorProps.get(SinkConnectorConfig.DLQ_TOPIC_NAME_CONFIG);
|
||||
if (topic != null && !topic.isEmpty()) {
|
||||
adminConfigInfos = validateClientOverrides(connName,
|
||||
ConnectorConfig.CONNECTOR_CLIENT_ADMIN_OVERRIDES_PREFIX,
|
||||
connectorConfig,
|
||||
AdminClientConfig.configDef(),   // admin ConfigDef for admin.override.* keys (assumes the AdminClientConfig import)
|
||||
connector.getClass(),
|
||||
connectorType,
|
||||
ConnectorClientConfigRequest.ClientType.ADMIN,
|
||||
connectorClientConfigOverridePolicy);
|
||||
}
|
||||
|
||||
}
|
||||
return mergeConfigInfos(connType, configInfos, producerConfigInfos, consumerConfigInfos, adminConfigInfos);
|
||||
} finally {
|
||||
Plugins.compareAndSwapLoaders(savedLoader);
|
||||
}
|
||||
}
|
||||
|
||||
private static ConfigInfos mergeConfigInfos(String connType, ConfigInfos... configInfosList) {
|
||||
int errorCount = 0;
|
||||
List<ConfigInfo> configInfoList = new LinkedList<>();
|
||||
Set<String> groups = new LinkedHashSet<>();
|
||||
for (ConfigInfos configInfos : configInfosList) {
|
||||
if (configInfos != null) {
|
||||
errorCount += configInfos.errorCount();
|
||||
configInfoList.addAll(configInfos.values());
|
||||
groups.addAll(configInfos.groups());
|
||||
}
|
||||
}
|
||||
return new ConfigInfos(connType, errorCount, new ArrayList<>(groups), configInfoList);
|
||||
}
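    // Illustrative note (not in the original source): null entries are skipped, so a call such as
    // mergeConfigInfos(connType, connectorConfigInfos, producerConfigInfos, null, null) yields a single
    // ConfigInfos whose errorCount is the sum of the non-null inputs and whose values and groups are
    // their concatenation, with group insertion order preserved. (Variable names here are hypothetical.)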
|
||||
|
||||
private static ConfigInfos validateClientOverrides(String connName,
|
||||
String prefix,
|
||||
AbstractConfig connectorConfig,
|
||||
ConfigDef configDef,
|
||||
Class<? extends Connector> connectorClass,
|
||||
org.apache.kafka.connect.health.ConnectorType connectorType,
|
||||
ConnectorClientConfigRequest.ClientType clientType,
|
||||
ConnectorClientConfigOverridePolicy connectorClientConfigOverridePolicy) {
|
||||
int errorCount = 0;
|
||||
List<ConfigInfo> configInfoList = new LinkedList<>();
|
||||
Map<String, ConfigKey> configKeys = configDef.configKeys();
|
||||
Set<String> groups = new LinkedHashSet<>();
|
||||
Map<String, Object> clientConfigs = new HashMap<>();
|
||||
for (Map.Entry<String, Object> rawClientConfig : connectorConfig.originalsWithPrefix(prefix).entrySet()) {
|
||||
String configName = rawClientConfig.getKey();
|
||||
Object rawConfigValue = rawClientConfig.getValue();
|
||||
ConfigKey configKey = configDef.configKeys().get(configName);
|
||||
Object parsedConfigValue = configKey != null
|
||||
? ConfigDef.parseType(configName, rawConfigValue, configKey.type)
|
||||
: rawConfigValue;
|
||||
clientConfigs.put(configName, parsedConfigValue);
|
||||
}
|
||||
ConnectorClientConfigRequest connectorClientConfigRequest = new ConnectorClientConfigRequest(
|
||||
connName, connectorType, connectorClass, clientConfigs, clientType);
|
||||
List<ConfigValue> configValues = connectorClientConfigOverridePolicy.validate(connectorClientConfigRequest);
|
||||
if (configValues != null) {
|
||||
for (ConfigValue validatedConfigValue : configValues) {
|
||||
ConfigKey configKey = configKeys.get(validatedConfigValue.name());
|
||||
ConfigKeyInfo configKeyInfo = null;
|
||||
if (configKey != null) {
|
||||
if (configKey.group != null) {
|
||||
groups.add(configKey.group);
|
||||
}
|
||||
configKeyInfo = convertConfigKey(configKey, prefix);
|
||||
}
|
||||
|
||||
ConfigValue configValue = new ConfigValue(prefix + validatedConfigValue.name(), validatedConfigValue.value(),
|
||||
validatedConfigValue.recommendedValues(), validatedConfigValue.errorMessages());
|
||||
if (configValue.errorMessages().size() > 0) {
|
||||
errorCount++;
|
||||
}
|
||||
ConfigValueInfo configValueInfo = convertConfigValue(configValue, configKey != null ? configKey.type : null);
|
||||
configInfoList.add(new ConfigInfo(configKeyInfo, configValueInfo));
|
||||
}
|
||||
}
|
||||
return new ConfigInfos(connectorClass.toString(), errorCount, new ArrayList<>(groups), configInfoList);
|
||||
}
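    // Illustrative note (not in the original source): override keys arrive in the connector config with a
    // client prefix, e.g. a source connector config entry "producer.override.acks" = "all". For that entry,
    // originalsWithPrefix(prefix) yields {"acks": "all"}, the value is parsed against the client ConfigDef
    // passed in, the override policy decides whether the override is allowed, and the resulting ConfigValue
    // is reported back under the re-prefixed name "producer.override.acks".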
|
||||
|
||||
// public for testing
|
||||
public static ConfigInfos generateResult(String connType, Map<String, ConfigKey> configKeys, List<ConfigValue> configValues, List<String> groups) {
|
||||
int errorCount = 0;
|
||||
List<ConfigInfo> configInfoList = new LinkedList<>();
|
||||
|
||||
Map<String, ConfigValue> configValueMap = new HashMap<>();
|
||||
for (ConfigValue configValue: configValues) {
|
||||
String configName = configValue.name();
|
||||
configValueMap.put(configName, configValue);
|
||||
if (!configKeys.containsKey(configName)) {
|
||||
configValue.addErrorMessage("Configuration is not defined: " + configName);
|
||||
configInfoList.add(new ConfigInfo(null, convertConfigValue(configValue, null)));
|
||||
}
|
||||
}
|
||||
|
||||
for (Map.Entry<String, ConfigKey> entry : configKeys.entrySet()) {
|
||||
String configName = entry.getKey();
|
||||
ConfigKeyInfo configKeyInfo = convertConfigKey(entry.getValue());
|
||||
Type type = entry.getValue().type;
|
||||
ConfigValueInfo configValueInfo = null;
|
||||
if (configValueMap.containsKey(configName)) {
|
||||
ConfigValue configValue = configValueMap.get(configName);
|
||||
configValueInfo = convertConfigValue(configValue, type);
|
||||
errorCount += configValue.errorMessages().size();
|
||||
}
|
||||
configInfoList.add(new ConfigInfo(configKeyInfo, configValueInfo));
|
||||
}
|
||||
return new ConfigInfos(connType, errorCount, groups, configInfoList);
|
||||
}
|
||||
|
||||
private static ConfigKeyInfo convertConfigKey(ConfigKey configKey) {
|
||||
return convertConfigKey(configKey, "");
|
||||
}
|
||||
|
||||
private static ConfigKeyInfo convertConfigKey(ConfigKey configKey, String prefix) {
|
||||
String name = prefix + configKey.name;
|
||||
Type type = configKey.type;
|
||||
String typeName = configKey.type.name();
|
||||
|
||||
boolean required = false;
|
||||
String defaultValue;
|
||||
if (ConfigDef.NO_DEFAULT_VALUE.equals(configKey.defaultValue)) {
|
||||
defaultValue = null;
|
||||
required = true;
|
||||
} else {
|
||||
defaultValue = ConfigDef.convertToString(configKey.defaultValue, type);
|
||||
}
|
||||
String importance = configKey.importance.name();
|
||||
String documentation = configKey.documentation;
|
||||
String group = configKey.group;
|
||||
int orderInGroup = configKey.orderInGroup;
|
||||
String width = configKey.width.name();
|
||||
String displayName = configKey.displayName;
|
||||
List<String> dependents = configKey.dependents;
|
||||
return new ConfigKeyInfo(name, typeName, required, defaultValue, importance, documentation, group, orderInGroup, width, displayName, dependents);
|
||||
}
|
||||
|
||||
private static ConfigValueInfo convertConfigValue(ConfigValue configValue, Type type) {
|
||||
String value = ConfigDef.convertToString(configValue.value(), type);
|
||||
List<String> recommendedValues = new LinkedList<>();
|
||||
|
||||
if (type == Type.LIST) {
|
||||
for (Object object: configValue.recommendedValues()) {
|
||||
recommendedValues.add(ConfigDef.convertToString(object, Type.STRING));
|
||||
}
|
||||
} else {
|
||||
for (Object object : configValue.recommendedValues()) {
|
||||
recommendedValues.add(ConfigDef.convertToString(object, type));
|
||||
}
|
||||
}
|
||||
return new ConfigValueInfo(configValue.name(), value, recommendedValues, configValue.errorMessages(), configValue.visible());
|
||||
}
|
||||
|
||||
protected Connector getConnector(String connType) {
|
||||
if (tempConnectors.containsKey(connType)) {
|
||||
return tempConnectors.get(connType);
|
||||
} else {
|
||||
Connector connector = plugins().newConnector(connType);
|
||||
tempConnectors.put(connType, connector);
|
||||
return connector;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Retrieves ConnectorType for the corresponding connector class
|
||||
* @param connClass class of the connector
|
||||
*/
|
||||
public ConnectorType connectorTypeForClass(String connClass) {
|
||||
return ConnectorType.from(getConnector(connClass).getClass());
|
||||
}
|
||||
|
||||
/**
|
||||
* Checks a given {@link ConfigInfos} for validation error messages and adds an exception
|
||||
* to the given {@link Callback} if any were found.
|
||||
*
|
||||
* @param configInfos configInfos to read Errors from
|
||||
* @param callback callback to add config error exception to
|
||||
* @return true if errors were found in the config
|
||||
*/
|
||||
protected final boolean maybeAddConfigErrors(
|
||||
ConfigInfos configInfos,
|
||||
Callback<Created<ConnectorInfo>> callback
|
||||
) {
|
||||
int errors = configInfos.errorCount();
|
||||
boolean hasErrors = errors > 0;
|
||||
if (hasErrors) {
|
||||
StringBuilder messages = new StringBuilder();
|
||||
messages.append("Connector configuration is invalid and contains the following ")
|
||||
.append(errors).append(" error(s):");
|
||||
for (ConfigInfo configInfo : configInfos.values()) {
|
||||
for (String msg : configInfo.configValue().errors()) {
|
||||
messages.append('\n').append(msg);
|
||||
}
|
||||
}
|
||||
callback.onCompletion(
|
||||
new BadRequestException(
|
||||
messages.append(
|
||||
"\nYou can also find the above list of errors at the endpoint `/{connectorType}/config/validate`"
|
||||
).toString()
|
||||
), null
|
||||
);
|
||||
}
|
||||
return hasErrors;
|
||||
}
|
||||
|
||||
private String trace(Throwable t) {
|
||||
ByteArrayOutputStream output = new ByteArrayOutputStream();
|
||||
try {
|
||||
t.printStackTrace(new PrintStream(output, false, StandardCharsets.UTF_8.name()));
|
||||
return output.toString("UTF-8");
|
||||
} catch (UnsupportedEncodingException e) {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Performs a reverse transformation on a set of task configs, by replacing values with variable references.
|
||||
*/
|
||||
public static List<Map<String, String>> reverseTransform(String connName,
|
||||
ClusterConfigState configState,
|
||||
List<Map<String, String>> configs) {
|
||||
|
||||
// Find the config keys in the raw connector config that have variable references
|
||||
Map<String, String> rawConnConfig = configState.rawConnectorConfig(connName);
|
||||
Set<String> connKeysWithVariableValues = keysWithVariableValues(rawConnConfig, ConfigTransformer.DEFAULT_PATTERN);
|
||||
|
||||
List<Map<String, String>> result = new ArrayList<>();
|
||||
for (Map<String, String> config : configs) {
|
||||
Map<String, String> newConfig = new HashMap<>(config);
|
||||
for (String key : connKeysWithVariableValues) {
|
||||
if (newConfig.containsKey(key)) {
|
||||
newConfig.put(key, rawConnConfig.get(key));
|
||||
}
|
||||
}
|
||||
result.add(newConfig);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
// Visible for testing
|
||||
static Set<String> keysWithVariableValues(Map<String, String> rawConfig, Pattern pattern) {
|
||||
Set<String> keys = new HashSet<>();
|
||||
for (Map.Entry<String, String> config : rawConfig.entrySet()) {
|
||||
if (config.getValue() != null) {
|
||||
Matcher matcher = pattern.matcher(config.getValue());
|
||||
if (matcher.find()) {
|
||||
keys.add(config.getKey());
|
||||
}
|
||||
}
|
||||
}
|
||||
return keys;
|
||||
}
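    // Illustrative note (not in the original source): ConfigTransformer.DEFAULT_PATTERN matches externalized
    // config references of the form ${provider:path:key}. A sketch of reverseTransform(): if the raw connector
    // config has "connection.password" = "${vault:connect/creds:password}" (provider and path hypothetical)
    // and the generated task configs contain the resolved secret, the value is swapped back to the original
    // ${vault:...} reference before the task configs are persisted, so resolved secrets are not written out.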
|
||||
|
||||
}
@@ -0,0 +1,102 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.connect.runtime;

import java.util.Objects;

public abstract class AbstractStatus<T> {

    public enum State {
        UNASSIGNED,
        RUNNING,
        PAUSED,
        FAILED,
        DESTROYED,
    }

    private final T id;
    private final State state;
    private final String trace;
    private final String workerId;
    private final int generation;

    public AbstractStatus(T id,
                          State state,
                          String workerId,
                          int generation,
                          String trace) {
        this.id = id;
        this.state = state;
        this.workerId = workerId;
        this.generation = generation;
        this.trace = trace;
    }

    public T id() {
        return id;
    }

    public State state() {
        return state;
    }

    public String trace() {
        return trace;
    }

    public String workerId() {
        return workerId;
    }

    public int generation() {
        return generation;
    }

    @Override
    public String toString() {
        return "Status{" +
                "id=" + id +
                ", state=" + state +
                ", workerId='" + workerId + '\'' +
                ", generation=" + generation +
                '}';
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;

        AbstractStatus<?> that = (AbstractStatus<?>) o;

        return generation == that.generation
                && Objects.equals(id, that.id)
                && state == that.state
                && Objects.equals(trace, that.trace)
                && Objects.equals(workerId, that.workerId);
    }

    @Override
    public int hashCode() {
        int result = id != null ? id.hashCode() : 0;
        result = 31 * result + (state != null ? state.hashCode() : 0);
        result = 31 * result + (trace != null ? trace.hashCode() : 0);
        result = 31 * result + (workerId != null ? workerId.hashCode() : 0);
        result = 31 * result + generation;
        return result;
    }
}
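A minimal usage sketch, assuming the concrete ConnectorStatus and TaskStatus subclasses defined elsewhere in this module and their (id, state, workerId, generation) constructors:

    TaskStatus status = new TaskStatus(new ConnectorTaskId("my-connector", 0),
            TaskStatus.State.RUNNING, "worker-1:8083", 5);
    String summary = status.toString();   // e.g. Status{id=my-connector-0, state=RUNNING, workerId='worker-1:8083', generation=5}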
@@ -0,0 +1,107 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.connect.runtime;

import org.apache.kafka.common.utils.Exit;
import org.apache.kafka.connect.runtime.rest.RestServer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.net.URI;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicBoolean;

/**
 * This class ties together all the components of a Kafka Connect process (herder, worker,
 * storage, command interface), managing their lifecycle.
 */
public class Connect {
    private static final Logger log = LoggerFactory.getLogger(Connect.class);

    private final Herder herder;
    private final RestServer rest;
    private final CountDownLatch startLatch = new CountDownLatch(1);
    private final CountDownLatch stopLatch = new CountDownLatch(1);
    private final AtomicBoolean shutdown = new AtomicBoolean(false);
    private final ShutdownHook shutdownHook;

    public Connect(Herder herder, RestServer rest) {
        log.debug("Kafka Connect instance created");
        this.herder = herder;
        this.rest = rest;
        shutdownHook = new ShutdownHook();
    }

    public void start() {
        try {
            log.info("Kafka Connect starting");
            Exit.addShutdownHook("connect-shutdown-hook", shutdownHook);

            herder.start();
            rest.initializeResources(herder);

            log.info("Kafka Connect started");
        } finally {
            startLatch.countDown();
        }
    }

    public void stop() {
        try {
            boolean wasShuttingDown = shutdown.getAndSet(true);
            if (!wasShuttingDown) {
                log.info("Kafka Connect stopping");

                rest.stop();
                herder.stop();

                log.info("Kafka Connect stopped");
            }
        } finally {
            stopLatch.countDown();
        }
    }

    public void awaitStop() {
        try {
            stopLatch.await();
        } catch (InterruptedException e) {
            log.error("Interrupted waiting for Kafka Connect to shutdown");
        }
    }

    // Visible for testing
    public URI restUrl() {
        return rest.serverUrl();
    }

    public URI adminUrl() {
        return rest.adminUrl();
    }

    private class ShutdownHook extends Thread {
        @Override
        public void run() {
            try {
                startLatch.await();
                Connect.this.stop();
            } catch (InterruptedException e) {
                log.error("Interrupted in shutdown hook while waiting for Kafka Connect startup to finish");
            }
        }
    }
}
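A minimal lifecycle sketch, assuming a Herder and RestServer have already been built the way the CLI entry points build them:

    Connect connect = new Connect(herder, rest);
    try {
        connect.start();        // installs the shutdown hook, starts the herder, exposes the REST resources
    } catch (Exception e) {
        connect.stop();
        Exit.exit(3);
    }
    connect.awaitStop();        // blocks until stop() has run, e.g. from the shutdown hook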
@@ -0,0 +1,447 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.runtime;
|
||||
|
||||
import org.apache.kafka.clients.CommonClientConfigs;
|
||||
import org.apache.kafka.common.MetricName;
|
||||
import org.apache.kafka.common.MetricNameTemplate;
|
||||
import org.apache.kafka.common.metrics.Gauge;
|
||||
import org.apache.kafka.common.metrics.JmxReporter;
|
||||
import org.apache.kafka.common.metrics.MetricConfig;
|
||||
import org.apache.kafka.common.metrics.Metrics;
|
||||
import org.apache.kafka.common.metrics.MetricsReporter;
|
||||
import org.apache.kafka.common.metrics.Sensor;
|
||||
import org.apache.kafka.common.utils.AppInfoParser;
|
||||
import org.apache.kafka.common.utils.Time;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import java.util.Collections;
|
||||
import java.util.HashSet;
|
||||
import java.util.LinkedHashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Objects;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
import java.util.concurrent.ConcurrentMap;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
/**
|
||||
* The Connect metrics with JMX reporter.
|
||||
*/
|
||||
public class ConnectMetrics {
|
||||
|
||||
public static final String JMX_PREFIX = "kafka.connect";
|
||||
|
||||
private static final Logger LOG = LoggerFactory.getLogger(ConnectMetrics.class);
|
||||
|
||||
private final Metrics metrics;
|
||||
private final Time time;
|
||||
private final String workerId;
|
||||
private final ConcurrentMap<MetricGroupId, MetricGroup> groupsByName = new ConcurrentHashMap<>();
|
||||
private final ConnectMetricsRegistry registry = new ConnectMetricsRegistry();
|
||||
|
||||
/**
|
||||
* Create an instance.
|
||||
*
|
||||
* @param workerId the worker identifier; may not be null
|
||||
* @param config the worker configuration; may not be null
|
||||
* @param time the time; may not be null
|
||||
*/
|
||||
public ConnectMetrics(String workerId, WorkerConfig config, Time time) {
|
||||
this(workerId, time, config.getInt(CommonClientConfigs.METRICS_NUM_SAMPLES_CONFIG),
|
||||
config.getLong(CommonClientConfigs.METRICS_SAMPLE_WINDOW_MS_CONFIG),
|
||||
config.getString(CommonClientConfigs.METRICS_RECORDING_LEVEL_CONFIG),
|
||||
config.getConfiguredInstances(CommonClientConfigs.METRIC_REPORTER_CLASSES_CONFIG, MetricsReporter.class));
|
||||
}
|
||||
|
||||
public ConnectMetrics(String workerId, Time time, int numSamples, long sampleWindowMs, String metricsRecordingLevel,
|
||||
List<MetricsReporter> reporters) {
|
||||
this.workerId = workerId;
|
||||
this.time = time;
|
||||
|
||||
MetricConfig metricConfig = new MetricConfig().samples(numSamples)
|
||||
.timeWindow(sampleWindowMs, TimeUnit.MILLISECONDS).recordLevel(
|
||||
Sensor.RecordingLevel.forName(metricsRecordingLevel));
|
||||
reporters.add(new JmxReporter(JMX_PREFIX));
|
||||
this.metrics = new Metrics(metricConfig, reporters, time);
|
||||
LOG.debug("Registering Connect metrics with JMX for worker '{}'", workerId);
|
||||
AppInfoParser.registerAppInfo(JMX_PREFIX, workerId, metrics, time.milliseconds());
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the worker identifier.
|
||||
*
|
||||
* @return the worker ID; never null
|
||||
*/
|
||||
public String workerId() {
|
||||
return workerId;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the {@link Metrics Kafka Metrics} that are managed by this object and that should be used to
|
||||
* add sensors and individual metrics.
|
||||
*
|
||||
* @return the Kafka Metrics instance; never null
|
||||
*/
|
||||
public Metrics metrics() {
|
||||
return metrics;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the registry of metric names.
|
||||
*
|
||||
* @return the registry for the Connect metrics; never null
|
||||
*/
|
||||
public ConnectMetricsRegistry registry() {
|
||||
return registry;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get or create a {@link MetricGroup} with the specified group name and the given tags.
|
||||
* Each group is uniquely identified by the name and tags.
|
||||
*
|
||||
* @param groupName the name of the metric group; may not be null
|
||||
* @param tagKeyValues pairs of tag name and values
|
||||
* @return the {@link MetricGroup} that can be used to create metrics; never null
|
||||
* @throws IllegalArgumentException if the group name is not valid
|
||||
*/
|
||||
public MetricGroup group(String groupName, String... tagKeyValues) {
|
||||
MetricGroupId groupId = groupId(groupName, tagKeyValues);
|
||||
MetricGroup group = groupsByName.get(groupId);
|
||||
if (group == null) {
|
||||
group = new MetricGroup(groupId);
|
||||
MetricGroup previous = groupsByName.putIfAbsent(groupId, group);
|
||||
if (previous != null)
|
||||
group = previous;
|
||||
}
|
||||
return group;
|
||||
}
|
||||
|
||||
protected MetricGroupId groupId(String groupName, String... tagKeyValues) {
|
||||
Map<String, String> tags = tags(tagKeyValues);
|
||||
return new MetricGroupId(groupName, tags);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the time.
|
||||
*
|
||||
* @return the time; never null
|
||||
*/
|
||||
public Time time() {
|
||||
return time;
|
||||
}
|
||||
|
||||
/**
|
||||
* Stop and unregister the metrics from any reporters.
|
||||
*/
|
||||
public void stop() {
|
||||
metrics.close();
|
||||
LOG.debug("Unregistering Connect metrics with JMX for worker '{}'", workerId);
|
||||
AppInfoParser.unregisterAppInfo(JMX_PREFIX, workerId, metrics);
|
||||
}
|
||||
|
||||
public static class MetricGroupId {
|
||||
private final String groupName;
|
||||
private final Map<String, String> tags;
|
||||
private final int hc;
|
||||
private final String str;
|
||||
|
||||
public MetricGroupId(String groupName, Map<String, String> tags) {
|
||||
Objects.requireNonNull(groupName);
|
||||
Objects.requireNonNull(tags);
|
||||
this.groupName = groupName;
|
||||
this.tags = Collections.unmodifiableMap(new LinkedHashMap<>(tags));
|
||||
this.hc = Objects.hash(this.groupName, this.tags);
|
||||
StringBuilder sb = new StringBuilder(this.groupName);
|
||||
for (Map.Entry<String, String> entry : this.tags.entrySet()) {
|
||||
sb.append(";").append(entry.getKey()).append('=').append(entry.getValue());
|
||||
}
|
||||
this.str = sb.toString();
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the group name.
|
||||
*
|
||||
* @return the group name; never null
|
||||
*/
|
||||
public String groupName() {
|
||||
return groupName;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the immutable map of tag names and values.
|
||||
*
|
||||
* @return the tags; never null
|
||||
*/
|
||||
public Map<String, String> tags() {
|
||||
return tags;
|
||||
}
|
||||
|
||||
/**
|
||||
* Determine if the supplied metric name is part of this group identifier.
|
||||
*
|
||||
* @param metricName the metric name
|
||||
* @return true if the metric name's group and tags match this group identifier, or false otherwise
|
||||
*/
|
||||
public boolean includes(MetricName metricName) {
|
||||
return metricName != null && groupName.equals(metricName.group()) && tags.equals(metricName.tags());
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return hc;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object obj) {
|
||||
if (obj == this)
|
||||
return true;
|
||||
if (obj instanceof MetricGroupId) {
|
||||
MetricGroupId that = (MetricGroupId) obj;
|
||||
return this.groupName.equals(that.groupName) && this.tags.equals(that.tags);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return str;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* A group of metrics. Each group maps to a JMX MBean and each metric maps to an MBean attribute.
|
||||
* <p>
|
||||
* Sensors should be added via the {@code sensor} methods on this class, rather than directly through
|
||||
* the {@link Metrics} class, so that the sensor names are made to be unique (based on the group name)
|
||||
* and so the sensors are removed when this group is {@link #close() closed}.
|
||||
*/
|
||||
public class MetricGroup implements AutoCloseable {
|
||||
private final MetricGroupId groupId;
|
||||
private final Set<String> sensorNames = new HashSet<>();
|
||||
private final String sensorPrefix;
|
||||
|
||||
/**
|
||||
* Create a group of Connect metrics.
|
||||
*
|
||||
* @param groupId the identifier of the group; may not be null and must be valid
|
||||
*/
|
||||
protected MetricGroup(MetricGroupId groupId) {
|
||||
Objects.requireNonNull(groupId);
|
||||
this.groupId = groupId;
|
||||
sensorPrefix = "connect-sensor-group: " + groupId.toString() + ";";
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the group identifier.
|
||||
*
|
||||
* @return the group identifier; never null
|
||||
*/
|
||||
public MetricGroupId groupId() {
|
||||
return groupId;
|
||||
}
|
||||
|
||||
/**
|
||||
* Create the name of a metric that belongs to this group and has the group's tags.
|
||||
*
|
||||
* @param template the name template for the metric; may not be null
|
||||
* @return the metric name; never null
|
||||
* @throws IllegalArgumentException if the name is not valid
|
||||
*/
|
||||
public MetricName metricName(MetricNameTemplate template) {
|
||||
return metrics.metricInstance(template, groupId.tags());
|
||||
}
|
||||
|
||||
// for testing only
|
||||
MetricName metricName(String name) {
|
||||
return metrics.metricName(name, groupId.groupName(), "", groupId.tags());
|
||||
}
|
||||
|
||||
/**
|
||||
* The {@link Metrics} that this group belongs to.
|
||||
* <p>
|
||||
* Do not use this to add {@link Sensor Sensors}, since they will not be removed when this group is
|
||||
* {@link #close() closed}. Metrics can be added directly, as long as the metric names are obtained from
|
||||
* this group via the {@link #metricName(MetricNameTemplate)} method.
|
||||
*
|
||||
* @return the metrics; never null
|
||||
*/
|
||||
public Metrics metrics() {
|
||||
return metrics;
|
||||
}
|
||||
|
||||
/**
|
||||
* The tags of this group.
|
||||
*
|
||||
* @return the unmodifiable tags; never null but may be empty
|
||||
*/
|
||||
Map<String, String> tags() {
|
||||
return groupId.tags();
|
||||
}
|
||||
|
||||
/**
|
||||
* Add to this group an indicator metric with a function that returns the current value.
|
||||
*
|
||||
* @param nameTemplate the name template for the metric; may not be null
|
||||
* @param supplier the function used to determine the literal value of the metric; may not be null
|
||||
* @throws IllegalArgumentException if the name is not valid
|
||||
*/
|
||||
public <T> void addValueMetric(MetricNameTemplate nameTemplate, final LiteralSupplier<T> supplier) {
|
||||
MetricName metricName = metricName(nameTemplate);
|
||||
if (metrics().metric(metricName) == null) {
|
||||
metrics().addMetric(metricName, (Gauge<T>) (config, now) -> supplier.metricValue(now));
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Add to this group an indicator metric that always returns the specified value.
|
||||
*
|
||||
* @param nameTemplate the name template for the metric; may not be null
|
||||
* @param value the value; may not be null
|
||||
* @throws IllegalArgumentException if the name is not valid
|
||||
*/
|
||||
public <T> void addImmutableValueMetric(MetricNameTemplate nameTemplate, final T value) {
|
||||
MetricName metricName = metricName(nameTemplate);
|
||||
if (metrics().metric(metricName) == null) {
|
||||
metrics().addMetric(metricName, (Gauge<T>) (config, now) -> value);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get or create a sensor with the given unique name and no parent sensors. This uses
|
||||
* a default recording level of INFO.
|
||||
*
|
||||
* @param name The sensor name
|
||||
* @return The sensor
|
||||
*/
|
||||
public Sensor sensor(String name) {
|
||||
return sensor(name, null, Sensor.RecordingLevel.INFO);
|
||||
}
|
||||
|
||||
/**
|
||||
     * Get or create a sensor with the given unique name and zero or more parent sensors, using the
     * default recording level of INFO. All parent sensors will receive every value recorded with
     * this sensor.
     *
     * @param name    The sensor name
     * @param parents The parent sensors
     * @return The sensor
|
||||
*/
|
||||
public Sensor sensor(String name, Sensor... parents) {
|
||||
return sensor(name, null, Sensor.RecordingLevel.INFO, parents);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get or create a sensor with the given unique name and zero or more parent sensors. All parent sensors will
|
||||
* receive every value recorded with this sensor.
|
||||
*
|
||||
* @param name The name of the sensor
|
||||
* @param recordingLevel The recording level.
|
||||
* @param parents The parent sensors
|
||||
* @return The sensor that is created
|
||||
*/
|
||||
public Sensor sensor(String name, Sensor.RecordingLevel recordingLevel, Sensor... parents) {
|
||||
return sensor(name, null, recordingLevel, parents);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get or create a sensor with the given unique name and zero or more parent sensors. All parent sensors will
|
||||
* receive every value recorded with this sensor.
|
||||
*
|
||||
* @param name The name of the sensor
|
||||
* @param config A default configuration to use for this sensor for metrics that don't have their own config
|
||||
* @param parents The parent sensors
|
||||
* @return The sensor that is created
|
||||
*/
|
||||
public Sensor sensor(String name, MetricConfig config, Sensor... parents) {
|
||||
return sensor(name, config, Sensor.RecordingLevel.INFO, parents);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get or create a sensor with the given unique name and zero or more parent sensors. All parent sensors will
|
||||
* receive every value recorded with this sensor.
|
||||
*
|
||||
* @param name The name of the sensor
|
||||
* @param config A default configuration to use for this sensor for metrics that don't have their own config
|
||||
* @param recordingLevel The recording level.
|
||||
* @param parents The parent sensors
|
||||
* @return The sensor that is created
|
||||
*/
|
||||
public synchronized Sensor sensor(String name, MetricConfig config, Sensor.RecordingLevel recordingLevel, Sensor... parents) {
|
||||
// We need to make sure that all sensor names are unique across all groups, so use the sensor prefix
|
||||
Sensor result = metrics.sensor(sensorPrefix + name, config, Long.MAX_VALUE, recordingLevel, parents);
|
||||
if (result != null)
|
||||
sensorNames.add(result.name());
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
* Remove all sensors and metrics associated with this group.
|
||||
*/
|
||||
public synchronized void close() {
|
||||
for (String sensorName : sensorNames) {
|
||||
metrics.removeSensor(sensorName);
|
||||
}
|
||||
sensorNames.clear();
|
||||
for (MetricName metricName : new HashSet<>(metrics.metrics().keySet())) {
|
||||
if (groupId.includes(metricName)) {
|
||||
metrics.removeMetric(metricName);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* A simple functional interface that returns a literal value.
|
||||
*/
|
||||
public interface LiteralSupplier<T> {
|
||||
|
||||
/**
|
||||
* Return the literal value for the metric.
|
||||
*
|
||||
* @param now the current time in milliseconds
|
||||
* @return the literal metric value; may not be null
|
||||
*/
|
||||
T metricValue(long now);
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a set of tags using the supplied key and value pairs. The order of the tags will be kept.
|
||||
*
|
||||
* @param keyValue the key and value pairs for the tags; must be an even number
|
||||
* @return the map of tags that can be supplied to the {@link Metrics} methods; never null
|
||||
*/
|
||||
static Map<String, String> tags(String... keyValue) {
|
||||
if ((keyValue.length % 2) != 0)
|
||||
throw new IllegalArgumentException("keyValue needs to be specified in pairs");
|
||||
Map<String, String> tags = new LinkedHashMap<>();
|
||||
for (int i = 0; i < keyValue.length; i += 2) {
|
||||
tags.put(keyValue[i], keyValue[i + 1]);
|
||||
}
|
||||
return tags;
|
||||
}
|
||||
|
||||
/**
|
||||
* Utility to generate the documentation for the Connect metrics.
|
||||
*
|
||||
* @param args the arguments
|
||||
*/
|
||||
public static void main(String[] args) {
|
||||
ConnectMetricsRegistry metrics = new ConnectMetricsRegistry();
|
||||
System.out.println(Metrics.toHtmlTable(JMX_PREFIX, metrics.getAllTemplates()));
|
||||
}
|
||||
}
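A short usage sketch, assuming a WorkerConfig instance is available; the group, metric, and tag names below are only illustrative:

    ConnectMetrics connectMetrics = new ConnectMetrics("worker-1:8083", workerConfig, Time.SYSTEM);
    ConnectMetrics.MetricGroup group = connectMetrics.group("example-group", "connector", "my-connector");
    MetricNameTemplate template = new MetricNameTemplate(
            "example-count", "example-group", "An illustrative gauge.", "connector");
    group.addValueMetric(template, now -> 42L);          // LiteralSupplier<Long> as a lambda
    Sensor putTime = group.sensor("put-time");           // name is prefixed internally to stay unique across groups
    // ... record values against putTime ...
    group.close();                                       // removes this group's sensors and metrics
    connectMetrics.stop();                               // closes Metrics and unregisters the JMX app info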
|
||||
@@ -0,0 +1,412 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.runtime;
|
||||
|
||||
import org.apache.kafka.common.MetricNameTemplate;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.LinkedHashSet;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
public class ConnectMetricsRegistry {
|
||||
|
||||
public static final String CONNECTOR_TAG_NAME = "connector";
|
||||
public static final String TASK_TAG_NAME = "task";
|
||||
public static final String CONNECTOR_GROUP_NAME = "connector-metrics";
|
||||
public static final String TASK_GROUP_NAME = "connector-task-metrics";
|
||||
public static final String SOURCE_TASK_GROUP_NAME = "source-task-metrics";
|
||||
public static final String SINK_TASK_GROUP_NAME = "sink-task-metrics";
|
||||
public static final String WORKER_GROUP_NAME = "connect-worker-metrics";
|
||||
public static final String WORKER_REBALANCE_GROUP_NAME = "connect-worker-rebalance-metrics";
|
||||
public static final String TASK_ERROR_HANDLING_GROUP_NAME = "task-error-metrics";
|
||||
|
||||
private final List<MetricNameTemplate> allTemplates = new ArrayList<>();
|
||||
public final MetricNameTemplate connectorStatus;
|
||||
public final MetricNameTemplate connectorType;
|
||||
public final MetricNameTemplate connectorClass;
|
||||
public final MetricNameTemplate connectorVersion;
|
||||
public final MetricNameTemplate connectorTotalTaskCount;
|
||||
public final MetricNameTemplate connectorRunningTaskCount;
|
||||
public final MetricNameTemplate connectorPausedTaskCount;
|
||||
public final MetricNameTemplate connectorFailedTaskCount;
|
||||
public final MetricNameTemplate connectorUnassignedTaskCount;
|
||||
public final MetricNameTemplate connectorDestroyedTaskCount;
|
||||
public final MetricNameTemplate taskStatus;
|
||||
public final MetricNameTemplate taskRunningRatio;
|
||||
public final MetricNameTemplate taskPauseRatio;
|
||||
public final MetricNameTemplate taskCommitTimeMax;
|
||||
public final MetricNameTemplate taskCommitTimeAvg;
|
||||
public final MetricNameTemplate taskBatchSizeMax;
|
||||
public final MetricNameTemplate taskBatchSizeAvg;
|
||||
public final MetricNameTemplate taskCommitFailurePercentage;
|
||||
public final MetricNameTemplate taskCommitSuccessPercentage;
|
||||
public final MetricNameTemplate sourceRecordPollRate;
|
||||
public final MetricNameTemplate sourceRecordPollTotal;
|
||||
public final MetricNameTemplate sourceRecordWriteRate;
|
||||
public final MetricNameTemplate sourceRecordWriteTotal;
|
||||
public final MetricNameTemplate sourceRecordPollBatchTimeMax;
|
||||
public final MetricNameTemplate sourceRecordPollBatchTimeAvg;
|
||||
public final MetricNameTemplate sourceRecordActiveCount;
|
||||
public final MetricNameTemplate sourceRecordActiveCountMax;
|
||||
public final MetricNameTemplate sourceRecordActiveCountAvg;
|
||||
public final MetricNameTemplate sinkRecordReadRate;
|
||||
public final MetricNameTemplate sinkRecordReadTotal;
|
||||
public final MetricNameTemplate sinkRecordSendRate;
|
||||
public final MetricNameTemplate sinkRecordSendTotal;
|
||||
public final MetricNameTemplate sinkRecordLagMax;
|
||||
public final MetricNameTemplate sinkRecordPartitionCount;
|
||||
public final MetricNameTemplate sinkRecordOffsetCommitSeqNum;
|
||||
public final MetricNameTemplate sinkRecordOffsetCommitCompletionRate;
|
||||
public final MetricNameTemplate sinkRecordOffsetCommitCompletionTotal;
|
||||
public final MetricNameTemplate sinkRecordOffsetCommitSkipRate;
|
||||
public final MetricNameTemplate sinkRecordOffsetCommitSkipTotal;
|
||||
public final MetricNameTemplate sinkRecordPutBatchTimeMax;
|
||||
public final MetricNameTemplate sinkRecordPutBatchTimeAvg;
|
||||
public final MetricNameTemplate sinkRecordActiveCount;
|
||||
public final MetricNameTemplate sinkRecordActiveCountMax;
|
||||
public final MetricNameTemplate sinkRecordActiveCountAvg;
|
||||
public final MetricNameTemplate connectorCount;
|
||||
public final MetricNameTemplate taskCount;
|
||||
public final MetricNameTemplate connectorStartupAttemptsTotal;
|
||||
public final MetricNameTemplate connectorStartupSuccessTotal;
|
||||
public final MetricNameTemplate connectorStartupSuccessPercentage;
|
||||
public final MetricNameTemplate connectorStartupFailureTotal;
|
||||
public final MetricNameTemplate connectorStartupFailurePercentage;
|
||||
public final MetricNameTemplate taskStartupAttemptsTotal;
|
||||
public final MetricNameTemplate taskStartupSuccessTotal;
|
||||
public final MetricNameTemplate taskStartupSuccessPercentage;
|
||||
public final MetricNameTemplate taskStartupFailureTotal;
|
||||
public final MetricNameTemplate taskStartupFailurePercentage;
|
||||
public final MetricNameTemplate connectProtocol;
|
||||
public final MetricNameTemplate leaderName;
|
||||
public final MetricNameTemplate epoch;
|
||||
public final MetricNameTemplate rebalanceCompletedTotal;
|
||||
public final MetricNameTemplate rebalanceMode;
|
||||
public final MetricNameTemplate rebalanceTimeMax;
|
||||
public final MetricNameTemplate rebalanceTimeAvg;
|
||||
public final MetricNameTemplate rebalanceTimeSinceLast;
|
||||
public final MetricNameTemplate recordProcessingFailures;
|
||||
public final MetricNameTemplate recordProcessingErrors;
|
||||
public final MetricNameTemplate recordsSkipped;
|
||||
public final MetricNameTemplate retries;
|
||||
public final MetricNameTemplate errorsLogged;
|
||||
public final MetricNameTemplate dlqProduceRequests;
|
||||
public final MetricNameTemplate dlqProduceFailures;
|
||||
public final MetricNameTemplate lastErrorTimestamp;
|
||||
|
||||
public Map<MetricNameTemplate, TaskStatus.State> connectorStatusMetrics;
|
||||
|
||||
public ConnectMetricsRegistry() {
|
||||
this(new LinkedHashSet<String>());
|
||||
}
|
||||
|
||||
public ConnectMetricsRegistry(Set<String> tags) {
|
||||
/***** Connector level *****/
|
||||
Set<String> connectorTags = new LinkedHashSet<>(tags);
|
||||
connectorTags.add(CONNECTOR_TAG_NAME);
|
||||
|
||||
connectorStatus = createTemplate("status", CONNECTOR_GROUP_NAME,
|
||||
"The status of the connector. One of 'unassigned', 'running', 'paused', 'failed', or " +
|
||||
"'destroyed'.",
|
||||
connectorTags);
|
||||
connectorType = createTemplate("connector-type", CONNECTOR_GROUP_NAME, "The type of the connector. One of 'source' or 'sink'.",
|
||||
connectorTags);
|
||||
connectorClass = createTemplate("connector-class", CONNECTOR_GROUP_NAME, "The name of the connector class.", connectorTags);
|
||||
connectorVersion = createTemplate("connector-version", CONNECTOR_GROUP_NAME,
|
||||
"The version of the connector class, as reported by the connector.", connectorTags);
|
||||
|
||||
/***** Worker task level *****/
|
||||
Set<String> workerTaskTags = new LinkedHashSet<>(tags);
|
||||
workerTaskTags.add(CONNECTOR_TAG_NAME);
|
||||
workerTaskTags.add(TASK_TAG_NAME);
|
||||
|
||||
taskStatus = createTemplate("status", TASK_GROUP_NAME,
|
||||
"The status of the connector task. One of 'unassigned', 'running', 'paused', 'failed', or " +
|
||||
"'destroyed'.",
|
||||
workerTaskTags);
|
||||
taskRunningRatio = createTemplate("running-ratio", TASK_GROUP_NAME,
|
||||
"The fraction of time this task has spent in the running state.", workerTaskTags);
|
||||
taskPauseRatio = createTemplate("pause-ratio", TASK_GROUP_NAME, "The fraction of time this task has spent in the pause state.",
|
||||
workerTaskTags);
|
||||
taskCommitTimeMax = createTemplate("offset-commit-max-time-ms", TASK_GROUP_NAME,
|
||||
"The maximum time in milliseconds taken by this task to commit offsets.", workerTaskTags);
|
||||
taskCommitTimeAvg = createTemplate("offset-commit-avg-time-ms", TASK_GROUP_NAME,
|
||||
"The average time in milliseconds taken by this task to commit offsets.", workerTaskTags);
|
||||
taskBatchSizeMax = createTemplate("batch-size-max", TASK_GROUP_NAME, "The maximum size of the batches processed by the connector.",
|
||||
workerTaskTags);
|
||||
taskBatchSizeAvg = createTemplate("batch-size-avg", TASK_GROUP_NAME, "The average size of the batches processed by the connector.",
|
||||
workerTaskTags);
|
||||
taskCommitFailurePercentage = createTemplate("offset-commit-failure-percentage", TASK_GROUP_NAME,
|
||||
"The average percentage of this task's offset commit attempts that failed.",
|
||||
workerTaskTags);
|
||||
taskCommitSuccessPercentage = createTemplate("offset-commit-success-percentage", TASK_GROUP_NAME,
|
||||
"The average percentage of this task's offset commit attempts that succeeded.",
|
||||
workerTaskTags);
|
||||
|
||||
/***** Source worker task level *****/
|
||||
Set<String> sourceTaskTags = new LinkedHashSet<>(tags);
|
||||
sourceTaskTags.add(CONNECTOR_TAG_NAME);
|
||||
sourceTaskTags.add(TASK_TAG_NAME);
|
||||
|
||||
sourceRecordPollRate = createTemplate("source-record-poll-rate", SOURCE_TASK_GROUP_NAME,
|
||||
"The average per-second number of records produced/polled (before transformation) by " +
|
||||
"this task belonging to the named source connector in this worker.",
|
||||
sourceTaskTags);
|
||||
sourceRecordPollTotal = createTemplate("source-record-poll-total", SOURCE_TASK_GROUP_NAME,
|
||||
"The total number of records produced/polled (before transformation) by this task " +
|
||||
"belonging to the named source connector in this worker.",
|
||||
sourceTaskTags);
|
||||
sourceRecordWriteRate = createTemplate("source-record-write-rate", SOURCE_TASK_GROUP_NAME,
|
||||
"The average per-second number of records output from the transformations and written" +
|
||||
" to Kafka for this task belonging to the named source connector in this worker. This" +
|
||||
" is after transformations are applied and excludes any records filtered out by the " +
|
||||
"transformations.",
|
||||
sourceTaskTags);
|
||||
sourceRecordWriteTotal = createTemplate("source-record-write-total", SOURCE_TASK_GROUP_NAME,
|
||||
"The number of records output from the transformations and written to Kafka for this" +
|
||||
" task belonging to the named source connector in this worker, since the task was " +
|
||||
"last restarted.",
|
||||
sourceTaskTags);
|
||||
sourceRecordPollBatchTimeMax = createTemplate("poll-batch-max-time-ms", SOURCE_TASK_GROUP_NAME,
|
||||
"The maximum time in milliseconds taken by this task to poll for a batch of " +
|
||||
"source records.",
|
||||
sourceTaskTags);
|
||||
sourceRecordPollBatchTimeAvg = createTemplate("poll-batch-avg-time-ms", SOURCE_TASK_GROUP_NAME,
|
||||
"The average time in milliseconds taken by this task to poll for a batch of " +
|
||||
"source records.",
|
||||
sourceTaskTags);
|
||||
sourceRecordActiveCount = createTemplate("source-record-active-count", SOURCE_TASK_GROUP_NAME,
|
||||
"The number of records that have been produced by this task but not yet completely " +
|
||||
"written to Kafka.",
|
||||
sourceTaskTags);
|
||||
sourceRecordActiveCountMax = createTemplate("source-record-active-count-max", SOURCE_TASK_GROUP_NAME,
|
||||
"The maximum number of records that have been produced by this task but not yet " +
|
||||
"completely written to Kafka.",
|
||||
sourceTaskTags);
|
||||
sourceRecordActiveCountAvg = createTemplate("source-record-active-count-avg", SOURCE_TASK_GROUP_NAME,
|
||||
"The average number of records that have been produced by this task but not yet " +
|
||||
"completely written to Kafka.",
|
||||
sourceTaskTags);
|
||||
|
||||
/***** Sink worker task level *****/
|
||||
Set<String> sinkTaskTags = new LinkedHashSet<>(tags);
|
||||
sinkTaskTags.add(CONNECTOR_TAG_NAME);
|
||||
sinkTaskTags.add(TASK_TAG_NAME);
|
||||
|
||||
sinkRecordReadRate = createTemplate("sink-record-read-rate", SINK_TASK_GROUP_NAME,
|
||||
"The average per-second number of records read from Kafka for this task belonging to the" +
|
||||
" named sink connector in this worker. This is before transformations are applied.",
|
||||
sinkTaskTags);
|
||||
sinkRecordReadTotal = createTemplate("sink-record-read-total", SINK_TASK_GROUP_NAME,
|
||||
"The total number of records read from Kafka by this task belonging to the named sink " +
|
||||
"connector in this worker, since the task was last restarted.",
|
||||
sinkTaskTags);
|
||||
sinkRecordSendRate = createTemplate("sink-record-send-rate", SINK_TASK_GROUP_NAME,
|
||||
"The average per-second number of records output from the transformations and sent/put " +
|
||||
"to this task belonging to the named sink connector in this worker. This is after " +
|
||||
"transformations are applied and excludes any records filtered out by the " +
|
||||
"transformations.",
|
||||
sinkTaskTags);
|
||||
sinkRecordSendTotal = createTemplate("sink-record-send-total", SINK_TASK_GROUP_NAME,
|
||||
"The total number of records output from the transformations and sent/put to this task " +
|
||||
"belonging to the named sink connector in this worker, since the task was last " +
|
||||
"restarted.",
|
||||
sinkTaskTags);
|
||||
sinkRecordLagMax = createTemplate("sink-record-lag-max", SINK_TASK_GROUP_NAME,
|
||||
"The maximum lag in terms of number of records that the sink task is behind the consumer's " +
|
||||
"position for any topic partitions.",
|
||||
sinkTaskTags);
|
||||
sinkRecordPartitionCount = createTemplate("partition-count", SINK_TASK_GROUP_NAME,
|
||||
"The number of topic partitions assigned to this task belonging to the named sink " +
|
||||
"connector in this worker.",
|
||||
sinkTaskTags);
|
||||
sinkRecordOffsetCommitSeqNum = createTemplate("offset-commit-seq-no", SINK_TASK_GROUP_NAME,
|
||||
"The current sequence number for offset commits.", sinkTaskTags);
|
||||
sinkRecordOffsetCommitCompletionRate = createTemplate("offset-commit-completion-rate", SINK_TASK_GROUP_NAME,
|
||||
"The average per-second number of offset commit completions that were " +
|
||||
"completed successfully.",
|
||||
sinkTaskTags);
|
||||
sinkRecordOffsetCommitCompletionTotal = createTemplate("offset-commit-completion-total", SINK_TASK_GROUP_NAME,
|
||||
"The total number of offset commit completions that were completed " +
|
||||
"successfully.",
|
||||
sinkTaskTags);
|
||||
sinkRecordOffsetCommitSkipRate = createTemplate("offset-commit-skip-rate", SINK_TASK_GROUP_NAME,
|
||||
"The average per-second number of offset commit completions that were " +
|
||||
"received too late and skipped/ignored.",
|
||||
sinkTaskTags);
|
||||
sinkRecordOffsetCommitSkipTotal = createTemplate("offset-commit-skip-total", SINK_TASK_GROUP_NAME,
|
||||
"The total number of offset commit completions that were received too late " +
|
||||
"and skipped/ignored.",
|
||||
sinkTaskTags);
|
||||
sinkRecordPutBatchTimeMax = createTemplate("put-batch-max-time-ms", SINK_TASK_GROUP_NAME,
|
||||
"The maximum time taken by this task to put a batch of sinks records.", sinkTaskTags);
|
||||
sinkRecordPutBatchTimeAvg = createTemplate("put-batch-avg-time-ms", SINK_TASK_GROUP_NAME,
|
||||
"The average time taken by this task to put a batch of sinks records.", sinkTaskTags);
|
||||
sinkRecordActiveCount = createTemplate("sink-record-active-count", SINK_TASK_GROUP_NAME,
|
||||
"The number of records that have been read from Kafka but not yet completely " +
|
||||
"committed/flushed/acknowledged by the sink task.",
|
||||
sinkTaskTags);
|
||||
sinkRecordActiveCountMax = createTemplate("sink-record-active-count-max", SINK_TASK_GROUP_NAME,
|
||||
"The maximum number of records that have been read from Kafka but not yet completely "
|
||||
+ "committed/flushed/acknowledged by the sink task.",
|
||||
sinkTaskTags);
|
||||
sinkRecordActiveCountAvg = createTemplate("sink-record-active-count-avg", SINK_TASK_GROUP_NAME,
|
||||
"The average number of records that have been read from Kafka but not yet completely "
|
||||
+ "committed/flushed/acknowledged by the sink task.",
|
||||
sinkTaskTags);
|
||||
|
||||
/***** Worker level *****/
|
||||
Set<String> workerTags = new LinkedHashSet<>(tags);
|
||||
|
||||
connectorCount = createTemplate("connector-count", WORKER_GROUP_NAME, "The number of connectors run in this worker.", workerTags);
|
||||
taskCount = createTemplate("task-count", WORKER_GROUP_NAME, "The number of tasks run in this worker.", workerTags);
|
||||
connectorStartupAttemptsTotal = createTemplate("connector-startup-attempts-total", WORKER_GROUP_NAME,
|
||||
"The total number of connector startups that this worker has attempted.", workerTags);
|
||||
connectorStartupSuccessTotal = createTemplate("connector-startup-success-total", WORKER_GROUP_NAME,
|
||||
"The total number of connector starts that succeeded.", workerTags);
|
||||
connectorStartupSuccessPercentage = createTemplate("connector-startup-success-percentage", WORKER_GROUP_NAME,
|
||||
"The average percentage of this worker's connectors starts that succeeded.", workerTags);
|
||||
connectorStartupFailureTotal = createTemplate("connector-startup-failure-total", WORKER_GROUP_NAME,
|
||||
"The total number of connector starts that failed.", workerTags);
|
||||
connectorStartupFailurePercentage = createTemplate("connector-startup-failure-percentage", WORKER_GROUP_NAME,
|
||||
"The average percentage of this worker's connectors starts that failed.", workerTags);
|
||||
taskStartupAttemptsTotal = createTemplate("task-startup-attempts-total", WORKER_GROUP_NAME,
|
||||
"The total number of task startups that this worker has attempted.", workerTags);
|
||||
taskStartupSuccessTotal = createTemplate("task-startup-success-total", WORKER_GROUP_NAME,
|
||||
"The total number of task starts that succeeded.", workerTags);
|
||||
taskStartupSuccessPercentage = createTemplate("task-startup-success-percentage", WORKER_GROUP_NAME,
|
||||
"The average percentage of this worker's tasks starts that succeeded.", workerTags);
|
||||
taskStartupFailureTotal = createTemplate("task-startup-failure-total", WORKER_GROUP_NAME,
|
||||
"The total number of task starts that failed.", workerTags);
|
||||
taskStartupFailurePercentage = createTemplate("task-startup-failure-percentage", WORKER_GROUP_NAME,
|
||||
"The average percentage of this worker's tasks starts that failed.", workerTags);
|
||||
|
||||
Set<String> workerConnectorTags = new LinkedHashSet<>(tags);
|
||||
workerConnectorTags.add(CONNECTOR_TAG_NAME);
|
||||
connectorTotalTaskCount = createTemplate("connector-total-task-count", WORKER_GROUP_NAME,
|
||||
"The number of tasks of the connector on the worker.", workerConnectorTags);
|
||||
connectorRunningTaskCount = createTemplate("connector-running-task-count", WORKER_GROUP_NAME,
|
||||
"The number of running tasks of the connector on the worker.", workerConnectorTags);
|
||||
connectorPausedTaskCount = createTemplate("connector-paused-task-count", WORKER_GROUP_NAME,
|
||||
"The number of paused tasks of the connector on the worker.", workerConnectorTags);
|
||||
connectorFailedTaskCount = createTemplate("connector-failed-task-count", WORKER_GROUP_NAME,
|
||||
"The number of failed tasks of the connector on the worker.", workerConnectorTags);
|
||||
connectorUnassignedTaskCount = createTemplate("connector-unassigned-task-count",
|
||||
WORKER_GROUP_NAME,
|
||||
"The number of unassigned tasks of the connector on the worker.", workerConnectorTags);
|
||||
connectorDestroyedTaskCount = createTemplate("connector-destroyed-task-count",
|
||||
WORKER_GROUP_NAME,
|
||||
"The number of destroyed tasks of the connector on the worker.", workerConnectorTags);
|
||||
|
||||
connectorStatusMetrics = new HashMap<>();
|
||||
connectorStatusMetrics.put(connectorRunningTaskCount, TaskStatus.State.RUNNING);
|
||||
connectorStatusMetrics.put(connectorPausedTaskCount, TaskStatus.State.PAUSED);
|
||||
connectorStatusMetrics.put(connectorFailedTaskCount, TaskStatus.State.FAILED);
|
||||
connectorStatusMetrics.put(connectorUnassignedTaskCount, TaskStatus.State.UNASSIGNED);
|
||||
connectorStatusMetrics.put(connectorDestroyedTaskCount, TaskStatus.State.DESTROYED);
|
||||
connectorStatusMetrics = Collections.unmodifiableMap(connectorStatusMetrics);
|
||||
|
||||
/***** Worker rebalance level *****/
|
||||
Set<String> rebalanceTags = new LinkedHashSet<>(tags);
|
||||
|
||||
connectProtocol = createTemplate("connect-protocol", WORKER_REBALANCE_GROUP_NAME, "The Connect protocol used by this cluster", rebalanceTags);
|
||||
leaderName = createTemplate("leader-name", WORKER_REBALANCE_GROUP_NAME, "The name of the group leader.", rebalanceTags);
|
||||
epoch = createTemplate("epoch", WORKER_REBALANCE_GROUP_NAME, "The epoch or generation number of this worker.", rebalanceTags);
|
||||
rebalanceCompletedTotal = createTemplate("completed-rebalances-total", WORKER_REBALANCE_GROUP_NAME,
|
||||
"The total number of rebalances completed by this worker.", rebalanceTags);
|
||||
rebalanceMode = createTemplate("rebalancing", WORKER_REBALANCE_GROUP_NAME,
|
||||
"Whether this worker is currently rebalancing.", rebalanceTags);
|
||||
rebalanceTimeMax = createTemplate("rebalance-max-time-ms", WORKER_REBALANCE_GROUP_NAME,
|
||||
"The maximum time in milliseconds spent by this worker to rebalance.", rebalanceTags);
|
||||
rebalanceTimeAvg = createTemplate("rebalance-avg-time-ms", WORKER_REBALANCE_GROUP_NAME,
|
||||
"The average time in milliseconds spent by this worker to rebalance.", rebalanceTags);
|
||||
rebalanceTimeSinceLast = createTemplate("time-since-last-rebalance-ms", WORKER_REBALANCE_GROUP_NAME,
|
||||
"The time in milliseconds since this worker completed the most recent rebalance.", rebalanceTags);
|
||||
|
||||
/***** Task Error Handling Metrics *****/
|
||||
Set<String> taskErrorHandlingTags = new LinkedHashSet<>(tags);
|
||||
taskErrorHandlingTags.add(CONNECTOR_TAG_NAME);
|
||||
taskErrorHandlingTags.add(TASK_TAG_NAME);
|
||||
|
||||
recordProcessingFailures = createTemplate("total-record-failures", TASK_ERROR_HANDLING_GROUP_NAME,
|
||||
"The number of record processing failures in this task.", taskErrorHandlingTags);
|
||||
recordProcessingErrors = createTemplate("total-record-errors", TASK_ERROR_HANDLING_GROUP_NAME,
|
||||
"The number of record processing errors in this task. ", taskErrorHandlingTags);
|
||||
recordsSkipped = createTemplate("total-records-skipped", TASK_ERROR_HANDLING_GROUP_NAME,
|
||||
"The number of records skipped due to errors.", taskErrorHandlingTags);
|
||||
retries = createTemplate("total-retries", TASK_ERROR_HANDLING_GROUP_NAME,
|
||||
"The number of operations retried.", taskErrorHandlingTags);
|
||||
errorsLogged = createTemplate("total-errors-logged", TASK_ERROR_HANDLING_GROUP_NAME,
|
||||
"The number of errors that were logged.", taskErrorHandlingTags);
|
||||
dlqProduceRequests = createTemplate("deadletterqueue-produce-requests", TASK_ERROR_HANDLING_GROUP_NAME,
|
||||
"The number of attempted writes to the dead letter queue.", taskErrorHandlingTags);
|
||||
dlqProduceFailures = createTemplate("deadletterqueue-produce-failures", TASK_ERROR_HANDLING_GROUP_NAME,
|
||||
"The number of failed writes to the dead letter queue.", taskErrorHandlingTags);
|
||||
lastErrorTimestamp = createTemplate("last-error-timestamp", TASK_ERROR_HANDLING_GROUP_NAME,
|
||||
"The epoch timestamp when this task last encountered an error.", taskErrorHandlingTags);
|
||||
}
|
||||
|
||||
private MetricNameTemplate createTemplate(String name, String group, String doc, Set<String> tags) {
|
||||
MetricNameTemplate template = new MetricNameTemplate(name, group, doc, tags);
|
||||
allTemplates.add(template);
|
||||
return template;
|
||||
}
|
||||
|
||||
public List<MetricNameTemplate> getAllTemplates() {
|
||||
return Collections.unmodifiableList(allTemplates);
|
||||
}
|
||||
|
||||
public String connectorTagName() {
|
||||
return CONNECTOR_TAG_NAME;
|
||||
}
|
||||
|
||||
public String taskTagName() {
|
||||
return TASK_TAG_NAME;
|
||||
}
|
||||
|
||||
public String connectorGroupName() {
|
||||
return CONNECTOR_GROUP_NAME;
|
||||
}
|
||||
|
||||
public String taskGroupName() {
|
||||
return TASK_GROUP_NAME;
|
||||
}
|
||||
|
||||
public String sinkTaskGroupName() {
|
||||
return SINK_TASK_GROUP_NAME;
|
||||
}
|
||||
|
||||
public String sourceTaskGroupName() {
|
||||
return SOURCE_TASK_GROUP_NAME;
|
||||
}
|
||||
|
||||
public String workerGroupName() {
|
||||
return WORKER_GROUP_NAME;
|
||||
}
|
||||
|
||||
public String workerRebalanceGroupName() {
|
||||
return WORKER_REBALANCE_GROUP_NAME;
|
||||
}
|
||||
|
||||
public String taskErrorHandlingGroupName() {
|
||||
return TASK_ERROR_HANDLING_GROUP_NAME;
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,375 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.runtime;
|
||||
|
||||
import org.apache.kafka.common.config.AbstractConfig;
|
||||
import org.apache.kafka.common.config.ConfigDef;
|
||||
import org.apache.kafka.common.config.ConfigDef.Importance;
|
||||
import org.apache.kafka.common.config.ConfigDef.Type;
|
||||
import org.apache.kafka.common.config.ConfigDef.Width;
|
||||
import org.apache.kafka.common.config.ConfigException;
|
||||
import org.apache.kafka.connect.connector.ConnectRecord;
|
||||
import org.apache.kafka.connect.errors.ConnectException;
|
||||
import org.apache.kafka.connect.runtime.errors.ToleranceType;
|
||||
import org.apache.kafka.connect.runtime.isolation.PluginDesc;
|
||||
import org.apache.kafka.connect.runtime.isolation.Plugins;
|
||||
import org.apache.kafka.connect.transforms.Transformation;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.LinkedHashSet;
|
||||
import java.util.List;
|
||||
import java.util.Locale;
|
||||
import java.util.Map;
|
||||
|
||||
import static org.apache.kafka.common.config.ConfigDef.NonEmptyStringWithoutControlChars.nonEmptyStringWithoutControlChars;
|
||||
import static org.apache.kafka.common.config.ConfigDef.Range.atLeast;
|
||||
import static org.apache.kafka.common.config.ConfigDef.ValidString.in;
|
||||
|
||||
/**
* <p>
* Configuration options for Connectors. These only include Kafka Connect system-level configuration
* options (e.g. Connector class name, timeouts used by Connect to control the connector) but do
* not include Connector-specific options (e.g. database connection settings).
* </p>
* <p>
* Note that some of these options are not required for all connectors. For example TOPICS_CONFIG
* is sink-specific.
* </p>
*/
|
||||
public class ConnectorConfig extends AbstractConfig {
|
||||
protected static final String COMMON_GROUP = "Common";
|
||||
protected static final String TRANSFORMS_GROUP = "Transforms";
|
||||
protected static final String ERROR_GROUP = "Error Handling";
|
||||
|
||||
public static final String NAME_CONFIG = "name";
|
||||
private static final String NAME_DOC = "Globally unique name to use for this connector.";
|
||||
private static final String NAME_DISPLAY = "Connector name";
|
||||
|
||||
public static final String CONNECTOR_CLASS_CONFIG = "connector.class";
|
||||
private static final String CONNECTOR_CLASS_DOC =
|
||||
"Name or alias of the class for this connector. Must be a subclass of org.apache.kafka.connect.connector.Connector. " +
|
||||
"If the connector is org.apache.kafka.connect.file.FileStreamSinkConnector, you can either specify this full name, " +
|
||||
" or use \"FileStreamSink\" or \"FileStreamSinkConnector\" to make the configuration a bit shorter";
|
||||
private static final String CONNECTOR_CLASS_DISPLAY = "Connector class";
|
||||
|
||||
public static final String KEY_CONVERTER_CLASS_CONFIG = WorkerConfig.KEY_CONVERTER_CLASS_CONFIG;
|
||||
public static final String KEY_CONVERTER_CLASS_DOC = WorkerConfig.KEY_CONVERTER_CLASS_DOC;
|
||||
public static final String KEY_CONVERTER_CLASS_DISPLAY = "Key converter class";
|
||||
|
||||
public static final String VALUE_CONVERTER_CLASS_CONFIG = WorkerConfig.VALUE_CONVERTER_CLASS_CONFIG;
|
||||
public static final String VALUE_CONVERTER_CLASS_DOC = WorkerConfig.VALUE_CONVERTER_CLASS_DOC;
|
||||
public static final String VALUE_CONVERTER_CLASS_DISPLAY = "Value converter class";
|
||||
|
||||
public static final String HEADER_CONVERTER_CLASS_CONFIG = WorkerConfig.HEADER_CONVERTER_CLASS_CONFIG;
|
||||
public static final String HEADER_CONVERTER_CLASS_DOC = WorkerConfig.HEADER_CONVERTER_CLASS_DOC;
|
||||
public static final String HEADER_CONVERTER_CLASS_DISPLAY = "Header converter class";
|
||||
// The Connector config should not have a default for the header converter, since the absence of a config property means that
|
||||
// the worker config settings should be used. Thus, we set the default to null here.
|
||||
public static final String HEADER_CONVERTER_CLASS_DEFAULT = null;
|
||||
|
||||
public static final String TASKS_MAX_CONFIG = "tasks.max";
|
||||
private static final String TASKS_MAX_DOC = "Maximum number of tasks to use for this connector.";
|
||||
public static final int TASKS_MAX_DEFAULT = 1;
|
||||
private static final int TASKS_MIN_CONFIG = 1;
|
||||
|
||||
private static final String TASK_MAX_DISPLAY = "Tasks max";
|
||||
|
||||
public static final String TRANSFORMS_CONFIG = "transforms";
|
||||
private static final String TRANSFORMS_DOC = "Aliases for the transformations to be applied to records.";
|
||||
private static final String TRANSFORMS_DISPLAY = "Transforms";
|
||||
|
||||
public static final String CONFIG_RELOAD_ACTION_CONFIG = "config.action.reload";
|
||||
private static final String CONFIG_RELOAD_ACTION_DOC =
|
||||
"The action that Connect should take on the connector when changes in external " +
|
||||
"configuration providers result in a change in the connector's configuration properties. " +
|
||||
"A value of 'none' indicates that Connect will do nothing. " +
|
||||
"A value of 'restart' indicates that Connect should restart/reload the connector with the " +
|
||||
"updated configuration properties." +
|
||||
"The restart may actually be scheduled in the future if the external configuration provider " +
|
||||
"indicates that a configuration value will expire in the future.";
|
||||
|
||||
private static final String CONFIG_RELOAD_ACTION_DISPLAY = "Reload Action";
|
||||
public static final String CONFIG_RELOAD_ACTION_NONE = Herder.ConfigReloadAction.NONE.name().toLowerCase(Locale.ROOT);
|
||||
public static final String CONFIG_RELOAD_ACTION_RESTART = Herder.ConfigReloadAction.RESTART.name().toLowerCase(Locale.ROOT);
|
||||
|
||||
public static final String ERRORS_RETRY_TIMEOUT_CONFIG = "errors.retry.timeout";
|
||||
public static final String ERRORS_RETRY_TIMEOUT_DISPLAY = "Retry Timeout for Errors";
|
||||
public static final int ERRORS_RETRY_TIMEOUT_DEFAULT = 0;
|
||||
public static final String ERRORS_RETRY_TIMEOUT_DOC = "The maximum duration in milliseconds that a failed operation " +
|
||||
"will be reattempted. The default is 0, which means no retries will be attempted. Use -1 for infinite retries.";
|
||||
|
||||
public static final String ERRORS_RETRY_MAX_DELAY_CONFIG = "errors.retry.delay.max.ms";
|
||||
public static final String ERRORS_RETRY_MAX_DELAY_DISPLAY = "Maximum Delay Between Retries for Errors";
|
||||
public static final int ERRORS_RETRY_MAX_DELAY_DEFAULT = 60000;
|
||||
public static final String ERRORS_RETRY_MAX_DELAY_DOC = "The maximum duration in milliseconds between consecutive retry attempts. " +
|
||||
"Jitter will be added to the delay once this limit is reached to prevent thundering herd issues.";
|
||||
|
||||
public static final String ERRORS_TOLERANCE_CONFIG = "errors.tolerance";
|
||||
public static final String ERRORS_TOLERANCE_DISPLAY = "Error Tolerance";
|
||||
public static final ToleranceType ERRORS_TOLERANCE_DEFAULT = ToleranceType.NONE;
|
||||
public static final String ERRORS_TOLERANCE_DOC = "Behavior for tolerating errors during connector operation. 'none' is the default value " +
|
||||
"and signals that any error will result in an immediate connector task failure; 'all' changes the behavior to skip over problematic records.";
|
||||
|
||||
public static final String ERRORS_LOG_ENABLE_CONFIG = "errors.log.enable";
|
||||
public static final String ERRORS_LOG_ENABLE_DISPLAY = "Log Errors";
|
||||
public static final boolean ERRORS_LOG_ENABLE_DEFAULT = false;
|
||||
public static final String ERRORS_LOG_ENABLE_DOC = "If true, write each error and the details of the failed operation and problematic record " +
|
||||
"to the Connect application log. This is 'false' by default, so that only errors that are not tolerated are reported.";
|
||||
|
||||
public static final String ERRORS_LOG_INCLUDE_MESSAGES_CONFIG = "errors.log.include.messages";
|
||||
public static final String ERRORS_LOG_INCLUDE_MESSAGES_DISPLAY = "Log Error Details";
|
||||
public static final boolean ERRORS_LOG_INCLUDE_MESSAGES_DEFAULT = false;
|
||||
public static final String ERRORS_LOG_INCLUDE_MESSAGES_DOC = "Whether to include in the log the Connect record that resulted in " +
|
||||
"a failure. This is 'false' by default, which will prevent record keys, values, and headers from being written to log files, " +
|
||||
"although some information such as topic and partition number will still be logged.";
|
||||
|
||||
|
||||
public static final String CONNECTOR_CLIENT_PRODUCER_OVERRIDES_PREFIX = "producer.override.";
|
||||
public static final String CONNECTOR_CLIENT_CONSUMER_OVERRIDES_PREFIX = "consumer.override.";
|
||||
public static final String CONNECTOR_CLIENT_ADMIN_OVERRIDES_PREFIX = "admin.override.";
|
||||
|
||||
private final EnrichedConnectorConfig enrichedConfig;
|
||||
private static class EnrichedConnectorConfig extends AbstractConfig {
|
||||
EnrichedConnectorConfig(ConfigDef configDef, Map<String, String> props) {
|
||||
super(configDef, props);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Object get(String key) {
|
||||
return super.get(key);
|
||||
}
|
||||
}
|
||||
|
||||
public static ConfigDef configDef() {
|
||||
int orderInGroup = 0;
|
||||
int orderInErrorGroup = 0;
|
||||
return new ConfigDef()
|
||||
.define(NAME_CONFIG, Type.STRING, ConfigDef.NO_DEFAULT_VALUE, nonEmptyStringWithoutControlChars(), Importance.HIGH, NAME_DOC, COMMON_GROUP, ++orderInGroup, Width.MEDIUM, NAME_DISPLAY)
|
||||
.define(CONNECTOR_CLASS_CONFIG, Type.STRING, Importance.HIGH, CONNECTOR_CLASS_DOC, COMMON_GROUP, ++orderInGroup, Width.LONG, CONNECTOR_CLASS_DISPLAY)
|
||||
.define(TASKS_MAX_CONFIG, Type.INT, TASKS_MAX_DEFAULT, atLeast(TASKS_MIN_CONFIG), Importance.HIGH, TASKS_MAX_DOC, COMMON_GROUP, ++orderInGroup, Width.SHORT, TASK_MAX_DISPLAY)
|
||||
.define(KEY_CONVERTER_CLASS_CONFIG, Type.CLASS, null, Importance.LOW, KEY_CONVERTER_CLASS_DOC, COMMON_GROUP, ++orderInGroup, Width.SHORT, KEY_CONVERTER_CLASS_DISPLAY)
|
||||
.define(VALUE_CONVERTER_CLASS_CONFIG, Type.CLASS, null, Importance.LOW, VALUE_CONVERTER_CLASS_DOC, COMMON_GROUP, ++orderInGroup, Width.SHORT, VALUE_CONVERTER_CLASS_DISPLAY)
|
||||
.define(HEADER_CONVERTER_CLASS_CONFIG, Type.CLASS, HEADER_CONVERTER_CLASS_DEFAULT, Importance.LOW, HEADER_CONVERTER_CLASS_DOC, COMMON_GROUP, ++orderInGroup, Width.SHORT, HEADER_CONVERTER_CLASS_DISPLAY)
|
||||
.define(TRANSFORMS_CONFIG, Type.LIST, Collections.emptyList(), ConfigDef.CompositeValidator.of(new ConfigDef.NonNullValidator(), new ConfigDef.Validator() {
|
||||
@SuppressWarnings("unchecked")
|
||||
@Override
|
||||
public void ensureValid(String name, Object value) {
|
||||
final List<String> transformAliases = (List<String>) value;
|
||||
if (transformAliases.size() > new HashSet<>(transformAliases).size()) {
|
||||
throw new ConfigException(name, value, "Duplicate alias provided.");
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "unique transformation aliases";
|
||||
}
|
||||
}), Importance.LOW, TRANSFORMS_DOC, TRANSFORMS_GROUP, ++orderInGroup, Width.LONG, TRANSFORMS_DISPLAY)
|
||||
.define(CONFIG_RELOAD_ACTION_CONFIG, Type.STRING, CONFIG_RELOAD_ACTION_RESTART,
|
||||
in(CONFIG_RELOAD_ACTION_NONE, CONFIG_RELOAD_ACTION_RESTART), Importance.LOW,
|
||||
CONFIG_RELOAD_ACTION_DOC, COMMON_GROUP, ++orderInGroup, Width.MEDIUM, CONFIG_RELOAD_ACTION_DISPLAY)
|
||||
.define(ERRORS_RETRY_TIMEOUT_CONFIG, Type.LONG, ERRORS_RETRY_TIMEOUT_DEFAULT, Importance.MEDIUM,
|
||||
ERRORS_RETRY_TIMEOUT_DOC, ERROR_GROUP, ++orderInErrorGroup, Width.MEDIUM, ERRORS_RETRY_TIMEOUT_DISPLAY)
|
||||
.define(ERRORS_RETRY_MAX_DELAY_CONFIG, Type.LONG, ERRORS_RETRY_MAX_DELAY_DEFAULT, Importance.MEDIUM,
|
||||
ERRORS_RETRY_MAX_DELAY_DOC, ERROR_GROUP, ++orderInErrorGroup, Width.MEDIUM, ERRORS_RETRY_MAX_DELAY_DISPLAY)
|
||||
.define(ERRORS_TOLERANCE_CONFIG, Type.STRING, ERRORS_TOLERANCE_DEFAULT.value(),
|
||||
in(ToleranceType.NONE.value(), ToleranceType.ALL.value()), Importance.MEDIUM,
|
||||
ERRORS_TOLERANCE_DOC, ERROR_GROUP, ++orderInErrorGroup, Width.SHORT, ERRORS_TOLERANCE_DISPLAY)
|
||||
.define(ERRORS_LOG_ENABLE_CONFIG, Type.BOOLEAN, ERRORS_LOG_ENABLE_DEFAULT, Importance.MEDIUM,
|
||||
ERRORS_LOG_ENABLE_DOC, ERROR_GROUP, ++orderInErrorGroup, Width.SHORT, ERRORS_LOG_ENABLE_DISPLAY)
|
||||
.define(ERRORS_LOG_INCLUDE_MESSAGES_CONFIG, Type.BOOLEAN, ERRORS_LOG_INCLUDE_MESSAGES_DEFAULT, Importance.MEDIUM,
|
||||
ERRORS_LOG_INCLUDE_MESSAGES_DOC, ERROR_GROUP, ++orderInErrorGroup, Width.SHORT, ERRORS_LOG_INCLUDE_MESSAGES_DISPLAY);
|
||||
}
|
||||
|
||||
public ConnectorConfig(Plugins plugins) {
|
||||
this(plugins, new HashMap<String, String>());
|
||||
}
|
||||
|
||||
public ConnectorConfig(Plugins plugins, Map<String, String> props) {
|
||||
this(plugins, configDef(), props);
|
||||
}
|
||||
|
||||
public ConnectorConfig(Plugins plugins, ConfigDef configDef, Map<String, String> props) {
|
||||
super(configDef, props);
|
||||
enrichedConfig = new EnrichedConnectorConfig(
|
||||
enrich(plugins, configDef, props, true),
|
||||
props
|
||||
);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Object get(String key) {
|
||||
return enrichedConfig.get(key);
|
||||
}
|
||||
|
||||
public long errorRetryTimeout() {
|
||||
return getLong(ERRORS_RETRY_TIMEOUT_CONFIG);
|
||||
}
|
||||
|
||||
public long errorMaxDelayInMillis() {
|
||||
return getLong(ERRORS_RETRY_MAX_DELAY_CONFIG);
|
||||
}
|
||||
|
||||
public ToleranceType errorToleranceType() {
|
||||
String tolerance = getString(ERRORS_TOLERANCE_CONFIG);
|
||||
for (ToleranceType type: ToleranceType.values()) {
|
||||
if (type.name().equalsIgnoreCase(tolerance)) {
|
||||
return type;
|
||||
}
|
||||
}
|
||||
return ERRORS_TOLERANCE_DEFAULT;
|
||||
}
|
||||
|
||||
public boolean enableErrorLog() {
|
||||
return getBoolean(ERRORS_LOG_ENABLE_CONFIG);
|
||||
}
|
||||
|
||||
public boolean includeRecordDetailsInErrorLog() {
|
||||
return getBoolean(ERRORS_LOG_INCLUDE_MESSAGES_CONFIG);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the initialized list of {@link Transformation} which are specified in {@link #TRANSFORMS_CONFIG}.
|
||||
*/
|
||||
public <R extends ConnectRecord<R>> List<Transformation<R>> transformations() {
|
||||
final List<String> transformAliases = getList(TRANSFORMS_CONFIG);
|
||||
|
||||
final List<Transformation<R>> transformations = new ArrayList<>(transformAliases.size());
|
||||
for (String alias : transformAliases) {
|
||||
final String prefix = TRANSFORMS_CONFIG + "." + alias + ".";
|
||||
try {
|
||||
@SuppressWarnings("unchecked")
|
||||
final Transformation<R> transformation = getClass(prefix + "type").asSubclass(Transformation.class)
|
||||
.getDeclaredConstructor().newInstance();
|
||||
transformation.configure(originalsWithPrefix(prefix));
|
||||
transformations.add(transformation);
|
||||
} catch (Exception e) {
|
||||
throw new ConnectException(e);
|
||||
}
|
||||
}
|
||||
|
||||
return transformations;
|
||||
}
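// Illustrative example (assumption, not from the original source): given connector properties such as
//
//   transforms=maskSsn
//   transforms.maskSsn.type=org.apache.kafka.connect.transforms.MaskField$Value
//   transforms.maskSsn.fields=ssn
//
// transformations() instantiates MaskField$Value, calls configure() with the properties found under the
// "transforms.maskSsn." prefix, and returns the configured instance in the list.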
|
||||
|
||||
/**
|
||||
* Returns an enriched {@link ConfigDef} building upon the {@code ConfigDef}, using the current configuration specified in {@code props} as an input.
|
||||
* <p>
|
||||
* {@code requireFullConfig} specifies whether required config values that are missing should cause an exception to be thrown.
|
||||
*/
|
||||
public static ConfigDef enrich(Plugins plugins, ConfigDef baseConfigDef, Map<String, String> props, boolean requireFullConfig) {
|
||||
Object transformAliases = ConfigDef.parseType(TRANSFORMS_CONFIG, props.get(TRANSFORMS_CONFIG), Type.LIST);
|
||||
if (!(transformAliases instanceof List)) {
|
||||
return baseConfigDef;
|
||||
}
|
||||
|
||||
ConfigDef newDef = new ConfigDef(baseConfigDef);
|
||||
LinkedHashSet<?> uniqueTransformAliases = new LinkedHashSet<>((List<?>) transformAliases);
|
||||
for (Object o : uniqueTransformAliases) {
|
||||
if (!(o instanceof String)) {
|
||||
throw new ConfigException("Item in " + TRANSFORMS_CONFIG + " property is not of "
|
||||
+ "type String");
|
||||
}
|
||||
String alias = (String) o;
|
||||
final String prefix = TRANSFORMS_CONFIG + "." + alias + ".";
|
||||
final String group = TRANSFORMS_GROUP + ": " + alias;
|
||||
int orderInGroup = 0;
|
||||
|
||||
final String transformationTypeConfig = prefix + "type";
|
||||
final ConfigDef.Validator typeValidator = new ConfigDef.Validator() {
|
||||
@Override
|
||||
public void ensureValid(String name, Object value) {
|
||||
getConfigDefFromTransformation(transformationTypeConfig, (Class) value);
|
||||
}
|
||||
};
|
||||
newDef.define(transformationTypeConfig, Type.CLASS, ConfigDef.NO_DEFAULT_VALUE, typeValidator, Importance.HIGH,
|
||||
"Class for the '" + alias + "' transformation.", group, orderInGroup++, Width.LONG, "Transformation type for " + alias,
|
||||
Collections.<String>emptyList(), new TransformationClassRecommender(plugins));
|
||||
|
||||
final ConfigDef transformationConfigDef;
|
||||
try {
|
||||
final String className = props.get(transformationTypeConfig);
|
||||
final Class<?> cls = (Class<?>) ConfigDef.parseType(transformationTypeConfig, className, Type.CLASS);
|
||||
transformationConfigDef = getConfigDefFromTransformation(transformationTypeConfig, cls);
|
||||
} catch (ConfigException e) {
|
||||
if (requireFullConfig) {
|
||||
throw e;
|
||||
} else {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
newDef.embed(prefix, group, orderInGroup, transformationConfigDef);
|
||||
}
|
||||
|
||||
return newDef;
|
||||
}
|
||||
|
||||
/**
|
||||
* Return {@link ConfigDef} from {@code transformationCls}, which is expected to be a non-null {@code Class<Transformation>},
|
||||
* by instantiating it and invoking {@link Transformation#config()}.
|
||||
*/
|
||||
static ConfigDef getConfigDefFromTransformation(String key, Class<?> transformationCls) {
|
||||
if (transformationCls == null || !Transformation.class.isAssignableFrom(transformationCls)) {
|
||||
throw new ConfigException(key, String.valueOf(transformationCls), "Not a Transformation");
|
||||
}
|
||||
Transformation transformation;
|
||||
try {
|
||||
transformation = transformationCls.asSubclass(Transformation.class).newInstance();
|
||||
} catch (Exception e) {
|
||||
throw new ConfigException(key, String.valueOf(transformationCls), "Error getting config definition from Transformation: " + e.getMessage());
|
||||
}
|
||||
ConfigDef configDef = transformation.config();
|
||||
if (null == configDef) {
|
||||
throw new ConnectException(
|
||||
String.format(
|
||||
"%s.config() must return a ConfigDef that is not null.",
|
||||
transformationCls.getName()
|
||||
)
|
||||
);
|
||||
}
|
||||
return configDef;
|
||||
}
|
||||
|
||||
/**
|
||||
* Recommend bundled transformations.
|
||||
*/
|
||||
static final class TransformationClassRecommender implements ConfigDef.Recommender {
|
||||
private final Plugins plugins;
|
||||
|
||||
TransformationClassRecommender(Plugins plugins) {
|
||||
this.plugins = plugins;
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<Object> validValues(String name, Map<String, Object> parsedConfig) {
|
||||
List<Object> transformationPlugins = new ArrayList<>();
|
||||
for (PluginDesc<Transformation> plugin : plugins.transformations()) {
|
||||
transformationPlugins.add(plugin.pluginClass());
|
||||
}
|
||||
return Collections.unmodifiableList(transformationPlugins);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean visible(String name, Map<String, Object> parsedConfig) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
@@ -0,0 +1,73 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.runtime;
|
||||
|
||||
|
||||
public class ConnectorStatus extends AbstractStatus<String> {
|
||||
|
||||
public ConnectorStatus(String connector, State state, String msg, String workerUrl, int generation) {
|
||||
super(connector, state, workerUrl, generation, msg);
|
||||
}
|
||||
|
||||
public ConnectorStatus(String connector, State state, String workerUrl, int generation) {
|
||||
super(connector, state, workerUrl, generation, null);
|
||||
}
|
||||
|
||||
public interface Listener {
|
||||
|
||||
/**
|
||||
* Invoked after connector has successfully been shutdown.
|
||||
* @param connector The connector name
|
||||
*/
|
||||
void onShutdown(String connector);
|
||||
|
||||
/**
|
||||
* Invoked from the Connector using {@link org.apache.kafka.connect.connector.ConnectorContext#raiseError(Exception)}
|
||||
* or if either {@link org.apache.kafka.connect.connector.Connector#start(java.util.Map)} or
|
||||
* {@link org.apache.kafka.connect.connector.Connector#stop()} throw an exception.
|
||||
* Note that no shutdown event will follow after the connector has been failed.
|
||||
* @param connector The connector name
|
||||
* @param cause Error raised from the connector.
|
||||
*/
|
||||
void onFailure(String connector, Throwable cause);
|
||||
|
||||
/**
|
||||
* Invoked when the connector is paused through the REST API
|
||||
* @param connector The connector name
|
||||
*/
|
||||
void onPause(String connector);
|
||||
|
||||
/**
|
||||
* Invoked after the connector has been resumed.
|
||||
* @param connector The connector name
|
||||
*/
|
||||
void onResume(String connector);
|
||||
|
||||
/**
|
||||
* Invoked after successful startup of the connector.
|
||||
* @param connector The connector name
|
||||
*/
|
||||
void onStartup(String connector);
|
||||
|
||||
/**
|
||||
* Invoked when the connector is deleted through the REST API.
|
||||
* @param connector The connector name
|
||||
*/
|
||||
void onDeletion(String connector);
|
||||
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,269 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.runtime;
|
||||
|
||||
import org.apache.kafka.connect.runtime.isolation.Plugins;
|
||||
import org.apache.kafka.connect.runtime.rest.InternalRequestSignature;
|
||||
import org.apache.kafka.connect.runtime.rest.entities.ActiveTopicsInfo;
|
||||
import org.apache.kafka.connect.runtime.rest.entities.ConfigInfos;
|
||||
import org.apache.kafka.connect.runtime.rest.entities.ConnectorInfo;
|
||||
import org.apache.kafka.connect.runtime.rest.entities.ConnectorStateInfo;
|
||||
import org.apache.kafka.connect.runtime.rest.entities.TaskInfo;
|
||||
import org.apache.kafka.connect.storage.StatusBackingStore;
|
||||
import org.apache.kafka.connect.util.Callback;
|
||||
import org.apache.kafka.connect.util.ConnectorTaskId;
|
||||
|
||||
import java.util.Collection;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Objects;
|
||||
|
||||
/**
* <p>
* The herder interface tracks and manages workers and connectors. It is the main interface for external components
* to make changes to the state of the cluster. For example, in distributed mode, an implementation of this class
* knows how to accept a connector configuration, may need to route it to the current leader worker for the cluster so
* the config can be written to persistent storage, and then ensures the new connector is correctly instantiated on one
* of the workers.
* </p>
* <p>
* This class must implement all the actions that can be taken on the cluster (add/remove connectors, pause/resume tasks,
* get state of connectors and tasks, etc). The non-Java interfaces to the cluster (REST API and CLI) are very simple
* wrappers of the functionality provided by this interface.
* </p>
* <p>
* In standalone mode, the implementation of this class will be trivial because no coordination is needed. In that case,
* the implementation will mainly be delegating tasks directly to other components. For example, when creating a new
* connector in standalone mode, there is no need to persist the config and the connector and its tasks must run in the
* same process, so the standalone herder implementation can immediately instantiate and start the connector and its
* tasks.
* </p>
*/
|
||||
public interface Herder {
|
||||
|
||||
void start();
|
||||
|
||||
void stop();
|
||||
|
||||
/**
* Get a list of connectors currently running in this cluster. This is a full list of connectors in the cluster gathered
* from the current configuration.
*
* @return A list of connector names
* @throws org.apache.kafka.connect.runtime.distributed.RequestTargetException if this node cannot resolve the request
* (e.g., because it has not joined the cluster or does not have configs in sync with the group) and it is
* not the leader or the task owner (e.g., task restart must be handled by the worker which owns the task)
* @throws org.apache.kafka.connect.errors.ConnectException if this node is the leader, but still cannot resolve the
* request (e.g., it is not in sync with other workers' config state)
*/
|
||||
void connectors(Callback<Collection<String>> callback);
|
||||
|
||||
/**
|
||||
* Get the definition and status of a connector.
|
||||
*/
|
||||
void connectorInfo(String connName, Callback<ConnectorInfo> callback);
|
||||
|
||||
/**
|
||||
* Get the configuration for a connector.
|
||||
* @param connName name of the connector
|
||||
* @param callback callback to invoke with the configuration
|
||||
*/
|
||||
void connectorConfig(String connName, Callback<Map<String, String>> callback);
|
||||
|
||||
/**
|
||||
* Set the configuration for a connector. This supports creation and updating.
|
||||
* @param connName name of the connector
|
||||
* @param config the connector's configuration, or null if deleting the connector
|
||||
* @param allowReplace if true, allow overwriting previous configs; if false, throw AlreadyExistsException if a connector
|
||||
* with the same name already exists
|
||||
* @param callback callback to invoke when the configuration has been written
|
||||
*/
|
||||
void putConnectorConfig(String connName, Map<String, String> config, boolean allowReplace, Callback<Created<ConnectorInfo>> callback);
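// Minimal usage sketch (assumption, not part of the original source): creating a connector through a
// Herder implementation. The connector name, class, and topic are placeholders; Callback is the
// functional interface from org.apache.kafka.connect.util.
//
//   Map<String, String> config = new HashMap<>();
//   config.put("name", "example-sink");
//   config.put("connector.class", "org.apache.kafka.connect.file.FileStreamSinkConnector");
//   config.put("topics", "example-topic");
//   herder.putConnectorConfig("example-sink", config, false, (error, created) -> {
//       if (error != null) {
//           error.printStackTrace();
//       } else {
//           System.out.println("created=" + created.created() + ", info=" + created.result());
//       }
//   });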
|
||||
|
||||
/**
|
||||
* Delete a connector and its configuration.
|
||||
* @param connName name of the connector
|
||||
* @param callback callback to invoke when the configuration has been written
|
||||
*/
|
||||
void deleteConnectorConfig(String connName, Callback<Created<ConnectorInfo>> callback);
|
||||
|
||||
/**
|
||||
* Requests reconfiguration of the task. This should only be triggered by
|
||||
* {@link HerderConnectorContext}.
|
||||
*
|
||||
* @param connName name of the connector that should be reconfigured
|
||||
*/
|
||||
void requestTaskReconfiguration(String connName);
|
||||
|
||||
/**
|
||||
* Get the configurations for the current set of tasks of a connector.
|
||||
* @param connName connector to update
|
||||
* @param callback callback to invoke upon completion
|
||||
*/
|
||||
void taskConfigs(String connName, Callback<List<TaskInfo>> callback);
|
||||
|
||||
/**
|
||||
* Set the configurations for the tasks of a connector. This should always include all tasks in the connector; if
|
||||
* there are existing configurations and fewer are provided, this will reduce the number of tasks, and if more are
|
||||
* provided it will increase the number of tasks.
|
||||
* @param connName connector to update
|
||||
* @param configs list of configurations
|
||||
* @param callback callback to invoke upon completion
|
||||
* @param requestSignature the signature of the request made for this task (re-)configuration;
|
||||
* may be null if no signature was provided
|
||||
*/
|
||||
void putTaskConfigs(String connName, List<Map<String, String>> configs, Callback<Void> callback, InternalRequestSignature requestSignature);
|
||||
|
||||
/**
|
||||
* Get a list of connectors currently running in this cluster.
|
||||
* @return A list of connector names
|
||||
*/
|
||||
Collection<String> connectors();
|
||||
|
||||
/**
|
||||
* Get the definition and status of a connector.
|
||||
* @param connName name of the connector
|
||||
*/
|
||||
ConnectorInfo connectorInfo(String connName);
|
||||
|
||||
/**
|
||||
* Lookup the current status of a connector.
|
||||
* @param connName name of the connector
|
||||
*/
|
||||
ConnectorStateInfo connectorStatus(String connName);
|
||||
|
||||
/**
|
||||
* Lookup the set of topics currently used by a connector.
|
||||
*
|
||||
* @param connName name of the connector
|
||||
* @return the set of active topics
|
||||
*/
|
||||
ActiveTopicsInfo connectorActiveTopics(String connName);
|
||||
|
||||
/**
|
||||
* Request to asynchronously reset the active topics for the named connector.
|
||||
*
|
||||
* @param connName name of the connector
|
||||
*/
|
||||
void resetConnectorActiveTopics(String connName);
|
||||
|
||||
/**
|
||||
* Return a reference to the status backing store used by this herder.
|
||||
*
|
||||
* @return the status backing store used by this herder
|
||||
*/
|
||||
StatusBackingStore statusBackingStore();
|
||||
|
||||
/**
|
||||
* Lookup the status of a task.
|
||||
* @param id id of the task
|
||||
*/
|
||||
ConnectorStateInfo.TaskState taskStatus(ConnectorTaskId id);
|
||||
|
||||
/**
|
||||
* Validate the provided connector config values against the configuration definition.
|
||||
* @param connectorConfig the provided connector config values
|
||||
*/
|
||||
ConfigInfos validateConnectorConfig(Map<String, String> connectorConfig);
|
||||
|
||||
/**
|
||||
* Restart the task with the given id.
|
||||
* @param id id of the task
|
||||
* @param cb callback to invoke upon completion
|
||||
*/
|
||||
void restartTask(ConnectorTaskId id, Callback<Void> cb);
|
||||
|
||||
/**
|
||||
* Restart the connector.
|
||||
* @param connName name of the connector
|
||||
* @param cb callback to invoke upon completion
|
||||
*/
|
||||
void restartConnector(String connName, Callback<Void> cb);
|
||||
|
||||
/**
|
||||
* Restart the connector.
|
||||
* @param delayMs delay before restart
|
||||
* @param connName name of the connector
|
||||
* @param cb callback to invoke upon completion
|
||||
* @return The id of the request
|
||||
*/
|
||||
HerderRequest restartConnector(long delayMs, String connName, Callback<Void> cb);
|
||||
|
||||
/**
|
||||
* Pause the connector. This call will asynchronously suspend processing by the connector and all
|
||||
* of its tasks.
|
||||
* @param connector name of the connector
|
||||
*/
|
||||
void pauseConnector(String connector);
|
||||
|
||||
/**
|
||||
* Resume the connector. This call will asynchronously start the connector and its tasks (if
|
||||
* not started already).
|
||||
* @param connector name of the connector
|
||||
*/
|
||||
void resumeConnector(String connector);
|
||||
|
||||
/**
|
||||
* Returns a handle to the plugin factory used by this herder and its worker.
|
||||
*
|
||||
* @return a reference to the plugin factory.
|
||||
*/
|
||||
Plugins plugins();
|
||||
|
||||
/**
|
||||
* Get the cluster ID of the Kafka cluster backing this Connect cluster.
|
||||
* @return the cluster ID of the Kafka cluster backing this connect cluster
|
||||
*/
|
||||
String kafkaClusterId();
|
||||
|
||||
enum ConfigReloadAction {
|
||||
NONE,
|
||||
RESTART
|
||||
}
|
||||
|
||||
class Created<T> {
|
||||
private final boolean created;
|
||||
private final T result;
|
||||
|
||||
public Created(boolean created, T result) {
|
||||
this.created = created;
|
||||
this.result = result;
|
||||
}
|
||||
|
||||
public boolean created() {
|
||||
return created;
|
||||
}
|
||||
|
||||
public T result() {
|
||||
return result;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (this == o) return true;
|
||||
if (o == null || getClass() != o.getClass()) return false;
|
||||
Created<?> created1 = (Created<?>) o;
|
||||
return Objects.equals(created, created1.created) &&
|
||||
Objects.equals(result, created1.result);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return Objects.hash(created, result);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,45 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.runtime;
|
||||
|
||||
import org.apache.kafka.connect.connector.ConnectorContext;
|
||||
|
||||
/**
|
||||
* ConnectorContext for use with a Herder
|
||||
*/
|
||||
public class HerderConnectorContext implements ConnectorContext {
|
||||
|
||||
private final AbstractHerder herder;
|
||||
private final String connectorName;
|
||||
|
||||
public HerderConnectorContext(AbstractHerder herder, String connectorName) {
|
||||
this.herder = herder;
|
||||
this.connectorName = connectorName;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void requestTaskReconfiguration() {
|
||||
// Local herder runs in memory in this process
|
||||
// Distributed herder will forward the request to the leader if needed
|
||||
herder.requestTaskReconfiguration(connectorName);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void raiseError(Exception e) {
|
||||
herder.onFailure(connectorName, e);
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,21 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.runtime;
|
||||
|
||||
public interface HerderRequest {
|
||||
void cancel();
|
||||
}
|
||||
@@ -0,0 +1,73 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.runtime;
|
||||
|
||||
import javax.crypto.SecretKey;
|
||||
import java.util.Objects;
|
||||
|
||||
/**
|
||||
* A session key, which can be used to validate internal REST requests between workers.
|
||||
*/
|
||||
public class SessionKey {
|
||||
|
||||
private final SecretKey key;
|
||||
private final long creationTimestamp;
|
||||
|
||||
/**
|
||||
* Create a new session key with the given key value and creation timestamp
|
||||
* @param key the actual cryptographic key to use for request validation; may not be null
|
||||
* @param creationTimestamp the time at which the key was generated
|
||||
*/
|
||||
public SessionKey(SecretKey key, long creationTimestamp) {
|
||||
this.key = Objects.requireNonNull(key, "Key may not be null");
|
||||
this.creationTimestamp = creationTimestamp;
|
||||
}
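// Illustrative sketch (not part of the original source): a worker could build a session key from a
// JDK-generated secret. The "HmacSHA256" algorithm name here is an example choice, not necessarily
// what Connect itself uses.
//
//   SecretKey secret = javax.crypto.KeyGenerator.getInstance("HmacSHA256").generateKey();
//   SessionKey sessionKey = new SessionKey(secret, System.currentTimeMillis());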
|
||||
|
||||
/**
|
||||
* Get the cryptographic key to use for request validation.
|
||||
*
|
||||
* @return the cryptographic key; may not be null
|
||||
*/
|
||||
public SecretKey key() {
|
||||
return key;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the time at which the key was generated.
|
||||
*
|
||||
* @return the time at which the key was generated
|
||||
*/
|
||||
public long creationTimestamp() {
|
||||
return creationTimestamp;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (this == o)
|
||||
return true;
|
||||
if (o == null || getClass() != o.getClass())
|
||||
return false;
|
||||
SessionKey that = (SessionKey) o;
|
||||
return creationTimestamp == that.creationTimestamp
|
||||
&& key.equals(that.key);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return Objects.hash(key, creationTimestamp);
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,126 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.runtime;
|
||||
|
||||
import org.apache.kafka.common.config.ConfigDef;
|
||||
import org.apache.kafka.common.config.ConfigDef.Importance;
|
||||
import org.apache.kafka.common.config.ConfigException;
|
||||
import org.apache.kafka.connect.runtime.isolation.Plugins;
|
||||
import org.apache.kafka.connect.sink.SinkTask;
|
||||
import org.apache.kafka.connect.transforms.util.RegexValidator;
|
||||
|
||||
import java.util.Map;
|
||||
|
||||
/**
|
||||
* Configuration needed for all sink connectors
|
||||
*/
|
||||
|
||||
public class SinkConnectorConfig extends ConnectorConfig {
|
||||
|
||||
public static final String TOPICS_CONFIG = SinkTask.TOPICS_CONFIG;
|
||||
private static final String TOPICS_DOC = "List of topics to consume, separated by commas";
|
||||
public static final String TOPICS_DEFAULT = "";
|
||||
private static final String TOPICS_DISPLAY = "Topics";
|
||||
|
||||
public static final String TOPICS_REGEX_CONFIG = SinkTask.TOPICS_REGEX_CONFIG;
|
||||
private static final String TOPICS_REGEX_DOC = "Regular expression giving topics to consume. " +
|
||||
"Under the hood, the regex is compiled to a <code>java.util.regex.Pattern</code>. " +
|
||||
"Only one of " + TOPICS_CONFIG + " or " + TOPICS_REGEX_CONFIG + " should be specified.";
|
||||
public static final String TOPICS_REGEX_DEFAULT = "";
|
||||
private static final String TOPICS_REGEX_DISPLAY = "Topics regex";
|
||||
|
||||
public static final String DLQ_PREFIX = "errors.deadletterqueue.";
|
||||
|
||||
public static final String DLQ_TOPIC_NAME_CONFIG = DLQ_PREFIX + "topic.name";
|
||||
public static final String DLQ_TOPIC_NAME_DOC = "The name of the topic to be used as the dead letter queue (DLQ) for messages that " +
|
||||
"result in an error when processed by this sink connector, or its transformations or converters. The topic name is blank by default, " +
|
||||
"which means that no messages are to be recorded in the DLQ.";
|
||||
public static final String DLQ_TOPIC_DEFAULT = "";
|
||||
private static final String DLQ_TOPIC_DISPLAY = "Dead Letter Queue Topic Name";
|
||||
|
||||
public static final String DLQ_TOPIC_REPLICATION_FACTOR_CONFIG = DLQ_PREFIX + "topic.replication.factor";
|
||||
private static final String DLQ_TOPIC_REPLICATION_FACTOR_CONFIG_DOC = "Replication factor used to create the dead letter queue topic when it doesn't already exist.";
|
||||
public static final short DLQ_TOPIC_REPLICATION_FACTOR_CONFIG_DEFAULT = 3;
|
||||
private static final String DLQ_TOPIC_REPLICATION_FACTOR_CONFIG_DISPLAY = "Dead Letter Queue Topic Replication Factor";
|
||||
|
||||
public static final String DLQ_CONTEXT_HEADERS_ENABLE_CONFIG = DLQ_PREFIX + "context.headers.enable";
|
||||
public static final boolean DLQ_CONTEXT_HEADERS_ENABLE_DEFAULT = false;
|
||||
public static final String DLQ_CONTEXT_HEADERS_ENABLE_DOC = "If true, add headers containing error context to the messages " +
|
||||
"written to the dead letter queue. To avoid clashing with headers from the original record, all error context header " +
|
||||
"keys, all error context header keys will start with <code>__connect.errors.</code>";
|
||||
private static final String DLQ_CONTEXT_HEADERS_ENABLE_DISPLAY = "Enable Error Context Headers";
|
||||
|
||||
static ConfigDef config = ConnectorConfig.configDef()
|
||||
.define(TOPICS_CONFIG, ConfigDef.Type.LIST, TOPICS_DEFAULT, ConfigDef.Importance.HIGH, TOPICS_DOC, COMMON_GROUP, 4, ConfigDef.Width.LONG, TOPICS_DISPLAY)
|
||||
.define(TOPICS_REGEX_CONFIG, ConfigDef.Type.STRING, TOPICS_REGEX_DEFAULT, new RegexValidator(), ConfigDef.Importance.HIGH, TOPICS_REGEX_DOC, COMMON_GROUP, 4, ConfigDef.Width.LONG, TOPICS_REGEX_DISPLAY)
|
||||
.define(DLQ_TOPIC_NAME_CONFIG, ConfigDef.Type.STRING, DLQ_TOPIC_DEFAULT, Importance.MEDIUM, DLQ_TOPIC_NAME_DOC, ERROR_GROUP, 6, ConfigDef.Width.MEDIUM, DLQ_TOPIC_DISPLAY)
|
||||
.define(DLQ_TOPIC_REPLICATION_FACTOR_CONFIG, ConfigDef.Type.SHORT, DLQ_TOPIC_REPLICATION_FACTOR_CONFIG_DEFAULT, Importance.MEDIUM, DLQ_TOPIC_REPLICATION_FACTOR_CONFIG_DOC, ERROR_GROUP, 7, ConfigDef.Width.MEDIUM, DLQ_TOPIC_REPLICATION_FACTOR_CONFIG_DISPLAY)
|
||||
.define(DLQ_CONTEXT_HEADERS_ENABLE_CONFIG, ConfigDef.Type.BOOLEAN, DLQ_CONTEXT_HEADERS_ENABLE_DEFAULT, Importance.MEDIUM, DLQ_CONTEXT_HEADERS_ENABLE_DOC, ERROR_GROUP, 8, ConfigDef.Width.MEDIUM, DLQ_CONTEXT_HEADERS_ENABLE_DISPLAY);
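// Illustrative example (not part of the original source): sink connector properties that route failed
// records to a dead letter queue topic. The topic name is a placeholder; the keys match the DLQ_*
// constants defined above.
//
//   errors.tolerance=all
//   errors.deadletterqueue.topic.name=example-dlq
//   errors.deadletterqueue.topic.replication.factor=3
//   errors.deadletterqueue.context.headers.enable=true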
|
||||
|
||||
public static ConfigDef configDef() {
|
||||
return config;
|
||||
}
|
||||
|
||||
public SinkConnectorConfig(Plugins plugins, Map<String, String> props) {
|
||||
super(plugins, config, props);
|
||||
}
|
||||
|
||||
/**
|
||||
* Throw an exception if the passed-in properties do not constitute a valid sink.
|
||||
* @param props sink configuration properties
|
||||
*/
|
||||
public static void validate(Map<String, String> props) {
|
||||
final boolean hasTopicsConfig = hasTopicsConfig(props);
|
||||
final boolean hasTopicsRegexConfig = hasTopicsRegexConfig(props);
|
||||
|
||||
if (hasTopicsConfig && hasTopicsRegexConfig) {
|
||||
throw new ConfigException(SinkTask.TOPICS_CONFIG + " and " + SinkTask.TOPICS_REGEX_CONFIG +
|
||||
" are mutually exclusive options, but both are set.");
|
||||
}
|
||||
|
||||
if (!hasTopicsConfig && !hasTopicsRegexConfig) {
|
||||
throw new ConfigException("Must configure one of " +
|
||||
SinkTask.TOPICS_CONFIG + " or " + SinkTask.TOPICS_REGEX_CONFIG);
|
||||
}
|
||||
}
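// Illustrative example (not part of the original source): validate(props) accepts a configuration that
// sets exactly one of "topics" or "topics.regex" and throws a ConfigException otherwise. Names below
// are placeholders.
//
//   Map<String, String> props = new HashMap<>();
//   props.put("name", "example-sink");
//   props.put("connector.class", "org.apache.kafka.connect.file.FileStreamSinkConnector");
//   props.put("topics", "orders,payments");   // only "topics" is set
//   SinkConnectorConfig.validate(props);       // passes
//   props.put("topics.regex", "orders.*");     // now both are set
//   SinkConnectorConfig.validate(props);       // throws ConfigException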
|
||||
|
||||
public static boolean hasTopicsConfig(Map<String, String> props) {
|
||||
String topicsStr = props.get(TOPICS_CONFIG);
|
||||
return topicsStr != null && !topicsStr.trim().isEmpty();
|
||||
}
|
||||
|
||||
public static boolean hasTopicsRegexConfig(Map<String, String> props) {
|
||||
String topicsRegexStr = props.get(TOPICS_REGEX_CONFIG);
|
||||
return topicsRegexStr != null && !topicsRegexStr.trim().isEmpty();
|
||||
}
|
||||
|
||||
public String dlqTopicName() {
|
||||
return getString(DLQ_TOPIC_NAME_CONFIG);
|
||||
}
|
||||
|
||||
public short dlqTopicReplicationFactor() {
|
||||
return getShort(DLQ_TOPIC_REPLICATION_FACTOR_CONFIG);
|
||||
}
|
||||
|
||||
public boolean isDlqContextHeadersEnabled() {
|
||||
return getBoolean(DLQ_CONTEXT_HEADERS_ENABLE_CONFIG);
|
||||
}
|
||||
|
||||
public static void main(String[] args) {
|
||||
System.out.println(config.toHtml());
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,39 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.runtime;
|
||||
|
||||
import org.apache.kafka.common.config.ConfigDef;
|
||||
import org.apache.kafka.connect.runtime.isolation.Plugins;
|
||||
|
||||
import java.util.Map;
|
||||
|
||||
public class SourceConnectorConfig extends ConnectorConfig {
|
||||
|
||||
private static ConfigDef config = ConnectorConfig.configDef();
|
||||
|
||||
public static ConfigDef configDef() {
|
||||
return config;
|
||||
}
|
||||
|
||||
public SourceConnectorConfig(Plugins plugins, Map<String, String> props) {
|
||||
super(plugins, config, props);
|
||||
}
|
||||
|
||||
public static void main(String[] args) {
|
||||
System.out.println(config.toHtml());
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,124 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.runtime;
|
||||
|
||||
import org.apache.kafka.connect.errors.ConnectException;
|
||||
import org.apache.kafka.connect.util.ConnectorTaskId;
|
||||
import org.apache.kafka.connect.util.LoggingContext;
|
||||
import org.apache.kafka.common.utils.ThreadUtils;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import java.util.concurrent.CancellationException;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
import java.util.concurrent.ConcurrentMap;
|
||||
import java.util.concurrent.ExecutionException;
|
||||
import java.util.concurrent.Executors;
|
||||
import java.util.concurrent.ScheduledExecutorService;
|
||||
import java.util.concurrent.ScheduledFuture;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
/**
|
||||
* <p>
|
||||
* Manages offset commit scheduling and execution for SourceTasks.
|
||||
* </p>
|
||||
* <p>
|
||||
* Unlike sink tasks which directly manage their offset commits in the main poll() thread since
|
||||
* they drive the event loop and control (for all intents and purposes) the timeouts, source
|
||||
* tasks are at the whim of the connector and cannot be guaranteed to wake up on the necessary
|
||||
* schedule. Instead, this class tracks all the active tasks, their schedule for commits, and
|
||||
* ensures they are invoked in a timely fashion.
|
||||
* </p>
|
||||
*/
|
||||
class SourceTaskOffsetCommitter {
|
||||
private static final Logger log = LoggerFactory.getLogger(SourceTaskOffsetCommitter.class);
|
||||
|
||||
private final WorkerConfig config;
|
||||
private final ScheduledExecutorService commitExecutorService;
|
||||
private final ConcurrentMap<ConnectorTaskId, ScheduledFuture<?>> committers;
|
||||
|
||||
// visible for testing
|
||||
SourceTaskOffsetCommitter(WorkerConfig config,
|
||||
ScheduledExecutorService commitExecutorService,
|
||||
ConcurrentMap<ConnectorTaskId, ScheduledFuture<?>> committers) {
|
||||
this.config = config;
|
||||
this.commitExecutorService = commitExecutorService;
|
||||
this.committers = committers;
|
||||
}
|
||||
|
||||
public SourceTaskOffsetCommitter(WorkerConfig config) {
|
||||
this(config, Executors.newSingleThreadScheduledExecutor(ThreadUtils.createThreadFactory(
|
||||
SourceTaskOffsetCommitter.class.getSimpleName() + "-%d", false)),
|
||||
new ConcurrentHashMap<ConnectorTaskId, ScheduledFuture<?>>());
|
||||
}
|
||||
|
||||
public void close(long timeoutMs) {
|
||||
commitExecutorService.shutdown();
|
||||
try {
|
||||
if (!commitExecutorService.awaitTermination(timeoutMs, TimeUnit.MILLISECONDS)) {
|
||||
log.error("Graceful shutdown of offset commitOffsets thread timed out.");
|
||||
}
|
||||
} catch (InterruptedException e) {
|
||||
// ignore and allow to exit immediately
|
||||
}
|
||||
}
|
||||
|
||||
public void schedule(final ConnectorTaskId id, final WorkerSourceTask workerTask) {
|
||||
long commitIntervalMs = config.getLong(WorkerConfig.OFFSET_COMMIT_INTERVAL_MS_CONFIG);
|
||||
ScheduledFuture<?> commitFuture = commitExecutorService.scheduleWithFixedDelay(new Runnable() {
|
||||
@Override
|
||||
public void run() {
|
||||
try (LoggingContext loggingContext = LoggingContext.forOffsets(id)) {
|
||||
commit(workerTask);
|
||||
}
|
||||
}
|
||||
}, commitIntervalMs, commitIntervalMs, TimeUnit.MILLISECONDS);
|
||||
committers.put(id, commitFuture);
|
||||
}
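    // Illustrative usage sketch (not part of the original file): the Worker that owns a source
    // task would typically register it here when the task starts and deregister it on shutdown,
    // so offsets get committed every offset.flush.interval.ms. The identifiers below are
    // hypothetical:
    //
    //   SourceTaskOffsetCommitter committer = new SourceTaskOffsetCommitter(workerConfig);
    //   committer.schedule(taskId, workerSourceTask);   // start periodic commits for this task
    //   ...
    //   committer.remove(taskId);                       // cancel commits when the task stops
    //   committer.close(5000L);                         // shut the executor down on worker stop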
|
||||
|
||||
public void remove(ConnectorTaskId id) {
|
||||
final ScheduledFuture<?> task = committers.remove(id);
|
||||
if (task == null)
|
||||
return;
|
||||
|
||||
try (LoggingContext loggingContext = LoggingContext.forTask(id)) {
|
||||
task.cancel(false);
|
||||
if (!task.isDone())
|
||||
task.get();
|
||||
} catch (CancellationException e) {
|
||||
// ignore
|
||||
log.trace("Offset commit thread was cancelled by another thread while removing connector task with id: {}", id);
|
||||
} catch (ExecutionException | InterruptedException e) {
|
||||
throw new ConnectException("Unexpected interruption in SourceTaskOffsetCommitter while removing task with id: " + id, e);
|
||||
}
|
||||
}
|
||||
|
||||
private void commit(WorkerSourceTask workerTask) {
|
||||
log.debug("{} Committing offsets", workerTask);
|
||||
try {
|
||||
if (workerTask.commitOffsets()) {
|
||||
return;
|
||||
}
|
||||
log.error("{} Failed to commit offsets", workerTask);
|
||||
} catch (Throwable t) {
|
||||
// We're very careful about exceptions here since any uncaught exceptions in the commit
|
||||
// thread would cause the fixed interval schedule on the ExecutorService to stop running
|
||||
// for that task
|
||||
log.error("{} Unhandled exception when committing: ", workerTask, t);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,173 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.runtime;
|
||||
|
||||
import org.apache.kafka.connect.runtime.AbstractStatus.State;
|
||||
|
||||
import java.util.concurrent.atomic.AtomicReference;
|
||||
|
||||
/**
|
||||
* Utility class that tracks the current state and the duration of time spent in each state.
|
||||
* This class is threadsafe.
|
||||
*/
|
||||
public class StateTracker {
|
||||
|
||||
private final AtomicReference<StateChange> lastState = new AtomicReference<>(new StateChange());
|
||||
|
||||
/**
|
||||
* Change the current state.
|
||||
* <p>
|
||||
* This method is synchronized to ensure that all state changes are captured correctly and in the same order.
|
||||
* Synchronization is acceptable since it is assumed that state changes will be relatively infrequent.
|
||||
*
|
||||
* @param newState the current state; may not be null
|
||||
* @param now the current time in milliseconds
|
||||
*/
|
||||
public synchronized void changeState(State newState, long now) {
|
||||
// JDK8: remove synchronization by using lastState.getAndUpdate(oldState->oldState.newState(newState, now));
|
||||
lastState.set(lastState.get().newState(newState, now));
|
||||
}
|
||||
|
||||
/**
|
||||
* Calculate the ratio of time spent in the specified state.
|
||||
*
|
||||
* @param ratioState the state for which the ratio is to be calculated; may not be null
|
||||
* @param now the current time in milliseconds
|
||||
* @return the ratio of time spent in the specified state to the time spent in all states
|
||||
*/
|
||||
public double durationRatio(State ratioState, long now) {
|
||||
return lastState.get().durationRatio(ratioState, now);
|
||||
}
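    // Worked example (illustrative, not part of the original file): if changeState(RUNNING, 0)
    // and changeState(PAUSED, 800) were called, then at now = 1000 ms the accumulated RUNNING
    // time is 800 ms and the current PAUSED interval is 200 ms, so
    //   durationRatio(State.RUNNING, 1000) == 800.0 / 1000 == 0.8
    //   durationRatio(State.PAUSED, 1000)  == 200.0 / 1000 == 0.2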
|
||||
|
||||
/**
|
||||
* Get the current state.
|
||||
*
|
||||
* @return the current state; may be null if no state change has been recorded
|
||||
*/
|
||||
public State currentState() {
|
||||
return lastState.get().state;
|
||||
}
|
||||
|
||||
/**
|
||||
* An immutable record of the accumulated times at the most recent state change. This class is required to
|
||||
* efficiently make {@link StateTracker} threadsafe.
|
||||
*/
|
||||
private static final class StateChange {
|
||||
|
||||
private final State state;
|
||||
private final long startTime;
|
||||
private final long unassignedTotalTimeMs;
|
||||
private final long runningTotalTimeMs;
|
||||
private final long pausedTotalTimeMs;
|
||||
private final long failedTotalTimeMs;
|
||||
private final long destroyedTotalTimeMs;
|
||||
|
||||
/**
|
||||
* The initial StateChange instance before any state has changed.
|
||||
*/
|
||||
StateChange() {
|
||||
this(null, 0L, 0L, 0L, 0L, 0L, 0L);
|
||||
}
|
||||
|
||||
StateChange(State state, long startTime, long unassignedTotalTimeMs, long runningTotalTimeMs,
|
||||
long pausedTotalTimeMs, long failedTotalTimeMs, long destroyedTotalTimeMs) {
|
||||
this.state = state;
|
||||
this.startTime = startTime;
|
||||
this.unassignedTotalTimeMs = unassignedTotalTimeMs;
|
||||
this.runningTotalTimeMs = runningTotalTimeMs;
|
||||
this.pausedTotalTimeMs = pausedTotalTimeMs;
|
||||
this.failedTotalTimeMs = failedTotalTimeMs;
|
||||
this.destroyedTotalTimeMs = destroyedTotalTimeMs;
|
||||
}
|
||||
|
||||
/**
|
||||
* Return a new StateChange that includes the accumulated times of this state plus the time spent in the
|
||||
* current state.
|
||||
*
|
||||
* @param state the new state; may not be null
|
||||
* @param now the time at which the state transition occurs.
|
||||
     * @return the new StateChange, though it may be this instance if the state did not actually change; never null
|
||||
*/
|
||||
public StateChange newState(State state, long now) {
|
||||
if (this.state == null) {
|
||||
return new StateChange(state, now, 0L, 0L, 0L, 0L, 0L);
|
||||
}
|
||||
if (state == this.state) {
|
||||
return this;
|
||||
}
|
||||
long unassignedTime = this.unassignedTotalTimeMs;
|
||||
long runningTime = this.runningTotalTimeMs;
|
||||
long pausedTime = this.pausedTotalTimeMs;
|
||||
long failedTime = this.failedTotalTimeMs;
|
||||
long destroyedTime = this.destroyedTotalTimeMs;
|
||||
long duration = now - startTime;
|
||||
switch (this.state) {
|
||||
case UNASSIGNED:
|
||||
unassignedTime += duration;
|
||||
break;
|
||||
case RUNNING:
|
||||
runningTime += duration;
|
||||
break;
|
||||
case PAUSED:
|
||||
pausedTime += duration;
|
||||
break;
|
||||
case FAILED:
|
||||
failedTime += duration;
|
||||
break;
|
||||
case DESTROYED:
|
||||
destroyedTime += duration;
|
||||
break;
|
||||
}
|
||||
return new StateChange(state, now, unassignedTime, runningTime, pausedTime, failedTime, destroyedTime);
|
||||
}
|
||||
|
||||
/**
|
||||
* Calculate the ratio of time spent in the specified state.
|
||||
*
|
||||
* @param ratioState the state for which the ratio is to be calculated; may not be null
|
||||
* @param now the current time in milliseconds
|
||||
* @return the ratio of time spent in the specified state to the time spent in all states
|
||||
*/
|
||||
public double durationRatio(State ratioState, long now) {
|
||||
if (state == null) {
|
||||
return 0.0d;
|
||||
}
|
||||
long durationCurrent = now - startTime; // since last state change
|
||||
long durationDesired = ratioState == state ? durationCurrent : 0L;
|
||||
switch (ratioState) {
|
||||
case UNASSIGNED:
|
||||
durationDesired += unassignedTotalTimeMs;
|
||||
break;
|
||||
case RUNNING:
|
||||
durationDesired += runningTotalTimeMs;
|
||||
break;
|
||||
case PAUSED:
|
||||
durationDesired += pausedTotalTimeMs;
|
||||
break;
|
||||
case FAILED:
|
||||
durationDesired += failedTotalTimeMs;
|
||||
break;
|
||||
case DESTROYED:
|
||||
durationDesired += destroyedTotalTimeMs;
|
||||
break;
|
||||
}
|
||||
long total = durationCurrent + unassignedTotalTimeMs + runningTotalTimeMs + pausedTotalTimeMs +
|
||||
failedTotalTimeMs + destroyedTotalTimeMs;
|
||||
return total == 0.0d ? 0.0d : (double) durationDesired / total;
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,36 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.runtime;
|
||||
|
||||
/**
|
||||
* The target state of a connector is its desired state as indicated by the user
|
||||
* through interaction with the REST API. When a connector is first created, its
|
||||
* target state is "STARTED." This does not mean it has actually started, just that
|
||||
* the Connect framework will attempt to start it after its tasks have been assigned.
|
||||
* After the connector has been paused, the target state will change to PAUSED,
|
||||
* and all the tasks will stop doing work.
|
||||
*
|
||||
* Target states are persisted in the config topic, which is read by all of the
|
||||
* workers in the group. When a worker sees a new target state for a connector which
|
||||
* is running, it will transition any tasks which it owns (i.e. which have been
|
||||
* assigned to it by the leader) into the desired target state. Upon completion of
|
||||
* a task rebalance, the worker will start the task in the last known target state.
|
||||
*/
|
||||
public enum TargetState {
|
||||
STARTED,
|
||||
PAUSED,
|
||||
}
|
||||
@@ -0,0 +1,53 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.runtime;
|
||||
|
||||
import org.apache.kafka.common.config.AbstractConfig;
|
||||
import org.apache.kafka.common.config.ConfigDef;
|
||||
import org.apache.kafka.common.config.ConfigDef.Importance;
|
||||
import org.apache.kafka.common.config.ConfigDef.Type;
|
||||
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
||||
/**
|
||||
* <p>
|
||||
* Configuration options for Tasks. These only include Kafka Connect system-level configuration
|
||||
* options.
|
||||
* </p>
|
||||
*/
|
||||
public class TaskConfig extends AbstractConfig {
|
||||
|
||||
public static final String TASK_CLASS_CONFIG = "task.class";
|
||||
private static final String TASK_CLASS_DOC =
|
||||
"Name of the class for this task. Must be a subclass of org.apache.kafka.connect.connector.Task";
|
||||
|
||||
private static ConfigDef config;
|
||||
|
||||
static {
|
||||
config = new ConfigDef()
|
||||
.define(TASK_CLASS_CONFIG, Type.CLASS, Importance.HIGH, TASK_CLASS_DOC);
|
||||
}
|
||||
|
||||
public TaskConfig() {
|
||||
this(new HashMap<String, String>());
|
||||
}
|
||||
|
||||
public TaskConfig(Map<String, ?> props) {
|
||||
super(config, props, true);
|
||||
}
|
||||
}
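// Illustrative example (not part of the original file): the only framework-level task setting is
// task.class, so a minimal task configuration can be built as follows (the task class name is
// hypothetical):
//
//   Map<String, String> props = new HashMap<>();
//   props.put(TaskConfig.TASK_CLASS_CONFIG, "org.example.MySourceTask");
//   TaskConfig taskConfig = new TaskConfig(props);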
|
||||
@@ -0,0 +1,65 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.runtime;
|
||||
|
||||
import org.apache.kafka.connect.util.ConnectorTaskId;
|
||||
|
||||
public class TaskStatus extends AbstractStatus<ConnectorTaskId> {
|
||||
|
||||
public TaskStatus(ConnectorTaskId id, State state, String workerUrl, int generation, String trace) {
|
||||
super(id, state, workerUrl, generation, trace);
|
||||
}
|
||||
|
||||
public TaskStatus(ConnectorTaskId id, State state, String workerUrl, int generation) {
|
||||
super(id, state, workerUrl, generation, null);
|
||||
}
|
||||
|
||||
public interface Listener {
|
||||
|
||||
/**
|
||||
* Invoked after successful startup of the task.
|
||||
* @param id The id of the task
|
||||
*/
|
||||
void onStartup(ConnectorTaskId id);
|
||||
|
||||
/**
|
||||
* Invoked after the task has been paused.
|
||||
* @param id The id of the task
|
||||
*/
|
||||
void onPause(ConnectorTaskId id);
|
||||
|
||||
/**
|
||||
* Invoked after the task has been resumed.
|
||||
* @param id The id of the task
|
||||
*/
|
||||
void onResume(ConnectorTaskId id);
|
||||
|
||||
/**
|
||||
* Invoked if the task raises an error. No shutdown event will follow.
|
||||
* @param id The id of the task
|
||||
* @param cause The error raised by the task.
|
||||
*/
|
||||
void onFailure(ConnectorTaskId id, Throwable cause);
|
||||
|
||||
/**
|
||||
* Invoked after successful shutdown of the task.
|
||||
* @param id The id of the task
|
||||
*/
|
||||
void onShutdown(ConnectorTaskId id);
|
||||
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,110 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.runtime;
|
||||
|
||||
import org.apache.kafka.connect.util.ConnectorTaskId;
|
||||
|
||||
import java.util.Objects;
|
||||
|
||||
/**
|
||||
 * Represents the metadata that is stored as the value of the record written by
 * {@link org.apache.kafka.connect.storage.StatusBackingStore#put(TopicStatus)}.
|
||||
*/
|
||||
public class TopicStatus {
|
||||
private final String topic;
|
||||
private final String connector;
|
||||
private final int task;
|
||||
private final long discoverTimestamp;
|
||||
|
||||
public TopicStatus(String topic, ConnectorTaskId task, long discoverTimestamp) {
|
||||
this(topic, task.connector(), task.task(), discoverTimestamp);
|
||||
}
|
||||
|
||||
public TopicStatus(String topic, String connector, int task, long discoverTimestamp) {
|
||||
this.topic = Objects.requireNonNull(topic);
|
||||
this.connector = Objects.requireNonNull(connector);
|
||||
this.task = task;
|
||||
this.discoverTimestamp = discoverTimestamp;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the name of the topic.
|
||||
*
|
||||
* @return the topic name; never null
|
||||
*/
|
||||
public String topic() {
|
||||
return topic;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the name of the connector.
|
||||
*
|
||||
* @return the connector name; never null
|
||||
*/
|
||||
public String connector() {
|
||||
return connector;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the ID of the task that stored the topic status.
|
||||
*
|
||||
* @return the task ID
|
||||
*/
|
||||
public int task() {
|
||||
return task;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get a timestamp that represents when this topic was discovered as being actively used by
|
||||
* this connector.
|
||||
*
|
||||
* @return the discovery timestamp
|
||||
*/
|
||||
public long discoverTimestamp() {
|
||||
return discoverTimestamp;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "TopicStatus{" +
|
||||
"topic='" + topic + '\'' +
|
||||
", connector='" + connector + '\'' +
|
||||
", task=" + task +
|
||||
", discoverTimestamp=" + discoverTimestamp +
|
||||
'}';
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (this == o) {
|
||||
return true;
|
||||
}
|
||||
if (!(o instanceof TopicStatus)) {
|
||||
return false;
|
||||
}
|
||||
TopicStatus that = (TopicStatus) o;
|
||||
return task == that.task &&
|
||||
discoverTimestamp == that.discoverTimestamp &&
|
||||
topic.equals(that.topic) &&
|
||||
connector.equals(that.connector);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return Objects.hash(topic, connector, task, discoverTimestamp);
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,84 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.runtime;
|
||||
|
||||
import org.apache.kafka.connect.connector.ConnectRecord;
|
||||
import org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperator;
|
||||
import org.apache.kafka.connect.runtime.errors.Stage;
|
||||
import org.apache.kafka.connect.transforms.Transformation;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.Objects;
|
||||
import java.util.StringJoiner;
|
||||
|
||||
public class TransformationChain<R extends ConnectRecord<R>> {
|
||||
private static final Logger log = LoggerFactory.getLogger(TransformationChain.class);
|
||||
|
||||
private final List<Transformation<R>> transformations;
|
||||
private final RetryWithToleranceOperator retryWithToleranceOperator;
|
||||
|
||||
public TransformationChain(List<Transformation<R>> transformations, RetryWithToleranceOperator retryWithToleranceOperator) {
|
||||
this.transformations = transformations;
|
||||
this.retryWithToleranceOperator = retryWithToleranceOperator;
|
||||
}
|
||||
|
||||
public R apply(R record) {
|
||||
if (transformations.isEmpty()) return record;
|
||||
|
||||
for (final Transformation<R> transformation : transformations) {
|
||||
final R current = record;
|
||||
|
||||
log.trace("Applying transformation {} to {}",
|
||||
transformation.getClass().getName(), record);
|
||||
// execute the operation
|
||||
record = retryWithToleranceOperator.execute(() -> transformation.apply(current), Stage.TRANSFORMATION, transformation.getClass());
|
||||
|
||||
if (record == null) break;
|
||||
}
|
||||
|
||||
return record;
|
||||
}
|
||||
|
||||
public void close() {
|
||||
for (Transformation<R> transformation : transformations) {
|
||||
transformation.close();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (this == o) return true;
|
||||
if (o == null || getClass() != o.getClass()) return false;
|
||||
TransformationChain that = (TransformationChain) o;
|
||||
return Objects.equals(transformations, that.transformations);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return Objects.hash(transformations);
|
||||
}
|
||||
|
||||
public String toString() {
|
||||
StringJoiner chain = new StringJoiner(", ", getClass().getName() + "{", "}");
|
||||
for (Transformation<R> transformation : transformations) {
|
||||
chain.add(transformation.getClass().getName());
|
||||
}
|
||||
return chain.toString();
|
||||
}
|
||||
}
|
||||
File diff suppressed because it is too large
@@ -0,0 +1,430 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.runtime;
|
||||
|
||||
import org.apache.kafka.clients.ClientDnsLookup;
|
||||
import org.apache.kafka.clients.CommonClientConfigs;
|
||||
import org.apache.kafka.common.config.AbstractConfig;
|
||||
import org.apache.kafka.common.config.ConfigDef;
|
||||
import org.apache.kafka.common.config.ConfigDef.Importance;
|
||||
import org.apache.kafka.common.config.ConfigDef.Type;
|
||||
import org.apache.kafka.common.config.ConfigException;
|
||||
import org.apache.kafka.common.config.internals.BrokerSecurityConfigs;
|
||||
import org.apache.kafka.common.metrics.Sensor;
|
||||
import org.apache.kafka.connect.json.JsonConverter;
|
||||
import org.apache.kafka.connect.json.JsonConverterConfig;
|
||||
import org.apache.kafka.connect.storage.Converter;
|
||||
import org.apache.kafka.connect.storage.SimpleHeaderConverter;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.regex.Pattern;
|
||||
|
||||
import static org.apache.kafka.common.config.ConfigDef.Range.atLeast;
|
||||
import static org.apache.kafka.common.config.ConfigDef.ValidString.in;
|
||||
|
||||
/**
|
||||
* Common base class providing configuration for Kafka Connect workers, whether standalone or distributed.
|
||||
*/
|
||||
public class WorkerConfig extends AbstractConfig {
|
||||
private static final Logger log = LoggerFactory.getLogger(WorkerConfig.class);
|
||||
|
||||
private static final Pattern COMMA_WITH_WHITESPACE = Pattern.compile("\\s*,\\s*");
|
||||
|
||||
public static final String BOOTSTRAP_SERVERS_CONFIG = "bootstrap.servers";
|
||||
public static final String BOOTSTRAP_SERVERS_DOC
|
||||
= "A list of host/port pairs to use for establishing the initial connection to the Kafka "
|
||||
+ "cluster. The client will make use of all servers irrespective of which servers are "
|
||||
+ "specified here for bootstrapping—this list only impacts the initial hosts used "
|
||||
+ "to discover the full set of servers. This list should be in the form "
|
||||
+ "<code>host1:port1,host2:port2,...</code>. Since these servers are just used for the "
|
||||
+ "initial connection to discover the full cluster membership (which may change "
|
||||
+ "dynamically), this list need not contain the full set of servers (you may want more "
|
||||
+ "than one, though, in case a server is down).";
|
||||
public static final String BOOTSTRAP_SERVERS_DEFAULT = "localhost:9092";
|
||||
|
||||
public static final String CLIENT_DNS_LOOKUP_CONFIG = CommonClientConfigs.CLIENT_DNS_LOOKUP_CONFIG;
|
||||
public static final String CLIENT_DNS_LOOKUP_DOC = CommonClientConfigs.CLIENT_DNS_LOOKUP_DOC;
|
||||
|
||||
public static final String KEY_CONVERTER_CLASS_CONFIG = "key.converter";
|
||||
public static final String KEY_CONVERTER_CLASS_DOC =
|
||||
"Converter class used to convert between Kafka Connect format and the serialized form that is written to Kafka." +
|
||||
" This controls the format of the keys in messages written to or read from Kafka, and since this is" +
|
||||
" independent of connectors it allows any connector to work with any serialization format." +
|
||||
" Examples of common formats include JSON and Avro.";
|
||||
|
||||
public static final String VALUE_CONVERTER_CLASS_CONFIG = "value.converter";
|
||||
public static final String VALUE_CONVERTER_CLASS_DOC =
|
||||
"Converter class used to convert between Kafka Connect format and the serialized form that is written to Kafka." +
|
||||
" This controls the format of the values in messages written to or read from Kafka, and since this is" +
|
||||
" independent of connectors it allows any connector to work with any serialization format." +
|
||||
" Examples of common formats include JSON and Avro.";
|
||||
|
||||
public static final String HEADER_CONVERTER_CLASS_CONFIG = "header.converter";
|
||||
public static final String HEADER_CONVERTER_CLASS_DOC =
|
||||
"HeaderConverter class used to convert between Kafka Connect format and the serialized form that is written to Kafka." +
|
||||
" This controls the format of the header values in messages written to or read from Kafka, and since this is" +
|
||||
" independent of connectors it allows any connector to work with any serialization format." +
|
||||
" Examples of common formats include JSON and Avro. By default, the SimpleHeaderConverter is used to serialize" +
|
||||
" header values to strings and deserialize them by inferring the schemas.";
|
||||
public static final String HEADER_CONVERTER_CLASS_DEFAULT = SimpleHeaderConverter.class.getName();
|
||||
|
||||
/**
|
||||
* @deprecated As of 2.0.0
|
||||
*/
|
||||
@Deprecated
|
||||
public static final String INTERNAL_KEY_CONVERTER_CLASS_CONFIG = "internal.key.converter";
|
||||
public static final String INTERNAL_KEY_CONVERTER_CLASS_DOC =
|
||||
"Converter class used to convert between Kafka Connect format and the serialized form that is written to Kafka." +
|
||||
" This controls the format of the keys in messages written to or read from Kafka, and since this is" +
|
||||
" independent of connectors it allows any connector to work with any serialization format." +
|
||||
" Examples of common formats include JSON and Avro." +
|
||||
" This setting controls the format used for internal bookkeeping data used by the framework, such as" +
|
||||
" configs and offsets, so users can typically use any functioning Converter implementation." +
|
||||
" Deprecated; will be removed in an upcoming version.";
|
||||
|
||||
/**
|
||||
* @deprecated As of 2.0.0
|
||||
*/
|
||||
@Deprecated
|
||||
public static final String INTERNAL_VALUE_CONVERTER_CLASS_CONFIG = "internal.value.converter";
|
||||
public static final String INTERNAL_VALUE_CONVERTER_CLASS_DOC =
|
||||
"Converter class used to convert between Kafka Connect format and the serialized form that is written to Kafka." +
|
||||
" This controls the format of the values in messages written to or read from Kafka, and since this is" +
|
||||
" independent of connectors it allows any connector to work with any serialization format." +
|
||||
" Examples of common formats include JSON and Avro." +
|
||||
" This setting controls the format used for internal bookkeeping data used by the framework, such as" +
|
||||
" configs and offsets, so users can typically use any functioning Converter implementation." +
|
||||
" Deprecated; will be removed in an upcoming version.";
|
||||
|
||||
private static final Class<? extends Converter> INTERNAL_CONVERTER_DEFAULT = JsonConverter.class;
|
||||
|
||||
public static final String TASK_SHUTDOWN_GRACEFUL_TIMEOUT_MS_CONFIG
|
||||
= "task.shutdown.graceful.timeout.ms";
|
||||
private static final String TASK_SHUTDOWN_GRACEFUL_TIMEOUT_MS_DOC =
|
||||
"Amount of time to wait for tasks to shutdown gracefully. This is the total amount of time,"
|
||||
+ " not per task. All task have shutdown triggered, then they are waited on sequentially.";
|
||||
private static final String TASK_SHUTDOWN_GRACEFUL_TIMEOUT_MS_DEFAULT = "5000";
|
||||
|
||||
public static final String OFFSET_COMMIT_INTERVAL_MS_CONFIG = "offset.flush.interval.ms";
|
||||
private static final String OFFSET_COMMIT_INTERVAL_MS_DOC
|
||||
= "Interval at which to try committing offsets for tasks.";
|
||||
public static final long OFFSET_COMMIT_INTERVAL_MS_DEFAULT = 60000L;
|
||||
|
||||
public static final String OFFSET_COMMIT_TIMEOUT_MS_CONFIG = "offset.flush.timeout.ms";
|
||||
private static final String OFFSET_COMMIT_TIMEOUT_MS_DOC
|
||||
= "Maximum number of milliseconds to wait for records to flush and partition offset data to be"
|
||||
+ " committed to offset storage before cancelling the process and restoring the offset "
|
||||
+ "data to be committed in a future attempt.";
|
||||
public static final long OFFSET_COMMIT_TIMEOUT_MS_DEFAULT = 5000L;
|
||||
|
||||
/**
|
||||
* @deprecated As of 1.1.0.
|
||||
*/
|
||||
@Deprecated
|
||||
public static final String REST_HOST_NAME_CONFIG = "rest.host.name";
|
||||
private static final String REST_HOST_NAME_DOC
|
||||
= "Hostname for the REST API. If this is set, it will only bind to this interface.";
|
||||
|
||||
/**
|
||||
* @deprecated As of 1.1.0.
|
||||
*/
|
||||
@Deprecated
|
||||
public static final String REST_PORT_CONFIG = "rest.port";
|
||||
private static final String REST_PORT_DOC
|
||||
= "Port for the REST API to listen on.";
|
||||
public static final int REST_PORT_DEFAULT = 8083;
|
||||
|
||||
public static final String LISTENERS_CONFIG = "listeners";
|
||||
private static final String LISTENERS_DOC
|
||||
= "List of comma-separated URIs the REST API will listen on. The supported protocols are HTTP and HTTPS.\n" +
|
||||
" Specify hostname as 0.0.0.0 to bind to all interfaces.\n" +
|
||||
" Leave hostname empty to bind to default interface.\n" +
|
||||
" Examples of legal listener lists: HTTP://myhost:8083,HTTPS://myhost:8084";
|
||||
|
||||
public static final String REST_ADVERTISED_HOST_NAME_CONFIG = "rest.advertised.host.name";
|
||||
private static final String REST_ADVERTISED_HOST_NAME_DOC
|
||||
= "If this is set, this is the hostname that will be given out to other workers to connect to.";
|
||||
|
||||
public static final String REST_ADVERTISED_PORT_CONFIG = "rest.advertised.port";
|
||||
private static final String REST_ADVERTISED_PORT_DOC
|
||||
= "If this is set, this is the port that will be given out to other workers to connect to.";
|
||||
|
||||
public static final String REST_ADVERTISED_LISTENER_CONFIG = "rest.advertised.listener";
|
||||
private static final String REST_ADVERTISED_LISTENER_DOC
|
||||
= "Sets the advertised listener (HTTP or HTTPS) which will be given to other workers to use.";
|
||||
|
||||
public static final String ACCESS_CONTROL_ALLOW_ORIGIN_CONFIG = "access.control.allow.origin";
|
||||
protected static final String ACCESS_CONTROL_ALLOW_ORIGIN_DOC =
|
||||
"Value to set the Access-Control-Allow-Origin header to for REST API requests." +
|
||||
"To enable cross origin access, set this to the domain of the application that should be permitted" +
|
||||
" to access the API, or '*' to allow access from any domain. The default value only allows access" +
|
||||
" from the domain of the REST API.";
|
||||
protected static final String ACCESS_CONTROL_ALLOW_ORIGIN_DEFAULT = "";
|
||||
|
||||
public static final String ACCESS_CONTROL_ALLOW_METHODS_CONFIG = "access.control.allow.methods";
|
||||
protected static final String ACCESS_CONTROL_ALLOW_METHODS_DOC =
|
||||
"Sets the methods supported for cross origin requests by setting the Access-Control-Allow-Methods header. "
|
||||
+ "The default value of the Access-Control-Allow-Methods header allows cross origin requests for GET, POST and HEAD.";
|
||||
protected static final String ACCESS_CONTROL_ALLOW_METHODS_DEFAULT = "";
|
||||
|
||||
public static final String ADMIN_LISTENERS_CONFIG = "admin.listeners";
|
||||
protected static final String ADMIN_LISTENERS_DOC = "List of comma-separated URIs the Admin REST API will listen on." +
|
||||
" The supported protocols are HTTP and HTTPS." +
|
||||
" An empty or blank string will disable this feature." +
|
||||
" The default behavior is to use the regular listener (specified by the 'listeners' property).";
|
||||
protected static final List<String> ADMIN_LISTENERS_DEFAULT = null;
|
||||
public static final String ADMIN_LISTENERS_HTTPS_CONFIGS_PREFIX = "admin.listeners.https.";
|
||||
|
||||
public static final String PLUGIN_PATH_CONFIG = "plugin.path";
|
||||
protected static final String PLUGIN_PATH_DOC = "List of paths separated by commas (,) that "
|
||||
+ "contain plugins (connectors, converters, transformations). The list should consist"
|
||||
+ " of top level directories that include any combination of: \n"
|
||||
+ "a) directories immediately containing jars with plugins and their dependencies\n"
|
||||
+ "b) uber-jars with plugins and their dependencies\n"
|
||||
+ "c) directories immediately containing the package directory structure of classes of "
|
||||
+ "plugins and their dependencies\n"
|
||||
+ "Note: symlinks will be followed to discover dependencies or plugins.\n"
|
||||
+ "Examples: plugin.path=/usr/local/share/java,/usr/local/share/kafka/plugins,"
|
||||
+ "/opt/connectors";
|
||||
|
||||
public static final String CONFIG_PROVIDERS_CONFIG = "config.providers";
|
||||
protected static final String CONFIG_PROVIDERS_DOC =
|
||||
"Comma-separated names of <code>ConfigProvider</code> classes, loaded and used "
|
||||
+ "in the order specified. Implementing the interface "
|
||||
+ "<code>ConfigProvider</code> allows you to replace variable references in connector configurations, "
|
||||
+ "such as for externalized secrets. ";
|
||||
|
||||
public static final String REST_EXTENSION_CLASSES_CONFIG = "rest.extension.classes";
|
||||
protected static final String REST_EXTENSION_CLASSES_DOC =
|
||||
"Comma-separated names of <code>ConnectRestExtension</code> classes, loaded and called "
|
||||
+ "in the order specified. Implementing the interface "
|
||||
+ "<code>ConnectRestExtension</code> allows you to inject into Connect's REST API user defined resources like filters. "
|
||||
+ "Typically used to add custom capability like logging, security, etc. ";
|
||||
|
||||
public static final String CONNECTOR_CLIENT_POLICY_CLASS_CONFIG = "connector.client.config.override.policy";
|
||||
public static final String CONNECTOR_CLIENT_POLICY_CLASS_DOC =
|
||||
"Class name or alias of implementation of <code>ConnectorClientConfigOverridePolicy</code>. Defines what client configurations can be "
|
||||
+ "overriden by the connector. The default implementation is `None`. The other possible policies in the framework include `All` "
|
||||
+ "and `Principal`. ";
|
||||
public static final String CONNECTOR_CLIENT_POLICY_CLASS_DEFAULT = "None";
|
||||
|
||||
|
||||
public static final String METRICS_SAMPLE_WINDOW_MS_CONFIG = CommonClientConfigs.METRICS_SAMPLE_WINDOW_MS_CONFIG;
|
||||
public static final String METRICS_NUM_SAMPLES_CONFIG = CommonClientConfigs.METRICS_NUM_SAMPLES_CONFIG;
|
||||
public static final String METRICS_RECORDING_LEVEL_CONFIG = CommonClientConfigs.METRICS_RECORDING_LEVEL_CONFIG;
|
||||
public static final String METRIC_REPORTER_CLASSES_CONFIG = CommonClientConfigs.METRIC_REPORTER_CLASSES_CONFIG;
|
||||
|
||||
public static final String TOPIC_TRACKING_ENABLE_CONFIG = "topic.tracking.enable";
|
||||
protected static final String TOPIC_TRACKING_ENABLE_DOC = "Enable tracking the set of active "
|
||||
+ "topics per connector during runtime.";
|
||||
protected static final boolean TOPIC_TRACKING_ENABLE_DEFAULT = true;
|
||||
|
||||
public static final String TOPIC_TRACKING_ALLOW_RESET_CONFIG = "topic.tracking.allow.reset";
|
||||
protected static final String TOPIC_TRACKING_ALLOW_RESET_DOC = "If set to true, it allows "
|
||||
+ "user requests to reset the set of active topics per connector.";
|
||||
protected static final boolean TOPIC_TRACKING_ALLOW_RESET_DEFAULT = true;
|
||||
|
||||
/**
|
||||
* Get a basic ConfigDef for a WorkerConfig. This includes all the common settings. Subclasses can use this to
|
||||
* bootstrap their own ConfigDef.
|
||||
* @return a ConfigDef with all the common options specified
|
||||
*/
|
||||
protected static ConfigDef baseConfigDef() {
|
||||
return new ConfigDef()
|
||||
.define(BOOTSTRAP_SERVERS_CONFIG, Type.LIST, BOOTSTRAP_SERVERS_DEFAULT,
|
||||
Importance.HIGH, BOOTSTRAP_SERVERS_DOC)
|
||||
.define(CLIENT_DNS_LOOKUP_CONFIG,
|
||||
Type.STRING,
|
||||
ClientDnsLookup.DEFAULT.toString(),
|
||||
in(ClientDnsLookup.DEFAULT.toString(),
|
||||
ClientDnsLookup.USE_ALL_DNS_IPS.toString(),
|
||||
ClientDnsLookup.RESOLVE_CANONICAL_BOOTSTRAP_SERVERS_ONLY.toString()),
|
||||
Importance.MEDIUM,
|
||||
CLIENT_DNS_LOOKUP_DOC)
|
||||
.define(KEY_CONVERTER_CLASS_CONFIG, Type.CLASS,
|
||||
Importance.HIGH, KEY_CONVERTER_CLASS_DOC)
|
||||
.define(VALUE_CONVERTER_CLASS_CONFIG, Type.CLASS,
|
||||
Importance.HIGH, VALUE_CONVERTER_CLASS_DOC)
|
||||
.define(INTERNAL_KEY_CONVERTER_CLASS_CONFIG, Type.CLASS, INTERNAL_CONVERTER_DEFAULT,
|
||||
Importance.LOW, INTERNAL_KEY_CONVERTER_CLASS_DOC)
|
||||
.define(INTERNAL_VALUE_CONVERTER_CLASS_CONFIG, Type.CLASS, INTERNAL_CONVERTER_DEFAULT,
|
||||
Importance.LOW, INTERNAL_VALUE_CONVERTER_CLASS_DOC)
|
||||
.define(TASK_SHUTDOWN_GRACEFUL_TIMEOUT_MS_CONFIG, Type.LONG,
|
||||
TASK_SHUTDOWN_GRACEFUL_TIMEOUT_MS_DEFAULT, Importance.LOW,
|
||||
TASK_SHUTDOWN_GRACEFUL_TIMEOUT_MS_DOC)
|
||||
.define(OFFSET_COMMIT_INTERVAL_MS_CONFIG, Type.LONG, OFFSET_COMMIT_INTERVAL_MS_DEFAULT,
|
||||
Importance.LOW, OFFSET_COMMIT_INTERVAL_MS_DOC)
|
||||
.define(OFFSET_COMMIT_TIMEOUT_MS_CONFIG, Type.LONG, OFFSET_COMMIT_TIMEOUT_MS_DEFAULT,
|
||||
Importance.LOW, OFFSET_COMMIT_TIMEOUT_MS_DOC)
|
||||
.define(REST_HOST_NAME_CONFIG, Type.STRING, null, Importance.LOW, REST_HOST_NAME_DOC)
|
||||
.define(REST_PORT_CONFIG, Type.INT, REST_PORT_DEFAULT, Importance.LOW, REST_PORT_DOC)
|
||||
.define(LISTENERS_CONFIG, Type.LIST, null, Importance.LOW, LISTENERS_DOC)
|
||||
.define(REST_ADVERTISED_HOST_NAME_CONFIG, Type.STRING, null, Importance.LOW, REST_ADVERTISED_HOST_NAME_DOC)
|
||||
.define(REST_ADVERTISED_PORT_CONFIG, Type.INT, null, Importance.LOW, REST_ADVERTISED_PORT_DOC)
|
||||
.define(REST_ADVERTISED_LISTENER_CONFIG, Type.STRING, null, Importance.LOW, REST_ADVERTISED_LISTENER_DOC)
|
||||
.define(ACCESS_CONTROL_ALLOW_ORIGIN_CONFIG, Type.STRING,
|
||||
ACCESS_CONTROL_ALLOW_ORIGIN_DEFAULT, Importance.LOW,
|
||||
ACCESS_CONTROL_ALLOW_ORIGIN_DOC)
|
||||
.define(ACCESS_CONTROL_ALLOW_METHODS_CONFIG, Type.STRING,
|
||||
ACCESS_CONTROL_ALLOW_METHODS_DEFAULT, Importance.LOW,
|
||||
ACCESS_CONTROL_ALLOW_METHODS_DOC)
|
||||
.define(PLUGIN_PATH_CONFIG,
|
||||
Type.LIST,
|
||||
null,
|
||||
Importance.LOW,
|
||||
PLUGIN_PATH_DOC)
|
||||
.define(METRICS_SAMPLE_WINDOW_MS_CONFIG, Type.LONG,
|
||||
30000, atLeast(0), Importance.LOW,
|
||||
CommonClientConfigs.METRICS_SAMPLE_WINDOW_MS_DOC)
|
||||
.define(METRICS_NUM_SAMPLES_CONFIG, Type.INT,
|
||||
2, atLeast(1), Importance.LOW,
|
||||
CommonClientConfigs.METRICS_NUM_SAMPLES_DOC)
|
||||
.define(METRICS_RECORDING_LEVEL_CONFIG, Type.STRING,
|
||||
Sensor.RecordingLevel.INFO.toString(),
|
||||
in(Sensor.RecordingLevel.INFO.toString(), Sensor.RecordingLevel.DEBUG.toString()),
|
||||
Importance.LOW,
|
||||
CommonClientConfigs.METRICS_RECORDING_LEVEL_DOC)
|
||||
.define(METRIC_REPORTER_CLASSES_CONFIG, Type.LIST,
|
||||
"", Importance.LOW,
|
||||
CommonClientConfigs.METRIC_REPORTER_CLASSES_DOC)
|
||||
.define(BrokerSecurityConfigs.SSL_CLIENT_AUTH_CONFIG,
|
||||
ConfigDef.Type.STRING, "none", ConfigDef.Importance.LOW, BrokerSecurityConfigs.SSL_CLIENT_AUTH_DOC)
|
||||
.define(HEADER_CONVERTER_CLASS_CONFIG, Type.CLASS,
|
||||
HEADER_CONVERTER_CLASS_DEFAULT,
|
||||
Importance.LOW, HEADER_CONVERTER_CLASS_DOC)
|
||||
.define(CONFIG_PROVIDERS_CONFIG, Type.LIST,
|
||||
Collections.emptyList(),
|
||||
Importance.LOW, CONFIG_PROVIDERS_DOC)
|
||||
.define(REST_EXTENSION_CLASSES_CONFIG, Type.LIST, "",
|
||||
Importance.LOW, REST_EXTENSION_CLASSES_DOC)
|
||||
.define(ADMIN_LISTENERS_CONFIG, Type.LIST, null,
|
||||
new AdminListenersValidator(), Importance.LOW, ADMIN_LISTENERS_DOC)
|
||||
.define(CONNECTOR_CLIENT_POLICY_CLASS_CONFIG, Type.STRING, CONNECTOR_CLIENT_POLICY_CLASS_DEFAULT,
|
||||
Importance.MEDIUM, CONNECTOR_CLIENT_POLICY_CLASS_DOC)
|
||||
.define(TOPIC_TRACKING_ENABLE_CONFIG, Type.BOOLEAN, TOPIC_TRACKING_ENABLE_DEFAULT,
|
||||
Importance.LOW, TOPIC_TRACKING_ENABLE_DOC)
|
||||
.define(TOPIC_TRACKING_ALLOW_RESET_CONFIG, Type.BOOLEAN, TOPIC_TRACKING_ALLOW_RESET_DEFAULT,
|
||||
Importance.LOW, TOPIC_TRACKING_ALLOW_RESET_DOC);
|
||||
}
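    // Illustrative sketch (not part of the original file): a subclass is expected to bootstrap its
    // own ConfigDef from baseConfigDef(), roughly as follows (the class and setting names are
    // hypothetical):
    //
    //   public class MyWorkerConfig extends WorkerConfig {
    //       private static final ConfigDef CONFIG = baseConfigDef()
    //               .define("my.extra.setting", Type.STRING, "default", Importance.LOW, "Example setting.");
    //
    //       public MyWorkerConfig(Map<String, String> props) {
    //           super(CONFIG, props);
    //       }
    //   }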
|
||||
|
||||
private void logInternalConverterDeprecationWarnings(Map<String, String> props) {
|
||||
String[] deprecatedConfigs = new String[] {
|
||||
INTERNAL_KEY_CONVERTER_CLASS_CONFIG,
|
||||
INTERNAL_VALUE_CONVERTER_CLASS_CONFIG
|
||||
};
|
||||
for (String config : deprecatedConfigs) {
|
||||
if (props.containsKey(config)) {
|
||||
Class<?> internalConverterClass = getClass(config);
|
||||
logDeprecatedProperty(config, internalConverterClass.getCanonicalName(), INTERNAL_CONVERTER_DEFAULT.getCanonicalName(), null);
|
||||
if (internalConverterClass.equals(INTERNAL_CONVERTER_DEFAULT)) {
|
||||
// log the properties for this converter ...
|
||||
for (Map.Entry<String, Object> propEntry : originalsWithPrefix(config + ".").entrySet()) {
|
||||
String prop = propEntry.getKey();
|
||||
String propValue = propEntry.getValue().toString();
|
||||
String defaultValue = JsonConverterConfig.SCHEMAS_ENABLE_CONFIG.equals(prop) ? "false" : null;
|
||||
logDeprecatedProperty(config + "." + prop, propValue, defaultValue, config);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private void logDeprecatedProperty(String propName, String propValue, String defaultValue, String prefix) {
|
||||
String prefixNotice = prefix != null
|
||||
? " (along with all configuration for '" + prefix + "')"
|
||||
: "";
|
||||
if (defaultValue != null && defaultValue.equalsIgnoreCase(propValue)) {
|
||||
log.info(
|
||||
"Worker configuration property '{}'{} is deprecated and may be removed in an upcoming release. "
|
||||
+ "The specified value '{}' matches the default, so this property can be safely removed from the worker configuration.",
|
||||
propName,
|
||||
prefixNotice,
|
||||
propValue
|
||||
);
|
||||
} else if (defaultValue != null) {
|
||||
log.warn(
|
||||
"Worker configuration property '{}'{} is deprecated and may be removed in an upcoming release. "
|
||||
+ "The specified value '{}' does NOT match the default and recommended value '{}'.",
|
||||
propName,
|
||||
prefixNotice,
|
||||
propValue,
|
||||
defaultValue
|
||||
);
|
||||
} else {
|
||||
log.warn(
|
||||
"Worker configuration property '{}'{} is deprecated and may be removed in an upcoming release.",
|
||||
propName,
|
||||
prefixNotice
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
public Integer getRebalanceTimeout() {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected Map<String, Object> postProcessParsedConfig(final Map<String, Object> parsedValues) {
|
||||
return CommonClientConfigs.postProcessReconnectBackoffConfigs(this, parsedValues);
|
||||
}
|
||||
|
||||
public static List<String> pluginLocations(Map<String, String> props) {
|
||||
String locationList = props.get(WorkerConfig.PLUGIN_PATH_CONFIG);
|
||||
return locationList == null
|
||||
? new ArrayList<String>()
|
||||
: Arrays.asList(COMMA_WITH_WHITESPACE.split(locationList.trim(), -1));
|
||||
}
|
||||
|
||||
public WorkerConfig(ConfigDef definition, Map<String, String> props) {
|
||||
super(definition, props);
|
||||
logInternalConverterDeprecationWarnings(props);
|
||||
}
|
||||
|
||||
private static class AdminListenersValidator implements ConfigDef.Validator {
|
||||
@Override
|
||||
public void ensureValid(String name, Object value) {
|
||||
if (value == null) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (!(value instanceof List)) {
|
||||
throw new ConfigException("Invalid value type (list expected).");
|
||||
}
|
||||
|
||||
List items = (List) value;
|
||||
if (items.isEmpty()) {
|
||||
return;
|
||||
}
|
||||
|
||||
for (Object item: items) {
|
||||
if (!(item instanceof String)) {
|
||||
throw new ConfigException("Invalid type for admin listener (expected String).");
|
||||
}
|
||||
if (((String) item).trim().isEmpty()) {
|
||||
throw new ConfigException("Empty listener found when parsing list.");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
@@ -0,0 +1,101 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.runtime;
|
||||
|
||||
import org.apache.kafka.common.config.ConfigDef;
|
||||
import org.apache.kafka.common.config.provider.ConfigProvider;
|
||||
import org.apache.kafka.common.config.ConfigTransformer;
|
||||
import org.apache.kafka.common.config.ConfigTransformerResult;
|
||||
import org.apache.kafka.connect.runtime.Herder.ConfigReloadAction;
|
||||
import org.apache.kafka.connect.util.Callback;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import java.util.Locale;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
import java.util.concurrent.ConcurrentMap;
|
||||
|
||||
/**
|
||||
* A wrapper class to perform configuration transformations and schedule reloads for any
|
||||
* retrieved TTL values.
|
||||
*/
|
||||
public class WorkerConfigTransformer {
|
||||
private static final Logger log = LoggerFactory.getLogger(WorkerConfigTransformer.class);
|
||||
|
||||
private final Worker worker;
|
||||
private final ConfigTransformer configTransformer;
|
||||
private final ConcurrentMap<String, Map<String, HerderRequest>> requests = new ConcurrentHashMap<>();
|
||||
|
||||
public WorkerConfigTransformer(Worker worker, Map<String, ConfigProvider> configProviders) {
|
||||
this.worker = worker;
|
||||
this.configTransformer = new ConfigTransformer(configProviders);
|
||||
}
|
||||
|
||||
public Map<String, String> transform(Map<String, String> configs) {
|
||||
return transform(null, configs);
|
||||
}
|
||||
|
||||
public Map<String, String> transform(String connectorName, Map<String, String> configs) {
|
||||
if (configs == null) return null;
|
||||
ConfigTransformerResult result = configTransformer.transform(configs);
|
||||
if (connectorName != null) {
|
||||
String key = ConnectorConfig.CONFIG_RELOAD_ACTION_CONFIG;
|
||||
String action = (String) ConfigDef.parseType(key, configs.get(key), ConfigDef.Type.STRING);
|
||||
if (action == null) {
|
||||
// The default action is "restart".
|
||||
action = ConnectorConfig.CONFIG_RELOAD_ACTION_RESTART;
|
||||
}
|
||||
ConfigReloadAction reloadAction = ConfigReloadAction.valueOf(action.toUpperCase(Locale.ROOT));
|
||||
if (reloadAction == ConfigReloadAction.RESTART) {
|
||||
scheduleReload(connectorName, result.ttls());
|
||||
}
|
||||
}
|
||||
return result.data();
|
||||
}
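    // Illustrative example (not part of the original file): with a ConfigProvider registered under
    // the name "file" (for instance org.apache.kafka.common.config.provider.FileConfigProvider), a
    // connector property such as
    //
    //   connection.password=${file:/etc/secrets/db.properties:password}   // path is hypothetical
    //
    // is replaced here by the value read from that properties file. If the provider also returns a
    // TTL for the path and the connector's config.reload.action is "restart" (the default), a
    // delayed connector restart is scheduled below so the value is re-resolved when it expires.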
|
||||
|
||||
private void scheduleReload(String connectorName, Map<String, Long> ttls) {
|
||||
for (Map.Entry<String, Long> entry : ttls.entrySet()) {
|
||||
scheduleReload(connectorName, entry.getKey(), entry.getValue());
|
||||
}
|
||||
}
|
||||
|
||||
private void scheduleReload(String connectorName, String path, long ttl) {
|
||||
Map<String, HerderRequest> connectorRequests = requests.get(connectorName);
|
||||
if (connectorRequests == null) {
|
||||
connectorRequests = new ConcurrentHashMap<>();
|
||||
requests.put(connectorName, connectorRequests);
|
||||
} else {
|
||||
HerderRequest previousRequest = connectorRequests.get(path);
|
||||
if (previousRequest != null) {
|
||||
// Delete previous request for ttl which is now stale
|
||||
previousRequest.cancel();
|
||||
}
|
||||
}
|
||||
log.info("Scheduling a restart of connector {} in {} ms", connectorName, ttl);
|
||||
Callback<Void> cb = new Callback<Void>() {
|
||||
@Override
|
||||
public void onCompletion(Throwable error, Void result) {
|
||||
if (error != null) {
|
||||
log.error("Unexpected error during connector restart: ", error);
|
||||
}
|
||||
}
|
||||
};
|
||||
HerderRequest request = worker.herder().restartConnector(ttl, connectorName, cb);
|
||||
connectorRequests.put(path, request);
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,322 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.runtime;
|
||||
|
||||
import org.apache.kafka.connect.connector.Connector;
|
||||
import org.apache.kafka.connect.connector.ConnectorContext;
|
||||
import org.apache.kafka.connect.runtime.ConnectMetrics.MetricGroup;
|
||||
import org.apache.kafka.connect.sink.SinkConnector;
|
||||
import org.apache.kafka.connect.source.SourceConnector;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import java.util.Locale;
|
||||
import java.util.Map;
|
||||
import java.util.Objects;
|
||||
|
||||
/**
|
||||
* Container for connectors which is responsible for managing their lifecycle (e.g. handling startup,
|
||||
* shutdown, pausing, etc.). Internally, we manage the runtime state of the connector and transition according
|
||||
* to target state changes. Note that unlike connector tasks, the connector does not really have a "pause"
|
||||
* state which is distinct from being stopped. We therefore treat pause operations as requests to momentarily
|
||||
* stop the connector, and resume operations as requests to restart it (without reinitialization). Connector
|
||||
* failures, whether in initialization or after startup, are treated as fatal, which means that we will not attempt
|
||||
* to restart this connector instance after failure. What this means from a user perspective is that you must
|
||||
* use the /restart REST API to restart a failed task. This behavior is consistent with task failures.
|
||||
*
|
||||
* Note that this class is NOT thread-safe.
|
||||
*/
|
||||
public class WorkerConnector {
|
||||
private static final Logger log = LoggerFactory.getLogger(WorkerConnector.class);
|
||||
|
||||
private enum State {
|
||||
INIT, // initial state before startup
|
||||
STOPPED, // the connector has been stopped/paused.
|
||||
STARTED, // the connector has been started/resumed.
|
||||
FAILED, // the connector has failed (no further transitions are possible after this state)
|
||||
}
|
||||
|
||||
private final String connName;
|
||||
private final ConnectorStatus.Listener statusListener;
|
||||
private final ConnectorContext ctx;
|
||||
private final Connector connector;
|
||||
private final ConnectorMetricsGroup metrics;
|
||||
|
||||
private Map<String, String> config;
|
||||
private State state;
|
||||
|
||||
public WorkerConnector(String connName,
|
||||
Connector connector,
|
||||
ConnectorContext ctx,
|
||||
ConnectMetrics metrics,
|
||||
ConnectorStatus.Listener statusListener) {
|
||||
this.connName = connName;
|
||||
this.ctx = ctx;
|
||||
this.connector = connector;
|
||||
this.state = State.INIT;
|
||||
this.metrics = new ConnectorMetricsGroup(metrics, AbstractStatus.State.UNASSIGNED, statusListener);
|
||||
this.statusListener = this.metrics;
|
||||
}
|
||||
|
||||
public void initialize(ConnectorConfig connectorConfig) {
|
||||
try {
|
||||
this.config = connectorConfig.originalsStrings();
|
||||
log.debug("{} Initializing connector {}", this, connName);
|
||||
if (isSinkConnector()) {
|
||||
SinkConnectorConfig.validate(config);
|
||||
}
|
||||
|
||||
connector.initialize(new ConnectorContext() {
|
||||
@Override
|
||||
public void requestTaskReconfiguration() {
|
||||
ctx.requestTaskReconfiguration();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void raiseError(Exception e) {
|
||||
log.error("{} Connector raised an error", WorkerConnector.this, e);
|
||||
onFailure(e);
|
||||
ctx.raiseError(e);
|
||||
}
|
||||
});
|
||||
} catch (Throwable t) {
|
||||
log.error("{} Error initializing connector", this, t);
|
||||
onFailure(t);
|
||||
}
|
||||
}
|
||||
|
||||
private boolean doStart() {
|
||||
try {
|
||||
switch (state) {
|
||||
case STARTED:
|
||||
return false;
|
||||
|
||||
case INIT:
|
||||
case STOPPED:
|
||||
connector.start(config);
|
||||
this.state = State.STARTED;
|
||||
return true;
|
||||
|
||||
default:
|
||||
throw new IllegalArgumentException("Cannot start connector in state " + state);
|
||||
}
|
||||
} catch (Throwable t) {
|
||||
log.error("{} Error while starting connector", this, t);
|
||||
onFailure(t);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
private void onFailure(Throwable t) {
|
||||
statusListener.onFailure(connName, t);
|
||||
this.state = State.FAILED;
|
||||
}
|
||||
|
||||
private void resume() {
|
||||
if (doStart())
|
||||
statusListener.onResume(connName);
|
||||
}
|
||||
|
||||
private void start() {
|
||||
if (doStart())
|
||||
statusListener.onStartup(connName);
|
||||
}
|
||||
|
||||
public boolean isRunning() {
|
||||
return state == State.STARTED;
|
||||
}
|
||||
|
||||
@SuppressWarnings("fallthrough")
|
||||
private void pause() {
|
||||
try {
|
||||
switch (state) {
|
||||
case STOPPED:
|
||||
return;
|
||||
|
||||
case STARTED:
|
||||
connector.stop();
|
||||
// fall through
|
||||
|
||||
case INIT:
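                // A connector still in INIT was never started, so there is nothing to stop;
                // it is simply recorded as paused.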
|
||||
statusListener.onPause(connName);
|
||||
this.state = State.STOPPED;
|
||||
break;
|
||||
|
||||
default:
|
||||
throw new IllegalArgumentException("Cannot pause connector in state " + state);
|
||||
}
|
||||
} catch (Throwable t) {
|
||||
log.error("{} Error while shutting down connector", this, t);
|
||||
statusListener.onFailure(connName, t);
|
||||
this.state = State.FAILED;
|
||||
}
|
||||
}
|
||||
|
||||
public void shutdown() {
|
||||
try {
|
||||
if (state == State.STARTED)
|
||||
connector.stop();
|
||||
this.state = State.STOPPED;
|
||||
statusListener.onShutdown(connName);
|
||||
} catch (Throwable t) {
|
||||
log.error("{} Error while shutting down connector", this, t);
|
||||
this.state = State.FAILED;
|
||||
statusListener.onFailure(connName, t);
|
||||
} finally {
|
||||
metrics.close();
|
||||
}
|
||||
}
|
||||
|
||||
public void transitionTo(TargetState targetState) {
|
||||
if (state == State.FAILED) {
|
||||
log.warn("{} Cannot transition connector to {} since it has failed", this, targetState);
|
||||
return;
|
||||
}
|
||||
|
||||
log.debug("{} Transition connector to {}", this, targetState);
|
||||
if (targetState == TargetState.PAUSED) {
|
||||
pause();
|
||||
} else if (targetState == TargetState.STARTED) {
|
||||
if (state == State.INIT)
|
||||
start();
|
||||
else
|
||||
resume();
|
||||
} else {
|
||||
throw new IllegalArgumentException("Unhandled target state " + targetState);
|
||||
}
|
||||
}
|
||||
|
||||
public boolean isSinkConnector() {
|
||||
return SinkConnector.class.isAssignableFrom(connector.getClass());
|
||||
}
|
||||
|
||||
public boolean isSourceConnector() {
|
||||
return SourceConnector.class.isAssignableFrom(connector.getClass());
|
||||
}
|
||||
|
||||
protected String connectorType() {
|
||||
if (isSinkConnector())
|
||||
return "sink";
|
||||
if (isSourceConnector())
|
||||
return "source";
|
||||
return "unknown";
|
||||
}
|
||||
|
||||
public Connector connector() {
|
||||
return connector;
|
||||
}
|
||||
|
||||
ConnectorMetricsGroup metrics() {
|
||||
return metrics;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "WorkerConnector{" +
|
||||
"id=" + connName +
|
||||
'}';
|
||||
}
|
||||
|
||||
    class ConnectorMetricsGroup implements ConnectorStatus.Listener, AutoCloseable {
        /**
         * Use {@link AbstractStatus.State} since it has all of the states we want,
         * unlike {@link WorkerConnector.State}.
         */
        private volatile AbstractStatus.State state;
private final MetricGroup metricGroup;
|
||||
private final ConnectorStatus.Listener delegate;
|
||||
|
||||
public ConnectorMetricsGroup(ConnectMetrics connectMetrics, AbstractStatus.State initialState, ConnectorStatus.Listener delegate) {
|
||||
Objects.requireNonNull(connectMetrics);
|
||||
Objects.requireNonNull(connector);
|
||||
Objects.requireNonNull(initialState);
|
||||
Objects.requireNonNull(delegate);
|
||||
this.delegate = delegate;
|
||||
this.state = initialState;
|
||||
ConnectMetricsRegistry registry = connectMetrics.registry();
|
||||
this.metricGroup = connectMetrics.group(registry.connectorGroupName(),
|
||||
registry.connectorTagName(), connName);
|
||||
// prevent collisions by removing any previously created metrics in this group.
|
||||
metricGroup.close();
|
||||
|
||||
metricGroup.addImmutableValueMetric(registry.connectorType, connectorType());
|
||||
metricGroup.addImmutableValueMetric(registry.connectorClass, connector.getClass().getName());
|
||||
metricGroup.addImmutableValueMetric(registry.connectorVersion, connector.version());
|
||||
metricGroup.addValueMetric(registry.connectorStatus, now -> state.toString().toLowerCase(Locale.getDefault()));
|
||||
}
|
||||
|
||||
public void close() {
|
||||
metricGroup.close();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onStartup(String connector) {
|
||||
state = AbstractStatus.State.RUNNING;
|
||||
delegate.onStartup(connector);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onShutdown(String connector) {
|
||||
state = AbstractStatus.State.UNASSIGNED;
|
||||
delegate.onShutdown(connector);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onPause(String connector) {
|
||||
state = AbstractStatus.State.PAUSED;
|
||||
delegate.onPause(connector);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onResume(String connector) {
|
||||
state = AbstractStatus.State.RUNNING;
|
||||
delegate.onResume(connector);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(String connector, Throwable cause) {
|
||||
state = AbstractStatus.State.FAILED;
|
||||
delegate.onFailure(connector, cause);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onDeletion(String connector) {
|
||||
state = AbstractStatus.State.DESTROYED;
|
||||
delegate.onDeletion(connector);
|
||||
}
|
||||
|
||||
boolean isUnassigned() {
|
||||
return state == AbstractStatus.State.UNASSIGNED;
|
||||
}
|
||||
|
||||
boolean isRunning() {
|
||||
return state == AbstractStatus.State.RUNNING;
|
||||
}
|
||||
|
||||
boolean isPaused() {
|
||||
return state == AbstractStatus.State.PAUSED;
|
||||
}
|
||||
|
||||
boolean isFailed() {
|
||||
return state == AbstractStatus.State.FAILED;
|
||||
}
|
||||
|
||||
protected MetricGroup metricGroup() {
|
||||
return metricGroup;
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,107 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.runtime;
|
||||
|
||||
import org.apache.kafka.common.utils.Utils;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import java.lang.management.ManagementFactory;
|
||||
import java.lang.management.OperatingSystemMXBean;
|
||||
import java.lang.management.RuntimeMXBean;
|
||||
import java.util.LinkedHashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
/**
|
||||
* Connect Worker system and runtime information.
|
||||
*/
|
||||
public class WorkerInfo {
|
||||
private static final Logger log = LoggerFactory.getLogger(WorkerInfo.class);
|
||||
private static final RuntimeMXBean RUNTIME;
|
||||
private static final OperatingSystemMXBean OS;
|
||||
|
||||
static {
|
||||
RUNTIME = ManagementFactory.getRuntimeMXBean();
|
||||
OS = ManagementFactory.getOperatingSystemMXBean();
|
||||
}
|
||||
|
||||
private final Map<String, Object> values;
|
||||
|
||||
/**
|
||||
* Constructor.
|
||||
*/
|
||||
public WorkerInfo() {
|
||||
this.values = new LinkedHashMap<>();
|
||||
addRuntimeInfo();
|
||||
addSystemInfo();
|
||||
}
|
||||
|
||||
/**
|
||||
* Log the values of this object at level INFO.
|
||||
*/
|
||||
// Equivalent to logAll in AbstractConfig
|
||||
public void logAll() {
|
||||
StringBuilder b = new StringBuilder();
|
||||
b.append(getClass().getSimpleName());
|
||||
b.append(" values: ");
|
||||
b.append(Utils.NL);
|
||||
|
||||
for (Map.Entry<String, Object> entry : values.entrySet()) {
|
||||
b.append('\t');
|
||||
b.append(entry.getKey());
|
||||
b.append(" = ");
|
||||
b.append(format(entry.getValue()));
|
||||
b.append(Utils.NL);
|
||||
}
|
||||
log.info(b.toString());
|
||||
}
|
||||
|
||||
private static Object format(Object value) {
|
||||
return value == null ? "NA" : value;
|
||||
}
|
||||
|
||||
/**
|
||||
* Collect general runtime information.
|
||||
*/
|
||||
protected void addRuntimeInfo() {
|
||||
List<String> jvmArgs = RUNTIME.getInputArguments();
|
||||
values.put("jvm.args", Utils.join(jvmArgs, ", "));
|
||||
String[] jvmSpec = {
|
||||
RUNTIME.getVmVendor(),
|
||||
RUNTIME.getVmName(),
|
||||
RUNTIME.getSystemProperties().get("java.version"),
|
||||
RUNTIME.getVmVersion()
|
||||
};
|
||||
values.put("jvm.spec", Utils.join(jvmSpec, ", "));
|
||||
values.put("jvm.classpath", RUNTIME.getClassPath());
|
||||
}
|
||||
|
||||
/**
|
||||
* Collect system information.
|
||||
*/
|
||||
protected void addSystemInfo() {
|
||||
String[] osInfo = {
|
||||
OS.getName(),
|
||||
OS.getArch(),
|
||||
OS.getVersion(),
|
||||
};
|
||||
values.put("os.spec", Utils.join(osInfo, ", "));
|
||||
values.put("os.vcpus", String.valueOf(OS.getAvailableProcessors()));
|
||||
}
|
||||
|
||||
}
|
||||
@@ -0,0 +1,823 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.runtime;
|
||||
|
||||
import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
|
||||
import org.apache.kafka.clients.consumer.ConsumerRecord;
|
||||
import org.apache.kafka.clients.consumer.ConsumerRecords;
|
||||
import org.apache.kafka.clients.consumer.KafkaConsumer;
|
||||
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
|
||||
import org.apache.kafka.clients.consumer.OffsetCommitCallback;
|
||||
import org.apache.kafka.common.KafkaException;
|
||||
import org.apache.kafka.common.TopicPartition;
|
||||
import org.apache.kafka.common.errors.WakeupException;
|
||||
import org.apache.kafka.common.metrics.Sensor;
|
||||
import org.apache.kafka.common.metrics.stats.Avg;
|
||||
import org.apache.kafka.common.metrics.stats.CumulativeSum;
|
||||
import org.apache.kafka.common.metrics.stats.Max;
|
||||
import org.apache.kafka.common.metrics.stats.Rate;
|
||||
import org.apache.kafka.common.metrics.stats.Value;
|
||||
import org.apache.kafka.common.utils.Time;
|
||||
import org.apache.kafka.connect.data.SchemaAndValue;
|
||||
import org.apache.kafka.connect.errors.ConnectException;
|
||||
import org.apache.kafka.connect.errors.RetriableException;
|
||||
import org.apache.kafka.connect.header.ConnectHeaders;
|
||||
import org.apache.kafka.connect.header.Headers;
|
||||
import org.apache.kafka.connect.runtime.ConnectMetrics.MetricGroup;
|
||||
import org.apache.kafka.connect.runtime.distributed.ClusterConfigState;
|
||||
import org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperator;
|
||||
import org.apache.kafka.connect.runtime.errors.Stage;
|
||||
import org.apache.kafka.connect.sink.SinkRecord;
|
||||
import org.apache.kafka.connect.sink.SinkTask;
|
||||
import org.apache.kafka.connect.storage.Converter;
|
||||
import org.apache.kafka.connect.storage.HeaderConverter;
|
||||
import org.apache.kafka.connect.storage.StatusBackingStore;
|
||||
import org.apache.kafka.connect.util.ConnectUtils;
|
||||
import org.apache.kafka.connect.util.ConnectorTaskId;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import java.time.Duration;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.regex.Pattern;
|
||||
|
||||
import static java.util.Collections.singleton;
|
||||
import static org.apache.kafka.connect.runtime.WorkerConfig.TOPIC_TRACKING_ENABLE_CONFIG;
|
||||
|
||||
/**
|
||||
* WorkerTask that uses a SinkTask to export data from Kafka.
|
||||
*/
|
||||
class WorkerSinkTask extends WorkerTask {
|
||||
private static final Logger log = LoggerFactory.getLogger(WorkerSinkTask.class);
|
||||
|
||||
private final WorkerConfig workerConfig;
|
||||
private final SinkTask task;
|
||||
private final ClusterConfigState configState;
|
||||
private Map<String, String> taskConfig;
|
||||
private final Converter keyConverter;
|
||||
private final Converter valueConverter;
|
||||
private final HeaderConverter headerConverter;
|
||||
private final TransformationChain<SinkRecord> transformationChain;
|
||||
private final SinkTaskMetricsGroup sinkTaskMetricsGroup;
|
||||
private final boolean isTopicTrackingEnabled;
|
||||
private KafkaConsumer<byte[], byte[]> consumer;
|
||||
private WorkerSinkTaskContext context;
|
||||
private final List<SinkRecord> messageBatch;
|
||||
private Map<TopicPartition, OffsetAndMetadata> lastCommittedOffsets;
|
||||
private Map<TopicPartition, OffsetAndMetadata> currentOffsets;
|
||||
private final Map<TopicPartition, OffsetAndMetadata> origOffsets;
|
||||
private RuntimeException rebalanceException;
|
||||
private long nextCommit;
|
||||
private int commitSeqno;
|
||||
private long commitStarted;
|
||||
private int commitFailures;
|
||||
private boolean pausedForRedelivery;
|
||||
private boolean committing;
|
||||
|
||||
public WorkerSinkTask(ConnectorTaskId id,
|
||||
SinkTask task,
|
||||
TaskStatus.Listener statusListener,
|
||||
TargetState initialState,
|
||||
WorkerConfig workerConfig,
|
||||
ClusterConfigState configState,
|
||||
ConnectMetrics connectMetrics,
|
||||
Converter keyConverter,
|
||||
Converter valueConverter,
|
||||
HeaderConverter headerConverter,
|
||||
TransformationChain<SinkRecord> transformationChain,
|
||||
KafkaConsumer<byte[], byte[]> consumer,
|
||||
ClassLoader loader,
|
||||
Time time,
|
||||
RetryWithToleranceOperator retryWithToleranceOperator,
|
||||
StatusBackingStore statusBackingStore) {
|
||||
super(id, statusListener, initialState, loader, connectMetrics,
|
||||
retryWithToleranceOperator, time, statusBackingStore);
|
||||
|
||||
this.workerConfig = workerConfig;
|
||||
this.task = task;
|
||||
this.configState = configState;
|
||||
this.keyConverter = keyConverter;
|
||||
this.valueConverter = valueConverter;
|
||||
this.headerConverter = headerConverter;
|
||||
this.transformationChain = transformationChain;
|
||||
this.messageBatch = new ArrayList<>();
|
||||
this.currentOffsets = new HashMap<>();
|
||||
this.origOffsets = new HashMap<>();
|
||||
this.pausedForRedelivery = false;
|
||||
this.rebalanceException = null;
|
||||
this.nextCommit = time.milliseconds() +
|
||||
workerConfig.getLong(WorkerConfig.OFFSET_COMMIT_INTERVAL_MS_CONFIG);
|
||||
this.committing = false;
|
||||
this.commitSeqno = 0;
|
||||
this.commitStarted = -1;
|
||||
this.commitFailures = 0;
|
||||
this.sinkTaskMetricsGroup = new SinkTaskMetricsGroup(id, connectMetrics);
|
||||
this.sinkTaskMetricsGroup.recordOffsetSequenceNumber(commitSeqno);
|
||||
this.consumer = consumer;
|
||||
this.isTopicTrackingEnabled = workerConfig.getBoolean(TOPIC_TRACKING_ENABLE_CONFIG);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void initialize(TaskConfig taskConfig) {
|
||||
try {
|
||||
this.taskConfig = taskConfig.originalsStrings();
|
||||
this.context = new WorkerSinkTaskContext(consumer, this, configState);
|
||||
} catch (Throwable t) {
|
||||
log.error("{} Task failed initialization and will not be started.", this, t);
|
||||
onFailure(t);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void stop() {
|
||||
// Offset commit is handled upon exit in work thread
|
||||
super.stop();
|
||||
consumer.wakeup();
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void close() {
|
||||
// FIXME Kafka needs to add a timeout parameter here for us to properly obey the timeout
|
||||
// passed in
|
||||
try {
|
||||
task.stop();
|
||||
} catch (Throwable t) {
|
||||
log.warn("Could not stop task", t);
|
||||
}
|
||||
if (consumer != null) {
|
||||
try {
|
||||
consumer.close();
|
||||
} catch (Throwable t) {
|
||||
log.warn("Could not close consumer", t);
|
||||
}
|
||||
}
|
||||
try {
|
||||
transformationChain.close();
|
||||
} catch (Throwable t) {
|
||||
log.warn("Could not close transformation chain", t);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void releaseResources() {
|
||||
sinkTaskMetricsGroup.close();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void transitionTo(TargetState state) {
|
||||
super.transitionTo(state);
|
||||
consumer.wakeup();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void execute() {
|
||||
initializeAndStart();
|
||||
try {
|
||||
while (!isStopping())
|
||||
iteration();
|
||||
} finally {
|
||||
// Make sure any uncommitted data has been committed and the task has
|
||||
// a chance to clean up its state
|
||||
closePartitions();
|
||||
}
|
||||
}
|
||||
|
||||
protected void iteration() {
|
||||
final long offsetCommitIntervalMs = workerConfig.getLong(WorkerConfig.OFFSET_COMMIT_INTERVAL_MS_CONFIG);
|
||||
|
||||
try {
|
||||
long now = time.milliseconds();
|
||||
|
||||
// Maybe commit
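            // Commits are started here but complete asynchronously; onCommitCompleted() clears
            // the committing flag when the callback for the current sequence number arrives.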
|
||||
if (!committing && (context.isCommitRequested() || now >= nextCommit)) {
|
||||
commitOffsets(now, false);
|
||||
nextCommit = now + offsetCommitIntervalMs;
|
||||
context.clearCommitRequest();
|
||||
}
|
||||
|
||||
final long commitTimeoutMs = commitStarted + workerConfig.getLong(WorkerConfig.OFFSET_COMMIT_TIMEOUT_MS_CONFIG);
|
||||
|
||||
// Check for timed out commits
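            // If an async commit has been outstanding longer than the configured offset commit
            // timeout, stop waiting for its callback so a new commit attempt can be made.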
|
||||
if (committing && now >= commitTimeoutMs) {
|
||||
log.warn("{} Commit of offsets timed out", this);
|
||||
commitFailures++;
|
||||
committing = false;
|
||||
}
|
||||
|
||||
// And process messages
|
||||
long timeoutMs = Math.max(nextCommit - now, 0);
|
||||
poll(timeoutMs);
|
||||
} catch (WakeupException we) {
|
||||
log.trace("{} Consumer woken up", this);
|
||||
|
||||
if (isStopping())
|
||||
return;
|
||||
|
||||
if (shouldPause()) {
|
||||
pauseAll();
|
||||
onPause();
|
||||
context.requestCommit();
|
||||
} else if (!pausedForRedelivery) {
|
||||
resumeAll();
|
||||
onResume();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
    /**
     * Respond to a previous commit attempt that may or may not have succeeded. Note that due to our use of async commits,
     * these invocations may arrive out of order, hence the need for the commit sequence number.
     *
     * @param error            the error resulting from the commit, or null if the commit succeeded without error
     * @param seqno            the sequence number at the time the commit was requested
     * @param committedOffsets the offsets that were committed; may be null if the commit did not complete successfully
     *                         or if no new offsets were committed
     */
private void onCommitCompleted(Throwable error, long seqno, Map<TopicPartition, OffsetAndMetadata> committedOffsets) {
|
||||
if (commitSeqno != seqno) {
|
||||
log.debug("{} Received out of order commit callback for sequence number {}, but most recent sequence number is {}",
|
||||
this, seqno, commitSeqno);
|
||||
sinkTaskMetricsGroup.recordOffsetCommitSkip();
|
||||
} else {
|
||||
long durationMillis = time.milliseconds() - commitStarted;
|
||||
if (error != null) {
|
||||
log.error("{} Commit of offsets threw an unexpected exception for sequence number {}: {}",
|
||||
this, seqno, committedOffsets, error);
|
||||
commitFailures++;
|
||||
recordCommitFailure(durationMillis, error);
|
||||
} else {
|
||||
log.debug("{} Finished offset commit successfully in {} ms for sequence number {}: {}",
|
||||
this, durationMillis, seqno, committedOffsets);
|
||||
if (committedOffsets != null) {
|
||||
log.debug("{} Setting last committed offsets to {}", this, committedOffsets);
|
||||
lastCommittedOffsets = committedOffsets;
|
||||
sinkTaskMetricsGroup.recordCommittedOffsets(committedOffsets);
|
||||
}
|
||||
commitFailures = 0;
|
||||
recordCommitSuccess(durationMillis);
|
||||
}
|
||||
committing = false;
|
||||
}
|
||||
}
|
||||
|
||||
public int commitFailures() {
|
||||
return commitFailures;
|
||||
}
|
||||
|
||||
/**
|
||||
* Initializes and starts the SinkTask.
|
||||
*/
|
||||
protected void initializeAndStart() {
|
||||
SinkConnectorConfig.validate(taskConfig);
|
||||
|
||||
if (SinkConnectorConfig.hasTopicsConfig(taskConfig)) {
|
||||
String[] topics = taskConfig.get(SinkTask.TOPICS_CONFIG).split(",");
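            // The topics config is a comma-separated list; trim each entry so "a, b" and "a,b"
            // subscribe to the same topics.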
|
||||
Arrays.setAll(topics, i -> topics[i].trim());
|
||||
consumer.subscribe(Arrays.asList(topics), new HandleRebalance());
|
||||
log.debug("{} Initializing and starting task for topics {}", this, topics);
|
||||
} else {
|
||||
String topicsRegexStr = taskConfig.get(SinkTask.TOPICS_REGEX_CONFIG);
|
||||
Pattern pattern = Pattern.compile(topicsRegexStr);
|
||||
consumer.subscribe(pattern, new HandleRebalance());
|
||||
log.debug("{} Initializing and starting task for topics regex {}", this, topicsRegexStr);
|
||||
}
|
||||
|
||||
task.initialize(context);
|
||||
task.start(taskConfig);
|
||||
log.info("{} Sink task finished initialization and start", this);
|
||||
}
|
||||
|
||||
/**
|
||||
* Poll for new messages with the given timeout. Should only be invoked by the worker thread.
|
||||
*/
|
||||
protected void poll(long timeoutMs) {
|
||||
rewind();
|
||||
long retryTimeout = context.timeout();
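        // A positive timeout set by the task via SinkTaskContext.timeout() caps this poll
        // and is cleared after being applied once.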
|
||||
if (retryTimeout > 0) {
|
||||
timeoutMs = Math.min(timeoutMs, retryTimeout);
|
||||
context.timeout(-1L);
|
||||
}
|
||||
|
||||
log.trace("{} Polling consumer with timeout {} ms", this, timeoutMs);
|
||||
ConsumerRecords<byte[], byte[]> msgs = pollConsumer(timeoutMs);
|
||||
assert messageBatch.isEmpty() || msgs.isEmpty();
|
||||
log.trace("{} Polling returned {} messages", this, msgs.count());
|
||||
|
||||
convertMessages(msgs);
|
||||
deliverMessages();
|
||||
}
|
||||
|
||||
// Visible for testing
|
||||
boolean isCommitting() {
|
||||
return committing;
|
||||
}
|
||||
|
||||
private void doCommitSync(Map<TopicPartition, OffsetAndMetadata> offsets, int seqno) {
|
||||
log.info("{} Committing offsets synchronously using sequence number {}: {}", this, seqno, offsets);
|
||||
try {
|
||||
consumer.commitSync(offsets);
|
||||
onCommitCompleted(null, seqno, offsets);
|
||||
} catch (WakeupException e) {
|
||||
// retry the commit to ensure offsets get pushed, then propagate the wakeup up to poll
|
||||
doCommitSync(offsets, seqno);
|
||||
throw e;
|
||||
} catch (KafkaException e) {
|
||||
onCommitCompleted(e, seqno, offsets);
|
||||
}
|
||||
}
|
||||
|
||||
private void doCommitAsync(Map<TopicPartition, OffsetAndMetadata> offsets, final int seqno) {
|
||||
log.info("{} Committing offsets asynchronously using sequence number {}: {}", this, seqno, offsets);
|
||||
OffsetCommitCallback cb = new OffsetCommitCallback() {
|
||||
@Override
|
||||
public void onComplete(Map<TopicPartition, OffsetAndMetadata> offsets, Exception error) {
|
||||
onCommitCompleted(error, seqno, offsets);
|
||||
}
|
||||
};
|
||||
consumer.commitAsync(offsets, cb);
|
||||
}
|
||||
|
||||
/**
|
||||
* Starts an offset commit by flushing outstanding messages from the task and then starting
|
||||
* the write commit.
|
||||
**/
|
||||
private void doCommit(Map<TopicPartition, OffsetAndMetadata> offsets, boolean closing, int seqno) {
|
||||
if (closing) {
|
||||
doCommitSync(offsets, seqno);
|
||||
} else {
|
||||
doCommitAsync(offsets, seqno);
|
||||
}
|
||||
}
|
||||
|
||||
private void commitOffsets(long now, boolean closing) {
|
||||
if (currentOffsets.isEmpty())
|
||||
return;
|
||||
|
||||
committing = true;
|
||||
commitSeqno += 1;
|
||||
commitStarted = now;
|
||||
sinkTaskMetricsGroup.recordOffsetSequenceNumber(commitSeqno);
|
||||
|
||||
final Map<TopicPartition, OffsetAndMetadata> taskProvidedOffsets;
|
||||
try {
|
||||
log.trace("{} Calling task.preCommit with current offsets: {}", this, currentOffsets);
|
||||
taskProvidedOffsets = task.preCommit(new HashMap<>(currentOffsets));
|
||||
} catch (Throwable t) {
|
||||
if (closing) {
|
||||
log.warn("{} Offset commit failed during close", this);
|
||||
onCommitCompleted(t, commitSeqno, null);
|
||||
} else {
|
||||
log.error("{} Offset commit failed, rewinding to last committed offsets", this, t);
|
||||
for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : lastCommittedOffsets.entrySet()) {
|
||||
log.debug("{} Rewinding topic partition {} to offset {}", this, entry.getKey(), entry.getValue().offset());
|
||||
consumer.seek(entry.getKey(), entry.getValue().offset());
|
||||
}
|
||||
currentOffsets = new HashMap<>(lastCommittedOffsets);
|
||||
onCommitCompleted(t, commitSeqno, null);
|
||||
}
|
||||
return;
|
||||
} finally {
|
||||
if (closing) {
|
||||
log.trace("{} Closing the task before committing the offsets: {}", this, currentOffsets);
|
||||
task.close(currentOffsets.keySet());
|
||||
}
|
||||
}
|
||||
|
||||
if (taskProvidedOffsets.isEmpty()) {
|
||||
log.debug("{} Skipping offset commit, task opted-out by returning no offsets from preCommit", this);
|
||||
onCommitCompleted(null, commitSeqno, null);
|
||||
return;
|
||||
}
|
||||
|
||||
final Map<TopicPartition, OffsetAndMetadata> commitableOffsets = new HashMap<>(lastCommittedOffsets);
|
||||
for (Map.Entry<TopicPartition, OffsetAndMetadata> taskProvidedOffsetEntry : taskProvidedOffsets.entrySet()) {
|
||||
final TopicPartition partition = taskProvidedOffsetEntry.getKey();
|
||||
final OffsetAndMetadata taskProvidedOffset = taskProvidedOffsetEntry.getValue();
|
||||
if (commitableOffsets.containsKey(partition)) {
|
||||
long taskOffset = taskProvidedOffset.offset();
|
||||
long currentOffset = currentOffsets.get(partition).offset();
|
||||
if (taskOffset <= currentOffset) {
|
||||
commitableOffsets.put(partition, taskProvidedOffset);
|
||||
} else {
|
||||
log.warn("{} Ignoring invalid task provided offset {}/{} -- not yet consumed, taskOffset={} currentOffset={}",
|
||||
this, partition, taskProvidedOffset, taskOffset, currentOffset);
|
||||
}
|
||||
} else {
|
||||
log.warn("{} Ignoring invalid task provided offset {}/{} -- partition not assigned, assignment={}",
|
||||
this, partition, taskProvidedOffset, consumer.assignment());
|
||||
}
|
||||
}
|
||||
|
||||
if (commitableOffsets.equals(lastCommittedOffsets)) {
|
||||
log.debug("{} Skipping offset commit, no change since last commit", this);
|
||||
onCommitCompleted(null, commitSeqno, null);
|
||||
return;
|
||||
}
|
||||
|
||||
doCommit(commitableOffsets, closing, commitSeqno);
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "WorkerSinkTask{" +
|
||||
"id=" + id +
|
||||
'}';
|
||||
}
|
||||
|
||||
private ConsumerRecords<byte[], byte[]> pollConsumer(long timeoutMs) {
|
||||
ConsumerRecords<byte[], byte[]> msgs = consumer.poll(Duration.ofMillis(timeoutMs));
|
||||
|
||||
// Exceptions raised from the task during a rebalance should be rethrown to stop the worker
|
||||
if (rebalanceException != null) {
|
||||
RuntimeException e = rebalanceException;
|
||||
rebalanceException = null;
|
||||
throw e;
|
||||
}
|
||||
|
||||
sinkTaskMetricsGroup.recordRead(msgs.count());
|
||||
return msgs;
|
||||
}
|
||||
|
||||
private void convertMessages(ConsumerRecords<byte[], byte[]> msgs) {
|
||||
origOffsets.clear();
|
||||
for (ConsumerRecord<byte[], byte[]> msg : msgs) {
|
||||
log.trace("{} Consuming and converting message in topic '{}' partition {} at offset {} and timestamp {}",
|
||||
this, msg.topic(), msg.partition(), msg.offset(), msg.timestamp());
|
||||
|
||||
retryWithToleranceOperator.consumerRecord(msg);
|
||||
|
||||
SinkRecord transRecord = convertAndTransformRecord(msg);
|
||||
|
||||
origOffsets.put(
|
||||
new TopicPartition(msg.topic(), msg.partition()),
|
||||
new OffsetAndMetadata(msg.offset() + 1)
|
||||
);
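            // Offsets to commit always point at the next record to consume, hence offset + 1.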
|
||||
if (transRecord != null) {
|
||||
messageBatch.add(transRecord);
|
||||
} else {
|
||||
log.trace(
|
||||
"{} Converters and transformations returned null, possibly because of too many retries, so " +
|
||||
"dropping record in topic '{}' partition {} at offset {}",
|
||||
this, msg.topic(), msg.partition(), msg.offset()
|
||||
);
|
||||
}
|
||||
}
|
||||
sinkTaskMetricsGroup.recordConsumedOffsets(origOffsets);
|
||||
}
|
||||
|
||||
private SinkRecord convertAndTransformRecord(final ConsumerRecord<byte[], byte[]> msg) {
|
||||
SchemaAndValue keyAndSchema = retryWithToleranceOperator.execute(() -> keyConverter.toConnectData(msg.topic(), msg.headers(), msg.key()),
|
||||
Stage.KEY_CONVERTER, keyConverter.getClass());
|
||||
|
||||
SchemaAndValue valueAndSchema = retryWithToleranceOperator.execute(() -> valueConverter.toConnectData(msg.topic(), msg.headers(), msg.value()),
|
||||
Stage.VALUE_CONVERTER, valueConverter.getClass());
|
||||
|
||||
Headers headers = retryWithToleranceOperator.execute(() -> convertHeadersFor(msg), Stage.HEADER_CONVERTER, headerConverter.getClass());
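        // If any conversion step exhausted its retries or error tolerance, drop the record;
        // the caller logs the dropped record and moves on.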
|
||||
|
||||
if (retryWithToleranceOperator.failed()) {
|
||||
return null;
|
||||
}
|
||||
|
||||
Long timestamp = ConnectUtils.checkAndConvertTimestamp(msg.timestamp());
|
||||
SinkRecord origRecord = new SinkRecord(msg.topic(), msg.partition(),
|
||||
keyAndSchema.schema(), keyAndSchema.value(),
|
||||
valueAndSchema.schema(), valueAndSchema.value(),
|
||||
msg.offset(),
|
||||
timestamp,
|
||||
msg.timestampType(),
|
||||
headers);
|
||||
log.trace("{} Applying transformations to record in topic '{}' partition {} at offset {} and timestamp {} with key {} and value {}",
|
||||
this, msg.topic(), msg.partition(), msg.offset(), timestamp, keyAndSchema.value(), valueAndSchema.value());
|
||||
if (isTopicTrackingEnabled) {
|
||||
recordActiveTopic(origRecord.topic());
|
||||
}
|
||||
return transformationChain.apply(origRecord);
|
||||
}
|
||||
|
||||
private Headers convertHeadersFor(ConsumerRecord<byte[], byte[]> record) {
|
||||
Headers result = new ConnectHeaders();
|
||||
org.apache.kafka.common.header.Headers recordHeaders = record.headers();
|
||||
if (recordHeaders != null) {
|
||||
String topic = record.topic();
|
||||
for (org.apache.kafka.common.header.Header recordHeader : recordHeaders) {
|
||||
SchemaAndValue schemaAndValue = headerConverter.toConnectHeader(topic, recordHeader.key(), recordHeader.value());
|
||||
result.add(recordHeader.key(), schemaAndValue);
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
private void resumeAll() {
|
||||
for (TopicPartition tp : consumer.assignment())
|
||||
if (!context.pausedPartitions().contains(tp))
|
||||
consumer.resume(singleton(tp));
|
||||
}
|
||||
|
||||
private void pauseAll() {
|
||||
consumer.pause(consumer.assignment());
|
||||
}
|
||||
|
||||
private void deliverMessages() {
|
||||
// Finally, deliver this batch to the sink
|
||||
try {
|
||||
// Since we reuse the messageBatch buffer, ensure we give the task its own copy
|
||||
log.trace("{} Delivering batch of {} messages to task", this, messageBatch.size());
|
||||
long start = time.milliseconds();
|
||||
task.put(new ArrayList<>(messageBatch));
|
||||
recordBatch(messageBatch.size());
|
||||
sinkTaskMetricsGroup.recordPut(time.milliseconds() - start);
|
||||
currentOffsets.putAll(origOffsets);
|
||||
messageBatch.clear();
|
||||
// If we had paused all consumer topic partitions to try to redeliver data, then we should resume any that
|
||||
// the task had not explicitly paused
|
||||
if (pausedForRedelivery) {
|
||||
if (!shouldPause())
|
||||
resumeAll();
|
||||
pausedForRedelivery = false;
|
||||
}
|
||||
} catch (RetriableException e) {
|
||||
log.error("{} RetriableException from SinkTask:", this, e);
|
||||
// If we're retrying a previous batch, make sure we've paused all topic partitions so we don't get new data,
|
||||
// but will still be able to poll in order to handle user-requested timeouts, keep group membership, etc.
|
||||
pausedForRedelivery = true;
|
||||
pauseAll();
|
||||
// Let this exit normally, the batch will be reprocessed on the next loop.
|
||||
} catch (Throwable t) {
|
||||
log.error("{} Task threw an uncaught and unrecoverable exception. Task is being killed and will not "
|
||||
+ "recover until manually restarted. Error: {}", this, t.getMessage(), t);
|
||||
throw new ConnectException("Exiting WorkerSinkTask due to unrecoverable exception.", t);
|
||||
}
|
||||
}
|
||||
|
||||
private void rewind() {
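        // Apply any offsets the task requested via SinkTaskContext.offset(): seek the consumer
        // and reset the tracked committed/current offsets to the rewound positions.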
|
||||
Map<TopicPartition, Long> offsets = context.offsets();
|
||||
if (offsets.isEmpty()) {
|
||||
return;
|
||||
}
|
||||
for (Map.Entry<TopicPartition, Long> entry: offsets.entrySet()) {
|
||||
TopicPartition tp = entry.getKey();
|
||||
Long offset = entry.getValue();
|
||||
if (offset != null) {
|
||||
log.trace("{} Rewind {} to offset {}", this, tp, offset);
|
||||
consumer.seek(tp, offset);
|
||||
lastCommittedOffsets.put(tp, new OffsetAndMetadata(offset));
|
||||
currentOffsets.put(tp, new OffsetAndMetadata(offset));
|
||||
} else {
|
||||
log.warn("{} Cannot rewind {} to null offset", this, tp);
|
||||
}
|
||||
}
|
||||
context.clearOffsets();
|
||||
}
|
||||
|
||||
private void openPartitions(Collection<TopicPartition> partitions) {
|
||||
sinkTaskMetricsGroup.recordPartitionCount(partitions.size());
|
||||
task.open(partitions);
|
||||
}
|
||||
|
||||
private void closePartitions() {
|
||||
commitOffsets(time.milliseconds(), true);
|
||||
sinkTaskMetricsGroup.recordPartitionCount(0);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void recordBatch(int size) {
|
||||
super.recordBatch(size);
|
||||
sinkTaskMetricsGroup.recordSend(size);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void recordCommitFailure(long duration, Throwable error) {
|
||||
super.recordCommitFailure(duration, error);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void recordCommitSuccess(long duration) {
|
||||
super.recordCommitSuccess(duration);
|
||||
sinkTaskMetricsGroup.recordOffsetCommitSuccess();
|
||||
}
|
||||
|
||||
SinkTaskMetricsGroup sinkTaskMetricsGroup() {
|
||||
return sinkTaskMetricsGroup;
|
||||
}
|
||||
|
||||
// Visible for testing
|
||||
long getNextCommit() {
|
||||
return nextCommit;
|
||||
}
|
||||
|
||||
private class HandleRebalance implements ConsumerRebalanceListener {
|
||||
@Override
|
||||
public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
|
||||
log.debug("{} Partitions assigned {}", WorkerSinkTask.this, partitions);
|
||||
lastCommittedOffsets = new HashMap<>();
|
||||
currentOffsets = new HashMap<>();
|
||||
for (TopicPartition tp : partitions) {
|
||||
long pos = consumer.position(tp);
|
||||
lastCommittedOffsets.put(tp, new OffsetAndMetadata(pos));
|
||||
currentOffsets.put(tp, new OffsetAndMetadata(pos));
|
||||
log.debug("{} Assigned topic partition {} with offset {}", WorkerSinkTask.this, tp, pos);
|
||||
}
|
||||
sinkTaskMetricsGroup.assignedOffsets(currentOffsets);
|
||||
|
||||
// If we paused everything for redelivery (which is no longer relevant since we discarded the data), make
|
||||
// sure anything we paused that the task didn't request to be paused *and* which we still own is resumed.
|
||||
// Also make sure our tracking of paused partitions is updated to remove any partitions we no longer own.
|
||||
pausedForRedelivery = false;
|
||||
|
||||
            // Ensure that the set of paused partitions contains only assigned partitions, and re-pause them as necessary
|
||||
context.pausedPartitions().retainAll(partitions);
|
||||
if (shouldPause())
|
||||
pauseAll();
|
||||
else if (!context.pausedPartitions().isEmpty())
|
||||
consumer.pause(context.pausedPartitions());
|
||||
|
||||
// Instead of invoking the assignment callback on initialization, we guarantee the consumer is ready upon
|
||||
// task start. Since this callback gets invoked during that initial setup before we've started the task, we
|
||||
// need to guard against invoking the user's callback method during that period.
|
||||
if (rebalanceException == null || rebalanceException instanceof WakeupException) {
|
||||
try {
|
||||
openPartitions(partitions);
|
||||
// Rewind should be applied only if openPartitions succeeds.
|
||||
rewind();
|
||||
} catch (RuntimeException e) {
|
||||
// The consumer swallows exceptions raised in the rebalance listener, so we need to store
|
||||
// exceptions and rethrow when poll() returns.
|
||||
rebalanceException = e;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
|
||||
log.debug("{} Partitions revoked", WorkerSinkTask.this);
|
||||
try {
|
||||
closePartitions();
|
||||
sinkTaskMetricsGroup.clearOffsets();
|
||||
} catch (RuntimeException e) {
|
||||
// The consumer swallows exceptions raised in the rebalance listener, so we need to store
|
||||
// exceptions and rethrow when poll() returns.
|
||||
rebalanceException = e;
|
||||
}
|
||||
|
||||
// Make sure we don't have any leftover data since offsets will be reset to committed positions
|
||||
messageBatch.clear();
|
||||
}
|
||||
}
|
||||
|
||||
static class SinkTaskMetricsGroup {
|
||||
private final ConnectorTaskId id;
|
||||
private final ConnectMetrics metrics;
|
||||
private final MetricGroup metricGroup;
|
||||
private final Sensor sinkRecordRead;
|
||||
private final Sensor sinkRecordSend;
|
||||
private final Sensor partitionCount;
|
||||
private final Sensor offsetSeqNum;
|
||||
private final Sensor offsetCompletion;
|
||||
private final Sensor offsetCompletionSkip;
|
||||
private final Sensor putBatchTime;
|
||||
private final Sensor sinkRecordActiveCount;
|
||||
private long activeRecords;
|
||||
private Map<TopicPartition, OffsetAndMetadata> consumedOffsets = new HashMap<>();
|
||||
private Map<TopicPartition, OffsetAndMetadata> committedOffsets = new HashMap<>();
|
||||
|
||||
public SinkTaskMetricsGroup(ConnectorTaskId id, ConnectMetrics connectMetrics) {
|
||||
this.metrics = connectMetrics;
|
||||
this.id = id;
|
||||
|
||||
ConnectMetricsRegistry registry = connectMetrics.registry();
|
||||
metricGroup = connectMetrics
|
||||
.group(registry.sinkTaskGroupName(), registry.connectorTagName(), id.connector(), registry.taskTagName(),
|
||||
Integer.toString(id.task()));
|
||||
// prevent collisions by removing any previously created metrics in this group.
|
||||
metricGroup.close();
|
||||
|
||||
sinkRecordRead = metricGroup.sensor("sink-record-read");
|
||||
sinkRecordRead.add(metricGroup.metricName(registry.sinkRecordReadRate), new Rate());
|
||||
sinkRecordRead.add(metricGroup.metricName(registry.sinkRecordReadTotal), new CumulativeSum());
|
||||
|
||||
sinkRecordSend = metricGroup.sensor("sink-record-send");
|
||||
sinkRecordSend.add(metricGroup.metricName(registry.sinkRecordSendRate), new Rate());
|
||||
sinkRecordSend.add(metricGroup.metricName(registry.sinkRecordSendTotal), new CumulativeSum());
|
||||
|
||||
sinkRecordActiveCount = metricGroup.sensor("sink-record-active-count");
|
||||
sinkRecordActiveCount.add(metricGroup.metricName(registry.sinkRecordActiveCount), new Value());
|
||||
sinkRecordActiveCount.add(metricGroup.metricName(registry.sinkRecordActiveCountMax), new Max());
|
||||
sinkRecordActiveCount.add(metricGroup.metricName(registry.sinkRecordActiveCountAvg), new Avg());
|
||||
|
||||
partitionCount = metricGroup.sensor("partition-count");
|
||||
partitionCount.add(metricGroup.metricName(registry.sinkRecordPartitionCount), new Value());
|
||||
|
||||
offsetSeqNum = metricGroup.sensor("offset-seq-number");
|
||||
offsetSeqNum.add(metricGroup.metricName(registry.sinkRecordOffsetCommitSeqNum), new Value());
|
||||
|
||||
offsetCompletion = metricGroup.sensor("offset-commit-completion");
|
||||
offsetCompletion.add(metricGroup.metricName(registry.sinkRecordOffsetCommitCompletionRate), new Rate());
|
||||
offsetCompletion.add(metricGroup.metricName(registry.sinkRecordOffsetCommitCompletionTotal), new CumulativeSum());
|
||||
|
||||
offsetCompletionSkip = metricGroup.sensor("offset-commit-completion-skip");
|
||||
offsetCompletionSkip.add(metricGroup.metricName(registry.sinkRecordOffsetCommitSkipRate), new Rate());
|
||||
offsetCompletionSkip.add(metricGroup.metricName(registry.sinkRecordOffsetCommitSkipTotal), new CumulativeSum());
|
||||
|
||||
putBatchTime = metricGroup.sensor("put-batch-time");
|
||||
putBatchTime.add(metricGroup.metricName(registry.sinkRecordPutBatchTimeMax), new Max());
|
||||
putBatchTime.add(metricGroup.metricName(registry.sinkRecordPutBatchTimeAvg), new Avg());
|
||||
}
|
||||
|
||||
void computeSinkRecordLag() {
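            // Active (in-flight) record count = sum over partitions of consumed offset minus
            // committed offset, clamped at zero per partition.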
|
||||
Map<TopicPartition, OffsetAndMetadata> consumed = this.consumedOffsets;
|
||||
Map<TopicPartition, OffsetAndMetadata> committed = this.committedOffsets;
|
||||
activeRecords = 0L;
|
||||
for (Map.Entry<TopicPartition, OffsetAndMetadata> committedOffsetEntry : committed.entrySet()) {
|
||||
final TopicPartition partition = committedOffsetEntry.getKey();
|
||||
final OffsetAndMetadata consumedOffsetMeta = consumed.get(partition);
|
||||
if (consumedOffsetMeta != null) {
|
||||
final OffsetAndMetadata committedOffsetMeta = committedOffsetEntry.getValue();
|
||||
long consumedOffset = consumedOffsetMeta.offset();
|
||||
long committedOffset = committedOffsetMeta.offset();
|
||||
long diff = consumedOffset - committedOffset;
|
||||
                // Tasks can return arbitrary offsets from preCommit(), so guard against a committed
                // offset that is ahead of the consumed offset producing a negative count
|
||||
activeRecords += Math.max(diff, 0L);
|
||||
}
|
||||
}
|
||||
sinkRecordActiveCount.record(activeRecords);
|
||||
}
|
||||
|
||||
void close() {
|
||||
metricGroup.close();
|
||||
}
|
||||
|
||||
void recordRead(int batchSize) {
|
||||
sinkRecordRead.record(batchSize);
|
||||
}
|
||||
|
||||
void recordSend(int batchSize) {
|
||||
sinkRecordSend.record(batchSize);
|
||||
}
|
||||
|
||||
void recordPut(long duration) {
|
||||
putBatchTime.record(duration);
|
||||
}
|
||||
|
||||
void recordPartitionCount(int assignedPartitionCount) {
|
||||
partitionCount.record(assignedPartitionCount);
|
||||
}
|
||||
|
||||
void recordOffsetSequenceNumber(int seqNum) {
|
||||
offsetSeqNum.record(seqNum);
|
||||
}
|
||||
|
||||
void recordConsumedOffsets(Map<TopicPartition, OffsetAndMetadata> offsets) {
|
||||
consumedOffsets.putAll(offsets);
|
||||
computeSinkRecordLag();
|
||||
}
|
||||
|
||||
void recordCommittedOffsets(Map<TopicPartition, OffsetAndMetadata> offsets) {
|
||||
committedOffsets = offsets;
|
||||
computeSinkRecordLag();
|
||||
}
|
||||
|
||||
void assignedOffsets(Map<TopicPartition, OffsetAndMetadata> offsets) {
|
||||
consumedOffsets = new HashMap<>(offsets);
|
||||
committedOffsets = offsets;
|
||||
sinkRecordActiveCount.record(0.0);
|
||||
}
|
||||
|
||||
void clearOffsets() {
|
||||
consumedOffsets.clear();
|
||||
committedOffsets.clear();
|
||||
sinkRecordActiveCount.record(0.0);
|
||||
}
|
||||
|
||||
void recordOffsetCommitSuccess() {
|
||||
offsetCompletion.record(1.0);
|
||||
}
|
||||
|
||||
void recordOffsetCommitSkip() {
|
||||
offsetCompletionSkip.record(1.0);
|
||||
}
|
||||
|
||||
protected MetricGroup metricGroup() {
|
||||
return metricGroup;
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,167 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.runtime;
|
||||
|
||||
import org.apache.kafka.clients.consumer.KafkaConsumer;
|
||||
import org.apache.kafka.common.TopicPartition;
|
||||
import org.apache.kafka.connect.errors.IllegalWorkerStateException;
|
||||
import org.apache.kafka.connect.runtime.distributed.ClusterConfigState;
|
||||
import org.apache.kafka.connect.sink.SinkTaskContext;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
public class WorkerSinkTaskContext implements SinkTaskContext {
|
||||
|
||||
private final Logger log = LoggerFactory.getLogger(getClass());
|
||||
private Map<TopicPartition, Long> offsets;
|
||||
private long timeoutMs;
|
||||
private KafkaConsumer<byte[], byte[]> consumer;
|
||||
private final WorkerSinkTask sinkTask;
|
||||
private final ClusterConfigState configState;
|
||||
private final Set<TopicPartition> pausedPartitions;
|
||||
private boolean commitRequested;
|
||||
|
||||
public WorkerSinkTaskContext(KafkaConsumer<byte[], byte[]> consumer,
|
||||
WorkerSinkTask sinkTask,
|
||||
ClusterConfigState configState) {
|
||||
this.offsets = new HashMap<>();
|
||||
this.timeoutMs = -1L;
|
||||
this.consumer = consumer;
|
||||
this.sinkTask = sinkTask;
|
||||
this.configState = configState;
|
||||
this.pausedPartitions = new HashSet<>();
|
||||
}
|
||||
|
||||
@Override
|
||||
public Map<String, String> configs() {
|
||||
return configState.taskConfig(sinkTask.id());
|
||||
}
|
||||
|
||||
@Override
|
||||
public void offset(Map<TopicPartition, Long> offsets) {
|
||||
log.debug("{} Setting offsets for topic partitions {}", this, offsets);
|
||||
this.offsets.putAll(offsets);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void offset(TopicPartition tp, long offset) {
|
||||
log.debug("{} Setting offset for topic partition {} to {}", this, tp, offset);
|
||||
offsets.put(tp, offset);
|
||||
}
|
||||
|
||||
public void clearOffsets() {
|
||||
offsets.clear();
|
||||
}
|
||||
|
||||
/**
|
||||
* Get offsets that the SinkTask has submitted to be reset. Used by the Kafka Connect framework.
|
||||
* @return the map of offsets
|
||||
*/
|
||||
public Map<TopicPartition, Long> offsets() {
|
||||
return offsets;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void timeout(long timeoutMs) {
|
||||
log.debug("{} Setting timeout to {} ms", this, timeoutMs);
|
||||
this.timeoutMs = timeoutMs;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the timeout in milliseconds set by SinkTasks. Used by the Kafka Connect framework.
|
||||
* @return the backoff timeout in milliseconds.
|
||||
*/
|
||||
public long timeout() {
|
||||
return timeoutMs;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Set<TopicPartition> assignment() {
|
||||
if (consumer == null) {
|
||||
throw new IllegalWorkerStateException("SinkTaskContext may not be used to look up partition assignment until the task is initialized");
|
||||
}
|
||||
return consumer.assignment();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void pause(TopicPartition... partitions) {
|
||||
if (consumer == null) {
|
||||
throw new IllegalWorkerStateException("SinkTaskContext may not be used to pause consumption until the task is initialized");
|
||||
}
|
||||
try {
|
||||
Collections.addAll(pausedPartitions, partitions);
|
||||
if (sinkTask.shouldPause()) {
|
||||
log.debug("{} Connector is paused, so not pausing consumer's partitions {}", this, partitions);
|
||||
} else {
|
||||
consumer.pause(Arrays.asList(partitions));
|
||||
log.debug("{} Pausing partitions {}. Connector is not paused.", this, partitions);
|
||||
}
|
||||
} catch (IllegalStateException e) {
|
||||
throw new IllegalWorkerStateException("SinkTasks may not pause partitions that are not currently assigned to them.", e);
|
||||
}
|
||||
}
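    // Typical use from a SinkTask (illustrative, not part of this file): under backpressure a
    // task calls context.pause(partitions) from put() and context.resume(partitions) once it has
    // caught up; pauses recorded here are re-applied to still-owned partitions after a rebalance.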
|
||||
|
||||
@Override
|
||||
public void resume(TopicPartition... partitions) {
|
||||
if (consumer == null) {
|
||||
throw new IllegalWorkerStateException("SinkTaskContext may not be used to resume consumption until the task is initialized");
|
||||
}
|
||||
try {
|
||||
pausedPartitions.removeAll(Arrays.asList(partitions));
|
||||
if (sinkTask.shouldPause()) {
|
||||
log.debug("{} Connector is paused, so not resuming consumer's partitions {}", this, partitions);
|
||||
} else {
|
||||
consumer.resume(Arrays.asList(partitions));
|
||||
log.debug("{} Resuming partitions: {}", this, partitions);
|
||||
}
|
||||
} catch (IllegalStateException e) {
|
||||
throw new IllegalWorkerStateException("SinkTasks may not resume partitions that are not currently assigned to them.", e);
|
||||
}
|
||||
}
|
||||
|
||||
public Set<TopicPartition> pausedPartitions() {
|
||||
return pausedPartitions;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void requestCommit() {
|
||||
log.debug("{} Requesting commit", this);
|
||||
commitRequested = true;
|
||||
}
|
||||
|
||||
public boolean isCommitRequested() {
|
||||
return commitRequested;
|
||||
}
|
||||
|
||||
public void clearCommitRequest() {
|
||||
commitRequested = false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "WorkerSinkTaskContext{" +
|
||||
"id=" + sinkTask.id +
|
||||
'}';
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,658 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.runtime;
|
||||
|
||||
import org.apache.kafka.clients.producer.Callback;
|
||||
import org.apache.kafka.clients.producer.KafkaProducer;
|
||||
import org.apache.kafka.clients.producer.ProducerRecord;
|
||||
import org.apache.kafka.clients.producer.RecordMetadata;
|
||||
import org.apache.kafka.common.KafkaException;
|
||||
import org.apache.kafka.common.header.internals.RecordHeaders;
|
||||
import org.apache.kafka.common.metrics.Sensor;
|
||||
import org.apache.kafka.common.metrics.stats.Avg;
|
||||
import org.apache.kafka.common.metrics.stats.CumulativeSum;
|
||||
import org.apache.kafka.common.metrics.stats.Max;
|
||||
import org.apache.kafka.common.metrics.stats.Rate;
|
||||
import org.apache.kafka.common.metrics.stats.Value;
|
||||
import org.apache.kafka.common.utils.Time;
|
||||
import org.apache.kafka.connect.errors.ConnectException;
|
||||
import org.apache.kafka.connect.errors.RetriableException;
|
||||
import org.apache.kafka.connect.header.Header;
|
||||
import org.apache.kafka.connect.header.Headers;
|
||||
import org.apache.kafka.connect.runtime.ConnectMetrics.MetricGroup;
|
||||
import org.apache.kafka.connect.runtime.distributed.ClusterConfigState;
|
||||
import org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperator;
|
||||
import org.apache.kafka.connect.runtime.errors.Stage;
|
||||
import org.apache.kafka.connect.source.SourceRecord;
|
||||
import org.apache.kafka.connect.source.SourceTask;
|
||||
import org.apache.kafka.connect.storage.CloseableOffsetStorageReader;
|
||||
import org.apache.kafka.connect.storage.Converter;
|
||||
import org.apache.kafka.connect.storage.HeaderConverter;
|
||||
import org.apache.kafka.connect.storage.OffsetStorageWriter;
|
||||
import org.apache.kafka.connect.storage.StatusBackingStore;
|
||||
import org.apache.kafka.connect.util.ConnectUtils;
|
||||
import org.apache.kafka.connect.util.ConnectorTaskId;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import java.time.Duration;
|
||||
import java.util.IdentityHashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
import java.util.concurrent.ExecutionException;
|
||||
import java.util.concurrent.Future;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.TimeoutException;
|
||||
import java.util.concurrent.atomic.AtomicReference;
|
||||
|
||||
import static org.apache.kafka.connect.runtime.WorkerConfig.TOPIC_TRACKING_ENABLE_CONFIG;
|
||||
|
||||
/**
|
||||
* WorkerTask that uses a SourceTask to ingest data into Kafka.
|
||||
*/
|
||||
class WorkerSourceTask extends WorkerTask {
    private static final Logger log = LoggerFactory.getLogger(WorkerSourceTask.class);

    private static final long SEND_FAILED_BACKOFF_MS = 100;

    private final WorkerConfig workerConfig;
    private final SourceTask task;
    private final ClusterConfigState configState;
    private final Converter keyConverter;
    private final Converter valueConverter;
    private final HeaderConverter headerConverter;
    private final TransformationChain<SourceRecord> transformationChain;
    private KafkaProducer<byte[], byte[]> producer;
    private final CloseableOffsetStorageReader offsetReader;
    private final OffsetStorageWriter offsetWriter;
    private final SourceTaskMetricsGroup sourceTaskMetricsGroup;
    private final AtomicReference<Exception> producerSendException;
    private final boolean isTopicTrackingEnabled;

    private List<SourceRecord> toSend;
    private boolean lastSendFailed; // Whether the last send failed *synchronously*, i.e. never made it into the producer's RecordAccumulator
    // Use IdentityHashMap to ensure correctness with duplicate records. This is a HashMap because
    // there is no IdentityHashSet.
    private IdentityHashMap<ProducerRecord<byte[], byte[]>, ProducerRecord<byte[], byte[]>> outstandingMessages;
    // A second buffer is used while an offset flush is running
    private IdentityHashMap<ProducerRecord<byte[], byte[]>, ProducerRecord<byte[], byte[]>> outstandingMessagesBacklog;
    private boolean flushing;
    private CountDownLatch stopRequestedLatch;

    private Map<String, String> taskConfig;
    private boolean finishedStart = false;
    private boolean startedShutdownBeforeStartCompleted = false;
    private boolean stopped = false;

    public WorkerSourceTask(ConnectorTaskId id,
                            SourceTask task,
                            TaskStatus.Listener statusListener,
                            TargetState initialState,
                            Converter keyConverter,
                            Converter valueConverter,
                            HeaderConverter headerConverter,
                            TransformationChain<SourceRecord> transformationChain,
                            KafkaProducer<byte[], byte[]> producer,
                            CloseableOffsetStorageReader offsetReader,
                            OffsetStorageWriter offsetWriter,
                            WorkerConfig workerConfig,
                            ClusterConfigState configState,
                            ConnectMetrics connectMetrics,
                            ClassLoader loader,
                            Time time,
                            RetryWithToleranceOperator retryWithToleranceOperator,
                            StatusBackingStore statusBackingStore) {

        super(id, statusListener, initialState, loader, connectMetrics,
                retryWithToleranceOperator, time, statusBackingStore);

        this.workerConfig = workerConfig;
        this.task = task;
        this.configState = configState;
        this.keyConverter = keyConverter;
        this.valueConverter = valueConverter;
        this.headerConverter = headerConverter;
        this.transformationChain = transformationChain;
        this.producer = producer;
        this.offsetReader = offsetReader;
        this.offsetWriter = offsetWriter;

        this.toSend = null;
        this.lastSendFailed = false;
        this.outstandingMessages = new IdentityHashMap<>();
        this.outstandingMessagesBacklog = new IdentityHashMap<>();
        this.flushing = false;
        this.stopRequestedLatch = new CountDownLatch(1);
        this.sourceTaskMetricsGroup = new SourceTaskMetricsGroup(id, connectMetrics);
        this.producerSendException = new AtomicReference<>();
        this.isTopicTrackingEnabled = workerConfig.getBoolean(TOPIC_TRACKING_ENABLE_CONFIG);
    }

    @Override
    public void initialize(TaskConfig taskConfig) {
        try {
            this.taskConfig = taskConfig.originalsStrings();
        } catch (Throwable t) {
            log.error("{} Task failed initialization and will not be started.", this, t);
            onFailure(t);
        }
    }

    @Override
    protected void close() {
        if (!shouldPause()) {
            tryStop();
        }
        if (producer != null) {
            try {
                producer.close(Duration.ofSeconds(30));
            } catch (Throwable t) {
                log.warn("Could not close producer", t);
            }
        }
        try {
            transformationChain.close();
        } catch (Throwable t) {
            log.warn("Could not close transformation chain", t);
        }
    }

    @Override
    protected void releaseResources() {
        sourceTaskMetricsGroup.close();
    }

    @Override
    public void cancel() {
        super.cancel();
        offsetReader.close();
    }

    @Override
    public void stop() {
        super.stop();
        stopRequestedLatch.countDown();
        synchronized (this) {
            if (finishedStart)
                tryStop();
            else
                startedShutdownBeforeStartCompleted = true;
        }
    }

    private synchronized void tryStop() {
        if (!stopped) {
            try {
                task.stop();
                stopped = true;
            } catch (Throwable t) {
                log.warn("Could not stop task", t);
            }
        }
    }

    @Override
    public void execute() {
        try {
            task.initialize(new WorkerSourceTaskContext(offsetReader, this, configState));
            task.start(taskConfig);
            log.info("{} Source task finished initialization and start", this);
            synchronized (this) {
                if (startedShutdownBeforeStartCompleted) {
                    tryStop();
                    return;
                }
                finishedStart = true;
            }

            while (!isStopping()) {
                if (shouldPause()) {
                    onPause();
                    if (awaitUnpause()) {
                        onResume();
                    }
                    continue;
                }

                maybeThrowProducerSendException();

                if (toSend == null) {
                    log.trace("{} Nothing to send to Kafka. Polling source for additional records", this);
                    long start = time.milliseconds();
                    toSend = poll();
                    if (toSend != null) {
                        recordPollReturned(toSend.size(), time.milliseconds() - start);
                    }
                }
                if (toSend == null)
                    continue;
                log.trace("{} About to send {} records to Kafka", this, toSend.size());
                if (!sendRecords())
                    stopRequestedLatch.await(SEND_FAILED_BACKOFF_MS, TimeUnit.MILLISECONDS);
            }
        } catch (InterruptedException e) {
            // Ignore and allow to exit.
        } finally {
            // It should still be safe to commit offsets since any exception would have
            // simply resulted in not getting more records but all the existing records should be ok to flush
            // and commit offsets. Worst case, task.flush() will also throw an exception causing the offset commit
            // to fail.
            commitOffsets();
        }
    }
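
    /*
     * Illustrative sketch only (not part of the Kafka Connect runtime): the loop in execute()
     * above alternates between SourceTask.poll() and sendRecords(). A minimal task that this loop
     * could drive might look like the following; the class name, topic name and properties are
     * hypothetical and imports are omitted.
     *
     *     public class TickSourceTask extends SourceTask {
     *         private long sequence = 0;
     *         @Override public String version() { return "0.0.1"; }
     *         @Override public void start(Map<String, String> props) { }
     *         @Override public void stop() { }
     *         @Override
     *         public List<SourceRecord> poll() throws InterruptedException {
     *             Thread.sleep(1000);
     *             Map<String, ?> sourcePartition = Collections.singletonMap("source", "tick");
     *             Map<String, ?> sourceOffset = Collections.singletonMap("sequence", sequence);
     *             return Collections.singletonList(new SourceRecord(
     *                     sourcePartition, sourceOffset, "tick-topic",
     *                     Schema.INT64_SCHEMA, sequence++));
     *         }
     *     }
     *
     * Each record's sourcePartition/sourceOffset pair is what offsetWriter.offset(...) captures in
     * sendRecords() and what commitOffsets() later flushes to the offset backing store.
     */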

    private void maybeThrowProducerSendException() {
        if (producerSendException.get() != null) {
            throw new ConnectException(
                    "Unrecoverable exception from producer send callback",
                    producerSendException.get()
            );
        }
    }

    protected List<SourceRecord> poll() throws InterruptedException {
        try {
            return task.poll();
        } catch (RetriableException | org.apache.kafka.common.errors.RetriableException e) {
            log.warn("{} failed to poll records from SourceTask. Will retry operation.", this, e);
            // Do nothing. Let the framework poll whenever it's ready.
            return null;
        }
    }
|
||||
|
||||
/**
|
||||
* Convert the source record into a producer record.
|
||||
*
|
||||
* @param record the transformed record
|
||||
* @return the producer record which can sent over to Kafka. A null is returned if the input is null or
|
||||
* if an error was encountered during any of the converter stages.
|
||||
*/
|
||||
private ProducerRecord<byte[], byte[]> convertTransformedRecord(SourceRecord record) {
|
||||
if (record == null) {
|
||||
return null;
|
||||
}
|
||||
|
||||
RecordHeaders headers = retryWithToleranceOperator.execute(() -> convertHeaderFor(record), Stage.HEADER_CONVERTER, headerConverter.getClass());
|
||||
|
||||
byte[] key = retryWithToleranceOperator.execute(() -> keyConverter.fromConnectData(record.topic(), headers, record.keySchema(), record.key()),
|
||||
Stage.KEY_CONVERTER, keyConverter.getClass());
|
||||
|
||||
byte[] value = retryWithToleranceOperator.execute(() -> valueConverter.fromConnectData(record.topic(), headers, record.valueSchema(), record.value()),
|
||||
Stage.VALUE_CONVERTER, valueConverter.getClass());
|
||||
|
||||
if (retryWithToleranceOperator.failed()) {
|
||||
return null;
|
||||
}
|
||||
|
||||
return new ProducerRecord<>(record.topic(), record.kafkaPartition(),
|
||||
ConnectUtils.checkAndConvertTimestamp(record.timestamp()), key, value, headers);
|
||||
}
|
||||
|
||||
/**
|
||||
* Try to send a batch of records. If a send fails and is retriable, this saves the remainder of the batch so it can
|
||||
* be retried after backing off. If a send fails and is not retriable, this will throw a ConnectException.
|
||||
* @return true if all messages were sent, false if some need to be retried
|
||||
*/
|
||||
private boolean sendRecords() {
|
||||
int processed = 0;
|
||||
recordBatch(toSend.size());
|
||||
final SourceRecordWriteCounter counter =
|
||||
toSend.size() > 0 ? new SourceRecordWriteCounter(toSend.size(), sourceTaskMetricsGroup) : null;
|
||||
for (final SourceRecord preTransformRecord : toSend) {
|
||||
maybeThrowProducerSendException();
|
||||
|
||||
retryWithToleranceOperator.sourceRecord(preTransformRecord);
|
||||
final SourceRecord record = transformationChain.apply(preTransformRecord);
|
||||
final ProducerRecord<byte[], byte[]> producerRecord = convertTransformedRecord(record);
|
||||
if (producerRecord == null || retryWithToleranceOperator.failed()) {
|
||||
counter.skipRecord();
|
||||
commitTaskRecord(preTransformRecord, null);
|
||||
continue;
|
||||
}
|
||||
|
||||
log.trace("{} Appending record with key {}, value {}", this, record.key(), record.value());
|
||||
// We need this queued first since the callback could happen immediately (even synchronously in some cases).
|
||||
// Because of this we need to be careful about handling retries -- we always save the previously attempted
|
||||
// record as part of toSend and need to use a flag to track whether we should actually add it to the outstanding
|
||||
// messages and update the offsets.
|
||||
synchronized (this) {
|
||||
if (!lastSendFailed) {
|
||||
if (!flushing) {
|
||||
outstandingMessages.put(producerRecord, producerRecord);
|
||||
} else {
|
||||
outstandingMessagesBacklog.put(producerRecord, producerRecord);
|
||||
}
|
||||
// Offsets are converted & serialized in the OffsetWriter
|
||||
offsetWriter.offset(record.sourcePartition(), record.sourceOffset());
|
||||
}
|
||||
}
|
||||
try {
|
||||
final String topic = producerRecord.topic();
|
||||
producer.send(
|
||||
producerRecord,
|
||||
new Callback() {
|
||||
@Override
|
||||
public void onCompletion(RecordMetadata recordMetadata, Exception e) {
|
||||
if (e != null) {
|
||||
log.error("{} failed to send record to {}:", WorkerSourceTask.this, topic, e);
|
||||
log.debug("{} Failed record: {}", WorkerSourceTask.this, preTransformRecord);
|
||||
producerSendException.compareAndSet(null, e);
|
||||
} else {
|
||||
recordSent(producerRecord);
|
||||
counter.completeRecord();
|
||||
log.trace("{} Wrote record successfully: topic {} partition {} offset {}",
|
||||
WorkerSourceTask.this,
|
||||
recordMetadata.topic(), recordMetadata.partition(),
|
||||
recordMetadata.offset());
|
||||
commitTaskRecord(preTransformRecord, recordMetadata);
|
||||
if (isTopicTrackingEnabled) {
|
||||
recordActiveTopic(producerRecord.topic());
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
lastSendFailed = false;
|
||||
} catch (org.apache.kafka.common.errors.RetriableException e) {
|
||||
log.warn("{} Failed to send {}, backing off before retrying:", this, producerRecord, e);
|
||||
toSend = toSend.subList(processed, toSend.size());
|
||||
lastSendFailed = true;
|
||||
counter.retryRemaining();
|
||||
return false;
|
||||
} catch (KafkaException e) {
|
||||
throw new ConnectException("Unrecoverable exception trying to send", e);
|
||||
}
|
||||
processed++;
|
||||
}
|
||||
toSend = null;
|
||||
return true;
|
||||
}
|
||||
|
||||
private RecordHeaders convertHeaderFor(SourceRecord record) {
|
||||
Headers headers = record.headers();
|
||||
RecordHeaders result = new RecordHeaders();
|
||||
if (headers != null) {
|
||||
String topic = record.topic();
|
||||
for (Header header : headers) {
|
||||
String key = header.key();
|
||||
byte[] rawHeader = headerConverter.fromConnectHeader(topic, key, header.schema(), header.value());
|
||||
result.add(key, rawHeader);
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
private void commitTaskRecord(SourceRecord record, RecordMetadata metadata) {
|
||||
try {
|
||||
task.commitRecord(record, metadata);
|
||||
} catch (Throwable t) {
|
||||
log.error("{} Exception thrown while calling task.commitRecord()", this, t);
|
||||
}
|
||||
}
|
||||
|
||||
private synchronized void recordSent(final ProducerRecord<byte[], byte[]> record) {
|
||||
ProducerRecord<byte[], byte[]> removed = outstandingMessages.remove(record);
|
||||
// While flushing, we may also see callbacks for items in the backlog
|
||||
if (removed == null && flushing)
|
||||
removed = outstandingMessagesBacklog.remove(record);
|
||||
// But if neither one had it, something is very wrong
|
||||
if (removed == null) {
|
||||
log.error("{} CRITICAL Saw callback for record that was not present in the outstanding message set: {}", this, record);
|
||||
} else if (flushing && outstandingMessages.isEmpty()) {
|
||||
// flush thread may be waiting on the outstanding messages to clear
|
||||
this.notifyAll();
|
||||
}
|
||||
}
|
||||
|
||||
public boolean commitOffsets() {
|
||||
long commitTimeoutMs = workerConfig.getLong(WorkerConfig.OFFSET_COMMIT_TIMEOUT_MS_CONFIG);
|
||||
|
||||
log.info("{} Committing offsets", this);
|
||||
|
||||
long started = time.milliseconds();
|
||||
long timeout = started + commitTimeoutMs;
|
||||
|
||||
synchronized (this) {
|
||||
// First we need to make sure we snapshot everything in exactly the current state. This
|
||||
// means both the current set of messages we're still waiting to finish, stored in this
|
||||
// class, which setting flushing = true will handle by storing any new values into a new
|
||||
// buffer; and the current set of user-specified offsets, stored in the
|
||||
// OffsetStorageWriter, for which we can use beginFlush() to initiate the snapshot.
|
||||
flushing = true;
|
||||
boolean flushStarted = offsetWriter.beginFlush();
|
||||
// Still wait for any producer records to flush, even if there aren't any offsets to write
|
||||
// to persistent storage
|
||||
|
||||
// Next we need to wait for all outstanding messages to finish sending
|
||||
log.info("{} flushing {} outstanding messages for offset commit", this, outstandingMessages.size());
|
||||
while (!outstandingMessages.isEmpty()) {
|
||||
try {
|
||||
long timeoutMs = timeout - time.milliseconds();
|
||||
if (timeoutMs <= 0) {
|
||||
log.error("{} Failed to flush, timed out while waiting for producer to flush outstanding {} messages", this, outstandingMessages.size());
|
||||
finishFailedFlush();
|
||||
recordCommitFailure(time.milliseconds() - started, null);
|
||||
return false;
|
||||
}
|
||||
this.wait(timeoutMs);
|
||||
} catch (InterruptedException e) {
|
||||
// We can get interrupted if we take too long committing when the work thread shutdown is requested,
|
||||
// requiring a forcible shutdown. Give up since we can't safely commit any offsets, but also need
|
||||
// to stop immediately
|
||||
log.error("{} Interrupted while flushing messages, offsets will not be committed", this);
|
||||
finishFailedFlush();
|
||||
recordCommitFailure(time.milliseconds() - started, null);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
if (!flushStarted) {
|
||||
// There was nothing in the offsets to process, but we still waited for the data in the
|
||||
// buffer to flush. This is useful since this can feed into metrics to monitor, e.g.
|
||||
// flush time, which can be used for monitoring even if the connector doesn't record any
|
||||
// offsets.
|
||||
finishSuccessfulFlush();
|
||||
long durationMillis = time.milliseconds() - started;
|
||||
recordCommitSuccess(durationMillis);
|
||||
log.debug("{} Finished offset commitOffsets successfully in {} ms",
|
||||
this, durationMillis);
|
||||
|
||||
commitSourceTask();
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
// Now we can actually flush the offsets to user storage.
|
||||
Future<Void> flushFuture = offsetWriter.doFlush(new org.apache.kafka.connect.util.Callback<Void>() {
|
||||
@Override
|
||||
public void onCompletion(Throwable error, Void result) {
|
||||
if (error != null) {
|
||||
log.error("{} Failed to flush offsets to storage: ", WorkerSourceTask.this, error);
|
||||
} else {
|
||||
log.trace("{} Finished flushing offsets to storage", WorkerSourceTask.this);
|
||||
}
|
||||
}
|
||||
});
|
||||
// Very rare case: offsets were unserializable and we finished immediately, unable to store
|
||||
// any data
|
||||
if (flushFuture == null) {
|
||||
finishFailedFlush();
|
||||
recordCommitFailure(time.milliseconds() - started, null);
|
||||
return false;
|
||||
}
|
||||
try {
|
||||
flushFuture.get(Math.max(timeout - time.milliseconds(), 0), TimeUnit.MILLISECONDS);
|
||||
// There's a small race here where we can get the callback just as this times out (and log
|
||||
// success), but then catch the exception below and cancel everything. This won't cause any
|
||||
// errors, is only wasteful in this minor edge case, and the worst result is that the log
|
||||
// could look a little confusing.
|
||||
} catch (InterruptedException e) {
|
||||
log.warn("{} Flush of offsets interrupted, cancelling", this);
|
||||
finishFailedFlush();
|
||||
recordCommitFailure(time.milliseconds() - started, e);
|
||||
return false;
|
||||
} catch (ExecutionException e) {
|
||||
log.error("{} Flush of offsets threw an unexpected exception: ", this, e);
|
||||
finishFailedFlush();
|
||||
recordCommitFailure(time.milliseconds() - started, e);
|
||||
return false;
|
||||
} catch (TimeoutException e) {
|
||||
log.error("{} Timed out waiting to flush offsets to storage", this);
|
||||
finishFailedFlush();
|
||||
recordCommitFailure(time.milliseconds() - started, null);
|
||||
return false;
|
||||
}
|
||||
|
||||
finishSuccessfulFlush();
|
||||
long durationMillis = time.milliseconds() - started;
|
||||
recordCommitSuccess(durationMillis);
|
||||
log.info("{} Finished commitOffsets successfully in {} ms",
|
||||
this, durationMillis);
|
||||
|
||||
commitSourceTask();
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
private void commitSourceTask() {
|
||||
try {
|
||||
this.task.commit();
|
||||
} catch (Throwable t) {
|
||||
log.error("{} Exception thrown while calling task.commit()", this, t);
|
||||
}
|
||||
}
|
||||
|
||||
private synchronized void finishFailedFlush() {
|
||||
offsetWriter.cancelFlush();
|
||||
outstandingMessages.putAll(outstandingMessagesBacklog);
|
||||
outstandingMessagesBacklog.clear();
|
||||
flushing = false;
|
||||
}
|
||||
|
||||
private synchronized void finishSuccessfulFlush() {
|
||||
// If we were successful, we can just swap instead of replacing items back into the original map
|
||||
IdentityHashMap<ProducerRecord<byte[], byte[]>, ProducerRecord<byte[], byte[]>> temp = outstandingMessages;
|
||||
outstandingMessages = outstandingMessagesBacklog;
|
||||
outstandingMessagesBacklog = temp;
|
||||
flushing = false;
|
||||
}
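
    /*
     * Note on the flush handshake implemented by commitOffsets(), finishFailedFlush() and
     * finishSuccessfulFlush() above: while "flushing" is true, records sent by sendRecords() are
     * tracked in outstandingMessagesBacklog instead of outstandingMessages, so the committer can
     * wait for the in-flight set to drain without racing against new sends. On success the two
     * maps are simply swapped; on failure the backlog is merged back into outstandingMessages and
     * the pending offset snapshot is discarded via offsetWriter.cancelFlush().
     */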
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "WorkerSourceTask{" +
|
||||
"id=" + id +
|
||||
'}';
|
||||
}
|
||||
|
||||
protected void recordPollReturned(int numRecordsInBatch, long duration) {
|
||||
sourceTaskMetricsGroup.recordPoll(numRecordsInBatch, duration);
|
||||
}
|
||||
|
||||
SourceTaskMetricsGroup sourceTaskMetricsGroup() {
|
||||
return sourceTaskMetricsGroup;
|
||||
}
|
||||
|
||||
static class SourceRecordWriteCounter {
|
||||
private final SourceTaskMetricsGroup metricsGroup;
|
||||
private final int batchSize;
|
||||
private boolean completed = false;
|
||||
private int counter;
|
||||
public SourceRecordWriteCounter(int batchSize, SourceTaskMetricsGroup metricsGroup) {
|
||||
assert batchSize > 0;
|
||||
assert metricsGroup != null;
|
||||
this.batchSize = batchSize;
|
||||
counter = batchSize;
|
||||
this.metricsGroup = metricsGroup;
|
||||
}
|
||||
public void skipRecord() {
|
||||
if (counter > 0 && --counter == 0) {
|
||||
finishedAllWrites();
|
||||
}
|
||||
}
|
||||
public void completeRecord() {
|
||||
if (counter > 0 && --counter == 0) {
|
||||
finishedAllWrites();
|
||||
}
|
||||
}
|
||||
public void retryRemaining() {
|
||||
finishedAllWrites();
|
||||
}
|
||||
private void finishedAllWrites() {
|
||||
if (!completed) {
|
||||
metricsGroup.recordWrite(batchSize - counter);
|
||||
completed = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static class SourceTaskMetricsGroup {
|
||||
private final MetricGroup metricGroup;
|
||||
private final Sensor sourceRecordPoll;
|
||||
private final Sensor sourceRecordWrite;
|
||||
private final Sensor sourceRecordActiveCount;
|
||||
private final Sensor pollTime;
|
||||
private int activeRecordCount;
|
||||
|
||||
public SourceTaskMetricsGroup(ConnectorTaskId id, ConnectMetrics connectMetrics) {
|
||||
ConnectMetricsRegistry registry = connectMetrics.registry();
|
||||
metricGroup = connectMetrics.group(registry.sourceTaskGroupName(),
|
||||
registry.connectorTagName(), id.connector(),
|
||||
registry.taskTagName(), Integer.toString(id.task()));
|
||||
// remove any previously created metrics in this group to prevent collisions.
|
||||
metricGroup.close();
|
||||
|
||||
sourceRecordPoll = metricGroup.sensor("source-record-poll");
|
||||
sourceRecordPoll.add(metricGroup.metricName(registry.sourceRecordPollRate), new Rate());
|
||||
sourceRecordPoll.add(metricGroup.metricName(registry.sourceRecordPollTotal), new CumulativeSum());
|
||||
|
||||
sourceRecordWrite = metricGroup.sensor("source-record-write");
|
||||
sourceRecordWrite.add(metricGroup.metricName(registry.sourceRecordWriteRate), new Rate());
|
||||
sourceRecordWrite.add(metricGroup.metricName(registry.sourceRecordWriteTotal), new CumulativeSum());
|
||||
|
||||
pollTime = metricGroup.sensor("poll-batch-time");
|
||||
pollTime.add(metricGroup.metricName(registry.sourceRecordPollBatchTimeMax), new Max());
|
||||
pollTime.add(metricGroup.metricName(registry.sourceRecordPollBatchTimeAvg), new Avg());
|
||||
|
||||
sourceRecordActiveCount = metricGroup.sensor("source-record-active-count");
|
||||
sourceRecordActiveCount.add(metricGroup.metricName(registry.sourceRecordActiveCount), new Value());
|
||||
sourceRecordActiveCount.add(metricGroup.metricName(registry.sourceRecordActiveCountMax), new Max());
|
||||
sourceRecordActiveCount.add(metricGroup.metricName(registry.sourceRecordActiveCountAvg), new Avg());
|
||||
}
|
||||
|
||||
void close() {
|
||||
metricGroup.close();
|
||||
}
|
||||
|
||||
void recordPoll(int batchSize, long duration) {
|
||||
sourceRecordPoll.record(batchSize);
|
||||
pollTime.record(duration);
|
||||
activeRecordCount += batchSize;
|
||||
sourceRecordActiveCount.record(activeRecordCount);
|
||||
}
|
||||
|
||||
void recordWrite(int recordCount) {
|
||||
sourceRecordWrite.record(recordCount);
|
||||
activeRecordCount -= recordCount;
|
||||
activeRecordCount = Math.max(0, activeRecordCount);
|
||||
sourceRecordActiveCount.record(activeRecordCount);
|
||||
}
|
||||
|
||||
protected MetricGroup metricGroup() {
|
||||
return metricGroup;
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,48 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
package org.apache.kafka.connect.runtime;

import org.apache.kafka.connect.runtime.distributed.ClusterConfigState;
import org.apache.kafka.connect.source.SourceTaskContext;
import org.apache.kafka.connect.storage.OffsetStorageReader;

import java.util.Map;

public class WorkerSourceTaskContext implements SourceTaskContext {

    private final OffsetStorageReader reader;
    private final WorkerSourceTask task;
    private final ClusterConfigState configState;

    public WorkerSourceTaskContext(OffsetStorageReader reader,
                                   WorkerSourceTask task,
                                   ClusterConfigState configState) {
        this.reader = reader;
        this.task = task;
        this.configState = configState;
    }

    @Override
    public Map<String, String> configs() {
        return configState.taskConfig(task.id());
    }

    @Override
    public OffsetStorageReader offsetStorageReader() {
        return reader;
    }
}
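WorkerSourceTask hands an instance of this context to SourceTask.initialize() before calling start(),
so a connector task can read back its last committed offsets. The sketch below is illustrative only:
the class name, the "filename"/"position" keys and the "file" property are invented for the example
and are not part of this codebase.

import org.apache.kafka.connect.source.SourceRecord;
import org.apache.kafka.connect.source.SourceTask;

import java.util.Collections;
import java.util.List;
import java.util.Map;

// Hypothetical connector task: resumes from the last committed offset via the injected context.
public class FileTailSourceTask extends SourceTask {
    private Map<String, String> sourcePartition;
    private long position;

    @Override
    public String version() {
        return "0.0.1";
    }

    @Override
    public void start(Map<String, String> props) {
        sourcePartition = Collections.singletonMap("filename", props.get("file"));
        Map<String, Object> lastOffset = context.offsetStorageReader().offset(sourcePartition);
        position = lastOffset == null ? 0L : (Long) lastOffset.get("position");
    }

    @Override
    public List<SourceRecord> poll() throws InterruptedException {
        return null; // nothing new to read in this sketch
    }

    @Override
    public void stop() {
    }
}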
@@ -0,0 +1,460 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.runtime;
|
||||
|
||||
import org.apache.kafka.common.MetricName;
|
||||
import org.apache.kafka.common.MetricNameTemplate;
|
||||
import org.apache.kafka.common.metrics.Measurable;
|
||||
import org.apache.kafka.common.metrics.MetricConfig;
|
||||
import org.apache.kafka.common.metrics.Sensor;
|
||||
import org.apache.kafka.common.metrics.stats.Avg;
|
||||
import org.apache.kafka.common.metrics.stats.Frequencies;
|
||||
import org.apache.kafka.common.metrics.stats.Max;
|
||||
import org.apache.kafka.common.utils.Time;
|
||||
import org.apache.kafka.connect.runtime.AbstractStatus.State;
|
||||
import org.apache.kafka.connect.runtime.ConnectMetrics.LiteralSupplier;
|
||||
import org.apache.kafka.connect.runtime.ConnectMetrics.MetricGroup;
|
||||
import org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperator;
|
||||
import org.apache.kafka.connect.runtime.isolation.Plugins;
|
||||
import org.apache.kafka.connect.storage.StatusBackingStore;
|
||||
import org.apache.kafka.connect.util.ConnectorTaskId;
|
||||
import org.apache.kafka.connect.util.LoggingContext;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import java.util.Locale;
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
/**
|
||||
* Handles processing for an individual task. This interface only provides the basic methods
|
||||
* used by {@link Worker} to manage the tasks. Implementations combine a user-specified Task with
|
||||
* Kafka to create a data flow.
|
||||
*
|
||||
* Note on locking: since the task runs in its own thread, special care must be taken to ensure
|
||||
* that state transitions are reported correctly, in particular since some state transitions are
|
||||
* asynchronous (e.g. pause/resume). For example, changing the state to paused could cause a race
|
||||
* if the task fails at the same time. To protect from these cases, we synchronize status updates
|
||||
* using the WorkerTask's monitor.
|
||||
*/
|
||||
abstract class WorkerTask implements Runnable {
|
||||
private static final Logger log = LoggerFactory.getLogger(WorkerTask.class);
|
||||
private static final String THREAD_NAME_PREFIX = "task-thread-";
|
||||
|
||||
protected final ConnectorTaskId id;
|
||||
private final TaskStatus.Listener statusListener;
|
||||
protected final ClassLoader loader;
|
||||
protected final StatusBackingStore statusBackingStore;
|
||||
protected final Time time;
|
||||
private final CountDownLatch shutdownLatch = new CountDownLatch(1);
|
||||
private final TaskMetricsGroup taskMetricsGroup;
|
||||
private volatile TargetState targetState;
|
||||
private volatile boolean stopping; // indicates whether the Worker has asked the task to stop
|
||||
private volatile boolean cancelled; // indicates whether the Worker has cancelled the task (e.g. because of slow shutdown)
|
||||
|
||||
protected final RetryWithToleranceOperator retryWithToleranceOperator;
|
||||
|
||||
public WorkerTask(ConnectorTaskId id,
|
||||
TaskStatus.Listener statusListener,
|
||||
TargetState initialState,
|
||||
ClassLoader loader,
|
||||
ConnectMetrics connectMetrics,
|
||||
RetryWithToleranceOperator retryWithToleranceOperator,
|
||||
Time time,
|
||||
StatusBackingStore statusBackingStore) {
|
||||
this.id = id;
|
||||
this.taskMetricsGroup = new TaskMetricsGroup(this.id, connectMetrics, statusListener);
|
||||
this.statusListener = taskMetricsGroup;
|
||||
this.loader = loader;
|
||||
this.targetState = initialState;
|
||||
this.stopping = false;
|
||||
this.cancelled = false;
|
||||
this.taskMetricsGroup.recordState(this.targetState);
|
||||
this.retryWithToleranceOperator = retryWithToleranceOperator;
|
||||
this.time = time;
|
||||
this.statusBackingStore = statusBackingStore;
|
||||
}
|
||||
|
||||
public ConnectorTaskId id() {
|
||||
return id;
|
||||
}
|
||||
|
||||
public ClassLoader loader() {
|
||||
return loader;
|
||||
}
|
||||
|
||||
/**
|
||||
* Initialize the task for execution.
|
||||
*
|
||||
* @param taskConfig initial configuration
|
||||
*/
|
||||
public abstract void initialize(TaskConfig taskConfig);
|
||||
|
||||
|
||||
private void triggerStop() {
|
||||
synchronized (this) {
|
||||
stopping = true;
|
||||
|
||||
// wakeup any threads that are waiting for unpause
|
||||
this.notifyAll();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Stop this task from processing messages. This method does not block, it only triggers
|
||||
* shutdown. Use #{@link #awaitStop} to block until completion.
|
||||
*/
|
||||
public void stop() {
|
||||
triggerStop();
|
||||
}
|
||||
|
||||
/**
|
||||
* Cancel this task. This won't actually stop it, but it will prevent the state from being
|
||||
* updated when it eventually does shutdown.
|
||||
*/
|
||||
public void cancel() {
|
||||
cancelled = true;
|
||||
}
|
||||
|
||||
/**
|
||||
* Wait for this task to finish stopping.
|
||||
*
|
||||
* @param timeoutMs time in milliseconds to await stop
|
||||
* @return true if successful, false if the timeout was reached
|
||||
*/
|
||||
public boolean awaitStop(long timeoutMs) {
|
||||
try {
|
||||
return shutdownLatch.await(timeoutMs, TimeUnit.MILLISECONDS);
|
||||
} catch (InterruptedException e) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
protected abstract void execute();
|
||||
|
||||
protected abstract void close();
|
||||
|
||||
/**
|
||||
* Method called when this worker task has been completely closed, and when the subclass should clean up
|
||||
* all resources.
|
||||
*/
|
||||
protected abstract void releaseResources();
|
||||
|
||||
protected boolean isStopping() {
|
||||
return stopping;
|
||||
}
|
||||
|
||||
private void doClose() {
|
||||
try {
|
||||
close();
|
||||
} catch (Throwable t) {
|
||||
log.error("{} Task threw an uncaught and unrecoverable exception during shutdown", this, t);
|
||||
throw t;
|
||||
}
|
||||
}
|
||||
|
||||
private void doRun() throws InterruptedException {
|
||||
try {
|
||||
synchronized (this) {
|
||||
if (stopping)
|
||||
return;
|
||||
|
||||
if (targetState == TargetState.PAUSED) {
|
||||
onPause();
|
||||
if (!awaitUnpause()) return;
|
||||
}
|
||||
|
||||
statusListener.onStartup(id);
|
||||
}
|
||||
|
||||
execute();
|
||||
} catch (Throwable t) {
|
||||
log.error("{} Task threw an uncaught and unrecoverable exception", this, t);
|
||||
log.error("{} Task is being killed and will not recover until manually restarted", this);
|
||||
throw t;
|
||||
} finally {
|
||||
doClose();
|
||||
}
|
||||
}
|
||||
|
||||
private void onShutdown() {
|
||||
synchronized (this) {
|
||||
triggerStop();
|
||||
|
||||
// if we were cancelled, skip the status update since the task may have already been
|
||||
// started somewhere else
|
||||
if (!cancelled)
|
||||
statusListener.onShutdown(id);
|
||||
}
|
||||
}
|
||||
|
||||
protected void onFailure(Throwable t) {
|
||||
synchronized (this) {
|
||||
triggerStop();
|
||||
|
||||
// if we were cancelled, skip the status update since the task may have already been
|
||||
// started somewhere else
|
||||
if (!cancelled)
|
||||
statusListener.onFailure(id, t);
|
||||
}
|
||||
}
|
||||
|
||||
protected synchronized void onPause() {
|
||||
statusListener.onPause(id);
|
||||
}
|
||||
|
||||
protected synchronized void onResume() {
|
||||
statusListener.onResume(id);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void run() {
|
||||
// Clear all MDC parameters, in case this thread is being reused
|
||||
LoggingContext.clear();
|
||||
|
||||
try (LoggingContext loggingContext = LoggingContext.forTask(id())) {
|
||||
ClassLoader savedLoader = Plugins.compareAndSwapLoaders(loader);
|
||||
String savedName = Thread.currentThread().getName();
|
||||
try {
|
||||
Thread.currentThread().setName(THREAD_NAME_PREFIX + id);
|
||||
doRun();
|
||||
onShutdown();
|
||||
} catch (Throwable t) {
|
||||
onFailure(t);
|
||||
|
||||
if (t instanceof Error)
|
||||
throw (Error) t;
|
||||
} finally {
|
||||
try {
|
||||
Thread.currentThread().setName(savedName);
|
||||
Plugins.compareAndSwapLoaders(savedLoader);
|
||||
shutdownLatch.countDown();
|
||||
} finally {
|
||||
try {
|
||||
releaseResources();
|
||||
} finally {
|
||||
taskMetricsGroup.close();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public boolean shouldPause() {
|
||||
return this.targetState == TargetState.PAUSED;
|
||||
}
|
||||
|
||||
/**
|
||||
* Await task resumption.
|
||||
*
|
||||
* @return true if the task's target state is not paused, false if the task is shutdown before resumption
|
||||
* @throws InterruptedException
|
||||
*/
|
||||
protected boolean awaitUnpause() throws InterruptedException {
|
||||
synchronized (this) {
|
||||
while (targetState == TargetState.PAUSED) {
|
||||
if (stopping)
|
||||
return false;
|
||||
this.wait();
|
||||
}
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
public void transitionTo(TargetState state) {
|
||||
synchronized (this) {
|
||||
// ignore the state change if we are stopping
|
||||
if (stopping)
|
||||
return;
|
||||
|
||||
this.targetState = state;
|
||||
this.notifyAll();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Include this topic to the set of active topics for the connector that this worker task
|
||||
* is running. This information is persisted in the status backing store used by this worker.
|
||||
*
|
||||
* @param topic the topic to mark as active for this connector
|
||||
*/
|
||||
protected void recordActiveTopic(String topic) {
|
||||
if (statusBackingStore.getTopic(id.connector(), topic) != null) {
|
||||
// The topic is already recorded as active. No further action is required.
|
||||
return;
|
||||
}
|
||||
statusBackingStore.put(new TopicStatus(topic, id, time.milliseconds()));
|
||||
}
|
||||
|
||||
/**
|
||||
* Record that offsets have been committed.
|
||||
*
|
||||
* @param duration the length of time in milliseconds for the commit attempt to complete
|
||||
*/
|
||||
protected void recordCommitSuccess(long duration) {
|
||||
taskMetricsGroup.recordCommit(duration, true, null);
|
||||
}
|
||||
|
||||
/**
|
||||
* Record that offsets have been committed.
|
||||
*
|
||||
* @param duration the length of time in milliseconds for the commit attempt to complete
|
||||
* @param error the unexpected error that occurred; may be null in the case of timeouts or interruptions
|
||||
*/
|
||||
protected void recordCommitFailure(long duration, Throwable error) {
|
||||
taskMetricsGroup.recordCommit(duration, false, error);
|
||||
}
|
||||
|
||||
/**
|
||||
* Record that a batch of records has been processed.
|
||||
*
|
||||
* @param size the number of records in the batch
|
||||
*/
|
||||
protected void recordBatch(int size) {
|
||||
taskMetricsGroup.recordBatch(size);
|
||||
}
|
||||
|
||||
TaskMetricsGroup taskMetricsGroup() {
|
||||
return taskMetricsGroup;
|
||||
}
|
||||
|
||||
static class TaskMetricsGroup implements TaskStatus.Listener {
|
||||
private final TaskStatus.Listener delegateListener;
|
||||
private final MetricGroup metricGroup;
|
||||
private final Time time;
|
||||
private final StateTracker taskStateTimer;
|
||||
private final Sensor commitTime;
|
||||
private final Sensor batchSize;
|
||||
private final Sensor commitAttempts;
|
||||
|
||||
public TaskMetricsGroup(ConnectorTaskId id, ConnectMetrics connectMetrics, TaskStatus.Listener statusListener) {
|
||||
delegateListener = statusListener;
|
||||
time = connectMetrics.time();
|
||||
taskStateTimer = new StateTracker();
|
||||
ConnectMetricsRegistry registry = connectMetrics.registry();
|
||||
metricGroup = connectMetrics.group(registry.taskGroupName(),
|
||||
registry.connectorTagName(), id.connector(),
|
||||
registry.taskTagName(), Integer.toString(id.task()));
|
||||
// prevent collisions by removing any previously created metrics in this group.
|
||||
metricGroup.close();
|
||||
|
||||
metricGroup.addValueMetric(registry.taskStatus, new LiteralSupplier<String>() {
|
||||
@Override
|
||||
public String metricValue(long now) {
|
||||
return taskStateTimer.currentState().toString().toLowerCase(Locale.getDefault());
|
||||
}
|
||||
});
|
||||
|
||||
addRatioMetric(State.RUNNING, registry.taskRunningRatio);
|
||||
addRatioMetric(State.PAUSED, registry.taskPauseRatio);
|
||||
|
||||
commitTime = metricGroup.sensor("commit-time");
|
||||
commitTime.add(metricGroup.metricName(registry.taskCommitTimeMax), new Max());
|
||||
commitTime.add(metricGroup.metricName(registry.taskCommitTimeAvg), new Avg());
|
||||
|
||||
batchSize = metricGroup.sensor("batch-size");
|
||||
batchSize.add(metricGroup.metricName(registry.taskBatchSizeMax), new Max());
|
||||
batchSize.add(metricGroup.metricName(registry.taskBatchSizeAvg), new Avg());
|
||||
|
||||
MetricName offsetCommitFailures = metricGroup.metricName(registry.taskCommitFailurePercentage);
|
||||
MetricName offsetCommitSucceeds = metricGroup.metricName(registry.taskCommitSuccessPercentage);
|
||||
Frequencies commitFrequencies = Frequencies.forBooleanValues(offsetCommitFailures, offsetCommitSucceeds);
|
||||
commitAttempts = metricGroup.sensor("offset-commit-completion");
|
||||
commitAttempts.add(commitFrequencies);
|
||||
}
|
||||
|
||||
private void addRatioMetric(final State matchingState, MetricNameTemplate template) {
|
||||
MetricName metricName = metricGroup.metricName(template);
|
||||
if (metricGroup.metrics().metric(metricName) == null) {
|
||||
metricGroup.metrics().addMetric(metricName, new Measurable() {
|
||||
@Override
|
||||
public double measure(MetricConfig config, long now) {
|
||||
return taskStateTimer.durationRatio(matchingState, now);
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
void close() {
|
||||
metricGroup.close();
|
||||
}
|
||||
|
||||
void recordCommit(long duration, boolean success, Throwable error) {
|
||||
if (success) {
|
||||
commitTime.record(duration);
|
||||
commitAttempts.record(1.0d);
|
||||
} else {
|
||||
commitAttempts.record(0.0d);
|
||||
}
|
||||
}
|
||||
|
||||
void recordBatch(int size) {
|
||||
batchSize.record(size);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onStartup(ConnectorTaskId id) {
|
||||
taskStateTimer.changeState(State.RUNNING, time.milliseconds());
|
||||
delegateListener.onStartup(id);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(ConnectorTaskId id, Throwable cause) {
|
||||
taskStateTimer.changeState(State.FAILED, time.milliseconds());
|
||||
delegateListener.onFailure(id, cause);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onPause(ConnectorTaskId id) {
|
||||
taskStateTimer.changeState(State.PAUSED, time.milliseconds());
|
||||
delegateListener.onPause(id);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onResume(ConnectorTaskId id) {
|
||||
taskStateTimer.changeState(State.RUNNING, time.milliseconds());
|
||||
delegateListener.onResume(id);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onShutdown(ConnectorTaskId id) {
|
||||
taskStateTimer.changeState(State.UNASSIGNED, time.milliseconds());
|
||||
delegateListener.onShutdown(id);
|
||||
}
|
||||
|
||||
public void recordState(TargetState state) {
|
||||
switch (state) {
|
||||
case STARTED:
|
||||
taskStateTimer.changeState(State.RUNNING, time.milliseconds());
|
||||
break;
|
||||
case PAUSED:
|
||||
taskStateTimer.changeState(State.PAUSED, time.milliseconds());
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
public State state() {
|
||||
return taskStateTimer.currentState();
|
||||
}
|
||||
|
||||
protected MetricGroup metricGroup() {
|
||||
return metricGroup;
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,282 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.runtime.distributed;
|
||||
|
||||
import org.apache.kafka.common.config.provider.ConfigProvider;
|
||||
import org.apache.kafka.connect.runtime.SessionKey;
|
||||
import org.apache.kafka.connect.runtime.WorkerConfigTransformer;
|
||||
import org.apache.kafka.connect.runtime.TargetState;
|
||||
import org.apache.kafka.connect.util.ConnectorTaskId;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.LinkedList;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Objects;
|
||||
import java.util.Set;
|
||||
import java.util.TreeMap;
|
||||
|
||||
/**
|
||||
* An immutable snapshot of the configuration state of connectors and tasks in a Kafka Connect cluster.
|
||||
*/
|
||||
public class ClusterConfigState {
|
||||
public static final long NO_OFFSET = -1;
|
||||
public static final ClusterConfigState EMPTY = new ClusterConfigState(
|
||||
NO_OFFSET,
|
||||
null,
|
||||
Collections.<String, Integer>emptyMap(),
|
||||
Collections.<String, Map<String, String>>emptyMap(),
|
||||
Collections.<String, TargetState>emptyMap(),
|
||||
Collections.<ConnectorTaskId, Map<String, String>>emptyMap(),
|
||||
Collections.<String>emptySet());
|
||||
|
||||
private final long offset;
|
||||
private final SessionKey sessionKey;
|
||||
private final Map<String, Integer> connectorTaskCounts;
|
||||
private final Map<String, Map<String, String>> connectorConfigs;
|
||||
private final Map<String, TargetState> connectorTargetStates;
|
||||
private final Map<ConnectorTaskId, Map<String, String>> taskConfigs;
|
||||
private final Set<String> inconsistentConnectors;
|
||||
private final WorkerConfigTransformer configTransformer;
|
||||
|
||||
public ClusterConfigState(long offset,
|
||||
SessionKey sessionKey,
|
||||
Map<String, Integer> connectorTaskCounts,
|
||||
Map<String, Map<String, String>> connectorConfigs,
|
||||
Map<String, TargetState> connectorTargetStates,
|
||||
Map<ConnectorTaskId, Map<String, String>> taskConfigs,
|
||||
Set<String> inconsistentConnectors) {
|
||||
this(offset,
|
||||
sessionKey,
|
||||
connectorTaskCounts,
|
||||
connectorConfigs,
|
||||
connectorTargetStates,
|
||||
taskConfigs,
|
||||
inconsistentConnectors,
|
||||
null);
|
||||
}
|
||||
|
||||
public ClusterConfigState(long offset,
|
||||
SessionKey sessionKey,
|
||||
Map<String, Integer> connectorTaskCounts,
|
||||
Map<String, Map<String, String>> connectorConfigs,
|
||||
Map<String, TargetState> connectorTargetStates,
|
||||
Map<ConnectorTaskId, Map<String, String>> taskConfigs,
|
||||
Set<String> inconsistentConnectors,
|
||||
WorkerConfigTransformer configTransformer) {
|
||||
this.offset = offset;
|
||||
this.sessionKey = sessionKey;
|
||||
this.connectorTaskCounts = connectorTaskCounts;
|
||||
this.connectorConfigs = connectorConfigs;
|
||||
this.connectorTargetStates = connectorTargetStates;
|
||||
this.taskConfigs = taskConfigs;
|
||||
this.inconsistentConnectors = inconsistentConnectors;
|
||||
this.configTransformer = configTransformer;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the last offset read to generate this config state. This offset is not guaranteed to be perfectly consistent
|
||||
* with the recorded state because some partial updates to task configs may have been read.
|
||||
* @return the latest config offset
|
||||
*/
|
||||
public long offset() {
|
||||
return offset;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the latest session key from the config state
|
||||
* @return the {@link SessionKey session key}; may be null if no key has been read yet
|
||||
*/
|
||||
public SessionKey sessionKey() {
|
||||
return sessionKey;
|
||||
}
|
||||
|
||||
/**
|
||||
* Check whether this snapshot contains configuration for a connector.
|
||||
* @param connector name of the connector
|
||||
* @return true if this state contains configuration for the connector, false otherwise
|
||||
*/
|
||||
public boolean contains(String connector) {
|
||||
return connectorConfigs.containsKey(connector);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get a list of the connectors in this configuration
|
||||
*/
|
||||
public Set<String> connectors() {
|
||||
return connectorConfigs.keySet();
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the configuration for a connector. The configuration will have been transformed by
|
||||
* {@link org.apache.kafka.common.config.ConfigTransformer} by having all variable
|
||||
* references replaced with the current values from external instances of
|
||||
* {@link ConfigProvider}, and may include secrets.
|
||||
* @param connector name of the connector
|
||||
* @return a map containing configuration parameters
|
||||
*/
|
||||
public Map<String, String> connectorConfig(String connector) {
|
||||
Map<String, String> configs = connectorConfigs.get(connector);
|
||||
if (configTransformer != null) {
|
||||
configs = configTransformer.transform(connector, configs);
|
||||
}
|
||||
return configs;
|
||||
}
|
||||
|
||||
public Map<String, String> rawConnectorConfig(String connector) {
|
||||
return connectorConfigs.get(connector);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the target state of the connector
|
||||
* @param connector name of the connector
|
||||
* @return the target state
|
||||
*/
|
||||
public TargetState targetState(String connector) {
|
||||
return connectorTargetStates.get(connector);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the configuration for a task. The configuration will have been transformed by
|
||||
* {@link org.apache.kafka.common.config.ConfigTransformer} by having all variable
|
||||
* references replaced with the current values from external instances of
|
||||
* {@link ConfigProvider}, and may include secrets.
|
||||
* @param task id of the task
|
||||
* @return a map containing configuration parameters
|
||||
*/
|
||||
public Map<String, String> taskConfig(ConnectorTaskId task) {
|
||||
Map<String, String> configs = taskConfigs.get(task);
|
||||
if (configTransformer != null) {
|
||||
configs = configTransformer.transform(task.connector(), configs);
|
||||
}
|
||||
return configs;
|
||||
}
|
||||
|
||||
public Map<String, String> rawTaskConfig(ConnectorTaskId task) {
|
||||
return taskConfigs.get(task);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get all task configs for a connector. The configurations will have been transformed by
|
||||
* {@link org.apache.kafka.common.config.ConfigTransformer} by having all variable
|
||||
* references replaced with the current values from external instances of
|
||||
* {@link ConfigProvider}, and may include secrets.
|
||||
* @param connector name of the connector
|
||||
* @return a list of task configurations
|
||||
*/
|
||||
public List<Map<String, String>> allTaskConfigs(String connector) {
|
||||
Map<Integer, Map<String, String>> taskConfigs = new TreeMap<>();
|
||||
for (Map.Entry<ConnectorTaskId, Map<String, String>> taskConfigEntry : this.taskConfigs.entrySet()) {
|
||||
if (taskConfigEntry.getKey().connector().equals(connector)) {
|
||||
Map<String, String> configs = taskConfigEntry.getValue();
|
||||
if (configTransformer != null) {
|
||||
configs = configTransformer.transform(connector, configs);
|
||||
}
|
||||
taskConfigs.put(taskConfigEntry.getKey().task(), configs);
|
||||
}
|
||||
}
|
||||
return new LinkedList<>(taskConfigs.values());
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the number of tasks assigned for the given connector.
|
||||
* @param connectorName name of the connector to look up tasks for
|
||||
* @return the number of tasks
|
||||
*/
|
||||
public int taskCount(String connectorName) {
|
||||
Integer count = connectorTaskCounts.get(connectorName);
|
||||
return count == null ? 0 : count;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the current set of task IDs for the specified connector.
|
||||
* @param connectorName the name of the connector to look up task configs for
|
||||
* @return the current set of connector task IDs
|
||||
*/
|
||||
public List<ConnectorTaskId> tasks(String connectorName) {
|
||||
if (inconsistentConnectors.contains(connectorName))
|
||||
return Collections.emptyList();
|
||||
|
||||
Integer numTasks = connectorTaskCounts.get(connectorName);
|
||||
if (numTasks == null)
|
||||
return Collections.emptyList();
|
||||
|
||||
List<ConnectorTaskId> taskIds = new ArrayList<>();
|
||||
for (int taskIndex = 0; taskIndex < numTasks; taskIndex++) {
|
||||
ConnectorTaskId taskId = new ConnectorTaskId(connectorName, taskIndex);
|
||||
taskIds.add(taskId);
|
||||
}
|
||||
return taskIds;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the set of connectors which have inconsistent data in this snapshot. These inconsistencies can occur due to
|
||||
* partially completed writes combined with log compaction.
|
||||
*
|
||||
* Connectors in this set will appear in the output of {@link #connectors()} since their connector configuration is
|
||||
* available, but not in the output of {@link #taskConfig(ConnectorTaskId)} since the task configs are incomplete.
|
||||
*
|
||||
* When a worker detects a connector in this state, it should request that the connector regenerate its task
|
||||
* configurations.
|
||||
*
|
||||
* @return the set of inconsistent connectors
|
||||
*/
|
||||
public Set<String> inconsistentConnectors() {
|
||||
return inconsistentConnectors;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "ClusterConfigState{" +
|
||||
"offset=" + offset +
|
||||
", sessionKey=" + (sessionKey != null ? "[hidden]" : "null") +
|
||||
", connectorTaskCounts=" + connectorTaskCounts +
|
||||
", connectorConfigs=" + connectorConfigs +
|
||||
", taskConfigs=" + taskConfigs +
|
||||
", inconsistentConnectors=" + inconsistentConnectors +
|
||||
'}';
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (this == o) return true;
|
||||
if (o == null || getClass() != o.getClass()) return false;
|
||||
ClusterConfigState that = (ClusterConfigState) o;
|
||||
return offset == that.offset &&
|
||||
Objects.equals(sessionKey, that.sessionKey) &&
|
||||
Objects.equals(connectorTaskCounts, that.connectorTaskCounts) &&
|
||||
Objects.equals(connectorConfigs, that.connectorConfigs) &&
|
||||
Objects.equals(connectorTargetStates, that.connectorTargetStates) &&
|
||||
Objects.equals(taskConfigs, that.taskConfigs) &&
|
||||
Objects.equals(inconsistentConnectors, that.inconsistentConnectors) &&
|
||||
Objects.equals(configTransformer, that.configTransformer);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return Objects.hash(
|
||||
offset,
|
||||
sessionKey,
|
||||
connectorTaskCounts,
|
||||
connectorConfigs,
|
||||
connectorTargetStates,
|
||||
taskConfigs,
|
||||
inconsistentConnectors,
|
||||
configTransformer);
|
||||
}
|
||||
}
|
||||
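A brief, illustrative use of the snapshot API above. The connector name, config values and offset are
invented for the example; in the real herder these maps are populated from the internal config topic.

import org.apache.kafka.connect.runtime.TargetState;
import org.apache.kafka.connect.runtime.distributed.ClusterConfigState;
import org.apache.kafka.connect.util.ConnectorTaskId;

import java.util.Collections;
import java.util.Map;

public class ClusterConfigStateExample {
    public static void main(String[] args) {
        Map<String, String> connectorConfig = Collections.singletonMap("tasks.max", "2");
        ClusterConfigState state = new ClusterConfigState(
                42L,                                                         // config offset
                null,                                                        // no session key yet
                Collections.singletonMap("demo-connector", 2),               // task counts
                Collections.singletonMap("demo-connector", connectorConfig), // connector configs
                Collections.singletonMap("demo-connector", TargetState.STARTED),
                Collections.singletonMap(new ConnectorTaskId("demo-connector", 0),
                        Collections.singletonMap("task.id", "0")),
                Collections.emptySet());

        System.out.println(state.connectors());                // [demo-connector]
        System.out.println(state.tasks("demo-connector"));     // two task IDs derived from the count
        System.out.println(state.taskCount("demo-connector")); // 2
    }
}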
@@ -0,0 +1,43 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
package org.apache.kafka.connect.runtime.distributed;

import org.apache.kafka.common.message.JoinGroupResponseData;

import java.nio.ByteBuffer;
import java.util.List;
import java.util.Map;

/**
 * An assignor that computes a distribution of connectors and tasks among the workers of the group
 * that performs rebalancing.
 */
public interface ConnectAssignor {
    /**
     * Based on the member metadata and the information stored in the worker coordinator this
     * method computes an assignment of connectors and tasks among the members of the worker group.
     *
     * @param leaderId the leader of the group
     * @param protocol the protocol type; for Connect assignors this is normally "connect"
     * @param allMemberMetadata the metadata of all the active workers of the group
     * @param coordinator the worker coordinator that runs this assignor
     * @return the assignment of connectors and tasks to workers
     */
    Map<String, ByteBuffer> performAssignment(String leaderId, String protocol,
                                              List<JoinGroupResponseData.JoinGroupResponseMember> allMemberMetadata,
                                              WorkerCoordinator coordinator);
}
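The runtime provides its own assignors for this interface; the class below is only a hedged sketch of
the method shape, not a usable assignment strategy. It compiles against the interface but assigns no
connectors or tasks, and the class name is hypothetical.

import org.apache.kafka.common.message.JoinGroupResponseData;
import org.apache.kafka.connect.runtime.distributed.ConnectAssignor;
import org.apache.kafka.connect.runtime.distributed.WorkerCoordinator;

import java.nio.ByteBuffer;
import java.util.Collections;
import java.util.List;
import java.util.Map;

// Toy assignor: satisfies the interface but hands out nothing to any member.
public class NoopConnectAssignor implements ConnectAssignor {
    @Override
    public Map<String, ByteBuffer> performAssignment(String leaderId, String protocol,
                                                     List<JoinGroupResponseData.JoinGroupResponseMember> allMemberMetadata,
                                                     WorkerCoordinator coordinator) {
        // A real assignor would read each member's metadata and the coordinator's config snapshot here,
        // then serialize one assignment per member ID.
        return Collections.emptyMap();
    }
}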
@@ -0,0 +1,407 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.runtime.distributed;
|
||||
|
||||
import org.apache.kafka.common.protocol.types.ArrayOf;
|
||||
import org.apache.kafka.common.protocol.types.Field;
|
||||
import org.apache.kafka.common.protocol.types.Schema;
|
||||
import org.apache.kafka.common.protocol.types.SchemaException;
|
||||
import org.apache.kafka.common.protocol.types.Struct;
|
||||
import org.apache.kafka.common.protocol.types.Type;
|
||||
import org.apache.kafka.connect.util.ConnectorTaskId;
|
||||
|
||||
import java.nio.ByteBuffer;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.HashSet;
|
||||
import java.util.LinkedHashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Objects;
|
||||
|
||||
import static org.apache.kafka.common.message.JoinGroupRequestData.JoinGroupRequestProtocol;
|
||||
import static org.apache.kafka.common.message.JoinGroupRequestData.JoinGroupRequestProtocolCollection;
|
||||
import static org.apache.kafka.connect.runtime.distributed.ConnectProtocolCompatibility.EAGER;
|
||||
|
||||
/**
|
||||
* This class implements the protocol for Kafka Connect workers in a group. It includes the format of worker state used when
|
||||
* joining the group and distributing assignments, and the format of assignments of connectors and tasks to workers.
|
||||
*/
|
||||
public class ConnectProtocol {
|
||||
public static final String VERSION_KEY_NAME = "version";
|
||||
public static final String URL_KEY_NAME = "url";
|
||||
public static final String CONFIG_OFFSET_KEY_NAME = "config-offset";
|
||||
public static final String CONNECTOR_KEY_NAME = "connector";
|
||||
public static final String LEADER_KEY_NAME = "leader";
|
||||
public static final String LEADER_URL_KEY_NAME = "leader-url";
|
||||
public static final String ERROR_KEY_NAME = "error";
|
||||
public static final String TASKS_KEY_NAME = "tasks";
|
||||
public static final String ASSIGNMENT_KEY_NAME = "assignment";
|
||||
public static final int CONNECTOR_TASK = -1;
|
||||
|
||||
public static final short CONNECT_PROTOCOL_V0 = 0;
|
||||
public static final Schema CONNECT_PROTOCOL_HEADER_SCHEMA = new Schema(
|
||||
new Field(VERSION_KEY_NAME, Type.INT16));
|
||||
|
||||
/**
|
||||
* Connect Protocol Header V0:
|
||||
* <pre>
|
||||
* Version => Int16
|
||||
* </pre>
|
||||
*/
|
||||
private static final Struct CONNECT_PROTOCOL_HEADER_V0 = new Struct(CONNECT_PROTOCOL_HEADER_SCHEMA)
|
||||
.set(VERSION_KEY_NAME, CONNECT_PROTOCOL_V0);
|
||||
|
||||
/**
|
||||
* Config State V0:
|
||||
* <pre>
|
||||
* Url => [String]
|
||||
* ConfigOffset => Int64
|
||||
* </pre>
|
||||
*/
|
||||
public static final Schema CONFIG_STATE_V0 = new Schema(
|
||||
new Field(URL_KEY_NAME, Type.STRING),
|
||||
new Field(CONFIG_OFFSET_KEY_NAME, Type.INT64));
|
||||
|
||||
/**
|
||||
* Connector Assignment V0:
|
||||
* <pre>
|
||||
* Connector => [String]
|
||||
* Tasks => [Int32]
|
||||
* </pre>
|
||||
*
|
||||
* <p>Assignments for each worker are a set of connectors and tasks. These are categorized by
|
||||
* connector ID. A sentinel task ID (CONNECTOR_TASK) is used to indicate the connector itself
|
||||
* (i.e. that the assignment includes responsibility for running the Connector instance in
|
||||
* addition to any tasks it generates).</p>
|
||||
*/
|
||||
public static final Schema CONNECTOR_ASSIGNMENT_V0 = new Schema(
|
||||
new Field(CONNECTOR_KEY_NAME, Type.STRING),
|
||||
new Field(TASKS_KEY_NAME, new ArrayOf(Type.INT32)));
|
||||
|
||||
/**
|
||||
* Assignment V0:
|
||||
* <pre>
|
||||
* Error => Int16
|
||||
* Leader => [String]
|
||||
* LeaderUrl => [String]
|
||||
* ConfigOffset => Int64
|
||||
* Assignment => [Connector Assignment]
|
||||
* </pre>
|
||||
*/
|
||||
public static final Schema ASSIGNMENT_V0 = new Schema(
|
||||
new Field(ERROR_KEY_NAME, Type.INT16),
|
||||
new Field(LEADER_KEY_NAME, Type.STRING),
|
||||
new Field(LEADER_URL_KEY_NAME, Type.STRING),
|
||||
new Field(CONFIG_OFFSET_KEY_NAME, Type.INT64),
|
||||
new Field(ASSIGNMENT_KEY_NAME, new ArrayOf(CONNECTOR_ASSIGNMENT_V0)));
|
||||
|
||||
/**
|
||||
* The fields are serialized in sequence as follows:
|
||||
* Subscription V0:
|
||||
* <pre>
|
||||
* Version => Int16
|
||||
* Url => [String]
|
||||
* ConfigOffset => Int64
|
||||
* </pre>
|
||||
*
|
||||
* @param workerState the current state of the worker metadata
|
||||
* @return the serialized state of the worker metadata
|
||||
*/
|
||||
public static ByteBuffer serializeMetadata(WorkerState workerState) {
|
||||
Struct struct = new Struct(CONFIG_STATE_V0);
|
||||
struct.set(URL_KEY_NAME, workerState.url());
|
||||
struct.set(CONFIG_OFFSET_KEY_NAME, workerState.offset());
|
||||
ByteBuffer buffer = ByteBuffer.allocate(CONNECT_PROTOCOL_HEADER_V0.sizeOf() + CONFIG_STATE_V0.sizeOf(struct));
|
||||
CONNECT_PROTOCOL_HEADER_V0.writeTo(buffer);
|
||||
CONFIG_STATE_V0.write(buffer, struct);
|
||||
buffer.flip();
|
||||
return buffer;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the collection of Connect protocols that are supported by this version along
|
||||
* with their serialized metadata. The protocols are ordered by preference.
|
||||
*
|
||||
* @param workerState the current state of the worker metadata
|
||||
* @return the collection of Connect protocol metadata
|
||||
*/
|
||||
public static JoinGroupRequestProtocolCollection metadataRequest(WorkerState workerState) {
|
||||
return new JoinGroupRequestProtocolCollection(Collections.singleton(
|
||||
new JoinGroupRequestProtocol()
|
||||
.setName(EAGER.protocol())
|
||||
.setMetadata(ConnectProtocol.serializeMetadata(workerState).array()))
|
||||
.iterator());
|
||||
}
|
||||
|
||||
/**
|
||||
* Given a byte buffer that contains protocol metadata return the deserialized form of the
|
||||
* metadata.
|
||||
*
|
||||
* @param buffer A buffer containing the protocols metadata
|
||||
* @return the deserialized metadata
|
||||
* @throws SchemaException on incompatible Connect protocol version
|
||||
*/
|
||||
public static WorkerState deserializeMetadata(ByteBuffer buffer) {
|
||||
Struct header = CONNECT_PROTOCOL_HEADER_SCHEMA.read(buffer);
|
||||
Short version = header.getShort(VERSION_KEY_NAME);
|
||||
checkVersionCompatibility(version);
|
||||
Struct struct = CONFIG_STATE_V0.read(buffer);
|
||||
long configOffset = struct.getLong(CONFIG_OFFSET_KEY_NAME);
|
||||
String url = struct.getString(URL_KEY_NAME);
|
||||
return new WorkerState(url, configOffset);
|
||||
}
|
||||
|
||||
/**
|
||||
* The fields are serialized in sequence as follows:
|
||||
* Complete Assignment V0:
|
||||
* <pre>
|
||||
* Version => Int16
|
||||
* Error => Int16
|
||||
* Leader => [String]
|
||||
* LeaderUrl => [String]
|
||||
* ConfigOffset => Int64
|
||||
* Assignment => [Connector Assignment]
|
||||
* </pre>
|
||||
*/
|
||||
public static ByteBuffer serializeAssignment(Assignment assignment) {
|
||||
Struct struct = new Struct(ASSIGNMENT_V0);
|
||||
struct.set(ERROR_KEY_NAME, assignment.error());
|
||||
struct.set(LEADER_KEY_NAME, assignment.leader());
|
||||
struct.set(LEADER_URL_KEY_NAME, assignment.leaderUrl());
|
||||
struct.set(CONFIG_OFFSET_KEY_NAME, assignment.offset());
|
||||
List<Struct> taskAssignments = new ArrayList<>();
|
||||
for (Map.Entry<String, Collection<Integer>> connectorEntry : assignment.asMap().entrySet()) {
|
||||
Struct taskAssignment = new Struct(CONNECTOR_ASSIGNMENT_V0);
|
||||
taskAssignment.set(CONNECTOR_KEY_NAME, connectorEntry.getKey());
|
||||
Collection<Integer> tasks = connectorEntry.getValue();
|
||||
taskAssignment.set(TASKS_KEY_NAME, tasks.toArray());
|
||||
taskAssignments.add(taskAssignment);
|
||||
}
|
||||
struct.set(ASSIGNMENT_KEY_NAME, taskAssignments.toArray());
|
||||
|
||||
ByteBuffer buffer = ByteBuffer.allocate(CONNECT_PROTOCOL_HEADER_V0.sizeOf() + ASSIGNMENT_V0.sizeOf(struct));
|
||||
CONNECT_PROTOCOL_HEADER_V0.writeTo(buffer);
|
||||
ASSIGNMENT_V0.write(buffer, struct);
|
||||
buffer.flip();
|
||||
return buffer;
|
||||
}
|
||||
|
||||
/**
|
||||
* Given a byte buffer that contains an assignment as defined by this protocol, return the
|
||||
* deserialized form of the assignment.
|
||||
*
|
||||
* @param buffer the buffer containing a serialized assignment
|
||||
* @return the deserialized assignment
|
||||
* @throws SchemaException on incompatible Connect protocol version
|
||||
*/
|
||||
public static Assignment deserializeAssignment(ByteBuffer buffer) {
|
||||
Struct header = CONNECT_PROTOCOL_HEADER_SCHEMA.read(buffer);
|
||||
Short version = header.getShort(VERSION_KEY_NAME);
|
||||
checkVersionCompatibility(version);
|
||||
Struct struct = ASSIGNMENT_V0.read(buffer);
|
||||
short error = struct.getShort(ERROR_KEY_NAME);
|
||||
String leader = struct.getString(LEADER_KEY_NAME);
|
||||
String leaderUrl = struct.getString(LEADER_URL_KEY_NAME);
|
||||
long offset = struct.getLong(CONFIG_OFFSET_KEY_NAME);
|
||||
List<String> connectorIds = new ArrayList<>();
|
||||
List<ConnectorTaskId> taskIds = new ArrayList<>();
|
||||
for (Object structObj : struct.getArray(ASSIGNMENT_KEY_NAME)) {
|
||||
Struct assignment = (Struct) structObj;
|
||||
String connector = assignment.getString(CONNECTOR_KEY_NAME);
|
||||
for (Object taskIdObj : assignment.getArray(TASKS_KEY_NAME)) {
|
||||
Integer taskId = (Integer) taskIdObj;
|
||||
if (taskId == CONNECTOR_TASK)
|
||||
connectorIds.add(connector);
|
||||
else
|
||||
taskIds.add(new ConnectorTaskId(connector, taskId));
|
||||
}
|
||||
}
|
||||
return new Assignment(error, leader, leaderUrl, offset, connectorIds, taskIds);
|
||||
}
|
||||
|
||||
/**
|
||||
* A class that captures the deserialized form of a worker's metadata.
|
||||
*/
|
||||
public static class WorkerState {
|
||||
private final String url;
|
||||
private final long offset;
|
||||
|
||||
public WorkerState(String url, long offset) {
|
||||
this.url = url;
|
||||
this.offset = offset;
|
||||
}
|
||||
|
||||
public String url() {
|
||||
return url;
|
||||
}
|
||||
|
||||
/**
|
||||
* The most up-to-date (maximum) configuration offset known to this worker.
|
||||
*
|
||||
* @return the configuration offset
|
||||
*/
|
||||
public long offset() {
|
||||
return offset;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "WorkerState{" +
|
||||
"url='" + url + '\'' +
|
||||
", offset=" + offset +
|
||||
'}';
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* The basic assignment of connectors and tasks introduced with V0 version of the Connect protocol.
|
||||
*/
|
||||
public static class Assignment {
|
||||
public static final short NO_ERROR = 0;
|
||||
// Configuration offsets mismatched in a way that the leader could not resolve. Workers should read to the end
|
||||
// of the config log and try to re-join
|
||||
public static final short CONFIG_MISMATCH = 1;
|
||||
|
||||
private final short error;
|
||||
private final String leader;
|
||||
private final String leaderUrl;
|
||||
private final long offset;
|
||||
private final Collection<String> connectorIds;
|
||||
private final Collection<ConnectorTaskId> taskIds;
|
||||
|
||||
/**
|
||||
* Create an assignment indicating responsibility for the given connector instances and task Ids.
|
||||
*
|
||||
* @param error error code for this assignment; {@code ConnectProtocol.Assignment.NO_ERROR}
|
||||
* indicates no error during assignment
|
||||
* @param leader Connect group's leader Id; may be null only on the empty assignment
|
||||
* @param leaderUrl Connect group's leader URL; may be null only on the empty assignment
|
||||
* @param configOffset the most up-to-date configuration offset according to this assignment
|
||||
* @param connectorIds list of connectors that the worker should instantiate and run; may not be null
|
||||
* @param taskIds list of task IDs that the worker should instantiate and run; may not be null
|
||||
*/
|
||||
public Assignment(short error, String leader, String leaderUrl, long configOffset,
|
||||
Collection<String> connectorIds, Collection<ConnectorTaskId> taskIds) {
|
||||
this.error = error;
|
||||
this.leader = leader;
|
||||
this.leaderUrl = leaderUrl;
|
||||
this.offset = configOffset;
|
||||
this.connectorIds = Objects.requireNonNull(connectorIds,
|
||||
"Assigned connector IDs may be empty but not null");
|
||||
this.taskIds = Objects.requireNonNull(taskIds,
|
||||
"Assigned task IDs may be empty but not null");
|
||||
}
|
||||
|
||||
/**
|
||||
* Return the error code of this assignment; 0 signals successful assignment ({@code ConnectProtocol.Assignment.NO_ERROR}).
|
||||
*
|
||||
* @return the error code of the assignment
|
||||
*/
|
||||
public short error() {
|
||||
return error;
|
||||
}
|
||||
|
||||
/**
|
||||
* Return the ID of the leader Connect worker in this assignment.
|
||||
*
|
||||
* @return the ID of the leader
|
||||
*/
|
||||
public String leader() {
|
||||
return leader;
|
||||
}
|
||||
|
||||
/**
|
||||
* Return the URL to which the leader accepts requests from other members of the group.
|
||||
*
|
||||
* @return the leader URL
|
||||
*/
|
||||
public String leaderUrl() {
|
||||
return leaderUrl;
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if this assignment failed.
|
||||
*
|
||||
* @return true if this assignment failed; false otherwise
|
||||
*/
|
||||
public boolean failed() {
|
||||
return error != NO_ERROR;
|
||||
}
|
||||
|
||||
/**
|
||||
* Return the most up-to-date offset in the configuration topic according to this assignment
|
||||
*
|
||||
* @return the configuration offset
|
||||
*/
|
||||
public long offset() {
|
||||
return offset;
|
||||
}
|
||||
|
||||
/**
|
||||
* The connectors included in this assignment.
|
||||
*
|
||||
* @return the connectors
|
||||
*/
|
||||
public Collection<String> connectors() {
|
||||
return connectorIds;
|
||||
}
|
||||
|
||||
/**
|
||||
* The tasks included in this assignment.
|
||||
*
|
||||
* @return the tasks
|
||||
*/
|
||||
public Collection<ConnectorTaskId> tasks() {
|
||||
return taskIds;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "Assignment{" +
|
||||
"error=" + error +
|
||||
", leader='" + leader + '\'' +
|
||||
", leaderUrl='" + leaderUrl + '\'' +
|
||||
", offset=" + offset +
|
||||
", connectorIds=" + connectorIds +
|
||||
", taskIds=" + taskIds +
|
||||
'}';
|
||||
}
|
||||
|
||||
protected Map<String, Collection<Integer>> asMap() {
|
||||
// Using LinkedHashMap preserves the ordering, which is helpful for tests and debugging
|
||||
Map<String, Collection<Integer>> taskMap = new LinkedHashMap<>();
|
||||
for (String connectorId : new HashSet<>(connectorIds)) {
|
||||
taskMap.computeIfAbsent(connectorId, key -> new ArrayList<>()).add(CONNECTOR_TASK);
|
||||
}
|
||||
for (ConnectorTaskId taskId : taskIds) {
|
||||
String connectorId = taskId.connector();
|
||||
taskMap.computeIfAbsent(connectorId, key -> new ArrayList<>()).add(taskId.task());
|
||||
}
|
||||
return taskMap;
|
||||
}
|
||||
}
|
||||
|
||||
private static void checkVersionCompatibility(short version) {
|
||||
// check for invalid versions
|
||||
if (version < CONNECT_PROTOCOL_V0)
|
||||
throw new SchemaException("Unsupported subscription version: " + version);
|
||||
|
||||
// otherwise, assume versions can be parsed as V0
|
||||
}
|
||||
}
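
As a quick illustration of the V0 wire format above (editor-added sketch, not part of the original file), worker metadata and assignments round-trip through the serialize/deserialize helpers:

package org.apache.kafka.connect.runtime.distributed;

import org.apache.kafka.connect.util.ConnectorTaskId;

import java.nio.ByteBuffer;
import java.util.Collections;

public class ConnectProtocolRoundTrip {
    public static void main(String[] args) {
        // Worker metadata carries the advertised REST URL and the latest config offset seen.
        ConnectProtocol.WorkerState state = new ConnectProtocol.WorkerState("http://worker-1:8083", 42L);
        ByteBuffer metadata = ConnectProtocol.serializeMetadata(state);
        System.out.println(ConnectProtocol.deserializeMetadata(metadata));
        // -> WorkerState{url='http://worker-1:8083', offset=42}

        // An assignment that gives worker-1 the connector "db-source" plus its task 0.
        ConnectProtocol.Assignment assignment = new ConnectProtocol.Assignment(
                ConnectProtocol.Assignment.NO_ERROR, "worker-1", "http://worker-1:8083", 42L,
                Collections.singletonList("db-source"),
                Collections.singletonList(new ConnectorTaskId("db-source", 0)));
        ByteBuffer serialized = ConnectProtocol.serializeAssignment(assignment);
        ConnectProtocol.Assignment decoded = ConnectProtocol.deserializeAssignment(serialized);
        System.out.println(decoded.connectors() + " / " + decoded.tasks());
        // -> [db-source] / [db-source-0]
    }
}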
@@ -0,0 +1,148 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.connect.runtime.distributed;

import java.util.Arrays;
import java.util.Locale;

import static org.apache.kafka.connect.runtime.distributed.ConnectProtocol.CONNECT_PROTOCOL_V0;
import static org.apache.kafka.connect.runtime.distributed.IncrementalCooperativeConnectProtocol.CONNECT_PROTOCOL_V1;
import static org.apache.kafka.connect.runtime.distributed.IncrementalCooperativeConnectProtocol.CONNECT_PROTOCOL_V2;

/**
 * An enumeration of the modes available to the worker to signal which Connect protocols are
 * enabled at any time.
 *
 * {@code EAGER} signifies that this worker only supports prompt release of assigned connectors
 * and tasks in every rebalance. Corresponds to Connect protocol V0.
 *
 * {@code COMPATIBLE} signifies that this worker supports both eager and incremental cooperative
 * Connect protocols and will use the version that is elected by the Kafka broker coordinator
 * during rebalance.
 *
 * {@code SESSIONED} signifies that this worker supports all of the above protocols in addition to
 * a protocol that uses incremental cooperative rebalancing for worker assignment and uses session
 * keys distributed via the config topic to verify internal REST requests
 */
public enum ConnectProtocolCompatibility {
    EAGER {
        @Override
        public String protocol() {
            return "default";
        }

        @Override
        public short protocolVersion() {
            return CONNECT_PROTOCOL_V0;
        }
    },

    COMPATIBLE {
        @Override
        public String protocol() {
            return "compatible";
        }

        @Override
        public short protocolVersion() {
            return CONNECT_PROTOCOL_V1;
        }
    },

    SESSIONED {
        @Override
        public String protocol() {
            return "sessioned";
        }

        @Override
        public short protocolVersion() {
            return CONNECT_PROTOCOL_V2;
        }
    };

    /**
     * Return the enum that corresponds to the name that is given as an argument;
     * if no mapping is found {@code IllegalArgumentException} is thrown.
     *
     * @param name the name of the protocol compatibility mode
     * @return the enum that corresponds to the protocol compatibility mode
     */
    public static ConnectProtocolCompatibility compatibility(String name) {
        return Arrays.stream(ConnectProtocolCompatibility.values())
                .filter(mode -> mode.name().equalsIgnoreCase(name))
                .findFirst()
                .orElseThrow(() -> new IllegalArgumentException(
                        "Unknown Connect protocol compatibility mode: " + name));
    }

    /**
     * Return the enum that corresponds to the Connect protocol version that is given as an argument;
     * if no mapping is found {@code IllegalArgumentException} is thrown.
     *
     * @param protocolVersion the version of the protocol; for example,
     * {@link ConnectProtocol#CONNECT_PROTOCOL_V0 CONNECT_PROTOCOL_V0}. May not be null
     * @return the enum that corresponds to the protocol compatibility mode
     */
    public static ConnectProtocolCompatibility fromProtocolVersion(short protocolVersion) {
        switch (protocolVersion) {
            case CONNECT_PROTOCOL_V0:
                return EAGER;
            case CONNECT_PROTOCOL_V1:
                return COMPATIBLE;
            case CONNECT_PROTOCOL_V2:
                return SESSIONED;
            default:
                throw new IllegalArgumentException("Unknown Connect protocol version: " + protocolVersion);
        }
    }

    @Override
    public String toString() {
        return name().toLowerCase(Locale.ROOT);
    }

    /**
     * Return the version of the protocol for this mode.
     *
     * @return the protocol version
     */
    public abstract short protocolVersion();

    /**
     * Return the name of the protocol that this mode will use in {@code ProtocolMetadata}.
     *
     * @return the protocol name
     */
    public abstract String protocol();

    /**
     * Return the enum that corresponds to the protocol name that is given as an argument;
     * if no mapping is found {@code IllegalArgumentException} is thrown.
     *
     * @param protocolName the name of the connect protocol
     * @return the enum that corresponds to the protocol compatibility mode that supports the
     * given protocol
     */
    public static ConnectProtocolCompatibility fromProtocol(String protocolName) {
        return Arrays.stream(ConnectProtocolCompatibility.values())
                .filter(mode -> mode.protocol().equalsIgnoreCase(protocolName))
                .findFirst()
                .orElseThrow(() -> new IllegalArgumentException(
                        "Not found Connect protocol compatibility mode for protocol: " + protocolName));
    }
}
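
A short usage sketch (editor-added, not part of this patch) of the lookup helpers above:

package org.apache.kafka.connect.runtime.distributed;

public class CompatibilityLookup {
    public static void main(String[] args) {
        // The worker's connect.protocol value maps to a mode by enum name, case-insensitively.
        ConnectProtocolCompatibility mode = ConnectProtocolCompatibility.compatibility("sessioned");
        System.out.println(mode.protocol());           // sessioned
        System.out.println(mode.protocolVersion());    // CONNECT_PROTOCOL_V2

        // The protocol name or version elected during a rebalance maps back the same way.
        System.out.println(ConnectProtocolCompatibility.fromProtocol("default"));                          // eager
        System.out.println(ConnectProtocolCompatibility.fromProtocolVersion(ConnectProtocol.CONNECT_PROTOCOL_V0)); // eager
    }
}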
@@ -0,0 +1,441 @@
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.runtime.distributed;
|
||||
|
||||
import org.apache.kafka.clients.CommonClientConfigs;
|
||||
import org.apache.kafka.common.config.ConfigDef;
|
||||
import org.apache.kafka.common.config.ConfigException;
|
||||
import org.apache.kafka.common.utils.Utils;
|
||||
import org.apache.kafka.connect.runtime.WorkerConfig;
|
||||
|
||||
import javax.crypto.KeyGenerator;
|
||||
import javax.crypto.Mac;
|
||||
import java.security.InvalidParameterException;
|
||||
import java.security.NoSuchAlgorithmException;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Optional;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
import static org.apache.kafka.common.config.ConfigDef.Range.atLeast;
|
||||
import static org.apache.kafka.common.config.ConfigDef.Range.between;
|
||||
|
||||
public class DistributedConfig extends WorkerConfig {
|
||||
/*
|
||||
* NOTE: DO NOT CHANGE EITHER CONFIG STRINGS OR THEIR JAVA VARIABLE NAMES AS
|
||||
* THESE ARE PART OF THE PUBLIC API AND CHANGE WILL BREAK USER CODE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* <code>group.id</code>
|
||||
*/
|
||||
public static final String GROUP_ID_CONFIG = CommonClientConfigs.GROUP_ID_CONFIG;
|
||||
private static final String GROUP_ID_DOC = "A unique string that identifies the Connect cluster group this worker belongs to.";
|
||||
|
||||
/**
|
||||
* <code>session.timeout.ms</code>
|
||||
*/
|
||||
public static final String SESSION_TIMEOUT_MS_CONFIG = CommonClientConfigs.SESSION_TIMEOUT_MS_CONFIG;
|
||||
private static final String SESSION_TIMEOUT_MS_DOC = "The timeout used to detect worker failures. " +
|
||||
"The worker sends periodic heartbeats to indicate its liveness to the broker. If no heartbeats are " +
|
||||
"received by the broker before the expiration of this session timeout, then the broker will remove the " +
|
||||
"worker from the group and initiate a rebalance. Note that the value must be in the allowable range as " +
|
||||
"configured in the broker configuration by <code>group.min.session.timeout.ms</code> " +
|
||||
"and <code>group.max.session.timeout.ms</code>.";
|
||||
|
||||
/**
|
||||
* <code>heartbeat.interval.ms</code>
|
||||
*/
|
||||
public static final String HEARTBEAT_INTERVAL_MS_CONFIG = CommonClientConfigs.HEARTBEAT_INTERVAL_MS_CONFIG;
|
||||
private static final String HEARTBEAT_INTERVAL_MS_DOC = "The expected time between heartbeats to the group " +
|
||||
"coordinator when using Kafka's group management facilities. Heartbeats are used to ensure that the " +
|
||||
"worker's session stays active and to facilitate rebalancing when new members join or leave the group. " +
|
||||
"The value must be set lower than <code>session.timeout.ms</code>, but typically should be set no higher " +
|
||||
"than 1/3 of that value. It can be adjusted even lower to control the expected time for normal rebalances.";
|
||||
|
||||
/**
|
||||
* <code>rebalance.timeout.ms</code>
|
||||
*/
|
||||
public static final String REBALANCE_TIMEOUT_MS_CONFIG = CommonClientConfigs.REBALANCE_TIMEOUT_MS_CONFIG;
|
||||
private static final String REBALANCE_TIMEOUT_MS_DOC = CommonClientConfigs.REBALANCE_TIMEOUT_MS_DOC;
|
||||
|
||||
/**
|
||||
* <code>worker.sync.timeout.ms</code>
|
||||
*/
|
||||
public static final String WORKER_SYNC_TIMEOUT_MS_CONFIG = "worker.sync.timeout.ms";
|
||||
private static final String WORKER_SYNC_TIMEOUT_MS_DOC = "When the worker is out of sync with other workers and needs" +
|
||||
" to resynchronize configurations, wait up to this amount of time before giving up, leaving the group, and" +
|
||||
" waiting a backoff period before rejoining.";
|
||||
|
||||
/**
|
||||
* <code>group.unsync.timeout.ms</code>
|
||||
*/
|
||||
public static final String WORKER_UNSYNC_BACKOFF_MS_CONFIG = "worker.unsync.backoff.ms";
|
||||
private static final String WORKER_UNSYNC_BACKOFF_MS_DOC = "When the worker is out of sync with other workers and " +
|
||||
" fails to catch up within worker.sync.timeout.ms, leave the Connect cluster for this long before rejoining.";
|
||||
public static final int WORKER_UNSYNC_BACKOFF_MS_DEFAULT = 5 * 60 * 1000;
|
||||
|
||||
/**
|
||||
* <code>offset.storage.topic</code>
|
||||
*/
|
||||
public static final String OFFSET_STORAGE_TOPIC_CONFIG = "offset.storage.topic";
|
||||
private static final String OFFSET_STORAGE_TOPIC_CONFIG_DOC = "The name of the Kafka topic where connector offsets are stored";
|
||||
|
||||
/**
|
||||
* <code>offset.storage.partitions</code>
|
||||
*/
|
||||
public static final String OFFSET_STORAGE_PARTITIONS_CONFIG = "offset.storage.partitions";
|
||||
private static final String OFFSET_STORAGE_PARTITIONS_CONFIG_DOC = "The number of partitions used when creating the offset storage topic";
|
||||
|
||||
/**
|
||||
* <code>offset.storage.replication.factor</code>
|
||||
*/
|
||||
public static final String OFFSET_STORAGE_REPLICATION_FACTOR_CONFIG = "offset.storage.replication.factor";
|
||||
private static final String OFFSET_STORAGE_REPLICATION_FACTOR_CONFIG_DOC = "Replication factor used when creating the offset storage topic";
|
||||
|
||||
/**
|
||||
* <code>config.storage.topic</code>
|
||||
*/
|
||||
public static final String CONFIG_TOPIC_CONFIG = "config.storage.topic";
|
||||
private static final String CONFIG_TOPIC_CONFIG_DOC = "The name of the Kafka topic where connector configurations are stored";
|
||||
|
||||
/**
|
||||
* <code>config.storage.replication.factor</code>
|
||||
*/
|
||||
public static final String CONFIG_STORAGE_REPLICATION_FACTOR_CONFIG = "config.storage.replication.factor";
|
||||
private static final String CONFIG_STORAGE_REPLICATION_FACTOR_CONFIG_DOC = "Replication factor used when creating the configuration storage topic";
|
||||
|
||||
/**
|
||||
* <code>status.storage.topic</code>
|
||||
*/
|
||||
public static final String STATUS_STORAGE_TOPIC_CONFIG = "status.storage.topic";
|
||||
public static final String STATUS_STORAGE_TOPIC_CONFIG_DOC = "The name of the Kafka topic where connector and task status are stored";
|
||||
|
||||
/**
|
||||
* <code>status.storage.partitions</code>
|
||||
*/
|
||||
public static final String STATUS_STORAGE_PARTITIONS_CONFIG = "status.storage.partitions";
|
||||
private static final String STATUS_STORAGE_PARTITIONS_CONFIG_DOC = "The number of partitions used when creating the status storage topic";
|
||||
|
||||
/**
|
||||
* <code>status.storage.replication.factor</code>
|
||||
*/
|
||||
public static final String STATUS_STORAGE_REPLICATION_FACTOR_CONFIG = "status.storage.replication.factor";
|
||||
private static final String STATUS_STORAGE_REPLICATION_FACTOR_CONFIG_DOC = "Replication factor used when creating the status storage topic";
|
||||
|
||||
/**
|
||||
* <code>connect.protocol</code>
|
||||
*/
|
||||
public static final String CONNECT_PROTOCOL_CONFIG = "connect.protocol";
|
||||
public static final String CONNECT_PROTOCOL_DOC = "Compatibility mode for Kafka Connect Protocol";
|
||||
public static final String CONNECT_PROTOCOL_DEFAULT = ConnectProtocolCompatibility.SESSIONED.toString();
|
||||
|
||||
/**
|
||||
* <code>connect.protocol</code>
|
||||
*/
|
||||
public static final String SCHEDULED_REBALANCE_MAX_DELAY_MS_CONFIG = "scheduled.rebalance.max.delay.ms";
|
||||
public static final String SCHEDULED_REBALANCE_MAX_DELAY_MS_DOC = "The maximum delay that is "
|
||||
+ "scheduled in order to wait for the return of one or more departed workers before "
|
||||
+ "rebalancing and reassigning their connectors and tasks to the group. During this "
|
||||
+ "period the connectors and tasks of the departed workers remain unassigned";
|
||||
public static final int SCHEDULED_REBALANCE_MAX_DELAY_MS_DEFAULT = Math.toIntExact(TimeUnit.SECONDS.toMillis(300));
|
||||
|
||||
public static final String INTER_WORKER_KEY_GENERATION_ALGORITHM_CONFIG = "inter.worker.key.generation.algorithm";
|
||||
public static final String INTER_WORKER_KEY_GENERATION_ALGORITHM_DOC = "The algorithm to use for generating internal request keys";
|
||||
public static final String INTER_WORKER_KEY_GENERATION_ALGORITHM_DEFAULT = "HmacSHA256";
|
||||
|
||||
public static final String INTER_WORKER_KEY_SIZE_CONFIG = "inter.worker.key.size";
|
||||
public static final String INTER_WORKER_KEY_SIZE_DOC = "The size of the key to use for signing internal requests, in bits. "
|
||||
+ "If null, the default key size for the key generation algorithm will be used.";
|
||||
public static final Long INTER_WORKER_KEY_SIZE_DEFAULT = null;
|
||||
|
||||
public static final String INTER_WORKER_KEY_TTL_MS_CONFIG = "inter.worker.key.ttl.ms";
|
||||
public static final String INTER_WORKER_KEY_TTL_MS_MS_DOC = "The TTL of generated session keys used for "
|
||||
+ "internal request validation (in milliseconds)";
|
||||
public static final int INTER_WORKER_KEY_TTL_MS_MS_DEFAULT = Math.toIntExact(TimeUnit.HOURS.toMillis(1));
|
||||
|
||||
public static final String INTER_WORKER_SIGNATURE_ALGORITHM_CONFIG = "inter.worker.signature.algorithm";
|
||||
public static final String INTER_WORKER_SIGNATURE_ALGORITHM_DOC = "The algorithm used to sign internal requests";
|
||||
public static final String INTER_WORKER_SIGNATURE_ALGORITHM_DEFAULT = "HmacSHA256";
|
||||
|
||||
public static final String INTER_WORKER_VERIFICATION_ALGORITHMS_CONFIG = "inter.worker.verification.algorithms";
|
||||
public static final String INTER_WORKER_VERIFICATION_ALGORITHMS_DOC = "A list of permitted algorithms for verifying internal requests";
|
||||
public static final List<String> INTER_WORKER_VERIFICATION_ALGORITHMS_DEFAULT = Collections.singletonList(INTER_WORKER_SIGNATURE_ALGORITHM_DEFAULT);
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
private static final ConfigDef CONFIG = baseConfigDef()
|
||||
.define(GROUP_ID_CONFIG,
|
||||
ConfigDef.Type.STRING,
|
||||
ConfigDef.Importance.HIGH,
|
||||
GROUP_ID_DOC)
|
||||
.define(SESSION_TIMEOUT_MS_CONFIG,
|
||||
ConfigDef.Type.INT,
|
||||
Math.toIntExact(TimeUnit.SECONDS.toMillis(10)),
|
||||
ConfigDef.Importance.HIGH,
|
||||
SESSION_TIMEOUT_MS_DOC)
|
||||
.define(REBALANCE_TIMEOUT_MS_CONFIG,
|
||||
ConfigDef.Type.INT,
|
||||
Math.toIntExact(TimeUnit.MINUTES.toMillis(1)),
|
||||
ConfigDef.Importance.HIGH,
|
||||
REBALANCE_TIMEOUT_MS_DOC)
|
||||
.define(HEARTBEAT_INTERVAL_MS_CONFIG,
|
||||
ConfigDef.Type.INT,
|
||||
Math.toIntExact(TimeUnit.SECONDS.toMillis(3)),
|
||||
ConfigDef.Importance.HIGH,
|
||||
HEARTBEAT_INTERVAL_MS_DOC)
|
||||
.define(CommonClientConfigs.METADATA_MAX_AGE_CONFIG,
|
||||
ConfigDef.Type.LONG,
|
||||
TimeUnit.MINUTES.toMillis(5),
|
||||
atLeast(0),
|
||||
ConfigDef.Importance.LOW,
|
||||
CommonClientConfigs.METADATA_MAX_AGE_DOC)
|
||||
.define(CommonClientConfigs.CLIENT_ID_CONFIG,
|
||||
ConfigDef.Type.STRING,
|
||||
"",
|
||||
ConfigDef.Importance.LOW,
|
||||
CommonClientConfigs.CLIENT_ID_DOC)
|
||||
.define(CommonClientConfigs.SEND_BUFFER_CONFIG,
|
||||
ConfigDef.Type.INT,
|
||||
128 * 1024,
|
||||
atLeast(0),
|
||||
ConfigDef.Importance.MEDIUM,
|
||||
CommonClientConfigs.SEND_BUFFER_DOC)
|
||||
.define(CommonClientConfigs.RECEIVE_BUFFER_CONFIG,
|
||||
ConfigDef.Type.INT,
|
||||
32 * 1024,
|
||||
atLeast(0),
|
||||
ConfigDef.Importance.MEDIUM,
|
||||
CommonClientConfigs.RECEIVE_BUFFER_DOC)
|
||||
.define(CommonClientConfigs.RECONNECT_BACKOFF_MS_CONFIG,
|
||||
ConfigDef.Type.LONG,
|
||||
50L,
|
||||
atLeast(0L),
|
||||
ConfigDef.Importance.LOW,
|
||||
CommonClientConfigs.RECONNECT_BACKOFF_MS_DOC)
|
||||
.define(CommonClientConfigs.RECONNECT_BACKOFF_MAX_MS_CONFIG,
|
||||
ConfigDef.Type.LONG,
|
||||
TimeUnit.SECONDS.toMillis(1),
|
||||
atLeast(0L),
|
||||
ConfigDef.Importance.LOW,
|
||||
CommonClientConfigs.RECONNECT_BACKOFF_MAX_MS_DOC)
|
||||
.define(CommonClientConfigs.RETRY_BACKOFF_MS_CONFIG,
|
||||
ConfigDef.Type.LONG,
|
||||
100L,
|
||||
atLeast(0L),
|
||||
ConfigDef.Importance.LOW,
|
||||
CommonClientConfigs.RETRY_BACKOFF_MS_DOC)
|
||||
.define(CommonClientConfigs.REQUEST_TIMEOUT_MS_CONFIG,
|
||||
ConfigDef.Type.INT,
|
||||
Math.toIntExact(TimeUnit.SECONDS.toMillis(40)),
|
||||
atLeast(0),
|
||||
ConfigDef.Importance.MEDIUM,
|
||||
CommonClientConfigs.REQUEST_TIMEOUT_MS_DOC)
|
||||
/* default is set to be a bit lower than the server default (10 min), to avoid both client and server closing connection at same time */
|
||||
.define(CommonClientConfigs.CONNECTIONS_MAX_IDLE_MS_CONFIG,
|
||||
ConfigDef.Type.LONG,
|
||||
TimeUnit.MINUTES.toMillis(9),
|
||||
ConfigDef.Importance.MEDIUM,
|
||||
CommonClientConfigs.CONNECTIONS_MAX_IDLE_MS_DOC)
|
||||
// security support
|
||||
.define(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG,
|
||||
ConfigDef.Type.STRING,
|
||||
CommonClientConfigs.DEFAULT_SECURITY_PROTOCOL,
|
||||
ConfigDef.Importance.MEDIUM,
|
||||
CommonClientConfigs.SECURITY_PROTOCOL_DOC)
|
||||
.withClientSslSupport()
|
||||
.withClientSaslSupport()
|
||||
.define(WORKER_SYNC_TIMEOUT_MS_CONFIG,
|
||||
ConfigDef.Type.INT,
|
||||
3000,
|
||||
ConfigDef.Importance.MEDIUM,
|
||||
WORKER_SYNC_TIMEOUT_MS_DOC)
|
||||
.define(WORKER_UNSYNC_BACKOFF_MS_CONFIG,
|
||||
ConfigDef.Type.INT,
|
||||
WORKER_UNSYNC_BACKOFF_MS_DEFAULT,
|
||||
ConfigDef.Importance.MEDIUM,
|
||||
WORKER_UNSYNC_BACKOFF_MS_DOC)
|
||||
.define(OFFSET_STORAGE_TOPIC_CONFIG,
|
||||
ConfigDef.Type.STRING,
|
||||
ConfigDef.Importance.HIGH,
|
||||
OFFSET_STORAGE_TOPIC_CONFIG_DOC)
|
||||
.define(OFFSET_STORAGE_PARTITIONS_CONFIG,
|
||||
ConfigDef.Type.INT,
|
||||
25,
|
||||
atLeast(1),
|
||||
ConfigDef.Importance.LOW,
|
||||
OFFSET_STORAGE_PARTITIONS_CONFIG_DOC)
|
||||
.define(OFFSET_STORAGE_REPLICATION_FACTOR_CONFIG,
|
||||
ConfigDef.Type.SHORT,
|
||||
(short) 3,
|
||||
atLeast(1),
|
||||
ConfigDef.Importance.LOW,
|
||||
OFFSET_STORAGE_REPLICATION_FACTOR_CONFIG_DOC)
|
||||
.define(CONFIG_TOPIC_CONFIG,
|
||||
ConfigDef.Type.STRING,
|
||||
ConfigDef.Importance.HIGH,
|
||||
CONFIG_TOPIC_CONFIG_DOC)
|
||||
.define(CONFIG_STORAGE_REPLICATION_FACTOR_CONFIG,
|
||||
ConfigDef.Type.SHORT,
|
||||
(short) 3,
|
||||
atLeast(1),
|
||||
ConfigDef.Importance.LOW,
|
||||
CONFIG_STORAGE_REPLICATION_FACTOR_CONFIG_DOC)
|
||||
.define(STATUS_STORAGE_TOPIC_CONFIG,
|
||||
ConfigDef.Type.STRING,
|
||||
ConfigDef.Importance.HIGH,
|
||||
STATUS_STORAGE_TOPIC_CONFIG_DOC)
|
||||
.define(STATUS_STORAGE_PARTITIONS_CONFIG,
|
||||
ConfigDef.Type.INT,
|
||||
5,
|
||||
atLeast(1),
|
||||
ConfigDef.Importance.LOW,
|
||||
STATUS_STORAGE_PARTITIONS_CONFIG_DOC)
|
||||
.define(STATUS_STORAGE_REPLICATION_FACTOR_CONFIG,
|
||||
ConfigDef.Type.SHORT,
|
||||
(short) 3,
|
||||
atLeast(1),
|
||||
ConfigDef.Importance.LOW,
|
||||
STATUS_STORAGE_REPLICATION_FACTOR_CONFIG_DOC)
|
||||
.define(CONNECT_PROTOCOL_CONFIG,
|
||||
ConfigDef.Type.STRING,
|
||||
CONNECT_PROTOCOL_DEFAULT,
|
||||
ConfigDef.LambdaValidator.with(
|
||||
(name, value) -> {
|
||||
try {
|
||||
ConnectProtocolCompatibility.compatibility((String) value);
|
||||
} catch (Throwable t) {
|
||||
throw new ConfigException(name, value, "Invalid Connect protocol "
|
||||
+ "compatibility");
|
||||
}
|
||||
},
|
||||
() -> "[" + Utils.join(ConnectProtocolCompatibility.values(), ", ") + "]"),
|
||||
ConfigDef.Importance.LOW,
|
||||
CONNECT_PROTOCOL_DOC)
|
||||
.define(SCHEDULED_REBALANCE_MAX_DELAY_MS_CONFIG,
|
||||
ConfigDef.Type.INT,
|
||||
SCHEDULED_REBALANCE_MAX_DELAY_MS_DEFAULT,
|
||||
between(0, Integer.MAX_VALUE),
|
||||
ConfigDef.Importance.LOW,
|
||||
SCHEDULED_REBALANCE_MAX_DELAY_MS_DOC)
|
||||
.define(INTER_WORKER_KEY_TTL_MS_CONFIG,
|
||||
ConfigDef.Type.INT,
|
||||
INTER_WORKER_KEY_TTL_MS_MS_DEFAULT,
|
||||
between(0, Integer.MAX_VALUE),
|
||||
ConfigDef.Importance.LOW,
|
||||
INTER_WORKER_KEY_TTL_MS_MS_DOC)
|
||||
.define(INTER_WORKER_KEY_GENERATION_ALGORITHM_CONFIG,
|
||||
ConfigDef.Type.STRING,
|
||||
INTER_WORKER_KEY_GENERATION_ALGORITHM_DEFAULT,
|
||||
ConfigDef.LambdaValidator.with(
|
||||
(name, value) -> validateKeyAlgorithm(name, (String) value),
|
||||
() -> "Any KeyGenerator algorithm supported by the worker JVM"
|
||||
),
|
||||
ConfigDef.Importance.LOW,
|
||||
INTER_WORKER_KEY_GENERATION_ALGORITHM_DOC)
|
||||
.define(INTER_WORKER_KEY_SIZE_CONFIG,
|
||||
ConfigDef.Type.INT,
|
||||
INTER_WORKER_KEY_SIZE_DEFAULT,
|
||||
ConfigDef.Importance.LOW,
|
||||
INTER_WORKER_KEY_SIZE_DOC)
|
||||
.define(INTER_WORKER_SIGNATURE_ALGORITHM_CONFIG,
|
||||
ConfigDef.Type.STRING,
|
||||
INTER_WORKER_SIGNATURE_ALGORITHM_DEFAULT,
|
||||
ConfigDef.LambdaValidator.with(
|
||||
(name, value) -> validateSignatureAlgorithm(name, (String) value),
|
||||
() -> "Any MAC algorithm supported by the worker JVM"),
|
||||
ConfigDef.Importance.LOW,
|
||||
INTER_WORKER_SIGNATURE_ALGORITHM_DOC)
|
||||
.define(INTER_WORKER_VERIFICATION_ALGORITHMS_CONFIG,
|
||||
ConfigDef.Type.LIST,
|
||||
INTER_WORKER_VERIFICATION_ALGORITHMS_DEFAULT,
|
||||
ConfigDef.LambdaValidator.with(
|
||||
(name, value) -> validateSignatureAlgorithms(name, (List<String>) value),
|
||||
() -> "A list of one or more MAC algorithms, each supported by the worker JVM"
|
||||
),
|
||||
ConfigDef.Importance.LOW,
|
||||
INTER_WORKER_VERIFICATION_ALGORITHMS_DOC);
|
||||
|
||||
@Override
|
||||
public Integer getRebalanceTimeout() {
|
||||
return getInt(DistributedConfig.REBALANCE_TIMEOUT_MS_CONFIG);
|
||||
}
|
||||
|
||||
public DistributedConfig(Map<String, String> props) {
|
||||
super(CONFIG, props);
|
||||
getInternalRequestKeyGenerator(); // Check here for a valid key size + key algorithm to fail fast if either are invalid
|
||||
validateKeyAlgorithmAndVerificationAlgorithms();
|
||||
}
|
||||
|
||||
public static void main(String[] args) {
|
||||
System.out.println(CONFIG.toHtml());
|
||||
}
|
||||
|
||||
public KeyGenerator getInternalRequestKeyGenerator() {
|
||||
try {
|
||||
KeyGenerator result = KeyGenerator.getInstance(getString(INTER_WORKER_KEY_GENERATION_ALGORITHM_CONFIG));
|
||||
Optional.ofNullable(getInt(INTER_WORKER_KEY_SIZE_CONFIG)).ifPresent(result::init);
|
||||
return result;
|
||||
} catch (NoSuchAlgorithmException | InvalidParameterException e) {
|
||||
throw new ConfigException(String.format(
|
||||
"Unable to create key generator with algorithm %s and key size %d: %s",
|
||||
getString(INTER_WORKER_KEY_GENERATION_ALGORITHM_CONFIG),
|
||||
getInt(INTER_WORKER_KEY_SIZE_CONFIG),
|
||||
e.getMessage()
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
private void validateKeyAlgorithmAndVerificationAlgorithms() {
|
||||
String keyAlgorithm = getString(INTER_WORKER_KEY_GENERATION_ALGORITHM_CONFIG);
|
||||
List<String> verificationAlgorithms = getList(INTER_WORKER_VERIFICATION_ALGORITHMS_CONFIG);
|
||||
if (!verificationAlgorithms.contains(keyAlgorithm)) {
|
||||
throw new ConfigException(
|
||||
INTER_WORKER_KEY_GENERATION_ALGORITHM_CONFIG,
|
||||
keyAlgorithm,
|
||||
String.format("Key generation algorithm must be present in %s list", INTER_WORKER_VERIFICATION_ALGORITHMS_CONFIG)
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
private static void validateSignatureAlgorithms(String configName, List<String> algorithms) {
|
||||
if (algorithms.isEmpty()) {
|
||||
throw new ConfigException(
|
||||
configName,
|
||||
algorithms,
|
||||
"At least one signature verification algorithm must be provided"
|
||||
);
|
||||
}
|
||||
algorithms.forEach(algorithm -> validateSignatureAlgorithm(configName, algorithm));
|
||||
}
|
||||
|
||||
private static void validateSignatureAlgorithm(String configName, String algorithm) {
|
||||
try {
|
||||
Mac.getInstance(algorithm);
|
||||
} catch (NoSuchAlgorithmException e) {
|
||||
throw new ConfigException(configName, algorithm, e.getMessage());
|
||||
}
|
||||
}
|
||||
|
||||
private static void validateKeyAlgorithm(String configName, String algorithm) {
|
||||
try {
|
||||
KeyGenerator.getInstance(algorithm);
|
||||
} catch (NoSuchAlgorithmException e) {
|
||||
throw new ConfigException(configName, algorithm, e.getMessage());
|
||||
}
|
||||
}
|
||||
}
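
An illustrative sketch (editor-added) of building this config programmatically. The group.id and the three storage-topic settings are the required distributed-mode keys defined above; the bootstrap.servers and converter keys are assumed to come from the WorkerConfig base definition, which is not shown in this file.

package org.apache.kafka.connect.runtime.distributed;

import java.util.HashMap;
import java.util.Map;

public class DistributedConfigExample {
    public static void main(String[] args) {
        Map<String, String> props = new HashMap<>();
        // Assumed WorkerConfig keys (defined in the base class, not in this file):
        props.put("bootstrap.servers", "localhost:9092");
        props.put("key.converter", "org.apache.kafka.connect.json.JsonConverter");
        props.put("value.converter", "org.apache.kafka.connect.json.JsonConverter");
        // Distributed-mode keys defined above:
        props.put(DistributedConfig.GROUP_ID_CONFIG, "connect-cluster");
        props.put(DistributedConfig.CONFIG_TOPIC_CONFIG, "connect-configs");
        props.put(DistributedConfig.OFFSET_STORAGE_TOPIC_CONFIG, "connect-offsets");
        props.put(DistributedConfig.STATUS_STORAGE_TOPIC_CONFIG, "connect-status");
        // Optional: fall back to the eager protocol instead of the sessioned default.
        props.put(DistributedConfig.CONNECT_PROTOCOL_CONFIG, ConnectProtocolCompatibility.EAGER.toString());

        DistributedConfig config = new DistributedConfig(props);
        System.out.println(config.getString(DistributedConfig.CONNECT_PROTOCOL_CONFIG)); // eager
    }
}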
File diff suppressed because it is too large
@@ -0,0 +1,182 @@
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.runtime.distributed;
|
||||
|
||||
import org.apache.kafka.common.utils.CircularIterator;
|
||||
import org.apache.kafka.common.utils.LogContext;
|
||||
import org.apache.kafka.connect.util.ConnectorTaskId;
|
||||
import org.slf4j.Logger;
|
||||
|
||||
import java.nio.ByteBuffer;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import static org.apache.kafka.common.message.JoinGroupResponseData.JoinGroupResponseMember;
|
||||
import static org.apache.kafka.connect.runtime.distributed.ConnectProtocol.Assignment;
|
||||
import static org.apache.kafka.connect.runtime.distributed.WorkerCoordinator.LeaderState;
|
||||
|
||||
|
||||
/**
|
||||
* An assignor that computes an unweighted round-robin distribution of connectors and tasks. The
|
||||
* connectors are assigned to the workers first, followed by the tasks. This is to avoid
|
||||
* load imbalance when several 1-task connectors are running, given that a connector is usually
|
||||
* more lightweight than a task.
|
||||
*
|
||||
* Note that this class is NOT thread-safe.
|
||||
*/
|
||||
public class EagerAssignor implements ConnectAssignor {
|
||||
private final Logger log;
|
||||
|
||||
public EagerAssignor(LogContext logContext) {
|
||||
this.log = logContext.logger(EagerAssignor.class);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Map<String, ByteBuffer> performAssignment(String leaderId, String protocol,
|
||||
List<JoinGroupResponseMember> allMemberMetadata,
|
||||
WorkerCoordinator coordinator) {
|
||||
log.debug("Performing task assignment");
|
||||
Map<String, ExtendedWorkerState> memberConfigs = new HashMap<>();
|
||||
for (JoinGroupResponseMember member : allMemberMetadata)
|
||||
memberConfigs.put(member.memberId(), IncrementalCooperativeConnectProtocol.deserializeMetadata(ByteBuffer.wrap(member.metadata())));
|
||||
|
||||
long maxOffset = findMaxMemberConfigOffset(memberConfigs, coordinator);
|
||||
Long leaderOffset = ensureLeaderConfig(maxOffset, coordinator);
|
||||
if (leaderOffset == null)
|
||||
return fillAssignmentsAndSerialize(memberConfigs.keySet(), Assignment.CONFIG_MISMATCH,
|
||||
leaderId, memberConfigs.get(leaderId).url(), maxOffset,
|
||||
new HashMap<>(), new HashMap<>());
|
||||
return performTaskAssignment(leaderId, leaderOffset, memberConfigs, coordinator);
|
||||
}
|
||||
|
||||
private Long ensureLeaderConfig(long maxOffset, WorkerCoordinator coordinator) {
|
||||
// If this leader is behind some other members, we can't do assignment
|
||||
if (coordinator.configSnapshot().offset() < maxOffset) {
|
||||
// We might be able to take a new snapshot to catch up immediately and avoid another round of syncing here.
|
||||
// Alternatively, if this node has already passed the maximum reported by any other member of the group, it
|
||||
// is also safe to use this newer state.
|
||||
ClusterConfigState updatedSnapshot = coordinator.configFreshSnapshot();
|
||||
if (updatedSnapshot.offset() < maxOffset) {
|
||||
log.info("Was selected to perform assignments, but do not have latest config found in sync request. " +
|
||||
"Returning an empty configuration to trigger re-sync.");
|
||||
return null;
|
||||
} else {
|
||||
coordinator.configSnapshot(updatedSnapshot);
|
||||
return updatedSnapshot.offset();
|
||||
}
|
||||
}
|
||||
return maxOffset;
|
||||
}
|
||||
|
||||
private Map<String, ByteBuffer> performTaskAssignment(String leaderId, long maxOffset,
|
||||
Map<String, ExtendedWorkerState> memberConfigs,
|
||||
WorkerCoordinator coordinator) {
|
||||
Map<String, Collection<String>> connectorAssignments = new HashMap<>();
|
||||
Map<String, Collection<ConnectorTaskId>> taskAssignments = new HashMap<>();
|
||||
|
||||
// Perform round-robin task assignment. Assign all connectors and then all tasks because assigning both the
|
||||
// connector and its tasks can lead to very uneven distribution of work in some common cases (e.g. for connectors
|
||||
// that generate only 1 task each; in a cluster of 2 or an even # of nodes, only even nodes will be assigned
|
||||
// connectors and only odd nodes will be assigned tasks, but tasks are, on average, actually more resource
|
||||
// intensive than connectors).
|
||||
List<String> connectorsSorted = sorted(coordinator.configSnapshot().connectors());
|
||||
CircularIterator<String> memberIt = new CircularIterator<>(sorted(memberConfigs.keySet()));
|
||||
for (String connectorId : connectorsSorted) {
|
||||
String connectorAssignedTo = memberIt.next();
|
||||
log.trace("Assigning connector {} to {}", connectorId, connectorAssignedTo);
|
||||
Collection<String> memberConnectors = connectorAssignments.get(connectorAssignedTo);
|
||||
if (memberConnectors == null) {
|
||||
memberConnectors = new ArrayList<>();
|
||||
connectorAssignments.put(connectorAssignedTo, memberConnectors);
|
||||
}
|
||||
memberConnectors.add(connectorId);
|
||||
}
|
||||
for (String connectorId : connectorsSorted) {
|
||||
for (ConnectorTaskId taskId : sorted(coordinator.configSnapshot().tasks(connectorId))) {
|
||||
String taskAssignedTo = memberIt.next();
|
||||
log.trace("Assigning task {} to {}", taskId, taskAssignedTo);
|
||||
Collection<ConnectorTaskId> memberTasks = taskAssignments.get(taskAssignedTo);
|
||||
if (memberTasks == null) {
|
||||
memberTasks = new ArrayList<>();
|
||||
taskAssignments.put(taskAssignedTo, memberTasks);
|
||||
}
|
||||
memberTasks.add(taskId);
|
||||
}
|
||||
}
|
||||
|
||||
coordinator.leaderState(new LeaderState(memberConfigs, connectorAssignments, taskAssignments));
|
||||
|
||||
return fillAssignmentsAndSerialize(memberConfigs.keySet(), Assignment.NO_ERROR,
|
||||
leaderId, memberConfigs.get(leaderId).url(), maxOffset, connectorAssignments, taskAssignments);
|
||||
}
|
||||
|
||||
private Map<String, ByteBuffer> fillAssignmentsAndSerialize(Collection<String> members,
|
||||
short error,
|
||||
String leaderId,
|
||||
String leaderUrl,
|
||||
long maxOffset,
|
||||
Map<String, Collection<String>> connectorAssignments,
|
||||
Map<String, Collection<ConnectorTaskId>> taskAssignments) {
|
||||
|
||||
Map<String, ByteBuffer> groupAssignment = new HashMap<>();
|
||||
for (String member : members) {
|
||||
Collection<String> connectors = connectorAssignments.get(member);
|
||||
if (connectors == null) {
|
||||
connectors = Collections.emptyList();
|
||||
}
|
||||
Collection<ConnectorTaskId> tasks = taskAssignments.get(member);
|
||||
if (tasks == null) {
|
||||
tasks = Collections.emptyList();
|
||||
}
|
||||
Assignment assignment = new Assignment(error, leaderId, leaderUrl, maxOffset, connectors, tasks);
|
||||
log.debug("Assignment: {} -> {}", member, assignment);
|
||||
groupAssignment.put(member, ConnectProtocol.serializeAssignment(assignment));
|
||||
}
|
||||
log.debug("Finished assignment");
|
||||
return groupAssignment;
|
||||
}
|
||||
|
||||
private long findMaxMemberConfigOffset(Map<String, ExtendedWorkerState> memberConfigs,
|
||||
WorkerCoordinator coordinator) {
|
||||
// The new config offset is the maximum seen by any member. We always perform assignment using this offset,
|
||||
// even if some members have fallen behind. The config offset used to generate the assignment is included in
|
||||
// the response so members that have fallen behind will not use the assignment until they have caught up.
|
||||
Long maxOffset = null;
|
||||
for (Map.Entry<String, ExtendedWorkerState> stateEntry : memberConfigs.entrySet()) {
|
||||
long memberRootOffset = stateEntry.getValue().offset();
|
||||
if (maxOffset == null)
|
||||
maxOffset = memberRootOffset;
|
||||
else
|
||||
maxOffset = Math.max(maxOffset, memberRootOffset);
|
||||
}
|
||||
|
||||
log.debug("Max config offset root: {}, local snapshot config offsets root: {}",
|
||||
maxOffset, coordinator.configSnapshot().offset());
|
||||
return maxOffset;
|
||||
}
|
||||
|
||||
private static <T extends Comparable<T>> List<T> sorted(Collection<T> members) {
|
||||
List<T> res = new ArrayList<>(members);
|
||||
Collections.sort(res);
|
||||
return res;
|
||||
}
|
||||
|
||||
}
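
A tiny editor-added walk-through (not part of this patch) of the round-robin order implemented above: with two workers and two single-task connectors, connectors are dealt out first and tasks continue around the same circular iterator.

package org.apache.kafka.connect.runtime.distributed;

import org.apache.kafka.common.utils.CircularIterator;

import java.util.Arrays;

public class EagerRoundRobinWalkthrough {
    public static void main(String[] args) {
        CircularIterator<String> it = new CircularIterator<>(Arrays.asList("worker-1", "worker-2"));
        // Connectors first, in sorted order: conn-a, conn-b.
        System.out.println("conn-a   -> " + it.next()); // worker-1
        System.out.println("conn-b   -> " + it.next()); // worker-2
        // Then all tasks, in sorted order, continuing on the same iterator.
        System.out.println("conn-a-0 -> " + it.next()); // worker-1
        System.out.println("conn-b-0 -> " + it.next()); // worker-2
    }
}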
@@ -0,0 +1,269 @@
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.runtime.distributed;
|
||||
|
||||
import org.apache.kafka.common.protocol.types.Struct;
|
||||
import org.apache.kafka.connect.util.ConnectorTaskId;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.LinkedHashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Objects;
|
||||
import java.util.Optional;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import static org.apache.kafka.connect.runtime.distributed.ConnectProtocol.ASSIGNMENT_KEY_NAME;
|
||||
import static org.apache.kafka.connect.runtime.distributed.ConnectProtocol.CONFIG_OFFSET_KEY_NAME;
|
||||
import static org.apache.kafka.connect.runtime.distributed.ConnectProtocol.CONNECTOR_KEY_NAME;
|
||||
import static org.apache.kafka.connect.runtime.distributed.ConnectProtocol.CONNECTOR_TASK;
|
||||
import static org.apache.kafka.connect.runtime.distributed.ConnectProtocol.ERROR_KEY_NAME;
|
||||
import static org.apache.kafka.connect.runtime.distributed.ConnectProtocol.LEADER_KEY_NAME;
|
||||
import static org.apache.kafka.connect.runtime.distributed.ConnectProtocol.LEADER_URL_KEY_NAME;
|
||||
import static org.apache.kafka.connect.runtime.distributed.ConnectProtocol.TASKS_KEY_NAME;
|
||||
import static org.apache.kafka.connect.runtime.distributed.IncrementalCooperativeConnectProtocol.ASSIGNMENT_V1;
|
||||
import static org.apache.kafka.connect.runtime.distributed.IncrementalCooperativeConnectProtocol.CONNECTOR_ASSIGNMENT_V1;
|
||||
import static org.apache.kafka.connect.runtime.distributed.IncrementalCooperativeConnectProtocol.CONNECT_PROTOCOL_V1;
|
||||
import static org.apache.kafka.connect.runtime.distributed.IncrementalCooperativeConnectProtocol.REVOKED_KEY_NAME;
|
||||
import static org.apache.kafka.connect.runtime.distributed.IncrementalCooperativeConnectProtocol.SCHEDULED_DELAY_KEY_NAME;
|
||||
|
||||
/**
|
||||
* The extended assignment of connectors and tasks that includes revoked connectors and tasks
|
||||
* as well as a scheduled rebalancing delay.
|
||||
*/
|
||||
public class ExtendedAssignment extends ConnectProtocol.Assignment {
|
||||
private final short version;
|
||||
private final Collection<String> revokedConnectorIds;
|
||||
private final Collection<ConnectorTaskId> revokedTaskIds;
|
||||
private final int delay;
|
||||
|
||||
private static final ExtendedAssignment EMPTY = new ExtendedAssignment(
|
||||
CONNECT_PROTOCOL_V1, ConnectProtocol.Assignment.NO_ERROR, null, null, -1,
|
||||
Collections.emptyList(), Collections.emptyList(), Collections.emptyList(), Collections.emptyList(), 0);
|
||||
|
||||
/**
|
||||
* Create an assignment indicating responsibility for the given connector instances and task Ids.
|
||||
*
|
||||
* @param version Connect protocol version
|
||||
* @param error error code for this assignment; {@code ConnectProtocol.Assignment.NO_ERROR}
|
||||
* indicates no error during assignment
|
||||
* @param leader Connect group's leader Id; may be null only on the empty assignment
|
||||
* @param leaderUrl Connect group's leader URL; may be null only on the empty assignment
|
||||
* @param configOffset the offset in the config topic that this assignment corresponds to
|
||||
* @param connectorIds list of connectors that the worker should instantiate and run; may not be null
|
||||
* @param taskIds list of task IDs that the worker should instantiate and run; may not be null
|
||||
* @param revokedConnectorIds list of connectors that the worker should stop running; may not be null
|
||||
* @param revokedTaskIds list of task IDs that the worker should stop running; may not be null
|
||||
* @param delay the scheduled delay after which the worker should rejoin the group
|
||||
*/
|
||||
public ExtendedAssignment(short version, short error, String leader, String leaderUrl, long configOffset,
|
||||
Collection<String> connectorIds, Collection<ConnectorTaskId> taskIds,
|
||||
Collection<String> revokedConnectorIds, Collection<ConnectorTaskId> revokedTaskIds,
|
||||
int delay) {
|
||||
super(error, leader, leaderUrl, configOffset, connectorIds, taskIds);
|
||||
this.version = version;
|
||||
this.revokedConnectorIds = Objects.requireNonNull(revokedConnectorIds,
|
||||
"Revoked connector IDs may be empty but not null");
|
||||
this.revokedTaskIds = Objects.requireNonNull(revokedTaskIds,
|
||||
"Revoked task IDs may be empty but not null");
|
||||
this.delay = delay;
|
||||
}
|
||||
|
||||
/**
|
||||
* Return the version of the connect protocol that this assignment belongs to.
|
||||
*
|
||||
* @return the connect protocol version of this assignment
|
||||
*/
|
||||
public short version() {
|
||||
return version;
|
||||
}
|
||||
|
||||
/**
|
||||
* Return the IDs of the connectors that are revoked by this assignment.
|
||||
*
|
||||
* @return the revoked connector IDs; empty if there are no revoked connectors
|
||||
*/
|
||||
public Collection<String> revokedConnectors() {
|
||||
return revokedConnectorIds;
|
||||
}
|
||||
|
||||
/**
|
||||
* Return the IDs of the tasks that are revoked by this assignment.
|
||||
*
|
||||
* @return the revoked task IDs; empty if there are no revoked tasks
|
||||
*/
|
||||
public Collection<ConnectorTaskId> revokedTasks() {
|
||||
return revokedTaskIds;
|
||||
}
|
||||
|
||||
/**
|
||||
* Return the delay for the rebalance that is scheduled by this assignment.
|
||||
*
|
||||
* @return the scheduled delay
|
||||
*/
|
||||
public int delay() {
|
||||
return delay;
|
||||
}
|
||||
|
||||
/**
|
||||
* Return an empty assignment.
|
||||
*
|
||||
* @return an empty assignment
|
||||
*/
|
||||
public static ExtendedAssignment empty() {
|
||||
return EMPTY;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "Assignment{" +
|
||||
"error=" + error() +
|
||||
", leader='" + leader() + '\'' +
|
||||
", leaderUrl='" + leaderUrl() + '\'' +
|
||||
", offset=" + offset() +
|
||||
", connectorIds=" + connectors() +
|
||||
", taskIds=" + tasks() +
|
||||
", revokedConnectorIds=" + revokedConnectorIds +
|
||||
", revokedTaskIds=" + revokedTaskIds +
|
||||
", delay=" + delay +
|
||||
'}';
|
||||
}
|
||||
|
||||
private Map<String, Collection<Integer>> revokedAsMap() {
|
||||
if (revokedConnectorIds == null && revokedTaskIds == null) {
|
||||
return null;
|
||||
}
|
||||
// Using LinkedHashMap preserves the ordering, which is helpful for tests and debugging
|
||||
Map<String, Collection<Integer>> taskMap = new LinkedHashMap<>();
|
||||
Optional.ofNullable(revokedConnectorIds)
|
||||
.orElseGet(Collections::emptyList)
|
||||
.stream()
|
||||
.distinct()
|
||||
.forEachOrdered(connectorId -> {
|
||||
Collection<Integer> connectorTasks =
|
||||
taskMap.computeIfAbsent(connectorId, v -> new ArrayList<>());
|
||||
connectorTasks.add(CONNECTOR_TASK);
|
||||
});
|
||||
|
||||
Optional.ofNullable(revokedTaskIds)
|
||||
.orElseGet(Collections::emptyList)
|
||||
.forEach(taskId -> {
|
||||
String connectorId = taskId.connector();
|
||||
Collection<Integer> connectorTasks =
|
||||
taskMap.computeIfAbsent(connectorId, v -> new ArrayList<>());
|
||||
connectorTasks.add(taskId.task());
|
||||
});
|
||||
return taskMap;
|
||||
}
|
||||
|
||||
/**
|
||||
* Return the {@code Struct} that corresponds to this assignment.
|
||||
*
|
||||
* @return the assignment struct
|
||||
*/
|
||||
public Struct toStruct() {
|
||||
Collection<Struct> assigned = taskAssignments(asMap());
|
||||
Collection<Struct> revoked = taskAssignments(revokedAsMap());
|
||||
return new Struct(ASSIGNMENT_V1)
|
||||
.set(ERROR_KEY_NAME, error())
|
||||
.set(LEADER_KEY_NAME, leader())
|
||||
.set(LEADER_URL_KEY_NAME, leaderUrl())
|
||||
.set(CONFIG_OFFSET_KEY_NAME, offset())
|
||||
.set(ASSIGNMENT_KEY_NAME, assigned != null ? assigned.toArray() : null)
|
||||
.set(REVOKED_KEY_NAME, revoked != null ? revoked.toArray() : null)
|
||||
.set(SCHEDULED_DELAY_KEY_NAME, delay);
|
||||
}
|
||||
|
||||
/**
|
||||
* Given a {@code Struct} that encodes an assignment, return the assignment object.
*
* @param version the Connect protocol version with which the struct was encoded
* @param struct a struct representing an assignment
|
||||
* @return the assignment
|
||||
*/
|
||||
public static ExtendedAssignment fromStruct(short version, Struct struct) {
|
||||
return struct == null
|
||||
? null
|
||||
: new ExtendedAssignment(
|
||||
version,
|
||||
struct.getShort(ERROR_KEY_NAME),
|
||||
struct.getString(LEADER_KEY_NAME),
|
||||
struct.getString(LEADER_URL_KEY_NAME),
|
||||
struct.getLong(CONFIG_OFFSET_KEY_NAME),
|
||||
extractConnectors(struct, ASSIGNMENT_KEY_NAME),
|
||||
extractTasks(struct, ASSIGNMENT_KEY_NAME),
|
||||
extractConnectors(struct, REVOKED_KEY_NAME),
|
||||
extractTasks(struct, REVOKED_KEY_NAME),
|
||||
struct.getInt(SCHEDULED_DELAY_KEY_NAME));
|
||||
}
|
||||
|
||||
private static Collection<Struct> taskAssignments(Map<String, Collection<Integer>> assignments) {
|
||||
return assignments == null
|
||||
? null
|
||||
: assignments.entrySet().stream()
|
||||
.map(connectorEntry -> {
|
||||
Struct taskAssignment = new Struct(CONNECTOR_ASSIGNMENT_V1);
|
||||
taskAssignment.set(CONNECTOR_KEY_NAME, connectorEntry.getKey());
|
||||
taskAssignment.set(TASKS_KEY_NAME, connectorEntry.getValue().toArray());
|
||||
return taskAssignment;
|
||||
}).collect(Collectors.toList());
|
||||
}
|
||||
|
||||
private static Collection<String> extractConnectors(Struct struct, String key) {
|
||||
assert REVOKED_KEY_NAME.equals(key) || ASSIGNMENT_KEY_NAME.equals(key);
|
||||
|
||||
Object[] connectors = struct.getArray(key);
|
||||
if (connectors == null) {
|
||||
return Collections.emptyList();
|
||||
}
|
||||
List<String> connectorIds = new ArrayList<>();
|
||||
for (Object structObj : connectors) {
|
||||
Struct assignment = (Struct) structObj;
|
||||
String connector = assignment.getString(CONNECTOR_KEY_NAME);
|
||||
for (Object taskIdObj : assignment.getArray(TASKS_KEY_NAME)) {
|
||||
Integer taskId = (Integer) taskIdObj;
|
||||
if (taskId == CONNECTOR_TASK) {
|
||||
connectorIds.add(connector);
|
||||
}
|
||||
}
|
||||
}
|
||||
return connectorIds;
|
||||
}
|
||||
|
||||
private static Collection<ConnectorTaskId> extractTasks(Struct struct, String key) {
|
||||
assert REVOKED_KEY_NAME.equals(key) || ASSIGNMENT_KEY_NAME.equals(key);
|
||||
|
||||
Object[] tasks = struct.getArray(key);
|
||||
if (tasks == null) {
|
||||
return Collections.emptyList();
|
||||
}
|
||||
List<ConnectorTaskId> tasksIds = new ArrayList<>();
|
||||
for (Object structObj : tasks) {
|
||||
Struct assignment = (Struct) structObj;
|
||||
String connector = assignment.getString(CONNECTOR_KEY_NAME);
|
||||
for (Object taskIdObj : assignment.getArray(TASKS_KEY_NAME)) {
|
||||
Integer taskId = (Integer) taskIdObj;
|
||||
if (taskId != CONNECTOR_TASK) {
|
||||
tasksIds.add(new ConnectorTaskId(connector, taskId));
|
||||
}
|
||||
}
|
||||
}
|
||||
return tasksIds;
|
||||
}
|
||||
|
||||
}
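// A minimal round-trip sketch (not part of the original file): it assumes CONNECT_PROTOCOL_V1
// and ConnectProtocol.Assignment.NO_ERROR are visible as in the surrounding code, and every
// literal value below is hypothetical.
//
//   ExtendedAssignment original = new ExtendedAssignment(
//           CONNECT_PROTOCOL_V1, ConnectProtocol.Assignment.NO_ERROR,
//           "leader-member-id", "http://leader:8083", 42L,
//           Collections.singletonList("connector-a"),
//           Collections.singletonList(new ConnectorTaskId("connector-a", 0)),
//           Collections.emptyList(), Collections.emptyList(), 0);
//   Struct struct = original.toStruct();
//   ExtendedAssignment copy = ExtendedAssignment.fromStruct(CONNECT_PROTOCOL_V1, struct);
//   // copy carries the same connectors, tasks, revocations and delay as original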
@@ -0,0 +1,48 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.runtime.distributed;
|
||||
|
||||
/**
|
||||
* A class that captures the deserialized form of a worker's metadata.
|
||||
*/
|
||||
public class ExtendedWorkerState extends ConnectProtocol.WorkerState {
|
||||
private final ExtendedAssignment assignment;
|
||||
|
||||
public ExtendedWorkerState(String url, long offset, ExtendedAssignment assignment) {
|
||||
super(url, offset);
|
||||
this.assignment = assignment != null ? assignment : ExtendedAssignment.empty();
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the assignment of connectors and tasks that the worker had at the moment its
* state was captured by this class.
|
||||
*
|
||||
* @return the assignment of connectors and tasks
|
||||
*/
|
||||
public ExtendedAssignment assignment() {
|
||||
return assignment;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "WorkerState{" +
|
||||
"url='" + url() + '\'' +
|
||||
", offset=" + offset() +
|
||||
", " + assignment +
|
||||
'}';
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,668 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.runtime.distributed;
|
||||
|
||||
import org.apache.kafka.common.utils.LogContext;
|
||||
import org.apache.kafka.common.utils.Time;
|
||||
import org.apache.kafka.connect.runtime.distributed.WorkerCoordinator.ConnectorsAndTasks;
|
||||
import org.apache.kafka.connect.runtime.distributed.WorkerCoordinator.WorkerLoad;
|
||||
import org.apache.kafka.connect.util.ConnectorTaskId;
|
||||
import org.slf4j.Logger;
|
||||
|
||||
import java.nio.ByteBuffer;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.Iterator;
|
||||
import java.util.LinkedHashSet;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Objects;
|
||||
import java.util.Optional;
|
||||
import java.util.Set;
|
||||
import java.util.TreeSet;
|
||||
import java.util.function.Function;
|
||||
import java.util.stream.Collectors;
|
||||
import java.util.stream.IntStream;
|
||||
|
||||
import static org.apache.kafka.common.message.JoinGroupResponseData.JoinGroupResponseMember;
|
||||
import static org.apache.kafka.connect.runtime.distributed.ConnectProtocol.Assignment;
|
||||
import static org.apache.kafka.connect.runtime.distributed.IncrementalCooperativeConnectProtocol.CONNECT_PROTOCOL_V1;
|
||||
import static org.apache.kafka.connect.runtime.distributed.IncrementalCooperativeConnectProtocol.CONNECT_PROTOCOL_V2;
|
||||
import static org.apache.kafka.connect.runtime.distributed.WorkerCoordinator.LeaderState;
|
||||
|
||||
/**
|
||||
* An assignor that computes a distribution of connectors and tasks according to the incremental
|
||||
* cooperative strategy for rebalancing. {@see
|
||||
* https://cwiki.apache.org/confluence/display/KAFKA/KIP-415%3A+Incremental+Cooperative
|
||||
* +Rebalancing+in+Kafka+Connect} for a description of the assignment policy.
|
||||
*
|
||||
* Note that this class is NOT thread-safe.
|
||||
*/
|
||||
public class IncrementalCooperativeAssignor implements ConnectAssignor {
|
||||
private final Logger log;
|
||||
private final Time time;
|
||||
private final int maxDelay;
|
||||
private ConnectorsAndTasks previousAssignment;
|
||||
private ConnectorsAndTasks previousRevocation;
|
||||
private boolean canRevoke;
|
||||
// visible for testing
|
||||
protected final Set<String> candidateWorkersForReassignment;
|
||||
protected long scheduledRebalance;
|
||||
protected int delay;
|
||||
|
||||
public IncrementalCooperativeAssignor(LogContext logContext, Time time, int maxDelay) {
|
||||
this.log = logContext.logger(IncrementalCooperativeAssignor.class);
|
||||
this.time = time;
|
||||
this.maxDelay = maxDelay;
|
||||
this.previousAssignment = ConnectorsAndTasks.EMPTY;
|
||||
this.previousRevocation = new ConnectorsAndTasks.Builder().build();
|
||||
this.canRevoke = true;
|
||||
this.scheduledRebalance = 0;
|
||||
this.candidateWorkersForReassignment = new LinkedHashSet<>();
|
||||
this.delay = 0;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Map<String, ByteBuffer> performAssignment(String leaderId, String protocol,
|
||||
List<JoinGroupResponseMember> allMemberMetadata,
|
||||
WorkerCoordinator coordinator) {
|
||||
log.debug("Performing task assignment");
|
||||
|
||||
Map<String, ExtendedWorkerState> memberConfigs = new HashMap<>();
|
||||
for (JoinGroupResponseMember member : allMemberMetadata) {
|
||||
memberConfigs.put(
|
||||
member.memberId(),
|
||||
IncrementalCooperativeConnectProtocol.deserializeMetadata(ByteBuffer.wrap(member.metadata())));
|
||||
}
|
||||
log.debug("Member configs: {}", memberConfigs);
|
||||
|
||||
// The new config offset is the maximum seen by any member. We always perform assignment using this offset,
|
||||
// even if some members have fallen behind. The config offset used to generate the assignment is included in
|
||||
// the response so members that have fallen behind will not use the assignment until they have caught up.
|
||||
long maxOffset = memberConfigs.values().stream().map(ExtendedWorkerState::offset).max(Long::compare).get();
|
||||
log.debug("Max config offset root: {}, local snapshot config offsets root: {}",
|
||||
maxOffset, coordinator.configSnapshot().offset());
|
||||
|
||||
short protocolVersion = memberConfigs.values().stream()
|
||||
.allMatch(state -> state.assignment().version() == CONNECT_PROTOCOL_V2)
|
||||
? CONNECT_PROTOCOL_V2
|
||||
: CONNECT_PROTOCOL_V1;
|
||||
|
||||
Long leaderOffset = ensureLeaderConfig(maxOffset, coordinator);
|
||||
if (leaderOffset == null) {
|
||||
Map<String, ExtendedAssignment> assignments = fillAssignments(
|
||||
memberConfigs.keySet(), Assignment.CONFIG_MISMATCH,
|
||||
leaderId, memberConfigs.get(leaderId).url(), maxOffset, Collections.emptyMap(),
|
||||
Collections.emptyMap(), Collections.emptyMap(), 0, protocolVersion);
|
||||
return serializeAssignments(assignments);
|
||||
}
|
||||
return performTaskAssignment(leaderId, leaderOffset, memberConfigs, coordinator, protocolVersion);
|
||||
}
|
||||
|
||||
private Long ensureLeaderConfig(long maxOffset, WorkerCoordinator coordinator) {
|
||||
// If this leader is behind some other members, we can't do assignment
|
||||
if (coordinator.configSnapshot().offset() < maxOffset) {
|
||||
// We might be able to take a new snapshot to catch up immediately and avoid another round of syncing here.
|
||||
// Alternatively, if this node has already passed the maximum reported by any other member of the group, it
|
||||
// is also safe to use this newer state.
|
||||
ClusterConfigState updatedSnapshot = coordinator.configFreshSnapshot();
|
||||
if (updatedSnapshot.offset() < maxOffset) {
|
||||
log.info("Was selected to perform assignments, but do not have latest config found in sync request. "
|
||||
+ "Returning an empty configuration to trigger re-sync.");
|
||||
return null;
|
||||
} else {
|
||||
coordinator.configSnapshot(updatedSnapshot);
|
||||
return updatedSnapshot.offset();
|
||||
}
|
||||
}
|
||||
return maxOffset;
|
||||
}
|
||||
|
||||
/**
|
||||
* Performs task assignment based on the incremental cooperative connect protocol.
|
||||
* Read more on the design and implementation in:
|
||||
* {@see https://cwiki.apache.org/confluence/display/KAFKA/KIP-415%3A+Incremental+Cooperative+Rebalancing+in+Kafka+Connect}
|
||||
*
|
||||
* @param leaderId the ID of the group leader
|
||||
* @param maxOffset the latest known offset of the configuration topic
|
||||
* @param memberConfigs the metadata of all the members of the group as gathered in the
*                      current round of rebalancing
* @param coordinator the worker coordinator instance that provides the configuration snapshot
*                    and is assigned the leader state during this assignment
|
||||
* @param protocolVersion the Connect subprotocol version
|
||||
* @return the serialized assignment of tasks to the whole group, including assigned or
|
||||
* revoked tasks
|
||||
*/
|
||||
protected Map<String, ByteBuffer> performTaskAssignment(String leaderId, long maxOffset,
|
||||
Map<String, ExtendedWorkerState> memberConfigs,
|
||||
WorkerCoordinator coordinator, short protocolVersion) {
|
||||
log.debug("Performing task assignment during generation: {} with memberId: {}",
|
||||
coordinator.generationId(), coordinator.memberId());
|
||||
|
||||
// Base set: The previous assignment of connectors-and-tasks is a standalone snapshot that
|
||||
// can be used to calculate derived sets
|
||||
log.debug("Previous assignments: {}", previousAssignment);
|
||||
|
||||
ClusterConfigState snapshot = coordinator.configSnapshot();
|
||||
Set<String> configuredConnectors = new TreeSet<>(snapshot.connectors());
|
||||
Set<ConnectorTaskId> configuredTasks = configuredConnectors.stream()
|
||||
.flatMap(c -> snapshot.tasks(c).stream())
|
||||
.collect(Collectors.toSet());
|
||||
|
||||
// Base set: The set of configured connectors-and-tasks is a standalone snapshot that can
|
||||
// be used to calculate derived sets
|
||||
ConnectorsAndTasks configured = new ConnectorsAndTasks.Builder()
|
||||
.with(configuredConnectors, configuredTasks).build();
|
||||
log.debug("Configured assignments: {}", configured);
|
||||
|
||||
// Base set: The set of active connectors-and-tasks is a standalone snapshot that can be
|
||||
// used to calculate derived sets
|
||||
ConnectorsAndTasks activeAssignments = assignment(memberConfigs);
|
||||
log.debug("Active assignments: {}", activeAssignments);
|
||||
|
||||
// This means that a previous revocation did not take effect. In this case, reset
|
||||
// appropriately and be ready to re-apply revocation of tasks
|
||||
if (!previousRevocation.isEmpty()) {
|
||||
if (previousRevocation.connectors().stream().anyMatch(c -> activeAssignments.connectors().contains(c))
|
||||
|| previousRevocation.tasks().stream().anyMatch(t -> activeAssignments.tasks().contains(t))) {
|
||||
previousAssignment = activeAssignments;
|
||||
canRevoke = true;
|
||||
}
|
||||
previousRevocation.connectors().clear();
|
||||
previousRevocation.tasks().clear();
|
||||
}
|
||||
|
||||
// Derived set: The set of deleted connectors-and-tasks is a derived set from the set
|
||||
// difference of previous - configured
|
||||
ConnectorsAndTasks deleted = diff(previousAssignment, configured);
|
||||
log.debug("Deleted assignments: {}", deleted);
|
||||
|
||||
// Derived set: The set of remaining active connectors-and-tasks is a derived set from the
|
||||
// set difference of active - deleted
|
||||
ConnectorsAndTasks remainingActive = diff(activeAssignments, deleted);
|
||||
log.debug("Remaining (excluding deleted) active assignments: {}", remainingActive);
|
||||
|
||||
// Derived set: The set of lost or unaccounted connectors-and-tasks is a derived set from
|
||||
// the set difference of previous - active - deleted
|
||||
ConnectorsAndTasks lostAssignments = diff(previousAssignment, activeAssignments, deleted);
|
||||
log.debug("Lost assignments: {}", lostAssignments);
|
||||
|
||||
// Derived set: The set of new connectors-and-tasks is a derived set from the set
|
||||
// difference of configured - previous - active
|
||||
ConnectorsAndTasks newSubmissions = diff(configured, previousAssignment, activeAssignments);
|
||||
log.debug("New assignments: {}", newSubmissions);
|
||||
|
||||
// A collection of the complete assignment
|
||||
List<WorkerLoad> completeWorkerAssignment = workerAssignment(memberConfigs, ConnectorsAndTasks.EMPTY);
|
||||
log.debug("Complete (ignoring deletions) worker assignments: {}", completeWorkerAssignment);
|
||||
|
||||
// Per worker connector assignments without removing deleted connectors yet
|
||||
Map<String, Collection<String>> connectorAssignments =
|
||||
completeWorkerAssignment.stream().collect(Collectors.toMap(WorkerLoad::worker, WorkerLoad::connectors));
|
||||
log.debug("Complete (ignoring deletions) connector assignments: {}", connectorAssignments);
|
||||
|
||||
// Per worker task assignments without removing deleted connectors yet
|
||||
Map<String, Collection<ConnectorTaskId>> taskAssignments =
|
||||
completeWorkerAssignment.stream().collect(Collectors.toMap(WorkerLoad::worker, WorkerLoad::tasks));
|
||||
log.debug("Complete (ignoring deletions) task assignments: {}", taskAssignments);
|
||||
|
||||
// A collection of the current assignment excluding the connectors-and-tasks to be deleted
|
||||
List<WorkerLoad> currentWorkerAssignment = workerAssignment(memberConfigs, deleted);
|
||||
|
||||
Map<String, ConnectorsAndTasks> toRevoke = computeDeleted(deleted, connectorAssignments, taskAssignments);
|
||||
log.debug("Connector and task to delete assignments: {}", toRevoke);
|
||||
|
||||
// Recompute the complete assignment excluding the deleted connectors-and-tasks
|
||||
completeWorkerAssignment = workerAssignment(memberConfigs, deleted);
|
||||
connectorAssignments =
|
||||
completeWorkerAssignment.stream().collect(Collectors.toMap(WorkerLoad::worker, WorkerLoad::connectors));
|
||||
taskAssignments =
|
||||
completeWorkerAssignment.stream().collect(Collectors.toMap(WorkerLoad::worker, WorkerLoad::tasks));
|
||||
|
||||
handleLostAssignments(lostAssignments, newSubmissions, completeWorkerAssignment);
|
||||
|
||||
// Do not revoke resources for re-assignment while a delayed rebalance is active
|
||||
// Also we do not revoke in two consecutive rebalances by the same leader
|
||||
canRevoke = delay == 0 && canRevoke;
|
||||
|
||||
// Compute the connectors-and-tasks to be revoked for load balancing without taking into
|
||||
// account the deleted ones.
|
||||
log.debug("Can leader revoke tasks in this assignment? {} (delay: {})", canRevoke, delay);
|
||||
if (canRevoke) {
|
||||
Map<String, ConnectorsAndTasks> toExplicitlyRevoke =
|
||||
performTaskRevocation(activeAssignments, currentWorkerAssignment);
|
||||
|
||||
log.debug("Connector and task to revoke assignments: {}", toRevoke);
|
||||
|
||||
toExplicitlyRevoke.forEach(
|
||||
(worker, assignment) -> {
|
||||
ConnectorsAndTasks existing = toRevoke.computeIfAbsent(
|
||||
worker,
|
||||
v -> new ConnectorsAndTasks.Builder().build());
|
||||
existing.connectors().addAll(assignment.connectors());
|
||||
existing.tasks().addAll(assignment.tasks());
|
||||
}
|
||||
);
|
||||
canRevoke = toExplicitlyRevoke.size() == 0;
|
||||
} else {
|
||||
canRevoke = delay == 0;
|
||||
}
|
||||
|
||||
assignConnectors(completeWorkerAssignment, newSubmissions.connectors());
|
||||
assignTasks(completeWorkerAssignment, newSubmissions.tasks());
|
||||
|
||||
log.debug("Current complete assignments: {}", currentWorkerAssignment);
|
||||
log.debug("New complete assignments: {}", completeWorkerAssignment);
|
||||
|
||||
Map<String, Collection<String>> currentConnectorAssignments =
|
||||
currentWorkerAssignment.stream().collect(Collectors.toMap(WorkerLoad::worker, WorkerLoad::connectors));
|
||||
|
||||
Map<String, Collection<ConnectorTaskId>> currentTaskAssignments =
|
||||
currentWorkerAssignment.stream().collect(Collectors.toMap(WorkerLoad::worker, WorkerLoad::tasks));
|
||||
|
||||
Map<String, Collection<String>> incrementalConnectorAssignments =
|
||||
diff(connectorAssignments, currentConnectorAssignments);
|
||||
|
||||
Map<String, Collection<ConnectorTaskId>> incrementalTaskAssignments =
|
||||
diff(taskAssignments, currentTaskAssignments);
|
||||
|
||||
log.debug("Incremental connector assignments: {}", incrementalConnectorAssignments);
|
||||
log.debug("Incremental task assignments: {}", incrementalTaskAssignments);
|
||||
|
||||
coordinator.leaderState(new LeaderState(memberConfigs, connectorAssignments, taskAssignments));
|
||||
|
||||
Map<String, ExtendedAssignment> assignments =
|
||||
fillAssignments(memberConfigs.keySet(), Assignment.NO_ERROR, leaderId,
|
||||
memberConfigs.get(leaderId).url(), maxOffset, incrementalConnectorAssignments,
|
||||
incrementalTaskAssignments, toRevoke, delay, protocolVersion);
|
||||
|
||||
previousAssignment = computePreviousAssignment(toRevoke, connectorAssignments, taskAssignments, lostAssignments);
|
||||
|
||||
log.debug("Actual assignments: {}", assignments);
|
||||
return serializeAssignments(assignments);
|
||||
}
|
||||
|
||||
private Map<String, ConnectorsAndTasks> computeDeleted(ConnectorsAndTasks deleted,
|
||||
Map<String, Collection<String>> connectorAssignments,
|
||||
Map<String, Collection<ConnectorTaskId>> taskAssignments) {
|
||||
// Connector to worker reverse lookup map
|
||||
Map<String, String> connectorOwners = WorkerCoordinator.invertAssignment(connectorAssignments);
|
||||
// Task to worker reverse lookup map
|
||||
Map<ConnectorTaskId, String> taskOwners = WorkerCoordinator.invertAssignment(taskAssignments);
|
||||
|
||||
Map<String, ConnectorsAndTasks> toRevoke = new HashMap<>();
|
||||
// Add the connectors that have been deleted to the revoked set
|
||||
deleted.connectors().forEach(c ->
|
||||
toRevoke.computeIfAbsent(
|
||||
connectorOwners.get(c),
|
||||
v -> new ConnectorsAndTasks.Builder().build()
|
||||
).connectors().add(c));
|
||||
// Add the tasks that have been deleted to the revoked set
|
||||
deleted.tasks().forEach(t ->
|
||||
toRevoke.computeIfAbsent(
|
||||
taskOwners.get(t),
|
||||
v -> new ConnectorsAndTasks.Builder().build()
|
||||
).tasks().add(t));
|
||||
log.debug("Connectors and tasks to delete assignments: {}", toRevoke);
|
||||
return toRevoke;
|
||||
}
|
||||
|
||||
private ConnectorsAndTasks computePreviousAssignment(Map<String, ConnectorsAndTasks> toRevoke,
|
||||
Map<String, Collection<String>> connectorAssignments,
|
||||
Map<String, Collection<ConnectorTaskId>> taskAssignments,
|
||||
ConnectorsAndTasks lostAssignments) {
|
||||
ConnectorsAndTasks previousAssignment = new ConnectorsAndTasks.Builder().with(
|
||||
connectorAssignments.values().stream().flatMap(Collection::stream).collect(Collectors.toSet()),
|
||||
taskAssignments.values() .stream() .flatMap(Collection::stream).collect(Collectors.toSet()))
|
||||
.build();
|
||||
|
||||
for (ConnectorsAndTasks revoked : toRevoke.values()) {
|
||||
previousAssignment.connectors().removeAll(revoked.connectors());
|
||||
previousAssignment.tasks().removeAll(revoked.tasks());
|
||||
previousRevocation.connectors().addAll(revoked.connectors());
|
||||
previousRevocation.tasks().addAll(revoked.tasks());
|
||||
}
|
||||
|
||||
// Depends on the previous assignment's collections being sets at the moment.
|
||||
// TODO: make it independent
|
||||
previousAssignment.connectors().addAll(lostAssignments.connectors());
|
||||
previousAssignment.tasks().addAll(lostAssignments.tasks());
|
||||
|
||||
return previousAssignment;
|
||||
}
|
||||
|
||||
// visible for testing
|
||||
protected void handleLostAssignments(ConnectorsAndTasks lostAssignments,
|
||||
ConnectorsAndTasks newSubmissions,
|
||||
List<WorkerLoad> completeWorkerAssignment) {
|
||||
if (lostAssignments.isEmpty()) {
|
||||
resetDelay();
|
||||
return;
|
||||
}
|
||||
|
||||
final long now = time.milliseconds();
|
||||
log.debug("Found the following connectors and tasks missing from previous assignments: "
|
||||
+ lostAssignments);
|
||||
|
||||
if (scheduledRebalance > 0 && now >= scheduledRebalance) {
|
||||
// delayed rebalance expired and it's time to assign resources
|
||||
log.debug("Delayed rebalance expired. Reassigning lost tasks");
|
||||
Optional<WorkerLoad> candidateWorkerLoad = Optional.empty();
|
||||
if (!candidateWorkersForReassignment.isEmpty()) {
|
||||
candidateWorkerLoad = pickCandidateWorkerForReassignment(completeWorkerAssignment);
|
||||
}
|
||||
|
||||
if (candidateWorkerLoad.isPresent()) {
|
||||
WorkerLoad workerLoad = candidateWorkerLoad.get();
|
||||
log.debug("A candidate worker has been found to assign lost tasks: {}", workerLoad.worker());
|
||||
lostAssignments.connectors().forEach(workerLoad::assign);
|
||||
lostAssignments.tasks().forEach(workerLoad::assign);
|
||||
} else {
|
||||
log.debug("No single candidate worker was found to assign lost tasks. Treating lost tasks as new tasks");
|
||||
newSubmissions.connectors().addAll(lostAssignments.connectors());
|
||||
newSubmissions.tasks().addAll(lostAssignments.tasks());
|
||||
}
|
||||
resetDelay();
|
||||
} else {
|
||||
candidateWorkersForReassignment
|
||||
.addAll(candidateWorkersForReassignment(completeWorkerAssignment));
|
||||
if (now < scheduledRebalance) {
|
||||
// a delayed rebalance is in progress, but it's not yet time to reassign
|
||||
// unaccounted resources
|
||||
delay = calculateDelay(now);
|
||||
log.debug("Delayed rebalance in progress. Task reassignment is postponed. New computed rebalance delay: {}", delay);
|
||||
} else {
|
||||
// This means scheduledRebalance == 0
|
||||
// We could also extract the current minimum delay from the group, to make this
// independent of consecutive leader failures, but this optimization is skipped
// at the moment
|
||||
delay = maxDelay;
|
||||
log.debug("Resetting rebalance delay to the max: {}. scheduledRebalance: {} now: {} diff scheduledRebalance - now: {}",
|
||||
delay, scheduledRebalance, now, scheduledRebalance - now);
|
||||
}
|
||||
scheduledRebalance = now + delay;
|
||||
}
|
||||
}
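    // Rough timeline of the branch above, assuming maxDelay is the worker's
    // scheduled.rebalance.max.delay.ms (300000 ms by default) and a worker drops out at t0:
    //   t0:                     lost tasks detected; delay = maxDelay; scheduledRebalance = t0 + maxDelay
    //   t0 < now < t0+maxDelay: rebalances only refresh the countdown (delay = scheduledRebalance - now)
    //   now >= t0+maxDelay:     lost tasks go to a returning (empty) worker if one exists, otherwise
    //                           they are treated as new submissions, and the delay is reset to 0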
private void resetDelay() {
|
||||
candidateWorkersForReassignment.clear();
|
||||
scheduledRebalance = 0;
|
||||
if (delay != 0) {
|
||||
log.debug("Resetting delay from previous value: {} to 0", delay);
|
||||
}
|
||||
delay = 0;
|
||||
}
|
||||
|
||||
private Set<String> candidateWorkersForReassignment(List<WorkerLoad> completeWorkerAssignment) {
|
||||
return completeWorkerAssignment.stream()
|
||||
.filter(WorkerLoad::isEmpty)
|
||||
.map(WorkerLoad::worker)
|
||||
.collect(Collectors.toSet());
|
||||
}
|
||||
|
||||
private Optional<WorkerLoad> pickCandidateWorkerForReassignment(List<WorkerLoad> completeWorkerAssignment) {
|
||||
Map<String, WorkerLoad> activeWorkers = completeWorkerAssignment.stream()
|
||||
.collect(Collectors.toMap(WorkerLoad::worker, Function.identity()));
|
||||
return candidateWorkersForReassignment.stream()
|
||||
.map(activeWorkers::get)
|
||||
.filter(Objects::nonNull)
|
||||
.findFirst();
|
||||
}
|
||||
|
||||
/**
|
||||
* Task revocation is based on a rough estimate of the lower average number of tasks before
* and after new workers join the group. If no new workers join, no revocation takes place.
* Based on this estimate, tasks are revoked until the new floor average is reached for
* each existing worker. The revoked tasks, once assigned to the new workers, will maintain
* a balanced load among the group.
|
||||
*
|
||||
* @param activeAssignments the connectors and tasks currently running across the group
* @param completeWorkerAssignment the current load of each worker in the group
* @return the connectors and tasks to revoke, grouped by the worker that currently runs them
|
||||
*/
|
||||
private Map<String, ConnectorsAndTasks> performTaskRevocation(ConnectorsAndTasks activeAssignments,
|
||||
Collection<WorkerLoad> completeWorkerAssignment) {
|
||||
int totalActiveConnectorsNum = activeAssignments.connectors().size();
|
||||
int totalActiveTasksNum = activeAssignments.tasks().size();
|
||||
Collection<WorkerLoad> existingWorkers = completeWorkerAssignment.stream()
|
||||
.filter(wl -> wl.size() > 0)
|
||||
.collect(Collectors.toList());
|
||||
int existingWorkersNum = existingWorkers.size();
|
||||
int totalWorkersNum = completeWorkerAssignment.size();
|
||||
int newWorkersNum = totalWorkersNum - existingWorkersNum;
|
||||
|
||||
if (log.isDebugEnabled()) {
|
||||
completeWorkerAssignment.forEach(wl -> log.debug(
|
||||
"Per worker current load size; worker: {} connectors: {} tasks: {}",
|
||||
wl.worker(), wl.connectorsSize(), wl.tasksSize()));
|
||||
}
|
||||
|
||||
Map<String, ConnectorsAndTasks> revoking = new HashMap<>();
|
||||
// If there are no new workers, or no existing workers to revoke tasks from, return early
// after logging the status
|
||||
if (!(newWorkersNum > 0 && existingWorkersNum > 0)) {
|
||||
log.debug("No task revocation required; workers with existing load: {} workers with "
|
||||
+ "no load {} total workers {}",
|
||||
existingWorkersNum, newWorkersNum, totalWorkersNum);
|
||||
// This is intentionally empty but mutable, because the map is used to include deleted
|
||||
// connectors and tasks as well
|
||||
return revoking;
|
||||
}
|
||||
|
||||
log.debug("Task revocation is required; workers with existing load: {} workers with "
|
||||
+ "no load {} total workers {}",
|
||||
existingWorkersNum, newWorkersNum, totalWorkersNum);
|
||||
|
||||
// We have at least one worker assignment (the leader itself) so totalWorkersNum can't be 0
|
||||
log.debug("Previous rounded down (floor) average number of connectors per worker {}", totalActiveConnectorsNum / existingWorkersNum);
|
||||
int floorConnectors = totalActiveConnectorsNum / totalWorkersNum;
|
||||
log.debug("New rounded down (floor) average number of connectors per worker {}", floorConnectors);
|
||||
|
||||
log.debug("Previous rounded down (floor) average number of tasks per worker {}", totalActiveTasksNum / existingWorkersNum);
|
||||
int floorTasks = totalActiveTasksNum / totalWorkersNum;
|
||||
log.debug("New rounded down (floor) average number of tasks per worker {}", floorTasks);
|
||||
|
||||
int numToRevoke = floorConnectors;
|
||||
for (WorkerLoad existing : existingWorkers) {
|
||||
Iterator<String> connectors = existing.connectors().iterator();
|
||||
for (int i = existing.connectorsSize(); i > floorConnectors && numToRevoke > 0; --i, --numToRevoke) {
|
||||
ConnectorsAndTasks resources = revoking.computeIfAbsent(
|
||||
existing.worker(),
|
||||
w -> new ConnectorsAndTasks.Builder().build());
|
||||
resources.connectors().add(connectors.next());
|
||||
}
|
||||
if (numToRevoke == 0) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
numToRevoke = floorTasks;
|
||||
for (WorkerLoad existing : existingWorkers) {
|
||||
Iterator<ConnectorTaskId> tasks = existing.tasks().iterator();
|
||||
for (int i = existing.tasksSize(); i > floorTasks && numToRevoke > 0; --i, --numToRevoke) {
|
||||
ConnectorsAndTasks resources = revoking.computeIfAbsent(
|
||||
existing.worker(),
|
||||
w -> new ConnectorsAndTasks.Builder().build());
|
||||
resources.tasks().add(tasks.next());
|
||||
}
|
||||
if (numToRevoke == 0) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return revoking;
|
||||
}
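    // Worked example of the floor estimate above (illustrative numbers only): with 2 existing
    // workers running 8 tasks in total and 2 empty workers joining, totalWorkersNum = 4 and
    // floorTasks = 8 / 4 = 2. numToRevoke starts at the floor, so tasks are revoked from the
    // existing workers (each one only while it holds more than the floor) until 2 revocations
    // have been made in total; those revoked tasks become available for the new workers in the
    // next round of assignment.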
private Map<String, ExtendedAssignment> fillAssignments(Collection<String> members, short error,
|
||||
String leaderId, String leaderUrl, long maxOffset,
|
||||
Map<String, Collection<String>> connectorAssignments,
|
||||
Map<String, Collection<ConnectorTaskId>> taskAssignments,
|
||||
Map<String, ConnectorsAndTasks> revoked,
|
||||
int delay, short protocolVersion) {
|
||||
Map<String, ExtendedAssignment> groupAssignment = new HashMap<>();
|
||||
for (String member : members) {
|
||||
Collection<String> connectorsToStart = connectorAssignments.getOrDefault(member, Collections.emptyList());
|
||||
Collection<ConnectorTaskId> tasksToStart = taskAssignments.getOrDefault(member, Collections.emptyList());
|
||||
Collection<String> connectorsToStop = revoked.getOrDefault(member, ConnectorsAndTasks.EMPTY).connectors();
|
||||
Collection<ConnectorTaskId> tasksToStop = revoked.getOrDefault(member, ConnectorsAndTasks.EMPTY).tasks();
|
||||
ExtendedAssignment assignment =
|
||||
new ExtendedAssignment(protocolVersion, error, leaderId, leaderUrl, maxOffset,
|
||||
connectorsToStart, tasksToStart, connectorsToStop, tasksToStop, delay);
|
||||
log.debug("Filling assignment: {} -> {}", member, assignment);
|
||||
groupAssignment.put(member, assignment);
|
||||
}
|
||||
log.debug("Finished assignment");
|
||||
return groupAssignment;
|
||||
}
|
||||
|
||||
/**
|
||||
* From a map of workers to assignment objects, generate the equivalent map of workers to
* byte buffers of serialized assignments.
*
* @param assignments the map of worker assignments
* @return the equivalent map of workers to serialized assignments
|
||||
*/
|
||||
protected Map<String, ByteBuffer> serializeAssignments(Map<String, ExtendedAssignment> assignments) {
|
||||
return assignments.entrySet()
|
||||
.stream()
|
||||
.collect(Collectors.toMap(
|
||||
Map.Entry::getKey,
|
||||
e -> IncrementalCooperativeConnectProtocol.serializeAssignment(e.getValue())));
|
||||
}
|
||||
|
||||
private static ConnectorsAndTasks diff(ConnectorsAndTasks base,
|
||||
ConnectorsAndTasks... toSubtract) {
|
||||
Collection<String> connectors = new TreeSet<>(base.connectors());
|
||||
Collection<ConnectorTaskId> tasks = new TreeSet<>(base.tasks());
|
||||
for (ConnectorsAndTasks sub : toSubtract) {
|
||||
connectors.removeAll(sub.connectors());
|
||||
tasks.removeAll(sub.tasks());
|
||||
}
|
||||
return new ConnectorsAndTasks.Builder().with(connectors, tasks).build();
|
||||
}
|
||||
|
||||
private static <T> Map<String, Collection<T>> diff(Map<String, Collection<T>> base,
|
||||
Map<String, Collection<T>> toSubtract) {
|
||||
Map<String, Collection<T>> incremental = new HashMap<>();
|
||||
for (Map.Entry<String, Collection<T>> entry : base.entrySet()) {
|
||||
List<T> values = new ArrayList<>(entry.getValue());
|
||||
values.removeAll(toSubtract.get(entry.getKey()));
|
||||
incremental.put(entry.getKey(), values);
|
||||
}
|
||||
return incremental;
|
||||
}
|
||||
|
||||
private ConnectorsAndTasks assignment(Map<String, ExtendedWorkerState> memberConfigs) {
|
||||
log.debug("Received assignments: {}", memberConfigs);
|
||||
Set<String> connectors = memberConfigs.values()
|
||||
.stream()
|
||||
.flatMap(state -> state.assignment().connectors().stream())
|
||||
.collect(Collectors.toSet());
|
||||
Set<ConnectorTaskId> tasks = memberConfigs.values()
|
||||
.stream()
|
||||
.flatMap(state -> state.assignment().tasks().stream())
|
||||
.collect(Collectors.toSet());
|
||||
return new ConnectorsAndTasks.Builder().with(connectors, tasks).build();
|
||||
}
|
||||
|
||||
private int calculateDelay(long now) {
|
||||
long diff = scheduledRebalance - now;
|
||||
return diff > 0 ? (int) Math.min(diff, maxDelay) : 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Perform a round-robin assignment of connectors to workers with existing worker load. This
|
||||
* assignment tries to balance the load between workers, by assigning connectors to workers
|
||||
* that have equal load, starting with the least loaded workers.
|
||||
*
|
||||
* @param workerAssignment the current worker assignment; assigned connectors are added to this list
|
||||
* @param connectors the connectors to be assigned
|
||||
*/
|
||||
protected void assignConnectors(List<WorkerLoad> workerAssignment, Collection<String> connectors) {
|
||||
workerAssignment.sort(WorkerLoad.connectorComparator());
|
||||
WorkerLoad first = workerAssignment.get(0);
|
||||
|
||||
Iterator<String> load = connectors.iterator();
|
||||
while (load.hasNext()) {
|
||||
int firstLoad = first.connectorsSize();
|
||||
int upTo = IntStream.range(0, workerAssignment.size())
|
||||
.filter(i -> workerAssignment.get(i).connectorsSize() > firstLoad)
|
||||
.findFirst()
|
||||
.orElse(workerAssignment.size());
|
||||
for (WorkerLoad worker : workerAssignment.subList(0, upTo)) {
|
||||
String connector = load.next();
|
||||
log.debug("Assigning connector {} to {}", connector, worker.worker());
|
||||
worker.assign(connector);
|
||||
if (!load.hasNext()) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
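    // Illustrative trace of the round-robin fill above (hypothetical load): with the sorted
    // worker list [w1: 0, w2: 0, w3: 2 connectors] and new connectors [c1, c2, c3], upTo
    // resolves to 2 because w3 already holds more than w1, so c1 -> w1 and c2 -> w2; on the
    // next pass the loads are [1, 1, 2], upTo is again 2, and c3 -> w1.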
/**
|
||||
* Perform a round-robin assignment of tasks to workers with existing worker load. This
|
||||
* assignment tries to balance the load between workers, by assigning tasks to workers that
|
||||
* have equal load, starting with the least loaded workers.
|
||||
*
|
||||
* @param workerAssignment the current worker assignment; assigned tasks are added to this list
|
||||
* @param tasks the tasks to be assigned
|
||||
*/
|
||||
protected void assignTasks(List<WorkerLoad> workerAssignment, Collection<ConnectorTaskId> tasks) {
|
||||
workerAssignment.sort(WorkerLoad.taskComparator());
|
||||
WorkerLoad first = workerAssignment.get(0);
|
||||
|
||||
Iterator<ConnectorTaskId> load = tasks.iterator();
|
||||
while (load.hasNext()) {
|
||||
int firstLoad = first.tasksSize();
|
||||
int upTo = IntStream.range(0, workerAssignment.size())
|
||||
.filter(i -> workerAssignment.get(i).tasksSize() > firstLoad)
|
||||
.findFirst()
|
||||
.orElse(workerAssignment.size());
|
||||
for (WorkerLoad worker : workerAssignment.subList(0, upTo)) {
|
||||
ConnectorTaskId task = load.next();
|
||||
log.debug("Assigning task {} to {}", task, worker.worker());
|
||||
worker.assign(task);
|
||||
if (!load.hasNext()) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private static List<WorkerLoad> workerAssignment(Map<String, ExtendedWorkerState> memberConfigs,
|
||||
ConnectorsAndTasks toExclude) {
|
||||
ConnectorsAndTasks ignore = new ConnectorsAndTasks.Builder()
|
||||
.with(new HashSet<>(toExclude.connectors()), new HashSet<>(toExclude.tasks()))
|
||||
.build();
|
||||
|
||||
return memberConfigs.entrySet().stream()
|
||||
.map(e -> new WorkerLoad.Builder(e.getKey()).with(
|
||||
e.getValue().assignment().connectors().stream()
|
||||
.filter(v -> !ignore.connectors().contains(v))
|
||||
.collect(Collectors.toList()),
|
||||
e.getValue().assignment().tasks().stream()
|
||||
.filter(v -> !ignore.tasks().contains(v))
|
||||
.collect(Collectors.toList())
|
||||
).build()
|
||||
).collect(Collectors.toList());
|
||||
}
|
||||
|
||||
}
|
||||
@@ -0,0 +1,274 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.runtime.distributed;
|
||||
|
||||
import org.apache.kafka.common.protocol.types.ArrayOf;
|
||||
import org.apache.kafka.common.protocol.types.Field;
|
||||
import org.apache.kafka.common.protocol.types.Schema;
|
||||
import org.apache.kafka.common.protocol.types.SchemaException;
|
||||
import org.apache.kafka.common.protocol.types.Struct;
|
||||
import org.apache.kafka.common.protocol.types.Type;
|
||||
|
||||
import java.nio.ByteBuffer;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
|
||||
import static org.apache.kafka.common.message.JoinGroupRequestData.JoinGroupRequestProtocol;
|
||||
import static org.apache.kafka.common.message.JoinGroupRequestData.JoinGroupRequestProtocolCollection;
|
||||
import static org.apache.kafka.common.protocol.types.Type.NULLABLE_BYTES;
|
||||
import static org.apache.kafka.connect.runtime.distributed.ConnectProtocol.ASSIGNMENT_KEY_NAME;
|
||||
import static org.apache.kafka.connect.runtime.distributed.ConnectProtocol.CONFIG_OFFSET_KEY_NAME;
|
||||
import static org.apache.kafka.connect.runtime.distributed.ConnectProtocol.CONFIG_STATE_V0;
|
||||
import static org.apache.kafka.connect.runtime.distributed.ConnectProtocol.CONNECTOR_ASSIGNMENT_V0;
|
||||
import static org.apache.kafka.connect.runtime.distributed.ConnectProtocol.CONNECT_PROTOCOL_HEADER_SCHEMA;
|
||||
import static org.apache.kafka.connect.runtime.distributed.ConnectProtocol.CONNECT_PROTOCOL_V0;
|
||||
import static org.apache.kafka.connect.runtime.distributed.ConnectProtocol.ERROR_KEY_NAME;
|
||||
import static org.apache.kafka.connect.runtime.distributed.ConnectProtocol.LEADER_KEY_NAME;
|
||||
import static org.apache.kafka.connect.runtime.distributed.ConnectProtocol.LEADER_URL_KEY_NAME;
|
||||
import static org.apache.kafka.connect.runtime.distributed.ConnectProtocol.URL_KEY_NAME;
|
||||
import static org.apache.kafka.connect.runtime.distributed.ConnectProtocol.VERSION_KEY_NAME;
|
||||
import static org.apache.kafka.connect.runtime.distributed.ConnectProtocolCompatibility.COMPATIBLE;
|
||||
import static org.apache.kafka.connect.runtime.distributed.ConnectProtocolCompatibility.EAGER;
|
||||
import static org.apache.kafka.connect.runtime.distributed.ConnectProtocolCompatibility.SESSIONED;
|
||||
|
||||
|
||||
/**
|
||||
* This class implements a group protocol for Kafka Connect workers that support incremental and
|
||||
* cooperative rebalancing of connectors and tasks. It includes the format of worker state used when
|
||||
* joining the group and distributing assignments, and the format of assignments of connectors
|
||||
* and tasks to workers.
|
||||
*/
|
||||
public class IncrementalCooperativeConnectProtocol {
|
||||
public static final String ALLOCATION_KEY_NAME = "allocation";
|
||||
public static final String REVOKED_KEY_NAME = "revoked";
|
||||
public static final String SCHEDULED_DELAY_KEY_NAME = "delay";
|
||||
public static final short CONNECT_PROTOCOL_V1 = 1;
|
||||
public static final short CONNECT_PROTOCOL_V2 = 2;
|
||||
public static final boolean TOLERATE_MISSING_FIELDS_WITH_DEFAULTS = true;
|
||||
|
||||
/**
|
||||
* Connect Protocol Header V1:
|
||||
* <pre>
|
||||
* Version => Int16
|
||||
* </pre>
|
||||
*/
|
||||
private static final Struct CONNECT_PROTOCOL_HEADER_V1 = new Struct(CONNECT_PROTOCOL_HEADER_SCHEMA)
|
||||
.set(VERSION_KEY_NAME, CONNECT_PROTOCOL_V1);
|
||||
|
||||
/**
|
||||
* Connect Protocol Header V2:
|
||||
* <pre>
|
||||
* Version => Int16
|
||||
* </pre>
|
||||
* The V2 protocol is schematically identical to V1, but is used to signify that internal request
|
||||
* verification and distribution of session keys is enabled (for more information, see KIP-507:
|
||||
* https://cwiki.apache.org/confluence/display/KAFKA/KIP-507%3A+Securing+Internal+Connect+REST+Endpoints)
|
||||
*/
|
||||
private static final Struct CONNECT_PROTOCOL_HEADER_V2 = new Struct(CONNECT_PROTOCOL_HEADER_SCHEMA)
|
||||
.set(VERSION_KEY_NAME, CONNECT_PROTOCOL_V2);
|
||||
|
||||
|
||||
/**
|
||||
* Config State V1:
|
||||
* <pre>
|
||||
* Url => [String]
|
||||
* ConfigOffset => Int64
|
||||
* </pre>
|
||||
*/
|
||||
public static final Schema CONFIG_STATE_V1 = CONFIG_STATE_V0;
|
||||
|
||||
/**
|
||||
* Allocation V1
|
||||
* <pre>
|
||||
* Current Assignment => [Byte]
|
||||
* </pre>
|
||||
*/
|
||||
public static final Schema ALLOCATION_V1 = new Schema(
|
||||
TOLERATE_MISSING_FIELDS_WITH_DEFAULTS,
|
||||
new Field(ALLOCATION_KEY_NAME, NULLABLE_BYTES, null, true, null));
|
||||
|
||||
/**
|
||||
*
|
||||
* Connector Assignment V1:
|
||||
* <pre>
|
||||
* Connector => [String]
|
||||
* Tasks => [Int32]
|
||||
* </pre>
|
||||
*
|
||||
* <p>Assignments for each worker are a set of connectors and tasks. These are categorized by
|
||||
* connector ID. A sentinel task ID (CONNECTOR_TASK) is used to indicate the connector itself
|
||||
* (i.e. that the assignment includes responsibility for running the Connector instance in
|
||||
* addition to any tasks it generates).</p>
|
||||
*/
|
||||
public static final Schema CONNECTOR_ASSIGNMENT_V1 = CONNECTOR_ASSIGNMENT_V0;
|
||||
|
||||
/**
|
||||
* Raw (non-versioned) assignment V1:
|
||||
* <pre>
|
||||
* Error => Int16
|
||||
* Leader => [String]
|
||||
* LeaderUrl => [String]
|
||||
* ConfigOffset => Int64
|
||||
* Assignment => [Connector Assignment]
|
||||
* Revoked => [Connector Assignment]
|
||||
* ScheduledDelay => Int32
|
||||
* </pre>
|
||||
*/
|
||||
public static final Schema ASSIGNMENT_V1 = new Schema(
|
||||
TOLERATE_MISSING_FIELDS_WITH_DEFAULTS,
|
||||
new Field(ERROR_KEY_NAME, Type.INT16),
|
||||
new Field(LEADER_KEY_NAME, Type.STRING),
|
||||
new Field(LEADER_URL_KEY_NAME, Type.STRING),
|
||||
new Field(CONFIG_OFFSET_KEY_NAME, Type.INT64),
|
||||
new Field(ASSIGNMENT_KEY_NAME, ArrayOf.nullable(CONNECTOR_ASSIGNMENT_V1), null, true, null),
|
||||
new Field(REVOKED_KEY_NAME, ArrayOf.nullable(CONNECTOR_ASSIGNMENT_V1), null, true, null),
|
||||
new Field(SCHEDULED_DELAY_KEY_NAME, Type.INT32, null, 0));
|
||||
|
||||
/**
|
||||
* The fields are serialized in sequence as follows:
|
||||
* Subscription V1:
|
||||
* <pre>
|
||||
* Version => Int16
|
||||
* Url => [String]
|
||||
* ConfigOffset => Int64
|
||||
* Current Assignment => [Byte]
|
||||
* </pre>
|
||||
*/
|
||||
public static ByteBuffer serializeMetadata(ExtendedWorkerState workerState, boolean sessioned) {
|
||||
Struct configState = new Struct(CONFIG_STATE_V1)
|
||||
.set(URL_KEY_NAME, workerState.url())
|
||||
.set(CONFIG_OFFSET_KEY_NAME, workerState.offset());
|
||||
// Not a big issue if we embed the protocol version with the assignment in the metadata
|
||||
Struct allocation = new Struct(ALLOCATION_V1)
|
||||
.set(ALLOCATION_KEY_NAME, serializeAssignment(workerState.assignment()));
|
||||
Struct connectProtocolHeader = sessioned ? CONNECT_PROTOCOL_HEADER_V2 : CONNECT_PROTOCOL_HEADER_V1;
|
||||
ByteBuffer buffer = ByteBuffer.allocate(connectProtocolHeader.sizeOf()
|
||||
+ CONFIG_STATE_V1.sizeOf(configState)
|
||||
+ ALLOCATION_V1.sizeOf(allocation));
|
||||
connectProtocolHeader.writeTo(buffer);
|
||||
CONFIG_STATE_V1.write(buffer, configState);
|
||||
ALLOCATION_V1.write(buffer, allocation);
|
||||
buffer.flip();
|
||||
return buffer;
|
||||
}
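    // A minimal round-trip sketch (values are illustrative): the buffer built here is what a
    // worker advertises in its JoinGroup subscription, and deserializeMetadata below reverses it.
    //
    //   ExtendedWorkerState state =
    //           new ExtendedWorkerState("http://worker-1:8083", 42L, ExtendedAssignment.empty());
    //   ByteBuffer metadata = serializeMetadata(state, false);
    //   ExtendedWorkerState decoded = deserializeMetadata(metadata);
    //   // decoded.url() is "http://worker-1:8083", decoded.offset() is 42L, and the assignment
    //   // comes back as ExtendedAssignment.empty()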
/**
|
||||
* Returns the collection of Connect protocols that are supported by this version along
|
||||
* with their serialized metadata. The protocols are ordered by preference.
|
||||
*
|
||||
* @param workerState the current state of the worker metadata
|
||||
* @param sessioned whether the {@link ConnectProtocolCompatibility#SESSIONED} protocol should
|
||||
* be included in the collection of supported protocols
|
||||
* @return the collection of Connect protocol metadata
|
||||
*/
|
||||
public static JoinGroupRequestProtocolCollection metadataRequest(ExtendedWorkerState workerState, boolean sessioned) {
|
||||
// Order matters in terms of protocol preference
|
||||
List<JoinGroupRequestProtocol> joinGroupRequestProtocols = new ArrayList<>();
|
||||
if (sessioned) {
|
||||
joinGroupRequestProtocols.add(new JoinGroupRequestProtocol()
|
||||
.setName(SESSIONED.protocol())
|
||||
.setMetadata(IncrementalCooperativeConnectProtocol.serializeMetadata(workerState, true).array())
|
||||
);
|
||||
}
|
||||
joinGroupRequestProtocols.add(new JoinGroupRequestProtocol()
|
||||
.setName(COMPATIBLE.protocol())
|
||||
.setMetadata(IncrementalCooperativeConnectProtocol.serializeMetadata(workerState, false).array())
|
||||
);
|
||||
joinGroupRequestProtocols.add(new JoinGroupRequestProtocol()
|
||||
.setName(EAGER.protocol())
|
||||
.setMetadata(ConnectProtocol.serializeMetadata(workerState).array())
|
||||
);
|
||||
return new JoinGroupRequestProtocolCollection(joinGroupRequestProtocols.iterator());
|
||||
}
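    // With sessioned == true the advertised preference order above is sessioned, then compatible,
    // then eager; with sessioned == false only compatible and eager are advertised. The group
    // coordinator then picks a protocol that every member supports, taking this preference
    // order into account.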
/**
|
||||
* Given a byte buffer that contains protocol metadata, return the deserialized form of the
* metadata.
|
||||
*
|
||||
* @param buffer A buffer containing the protocols metadata
|
||||
* @return the deserialized metadata
|
||||
* @throws SchemaException on incompatible Connect protocol version
|
||||
*/
|
||||
public static ExtendedWorkerState deserializeMetadata(ByteBuffer buffer) {
|
||||
Struct header = CONNECT_PROTOCOL_HEADER_SCHEMA.read(buffer);
|
||||
Short version = header.getShort(VERSION_KEY_NAME);
|
||||
checkVersionCompatibility(version);
|
||||
Struct configState = CONFIG_STATE_V1.read(buffer);
|
||||
long configOffset = configState.getLong(CONFIG_OFFSET_KEY_NAME);
|
||||
String url = configState.getString(URL_KEY_NAME);
|
||||
Struct allocation = ALLOCATION_V1.read(buffer);
|
||||
// Protocol version is embedded with the assignment in the metadata
|
||||
ExtendedAssignment assignment = deserializeAssignment(allocation.getBytes(ALLOCATION_KEY_NAME));
|
||||
return new ExtendedWorkerState(url, configOffset, assignment);
|
||||
}
|
||||
|
||||
/**
|
||||
* The fields are serialized in sequence as follows:
|
||||
* Complete Assignment V1:
|
||||
* <pre>
|
||||
* Version => Int16
|
||||
* Error => Int16
|
||||
* Leader => [String]
|
||||
* LeaderUrl => [String]
|
||||
* ConfigOffset => Int64
|
||||
* Assignment => [Connector Assignment]
|
||||
* Revoked => [Connector Assignment]
|
||||
* ScheduledDelay => Int32
|
||||
* </pre>
|
||||
*/
|
||||
public static ByteBuffer serializeAssignment(ExtendedAssignment assignment) {
|
||||
// comparison depends on reference equality for now
|
||||
if (assignment == null || ExtendedAssignment.empty().equals(assignment)) {
|
||||
return null;
|
||||
}
|
||||
Struct struct = assignment.toStruct();
|
||||
ByteBuffer buffer = ByteBuffer.allocate(CONNECT_PROTOCOL_HEADER_V1.sizeOf()
|
||||
+ ASSIGNMENT_V1.sizeOf(struct));
|
||||
CONNECT_PROTOCOL_HEADER_V1.writeTo(buffer);
|
||||
ASSIGNMENT_V1.write(buffer, struct);
|
||||
buffer.flip();
|
||||
return buffer;
|
||||
}
|
||||
|
||||
/**
|
||||
* Given a byte buffer that contains an assignment as defined by this protocol, return the
|
||||
* deserialized form of the assignment.
|
||||
*
|
||||
* @param buffer the buffer containing a serialized assignment
|
||||
* @return the deserialized assignment
|
||||
* @throws SchemaException on incompatible Connect protocol version
|
||||
*/
|
||||
public static ExtendedAssignment deserializeAssignment(ByteBuffer buffer) {
|
||||
if (buffer == null) {
|
||||
return null;
|
||||
}
|
||||
Struct header = CONNECT_PROTOCOL_HEADER_SCHEMA.read(buffer);
|
||||
Short version = header.getShort(VERSION_KEY_NAME);
|
||||
checkVersionCompatibility(version);
|
||||
Struct struct = ASSIGNMENT_V1.read(buffer);
|
||||
return ExtendedAssignment.fromStruct(version, struct);
|
||||
}
|
||||
|
||||
private static void checkVersionCompatibility(short version) {
|
||||
// check for invalid versions
|
||||
if (version < CONNECT_PROTOCOL_V0)
|
||||
throw new SchemaException("Unsupported subscription version: " + version);
|
||||
|
||||
// otherwise, assume versions can be parsed
|
||||
}
|
||||
|
||||
}
|
||||
@@ -0,0 +1,29 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.runtime.distributed;
|
||||
|
||||
/**
|
||||
* Thrown when a request intended for the owner of a task or connector is received by a worker
* (typically the leader) that does not own it.
|
||||
*/
|
||||
public class NotAssignedException extends RequestTargetException {
|
||||
|
||||
public NotAssignedException(String message, String ownerUrl) {
|
||||
super(message, ownerUrl);
|
||||
}
|
||||
|
||||
}
|
||||
@@ -0,0 +1,29 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.runtime.distributed;
|
||||
|
||||
/**
|
||||
* Indicates an operation was not permitted because it can only be performed on the leader and this worker is not currently
|
||||
* the leader.
|
||||
*/
|
||||
public class NotLeaderException extends RequestTargetException {
|
||||
|
||||
public NotLeaderException(String msg, String leaderUrl) {
|
||||
super(msg, leaderUrl);
|
||||
}
|
||||
|
||||
}
|
||||
@@ -0,0 +1,27 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.connect.runtime.distributed;

import org.apache.kafka.connect.errors.ConnectException;

public class RebalanceNeededException extends ConnectException {

    public RebalanceNeededException(String s) {
        super(s);
    }

}
@@ -0,0 +1,47 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.connect.runtime.distributed;

import org.apache.kafka.connect.errors.ConnectException;

/**
 * Raised when a request has been received by a worker which cannot handle it,
 * but can forward it to the right target.
 */
public class RequestTargetException extends ConnectException {
    private final String forwardUrl;

    public RequestTargetException(String s, String forwardUrl) {
        super(s);
        this.forwardUrl = forwardUrl;
    }

    public RequestTargetException(String s, Throwable throwable, String forwardUrl) {
        super(s, throwable);
        this.forwardUrl = forwardUrl;
    }

    public RequestTargetException(Throwable throwable, String forwardUrl) {
        super(throwable);
        this.forwardUrl = forwardUrl;
    }

    public String forwardUrl() {
        return forwardUrl;
    }

}
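// Editor's sketch (not part of the patch): how a caller might react to the exception
// hierarchy above. A worker that receives a request it cannot serve replays the call
// against forwardUrl(); the HttpCall interface below is a hypothetical stand-in for
// whatever HTTP client the herder actually uses.
import org.apache.kafka.connect.runtime.distributed.RequestTargetException;

public class ForwardingSketch {

    @FunctionalInterface
    interface HttpCall {
        String send(String baseUrl) throws Exception;
    }

    // Try the local worker first; on NotLeaderException / NotAssignedException
    // (both subclasses of RequestTargetException) replay the call at the owner's URL.
    static String callWithForwarding(HttpCall call, String localUrl) throws Exception {
        try {
            return call.send(localUrl);
        } catch (RequestTargetException e) {
            String owner = e.forwardUrl();
            if (owner == null) {
                throw e; // nowhere to forward to; surface the original error
            }
            return call.send(owner);
        }
    }
}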
@@ -0,0 +1,596 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.runtime.distributed;
|
||||
|
||||
import org.apache.kafka.clients.consumer.internals.AbstractCoordinator;
|
||||
import org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient;
|
||||
import org.apache.kafka.clients.GroupRebalanceConfig;
|
||||
import org.apache.kafka.common.metrics.Measurable;
|
||||
import org.apache.kafka.common.metrics.MetricConfig;
|
||||
import org.apache.kafka.common.metrics.Metrics;
|
||||
import org.apache.kafka.common.requests.JoinGroupRequest;
|
||||
import org.apache.kafka.common.utils.LogContext;
|
||||
import org.apache.kafka.common.utils.Time;
|
||||
import org.apache.kafka.common.utils.Timer;
|
||||
import org.apache.kafka.connect.storage.ConfigBackingStore;
|
||||
import org.apache.kafka.connect.util.ConnectorTaskId;
|
||||
import org.slf4j.Logger;
|
||||
|
||||
import java.io.Closeable;
|
||||
import java.nio.ByteBuffer;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.Comparator;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Objects;
|
||||
|
||||
import static org.apache.kafka.common.message.JoinGroupRequestData.JoinGroupRequestProtocolCollection;
|
||||
import static org.apache.kafka.common.message.JoinGroupResponseData.JoinGroupResponseMember;
|
||||
import static org.apache.kafka.connect.runtime.distributed.ConnectProtocolCompatibility.EAGER;
|
||||
|
||||
/**
|
||||
 * This class manages the coordination process with the Kafka group coordinator on the broker, which is used to
 * distribute connector and task assignments among the workers of a Connect cluster.
|
||||
*/
|
||||
public class WorkerCoordinator extends AbstractCoordinator implements Closeable {
|
||||
// Currently doesn't support multiple task assignment strategies, so we just fill in a default value
|
||||
public static final String DEFAULT_SUBPROTOCOL = "default";
|
||||
|
||||
private final Logger log;
|
||||
private final String restUrl;
|
||||
private final ConfigBackingStore configStorage;
|
||||
private volatile ExtendedAssignment assignmentSnapshot;
|
||||
private ClusterConfigState configSnapshot;
|
||||
private final WorkerRebalanceListener listener;
|
||||
private final ConnectProtocolCompatibility protocolCompatibility;
|
||||
private LeaderState leaderState;
|
||||
|
||||
private boolean rejoinRequested;
|
||||
private volatile ConnectProtocolCompatibility currentConnectProtocol;
|
||||
private final ConnectAssignor eagerAssignor;
|
||||
private final ConnectAssignor incrementalAssignor;
|
||||
private final int coordinatorDiscoveryTimeoutMs;
|
||||
|
||||
/**
|
||||
* Initialize the coordination manager.
|
||||
*/
|
||||
public WorkerCoordinator(GroupRebalanceConfig config,
|
||||
LogContext logContext,
|
||||
ConsumerNetworkClient client,
|
||||
Metrics metrics,
|
||||
String metricGrpPrefix,
|
||||
Time time,
|
||||
String restUrl,
|
||||
ConfigBackingStore configStorage,
|
||||
WorkerRebalanceListener listener,
|
||||
ConnectProtocolCompatibility protocolCompatibility,
|
||||
int maxDelay) {
|
||||
super(config,
|
||||
logContext,
|
||||
client,
|
||||
metrics,
|
||||
metricGrpPrefix,
|
||||
time);
|
||||
this.log = logContext.logger(WorkerCoordinator.class);
|
||||
this.restUrl = restUrl;
|
||||
this.configStorage = configStorage;
|
||||
this.assignmentSnapshot = null;
|
||||
new WorkerCoordinatorMetrics(metrics, metricGrpPrefix);
|
||||
this.listener = listener;
|
||||
this.rejoinRequested = false;
|
||||
this.protocolCompatibility = protocolCompatibility;
|
||||
this.incrementalAssignor = new IncrementalCooperativeAssignor(logContext, time, maxDelay);
|
||||
this.eagerAssignor = new EagerAssignor(logContext);
|
||||
this.currentConnectProtocol = protocolCompatibility;
|
||||
this.coordinatorDiscoveryTimeoutMs = config.heartbeatIntervalMs;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void requestRejoin() {
|
||||
rejoinRequested = true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String protocolType() {
|
||||
return "connect";
|
||||
}
|
||||
|
||||
// expose for tests
|
||||
@Override
|
||||
protected synchronized boolean ensureCoordinatorReady(final Timer timer) {
|
||||
return super.ensureCoordinatorReady(timer);
|
||||
}
|
||||
|
||||
public void poll(long timeout) {
|
||||
// poll for io until the timeout expires
|
||||
final long start = time.milliseconds();
|
||||
long now = start;
|
||||
long remaining;
|
||||
|
||||
do {
|
||||
if (coordinatorUnknown()) {
|
||||
log.debug("Broker coordinator is marked unknown. Attempting discovery with a timeout of {}ms",
|
||||
coordinatorDiscoveryTimeoutMs);
|
||||
if (ensureCoordinatorReady(time.timer(coordinatorDiscoveryTimeoutMs))) {
|
||||
log.debug("Broker coordinator is ready");
|
||||
} else {
|
||||
log.debug("Can not connect to broker coordinator");
|
||||
final ExtendedAssignment localAssignmentSnapshot = assignmentSnapshot;
|
||||
if (localAssignmentSnapshot != null && !localAssignmentSnapshot.failed()) {
|
||||
log.info("Broker coordinator was unreachable for {}ms. Revoking previous assignment {} to " +
|
||||
"avoid running tasks while not being a member the group", coordinatorDiscoveryTimeoutMs, localAssignmentSnapshot);
|
||||
listener.onRevoked(localAssignmentSnapshot.leader(), localAssignmentSnapshot.connectors(), localAssignmentSnapshot.tasks());
|
||||
assignmentSnapshot = null;
|
||||
}
|
||||
}
|
||||
now = time.milliseconds();
|
||||
}
|
||||
|
||||
if (rejoinNeededOrPending()) {
|
||||
ensureActiveGroup();
|
||||
now = time.milliseconds();
|
||||
}
|
||||
|
||||
pollHeartbeat(now);
|
||||
|
||||
long elapsed = now - start;
|
||||
remaining = timeout - elapsed;
|
||||
|
||||
// Note that because the network client is shared with the background heartbeat thread,
|
||||
// we do not want to block in poll longer than the time to the next heartbeat.
|
||||
long pollTimeout = Math.min(Math.max(0, remaining), timeToNextHeartbeat(now));
|
||||
client.poll(time.timer(pollTimeout));
|
||||
|
||||
now = time.milliseconds();
|
||||
elapsed = now - start;
|
||||
remaining = timeout - elapsed;
|
||||
} while (remaining > 0);
|
||||
}
|
||||
|
||||
@Override
|
||||
public JoinGroupRequestProtocolCollection metadata() {
|
||||
configSnapshot = configStorage.snapshot();
|
||||
final ExtendedAssignment localAssignmentSnapshot = assignmentSnapshot;
|
||||
ExtendedWorkerState workerState = new ExtendedWorkerState(restUrl, configSnapshot.offset(), localAssignmentSnapshot);
|
||||
switch (protocolCompatibility) {
|
||||
case EAGER:
|
||||
return ConnectProtocol.metadataRequest(workerState);
|
||||
case COMPATIBLE:
|
||||
return IncrementalCooperativeConnectProtocol.metadataRequest(workerState, false);
|
||||
case SESSIONED:
|
||||
return IncrementalCooperativeConnectProtocol.metadataRequest(workerState, true);
|
||||
default:
|
||||
throw new IllegalStateException("Unknown Connect protocol compatibility mode " + protocolCompatibility);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void onJoinComplete(int generation, String memberId, String protocol, ByteBuffer memberAssignment) {
|
||||
ExtendedAssignment newAssignment = IncrementalCooperativeConnectProtocol.deserializeAssignment(memberAssignment);
|
||||
log.debug("Deserialized new assignment: {}", newAssignment);
|
||||
currentConnectProtocol = ConnectProtocolCompatibility.fromProtocol(protocol);
|
||||
// At this point we always consider ourselves to be a member of the cluster, even if there was an assignment
|
||||
// error (the leader couldn't make the assignment) or we are behind the config and cannot yet work on our assigned
|
||||
// tasks. It's the responsibility of the code driving this process to decide how to react (e.g. trying to get
|
||||
// up to date, try to rejoin again, leaving the group and backing off, etc.).
|
||||
rejoinRequested = false;
|
||||
if (currentConnectProtocol != EAGER) {
|
||||
if (!newAssignment.revokedConnectors().isEmpty() || !newAssignment.revokedTasks().isEmpty()) {
|
||||
listener.onRevoked(newAssignment.leader(), newAssignment.revokedConnectors(), newAssignment.revokedTasks());
|
||||
}
|
||||
|
||||
final ExtendedAssignment localAssignmentSnapshot = assignmentSnapshot;
|
||||
if (localAssignmentSnapshot != null) {
|
||||
localAssignmentSnapshot.connectors().removeAll(newAssignment.revokedConnectors());
|
||||
localAssignmentSnapshot.tasks().removeAll(newAssignment.revokedTasks());
|
||||
log.debug("After revocations snapshot of assignment: {}", localAssignmentSnapshot);
|
||||
newAssignment.connectors().addAll(localAssignmentSnapshot.connectors());
|
||||
newAssignment.tasks().addAll(localAssignmentSnapshot.tasks());
|
||||
}
|
||||
log.debug("Augmented new assignment: {}", newAssignment);
|
||||
}
|
||||
assignmentSnapshot = newAssignment;
|
||||
listener.onAssigned(newAssignment, generation);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected Map<String, ByteBuffer> performAssignment(String leaderId, String protocol, List<JoinGroupResponseMember> allMemberMetadata) {
|
||||
return ConnectProtocolCompatibility.fromProtocol(protocol) == EAGER
|
||||
? eagerAssignor.performAssignment(leaderId, protocol, allMemberMetadata, this)
|
||||
: incrementalAssignor.performAssignment(leaderId, protocol, allMemberMetadata, this);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void onJoinPrepare(int generation, String memberId) {
|
||||
log.info("Rebalance started");
|
||||
leaderState(null);
|
||||
final ExtendedAssignment localAssignmentSnapshot = assignmentSnapshot;
|
||||
if (currentConnectProtocol == EAGER) {
|
||||
log.debug("Revoking previous assignment {}", localAssignmentSnapshot);
|
||||
if (localAssignmentSnapshot != null && !localAssignmentSnapshot.failed())
|
||||
listener.onRevoked(localAssignmentSnapshot.leader(), localAssignmentSnapshot.connectors(), localAssignmentSnapshot.tasks());
|
||||
} else {
|
||||
log.debug("Cooperative rebalance triggered. Keeping assignment {} until it's "
|
||||
+ "explicitly revoked.", localAssignmentSnapshot);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
protected boolean rejoinNeededOrPending() {
|
||||
final ExtendedAssignment localAssignmentSnapshot = assignmentSnapshot;
|
||||
return super.rejoinNeededOrPending() || (localAssignmentSnapshot == null || localAssignmentSnapshot.failed()) || rejoinRequested;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String memberId() {
|
||||
Generation generation = generationIfStable();
|
||||
if (generation != null)
|
||||
return generation.memberId;
|
||||
return JoinGroupRequest.UNKNOWN_MEMBER_ID;
|
||||
}
|
||||
|
||||
/**
|
||||
* Return the current generation. The generation refers to this worker's knowledge with
|
||||
* respect to which generation is the latest one and, therefore, this information is local.
|
||||
*
|
||||
* @return the generation ID or -1 if no generation is defined
|
||||
*/
|
||||
public int generationId() {
|
||||
return super.generation().generationId;
|
||||
}
|
||||
|
||||
private boolean isLeader() {
|
||||
final ExtendedAssignment localAssignmentSnapshot = assignmentSnapshot;
|
||||
return localAssignmentSnapshot != null && memberId().equals(localAssignmentSnapshot.leader());
|
||||
}
|
||||
|
||||
public String ownerUrl(String connector) {
|
||||
if (rejoinNeededOrPending() || !isLeader())
|
||||
return null;
|
||||
return leaderState().ownerUrl(connector);
|
||||
}
|
||||
|
||||
public String ownerUrl(ConnectorTaskId task) {
|
||||
if (rejoinNeededOrPending() || !isLeader())
|
||||
return null;
|
||||
return leaderState().ownerUrl(task);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get an up-to-date snapshot of the cluster configuration.
|
||||
*
|
||||
* @return the state of the cluster configuration; the result is not locally cached
|
||||
*/
|
||||
public ClusterConfigState configFreshSnapshot() {
|
||||
return configStorage.snapshot();
|
||||
}
|
||||
|
||||
/**
|
||||
* Get a snapshot of the cluster configuration.
|
||||
*
|
||||
* @return the state of the cluster configuration
|
||||
*/
|
||||
public ClusterConfigState configSnapshot() {
|
||||
return configSnapshot;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the state of the cluster configuration to this worker coordinator.
|
||||
*
|
||||
* @param update the updated state of the cluster configuration
|
||||
*/
|
||||
public void configSnapshot(ClusterConfigState update) {
|
||||
configSnapshot = update;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the leader state stored in this worker coordinator.
|
||||
*
|
||||
* @return the leader state
|
||||
*/
|
||||
private LeaderState leaderState() {
|
||||
return leaderState;
|
||||
}
|
||||
|
||||
/**
|
||||
* Store the leader state to this worker coordinator.
|
||||
*
|
||||
* @param update the updated leader state
|
||||
*/
|
||||
public void leaderState(LeaderState update) {
|
||||
leaderState = update;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the version of the connect protocol that is currently active in the group of workers.
|
||||
*
|
||||
* @return the current connect protocol version
|
||||
*/
|
||||
public short currentProtocolVersion() {
|
||||
return currentConnectProtocol.protocolVersion();
|
||||
}
|
||||
|
||||
private class WorkerCoordinatorMetrics {
|
||||
public final String metricGrpName;
|
||||
|
||||
public WorkerCoordinatorMetrics(Metrics metrics, String metricGrpPrefix) {
|
||||
this.metricGrpName = metricGrpPrefix + "-coordinator-metrics";
|
||||
|
||||
Measurable numConnectors = new Measurable() {
|
||||
@Override
|
||||
public double measure(MetricConfig config, long now) {
|
||||
final ExtendedAssignment localAssignmentSnapshot = assignmentSnapshot;
|
||||
if (localAssignmentSnapshot == null) {
|
||||
return 0.0;
|
||||
}
|
||||
return localAssignmentSnapshot.connectors().size();
|
||||
}
|
||||
};
|
||||
|
||||
Measurable numTasks = new Measurable() {
|
||||
@Override
|
||||
public double measure(MetricConfig config, long now) {
|
||||
final ExtendedAssignment localAssignmentSnapshot = assignmentSnapshot;
|
||||
if (localAssignmentSnapshot == null) {
|
||||
return 0.0;
|
||||
}
|
||||
return localAssignmentSnapshot.tasks().size();
|
||||
}
|
||||
};
|
||||
|
||||
metrics.addMetric(metrics.metricName("assigned-connectors",
|
||||
this.metricGrpName,
|
||||
"The number of connector instances currently assigned to this consumer"), numConnectors);
|
||||
metrics.addMetric(metrics.metricName("assigned-tasks",
|
||||
this.metricGrpName,
|
||||
"The number of tasks currently assigned to this consumer"), numTasks);
|
||||
}
|
||||
}
|
||||
|
||||
public static <K, V> Map<V, K> invertAssignment(Map<K, Collection<V>> assignment) {
|
||||
Map<V, K> inverted = new HashMap<>();
|
||||
for (Map.Entry<K, Collection<V>> assignmentEntry : assignment.entrySet()) {
|
||||
K key = assignmentEntry.getKey();
|
||||
for (V value : assignmentEntry.getValue())
|
||||
inverted.put(value, key);
|
||||
}
|
||||
return inverted;
|
||||
}
|
||||
|
||||
public static class LeaderState {
|
||||
private final Map<String, ExtendedWorkerState> allMembers;
|
||||
private final Map<String, String> connectorOwners;
|
||||
private final Map<ConnectorTaskId, String> taskOwners;
|
||||
|
||||
public LeaderState(Map<String, ExtendedWorkerState> allMembers,
|
||||
Map<String, Collection<String>> connectorAssignment,
|
||||
Map<String, Collection<ConnectorTaskId>> taskAssignment) {
|
||||
this.allMembers = allMembers;
|
||||
this.connectorOwners = invertAssignment(connectorAssignment);
|
||||
this.taskOwners = invertAssignment(taskAssignment);
|
||||
}
|
||||
|
||||
private String ownerUrl(ConnectorTaskId id) {
|
||||
String ownerId = taskOwners.get(id);
|
||||
if (ownerId == null)
|
||||
return null;
|
||||
return allMembers.get(ownerId).url();
|
||||
}
|
||||
|
||||
private String ownerUrl(String connector) {
|
||||
String ownerId = connectorOwners.get(connector);
|
||||
if (ownerId == null)
|
||||
return null;
|
||||
return allMembers.get(ownerId).url();
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
public static class ConnectorsAndTasks {
|
||||
public static final ConnectorsAndTasks EMPTY =
|
||||
new ConnectorsAndTasks(Collections.emptyList(), Collections.emptyList());
|
||||
|
||||
private final Collection<String> connectors;
|
||||
private final Collection<ConnectorTaskId> tasks;
|
||||
|
||||
private ConnectorsAndTasks(Collection<String> connectors, Collection<ConnectorTaskId> tasks) {
|
||||
this.connectors = connectors;
|
||||
this.tasks = tasks;
|
||||
}
|
||||
|
||||
public static class Builder {
|
||||
private Collection<String> withConnectors;
|
||||
private Collection<ConnectorTaskId> withTasks;
|
||||
|
||||
public Builder() {
|
||||
}
|
||||
|
||||
public ConnectorsAndTasks.Builder withCopies(Collection<String> connectors,
|
||||
Collection<ConnectorTaskId> tasks) {
|
||||
withConnectors = new ArrayList<>(connectors);
|
||||
withTasks = new ArrayList<>(tasks);
|
||||
return this;
|
||||
}
|
||||
|
||||
public ConnectorsAndTasks.Builder with(Collection<String> connectors,
|
||||
Collection<ConnectorTaskId> tasks) {
|
||||
withConnectors = new ArrayList<>(connectors);
|
||||
withTasks = new ArrayList<>(tasks);
|
||||
return this;
|
||||
}
|
||||
|
||||
public ConnectorsAndTasks build() {
|
||||
return new ConnectorsAndTasks(
|
||||
withConnectors != null ? withConnectors : new ArrayList<>(),
|
||||
withTasks != null ? withTasks : new ArrayList<>());
|
||||
}
|
||||
}
|
||||
|
||||
public Collection<String> connectors() {
|
||||
return connectors;
|
||||
}
|
||||
|
||||
public Collection<ConnectorTaskId> tasks() {
|
||||
return tasks;
|
||||
}
|
||||
|
||||
public int size() {
|
||||
return connectors.size() + tasks.size();
|
||||
}
|
||||
|
||||
public boolean isEmpty() {
|
||||
return connectors.isEmpty() && tasks.isEmpty();
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "{ connectorIds=" + connectors + ", taskIds=" + tasks + '}';
|
||||
}
|
||||
}
|
||||
|
||||
public static class WorkerLoad {
|
||||
private final String worker;
|
||||
private final Collection<String> connectors;
|
||||
private final Collection<ConnectorTaskId> tasks;
|
||||
|
||||
private WorkerLoad(
|
||||
String worker,
|
||||
Collection<String> connectors,
|
||||
Collection<ConnectorTaskId> tasks
|
||||
) {
|
||||
this.worker = worker;
|
||||
this.connectors = connectors;
|
||||
this.tasks = tasks;
|
||||
}
|
||||
|
||||
public static class Builder {
|
||||
private String withWorker;
|
||||
private Collection<String> withConnectors;
|
||||
private Collection<ConnectorTaskId> withTasks;
|
||||
|
||||
public Builder(String worker) {
|
||||
this.withWorker = Objects.requireNonNull(worker, "worker cannot be null");
|
||||
}
|
||||
|
||||
public WorkerLoad.Builder withCopies(Collection<String> connectors,
|
||||
Collection<ConnectorTaskId> tasks) {
|
||||
withConnectors = new ArrayList<>(
|
||||
Objects.requireNonNull(connectors, "connectors may be empty but not null"));
|
||||
withTasks = new ArrayList<>(
|
||||
Objects.requireNonNull(tasks, "tasks may be empty but not null"));
|
||||
return this;
|
||||
}
|
||||
|
||||
public WorkerLoad.Builder with(Collection<String> connectors,
|
||||
Collection<ConnectorTaskId> tasks) {
|
||||
withConnectors = Objects.requireNonNull(connectors,
|
||||
"connectors may be empty but not null");
|
||||
withTasks = Objects.requireNonNull(tasks, "tasks may be empty but not null");
|
||||
return this;
|
||||
}
|
||||
|
||||
public WorkerLoad build() {
|
||||
return new WorkerLoad(
|
||||
withWorker,
|
||||
withConnectors != null ? withConnectors : new ArrayList<>(),
|
||||
withTasks != null ? withTasks : new ArrayList<>());
|
||||
}
|
||||
}
|
||||
|
||||
public String worker() {
|
||||
return worker;
|
||||
}
|
||||
|
||||
public Collection<String> connectors() {
|
||||
return connectors;
|
||||
}
|
||||
|
||||
public Collection<ConnectorTaskId> tasks() {
|
||||
return tasks;
|
||||
}
|
||||
|
||||
public int connectorsSize() {
|
||||
return connectors.size();
|
||||
}
|
||||
|
||||
public int tasksSize() {
|
||||
return tasks.size();
|
||||
}
|
||||
|
||||
public void assign(String connector) {
|
||||
connectors.add(connector);
|
||||
}
|
||||
|
||||
public void assign(ConnectorTaskId task) {
|
||||
tasks.add(task);
|
||||
}
|
||||
|
||||
public int size() {
|
||||
return connectors.size() + tasks.size();
|
||||
}
|
||||
|
||||
public boolean isEmpty() {
|
||||
return connectors.isEmpty() && tasks.isEmpty();
|
||||
}
|
||||
|
||||
public static Comparator<WorkerLoad> connectorComparator() {
|
||||
return (left, right) -> {
|
||||
int res = left.connectors.size() - right.connectors.size();
|
||||
return res != 0 ? res : left.worker == null
|
||||
? right.worker == null ? 0 : -1
|
||||
: left.worker.compareTo(right.worker);
|
||||
};
|
||||
}
|
||||
|
||||
public static Comparator<WorkerLoad> taskComparator() {
|
||||
return (left, right) -> {
|
||||
int res = left.tasks.size() - right.tasks.size();
|
||||
return res != 0 ? res : left.worker == null
|
||||
? right.worker == null ? 0 : -1
|
||||
: left.worker.compareTo(right.worker);
|
||||
};
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "{ worker=" + worker + ", connectorIds=" + connectors + ", taskIds=" + tasks + '}';
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (this == o) {
|
||||
return true;
|
||||
}
|
||||
if (!(o instanceof WorkerLoad)) {
|
||||
return false;
|
||||
}
|
||||
WorkerLoad that = (WorkerLoad) o;
|
||||
return worker.equals(that.worker) &&
|
||||
connectors.equals(that.connectors) &&
|
||||
tasks.equals(that.tasks);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return Objects.hash(worker, connectors, tasks);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
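// Editor's sketch (not part of the patch): exercising the static helpers that
// WorkerCoordinator exposes above. The connector and task names are made up.
import org.apache.kafka.connect.runtime.distributed.WorkerCoordinator;
import org.apache.kafka.connect.util.ConnectorTaskId;

import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Map;

public class CoordinatorHelpersSketch {
    public static void main(String[] args) {
        // invertAssignment turns worker -> connectors into connector -> worker,
        // which is how LeaderState answers "who owns connector X".
        Map<String, Collection<String>> byWorker = Map.<String, Collection<String>>of(
                "worker-1", Arrays.asList("jdbc-source", "es-sink"),
                "worker-2", Collections.singletonList("s3-sink"));
        Map<String, String> byConnector = WorkerCoordinator.invertAssignment(byWorker);
        System.out.println(byConnector.get("s3-sink")); // worker-2

        // ConnectorsAndTasks.Builder collects the connectors and tasks touched by a rebalance.
        WorkerCoordinator.ConnectorsAndTasks revoked = new WorkerCoordinator.ConnectorsAndTasks.Builder()
                .withCopies(Collections.singletonList("jdbc-source"),
                        Collections.singletonList(new ConnectorTaskId("jdbc-source", 0)))
                .build();
        System.out.println(revoked.size()); // 2

        // WorkerLoad orders workers by how many connectors (or tasks) they already carry,
        // which lets an assignor pick the least-loaded worker first.
        WorkerCoordinator.WorkerLoad load = new WorkerCoordinator.WorkerLoad.Builder("worker-1")
                .withCopies(Arrays.asList("jdbc-source"), Collections.emptyList())
                .build();
        System.out.println(WorkerCoordinator.WorkerLoad.connectorComparator().compare(load, load)); // 0
    }
}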
@@ -0,0 +1,224 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.runtime.distributed;
|
||||
|
||||
import org.apache.kafka.clients.ApiVersions;
|
||||
import org.apache.kafka.clients.ClientDnsLookup;
|
||||
import org.apache.kafka.clients.ClientUtils;
|
||||
import org.apache.kafka.clients.CommonClientConfigs;
|
||||
import org.apache.kafka.clients.Metadata;
|
||||
import org.apache.kafka.clients.NetworkClient;
|
||||
import org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient;
|
||||
import org.apache.kafka.clients.GroupRebalanceConfig;
|
||||
import org.apache.kafka.common.KafkaException;
|
||||
import org.apache.kafka.common.internals.ClusterResourceListeners;
|
||||
import org.apache.kafka.common.metrics.JmxReporter;
|
||||
import org.apache.kafka.common.metrics.MetricConfig;
|
||||
import org.apache.kafka.common.metrics.Metrics;
|
||||
import org.apache.kafka.common.metrics.MetricsReporter;
|
||||
import org.apache.kafka.common.network.ChannelBuilder;
|
||||
import org.apache.kafka.common.network.Selector;
|
||||
import org.apache.kafka.common.utils.AppInfoParser;
|
||||
import org.apache.kafka.common.utils.LogContext;
|
||||
import org.apache.kafka.common.utils.Time;
|
||||
import org.apache.kafka.common.utils.Utils;
|
||||
import org.apache.kafka.connect.storage.ConfigBackingStore;
|
||||
import org.apache.kafka.connect.util.ConnectorTaskId;
|
||||
import org.slf4j.Logger;
|
||||
|
||||
import java.net.InetSocketAddress;
|
||||
import java.util.Collections;
|
||||
import java.util.LinkedHashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.atomic.AtomicReference;
|
||||
|
||||
/**
|
||||
* This class manages the coordination process with brokers for the Connect cluster group membership. It ties together
|
||||
* the Coordinator, which implements the group member protocol, with all the other pieces needed to drive the connection
|
||||
* to the group coordinator broker. This isolates all the networking to a single thread managed by this class, with
|
||||
* higher level operations in response to group membership events being handled by the herder.
|
||||
*/
|
||||
public class WorkerGroupMember {
|
||||
private static final String JMX_PREFIX = "kafka.connect";
|
||||
|
||||
private final Logger log;
|
||||
private final Time time;
|
||||
private final String clientId;
|
||||
private final ConsumerNetworkClient client;
|
||||
private final Metrics metrics;
|
||||
private final Metadata metadata;
|
||||
private final long retryBackoffMs;
|
||||
private final WorkerCoordinator coordinator;
|
||||
|
||||
private boolean stopped = false;
|
||||
|
||||
public WorkerGroupMember(DistributedConfig config,
|
||||
String restUrl,
|
||||
ConfigBackingStore configStorage,
|
||||
WorkerRebalanceListener listener,
|
||||
Time time,
|
||||
String clientId,
|
||||
LogContext logContext) {
|
||||
try {
|
||||
this.time = time;
|
||||
this.clientId = clientId;
|
||||
this.log = logContext.logger(WorkerGroupMember.class);
|
||||
|
||||
Map<String, String> metricsTags = new LinkedHashMap<>();
|
||||
metricsTags.put("client-id", clientId);
|
||||
MetricConfig metricConfig = new MetricConfig().samples(config.getInt(CommonClientConfigs.METRICS_NUM_SAMPLES_CONFIG))
|
||||
.timeWindow(config.getLong(CommonClientConfigs.METRICS_SAMPLE_WINDOW_MS_CONFIG), TimeUnit.MILLISECONDS)
|
||||
.tags(metricsTags);
|
||||
List<MetricsReporter> reporters = config.getConfiguredInstances(CommonClientConfigs.METRIC_REPORTER_CLASSES_CONFIG,
|
||||
MetricsReporter.class,
|
||||
Collections.singletonMap(CommonClientConfigs.CLIENT_ID_CONFIG, clientId));
|
||||
reporters.add(new JmxReporter(JMX_PREFIX));
|
||||
this.metrics = new Metrics(metricConfig, reporters, time);
|
||||
this.retryBackoffMs = config.getLong(CommonClientConfigs.RETRY_BACKOFF_MS_CONFIG);
|
||||
this.metadata = new Metadata(retryBackoffMs, config.getLong(CommonClientConfigs.METADATA_MAX_AGE_CONFIG),
|
||||
logContext, new ClusterResourceListeners());
|
||||
List<InetSocketAddress> addresses = ClientUtils.parseAndValidateAddresses(
|
||||
config.getList(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG),
|
||||
config.getString(CommonClientConfigs.CLIENT_DNS_LOOKUP_CONFIG));
|
||||
this.metadata.bootstrap(addresses);
|
||||
String metricGrpPrefix = "connect";
|
||||
ChannelBuilder channelBuilder = ClientUtils.createChannelBuilder(config, time, logContext);
|
||||
NetworkClient netClient = new NetworkClient(
|
||||
new Selector(config.getLong(CommonClientConfigs.CONNECTIONS_MAX_IDLE_MS_CONFIG), metrics, time, metricGrpPrefix, channelBuilder, logContext),
|
||||
this.metadata,
|
||||
clientId,
|
||||
100, // a fixed large enough value will suffice
|
||||
config.getLong(CommonClientConfigs.RECONNECT_BACKOFF_MS_CONFIG),
|
||||
config.getLong(CommonClientConfigs.RECONNECT_BACKOFF_MAX_MS_CONFIG),
|
||||
config.getInt(CommonClientConfigs.SEND_BUFFER_CONFIG),
|
||||
config.getInt(CommonClientConfigs.RECEIVE_BUFFER_CONFIG),
|
||||
config.getInt(CommonClientConfigs.REQUEST_TIMEOUT_MS_CONFIG),
|
||||
ClientDnsLookup.forConfig(config.getString(CommonClientConfigs.CLIENT_DNS_LOOKUP_CONFIG)),
|
||||
time,
|
||||
true,
|
||||
new ApiVersions(),
|
||||
logContext);
|
||||
this.client = new ConsumerNetworkClient(
|
||||
logContext,
|
||||
netClient,
|
||||
metadata,
|
||||
time,
|
||||
retryBackoffMs,
|
||||
config.getInt(CommonClientConfigs.REQUEST_TIMEOUT_MS_CONFIG),
|
||||
Integer.MAX_VALUE);
|
||||
this.coordinator = new WorkerCoordinator(
|
||||
new GroupRebalanceConfig(config, GroupRebalanceConfig.ProtocolType.CONNECT),
|
||||
logContext,
|
||||
this.client,
|
||||
metrics,
|
||||
metricGrpPrefix,
|
||||
this.time,
|
||||
restUrl,
|
||||
configStorage,
|
||||
listener,
|
||||
ConnectProtocolCompatibility.compatibility(config.getString(DistributedConfig.CONNECT_PROTOCOL_CONFIG)),
|
||||
config.getInt(DistributedConfig.SCHEDULED_REBALANCE_MAX_DELAY_MS_CONFIG));
|
||||
|
||||
AppInfoParser.registerAppInfo(JMX_PREFIX, clientId, metrics, time.milliseconds());
|
||||
log.debug("Connect group member created");
|
||||
} catch (Throwable t) {
|
||||
// call close methods if internal objects are already constructed
|
||||
// this is to prevent resource leak. see KAFKA-2121
|
||||
stop(true);
|
||||
// now propagate the exception
|
||||
throw new KafkaException("Failed to construct kafka consumer", t);
|
||||
}
|
||||
}
|
||||
|
||||
public void stop() {
|
||||
if (stopped) return;
|
||||
stop(false);
|
||||
}
|
||||
|
||||
/**
|
||||
* Ensure that the connection to the broker coordinator is up and that the worker is an
|
||||
* active member of the group.
|
||||
*/
|
||||
public void ensureActive() {
|
||||
coordinator.poll(0);
|
||||
}
|
||||
|
||||
public void poll(long timeout) {
|
||||
if (timeout < 0)
|
||||
throw new IllegalArgumentException("Timeout must not be negative");
|
||||
coordinator.poll(timeout);
|
||||
}
|
||||
|
||||
/**
|
||||
* Interrupt any running poll() calls, causing a WakeupException to be thrown in the thread invoking that method.
|
||||
*/
|
||||
public void wakeup() {
|
||||
this.client.wakeup();
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the member ID of this worker in the group of workers.
|
||||
*
|
||||
 * This is the unique member ID automatically assigned by the group coordinator when this worker joins the group.
|
||||
*
|
||||
* @return the member ID
|
||||
*/
|
||||
public String memberId() {
|
||||
return coordinator.memberId();
|
||||
}
|
||||
|
||||
public void requestRejoin() {
|
||||
coordinator.requestRejoin();
|
||||
}
|
||||
|
||||
public void maybeLeaveGroup(String leaveReason) {
|
||||
coordinator.maybeLeaveGroup(leaveReason);
|
||||
}
|
||||
|
||||
public String ownerUrl(String connector) {
|
||||
return coordinator.ownerUrl(connector);
|
||||
}
|
||||
|
||||
public String ownerUrl(ConnectorTaskId task) {
|
||||
return coordinator.ownerUrl(task);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the version of the connect protocol that is currently active in the group of workers.
|
||||
*
|
||||
* @return the current connect protocol version
|
||||
*/
|
||||
public short currentProtocolVersion() {
|
||||
return coordinator.currentProtocolVersion();
|
||||
}
|
||||
|
||||
private void stop(boolean swallowException) {
|
||||
log.trace("Stopping the Connect group member.");
|
||||
AtomicReference<Throwable> firstException = new AtomicReference<>();
|
||||
this.stopped = true;
|
||||
Utils.closeQuietly(coordinator, "coordinator", firstException);
|
||||
Utils.closeQuietly(metrics, "consumer metrics", firstException);
|
||||
Utils.closeQuietly(client, "consumer network client", firstException);
|
||||
AppInfoParser.unregisterAppInfo(JMX_PREFIX, clientId, metrics);
|
||||
if (firstException.get() != null && !swallowException)
|
||||
throw new KafkaException("Failed to stop the Connect group member", firstException.get());
|
||||
else
|
||||
log.debug("The Connect group member has stopped.");
|
||||
}
|
||||
}
|
||||
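// Editor's sketch (not part of the patch): the calling pattern the herder uses around
// WorkerGroupMember. Constructing one needs a full DistributedConfig and backing stores,
// so the member is taken as a parameter here; only the loop shape is illustrated.
import org.apache.kafka.connect.runtime.distributed.WorkerGroupMember;

public class GroupMemberLoopSketch {

    // One iteration of a herder-style tick: make sure we are an active member of the
    // group, then block in poll() for up to pollTimeoutMs waiting for group events.
    static void tick(WorkerGroupMember member, long pollTimeoutMs) {
        member.ensureActive();      // rejoin / rebalance if needed
        member.poll(pollTimeoutMs); // heartbeats plus any pending I/O
    }

    // Shutdown path: another thread calls wakeup() to interrupt a blocked poll(),
    // then the owning thread leaves the group and releases the network clients.
    static void shutdown(WorkerGroupMember member) {
        member.wakeup();
        member.maybeLeaveGroup("connect worker shutting down");
        member.stop();
    }
}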
@@ -0,0 +1,39 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.connect.runtime.distributed;

import org.apache.kafka.connect.util.ConnectorTaskId;

import java.util.Collection;

/**
 * Listener for rebalance events in the worker group.
 */
public interface WorkerRebalanceListener {
    /**
     * Invoked when a new assignment is created by joining the Connect worker group. This is
     * invoked for both successful and unsuccessful assignments.
     */
    void onAssigned(ExtendedAssignment assignment, int generation);

    /**
     * Invoked when a rebalance operation starts, revoking ownership for the set of connectors
     * and tasks. Depending on the Connect protocol version, the collection of revoked connectors
     * or tasks might refer to all or some of the connectors and tasks running on the worker.
     */
    void onRevoked(String leader, Collection<String> connectors, Collection<ConnectorTaskId> tasks);
}
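// Editor's sketch (not part of the patch): a minimal WorkerRebalanceListener that only
// logs what the group coordinator handed out or took away. The real listener inside
// DistributedHerder starts and stops connectors/tasks here instead. Assumes
// ExtendedAssignment (referenced by the interface above) is public in this package.
import org.apache.kafka.connect.runtime.distributed.ExtendedAssignment;
import org.apache.kafka.connect.runtime.distributed.WorkerRebalanceListener;
import org.apache.kafka.connect.util.ConnectorTaskId;

import java.util.Collection;

public class LoggingRebalanceListener implements WorkerRebalanceListener {
    @Override
    public void onAssigned(ExtendedAssignment assignment, int generation) {
        System.out.printf("generation %d: assigned %s%n", generation, assignment);
    }

    @Override
    public void onRevoked(String leader, Collection<String> connectors, Collection<ConnectorTaskId> tasks) {
        System.out.printf("leader %s revoked connectors=%s tasks=%s%n", leader, connectors, tasks);
    }
}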
@@ -0,0 +1,207 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.runtime.errors;
|
||||
|
||||
import org.apache.kafka.clients.admin.Admin;
|
||||
import org.apache.kafka.clients.admin.NewTopic;
|
||||
import org.apache.kafka.clients.consumer.ConsumerRecord;
|
||||
import org.apache.kafka.clients.producer.KafkaProducer;
|
||||
import org.apache.kafka.clients.producer.ProducerRecord;
|
||||
import org.apache.kafka.common.errors.TopicExistsException;
|
||||
import org.apache.kafka.common.header.Headers;
|
||||
import org.apache.kafka.common.record.RecordBatch;
|
||||
import org.apache.kafka.connect.errors.ConnectException;
|
||||
import org.apache.kafka.connect.runtime.SinkConnectorConfig;
|
||||
import org.apache.kafka.connect.util.ConnectorTaskId;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import java.io.ByteArrayOutputStream;
|
||||
import java.io.IOException;
|
||||
import java.io.PrintStream;
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import java.util.Map;
|
||||
import java.util.Objects;
|
||||
import java.util.concurrent.ExecutionException;
|
||||
|
||||
import static java.util.Collections.singleton;
|
||||
|
||||
/**
|
||||
* Write the original consumed record into a dead letter queue. The dead letter queue is a Kafka topic located
|
||||
* on the same cluster used by the worker to maintain internal topics. Each connector is typically configured
|
||||
* with its own Kafka topic dead letter queue. By default, the topic name is not set, and if the
|
||||
* connector config doesn't specify one, this feature is disabled.
|
||||
*/
|
||||
public class DeadLetterQueueReporter implements ErrorReporter {
|
||||
|
||||
private static final Logger log = LoggerFactory.getLogger(DeadLetterQueueReporter.class);
|
||||
|
||||
private static final int DLQ_NUM_DESIRED_PARTITIONS = 1;
|
||||
|
||||
public static final String HEADER_PREFIX = "__connect.errors.";
|
||||
public static final String ERROR_HEADER_ORIG_TOPIC = HEADER_PREFIX + "topic";
|
||||
public static final String ERROR_HEADER_ORIG_PARTITION = HEADER_PREFIX + "partition";
|
||||
public static final String ERROR_HEADER_ORIG_OFFSET = HEADER_PREFIX + "offset";
|
||||
public static final String ERROR_HEADER_CONNECTOR_NAME = HEADER_PREFIX + "connector.name";
|
||||
public static final String ERROR_HEADER_TASK_ID = HEADER_PREFIX + "task.id";
|
||||
public static final String ERROR_HEADER_STAGE = HEADER_PREFIX + "stage";
|
||||
public static final String ERROR_HEADER_EXECUTING_CLASS = HEADER_PREFIX + "class.name";
|
||||
public static final String ERROR_HEADER_EXCEPTION = HEADER_PREFIX + "exception.class.name";
|
||||
public static final String ERROR_HEADER_EXCEPTION_MESSAGE = HEADER_PREFIX + "exception.message";
|
||||
public static final String ERROR_HEADER_EXCEPTION_STACK_TRACE = HEADER_PREFIX + "exception.stacktrace";
|
||||
|
||||
private final SinkConnectorConfig connConfig;
|
||||
private final ConnectorTaskId connectorTaskId;
|
||||
private final ErrorHandlingMetrics errorHandlingMetrics;
|
||||
|
||||
private KafkaProducer<byte[], byte[]> kafkaProducer;
|
||||
|
||||
public static DeadLetterQueueReporter createAndSetup(Map<String, Object> adminProps,
|
||||
ConnectorTaskId id,
|
||||
SinkConnectorConfig sinkConfig, Map<String, Object> producerProps,
|
||||
ErrorHandlingMetrics errorHandlingMetrics) {
|
||||
String topic = sinkConfig.dlqTopicName();
|
||||
|
||||
try (Admin admin = Admin.create(adminProps)) {
|
||||
if (!admin.listTopics().names().get().contains(topic)) {
|
||||
log.error("Topic {} doesn't exist. Will attempt to create topic.", topic);
|
||||
NewTopic schemaTopicRequest = new NewTopic(topic, DLQ_NUM_DESIRED_PARTITIONS, sinkConfig.dlqTopicReplicationFactor());
|
||||
admin.createTopics(singleton(schemaTopicRequest)).all().get();
|
||||
}
|
||||
} catch (InterruptedException e) {
|
||||
throw new ConnectException("Could not initialize dead letter queue with topic=" + topic, e);
|
||||
} catch (ExecutionException e) {
|
||||
if (!(e.getCause() instanceof TopicExistsException)) {
|
||||
throw new ConnectException("Could not initialize dead letter queue with topic=" + topic, e);
|
||||
}
|
||||
}
|
||||
|
||||
KafkaProducer<byte[], byte[]> dlqProducer = new KafkaProducer<>(producerProps);
|
||||
return new DeadLetterQueueReporter(dlqProducer, sinkConfig, id, errorHandlingMetrics);
|
||||
}
|
||||
|
||||
/**
|
||||
* Initialize the dead letter queue reporter with a {@link KafkaProducer}.
|
||||
*
|
||||
* @param kafkaProducer a Kafka Producer to produce the original consumed records.
|
||||
*/
|
||||
// Visible for testing
|
||||
DeadLetterQueueReporter(KafkaProducer<byte[], byte[]> kafkaProducer, SinkConnectorConfig connConfig,
|
||||
ConnectorTaskId id, ErrorHandlingMetrics errorHandlingMetrics) {
|
||||
Objects.requireNonNull(kafkaProducer);
|
||||
Objects.requireNonNull(connConfig);
|
||||
Objects.requireNonNull(id);
|
||||
Objects.requireNonNull(errorHandlingMetrics);
|
||||
|
||||
this.kafkaProducer = kafkaProducer;
|
||||
this.connConfig = connConfig;
|
||||
this.connectorTaskId = id;
|
||||
this.errorHandlingMetrics = errorHandlingMetrics;
|
||||
}
|
||||
|
||||
/**
|
||||
* Write the raw records into a Kafka topic.
|
||||
*
|
||||
* @param context processing context containing the raw record at {@link ProcessingContext#consumerRecord()}.
|
||||
*/
|
||||
public void report(ProcessingContext context) {
|
||||
final String dlqTopicName = connConfig.dlqTopicName();
|
||||
if (dlqTopicName.isEmpty()) {
|
||||
return;
|
||||
}
|
||||
|
||||
errorHandlingMetrics.recordDeadLetterQueueProduceRequest();
|
||||
|
||||
ConsumerRecord<byte[], byte[]> originalMessage = context.consumerRecord();
|
||||
if (originalMessage == null) {
|
||||
errorHandlingMetrics.recordDeadLetterQueueProduceFailed();
|
||||
return;
|
||||
}
|
||||
|
||||
ProducerRecord<byte[], byte[]> producerRecord;
|
||||
if (originalMessage.timestamp() == RecordBatch.NO_TIMESTAMP) {
|
||||
producerRecord = new ProducerRecord<>(dlqTopicName, null,
|
||||
originalMessage.key(), originalMessage.value(), originalMessage.headers());
|
||||
} else {
|
||||
producerRecord = new ProducerRecord<>(dlqTopicName, null, originalMessage.timestamp(),
|
||||
originalMessage.key(), originalMessage.value(), originalMessage.headers());
|
||||
}
|
||||
|
||||
if (connConfig.isDlqContextHeadersEnabled()) {
|
||||
populateContextHeaders(producerRecord, context);
|
||||
}
|
||||
|
||||
this.kafkaProducer.send(producerRecord, (metadata, exception) -> {
|
||||
if (exception != null) {
|
||||
log.error("Could not produce message to dead letter queue. topic=" + dlqTopicName, exception);
|
||||
errorHandlingMetrics.recordDeadLetterQueueProduceFailed();
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
// Visible for testing
|
||||
void populateContextHeaders(ProducerRecord<byte[], byte[]> producerRecord, ProcessingContext context) {
|
||||
Headers headers = producerRecord.headers();
|
||||
if (context.consumerRecord() != null) {
|
||||
headers.add(ERROR_HEADER_ORIG_TOPIC, toBytes(context.consumerRecord().topic()));
|
||||
headers.add(ERROR_HEADER_ORIG_PARTITION, toBytes(context.consumerRecord().partition()));
|
||||
headers.add(ERROR_HEADER_ORIG_OFFSET, toBytes(context.consumerRecord().offset()));
|
||||
}
|
||||
|
||||
headers.add(ERROR_HEADER_CONNECTOR_NAME, toBytes(connectorTaskId.connector()));
|
||||
headers.add(ERROR_HEADER_TASK_ID, toBytes(String.valueOf(connectorTaskId.task())));
|
||||
headers.add(ERROR_HEADER_STAGE, toBytes(context.stage().name()));
|
||||
headers.add(ERROR_HEADER_EXECUTING_CLASS, toBytes(context.executingClass().getName()));
|
||||
if (context.error() != null) {
|
||||
headers.add(ERROR_HEADER_EXCEPTION, toBytes(context.error().getClass().getName()));
|
||||
headers.add(ERROR_HEADER_EXCEPTION_MESSAGE, toBytes(context.error().getMessage()));
|
||||
byte[] trace;
|
||||
if ((trace = stacktrace(context.error())) != null) {
|
||||
headers.add(ERROR_HEADER_EXCEPTION_STACK_TRACE, trace);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private byte[] stacktrace(Throwable error) {
|
||||
ByteArrayOutputStream bos = new ByteArrayOutputStream();
|
||||
try {
|
||||
PrintStream stream = new PrintStream(bos, true, "UTF-8");
|
||||
error.printStackTrace(stream);
|
||||
bos.close();
|
||||
return bos.toByteArray();
|
||||
} catch (IOException e) {
|
||||
log.error("Could not serialize stacktrace.", e);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
private byte[] toBytes(int value) {
|
||||
return toBytes(String.valueOf(value));
|
||||
}
|
||||
|
||||
private byte[] toBytes(long value) {
|
||||
return toBytes(String.valueOf(value));
|
||||
}
|
||||
|
||||
private byte[] toBytes(String value) {
|
||||
if (value != null) {
|
||||
return value.getBytes(StandardCharsets.UTF_8);
|
||||
} else {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
}
|
||||
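// Editor's sketch (not part of the patch): the sink connector settings that feed
// DeadLetterQueueReporter above. The errors.* key names follow Connect's error-handling
// options (KIP-298); treat them as illustrative, the authoritative list lives in
// SinkConnectorConfig. Connector name and class below are hypothetical.
import java.util.HashMap;
import java.util.Map;

public class DlqConfigSketch {
    public static Map<String, String> dlqEnabledSinkConfig() {
        Map<String, String> props = new HashMap<>();
        props.put("name", "es-sink");                                // hypothetical connector name
        props.put("connector.class", "com.example.EsSinkConnector"); // hypothetical connector class
        props.put("topics", "orders");
        // Route failed records to a DLQ topic instead of killing the task.
        props.put("errors.tolerance", "all");
        props.put("errors.deadletterqueue.topic.name", "es-sink-dlq");
        props.put("errors.deadletterqueue.topic.replication.factor", "3");
        // Attach the __connect.errors.* headers written by populateContextHeaders().
        props.put("errors.deadletterqueue.context.headers.enable", "true");
        // Also log each failure (handled by LogReporter, defined later in this patch).
        props.put("errors.log.enable", "true");
        return props;
    }
}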
@@ -0,0 +1,150 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.runtime.errors;
|
||||
|
||||
import org.apache.kafka.common.metrics.Sensor;
|
||||
import org.apache.kafka.common.metrics.stats.CumulativeSum;
|
||||
import org.apache.kafka.common.utils.SystemTime;
|
||||
import org.apache.kafka.common.utils.Time;
|
||||
import org.apache.kafka.connect.runtime.ConnectMetrics;
|
||||
import org.apache.kafka.connect.runtime.ConnectMetricsRegistry;
|
||||
import org.apache.kafka.connect.util.ConnectorTaskId;
|
||||
|
||||
import java.util.ArrayList;
|
||||
|
||||
/**
|
||||
* Contains various sensors used for monitoring errors.
|
||||
*/
|
||||
public class ErrorHandlingMetrics {
|
||||
|
||||
private final Time time = new SystemTime();
|
||||
|
||||
private final ConnectMetrics.MetricGroup metricGroup;
|
||||
|
||||
// metrics
|
||||
private final Sensor recordProcessingFailures;
|
||||
private final Sensor recordProcessingErrors;
|
||||
private final Sensor recordsSkipped;
|
||||
private final Sensor retries;
|
||||
private final Sensor errorsLogged;
|
||||
private final Sensor dlqProduceRequests;
|
||||
private final Sensor dlqProduceFailures;
|
||||
private long lastErrorTime = 0;
|
||||
|
||||
// for testing only
|
||||
public ErrorHandlingMetrics() {
|
||||
this(new ConnectorTaskId("noop-connector", -1),
|
||||
new ConnectMetrics("noop-worker", new SystemTime(), 2, 3000, Sensor.RecordingLevel.INFO.toString(),
|
||||
new ArrayList<>()));
|
||||
}
|
||||
|
||||
public ErrorHandlingMetrics(ConnectorTaskId id, ConnectMetrics connectMetrics) {
|
||||
|
||||
ConnectMetricsRegistry registry = connectMetrics.registry();
|
||||
metricGroup = connectMetrics.group(registry.taskErrorHandlingGroupName(),
|
||||
registry.connectorTagName(), id.connector(), registry.taskTagName(), Integer.toString(id.task()));
|
||||
|
||||
// prevent collisions by removing any previously created metrics in this group.
|
||||
metricGroup.close();
|
||||
|
||||
recordProcessingFailures = metricGroup.sensor("total-record-failures");
|
||||
recordProcessingFailures.add(metricGroup.metricName(registry.recordProcessingFailures), new CumulativeSum());
|
||||
|
||||
recordProcessingErrors = metricGroup.sensor("total-record-errors");
|
||||
recordProcessingErrors.add(metricGroup.metricName(registry.recordProcessingErrors), new CumulativeSum());
|
||||
|
||||
recordsSkipped = metricGroup.sensor("total-records-skipped");
|
||||
recordsSkipped.add(metricGroup.metricName(registry.recordsSkipped), new CumulativeSum());
|
||||
|
||||
retries = metricGroup.sensor("total-retries");
|
||||
retries.add(metricGroup.metricName(registry.retries), new CumulativeSum());
|
||||
|
||||
errorsLogged = metricGroup.sensor("total-errors-logged");
|
||||
errorsLogged.add(metricGroup.metricName(registry.errorsLogged), new CumulativeSum());
|
||||
|
||||
dlqProduceRequests = metricGroup.sensor("deadletterqueue-produce-requests");
|
||||
dlqProduceRequests.add(metricGroup.metricName(registry.dlqProduceRequests), new CumulativeSum());
|
||||
|
||||
dlqProduceFailures = metricGroup.sensor("deadletterqueue-produce-failures");
|
||||
dlqProduceFailures.add(metricGroup.metricName(registry.dlqProduceFailures), new CumulativeSum());
|
||||
|
||||
metricGroup.addValueMetric(registry.lastErrorTimestamp, now -> lastErrorTime);
|
||||
}
|
||||
|
||||
/**
|
||||
* Increment the number of failed operations (retriable and non-retriable).
|
||||
*/
|
||||
public void recordFailure() {
|
||||
recordProcessingFailures.record();
|
||||
}
|
||||
|
||||
/**
|
||||
* Increment the number of operations which could not be successfully executed.
|
||||
*/
|
||||
public void recordError() {
|
||||
recordProcessingErrors.record();
|
||||
}
|
||||
|
||||
/**
|
||||
* Increment the number of records skipped.
|
||||
*/
|
||||
public void recordSkipped() {
|
||||
recordsSkipped.record();
|
||||
}
|
||||
|
||||
/**
|
||||
* The number of retries made while executing operations.
|
||||
*/
|
||||
public void recordRetry() {
|
||||
retries.record();
|
||||
}
|
||||
|
||||
/**
|
||||
* The number of errors logged by the {@link LogReporter}.
|
||||
*/
|
||||
public void recordErrorLogged() {
|
||||
errorsLogged.record();
|
||||
}
|
||||
|
||||
/**
|
||||
* The number of produce requests to the {@link DeadLetterQueueReporter}.
|
||||
*/
|
||||
public void recordDeadLetterQueueProduceRequest() {
|
||||
dlqProduceRequests.record();
|
||||
}
|
||||
|
||||
/**
|
||||
* The number of produce requests to the {@link DeadLetterQueueReporter} which failed to be successfully produced into Kafka.
|
||||
*/
|
||||
public void recordDeadLetterQueueProduceFailed() {
|
||||
dlqProduceFailures.record();
|
||||
}
|
||||
|
||||
/**
|
||||
* Record the time of error.
|
||||
*/
|
||||
public void recordErrorTimestamp() {
|
||||
this.lastErrorTime = time.milliseconds();
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the metric group for this class.
|
||||
*/
|
||||
public ConnectMetrics.MetricGroup metricGroup() {
|
||||
return metricGroup;
|
||||
}
|
||||
}
|
||||
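// Editor's sketch (not part of the patch): one assumed way the retry framework could
// touch ErrorHandlingMetrics while processing a failing record; the exact call order
// belongs to RetryWithToleranceOperator and is not spelled out in this patch.
import org.apache.kafka.connect.runtime.errors.ErrorHandlingMetrics;

public class ErrorMetricsSketch {

    // Record one failed attempt and its timestamp; either count a retry or,
    // when giving up, count the terminal error and the skipped record.
    static void recordAttemptOutcome(ErrorHandlingMetrics metrics, boolean giveUp) {
        metrics.recordFailure();
        metrics.recordErrorTimestamp();
        if (giveUp) {
            metrics.recordError();
            metrics.recordSkipped();
        } else {
            metrics.recordRetry();
        }
    }
}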
@@ -0,0 +1,31 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.connect.runtime.errors;

/**
 * Report an error using the information contained in the {@link ProcessingContext}.
 */
public interface ErrorReporter {

    /**
     * Report an error.
     *
     * @param context the processing context (cannot be null).
     */
    void report(ProcessingContext context);

}
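// Editor's sketch (not part of the patch): a no-op ErrorReporter. ProcessingContext is
// package-private, so a custom reporter like this one has to live in the same package;
// the runtime simply calls report() on every configured reporter for each failure.
package org.apache.kafka.connect.runtime.errors;

public class NoopErrorReporter implements ErrorReporter {
    @Override
    public void report(ProcessingContext context) {
        // Intentionally ignore the failure; useful as a placeholder in tests.
    }
}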
@@ -0,0 +1,72 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.runtime.errors;
|
||||
|
||||
import org.apache.kafka.connect.runtime.ConnectorConfig;
|
||||
import org.apache.kafka.connect.util.ConnectorTaskId;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import java.util.Objects;
|
||||
|
||||
/**
|
||||
* Writes errors and their context to application logs.
|
||||
*/
|
||||
public class LogReporter implements ErrorReporter {
|
||||
|
||||
private static final Logger log = LoggerFactory.getLogger(LogReporter.class);
|
||||
|
||||
private final ConnectorTaskId id;
|
||||
private final ConnectorConfig connConfig;
|
||||
private final ErrorHandlingMetrics errorHandlingMetrics;
|
||||
|
||||
public LogReporter(ConnectorTaskId id, ConnectorConfig connConfig, ErrorHandlingMetrics errorHandlingMetrics) {
|
||||
Objects.requireNonNull(id);
|
||||
Objects.requireNonNull(connConfig);
|
||||
Objects.requireNonNull(errorHandlingMetrics);
|
||||
|
||||
this.id = id;
|
||||
this.connConfig = connConfig;
|
||||
this.errorHandlingMetrics = errorHandlingMetrics;
|
||||
}
|
||||
|
||||
/**
|
||||
* Log error context.
|
||||
*
|
||||
* @param context the processing context.
|
||||
*/
|
||||
@Override
|
||||
public void report(ProcessingContext context) {
|
||||
if (!connConfig.enableErrorLog()) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (!context.failed()) {
|
||||
return;
|
||||
}
|
||||
|
||||
log.error(message(context), context.error());
|
||||
errorHandlingMetrics.recordErrorLogged();
|
||||
}
|
||||
|
||||
// Visible for testing
|
||||
String message(ProcessingContext context) {
|
||||
return String.format("Error encountered in task %s. %s", String.valueOf(id),
|
||||
context.toString(connConfig.includeRecordDetailsInErrorLog()));
|
||||
}
|
||||
|
||||
}
|
||||
@@ -0,0 +1,28 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.connect.runtime.errors;

import java.util.concurrent.Callable;

/**
 * A recoverable operation evaluated in the connector pipeline.
 *
 * @param <V> return type of the result of the operation.
 */
public interface Operation<V> extends Callable<V> {

}
@@ -0,0 +1,219 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.runtime.errors;
|
||||
|
||||
import org.apache.kafka.clients.consumer.ConsumerRecord;
|
||||
import org.apache.kafka.common.record.TimestampType;
|
||||
import org.apache.kafka.connect.source.SourceRecord;
|
||||
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.Objects;
|
||||
|
||||
/**
|
||||
* Contains all the metadata related to the operation currently being evaluated. Only one instance of this class is meant
|
||||
* to exist per task in a JVM.
|
||||
*/
|
||||
class ProcessingContext {
|
||||
|
||||
private Collection<ErrorReporter> reporters = Collections.emptyList();
|
||||
|
||||
private ConsumerRecord<byte[], byte[]> consumedMessage;
|
||||
private SourceRecord sourceRecord;
|
||||
|
||||
/**
|
||||
* The following fields need to be reset every time a new record is seen.
|
||||
*/
|
||||
|
||||
private Stage position;
|
||||
private Class<?> klass;
|
||||
private int attempt;
|
||||
private Throwable error;
|
||||
|
||||
/**
|
||||
* Reset the internal fields before executing operations on a new record.
|
||||
*/
|
||||
private void reset() {
|
||||
attempt = 0;
|
||||
position = null;
|
||||
klass = null;
|
||||
error = null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the record consumed from Kafka in a sink connector.
|
||||
*
|
||||
* @param consumedMessage the record
|
||||
*/
|
||||
public void consumerRecord(ConsumerRecord<byte[], byte[]> consumedMessage) {
|
||||
this.consumedMessage = consumedMessage;
|
||||
reset();
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the record consumed from Kafka; may be null
|
||||
*/
|
||||
public ConsumerRecord<byte[], byte[]> consumerRecord() {
|
||||
return consumedMessage;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the source record being processed.
|
||||
*/
|
||||
public SourceRecord sourceRecord() {
|
||||
return sourceRecord;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the source record being processed in the connect pipeline.
|
||||
*
|
||||
* @param record the source record
|
||||
*/
|
||||
public void sourceRecord(SourceRecord record) {
|
||||
this.sourceRecord = record;
|
||||
reset();
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the stage in the connector pipeline which is currently executing.
|
||||
*
|
||||
* @param position the stage
|
||||
*/
|
||||
public void position(Stage position) {
|
||||
this.position = position;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the stage in the connector pipeline which is currently executing.
|
||||
*/
|
||||
public Stage stage() {
|
||||
return position;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the class which is going to execute the current operation.
|
||||
*/
|
||||
public Class<?> executingClass() {
|
||||
return klass;
|
||||
}
|
||||
|
||||
/**
|
||||
* @param klass set the class which is currently executing.
|
||||
*/
|
||||
public void executingClass(Class<?> klass) {
|
||||
this.klass = klass;
|
||||
}
|
||||
|
||||
/**
|
||||
* A helper method to set both the stage and the class.
|
||||
*
|
||||
* @param stage the stage
|
||||
* @param klass the class which will execute the operation in this stage.
|
||||
*/
|
||||
public void currentContext(Stage stage, Class<?> klass) {
|
||||
position(stage);
|
||||
executingClass(klass);
|
||||
}
|
||||
|
||||
/**
|
||||
* Report errors. Should be called only if an error was encountered while executing the operation.
|
||||
*/
|
||||
public void report() {
|
||||
for (ErrorReporter reporter: reporters) {
|
||||
reporter.report(this);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return toString(false);
|
||||
}
|
||||
|
||||
public String toString(boolean includeMessage) {
|
||||
StringBuilder builder = new StringBuilder();
|
||||
builder.append("Executing stage '");
|
||||
builder.append(stage().name());
|
||||
builder.append("' with class '");
|
||||
builder.append(executingClass() == null ? "null" : executingClass().getName());
|
||||
builder.append('\'');
|
||||
if (includeMessage && sourceRecord() != null) {
|
||||
builder.append(", where source record is = ");
|
||||
builder.append(sourceRecord());
|
||||
} else if (includeMessage && consumerRecord() != null) {
|
||||
ConsumerRecord<byte[], byte[]> msg = consumerRecord();
|
||||
builder.append(", where consumed record is ");
|
||||
builder.append("{topic='").append(msg.topic()).append('\'');
|
||||
builder.append(", partition=").append(msg.partition());
|
||||
builder.append(", offset=").append(msg.offset());
|
||||
if (msg.timestampType() == TimestampType.CREATE_TIME || msg.timestampType() == TimestampType.LOG_APPEND_TIME) {
|
||||
builder.append(", timestamp=").append(msg.timestamp());
|
||||
builder.append(", timestampType=").append(msg.timestampType());
|
||||
}
|
||||
builder.append("}");
|
||||
}
|
||||
builder.append('.');
|
||||
return builder.toString();
|
||||
}
|
||||
|
||||
/**
|
||||
* @param attempt the number of attempts made to execute the current operation.
|
||||
*/
|
||||
public void attempt(int attempt) {
|
||||
this.attempt = attempt;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the number of attempts made to execute the current operation.
|
||||
*/
|
||||
public int attempt() {
|
||||
return attempt;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the error (if any) which was encountered while processing the current stage.
|
||||
*/
|
||||
public Throwable error() {
|
||||
return error;
|
||||
}
|
||||
|
||||
/**
|
||||
* The error (if any) which was encountered while processing the current stage.
|
||||
*
|
||||
* @param error the error
|
||||
*/
|
||||
public void error(Throwable error) {
|
||||
this.error = error;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return true, if the last operation encountered an error; false otherwise
|
||||
*/
|
||||
public boolean failed() {
|
||||
return error() != null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the error reporters for this connector.
|
||||
*
|
||||
* @param reporters the error reporters (should not be null).
|
||||
*/
|
||||
public void reporters(Collection<ErrorReporter> reporters) {
|
||||
Objects.requireNonNull(reporters);
|
||||
this.reporters = reporters;
|
||||
}
|
||||
|
||||
}
|
||||
@@ -0,0 +1,273 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.runtime.errors;
|
||||
|
||||
import org.apache.kafka.clients.consumer.ConsumerRecord;
|
||||
import org.apache.kafka.common.config.ConfigException;
|
||||
import org.apache.kafka.common.utils.Time;
|
||||
import org.apache.kafka.connect.errors.ConnectException;
|
||||
import org.apache.kafka.connect.errors.RetriableException;
|
||||
import org.apache.kafka.connect.runtime.ConnectorConfig;
|
||||
import org.apache.kafka.connect.source.SourceRecord;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.ThreadLocalRandom;
|
||||
|
||||
/**
|
||||
* Attempt to recover a failed operation with retries and tolerance limits.
|
||||
* <p>
|
||||
*
|
||||
* A retry is attempted if the operation throws a {@link RetriableException}. Retries are accompanied by exponential backoffs, starting with
|
||||
* {@link #RETRIES_DELAY_MIN_MS}, up to what is specified with {@link ConnectorConfig#errorMaxDelayInMillis()}.
|
||||
* Including the first attempt and future retries, the total time taken to evaluate the operation should be within
|
||||
* {@link ConnectorConfig#errorRetryTimeout()} millis.
|
||||
* <p>
|
||||
*
|
||||
* This executor will tolerate failures, as specified by {@link ConnectorConfig#errorToleranceType()}.
|
||||
* For transformations and converters, all exceptions are tolerated. For other operations, only {@link RetriableException} is tolerated.
|
||||
* <p>
|
||||
*
|
||||
* There are three outcomes to executing an operation. It might succeed, in which case the result is returned to the caller.
|
||||
* If it fails, this class does one of these two things: (1) if the failure occurred due to a tolerable exception, then
|
||||
* it sets the appropriate error reason in the {@link ProcessingContext} and returns null, or (2) if the exception is not tolerated,
|
||||
* then it is wrapped into a ConnectException and rethrown to the caller.
|
||||
* <p>
|
||||
*/
|
||||
public class RetryWithToleranceOperator {
|
||||
|
||||
private static final Logger log = LoggerFactory.getLogger(RetryWithToleranceOperator.class);
|
||||
|
||||
public static final long RETRIES_DELAY_MIN_MS = 300;
|
||||
|
||||
private static final Map<Stage, Class<? extends Exception>> TOLERABLE_EXCEPTIONS = new HashMap<>();
|
||||
static {
|
||||
TOLERABLE_EXCEPTIONS.put(Stage.TRANSFORMATION, Exception.class);
|
||||
TOLERABLE_EXCEPTIONS.put(Stage.HEADER_CONVERTER, Exception.class);
|
||||
TOLERABLE_EXCEPTIONS.put(Stage.KEY_CONVERTER, Exception.class);
|
||||
TOLERABLE_EXCEPTIONS.put(Stage.VALUE_CONVERTER, Exception.class);
|
||||
}
|
||||
|
||||
private final long errorRetryTimeout;
|
||||
private final long errorMaxDelayInMillis;
|
||||
private final ToleranceType errorToleranceType;
|
||||
|
||||
private long totalFailures = 0;
|
||||
private final Time time;
|
||||
private ErrorHandlingMetrics errorHandlingMetrics;
|
||||
|
||||
protected ProcessingContext context = new ProcessingContext();
|
||||
|
||||
public RetryWithToleranceOperator(long errorRetryTimeout, long errorMaxDelayInMillis,
|
||||
ToleranceType toleranceType, Time time) {
|
||||
this.errorRetryTimeout = errorRetryTimeout;
|
||||
this.errorMaxDelayInMillis = errorMaxDelayInMillis;
|
||||
this.errorToleranceType = toleranceType;
|
||||
this.time = time;
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute the recoverable operation. If the operation is already in a failed state, then simply return
|
||||
* with the existing failure.
|
||||
*
|
||||
* @param operation the recoverable operation
|
||||
* @param <V> return type of the result of the operation.
|
||||
* @return result of the operation
|
||||
*/
|
||||
public <V> V execute(Operation<V> operation, Stage stage, Class<?> executingClass) {
|
||||
context.currentContext(stage, executingClass);
|
||||
|
||||
if (context.failed()) {
|
||||
log.debug("ProcessingContext is already in failed state. Ignoring requested operation.");
|
||||
return null;
|
||||
}
|
||||
|
||||
try {
|
||||
Class<? extends Exception> ex = TOLERABLE_EXCEPTIONS.getOrDefault(context.stage(), RetriableException.class);
|
||||
return execAndHandleError(operation, ex);
|
||||
} finally {
|
||||
if (context.failed()) {
|
||||
errorHandlingMetrics.recordError();
|
||||
context.report();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Attempt to execute an operation. Retry if a {@link RetriableException} is raised. Re-throw everything else.
|
||||
*
|
||||
* @param operation the operation to be executed.
|
||||
* @param <V> the return type of the result of the operation.
|
||||
* @return the result of the operation.
|
||||
* @throws Exception rethrow if a non-retriable Exception is thrown by the operation
|
||||
*/
|
||||
protected <V> V execAndRetry(Operation<V> operation) throws Exception {
|
||||
int attempt = 0;
|
||||
long startTime = time.milliseconds();
|
||||
long deadline = startTime + errorRetryTimeout;
|
||||
do {
|
||||
try {
|
||||
attempt++;
|
||||
return operation.call();
|
||||
} catch (RetriableException e) {
|
||||
log.trace("Caught a retriable exception while executing {} operation with {}", context.stage(), context.executingClass());
|
||||
errorHandlingMetrics.recordFailure();
|
||||
if (checkRetry(startTime)) {
|
||||
backoff(attempt, deadline);
|
||||
if (Thread.currentThread().isInterrupted()) {
|
||||
log.trace("Thread was interrupted. Marking operation as failed.");
|
||||
context.error(e);
|
||||
return null;
|
||||
}
|
||||
errorHandlingMetrics.recordRetry();
|
||||
} else {
|
||||
log.trace("Can't retry. start={}, attempt={}, deadline={}", startTime, attempt, deadline);
|
||||
context.error(e);
|
||||
return null;
|
||||
}
|
||||
} finally {
|
||||
context.attempt(attempt);
|
||||
}
|
||||
} while (true);
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute a given operation multiple times (if needed), and tolerate certain exceptions.
|
||||
*
|
||||
* @param operation the operation to be executed.
|
||||
* @param tolerated the class of exceptions which can be tolerated.
|
||||
* @param <V> The return type of the result of the operation.
|
||||
* @return the result of the operation
|
||||
*/
|
||||
// Visible for testing
|
||||
protected <V> V execAndHandleError(Operation<V> operation, Class<? extends Exception> tolerated) {
|
||||
try {
|
||||
V result = execAndRetry(operation);
|
||||
if (context.failed()) {
|
||||
markAsFailed();
|
||||
errorHandlingMetrics.recordSkipped();
|
||||
}
|
||||
return result;
|
||||
} catch (Exception e) {
|
||||
errorHandlingMetrics.recordFailure();
|
||||
markAsFailed();
|
||||
context.error(e);
|
||||
|
||||
if (!tolerated.isAssignableFrom(e.getClass())) {
|
||||
throw new ConnectException("Unhandled exception in error handler", e);
|
||||
}
|
||||
|
||||
if (!withinToleranceLimits()) {
|
||||
throw new ConnectException("Tolerance exceeded in error handler", e);
|
||||
}
|
||||
|
||||
errorHandlingMetrics.recordSkipped();
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
// Visible for testing
|
||||
void markAsFailed() {
|
||||
errorHandlingMetrics.recordErrorTimestamp();
|
||||
totalFailures++;
|
||||
}
|
||||
|
||||
// Visible for testing
|
||||
@SuppressWarnings("fallthrough")
|
||||
boolean withinToleranceLimits() {
|
||||
switch (errorToleranceType) {
|
||||
case NONE:
|
||||
if (totalFailures > 0) return false;
|
||||
case ALL:
|
||||
return true;
|
||||
default:
|
||||
throw new ConfigException("Unknown tolerance type: " + errorToleranceType);
|
||||
}
|
||||
}
|
||||
|
||||
// Visible for testing
|
||||
boolean checkRetry(long startTime) {
|
||||
return (time.milliseconds() - startTime) < errorRetryTimeout;
|
||||
}
|
||||
|
||||
// Visible for testing
|
||||
void backoff(int attempt, long deadline) {
|
||||
int numRetry = attempt - 1;
|
||||
long delay = RETRIES_DELAY_MIN_MS << numRetry;
|
||||
if (delay > errorMaxDelayInMillis) {
|
||||
delay = ThreadLocalRandom.current().nextLong(errorMaxDelayInMillis);
|
||||
}
|
||||
if (delay + time.milliseconds() > deadline) {
|
||||
delay = deadline - time.milliseconds();
|
||||
}
|
||||
log.debug("Sleeping for {} millis", delay);
|
||||
time.sleep(delay);
|
||||
}
|
||||
|
||||
public void metrics(ErrorHandlingMetrics errorHandlingMetrics) {
|
||||
this.errorHandlingMetrics = errorHandlingMetrics;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "RetryWithToleranceOperator{" +
|
||||
"errorRetryTimeout=" + errorRetryTimeout +
|
||||
", errorMaxDelayInMillis=" + errorMaxDelayInMillis +
|
||||
", errorToleranceType=" + errorToleranceType +
|
||||
", totalFailures=" + totalFailures +
|
||||
", time=" + time +
|
||||
", context=" + context +
|
||||
'}';
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the error reporters for this connector.
|
||||
*
|
||||
* @param reporters the error reporters (should not be null).
|
||||
*/
|
||||
public void reporters(List<ErrorReporter> reporters) {
|
||||
this.context.reporters(reporters);
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the source record being processed in the connect pipeline.
|
||||
*
|
||||
* @param preTransformRecord the source record
|
||||
*/
|
||||
public void sourceRecord(SourceRecord preTransformRecord) {
|
||||
this.context.sourceRecord(preTransformRecord);
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the record consumed from Kafka in a sink connector.
|
||||
*
|
||||
* @param consumedMessage the record
|
||||
*/
|
||||
public void consumerRecord(ConsumerRecord<byte[], byte[]> consumedMessage) {
|
||||
this.context.consumerRecord(consumedMessage);
|
||||
}
|
||||
|
||||
/**
|
||||
* @return true, if the last operation encountered an error; false otherwise
|
||||
*/
|
||||
public boolean failed() {
|
||||
return this.context.failed();
|
||||
}
|
||||
}
|
||||
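// --- Illustrative sketch, not part of the patch above ---
// How a worker wraps one pipeline step with RetryWithToleranceOperator. The
// ConnectorConfig#errorRetryTimeout() accessor is assumed by analogy with the
// errorMaxDelayInMillis()/errorToleranceType() accessors referenced in the Javadoc above;
// the converter, record, metrics and reporters are placeholders.
class RetryWithToleranceOperatorUsageSketch {
    byte[] convertKey(org.apache.kafka.connect.runtime.ConnectorConfig connConfig,
                      org.apache.kafka.connect.storage.Converter keyConverter,
                      org.apache.kafka.connect.source.SourceRecord record,
                      ErrorHandlingMetrics metrics,
                      java.util.List<ErrorReporter> reporters) {
        RetryWithToleranceOperator operator = new RetryWithToleranceOperator(
                connConfig.errorRetryTimeout(), connConfig.errorMaxDelayInMillis(),
                connConfig.errorToleranceType(), org.apache.kafka.common.utils.Time.SYSTEM);
        operator.metrics(metrics);
        operator.reporters(reporters);
        operator.sourceRecord(record);
        // Retriable failures are retried with backoff; tolerated failures are reported and yield null.
        byte[] key = operator.execute(
                () -> keyConverter.fromConnectData(record.topic(), record.keySchema(), record.key()),
                Stage.KEY_CONVERTER, keyConverter.getClass());
        return operator.failed() ? null : key;
    }
}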
@@ -0,0 +1,63 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.runtime.errors;
|
||||
|
||||
/**
|
||||
* A logical stage in a Connect pipeline.
|
||||
*/
|
||||
public enum Stage {
|
||||
|
||||
/**
|
||||
* When calling the poll() method on a SourceTask
|
||||
*/
|
||||
TASK_POLL,
|
||||
|
||||
/**
|
||||
* When calling the put() method on a SinkTask
|
||||
*/
|
||||
TASK_PUT,
|
||||
|
||||
/**
|
||||
* When running any transformation operation on a record
|
||||
*/
|
||||
TRANSFORMATION,
|
||||
|
||||
/**
|
||||
* When using the key converter to serialize/deserialize keys in ConnectRecords
|
||||
*/
|
||||
KEY_CONVERTER,
|
||||
|
||||
/**
|
||||
* When using the value converter to serialize/deserialize values in ConnectRecords
|
||||
*/
|
||||
VALUE_CONVERTER,
|
||||
|
||||
/**
|
||||
* When using the header converter to serialize/deserialize headers in ConnectRecords
|
||||
*/
|
||||
HEADER_CONVERTER,
|
||||
|
||||
/**
|
||||
* When producing to a Kafka topic
|
||||
*/
|
||||
KAFKA_PRODUCE,
|
||||
|
||||
/**
|
||||
* When consuming from a Kafka topic
|
||||
*/
|
||||
KAFKA_CONSUME
|
||||
}
|
||||
@@ -0,0 +1,40 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.runtime.errors;
|
||||
|
||||
import java.util.Locale;
|
||||
|
||||
/**
|
||||
* The different levels of error tolerance.
|
||||
*/
|
||||
public enum ToleranceType {
|
||||
|
||||
/**
|
||||
* Tolerate no errors.
|
||||
*/
|
||||
NONE,
|
||||
|
||||
/**
|
||||
* Tolerate all errors.
|
||||
*/
|
||||
ALL;
|
||||
|
||||
public String value() {
|
||||
return name().toLowerCase(Locale.ROOT);
|
||||
}
|
||||
|
||||
}
|
||||
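// --- Illustrative note, not part of the patch above ---
// value() produces the lowercase token used in connector configuration:
// ToleranceType.NONE.value() -> "none", ToleranceType.ALL.value() -> "all".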
@@ -0,0 +1,34 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.kafka.connect.runtime.health;
|
||||
|
||||
import org.apache.kafka.connect.health.ConnectClusterDetails;
|
||||
|
||||
public class ConnectClusterDetailsImpl implements ConnectClusterDetails {
|
||||
|
||||
private final String kafkaClusterId;
|
||||
|
||||
public ConnectClusterDetailsImpl(String kafkaClusterId) {
|
||||
this.kafkaClusterId = kafkaClusterId;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String kafkaClusterId() {
|
||||
return kafkaClusterId;
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,115 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.kafka.connect.runtime.health;
|
||||
|
||||
import org.apache.kafka.connect.errors.ConnectException;
|
||||
import org.apache.kafka.connect.health.ConnectClusterDetails;
|
||||
import org.apache.kafka.connect.health.ConnectClusterState;
|
||||
import org.apache.kafka.connect.health.ConnectorHealth;
|
||||
import org.apache.kafka.connect.health.ConnectorState;
|
||||
import org.apache.kafka.connect.health.ConnectorType;
|
||||
import org.apache.kafka.connect.health.TaskState;
|
||||
import org.apache.kafka.connect.runtime.Herder;
|
||||
import org.apache.kafka.connect.runtime.rest.entities.ConnectorStateInfo;
|
||||
import org.apache.kafka.connect.util.FutureCallback;
|
||||
|
||||
import java.util.Collection;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.ExecutionException;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.TimeoutException;
|
||||
|
||||
public class ConnectClusterStateImpl implements ConnectClusterState {
|
||||
|
||||
private final long herderRequestTimeoutMs;
|
||||
private final ConnectClusterDetails clusterDetails;
|
||||
private final Herder herder;
|
||||
|
||||
public ConnectClusterStateImpl(
|
||||
long connectorsTimeoutMs,
|
||||
ConnectClusterDetails clusterDetails,
|
||||
Herder herder
|
||||
) {
|
||||
this.herderRequestTimeoutMs = connectorsTimeoutMs;
|
||||
this.clusterDetails = clusterDetails;
|
||||
this.herder = herder;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Collection<String> connectors() {
|
||||
FutureCallback<Collection<String>> connectorsCallback = new FutureCallback<>();
|
||||
herder.connectors(connectorsCallback);
|
||||
try {
|
||||
return connectorsCallback.get(herderRequestTimeoutMs, TimeUnit.MILLISECONDS);
|
||||
} catch (InterruptedException | ExecutionException | TimeoutException e) {
|
||||
throw new ConnectException("Failed to retrieve list of connectors", e);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public ConnectorHealth connectorHealth(String connName) {
|
||||
ConnectorStateInfo state = herder.connectorStatus(connName);
|
||||
ConnectorState connectorState = new ConnectorState(
|
||||
state.connector().state(),
|
||||
state.connector().workerId(),
|
||||
state.connector().trace()
|
||||
);
|
||||
Map<Integer, TaskState> taskStates = taskStates(state.tasks());
|
||||
ConnectorHealth connectorHealth = new ConnectorHealth(
|
||||
connName,
|
||||
connectorState,
|
||||
taskStates,
|
||||
ConnectorType.valueOf(state.type().name())
|
||||
);
|
||||
return connectorHealth;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Map<String, String> connectorConfig(String connName) {
|
||||
FutureCallback<Map<String, String>> connectorConfigCallback = new FutureCallback<>();
|
||||
herder.connectorConfig(connName, connectorConfigCallback);
|
||||
try {
|
||||
return connectorConfigCallback.get(herderRequestTimeoutMs, TimeUnit.MILLISECONDS);
|
||||
} catch (InterruptedException | ExecutionException | TimeoutException e) {
|
||||
throw new ConnectException(
|
||||
String.format("Failed to retrieve configuration for connector '%s'", connName),
|
||||
e
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public ConnectClusterDetails clusterDetails() {
|
||||
return clusterDetails;
|
||||
}
|
||||
|
||||
private Map<Integer, TaskState> taskStates(List<ConnectorStateInfo.TaskState> states) {
|
||||
|
||||
Map<Integer, TaskState> taskStates = new HashMap<>();
|
||||
|
||||
for (ConnectorStateInfo.TaskState state : states) {
|
||||
taskStates.put(
|
||||
state.id(),
|
||||
new TaskState(state.id(), state.state(), state.workerId(), state.trace())
|
||||
);
|
||||
}
|
||||
return taskStates;
|
||||
}
|
||||
}
|
||||
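// --- Illustrative sketch, not part of the patch above ---
// ConnectClusterStateImpl is the read-only view handed to REST extensions. The
// ConnectRestExtensionContext#clusterState() accessor comes from the public REST extension
// API and is not part of this patch; the sketch method and its local variables are placeholders.
class ClusterStateUsageSketch {
    void inspect(org.apache.kafka.connect.rest.ConnectRestExtensionContext restPluginContext) {
        org.apache.kafka.connect.health.ConnectClusterState state = restPluginContext.clusterState();
        for (String name : state.connectors()) {
            org.apache.kafka.connect.health.ConnectorHealth health = state.connectorHealth(name);
            String workerId = health.connectorState().workerId();          // worker running the connector
            java.util.Map<String, String> config = state.connectorConfig(name);
        }
    }
}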
@@ -0,0 +1,477 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.runtime.isolation;
|
||||
|
||||
import org.apache.kafka.common.config.provider.ConfigProvider;
|
||||
import org.apache.kafka.connect.components.Versioned;
|
||||
import org.apache.kafka.connect.connector.Connector;
|
||||
import org.apache.kafka.connect.connector.policy.ConnectorClientConfigOverridePolicy;
|
||||
import org.apache.kafka.connect.rest.ConnectRestExtension;
|
||||
import org.apache.kafka.connect.storage.Converter;
|
||||
import org.apache.kafka.connect.storage.HeaderConverter;
|
||||
import org.apache.kafka.connect.transforms.Transformation;
|
||||
import org.reflections.Configuration;
|
||||
import org.reflections.Reflections;
|
||||
import org.reflections.ReflectionsException;
|
||||
import org.reflections.scanners.SubTypesScanner;
|
||||
import org.reflections.util.ClasspathHelper;
|
||||
import org.reflections.util.ConfigurationBuilder;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.net.MalformedURLException;
|
||||
import java.net.URL;
|
||||
import java.net.URLClassLoader;
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.InvalidPathException;
|
||||
import java.nio.file.Path;
|
||||
import java.nio.file.Paths;
|
||||
import java.security.AccessController;
|
||||
import java.security.PrivilegedAction;
|
||||
import java.sql.Driver;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.Enumeration;
|
||||
import java.util.HashMap;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.ServiceLoader;
|
||||
import java.util.Set;
|
||||
import java.util.SortedMap;
|
||||
import java.util.SortedSet;
|
||||
import java.util.TreeMap;
|
||||
import java.util.TreeSet;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
public class DelegatingClassLoader extends URLClassLoader {
|
||||
private static final Logger log = LoggerFactory.getLogger(DelegatingClassLoader.class);
|
||||
private static final String CLASSPATH_NAME = "classpath";
|
||||
private static final String UNDEFINED_VERSION = "undefined";
|
||||
|
||||
private final Map<String, SortedMap<PluginDesc<?>, ClassLoader>> pluginLoaders;
|
||||
private final Map<String, String> aliases;
|
||||
private final SortedSet<PluginDesc<Connector>> connectors;
|
||||
private final SortedSet<PluginDesc<Converter>> converters;
|
||||
private final SortedSet<PluginDesc<HeaderConverter>> headerConverters;
|
||||
private final SortedSet<PluginDesc<Transformation>> transformations;
|
||||
private final SortedSet<PluginDesc<ConfigProvider>> configProviders;
|
||||
private final SortedSet<PluginDesc<ConnectRestExtension>> restExtensions;
|
||||
private final SortedSet<PluginDesc<ConnectorClientConfigOverridePolicy>> connectorClientConfigPolicies;
|
||||
private final List<String> pluginPaths;
|
||||
|
||||
private static final String MANIFEST_PREFIX = "META-INF/services/";
|
||||
private static final Class[] SERVICE_LOADER_PLUGINS = new Class[] {ConnectRestExtension.class, ConfigProvider.class};
|
||||
private static final Set<String> PLUGIN_MANIFEST_FILES =
|
||||
Arrays.stream(SERVICE_LOADER_PLUGINS).map(serviceLoaderPlugin -> MANIFEST_PREFIX + serviceLoaderPlugin.getName())
|
||||
.collect(Collectors.toSet());
|
||||
|
||||
public DelegatingClassLoader(List<String> pluginPaths, ClassLoader parent) {
|
||||
super(new URL[0], parent);
|
||||
this.pluginPaths = pluginPaths;
|
||||
this.pluginLoaders = new HashMap<>();
|
||||
this.aliases = new HashMap<>();
|
||||
this.connectors = new TreeSet<>();
|
||||
this.converters = new TreeSet<>();
|
||||
this.headerConverters = new TreeSet<>();
|
||||
this.transformations = new TreeSet<>();
|
||||
this.configProviders = new TreeSet<>();
|
||||
this.restExtensions = new TreeSet<>();
|
||||
this.connectorClientConfigPolicies = new TreeSet<>();
|
||||
}
|
||||
|
||||
public DelegatingClassLoader(List<String> pluginPaths) {
|
||||
// Use as parent the classloader that loaded this class. In most cases this will be the
|
||||
// System classloader. But this choice here provides additional flexibility in managed
|
||||
// environments that control classloading differently (OSGi, Spring and others) and don't
|
||||
// depend on the System classloader to load Connect's classes.
|
||||
this(pluginPaths, DelegatingClassLoader.class.getClassLoader());
|
||||
}
|
||||
|
||||
public Set<PluginDesc<Connector>> connectors() {
|
||||
return connectors;
|
||||
}
|
||||
|
||||
public Set<PluginDesc<Converter>> converters() {
|
||||
return converters;
|
||||
}
|
||||
|
||||
public Set<PluginDesc<HeaderConverter>> headerConverters() {
|
||||
return headerConverters;
|
||||
}
|
||||
|
||||
public Set<PluginDesc<Transformation>> transformations() {
|
||||
return transformations;
|
||||
}
|
||||
|
||||
public Set<PluginDesc<ConfigProvider>> configProviders() {
|
||||
return configProviders;
|
||||
}
|
||||
|
||||
public Set<PluginDesc<ConnectRestExtension>> restExtensions() {
|
||||
return restExtensions;
|
||||
}
|
||||
|
||||
public Set<PluginDesc<ConnectorClientConfigOverridePolicy>> connectorClientConfigPolicies() {
|
||||
return connectorClientConfigPolicies;
|
||||
}
|
||||
|
||||
/**
|
||||
* Retrieve the PluginClassLoader associated with a plugin class
|
||||
* @param name The fully qualified class name of the plugin
|
||||
* @return the PluginClassLoader that should be used to load this, or null if the plugin is not isolated.
|
||||
*/
|
||||
public PluginClassLoader pluginClassLoader(String name) {
|
||||
if (!PluginUtils.shouldLoadInIsolation(name)) {
|
||||
return null;
|
||||
}
|
||||
SortedMap<PluginDesc<?>, ClassLoader> inner = pluginLoaders.get(name);
|
||||
if (inner == null) {
|
||||
return null;
|
||||
}
|
||||
ClassLoader pluginLoader = inner.get(inner.lastKey());
|
||||
return pluginLoader instanceof PluginClassLoader
|
||||
? (PluginClassLoader) pluginLoader
|
||||
: null;
|
||||
}
|
||||
|
||||
public ClassLoader connectorLoader(Connector connector) {
|
||||
return connectorLoader(connector.getClass().getName());
|
||||
}
|
||||
|
||||
public ClassLoader connectorLoader(String connectorClassOrAlias) {
|
||||
String fullName = aliases.containsKey(connectorClassOrAlias)
|
||||
? aliases.get(connectorClassOrAlias)
|
||||
: connectorClassOrAlias;
|
||||
ClassLoader classLoader = pluginClassLoader(fullName);
|
||||
if (classLoader == null) classLoader = this;
|
||||
log.debug(
|
||||
"Getting plugin class loader: '{}' for connector: {}",
|
||||
classLoader,
|
||||
connectorClassOrAlias
|
||||
);
|
||||
return classLoader;
|
||||
}
|
||||
|
||||
private static PluginClassLoader newPluginClassLoader(
|
||||
final URL pluginLocation,
|
||||
final URL[] urls,
|
||||
final ClassLoader parent
|
||||
) {
|
||||
return AccessController.doPrivileged(
|
||||
(PrivilegedAction<PluginClassLoader>) () -> new PluginClassLoader(pluginLocation, urls, parent)
|
||||
);
|
||||
}
|
||||
|
||||
private <T> void addPlugins(Collection<PluginDesc<T>> plugins, ClassLoader loader) {
|
||||
for (PluginDesc<T> plugin : plugins) {
|
||||
String pluginClassName = plugin.className();
|
||||
SortedMap<PluginDesc<?>, ClassLoader> inner = pluginLoaders.get(pluginClassName);
|
||||
if (inner == null) {
|
||||
inner = new TreeMap<>();
|
||||
pluginLoaders.put(pluginClassName, inner);
|
||||
// TODO: once versioning is enabled this line should be moved outside this if branch
|
||||
log.info("Added plugin '{}'", pluginClassName);
|
||||
}
|
||||
inner.put(plugin, loader);
|
||||
}
|
||||
}
|
||||
|
||||
protected void initLoaders() {
|
||||
for (String configPath : pluginPaths) {
|
||||
initPluginLoader(configPath);
|
||||
}
|
||||
// Finally add parent/system loader.
|
||||
initPluginLoader(CLASSPATH_NAME);
|
||||
addAllAliases();
|
||||
}
|
||||
|
||||
private void initPluginLoader(String path) {
|
||||
try {
|
||||
if (CLASSPATH_NAME.equals(path)) {
|
||||
scanUrlsAndAddPlugins(
|
||||
getParent(),
|
||||
ClasspathHelper.forJavaClassPath().toArray(new URL[0]),
|
||||
null
|
||||
);
|
||||
} else {
|
||||
Path pluginPath = Paths.get(path).toAbsolutePath();
|
||||
// Update for exception handling
|
||||
path = pluginPath.toString();
|
||||
// Currently 'plugin.paths' property is a list of top-level directories
|
||||
// containing plugins
|
||||
if (Files.isDirectory(pluginPath)) {
|
||||
for (Path pluginLocation : PluginUtils.pluginLocations(pluginPath)) {
|
||||
registerPlugin(pluginLocation);
|
||||
}
|
||||
} else if (PluginUtils.isArchive(pluginPath)) {
|
||||
registerPlugin(pluginPath);
|
||||
}
|
||||
}
|
||||
} catch (InvalidPathException | MalformedURLException e) {
|
||||
log.error("Invalid path in plugin path: {}. Ignoring.", path, e);
|
||||
} catch (IOException e) {
|
||||
log.error("Could not get listing for plugin path: {}. Ignoring.", path, e);
|
||||
} catch (InstantiationException | IllegalAccessException e) {
|
||||
log.error("Could not instantiate plugins in: {}. Ignoring: {}", path, e);
|
||||
}
|
||||
}
|
||||
|
||||
private void registerPlugin(Path pluginLocation)
|
||||
throws InstantiationException, IllegalAccessException, IOException {
|
||||
log.info("Loading plugin from: {}", pluginLocation);
|
||||
List<URL> pluginUrls = new ArrayList<>();
|
||||
for (Path path : PluginUtils.pluginUrls(pluginLocation)) {
|
||||
pluginUrls.add(path.toUri().toURL());
|
||||
}
|
||||
URL[] urls = pluginUrls.toArray(new URL[0]);
|
||||
if (log.isDebugEnabled()) {
|
||||
log.debug("Loading plugin urls: {}", Arrays.toString(urls));
|
||||
}
|
||||
PluginClassLoader loader = newPluginClassLoader(
|
||||
pluginLocation.toUri().toURL(),
|
||||
urls,
|
||||
this
|
||||
);
|
||||
scanUrlsAndAddPlugins(loader, urls, pluginLocation);
|
||||
}
|
||||
|
||||
private void scanUrlsAndAddPlugins(
|
||||
ClassLoader loader,
|
||||
URL[] urls,
|
||||
Path pluginLocation
|
||||
) throws InstantiationException, IllegalAccessException {
|
||||
PluginScanResult plugins = scanPluginPath(loader, urls);
|
||||
log.info("Registered loader: {}", loader);
|
||||
if (!plugins.isEmpty()) {
|
||||
addPlugins(plugins.connectors(), loader);
|
||||
connectors.addAll(plugins.connectors());
|
||||
addPlugins(plugins.converters(), loader);
|
||||
converters.addAll(plugins.converters());
|
||||
addPlugins(plugins.headerConverters(), loader);
|
||||
headerConverters.addAll(plugins.headerConverters());
|
||||
addPlugins(plugins.transformations(), loader);
|
||||
transformations.addAll(plugins.transformations());
|
||||
addPlugins(plugins.configProviders(), loader);
|
||||
configProviders.addAll(plugins.configProviders());
|
||||
addPlugins(plugins.restExtensions(), loader);
|
||||
restExtensions.addAll(plugins.restExtensions());
|
||||
addPlugins(plugins.connectorClientConfigPolicies(), loader);
|
||||
connectorClientConfigPolicies.addAll(plugins.connectorClientConfigPolicies());
|
||||
}
|
||||
|
||||
loadJdbcDrivers(loader);
|
||||
}
|
||||
|
||||
private void loadJdbcDrivers(final ClassLoader loader) {
|
||||
// Apply here what java.sql.DriverManager does to discover and register classes
|
||||
// implementing the java.sql.Driver interface.
|
||||
AccessController.doPrivileged(
|
||||
new PrivilegedAction<Void>() {
|
||||
@Override
|
||||
public Void run() {
|
||||
ServiceLoader<Driver> loadedDrivers = ServiceLoader.load(
|
||||
Driver.class,
|
||||
loader
|
||||
);
|
||||
Iterator<Driver> driversIterator = loadedDrivers.iterator();
|
||||
try {
|
||||
while (driversIterator.hasNext()) {
|
||||
Driver driver = driversIterator.next();
|
||||
log.debug(
|
||||
"Registered java.sql.Driver: {} to java.sql.DriverManager",
|
||||
driver
|
||||
);
|
||||
}
|
||||
} catch (Throwable t) {
|
||||
log.debug(
|
||||
"Ignoring java.sql.Driver classes listed in resources but not"
|
||||
+ " present in class loader's classpath: ",
|
||||
t
|
||||
);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
private PluginScanResult scanPluginPath(
|
||||
ClassLoader loader,
|
||||
URL[] urls
|
||||
) throws InstantiationException, IllegalAccessException {
|
||||
ConfigurationBuilder builder = new ConfigurationBuilder();
|
||||
builder.setClassLoaders(new ClassLoader[]{loader});
|
||||
builder.addUrls(urls);
|
||||
builder.setScanners(new SubTypesScanner());
|
||||
builder.useParallelExecutor();
|
||||
Reflections reflections = new InternalReflections(builder);
|
||||
|
||||
return new PluginScanResult(
|
||||
getPluginDesc(reflections, Connector.class, loader),
|
||||
getPluginDesc(reflections, Converter.class, loader),
|
||||
getPluginDesc(reflections, HeaderConverter.class, loader),
|
||||
getPluginDesc(reflections, Transformation.class, loader),
|
||||
getServiceLoaderPluginDesc(ConfigProvider.class, loader),
|
||||
getServiceLoaderPluginDesc(ConnectRestExtension.class, loader),
|
||||
getServiceLoaderPluginDesc(ConnectorClientConfigOverridePolicy.class, loader)
|
||||
);
|
||||
}
|
||||
|
||||
private <T> Collection<PluginDesc<T>> getPluginDesc(
|
||||
Reflections reflections,
|
||||
Class<T> klass,
|
||||
ClassLoader loader
|
||||
) throws InstantiationException, IllegalAccessException {
|
||||
Set<Class<? extends T>> plugins;
|
||||
try {
|
||||
plugins = reflections.getSubTypesOf(klass);
|
||||
} catch (ReflectionsException e) {
|
||||
log.debug("Reflections scanner could not find any classes for URLs: " +
|
||||
reflections.getConfiguration().getUrls(), e);
|
||||
return Collections.emptyList();
|
||||
}
|
||||
|
||||
Collection<PluginDesc<T>> result = new ArrayList<>();
|
||||
for (Class<? extends T> plugin : plugins) {
|
||||
if (PluginUtils.isConcrete(plugin)) {
|
||||
result.add(new PluginDesc<>(plugin, versionFor(plugin), loader));
|
||||
} else {
|
||||
log.debug("Skipping {} as it is not concrete implementation", plugin);
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
private <T> Collection<PluginDesc<T>> getServiceLoaderPluginDesc(Class<T> klass, ClassLoader loader) {
|
||||
ClassLoader savedLoader = Plugins.compareAndSwapLoaders(loader);
|
||||
Collection<PluginDesc<T>> result = new ArrayList<>();
|
||||
try {
|
||||
ServiceLoader<T> serviceLoader = ServiceLoader.load(klass, loader);
|
||||
for (T pluginImpl : serviceLoader) {
|
||||
result.add(new PluginDesc<>((Class<? extends T>) pluginImpl.getClass(),
|
||||
versionFor(pluginImpl), loader));
|
||||
}
|
||||
} finally {
|
||||
Plugins.compareAndSwapLoaders(savedLoader);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
private static <T> String versionFor(T pluginImpl) {
|
||||
return pluginImpl instanceof Versioned ? ((Versioned) pluginImpl).version() : UNDEFINED_VERSION;
|
||||
}
|
||||
|
||||
private static <T> String versionFor(Class<? extends T> pluginKlass) throws IllegalAccessException, InstantiationException {
|
||||
// Temporary workaround until all the plugins are versioned.
|
||||
return Connector.class.isAssignableFrom(pluginKlass) ? versionFor(pluginKlass.newInstance()) : UNDEFINED_VERSION;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected Class<?> loadClass(String name, boolean resolve) throws ClassNotFoundException {
|
||||
String fullName = aliases.containsKey(name) ? aliases.get(name) : name;
|
||||
PluginClassLoader pluginLoader = pluginClassLoader(fullName);
|
||||
if (pluginLoader != null) {
|
||||
log.trace("Retrieving loaded class '{}' from '{}'", fullName, pluginLoader);
|
||||
return pluginLoader.loadClass(fullName, resolve);
|
||||
}
|
||||
|
||||
return super.loadClass(fullName, resolve);
|
||||
}
|
||||
|
||||
private void addAllAliases() {
|
||||
addAliases(connectors);
|
||||
addAliases(converters);
|
||||
addAliases(headerConverters);
|
||||
addAliases(transformations);
|
||||
addAliases(restExtensions);
|
||||
addAliases(connectorClientConfigPolicies);
|
||||
}
|
||||
|
||||
private <S> void addAliases(Collection<PluginDesc<S>> plugins) {
|
||||
for (PluginDesc<S> plugin : plugins) {
|
||||
if (PluginUtils.isAliasUnique(plugin, plugins)) {
|
||||
String simple = PluginUtils.simpleName(plugin);
|
||||
String pruned = PluginUtils.prunedName(plugin);
|
||||
aliases.put(simple, plugin.className());
|
||||
if (simple.equals(pruned)) {
|
||||
log.info("Added alias '{}' to plugin '{}'", simple, plugin.className());
|
||||
} else {
|
||||
aliases.put(pruned, plugin.className());
|
||||
log.info(
|
||||
"Added aliases '{}' and '{}' to plugin '{}'",
|
||||
simple,
|
||||
pruned,
|
||||
plugin.className()
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private static class InternalReflections extends Reflections {
|
||||
|
||||
public InternalReflections(Configuration configuration) {
|
||||
super(configuration);
|
||||
}
|
||||
|
||||
// When Reflections is used for parallel scans, it has a bug where it propagates ReflectionsException
|
||||
// as RuntimeException. Override the scan behavior to emulate the single-threaded logic.
|
||||
@Override
|
||||
protected void scan(URL url) {
|
||||
try {
|
||||
super.scan(url);
|
||||
} catch (ReflectionsException e) {
|
||||
Logger log = Reflections.log;
|
||||
if (log != null && log.isWarnEnabled()) {
|
||||
log.warn("could not create Vfs.Dir from url. ignoring the exception and continuing", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public URL getResource(String name) {
|
||||
if (serviceLoaderManifestForPlugin(name)) {
|
||||
// Default implementation of getResource searches the parent class loader and if not available/found, its own URL paths.
|
||||
// This will enable the PluginClassLoader to limit its resource search only to its own URL paths.
|
||||
return null;
|
||||
} else {
|
||||
return super.getResource(name);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public Enumeration<URL> getResources(String name) throws IOException {
|
||||
if (serviceLoaderManifestForPlugin(name)) {
|
||||
// Default implementation of getResources searches the parent class loader and also its own URL paths. This will enable the
|
||||
// PluginClassLoader to limit its resource search to only its own URL paths.
|
||||
return null;
|
||||
} else {
|
||||
return super.getResources(name);
|
||||
}
|
||||
}
|
||||
|
||||
//Visible for testing
|
||||
static boolean serviceLoaderManifestForPlugin(String name) {
|
||||
return PLUGIN_MANIFEST_FILES.contains(name);
|
||||
}
|
||||
}
|
||||
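// --- Illustrative note, not part of the patch above ---
// initLoaders() treats every plugin.path entry as a top-level directory of plugin locations
// (or as a single archive). A hypothetical layout and the loaders it yields:
//
//   plugin.path=/opt/connect/plugins
//   /opt/connect/plugins/
//       my-sink-connector/       -> one PluginClassLoader over all jars in the directory
//           my-sink-connector.jar
//           some-jdbc-driver.jar
//       single-transform.jar     -> an archive directly under the path gets its own loader too
//
//   The worker classpath itself is scanned last ("classpath") and is served by this
//   DelegatingClassLoader rather than by an isolated PluginClassLoader.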
@@ -0,0 +1,112 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.runtime.isolation;
|
||||
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import java.net.URL;
|
||||
import java.net.URLClassLoader;
|
||||
|
||||
/**
|
||||
* A custom classloader dedicated to loading Connect plugin classes in classloading isolation.
|
||||
* <p>
|
||||
* Under the current scheme for classloading isolation in Connect, a plugin classloader loads the
|
||||
* classes that it finds in its urls. For classes that are either not found or are not supposed to
|
||||
* be loaded in isolation, this plugin classloader delegates their loading to its parent. This makes
|
||||
* this classloader a child-first classloader.
|
||||
* <p>
|
||||
* This class is thread-safe.
|
||||
*/
|
||||
public class PluginClassLoader extends URLClassLoader {
|
||||
private static final Logger log = LoggerFactory.getLogger(PluginClassLoader.class);
|
||||
private final URL pluginLocation;
|
||||
|
||||
static {
|
||||
ClassLoader.registerAsParallelCapable();
|
||||
}
|
||||
|
||||
/**
|
||||
* Constructor that accepts a specific classloader as parent.
|
||||
*
|
||||
* @param pluginLocation the top-level location of the plugin to be loaded in isolation by this
|
||||
* classloader.
|
||||
* @param urls the list of urls from which to load classes and resources for this plugin.
|
||||
* @param parent the parent classloader to be used for delegation for classes that were
|
||||
* not found or should not be loaded in isolation by this classloader.
|
||||
*/
|
||||
public PluginClassLoader(URL pluginLocation, URL[] urls, ClassLoader parent) {
|
||||
super(urls, parent);
|
||||
this.pluginLocation = pluginLocation;
|
||||
}
|
||||
|
||||
/**
|
||||
* Constructor that defines the system classloader as parent of this plugin classloader.
|
||||
*
|
||||
* @param pluginLocation the top-level location of the plugin to be loaded in isolation by this
|
||||
* classloader.
|
||||
* @param urls the list of urls from which to load classes and resources for this plugin.
|
||||
*/
|
||||
public PluginClassLoader(URL pluginLocation, URL[] urls) {
|
||||
super(urls);
|
||||
this.pluginLocation = pluginLocation;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the top-level location of the classes and dependencies required by the plugin that
|
||||
* is loaded by this classloader.
|
||||
*
|
||||
* @return the plugin location.
|
||||
*/
|
||||
public String location() {
|
||||
return pluginLocation.toString();
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "PluginClassLoader{pluginLocation=" + pluginLocation + "}";
|
||||
}
|
||||
|
||||
// This method needs to be thread-safe because it is supposed to be called by multiple
|
||||
// Connect tasks. While findClass is thread-safe, defineClass called within loadClass of the
|
||||
// base method is not. More on multithreaded classloaders in:
|
||||
// https://docs.oracle.com/javase/7/docs/technotes/guides/lang/cl-mt.html
|
||||
@Override
|
||||
protected synchronized Class<?> loadClass(String name, boolean resolve)
|
||||
throws ClassNotFoundException {
|
||||
synchronized (getClassLoadingLock(name)) {
|
||||
Class<?> klass = findLoadedClass(name);
|
||||
if (klass == null) {
|
||||
try {
|
||||
if (PluginUtils.shouldLoadInIsolation(name)) {
|
||||
klass = findClass(name);
|
||||
}
|
||||
} catch (ClassNotFoundException e) {
|
||||
// Not found in loader's path. Search in parents.
|
||||
log.trace("Class '{}' not found. Delegating to parent", name);
|
||||
}
|
||||
}
|
||||
if (klass == null) {
|
||||
klass = super.loadClass(name, false);
|
||||
}
|
||||
if (resolve) {
|
||||
resolveClass(klass);
|
||||
}
|
||||
return klass;
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,110 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.runtime.isolation;
|
||||
|
||||
import com.fasterxml.jackson.annotation.JsonProperty;
|
||||
import org.apache.maven.artifact.versioning.DefaultArtifactVersion;
|
||||
|
||||
import java.util.Objects;
|
||||
|
||||
public class PluginDesc<T> implements Comparable<PluginDesc<T>> {
|
||||
private final Class<? extends T> klass;
|
||||
private final String name;
|
||||
private final String version;
|
||||
private final DefaultArtifactVersion encodedVersion;
|
||||
private final PluginType type;
|
||||
private final String typeName;
|
||||
private final String location;
|
||||
|
||||
public PluginDesc(Class<? extends T> klass, String version, ClassLoader loader) {
|
||||
this.klass = klass;
|
||||
this.name = klass.getName();
|
||||
this.version = version != null ? version : "null";
|
||||
this.encodedVersion = new DefaultArtifactVersion(this.version);
|
||||
this.type = PluginType.from(klass);
|
||||
this.typeName = type.toString();
|
||||
this.location = loader instanceof PluginClassLoader
|
||||
? ((PluginClassLoader) loader).location()
|
||||
: "classpath";
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "PluginDesc{" +
|
||||
"klass=" + klass +
|
||||
", name='" + name + '\'' +
|
||||
", version='" + version + '\'' +
|
||||
", encodedVersion=" + encodedVersion +
|
||||
", type=" + type +
|
||||
", typeName='" + typeName + '\'' +
|
||||
", location='" + location + '\'' +
|
||||
'}';
|
||||
}
|
||||
|
||||
public Class<? extends T> pluginClass() {
|
||||
return klass;
|
||||
}
|
||||
|
||||
@JsonProperty("class")
|
||||
public String className() {
|
||||
return name;
|
||||
}
|
||||
|
||||
@JsonProperty("version")
|
||||
public String version() {
|
||||
return version;
|
||||
}
|
||||
|
||||
public PluginType type() {
|
||||
return type;
|
||||
}
|
||||
|
||||
@JsonProperty("type")
|
||||
public String typeName() {
|
||||
return typeName;
|
||||
}
|
||||
|
||||
@JsonProperty("location")
|
||||
public String location() {
|
||||
return location;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (this == o) {
|
||||
return true;
|
||||
}
|
||||
if (!(o instanceof PluginDesc)) {
|
||||
return false;
|
||||
}
|
||||
PluginDesc<?> that = (PluginDesc<?>) o;
|
||||
return Objects.equals(klass, that.klass) &&
|
||||
Objects.equals(version, that.version) &&
|
||||
type == that.type;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return Objects.hash(klass, version, type);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int compareTo(PluginDesc other) {
|
||||
int nameComp = name.compareTo(other.name);
|
||||
return nameComp != 0 ? nameComp : encodedVersion.compareTo(other.encodedVersion);
|
||||
}
|
||||
}
|
||||
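// --- Illustrative note, not part of the patch above ---
// PluginDesc ordering is by class name and then by Maven-style version (DefaultArtifactVersion).
// DelegatingClassLoader relies on this: pluginClassLoader() returns the loader of inner.lastKey(),
// so when the same class is found in several locations the highest version wins; for example,
// version "1.10.0" sorts after "1.2.0" because the comparison is numeric, not lexical.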
@@ -0,0 +1,98 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.connect.runtime.isolation;

import org.apache.kafka.common.config.provider.ConfigProvider;
import org.apache.kafka.connect.connector.Connector;
import org.apache.kafka.connect.connector.policy.ConnectorClientConfigOverridePolicy;
import org.apache.kafka.connect.rest.ConnectRestExtension;
import org.apache.kafka.connect.storage.Converter;
import org.apache.kafka.connect.storage.HeaderConverter;
import org.apache.kafka.connect.transforms.Transformation;

import java.util.Arrays;
import java.util.Collection;
import java.util.List;

public class PluginScanResult {
    private final Collection<PluginDesc<Connector>> connectors;
    private final Collection<PluginDesc<Converter>> converters;
    private final Collection<PluginDesc<HeaderConverter>> headerConverters;
    private final Collection<PluginDesc<Transformation>> transformations;
    private final Collection<PluginDesc<ConfigProvider>> configProviders;
    private final Collection<PluginDesc<ConnectRestExtension>> restExtensions;
    private final Collection<PluginDesc<ConnectorClientConfigOverridePolicy>> connectorClientConfigPolicies;

    private final List<Collection> allPlugins;

    public PluginScanResult(
            Collection<PluginDesc<Connector>> connectors,
            Collection<PluginDesc<Converter>> converters,
            Collection<PluginDesc<HeaderConverter>> headerConverters,
            Collection<PluginDesc<Transformation>> transformations,
            Collection<PluginDesc<ConfigProvider>> configProviders,
            Collection<PluginDesc<ConnectRestExtension>> restExtensions,
            Collection<PluginDesc<ConnectorClientConfigOverridePolicy>> connectorClientConfigPolicies
    ) {
        this.connectors = connectors;
        this.converters = converters;
        this.headerConverters = headerConverters;
        this.transformations = transformations;
        this.configProviders = configProviders;
        this.restExtensions = restExtensions;
        this.connectorClientConfigPolicies = connectorClientConfigPolicies;
        this.allPlugins =
                Arrays.asList(connectors, converters, headerConverters, transformations, configProviders,
                        connectorClientConfigPolicies);
    }

    public Collection<PluginDesc<Connector>> connectors() {
        return connectors;
    }

    public Collection<PluginDesc<Converter>> converters() {
        return converters;
    }

    public Collection<PluginDesc<HeaderConverter>> headerConverters() {
        return headerConverters;
    }

    public Collection<PluginDesc<Transformation>> transformations() {
        return transformations;
    }

    public Collection<PluginDesc<ConfigProvider>> configProviders() {
        return configProviders;
    }

    public Collection<PluginDesc<ConnectRestExtension>> restExtensions() {
        return restExtensions;
    }

    public Collection<PluginDesc<ConnectorClientConfigOverridePolicy>> connectorClientConfigPolicies() {
        return connectorClientConfigPolicies;
    }

    public boolean isEmpty() {
        boolean isEmpty = true;
        for (Collection plugins : allPlugins) {
            isEmpty = isEmpty && plugins.isEmpty();
        }
        return isEmpty;
    }
}
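An illustrative sketch (not part of the patch): isEmpty() folds isEmpty() over the collections captured in allPlugins. Note that, as the constructor above builds allPlugins, restExtensions is not included, so a result that contains only REST extensions still reports empty.

    PluginScanResult empty = new PluginScanResult(
            Collections.emptyList(), Collections.emptyList(), Collections.emptyList(),
            Collections.emptyList(), Collections.emptyList(), Collections.emptyList(),
            Collections.emptyList());
    assert empty.isEmpty();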
@@ -0,0 +1,64 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.connect.runtime.isolation;

import org.apache.kafka.common.config.provider.ConfigProvider;
import org.apache.kafka.connect.connector.Connector;
import org.apache.kafka.connect.connector.policy.ConnectorClientConfigOverridePolicy;
import org.apache.kafka.connect.rest.ConnectRestExtension;
import org.apache.kafka.connect.sink.SinkConnector;
import org.apache.kafka.connect.source.SourceConnector;
import org.apache.kafka.connect.storage.Converter;
import org.apache.kafka.connect.transforms.Transformation;

import java.util.Locale;

public enum PluginType {
    SOURCE(SourceConnector.class),
    SINK(SinkConnector.class),
    CONNECTOR(Connector.class),
    CONVERTER(Converter.class),
    TRANSFORMATION(Transformation.class),
    CONFIGPROVIDER(ConfigProvider.class),
    REST_EXTENSION(ConnectRestExtension.class),
    CONNECTOR_CLIENT_CONFIG_OVERRIDE_POLICY(ConnectorClientConfigOverridePolicy.class),
    UNKNOWN(Object.class);

    private Class<?> klass;

    PluginType(Class<?> klass) {
        this.klass = klass;
    }

    public static PluginType from(Class<?> klass) {
        for (PluginType type : PluginType.values()) {
            if (type.klass.isAssignableFrom(klass)) {
                return type;
            }
        }
        return UNKNOWN;
    }

    public String simpleName() {
        return klass.getSimpleName();
    }

    @Override
    public String toString() {
        return super.toString().toLowerCase(Locale.ROOT);
    }
}
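A small sketch (not part of the patch) of how from() resolves a class: it walks values() in declaration order, so the more specific SOURCE/SINK constants win before the generic CONNECTOR, and any unrelated class falls through to the Object.class entry. FileStreamSinkConnector is used only as an example class assumed to be on the classpath.

    assert PluginType.from(FileStreamSinkConnector.class) == PluginType.SINK;
    assert PluginType.from(String.class) == PluginType.UNKNOWN;
    assert "unknown".equals(PluginType.UNKNOWN.toString());   // toString() lower-cases the constant name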
@@ -0,0 +1,378 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.connect.runtime.isolation;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.lang.reflect.Modifier;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Locale;
import java.util.Set;
import java.util.TreeSet;
import java.util.regex.Pattern;

/**
 * Connect plugin utility methods.
 */
public class PluginUtils {
    private static final Logger log = LoggerFactory.getLogger(PluginUtils.class);

    // Be specific about javax packages and exclude those existing in Java SE and Java EE libraries.
    private static final Pattern BLACKLIST = Pattern.compile("^(?:"
            + "java"
            + "|javax\\.accessibility"
            + "|javax\\.activation"
            + "|javax\\.activity"
            + "|javax\\.annotation"
            + "|javax\\.batch\\.api"
            + "|javax\\.batch\\.operations"
            + "|javax\\.batch\\.runtime"
            + "|javax\\.crypto"
            + "|javax\\.decorator"
            + "|javax\\.ejb"
            + "|javax\\.el"
            + "|javax\\.enterprise\\.concurrent"
            + "|javax\\.enterprise\\.context"
            + "|javax\\.enterprise\\.context\\.spi"
            + "|javax\\.enterprise\\.deploy\\.model"
            + "|javax\\.enterprise\\.deploy\\.shared"
            + "|javax\\.enterprise\\.deploy\\.spi"
            + "|javax\\.enterprise\\.event"
            + "|javax\\.enterprise\\.inject"
            + "|javax\\.enterprise\\.inject\\.spi"
            + "|javax\\.enterprise\\.util"
            + "|javax\\.faces"
            + "|javax\\.imageio"
            + "|javax\\.inject"
            + "|javax\\.interceptor"
            + "|javax\\.jms"
            + "|javax\\.json"
            + "|javax\\.jws"
            + "|javax\\.lang\\.model"
            + "|javax\\.mail"
            + "|javax\\.management"
            + "|javax\\.management\\.j2ee"
            + "|javax\\.naming"
            + "|javax\\.net"
            + "|javax\\.persistence"
            + "|javax\\.print"
            + "|javax\\.resource"
            + "|javax\\.rmi"
            + "|javax\\.script"
            + "|javax\\.security\\.auth"
            + "|javax\\.security\\.auth\\.message"
            + "|javax\\.security\\.cert"
            + "|javax\\.security\\.jacc"
            + "|javax\\.security\\.sasl"
            + "|javax\\.servlet"
            + "|javax\\.sound\\.midi"
            + "|javax\\.sound\\.sampled"
            + "|javax\\.sql"
            + "|javax\\.swing"
            + "|javax\\.tools"
            + "|javax\\.transaction"
            + "|javax\\.validation"
            + "|javax\\.websocket"
            + "|javax\\.ws\\.rs"
            + "|javax\\.xml"
            + "|javax\\.xml\\.bind"
            + "|javax\\.xml\\.registry"
            + "|javax\\.xml\\.rpc"
            + "|javax\\.xml\\.soap"
            + "|javax\\.xml\\.ws"
            + "|org\\.ietf\\.jgss"
            + "|org\\.omg\\.CORBA"
            + "|org\\.omg\\.CosNaming"
            + "|org\\.omg\\.Dynamic"
            + "|org\\.omg\\.DynamicAny"
            + "|org\\.omg\\.IOP"
            + "|org\\.omg\\.Messaging"
            + "|org\\.omg\\.PortableInterceptor"
            + "|org\\.omg\\.PortableServer"
            + "|org\\.omg\\.SendingContext"
            + "|org\\.omg\\.stub\\.java\\.rmi"
            + "|org\\.w3c\\.dom"
            + "|org\\.xml\\.sax"
            + "|org\\.apache\\.kafka"
            + "|org\\.slf4j"
            + ")\\..*$");

    // If the base interface or class that will be used to identify Connect plugins resides within
    // the same java package as the plugins that need to be loaded in isolation (and thus are
    // added to the WHITELIST), then this base interface or class needs to be excluded in the
    // regular expression pattern
    private static final Pattern WHITELIST = Pattern.compile("^org\\.apache\\.kafka\\.(?:connect\\.(?:"
            + "transforms\\.(?!Transformation$).*"
            + "|json\\..*"
            + "|file\\..*"
            + "|mirror\\..*"
            + "|mirror-client\\..*"
            + "|converters\\..*"
            + "|storage\\.StringConverter"
            + "|storage\\.SimpleHeaderConverter"
            + "|rest\\.basic\\.auth\\.extension\\.BasicAuthSecurityRestExtension"
            + "|connector\\.policy\\.(?!ConnectorClientConfigOverridePolicy$).*"
            + ")"
            + "|common\\.config\\.provider\\.(?!ConfigProvider$).*"
            + ")$");

    private static final DirectoryStream.Filter<Path> PLUGIN_PATH_FILTER = new DirectoryStream
            .Filter<Path>() {
        @Override
        public boolean accept(Path path) {
            return Files.isDirectory(path) || isArchive(path) || isClassFile(path);
        }
    };

    /**
     * Return whether the class with the given name should be loaded in isolation using a plugin
     * classloader.
     *
     * @param name the fully qualified name of the class.
     * @return true if this class should be loaded in isolation, false otherwise.
     */
    public static boolean shouldLoadInIsolation(String name) {
        return !(BLACKLIST.matcher(name).matches() && !WHITELIST.matcher(name).matches());
    }

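    // Illustrative sketch (not part of the patch): the BLACKLIST above keeps JDK and framework
    // packages on the parent classloader, while the WHITELIST re-isolates the plugin
    // implementations that Connect itself bundles. "io.example.MyConnector" is a hypothetical
    // third-party class.
    //
    //     PluginUtils.shouldLoadInIsolation("org.apache.kafka.connect.storage.Converter");  // false: framework API
    //     PluginUtils.shouldLoadInIsolation("org.apache.kafka.connect.json.JsonConverter");  // true: bundled plugin
    //     PluginUtils.shouldLoadInIsolation("io.example.MyConnector");                       // true: third-party plugin
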
    /**
     * Verify the given class corresponds to a concrete class and not to an abstract class or
     * interface.
     * @param klass the class object.
     * @return true if the argument is a concrete class, false if it is abstract or an interface.
     */
    public static boolean isConcrete(Class<?> klass) {
        int mod = klass.getModifiers();
        return !Modifier.isAbstract(mod) && !Modifier.isInterface(mod);
    }

    /**
     * Return whether a path corresponds to a JAR or ZIP archive.
     *
     * @param path the path to validate.
     * @return true if the path is a JAR or ZIP archive file, otherwise false.
     */
    public static boolean isArchive(Path path) {
        String archivePath = path.toString().toLowerCase(Locale.ROOT);
        return archivePath.endsWith(".jar") || archivePath.endsWith(".zip");
    }

    /**
     * Return whether a path corresponds to a java class file.
     *
     * @param path the path to validate.
     * @return true if the path is a java class file, otherwise false.
     */
    public static boolean isClassFile(Path path) {
        return path.toString().toLowerCase(Locale.ROOT).endsWith(".class");
    }

    public static List<Path> pluginLocations(Path topPath) throws IOException {
        List<Path> locations = new ArrayList<>();
        try (
                DirectoryStream<Path> listing = Files.newDirectoryStream(
                        topPath,
                        PLUGIN_PATH_FILTER
                )
        ) {
            for (Path dir : listing) {
                locations.add(dir);
            }
        }
        return locations;
    }

    /**
     * Given a top path in the filesystem, return a list of paths to archives (JAR or ZIP
     * files) contained under this top path. If the top path contains only java class files,
     * return the top path itself. This method follows symbolic links to discover archives and
     * returns such archives as absolute paths.
     *
     * @param topPath the path to use as root of plugin search.
     * @return a list of potential plugin paths, or empty list if no such paths exist.
     * @throws IOException if an I/O error occurs while traversing the plugin path.
     */
    public static List<Path> pluginUrls(Path topPath) throws IOException {
        boolean containsClassFiles = false;
        Set<Path> archives = new TreeSet<>();
        LinkedList<DirectoryEntry> dfs = new LinkedList<>();
        Set<Path> visited = new HashSet<>();

        if (isArchive(topPath)) {
            return Collections.singletonList(topPath);
        }

        DirectoryStream<Path> topListing = Files.newDirectoryStream(
                topPath,
                PLUGIN_PATH_FILTER
        );
        dfs.push(new DirectoryEntry(topListing));
        visited.add(topPath);
        try {
            while (!dfs.isEmpty()) {
                Iterator<Path> neighbors = dfs.peek().iterator;
                if (!neighbors.hasNext()) {
                    dfs.pop().stream.close();
                    continue;
                }

                Path adjacent = neighbors.next();
                if (Files.isSymbolicLink(adjacent)) {
                    try {
                        Path symlink = Files.readSymbolicLink(adjacent);
                        // if symlink is absolute resolve() returns the absolute symlink itself
                        Path parent = adjacent.getParent();
                        if (parent == null) {
                            continue;
                        }
                        Path absolute = parent.resolve(symlink).toRealPath();
                        if (Files.exists(absolute)) {
                            adjacent = absolute;
                        } else {
                            continue;
                        }
                    } catch (IOException e) {
                        // See https://issues.apache.org/jira/browse/KAFKA-6288 for a reported
                        // failure. Such a failure at this stage is not easily reproducible and
                        // therefore an exception is caught and ignored after issuing a
                        // warning. This allows class scanning to continue for non-broken plugins.
                        log.warn(
                                "Resolving symbolic link '{}' failed. Ignoring this path.",
                                adjacent,
                                e
                        );
                        continue;
                    }
                }

                if (!visited.contains(adjacent)) {
                    visited.add(adjacent);
                    if (isArchive(adjacent)) {
                        archives.add(adjacent);
                    } else if (isClassFile(adjacent)) {
                        containsClassFiles = true;
                    } else {
                        DirectoryStream<Path> listing = Files.newDirectoryStream(
                                adjacent,
                                PLUGIN_PATH_FILTER
                        );
                        dfs.push(new DirectoryEntry(listing));
                    }
                }
            }
        } finally {
            while (!dfs.isEmpty()) {
                dfs.pop().stream.close();
            }
        }

        if (containsClassFiles) {
            if (archives.isEmpty()) {
                return Collections.singletonList(topPath);
            }
            log.warn("Plugin path contains both java archives and class files. Returning only the"
                    + " archives");
        }
        return Arrays.asList(archives.toArray(new Path[0]));
    }

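    // Illustrative sketch (not part of the patch): how a worker's plugin.path entry is expanded.
    // pluginLocations() lists the candidate locations directly under the path, and pluginUrls()
    // resolves each location to the archives (or class-file root) that back one isolated
    // classloader. "/opt/connect/plugins" is a hypothetical path.
    //
    //     Path pluginPath = Paths.get("/opt/connect/plugins");
    //     for (Path location : PluginUtils.pluginLocations(pluginPath)) {
    //         System.out.println(location + " -> " + PluginUtils.pluginUrls(location));
    //     }
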
    /**
     * Return the simple class name of a plugin as {@code String}.
     *
     * @param plugin the plugin descriptor.
     * @return the plugin's simple class name.
     */
    public static String simpleName(PluginDesc<?> plugin) {
        return plugin.pluginClass().getSimpleName();
    }

    /**
     * Remove the plugin type name at the end of a plugin class name, if such suffix is present.
     * This method is meant to be used to extract plugin aliases.
     *
     * @param plugin the plugin descriptor.
     * @return the pruned simple class name of the plugin.
     */
    public static String prunedName(PluginDesc<?> plugin) {
        // It's currently simpler to switch on type than do pattern matching.
        switch (plugin.type()) {
            case SOURCE:
            case SINK:
            case CONNECTOR:
                return prunePluginName(plugin, "Connector");
            default:
                return prunePluginName(plugin, plugin.type().simpleName());
        }
    }

    /**
     * Verify whether a given plugin's alias matches another alias in a collection of plugins.
     *
     * @param alias the plugin descriptor to test for alias matching.
     * @param plugins the collection of plugins to test against.
     * @param <U> the plugin type.
     * @return false if a match was found in the collection, otherwise true.
     */
    public static <U> boolean isAliasUnique(
            PluginDesc<U> alias,
            Collection<PluginDesc<U>> plugins
    ) {
        boolean matched = false;
        for (PluginDesc<U> plugin : plugins) {
            if (simpleName(alias).equals(simpleName(plugin))
                    || prunedName(alias).equals(prunedName(plugin))) {
                if (matched) {
                    return false;
                }
                matched = true;
            }
        }
        return true;
    }

    private static String prunePluginName(PluginDesc<?> plugin, String suffix) {
        String simple = plugin.pluginClass().getSimpleName();
        int pos = simple.lastIndexOf(suffix);
        if (pos > 0) {
            return simple.substring(0, pos);
        }
        return simple;
    }

    private static class DirectoryEntry {
        final DirectoryStream<Path> stream;
        final Iterator<Path> iterator;

        DirectoryEntry(DirectoryStream<Path> stream) {
            this.stream = stream;
            this.iterator = stream.iterator();
        }
    }

}
@@ -0,0 +1,463 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.connect.runtime.isolation;

import org.apache.kafka.common.Configurable;
import org.apache.kafka.common.config.AbstractConfig;
import org.apache.kafka.common.config.provider.ConfigProvider;
import org.apache.kafka.common.utils.Utils;
import org.apache.kafka.connect.components.Versioned;
import org.apache.kafka.connect.connector.ConnectRecord;
import org.apache.kafka.connect.connector.Connector;
import org.apache.kafka.connect.connector.Task;
import org.apache.kafka.connect.errors.ConnectException;
import org.apache.kafka.connect.json.JsonConverter;
import org.apache.kafka.connect.json.JsonConverterConfig;
import org.apache.kafka.connect.runtime.WorkerConfig;
import org.apache.kafka.connect.storage.Converter;
import org.apache.kafka.connect.storage.ConverterConfig;
import org.apache.kafka.connect.storage.ConverterType;
import org.apache.kafka.connect.storage.HeaderConverter;
import org.apache.kafka.connect.transforms.Transformation;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.security.AccessController;
import java.security.PrivilegedAction;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Set;

public class Plugins {

    public enum ClassLoaderUsage {
        CURRENT_CLASSLOADER,
        PLUGINS
    }

    private static final Logger log = LoggerFactory.getLogger(Plugins.class);
    private final DelegatingClassLoader delegatingLoader;

    public Plugins(Map<String, String> props) {
        List<String> pluginLocations = WorkerConfig.pluginLocations(props);
        delegatingLoader = newDelegatingClassLoader(pluginLocations);
        delegatingLoader.initLoaders();
    }

    private static DelegatingClassLoader newDelegatingClassLoader(final List<String> paths) {
        return AccessController.doPrivileged(
                (PrivilegedAction<DelegatingClassLoader>) () -> new DelegatingClassLoader(paths)
        );
    }

    private static <T> String pluginNames(Collection<PluginDesc<T>> plugins) {
        return Utils.join(plugins, ", ");
    }

    protected static <T> T newPlugin(Class<T> klass) {
        // KAFKA-8340: The thread classloader is used during static initialization and must be
        // set to the plugin's classloader during instantiation
        ClassLoader savedLoader = compareAndSwapLoaders(klass.getClassLoader());
        try {
            return Utils.newInstance(klass);
        } catch (Throwable t) {
            throw new ConnectException("Instantiation error", t);
        } finally {
            compareAndSwapLoaders(savedLoader);
        }
    }

    @SuppressWarnings("unchecked")
    protected <U> Class<? extends U> pluginClassFromConfig(
            AbstractConfig config,
            String propertyName,
            Class<U> pluginClass,
            Collection<PluginDesc<U>> plugins
    ) {
        Class<?> klass = config.getClass(propertyName);
        if (pluginClass.isAssignableFrom(klass)) {
            return (Class<? extends U>) klass;
        }
        throw new ConnectException(
                "Failed to find any class that implements " + pluginClass.getSimpleName()
                        + " for the config "
                        + propertyName + ", available classes are: "
                        + pluginNames(plugins)
        );
    }

    @SuppressWarnings("unchecked")
    protected static <U> Class<? extends U> pluginClass(
            DelegatingClassLoader loader,
            String classOrAlias,
            Class<U> pluginClass
    ) throws ClassNotFoundException {
        Class<?> klass = loader.loadClass(classOrAlias, false);
        if (pluginClass.isAssignableFrom(klass)) {
            return (Class<? extends U>) klass;
        }

        throw new ClassNotFoundException(
                "Requested class: "
                        + classOrAlias
                        + " does not extend " + pluginClass.getSimpleName()
        );
    }

    @SuppressWarnings("deprecation")
    protected static boolean isInternalConverter(String classPropertyName) {
        return classPropertyName.equals(WorkerConfig.INTERNAL_KEY_CONVERTER_CLASS_CONFIG)
                || classPropertyName.equals(WorkerConfig.INTERNAL_VALUE_CONVERTER_CLASS_CONFIG);
    }

    public static ClassLoader compareAndSwapLoaders(ClassLoader loader) {
        ClassLoader current = Thread.currentThread().getContextClassLoader();
        if (!current.equals(loader)) {
            Thread.currentThread().setContextClassLoader(loader);
        }
        return current;
    }

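    // Illustrative sketch (not part of the patch): the save/swap/restore pattern used around any
    // call into plugin code, mirroring what newPlugin() above does for static initialization.
    // "pluginLoader" stands for whichever plugin classloader applies.
    //
    //     ClassLoader savedLoader = Plugins.compareAndSwapLoaders(pluginLoader);
    //     try {
    //         // call into connector/converter code that resolves classes via the context classloader
    //     } finally {
    //         Plugins.compareAndSwapLoaders(savedLoader);
    //     }
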
    public ClassLoader currentThreadLoader() {
        return Thread.currentThread().getContextClassLoader();
    }

    public ClassLoader compareAndSwapWithDelegatingLoader() {
        ClassLoader current = Thread.currentThread().getContextClassLoader();
        if (!current.equals(delegatingLoader)) {
            Thread.currentThread().setContextClassLoader(delegatingLoader);
        }
        return current;
    }

    public ClassLoader compareAndSwapLoaders(Connector connector) {
        ClassLoader connectorLoader = delegatingLoader.connectorLoader(connector);
        return compareAndSwapLoaders(connectorLoader);
    }

    public DelegatingClassLoader delegatingLoader() {
        return delegatingLoader;
    }

    public Set<PluginDesc<Connector>> connectors() {
        return delegatingLoader.connectors();
    }

    public Set<PluginDesc<Converter>> converters() {
        return delegatingLoader.converters();
    }

    public Set<PluginDesc<Transformation>> transformations() {
        return delegatingLoader.transformations();
    }

    public Set<PluginDesc<ConfigProvider>> configProviders() {
        return delegatingLoader.configProviders();
    }

    public Connector newConnector(String connectorClassOrAlias) {
        Class<? extends Connector> klass = connectorClass(connectorClassOrAlias);
        return newPlugin(klass);
    }

    public Class<? extends Connector> connectorClass(String connectorClassOrAlias) {
        Class<? extends Connector> klass;
        try {
            klass = pluginClass(
                    delegatingLoader,
                    connectorClassOrAlias,
                    Connector.class
            );
        } catch (ClassNotFoundException e) {
            List<PluginDesc<Connector>> matches = new ArrayList<>();
            for (PluginDesc<Connector> plugin : delegatingLoader.connectors()) {
                Class<?> pluginClass = plugin.pluginClass();
                String simpleName = pluginClass.getSimpleName();
                if (simpleName.equals(connectorClassOrAlias)
                        || simpleName.equals(connectorClassOrAlias + "Connector")) {
                    matches.add(plugin);
                }
            }

            if (matches.isEmpty()) {
                throw new ConnectException(
                        "Failed to find any class that implements Connector and which name matches "
                                + connectorClassOrAlias
                                + ", available connectors are: "
                                + pluginNames(delegatingLoader.connectors())
                );
            }
            if (matches.size() > 1) {
                throw new ConnectException(
                        "More than one connector matches alias "
                                + connectorClassOrAlias
                                + ". Please use full package and class name instead. Classes found: "
                                + pluginNames(matches)
                );
            }

            PluginDesc<Connector> entry = matches.get(0);
            klass = entry.pluginClass();
        }
        return klass;
    }

    public Task newTask(Class<? extends Task> taskClass) {
        return newPlugin(taskClass);
    }

    /**
     * If the given configuration defines a {@link Converter} using the named configuration property, return a new configured instance.
     *
     * @param config the configuration containing the {@link Converter}'s configuration; may not be null
     * @param classPropertyName the name of the property that contains the name of the {@link Converter} class; may not be null
     * @param classLoaderUsage which classloader should be used
     * @return the instantiated and configured {@link Converter}; null if the configuration did not define the specified property
     * @throws ConnectException if the {@link Converter} implementation class could not be found
     */
    public Converter newConverter(AbstractConfig config, String classPropertyName, ClassLoaderUsage classLoaderUsage) {
        if (!config.originals().containsKey(classPropertyName) && !isInternalConverter(classPropertyName)) {
            // This configuration does not define the converter via the specified property name, and
            // it does not represent an internal converter (which has a default available)
            return null;
        }
        Class<? extends Converter> klass = null;
        switch (classLoaderUsage) {
            case CURRENT_CLASSLOADER:
                // Attempt to load first with the current classloader, and plugins as a fallback.
                // Note: we can't use config.getConfiguredInstance because Converter doesn't implement Configurable, and even if it did
                // we have to remove the property prefixes before calling config(...) and we still always want to call Converter.config.
                klass = pluginClassFromConfig(config, classPropertyName, Converter.class, delegatingLoader.converters());
                break;
            case PLUGINS:
                // Attempt to load with the plugin class loader, which uses the current classloader as a fallback
                String converterClassOrAlias = config.getClass(classPropertyName).getName();
                try {
                    klass = pluginClass(delegatingLoader, converterClassOrAlias, Converter.class);
                } catch (ClassNotFoundException e) {
                    throw new ConnectException(
                            "Failed to find any class that implements Converter and which name matches "
                                    + converterClassOrAlias + ", available converters are: "
                                    + pluginNames(delegatingLoader.converters())
                    );
                }
                break;
        }
        if (klass == null) {
            throw new ConnectException("Unable to initialize the Converter specified in '" + classPropertyName + "'");
        }

        // Determine whether this is a key or value converter based upon the supplied property name ...
        @SuppressWarnings("deprecation")
        final boolean isKeyConverter = WorkerConfig.KEY_CONVERTER_CLASS_CONFIG.equals(classPropertyName)
                || WorkerConfig.INTERNAL_KEY_CONVERTER_CLASS_CONFIG.equals(classPropertyName);

        // Configure the Converter using only the old configuration mechanism ...
        String configPrefix = classPropertyName + ".";
        Map<String, Object> converterConfig = config.originalsWithPrefix(configPrefix);
        log.debug("Configuring the {} converter with configuration keys:{}{}",
                isKeyConverter ? "key" : "value", System.lineSeparator(), converterConfig.keySet());

        // Have to override schemas.enable from true to false for internal JSON converters
        // Don't have to warn the user about anything since all deprecation warnings take place in the
        // WorkerConfig class
        if (JsonConverter.class.isAssignableFrom(klass) && isInternalConverter(classPropertyName)) {
            // If they haven't explicitly specified values for internal.key.converter.schemas.enable
            // or internal.value.converter.schemas.enable, we can safely default them to false
            if (!converterConfig.containsKey(JsonConverterConfig.SCHEMAS_ENABLE_CONFIG)) {
                converterConfig.put(JsonConverterConfig.SCHEMAS_ENABLE_CONFIG, false);
            }
        }

        Converter plugin;
        ClassLoader savedLoader = compareAndSwapLoaders(klass.getClassLoader());
        try {
            plugin = newPlugin(klass);
            plugin.configure(converterConfig, isKeyConverter);
        } finally {
            compareAndSwapLoaders(savedLoader);
        }
        return plugin;
    }

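    // Illustrative sketch (not part of the patch): how worker properties reach the converter
    // configured above. Properties under the "<classPropertyName>." prefix are stripped of the
    // prefix and passed to configure(), so a worker file containing
    //
    //     value.converter=org.apache.kafka.connect.json.JsonConverter
    //     value.converter.schemas.enable=false
    //
    // leads to a call roughly equivalent to:
    //
    //     Converter valueConverter = plugins.newConverter(config,
    //             WorkerConfig.VALUE_CONVERTER_CLASS_CONFIG, ClassLoaderUsage.PLUGINS);
    //     // internally: valueConverter.configure({"schemas.enable": "false"}, isKey = false)
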
    /**
     * If the given configuration defines a {@link HeaderConverter} using the named configuration property, return a new configured
     * instance.
     *
     * @param config the configuration containing the {@link Converter}'s configuration; may not be null
     * @param classPropertyName the name of the property that contains the name of the {@link Converter} class; may not be null
     * @param classLoaderUsage which classloader should be used
     * @return the instantiated and configured {@link HeaderConverter}; null if the configuration did not define the specified property
     * @throws ConnectException if the {@link HeaderConverter} implementation class could not be found
     */
    public HeaderConverter newHeaderConverter(AbstractConfig config, String classPropertyName, ClassLoaderUsage classLoaderUsage) {
        Class<? extends HeaderConverter> klass = null;
        switch (classLoaderUsage) {
            case CURRENT_CLASSLOADER:
                if (!config.originals().containsKey(classPropertyName)) {
                    // This connector configuration does not define the header converter via the specified property name
                    return null;
                }
                // Attempt to load first with the current classloader, and plugins as a fallback.
                // Note: we can't use config.getConfiguredInstance because we have to remove the property prefixes
                // before calling config(...)
                klass = pluginClassFromConfig(config, classPropertyName, HeaderConverter.class, delegatingLoader.headerConverters());
                break;
            case PLUGINS:
                // Attempt to load with the plugin class loader, which uses the current classloader as a fallback.
                // Note that there will always be at least a default header converter for the worker
                String converterClassOrAlias = config.getClass(classPropertyName).getName();
                try {
                    klass = pluginClass(
                            delegatingLoader,
                            converterClassOrAlias,
                            HeaderConverter.class
                    );
                } catch (ClassNotFoundException e) {
                    throw new ConnectException(
                            "Failed to find any class that implements HeaderConverter and which name matches "
                                    + converterClassOrAlias
                                    + ", available header converters are: "
                                    + pluginNames(delegatingLoader.headerConverters())
                    );
                }
        }
        if (klass == null) {
            throw new ConnectException("Unable to initialize the HeaderConverter specified in '" + classPropertyName + "'");
        }

        String configPrefix = classPropertyName + ".";
        Map<String, Object> converterConfig = config.originalsWithPrefix(configPrefix);
        converterConfig.put(ConverterConfig.TYPE_CONFIG, ConverterType.HEADER.getName());
        log.debug("Configuring the header converter with configuration keys:{}{}", System.lineSeparator(), converterConfig.keySet());

        HeaderConverter plugin;
        ClassLoader savedLoader = compareAndSwapLoaders(klass.getClassLoader());
        try {
            plugin = newPlugin(klass);
            plugin.configure(converterConfig);
        } finally {
            compareAndSwapLoaders(savedLoader);
        }
        return plugin;
    }

    public ConfigProvider newConfigProvider(AbstractConfig config, String providerPrefix, ClassLoaderUsage classLoaderUsage) {
        String classPropertyName = providerPrefix + ".class";
        Map<String, String> originalConfig = config.originalsStrings();
        if (!originalConfig.containsKey(classPropertyName)) {
            // This configuration does not define the config provider via the specified property name
            return null;
        }
        Class<? extends ConfigProvider> klass = null;
        switch (classLoaderUsage) {
            case CURRENT_CLASSLOADER:
                // Attempt to load first with the current classloader, and plugins as a fallback.
                klass = pluginClassFromConfig(config, classPropertyName, ConfigProvider.class, delegatingLoader.configProviders());
                break;
            case PLUGINS:
                // Attempt to load with the plugin class loader, which uses the current classloader as a fallback
                String configProviderClassOrAlias = originalConfig.get(classPropertyName);
                try {
                    klass = pluginClass(delegatingLoader, configProviderClassOrAlias, ConfigProvider.class);
                } catch (ClassNotFoundException e) {
                    throw new ConnectException(
                            "Failed to find any class that implements ConfigProvider and which name matches "
                                    + configProviderClassOrAlias + ", available ConfigProviders are: "
                                    + pluginNames(delegatingLoader.configProviders())
                    );
                }
                break;
        }
        if (klass == null) {
            throw new ConnectException("Unable to initialize the ConfigProvider specified in '" + classPropertyName + "'");
        }

        // Configure the ConfigProvider
        String configPrefix = providerPrefix + ".param.";
        Map<String, Object> configProviderConfig = config.originalsWithPrefix(configPrefix);

        ConfigProvider plugin;
        ClassLoader savedLoader = compareAndSwapLoaders(klass.getClassLoader());
        try {
            plugin = newPlugin(klass);
            plugin.configure(configProviderConfig);
        } finally {
            compareAndSwapLoaders(savedLoader);
        }
        return plugin;
    }

    /**
     * If the given class names are available in the classloader, return a list of new configured
     * instances. If the instances implement {@link Configurable}, they are configured with the provided {@code config}.
     *
     * @param klassNames the list of class names of plugins that need to be instantiated and configured
     * @param config the configuration containing the {@link org.apache.kafka.connect.runtime.Worker}'s configuration; may not be {@code null}
     * @param pluginKlass the type of the plugin class that is being instantiated
     * @return the instantiated and configured list of plugins of type T; empty list if {@code klassNames} is {@code null} or empty
     * @throws ConnectException if the implementation class could not be found
     */
    public <T> List<T> newPlugins(List<String> klassNames, AbstractConfig config, Class<T> pluginKlass) {
        List<T> plugins = new ArrayList<>();
        if (klassNames != null) {
            for (String klassName : klassNames) {
                plugins.add(newPlugin(klassName, config, pluginKlass));
            }
        }
        return plugins;
    }

    public <T> T newPlugin(String klassName, AbstractConfig config, Class<T> pluginKlass) {
        T plugin;
        Class<? extends T> klass;
        try {
            klass = pluginClass(delegatingLoader, klassName, pluginKlass);
        } catch (ClassNotFoundException e) {
            String msg = String.format("Failed to find any class that implements %s and which "
                    + "name matches %s", pluginKlass, klassName);
            throw new ConnectException(msg);
        }
        ClassLoader savedLoader = compareAndSwapLoaders(klass.getClassLoader());
        try {
            plugin = newPlugin(klass);
            if (plugin instanceof Versioned) {
                Versioned versionedPlugin = (Versioned) plugin;
                if (versionedPlugin.version() == null || versionedPlugin.version().trim()
                        .isEmpty()) {
                    throw new ConnectException("Version not defined for '" + klassName + "'");
                }
            }
            if (plugin instanceof Configurable) {
                ((Configurable) plugin).configure(config.originals());
            }
        } finally {
            compareAndSwapLoaders(savedLoader);
        }
        return plugin;
    }

    public <R extends ConnectRecord<R>> Transformation<R> newTranformations(
            String transformationClassOrAlias
    ) {
        return null;
    }

}
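A minimal usage sketch (not part of the patch) for the facade above, assuming a worker properties map with a hypothetical plugin.path and assuming the file connector is available, so the alias resolution in connectorClass() lets "FileStreamSource" stand in for the fully qualified class name:

    Map<String, String> workerProps = new HashMap<>();
    workerProps.put(WorkerConfig.PLUGIN_PATH_CONFIG, "/opt/connect/plugins");  // hypothetical path
    Plugins plugins = new Plugins(workerProps);
    ClassLoader saved = plugins.compareAndSwapWithDelegatingLoader();
    try {
        Connector connector = plugins.newConnector("FileStreamSource");
    } finally {
        Plugins.compareAndSwapLoaders(saved);
    }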
@@ -0,0 +1,138 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.kafka.connect.runtime.rest;

import org.glassfish.jersey.server.ResourceConfig;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.Map;
import java.util.Objects;

import javax.ws.rs.core.Configurable;
import javax.ws.rs.core.Configuration;

/**
 * The implementation delegates to {@link ResourceConfig} so that we can handle duplicate
 * registrations deterministically by not re-registering them again.
 */
public class ConnectRestConfigurable implements Configurable<ResourceConfig> {

    private static final Logger log = LoggerFactory.getLogger(ConnectRestConfigurable.class);

    private static final boolean ALLOWED_TO_REGISTER = true;
    private static final boolean NOT_ALLOWED_TO_REGISTER = false;

    private ResourceConfig resourceConfig;

    public ConnectRestConfigurable(ResourceConfig resourceConfig) {
        Objects.requireNonNull(resourceConfig, "ResourceConfig can't be null");
        this.resourceConfig = resourceConfig;
    }

    @Override
    public Configuration getConfiguration() {
        return resourceConfig.getConfiguration();
    }

    @Override
    public ResourceConfig property(String name, Object value) {
        return resourceConfig.property(name, value);
    }

    @Override
    public ResourceConfig register(Object component) {
        if (allowedToRegister(component)) {
            resourceConfig.register(component);
        }
        return resourceConfig;
    }

    @Override
    public ResourceConfig register(Object component, int priority) {
        if (allowedToRegister(component)) {
            resourceConfig.register(component, priority);
        }
        return resourceConfig;
    }

    @Override
    public ResourceConfig register(Object component, Map<Class<?>, Integer> contracts) {
        if (allowedToRegister(component)) {
            resourceConfig.register(component, contracts);
        }
        return resourceConfig;
    }

    @Override
    public ResourceConfig register(Object component, Class... contracts) {
        if (allowedToRegister(component)) {
            resourceConfig.register(component, contracts);
        }
        return resourceConfig;
    }

    @Override
    public ResourceConfig register(Class<?> componentClass, Map<Class<?>, Integer> contracts) {
        if (allowedToRegister(componentClass)) {
            resourceConfig.register(componentClass, contracts);
        }
        return resourceConfig;
    }

    @Override
    public ResourceConfig register(Class<?> componentClass, Class<?>... contracts) {
        if (allowedToRegister(componentClass)) {
            resourceConfig.register(componentClass, contracts);
        }
        return resourceConfig;
    }

    @Override
    public ResourceConfig register(Class<?> componentClass, int priority) {
        if (allowedToRegister(componentClass)) {
            resourceConfig.register(componentClass, priority);
        }
        return resourceConfig;
    }

    @Override
    public ResourceConfig register(Class<?> componentClass) {
        if (allowedToRegister(componentClass)) {
            resourceConfig.register(componentClass);
        }
        return resourceConfig;
    }

    private boolean allowedToRegister(Object component) {
        if (resourceConfig.isRegistered(component)) {
            log.warn("The resource {} is already registered", component);
            return NOT_ALLOWED_TO_REGISTER;
        }
        return ALLOWED_TO_REGISTER;
    }

    private boolean allowedToRegister(Class<?> componentClass) {
        if (resourceConfig.isRegistered(componentClass)) {
            log.warn("The resource {} is already registered", componentClass);
            return NOT_ALLOWED_TO_REGISTER;
        }
        return ALLOWED_TO_REGISTER;
    }
}
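A brief sketch (not part of the patch) of the deduplication described in the class javadoc, assuming a hypothetical JAX-RS resource class that a REST extension registers twice:

    ResourceConfig resourceConfig = new ResourceConfig();
    ConnectRestConfigurable configurable = new ConnectRestConfigurable(resourceConfig);
    configurable.register(LoggingRestFilter.class);  // registered
    configurable.register(LoggingRestFilter.class);  // logged as already registered, not added again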
@@ -0,0 +1,47 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.kafka.connect.runtime.rest;

import org.apache.kafka.connect.health.ConnectClusterState;
import org.apache.kafka.connect.rest.ConnectRestExtensionContext;

import javax.ws.rs.core.Configurable;

public class ConnectRestExtensionContextImpl implements ConnectRestExtensionContext {

    private Configurable<? extends Configurable> configurable;
    private ConnectClusterState clusterState;

    public ConnectRestExtensionContextImpl(
            Configurable<? extends Configurable> configurable,
            ConnectClusterState clusterState
    ) {
        this.configurable = configurable;
        this.clusterState = clusterState;
    }

    @Override
    public Configurable<? extends Configurable> configurable() {
        return configurable;
    }

    @Override
    public ConnectClusterState clusterState() {
        return clusterState;
    }
}
@@ -0,0 +1,148 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.connect.runtime.rest;

import org.apache.kafka.connect.errors.ConnectException;
import org.apache.kafka.connect.runtime.rest.errors.BadRequestException;
import org.eclipse.jetty.client.api.Request;

import javax.crypto.Mac;
import javax.crypto.SecretKey;
import javax.ws.rs.core.HttpHeaders;
import java.security.InvalidKeyException;
import java.security.NoSuchAlgorithmException;
import java.util.Arrays;
import java.util.Base64;
import java.util.Objects;

public class InternalRequestSignature {

    public static final String SIGNATURE_HEADER = "X-Connect-Authorization";
    public static final String SIGNATURE_ALGORITHM_HEADER = "X-Connect-Request-Signature-Algorithm";

    private final byte[] requestBody;
    private final Mac mac;
    private final byte[] requestSignature;

    /**
     * Add a signature to a request.
     * @param key the key to sign the request with; may not be null
     * @param requestBody the body of the request; may not be null
     * @param signatureAlgorithm the algorithm to use to sign the request; may not be null
     * @param request the request to add the signature to; may not be null
     */
    public static void addToRequest(SecretKey key, byte[] requestBody, String signatureAlgorithm, Request request) {
        Mac mac;
        try {
            mac = mac(signatureAlgorithm);
        } catch (NoSuchAlgorithmException e) {
            throw new ConnectException(e);
        }
        byte[] requestSignature = sign(mac, key, requestBody);
        request.header(InternalRequestSignature.SIGNATURE_HEADER, Base64.getEncoder().encodeToString(requestSignature))
                .header(InternalRequestSignature.SIGNATURE_ALGORITHM_HEADER, signatureAlgorithm);
    }

    /**
     * Extract a signature from a request.
     * @param requestBody the body of the request; may not be null
     * @param headers the headers for the request; may be null
     * @return the signature extracted from the request, or null if one or more request signature
     * headers were not present
     */
    public static InternalRequestSignature fromHeaders(byte[] requestBody, HttpHeaders headers) {
        if (headers == null) {
            return null;
        }

        String signatureAlgorithm = headers.getHeaderString(SIGNATURE_ALGORITHM_HEADER);
        String encodedSignature = headers.getHeaderString(SIGNATURE_HEADER);
        if (signatureAlgorithm == null || encodedSignature == null) {
            return null;
        }

        Mac mac;
        try {
            mac = mac(signatureAlgorithm);
        } catch (NoSuchAlgorithmException e) {
            throw new BadRequestException(e.getMessage());
        }

        byte[] decodedSignature;
        try {
            decodedSignature = Base64.getDecoder().decode(encodedSignature);
        } catch (IllegalArgumentException e) {
            throw new BadRequestException(e.getMessage());
        }

        return new InternalRequestSignature(
                requestBody,
                mac,
                decodedSignature
        );
    }

    // Public for testing
    public InternalRequestSignature(byte[] requestBody, Mac mac, byte[] requestSignature) {
        this.requestBody = requestBody;
        this.mac = mac;
        this.requestSignature = requestSignature;
    }

    public String keyAlgorithm() {
        return mac.getAlgorithm();
    }

    public boolean isValid(SecretKey key) {
        return Arrays.equals(sign(mac, key, requestBody), requestSignature);
    }

    private static Mac mac(String signatureAlgorithm) throws NoSuchAlgorithmException {
        return Mac.getInstance(signatureAlgorithm);
    }

    private static byte[] sign(Mac mac, SecretKey key, byte[] requestBody) {
        try {
            mac.init(key);
        } catch (InvalidKeyException e) {
            throw new ConnectException(e);
        }
        return mac.doFinal(requestBody);
    }

    @Override
    public boolean equals(Object o) {
        if (this == o)
            return true;
        if (o == null || getClass() != o.getClass())
            return false;
        InternalRequestSignature that = (InternalRequestSignature) o;
        return Arrays.equals(requestBody, that.requestBody)
                && mac.getAlgorithm().equals(that.mac.getAlgorithm())
                && mac.getMacLength() == that.mac.getMacLength()
                && mac.getProvider().equals(that.mac.getProvider())
                && Arrays.equals(requestSignature, that.requestSignature);
    }

    @Override
    public int hashCode() {
        int result = Objects.hash(mac);
        result = 31 * result + Arrays.hashCode(requestBody);
        result = 31 * result + Arrays.hashCode(requestSignature);
        return result;
    }
}
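A minimal end-to-end sketch (not part of the patch) of the signing flow, assuming a shared session key distributed to all workers; jettyRequest and httpHeaders stand for the outgoing Jetty request and the incoming JAX-RS headers respectively:

    SecretKey sessionKey = new SecretKeySpec("a-shared-secret".getBytes(StandardCharsets.UTF_8), "HmacSHA256");
    byte[] body = "{\"tasks\":[]}".getBytes(StandardCharsets.UTF_8);

    // Sending worker: attaches the X-Connect-Authorization and X-Connect-Request-Signature-Algorithm headers
    InternalRequestSignature.addToRequest(sessionKey, body, "HmacSHA256", jettyRequest);

    // Receiving worker: rebuilds the signature from the headers and verifies it against the same key
    InternalRequestSignature signature = InternalRequestSignature.fromHeaders(body, httpHeaders);
    boolean trusted = signature != null && signature.isValid(sessionKey);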
@@ -0,0 +1,210 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.kafka.connect.runtime.rest;

import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;

import javax.crypto.SecretKey;
import javax.ws.rs.core.HttpHeaders;

import org.apache.kafka.connect.runtime.WorkerConfig;
import org.apache.kafka.connect.runtime.rest.entities.ErrorMessage;
import org.apache.kafka.connect.runtime.rest.errors.ConnectRestException;
import org.apache.kafka.connect.runtime.rest.util.SSLUtils;
import org.eclipse.jetty.client.HttpClient;
import org.eclipse.jetty.client.api.ContentResponse;
import org.eclipse.jetty.client.api.Request;
import org.eclipse.jetty.client.util.StringContentProvider;
import org.eclipse.jetty.http.HttpField;
import org.eclipse.jetty.http.HttpFields;
import org.eclipse.jetty.http.HttpStatus;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import javax.ws.rs.core.Response;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeoutException;

public class RestClient {
    private static final Logger log = LoggerFactory.getLogger(RestClient.class);
    private static final ObjectMapper JSON_SERDE = new ObjectMapper();

    /**
     * Sends HTTP request to remote REST server
     *
     * @param url HTTP connection will be established with this url.
     * @param method HTTP method ("GET", "POST", "PUT", etc.)
     * @param headers HTTP headers from REST endpoint
     * @param requestBodyData Object to serialize as JSON and send in the request body.
     * @param responseFormat Expected format of the response to the HTTP request.
     * @param <T> The type of the deserialized response to the HTTP request.
     * @return The deserialized response to the HTTP request, or null if no data is expected.
     */
    public static <T> HttpResponse<T> httpRequest(String url, String method, HttpHeaders headers, Object requestBodyData,
                                                  TypeReference<T> responseFormat, WorkerConfig config) {
        return httpRequest(url, method, headers, requestBodyData, responseFormat, config, null, null);
    }

    /**
     * Sends HTTP request to remote REST server
     *
     * @param url HTTP connection will be established with this url.
     * @param method HTTP method ("GET", "POST", "PUT", etc.)
     * @param headers HTTP headers from REST endpoint
     * @param requestBodyData Object to serialize as JSON and send in the request body.
     * @param responseFormat Expected format of the response to the HTTP request.
     * @param <T> The type of the deserialized response to the HTTP request.
     * @param sessionKey The key to sign the request with (intended for internal requests only);
     *                   may be null if the request doesn't need to be signed
     * @param requestSignatureAlgorithm The algorithm to sign the request with (intended for internal requests only);
     *                                  may be null if the request doesn't need to be signed
     * @return The deserialized response to the HTTP request, or null if no data is expected.
     */
    public static <T> HttpResponse<T> httpRequest(String url, String method, HttpHeaders headers, Object requestBodyData,
                                                  TypeReference<T> responseFormat, WorkerConfig config,
                                                  SecretKey sessionKey, String requestSignatureAlgorithm) {
        HttpClient client;

        if (url.startsWith("https://")) {
            client = new HttpClient(SSLUtils.createClientSideSslContextFactory(config));
        } else {
            client = new HttpClient();
        }

        client.setFollowRedirects(false);

        try {
            client.start();
        } catch (Exception e) {
            log.error("Failed to start RestClient: ", e);
            throw new ConnectRestException(Response.Status.INTERNAL_SERVER_ERROR, "Failed to start RestClient: " + e.getMessage(), e);
        }

        try {
            String serializedBody = requestBodyData == null ? null : JSON_SERDE.writeValueAsString(requestBodyData);
            log.trace("Sending {} with input {} to {}", method, serializedBody, url);

            Request req = client.newRequest(url);
            req.method(method);
            req.accept("application/json");
            req.agent("kafka-connect");
            addHeadersToRequest(headers, req);

            if (serializedBody != null) {
                req.content(new StringContentProvider(serializedBody, StandardCharsets.UTF_8), "application/json");
                if (sessionKey != null && requestSignatureAlgorithm != null) {
                    InternalRequestSignature.addToRequest(
                            sessionKey,
                            serializedBody.getBytes(StandardCharsets.UTF_8),
                            requestSignatureAlgorithm,
                            req
                    );
                }
            }

            ContentResponse res = req.send();

            int responseCode = res.getStatus();
            log.debug("Request's response code: {}", responseCode);
            if (responseCode == HttpStatus.NO_CONTENT_204) {
                return new HttpResponse<>(responseCode, convertHttpFieldsToMap(res.getHeaders()), null);
            } else if (responseCode >= 400) {
                ErrorMessage errorMessage = JSON_SERDE.readValue(res.getContentAsString(), ErrorMessage.class);
                throw new ConnectRestException(responseCode, errorMessage.errorCode(), errorMessage.message());
            } else if (responseCode >= 200 && responseCode < 300) {
                T result = JSON_SERDE.readValue(res.getContentAsString(), responseFormat);
                return new HttpResponse<>(responseCode, convertHttpFieldsToMap(res.getHeaders()), result);
            } else {
                throw new ConnectRestException(Response.Status.INTERNAL_SERVER_ERROR,
                        Response.Status.INTERNAL_SERVER_ERROR.getStatusCode(),
                        "Unexpected status code when handling forwarded request: " + responseCode);
            }
        } catch (IOException | InterruptedException | TimeoutException | ExecutionException e) {
            log.error("IO error forwarding REST request: ", e);
            throw new ConnectRestException(Response.Status.INTERNAL_SERVER_ERROR, "IO Error trying to forward REST request: " + e.getMessage(), e);
        } finally {
            try {
                client.stop();
            } catch (Exception e) {
                log.error("Failed to stop HTTP client", e);
            }
        }
    }
|
||||
|
||||
/**
|
||||
* Extract headers from REST call and add to client request
|
||||
* @param headers Headers from REST endpoint
|
||||
* @param req The client request to modify
|
||||
*/
|
||||
private static void addHeadersToRequest(HttpHeaders headers, Request req) {
|
||||
if (headers != null) {
|
||||
String credentialAuthorization = headers.getHeaderString(HttpHeaders.AUTHORIZATION);
|
||||
if (credentialAuthorization != null) {
|
||||
req.header(HttpHeaders.AUTHORIZATION, credentialAuthorization);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Convert response parameters from Jetty format (HttpFields)
|
||||
* @param httpFields
|
||||
* @return
|
||||
*/
|
||||
private static Map<String, String> convertHttpFieldsToMap(HttpFields httpFields) {
|
||||
Map<String, String> headers = new HashMap<String, String>();
|
||||
|
||||
if (httpFields == null || httpFields.size() == 0)
|
||||
return headers;
|
||||
|
||||
for (HttpField field : httpFields) {
|
||||
headers.put(field.getName(), field.getValue());
|
||||
}
|
||||
|
||||
return headers;
|
||||
}
|
||||
|
||||
public static class HttpResponse<T> {
|
||||
private int status;
|
||||
private Map<String, String> headers;
|
||||
private T body;
|
||||
|
||||
public HttpResponse(int status, Map<String, String> headers, T body) {
|
||||
this.status = status;
|
||||
this.headers = headers;
|
||||
this.body = body;
|
||||
}
|
||||
|
||||
public int status() {
|
||||
return status;
|
||||
}
|
||||
|
||||
public Map<String, String> headers() {
|
||||
return headers;
|
||||
}
|
||||
|
||||
public T body() {
|
||||
return body;
|
||||
}
|
||||
}
|
||||
}
|
||||
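For orientation only (not part of the patch): a minimal sketch of how a worker-to-worker call might use this helper. The peer URL, the WorkerConfig instance and the expected entity type are assumptions chosen for illustration.

    // Illustrative sketch: forward a GET to another worker and read a typed response.
    // Assumes the imports of RestClient above plus a WorkerConfig named `workerConfig`.
    RestClient.HttpResponse<ConnectorInfo> resp = RestClient.httpRequest(
            "http://worker-2:8083/connectors/my-connector",   // assumed peer URL
            "GET",
            null,                                              // no caller headers to propagate
            null,                                              // a GET carries no body
            new TypeReference<ConnectorInfo>() { },
            workerConfig);
    if (resp.status() == 200) {
        log.info("Forwarded request returned connector {}", resp.body().name());
    }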
@@ -0,0 +1,464 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.connect.runtime.rest;

import com.fasterxml.jackson.jaxrs.json.JacksonJsonProvider;
import org.apache.kafka.common.config.ConfigException;
import org.apache.kafka.connect.errors.ConnectException;
import org.apache.kafka.connect.health.ConnectClusterDetails;
import org.apache.kafka.connect.rest.ConnectRestExtension;
import org.apache.kafka.connect.rest.ConnectRestExtensionContext;
import org.apache.kafka.connect.runtime.Herder;
import org.apache.kafka.connect.runtime.WorkerConfig;
import org.apache.kafka.connect.runtime.health.ConnectClusterDetailsImpl;
import org.apache.kafka.connect.runtime.health.ConnectClusterStateImpl;
import org.apache.kafka.connect.runtime.rest.errors.ConnectExceptionMapper;
import org.apache.kafka.connect.runtime.rest.resources.ConnectorPluginsResource;
import org.apache.kafka.connect.runtime.rest.resources.ConnectorsResource;
import org.apache.kafka.connect.runtime.rest.resources.LoggingResource;
import org.apache.kafka.connect.runtime.rest.resources.RootResource;
import org.apache.kafka.connect.runtime.rest.util.SSLUtils;
import org.eclipse.jetty.server.Connector;
import org.eclipse.jetty.server.CustomRequestLog;
import org.eclipse.jetty.server.Handler;
import org.eclipse.jetty.server.Server;
import org.eclipse.jetty.server.ServerConnector;
import org.eclipse.jetty.server.Slf4jRequestLogWriter;
import org.eclipse.jetty.server.handler.ContextHandlerCollection;
import org.eclipse.jetty.server.handler.DefaultHandler;
import org.eclipse.jetty.server.handler.RequestLogHandler;
import org.eclipse.jetty.server.handler.StatisticsHandler;
import org.eclipse.jetty.servlet.FilterHolder;
import org.eclipse.jetty.servlet.ServletContextHandler;
import org.eclipse.jetty.servlet.ServletHolder;
import org.eclipse.jetty.servlets.CrossOriginFilter;
import org.eclipse.jetty.util.ssl.SslContextFactory;
import org.glassfish.jersey.server.ResourceConfig;
import org.glassfish.jersey.server.ServerProperties;
import org.glassfish.jersey.servlet.ServletContainer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import javax.servlet.DispatcherType;
import javax.ws.rs.core.UriBuilder;
import java.io.IOException;
import java.net.URI;
import java.util.ArrayList;
import java.util.Collections;
import java.util.EnumSet;
import java.util.List;
import java.util.Locale;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import static org.apache.kafka.connect.runtime.WorkerConfig.ADMIN_LISTENERS_HTTPS_CONFIGS_PREFIX;

/**
 * Embedded server for the REST API that provides the control plane for Kafka Connect workers.
 */
public class RestServer {
    private static final Logger log = LoggerFactory.getLogger(RestServer.class);

    // Used to distinguish between Admin connectors and regular REST API connectors when binding admin handlers
    private static final String ADMIN_SERVER_CONNECTOR_NAME = "Admin";

    private static final Pattern LISTENER_PATTERN = Pattern.compile("^(.*)://\\[?([0-9a-zA-Z\\-%._:]*)\\]?:(-?[0-9]+)");
    private static final long GRACEFUL_SHUTDOWN_TIMEOUT_MS = 60 * 1000;

    private static final String PROTOCOL_HTTP = "http";
    private static final String PROTOCOL_HTTPS = "https";

    private final WorkerConfig config;
    private ContextHandlerCollection handlers;
    private Server jettyServer;

    private List<ConnectRestExtension> connectRestExtensions = Collections.emptyList();

    /**
     * Create a REST server for this herder using the specified configs.
     */
    public RestServer(WorkerConfig config) {
        this.config = config;

        List<String> listeners = parseListeners();
        List<String> adminListeners = config.getList(WorkerConfig.ADMIN_LISTENERS_CONFIG);

        jettyServer = new Server();
        handlers = new ContextHandlerCollection();

        createConnectors(listeners, adminListeners);
    }

    @SuppressWarnings("deprecation")
    List<String> parseListeners() {
        List<String> listeners = config.getList(WorkerConfig.LISTENERS_CONFIG);
        if (listeners == null || listeners.size() == 0) {
            String hostname = config.getString(WorkerConfig.REST_HOST_NAME_CONFIG);

            if (hostname == null)
                hostname = "";

            listeners = Collections.singletonList(String.format("%s://%s:%d", PROTOCOL_HTTP, hostname, config.getInt(WorkerConfig.REST_PORT_CONFIG)));
        }

        return listeners;
    }

    /**
     * Adds Jetty connector for each configured listener
     */
    public void createConnectors(List<String> listeners, List<String> adminListeners) {
        List<Connector> connectors = new ArrayList<>();

        for (String listener : listeners) {
            if (!listener.isEmpty()) {
                Connector connector = createConnector(listener);
                connectors.add(connector);
                log.info("Added connector for {}", listener);
            }
        }

        jettyServer.setConnectors(connectors.toArray(new Connector[connectors.size()]));

        if (adminListeners != null && !adminListeners.isEmpty()) {
            for (String adminListener : adminListeners) {
                Connector conn = createConnector(adminListener, true);
                jettyServer.addConnector(conn);
                log.info("Added admin connector for {}", adminListener);
            }
        }
    }

    /**
     * Creates regular (non-admin) Jetty connector according to configuration
     */
    public Connector createConnector(String listener) {
        return createConnector(listener, false);
    }

    /**
     * Creates Jetty connector according to configuration
     */
    public Connector createConnector(String listener, boolean isAdmin) {
        Matcher listenerMatcher = LISTENER_PATTERN.matcher(listener);

        if (!listenerMatcher.matches())
            throw new ConfigException("Listener doesn't have the right format (protocol://hostname:port).");

        String protocol = listenerMatcher.group(1).toLowerCase(Locale.ENGLISH);

        if (!PROTOCOL_HTTP.equals(protocol) && !PROTOCOL_HTTPS.equals(protocol))
            throw new ConfigException(String.format("Listener protocol must be either \"%s\" or \"%s\".", PROTOCOL_HTTP, PROTOCOL_HTTPS));

        String hostname = listenerMatcher.group(2);
        int port = Integer.parseInt(listenerMatcher.group(3));

        ServerConnector connector;

        if (PROTOCOL_HTTPS.equals(protocol)) {
            SslContextFactory ssl;
            if (isAdmin) {
                ssl = SSLUtils.createServerSideSslContextFactory(config, ADMIN_LISTENERS_HTTPS_CONFIGS_PREFIX);
            } else {
                ssl = SSLUtils.createServerSideSslContextFactory(config);
            }
            connector = new ServerConnector(jettyServer, ssl);
            if (!isAdmin) {
                connector.setName(String.format("%s_%s%d", PROTOCOL_HTTPS, hostname, port));
            }
        } else {
            connector = new ServerConnector(jettyServer);
            if (!isAdmin) {
                connector.setName(String.format("%s_%s%d", PROTOCOL_HTTP, hostname, port));
            }
        }

        if (isAdmin) {
            connector.setName(ADMIN_SERVER_CONNECTOR_NAME);
        }

        if (!hostname.isEmpty())
            connector.setHost(hostname);

        connector.setPort(port);

        return connector;
    }

    public void initializeServer() {
        log.info("Initializing REST server");

        /* Needed for graceful shutdown as per `setStopTimeout` documentation */
        StatisticsHandler statsHandler = new StatisticsHandler();
        statsHandler.setHandler(handlers);
        jettyServer.setHandler(statsHandler);
        jettyServer.setStopTimeout(GRACEFUL_SHUTDOWN_TIMEOUT_MS);
        jettyServer.setStopAtShutdown(true);

        try {
            jettyServer.start();
        } catch (Exception e) {
            throw new ConnectException("Unable to initialize REST server", e);
        }

        log.info("REST server listening at " + jettyServer.getURI() + ", advertising URL " + advertisedUrl());
        log.info("REST admin endpoints at " + adminUrl());
    }

    public void initializeResources(Herder herder) {
        log.info("Initializing REST resources");

        ResourceConfig resourceConfig = new ResourceConfig();
        resourceConfig.register(new JacksonJsonProvider());

        resourceConfig.register(new RootResource(herder));
        resourceConfig.register(new ConnectorsResource(herder, config));
        resourceConfig.register(new ConnectorPluginsResource(herder));

        resourceConfig.register(ConnectExceptionMapper.class);
        resourceConfig.property(ServerProperties.WADL_FEATURE_DISABLE, true);

        registerRestExtensions(herder, resourceConfig);

        List<String> adminListeners = config.getList(WorkerConfig.ADMIN_LISTENERS_CONFIG);
        ResourceConfig adminResourceConfig;
        if (adminListeners == null) {
            log.info("Adding admin resources to main listener");
            adminResourceConfig = resourceConfig;
            adminResourceConfig.register(new LoggingResource());
        } else if (adminListeners.size() > 0) {
            // TODO: we need to check if these listeners are same as 'listeners'
            // TODO: the following code assumes that they are different
            log.info("Adding admin resources to admin listener");
            adminResourceConfig = new ResourceConfig();
            adminResourceConfig.register(new JacksonJsonProvider());
            adminResourceConfig.register(new LoggingResource());
            adminResourceConfig.register(ConnectExceptionMapper.class);
        } else {
            log.info("Skipping adding admin resources");
            // set up adminResource but add no handlers to it
            adminResourceConfig = resourceConfig;
        }

        ServletContainer servletContainer = new ServletContainer(resourceConfig);
        ServletHolder servletHolder = new ServletHolder(servletContainer);
        List<Handler> contextHandlers = new ArrayList<>();

        ServletContextHandler context = new ServletContextHandler(ServletContextHandler.SESSIONS);
        context.setContextPath("/");
        context.addServlet(servletHolder, "/*");
        contextHandlers.add(context);

        ServletContextHandler adminContext = null;
        if (adminResourceConfig != resourceConfig) {
            adminContext = new ServletContextHandler(ServletContextHandler.SESSIONS);
            ServletHolder adminServletHolder = new ServletHolder(new ServletContainer(adminResourceConfig));
            adminContext.setContextPath("/");
            adminContext.addServlet(adminServletHolder, "/*");
            adminContext.setVirtualHosts(new String[]{"@" + ADMIN_SERVER_CONNECTOR_NAME});
            contextHandlers.add(adminContext);
        }

        String allowedOrigins = config.getString(WorkerConfig.ACCESS_CONTROL_ALLOW_ORIGIN_CONFIG);
        if (allowedOrigins != null && !allowedOrigins.trim().isEmpty()) {
            FilterHolder filterHolder = new FilterHolder(new CrossOriginFilter());
            filterHolder.setName("cross-origin");
            filterHolder.setInitParameter(CrossOriginFilter.ALLOWED_ORIGINS_PARAM, allowedOrigins);
            String allowedMethods = config.getString(WorkerConfig.ACCESS_CONTROL_ALLOW_METHODS_CONFIG);
            if (allowedMethods != null && !allowedOrigins.trim().isEmpty()) {
                filterHolder.setInitParameter(CrossOriginFilter.ALLOWED_METHODS_PARAM, allowedMethods);
            }
            context.addFilter(filterHolder, "/*", EnumSet.of(DispatcherType.REQUEST));
        }

        RequestLogHandler requestLogHandler = new RequestLogHandler();
        Slf4jRequestLogWriter slf4jRequestLogWriter = new Slf4jRequestLogWriter();
        slf4jRequestLogWriter.setLoggerName(RestServer.class.getCanonicalName());
        CustomRequestLog requestLog = new CustomRequestLog(slf4jRequestLogWriter, CustomRequestLog.EXTENDED_NCSA_FORMAT + " %{ms}T");
        requestLogHandler.setRequestLog(requestLog);

        contextHandlers.add(new DefaultHandler());
        contextHandlers.add(requestLogHandler);

        handlers.setHandlers(contextHandlers.toArray(new Handler[]{}));
        try {
            context.start();
        } catch (Exception e) {
            throw new ConnectException("Unable to initialize REST resources", e);
        }

        if (adminResourceConfig != resourceConfig) {
            try {
                log.debug("Starting admin context");
                adminContext.start();
            } catch (Exception e) {
                throw new ConnectException("Unable to initialize Admin REST resources", e);
            }
        }

        log.info("REST resources initialized; server is started and ready to handle requests");
    }

    public URI serverUrl() {
        return jettyServer.getURI();
    }

    public void stop() {
        log.info("Stopping REST server");

        try {
            for (ConnectRestExtension connectRestExtension : connectRestExtensions) {
                try {
                    connectRestExtension.close();
                } catch (IOException e) {
                    log.warn("Error while invoking close on " + connectRestExtension.getClass(), e);
                }
            }
            jettyServer.stop();
            jettyServer.join();
        } catch (Exception e) {
            jettyServer.destroy();
            throw new ConnectException("Unable to stop REST server", e);
        }

        log.info("REST server stopped");
    }

    /**
     * Get the URL to advertise to other workers and clients. This uses the default connector from the embedded Jetty
     * server, unless overrides for advertised hostname and/or port are provided via configs. {@link #initializeServer()}
     * must be invoked successfully before calling this method.
     */
    public URI advertisedUrl() {
        UriBuilder builder = UriBuilder.fromUri(jettyServer.getURI());

        String advertisedSecurityProtocol = determineAdvertisedProtocol();
        ServerConnector serverConnector = findConnector(advertisedSecurityProtocol);
        builder.scheme(advertisedSecurityProtocol);

        String advertisedHostname = config.getString(WorkerConfig.REST_ADVERTISED_HOST_NAME_CONFIG);
        if (advertisedHostname != null && !advertisedHostname.isEmpty())
            builder.host(advertisedHostname);
        else if (serverConnector != null && serverConnector.getHost() != null && serverConnector.getHost().length() > 0)
            builder.host(serverConnector.getHost());

        Integer advertisedPort = config.getInt(WorkerConfig.REST_ADVERTISED_PORT_CONFIG);
        if (advertisedPort != null)
            builder.port(advertisedPort);
        else if (serverConnector != null && serverConnector.getPort() > 0)
            builder.port(serverConnector.getPort());

        log.info("Advertised URI: {}", builder.build());

        return builder.build();
    }

    /**
     * @return the admin url for this worker. can be null if admin endpoints are disabled.
     */
    public URI adminUrl() {
        ServerConnector adminConnector = null;
        for (Connector connector : jettyServer.getConnectors()) {
            if (ADMIN_SERVER_CONNECTOR_NAME.equals(connector.getName()))
                adminConnector = (ServerConnector) connector;
        }

        if (adminConnector == null) {
            List<String> adminListeners = config.getList(WorkerConfig.ADMIN_LISTENERS_CONFIG);
            if (adminListeners == null) {
                return advertisedUrl();
            } else if (adminListeners.isEmpty()) {
                return null;
            } else {
                log.error("No admin connector found for listeners {}", adminListeners);
                return null;
            }
        }

        UriBuilder builder = UriBuilder.fromUri(jettyServer.getURI());
        builder.port(adminConnector.getLocalPort());

        return builder.build();
    }

    String determineAdvertisedProtocol() {
        String advertisedSecurityProtocol = config.getString(WorkerConfig.REST_ADVERTISED_LISTENER_CONFIG);
        if (advertisedSecurityProtocol == null) {
            String listeners = (String) config.originals().get(WorkerConfig.LISTENERS_CONFIG);

            if (listeners == null)
                return PROTOCOL_HTTP;
            else
                listeners = listeners.toLowerCase(Locale.ENGLISH);

            if (listeners.contains(String.format("%s://", PROTOCOL_HTTP)))
                return PROTOCOL_HTTP;
            else if (listeners.contains(String.format("%s://", PROTOCOL_HTTPS)))
                return PROTOCOL_HTTPS;
            else
                return PROTOCOL_HTTP;
        } else {
            return advertisedSecurityProtocol.toLowerCase(Locale.ENGLISH);
        }
    }

    ServerConnector findConnector(String protocol) {
        for (Connector connector : jettyServer.getConnectors()) {
            String connectorName = connector.getName();
            if (connectorName.startsWith(protocol) && !ADMIN_SERVER_CONNECTOR_NAME.equals(connectorName))
                return (ServerConnector) connector;
        }

        return null;
    }

    void registerRestExtensions(Herder herder, ResourceConfig resourceConfig) {
        connectRestExtensions = herder.plugins().newPlugins(
            config.getList(WorkerConfig.REST_EXTENSION_CLASSES_CONFIG),
            config, ConnectRestExtension.class);

        long herderRequestTimeoutMs = ConnectorsResource.REQUEST_TIMEOUT_MS;

        Integer rebalanceTimeoutMs = config.getRebalanceTimeout();

        if (rebalanceTimeoutMs != null) {
            herderRequestTimeoutMs = Math.min(herderRequestTimeoutMs, rebalanceTimeoutMs.longValue());
        }

        ConnectClusterDetails connectClusterDetails = new ConnectClusterDetailsImpl(
            herder.kafkaClusterId()
        );

        ConnectRestExtensionContext connectRestExtensionContext =
            new ConnectRestExtensionContextImpl(
                new ConnectRestConfigurable(resourceConfig),
                new ConnectClusterStateImpl(herderRequestTimeoutMs, connectClusterDetails, herder)
            );
        for (ConnectRestExtension connectRestExtension : connectRestExtensions) {
            connectRestExtension.register(connectRestExtensionContext);
        }

    }

    public static String urlJoin(String base, String path) {
        if (base.endsWith("/") && path.startsWith("/"))
            return base + path.substring(1);
        else
            return base + path;
    }

}
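For orientation only (not part of the patch): the call order used by the worker entry points with this class is construct, initializeServer(), initializeResources(...), then stop() on shutdown. A hedged sketch, assuming a WorkerConfig `config` and a Herder `herder` already exist:

    RestServer rest = new RestServer(config);   // parses listeners and creates the Jetty connectors
    rest.initializeServer();                    // starts Jetty; advertisedUrl() is valid from here on
    URI advertised = rest.advertisedUrl();
    rest.initializeResources(herder);           // registers JAX-RS resources and REST extensions
    // ... serve requests ...
    rest.stop();                                // closes extensions, then stops and joins Jetty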
@@ -0,0 +1,43 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.connect.runtime.rest.entities;

import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;

import java.util.Collection;

public class ActiveTopicsInfo {
    private final String connector;
    private final Collection<String> topics;

    @JsonCreator
    public ActiveTopicsInfo(String connector, @JsonProperty("topics") Collection<String> topics) {
        this.connector = connector;
        this.topics = topics;
    }

    public String connector() {
        return connector;
    }

    @JsonProperty
    public Collection<String> topics() {
        return topics;
    }

}
@@ -0,0 +1,65 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.connect.runtime.rest.entities;

import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;

import java.util.Objects;

public class ConfigInfo {

    private ConfigKeyInfo configKey;
    private ConfigValueInfo configValue;

    @JsonCreator
    public ConfigInfo(
        @JsonProperty("definition") ConfigKeyInfo configKey,
        @JsonProperty("value") ConfigValueInfo configValue) {
        this.configKey = configKey;
        this.configValue = configValue;
    }

    @JsonProperty("definition")
    public ConfigKeyInfo configKey() {
        return configKey;
    }

    @JsonProperty("value")
    public ConfigValueInfo configValue() {
        return configValue;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        ConfigInfo that = (ConfigInfo) o;
        return Objects.equals(configKey, that.configKey) &&
               Objects.equals(configValue, that.configValue);
    }

    @Override
    public int hashCode() {
        return Objects.hash(configKey, configValue);
    }

    @Override
    public String toString() {
        return "[" + configKey.toString() + "," + configValue.toString() + "]";
    }
}
@@ -0,0 +1,101 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.connect.runtime.rest.entities;

import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;

import java.util.List;
import java.util.Objects;

public class ConfigInfos {

    @JsonProperty("name")
    private final String name;

    @JsonProperty("error_count")
    private final int errorCount;

    @JsonProperty("groups")
    private final List<String> groups;

    @JsonProperty("configs")
    private final List<ConfigInfo> configs;

    @JsonCreator
    public ConfigInfos(@JsonProperty("name") String name,
                       @JsonProperty("error_count") int errorCount,
                       @JsonProperty("groups") List<String> groups,
                       @JsonProperty("configs") List<ConfigInfo> configs) {
        this.name = name;
        this.groups = groups;
        this.errorCount = errorCount;
        this.configs = configs;
    }

    @JsonProperty
    public String name() {
        return name;
    }

    @JsonProperty
    public List<String> groups() {
        return groups;
    }

    @JsonProperty("error_count")
    public int errorCount() {
        return errorCount;
    }

    @JsonProperty("configs")
    public List<ConfigInfo> values() {
        return configs;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        ConfigInfos that = (ConfigInfos) o;
        return Objects.equals(name, that.name) &&
               Objects.equals(errorCount, that.errorCount) &&
               Objects.equals(groups, that.groups) &&
               Objects.equals(configs, that.configs);
    }

    @Override
    public int hashCode() {
        return Objects.hash(name, errorCount, groups, configs);
    }

    @Override
    public String toString() {
        StringBuffer sb = new StringBuffer();
        sb.append("[")
          .append(name)
          .append(",")
          .append(errorCount)
          .append(",")
          .append(groups)
          .append(",")
          .append(configs)
          .append("]");
        return sb.toString();
    }

}
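For orientation only (not part of the patch): with the @JsonProperty names above, Jackson serializes this class using the snake_case keys exposed by the config-validation endpoint. A small sketch with made-up values:

    // Illustrative only; writeValueAsString throws JsonProcessingException.
    ConfigInfos infos = new ConfigInfos("my-connector", 1,
            Collections.singletonList("Common"), Collections.emptyList());
    String json = new ObjectMapper().writeValueAsString(infos);
    // -> {"name":"my-connector","error_count":1,"groups":["Common"],"configs":[]}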
@@ -0,0 +1,170 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.connect.runtime.rest.entities;

import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;

import java.util.List;
import java.util.Objects;

public class ConfigKeyInfo {

    private final String name;
    private final String type;
    private final boolean required;
    private final String defaultValue;
    private final String importance;
    private final String documentation;
    private final String group;
    private final int orderInGroup;
    private final String width;
    private final String displayName;
    private final List<String> dependents;

    @JsonCreator
    public ConfigKeyInfo(@JsonProperty("name") String name,
                         @JsonProperty("type") String type,
                         @JsonProperty("required") boolean required,
                         @JsonProperty("default_value") String defaultValue,
                         @JsonProperty("importance") String importance,
                         @JsonProperty("documentation") String documentation,
                         @JsonProperty("group") String group,
                         @JsonProperty("order_in_group") int orderInGroup,
                         @JsonProperty("width") String width,
                         @JsonProperty("display_name") String displayName,
                         @JsonProperty("dependents") List<String> dependents) {
        this.name = name;
        this.type = type;
        this.required = required;
        this.defaultValue = defaultValue;
        this.importance = importance;
        this.documentation = documentation;
        this.group = group;
        this.orderInGroup = orderInGroup;
        this.width = width;
        this.displayName = displayName;
        this.dependents = dependents;
    }

    @JsonProperty
    public String name() {
        return name;
    }

    @JsonProperty
    public String type() {
        return type;
    }

    @JsonProperty
    public boolean required() {
        return required;
    }

    @JsonProperty("default_value")
    public String defaultValue() {
        return defaultValue;
    }

    @JsonProperty
    public String documentation() {
        return documentation;
    }

    @JsonProperty
    public String group() {
        return group;
    }

    @JsonProperty("order")
    public int orderInGroup() {
        return orderInGroup;
    }

    @JsonProperty
    public String width() {
        return width;
    }

    @JsonProperty
    public String importance() {
        return importance;
    }

    @JsonProperty("display_name")
    public String displayName() {
        return displayName;
    }

    @JsonProperty
    public List<String> dependents() {
        return dependents;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        ConfigKeyInfo that = (ConfigKeyInfo) o;
        return Objects.equals(name, that.name) &&
               Objects.equals(type, that.type) &&
               Objects.equals(required, that.required) &&
               Objects.equals(defaultValue, that.defaultValue) &&
               Objects.equals(importance, that.importance) &&
               Objects.equals(documentation, that.documentation) &&
               Objects.equals(group, that.group) &&
               Objects.equals(orderInGroup, that.orderInGroup) &&
               Objects.equals(width, that.width) &&
               Objects.equals(displayName, that.displayName) &&
               Objects.equals(dependents, that.dependents);
    }

    @Override
    public int hashCode() {
        return Objects.hash(name, type, required, defaultValue, importance, documentation, group, orderInGroup, width, displayName, dependents);
    }

    @Override
    public String toString() {
        StringBuffer sb = new StringBuffer();
        sb.append("[")
          .append(name)
          .append(",")
          .append(type)
          .append(",")
          .append(required)
          .append(",")
          .append(defaultValue)
          .append(",")
          .append(importance)
          .append(",")
          .append(documentation)
          .append(",")
          .append(group)
          .append(",")
          .append(orderInGroup)
          .append(",")
          .append(width)
          .append(",")
          .append(displayName)
          .append(",")
          .append(dependents)
          .append("]");
        return sb.toString();
    }
}
@@ -0,0 +1,105 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.connect.runtime.rest.entities;

import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;

import java.util.List;
import java.util.Objects;

public class ConfigValueInfo {
    private String name;
    private String value;
    private List<String> recommendedValues;
    private List<String> errors;
    private boolean visible;

    @JsonCreator
    public ConfigValueInfo(
        @JsonProperty("name") String name,
        @JsonProperty("value") String value,
        @JsonProperty("recommended_values") List<String> recommendedValues,
        @JsonProperty("errors") List<String> errors,
        @JsonProperty("visible") boolean visible) {
        this.name = name;
        this.value = value;
        this.recommendedValues = recommendedValues;
        this.errors = errors;
        this.visible = visible;
    }

    @JsonProperty
    public String name() {
        return name;
    }

    @JsonProperty
    public String value() {
        return value;
    }

    @JsonProperty("recommended_values")
    public List<String> recommendedValues() {
        return recommendedValues;
    }

    @JsonProperty
    public List<String> errors() {
        return errors;
    }

    @JsonProperty
    public boolean visible() {
        return visible;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        ConfigValueInfo that = (ConfigValueInfo) o;
        return Objects.equals(name, that.name) &&
               Objects.equals(value, that.value) &&
               Objects.equals(recommendedValues, that.recommendedValues) &&
               Objects.equals(errors, that.errors) &&
               Objects.equals(visible, that.visible);
    }

    @Override
    public int hashCode() {
        return Objects.hash(name, value, recommendedValues, errors, visible);
    }

    @Override
    public String toString() {
        StringBuffer sb = new StringBuffer();
        sb.append("[")
          .append(name)
          .append(",")
          .append(value)
          .append(",")
          .append(recommendedValues)
          .append(",")
          .append(errors)
          .append(",")
          .append(visible)
          .append("]");
        return sb.toString();
    }

}
@@ -0,0 +1,83 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.connect.runtime.rest.entities;

import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;

import org.apache.kafka.connect.util.ConnectorTaskId;

import java.util.List;
import java.util.Map;
import java.util.Objects;

public class ConnectorInfo {

    private final String name;
    private final Map<String, String> config;
    private final List<ConnectorTaskId> tasks;
    private final ConnectorType type;

    @JsonCreator
    public ConnectorInfo(@JsonProperty("name") String name,
                         @JsonProperty("config") Map<String, String> config,
                         @JsonProperty("tasks") List<ConnectorTaskId> tasks,
                         @JsonProperty("type") ConnectorType type) {
        this.name = name;
        this.config = config;
        this.tasks = tasks;
        this.type = type;
    }


    @JsonProperty
    public String name() {
        return name;
    }

    @JsonProperty
    public ConnectorType type() {
        return type;
    }

    @JsonProperty
    public Map<String, String> config() {
        return config;
    }

    @JsonProperty
    public List<ConnectorTaskId> tasks() {
        return tasks;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        ConnectorInfo that = (ConnectorInfo) o;
        return Objects.equals(name, that.name) &&
               Objects.equals(config, that.config) &&
               Objects.equals(tasks, that.tasks) &&
               Objects.equals(type, that.type);
    }

    @Override
    public int hashCode() {
        return Objects.hash(name, config, tasks, type);
    }

}
@@ -0,0 +1,89 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.connect.runtime.rest.entities;

import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import org.apache.kafka.connect.connector.Connector;
import org.apache.kafka.connect.runtime.isolation.PluginDesc;

import java.util.Objects;

public class ConnectorPluginInfo {
    private String className;
    private ConnectorType type;
    private String version;

    @JsonCreator
    public ConnectorPluginInfo(
        @JsonProperty("class") String className,
        @JsonProperty("type") ConnectorType type,
        @JsonProperty("version") String version
    ) {
        this.className = className;
        this.type = type;
        this.version = version;
    }

    public ConnectorPluginInfo(PluginDesc<Connector> plugin) {
        this(plugin.className(), ConnectorType.from(plugin.pluginClass()), plugin.version());
    }

    @JsonProperty("class")
    public String className() {
        return className;
    }

    @JsonProperty("type")
    public ConnectorType type() {
        return type;
    }

    @JsonProperty("version")
    public String version() {
        return version;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (o == null || getClass() != o.getClass()) {
            return false;
        }
        ConnectorPluginInfo that = (ConnectorPluginInfo) o;
        return Objects.equals(className, that.className) &&
               type == that.type &&
               Objects.equals(version, that.version);
    }

    @Override
    public int hashCode() {
        return Objects.hash(className, type, version);
    }

    @Override
    public String toString() {
        final StringBuilder sb = new StringBuilder("ConnectorPluginInfo{");
        sb.append("className='").append(className).append('\'');
        sb.append(", type=").append(type);
        sb.append(", version='").append(version).append('\'');
        sb.append('}');
        return sb.toString();
    }
}
@@ -0,0 +1,139 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.connect.runtime.rest.entities;

import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;

import java.util.List;
import java.util.Objects;

public class ConnectorStateInfo {

    private final String name;
    private final ConnectorState connector;
    private final List<TaskState> tasks;
    private final ConnectorType type;

    @JsonCreator
    public ConnectorStateInfo(@JsonProperty("name") String name,
                              @JsonProperty("connector") ConnectorState connector,
                              @JsonProperty("tasks") List<TaskState> tasks,
                              @JsonProperty("type") ConnectorType type) {
        this.name = name;
        this.connector = connector;
        this.tasks = tasks;
        this.type = type;
    }

    @JsonProperty
    public String name() {
        return name;
    }

    @JsonProperty
    public ConnectorState connector() {
        return connector;
    }

    @JsonProperty
    public List<TaskState> tasks() {
        return tasks;
    }

    @JsonProperty
    public ConnectorType type() {
        return type;
    }

    public abstract static class AbstractState {
        private final String state;
        private final String trace;
        private final String workerId;

        public AbstractState(String state, String workerId, String trace) {
            this.state = state;
            this.workerId = workerId;
            this.trace = trace;
        }

        @JsonProperty
        public String state() {
            return state;
        }

        @JsonProperty("worker_id")
        public String workerId() {
            return workerId;
        }

        @JsonProperty
        @JsonInclude(JsonInclude.Include.NON_EMPTY)
        public String trace() {
            return trace;
        }
    }

    public static class ConnectorState extends AbstractState {
        @JsonCreator
        public ConnectorState(@JsonProperty("state") String state,
                              @JsonProperty("worker_id") String worker,
                              @JsonProperty("msg") String msg) {
            super(state, worker, msg);
        }
    }

    public static class TaskState extends AbstractState implements Comparable<TaskState> {
        private final int id;

        @JsonCreator
        public TaskState(@JsonProperty("id") int id,
                         @JsonProperty("state") String state,
                         @JsonProperty("worker_id") String worker,
                         @JsonProperty("msg") String msg) {
            super(state, worker, msg);
            this.id = id;
        }

        @JsonProperty
        public int id() {
            return id;
        }

        @Override
        public int compareTo(TaskState that) {
            return Integer.compare(this.id, that.id);
        }

        @Override
        public boolean equals(Object o) {
            if (o == this)
                return true;
            if (!(o instanceof TaskState))
                return false;
            TaskState other = (TaskState) o;
            return compareTo(other) == 0;
        }

        @Override
        public int hashCode() {
            return Objects.hash(id);
        }
    }

}
@@ -0,0 +1,52 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.connect.runtime.rest.entities;

import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonValue;

import org.apache.kafka.connect.connector.Connector;
import org.apache.kafka.connect.sink.SinkConnector;
import org.apache.kafka.connect.source.SourceConnector;

import java.util.Locale;

public enum ConnectorType {
    SOURCE, SINK, UNKNOWN;

    public static ConnectorType from(Class<? extends Connector> clazz) {
        if (SinkConnector.class.isAssignableFrom(clazz)) {
            return SINK;
        }
        if (SourceConnector.class.isAssignableFrom(clazz)) {
            return SOURCE;
        }

        return UNKNOWN;
    }

    @Override
    @JsonValue
    public String toString() {
        return super.toString().toLowerCase(Locale.ROOT);
    }

    @JsonCreator
    public static ConnectorType forValue(String value) {
        return ConnectorType.valueOf(value.toUpperCase(Locale.ROOT));
    }
}
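For orientation only (not part of the patch): because toString() carries @JsonValue and forValue() carries @JsonCreator, the enum round-trips through lower-case JSON strings:

    ConnectorType.SINK.toString();      // serialized as "sink"
    ConnectorType.forValue("source");   // parsed back to ConnectorType.SOURCE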
@@ -0,0 +1,58 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.connect.runtime.rest.entities;

import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;

import java.util.Map;
import java.util.Objects;

public class CreateConnectorRequest {
    private final String name;
    private final Map<String, String> config;

    @JsonCreator
    public CreateConnectorRequest(@JsonProperty("name") String name, @JsonProperty("config") Map<String, String> config) {
        this.name = name;
        this.config = config;
    }

    @JsonProperty
    public String name() {
        return name;
    }

    @JsonProperty
    public Map<String, String> config() {
        return config;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        CreateConnectorRequest that = (CreateConnectorRequest) o;
        return Objects.equals(name, that.name) &&
               Objects.equals(config, that.config);
    }

    @Override
    public int hashCode() {
        return Objects.hash(name, config);
    }
}
@@ -0,0 +1,62 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.connect.runtime.rest.entities;

import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;

import java.util.Objects;

/**
 * Standard error format for all REST API failures. These are generated automatically by
 * {@link org.apache.kafka.connect.runtime.rest.errors.ConnectExceptionMapper} in response to uncaught
 * {@link org.apache.kafka.connect.errors.ConnectException}s.
 */
public class ErrorMessage {
    private final int errorCode;
    private final String message;

    @JsonCreator
    public ErrorMessage(@JsonProperty("error_code") int errorCode, @JsonProperty("message") String message) {
        this.errorCode = errorCode;
        this.message = message;
    }

    @JsonProperty("error_code")
    public int errorCode() {
        return errorCode;
    }

    @JsonProperty
    public String message() {
        return message;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        ErrorMessage that = (ErrorMessage) o;
        return Objects.equals(errorCode, that.errorCode) &&
                Objects.equals(message, that.message);
    }

    @Override
    public int hashCode() {
        return Objects.hash(errorCode, message);
    }
}
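For illustration only: serializing this entity with Jackson shows the wire format that REST clients receive on failures; the status code and message below are made up.

import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.kafka.connect.runtime.rest.entities.ErrorMessage;

public class ErrorMessageExample {
    public static void main(String[] args) throws Exception {
        // Prints something like: {"error_code":404,"message":"Connector my-connector not found"}
        ErrorMessage error = new ErrorMessage(404, "Connector my-connector not found");
        System.out.println(new ObjectMapper().writeValueAsString(error));
    }
}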
@@ -0,0 +1,55 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.connect.runtime.rest.entities;

import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import org.apache.kafka.common.utils.AppInfoParser;

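/**
 * Basic information about the Connect worker, as exposed by the REST API's root endpoint:
 * the Connect version, the source commit it was built from, and the id of the Kafka cluster
 * it is attached to.
 */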
public class ServerInfo {
    private final String version;
    private final String commit;
    private final String kafkaClusterId;

    @JsonCreator
    private ServerInfo(@JsonProperty("version") String version,
                       @JsonProperty("commit") String commit,
                       @JsonProperty("kafka_cluster_id") String kafkaClusterId) {
        this.version = version;
        this.commit = commit;
        this.kafkaClusterId = kafkaClusterId;
    }

    public ServerInfo(String kafkaClusterId) {
        this(AppInfoParser.getVersion(), AppInfoParser.getCommitId(), kafkaClusterId);
    }

    @JsonProperty
    public String version() {
        return version;
    }

    @JsonProperty
    public String commit() {
        return commit;
    }

    @JsonProperty("kafka_cluster_id")
    public String clusterId() {
        return kafkaClusterId;
    }
}
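For illustration only: a sketch of the JSON shape this entity serializes to; the cluster id is a placeholder, while version and commit are resolved by AppInfoParser from build metadata.

import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.kafka.connect.runtime.rest.entities.ServerInfo;

public class ServerInfoExample {
    public static void main(String[] args) throws Exception {
        // Prints something like: {"version":"<connect version>","commit":"<git sha>","kafka_cluster_id":"abc123"}
        ServerInfo info = new ServerInfo("abc123");
        System.out.println(new ObjectMapper().writeValueAsString(info));
    }
}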
@@ -0,0 +1,57 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.connect.runtime.rest.entities;

import com.fasterxml.jackson.annotation.JsonProperty;
import org.apache.kafka.connect.util.ConnectorTaskId;

import java.util.Map;
import java.util.Objects;

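/**
 * REST representation of a single connector task: the task's id (connector name plus task number)
 * and the configuration assigned to it.
 */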
public class TaskInfo {
    private final ConnectorTaskId id;
    private final Map<String, String> config;

    public TaskInfo(ConnectorTaskId id, Map<String, String> config) {
        this.id = id;
        this.config = config;
    }

    @JsonProperty
    public ConnectorTaskId id() {
        return id;
    }

    @JsonProperty
    public Map<String, String> config() {
        return config;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        TaskInfo taskInfo = (TaskInfo) o;
        return Objects.equals(id, taskInfo.id) &&
                Objects.equals(config, taskInfo.config);
    }

    @Override
    public int hashCode() {
        return Objects.hash(id, config);
    }
}
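For illustration only: constructing a TaskInfo by hand; the connector name and the single config entry below are placeholders, not values from this commit.

import java.util.Collections;
import org.apache.kafka.connect.runtime.rest.entities.TaskInfo;
import org.apache.kafka.connect.util.ConnectorTaskId;

public class TaskInfoExample {
    public static void main(String[] args) {
        // Task 0 of a hypothetical connector, carrying a single made-up config entry.
        TaskInfo task = new TaskInfo(new ConnectorTaskId("local-file-source", 0),
                Collections.singletonMap("task.class", "FileStreamSourceTask"));
        System.out.println(task.id() + " -> " + task.config());
    }
}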
@@ -0,0 +1,27 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.connect.runtime.rest.errors;

import javax.ws.rs.core.Response;

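/**
 * Thrown by REST resources when a request is malformed or otherwise invalid; mapped to an
 * HTTP 400 response by {@link ConnectExceptionMapper}.
 */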
public class BadRequestException extends ConnectRestException {

    public BadRequestException(String message) {
        super(Response.Status.BAD_REQUEST, message);
    }

}
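For illustration only: a sketch of how a REST resource might reject invalid input with this exception; the helper below is hypothetical and not part of this commit.

import org.apache.kafka.connect.runtime.rest.errors.BadRequestException;

public class ValidationSketch {
    // Hypothetical helper: reject blank connector names before doing any work.
    static void requireName(String name) {
        if (name == null || name.trim().isEmpty()) {
            throw new BadRequestException("Connector name must not be empty");
        }
    }
}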
@@ -0,0 +1,75 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.connect.runtime.rest.errors;

import org.apache.kafka.connect.errors.AlreadyExistsException;
import org.apache.kafka.connect.errors.NotFoundException;
import org.apache.kafka.connect.runtime.rest.entities.ErrorMessage;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.UriInfo;
import javax.ws.rs.ext.ExceptionMapper;

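/**
 * JAX-RS exception mapper that turns any exception escaping a REST resource into the standard
 * {@link ErrorMessage} payload: ConnectRestExceptions keep their own status and error code,
 * NotFoundException and AlreadyExistsException map to 404 and 409, and everything else falls back
 * to the status of the WebApplicationException or to 500.
 */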
public class ConnectExceptionMapper implements ExceptionMapper<Exception> {
    private static final Logger log = LoggerFactory.getLogger(ConnectExceptionMapper.class);

    @Context
    private UriInfo uriInfo;

    @Override
    public Response toResponse(Exception exception) {
        log.debug("Uncaught exception in REST call to /{}", uriInfo.getPath(), exception);

        if (exception instanceof ConnectRestException) {
            ConnectRestException restException = (ConnectRestException) exception;
            return Response.status(restException.statusCode())
                    .entity(new ErrorMessage(restException.errorCode(), restException.getMessage()))
                    .build();
        }

        if (exception instanceof NotFoundException) {
            return Response.status(Response.Status.NOT_FOUND)
                    .entity(new ErrorMessage(Response.Status.NOT_FOUND.getStatusCode(), exception.getMessage()))
                    .build();
        }

        if (exception instanceof AlreadyExistsException) {
            return Response.status(Response.Status.CONFLICT)
                    .entity(new ErrorMessage(Response.Status.CONFLICT.getStatusCode(), exception.getMessage()))
                    .build();
        }

        if (!log.isDebugEnabled()) {
            log.error("Uncaught exception in REST call to /{}", uriInfo.getPath(), exception);
        }

        final int statusCode;
        if (exception instanceof WebApplicationException) {
            Response.StatusType statusInfo = ((WebApplicationException) exception).getResponse().getStatusInfo();
            statusCode = statusInfo.getStatusCode();
        } else {
            statusCode = Response.Status.INTERNAL_SERVER_ERROR.getStatusCode();
        }
        return Response.status(statusCode)
                .entity(new ErrorMessage(statusCode, exception.getMessage()))
                .build();
    }
}
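For illustration only: a sketch of registering the mapper with a Jersey ResourceConfig so that exceptions thrown by resources flow through toResponse(). How the surrounding RestServer actually wires its providers is not shown in this diff, so treat this setup as an assumption.

import org.apache.kafka.connect.runtime.rest.errors.ConnectExceptionMapper;
import org.glassfish.jersey.server.ResourceConfig;

public class RestConfigSketch {
    static ResourceConfig buildConfig() {
        ResourceConfig config = new ResourceConfig();
        // Any exception thrown by a registered resource is now converted to an ErrorMessage response.
        config.register(ConnectExceptionMapper.class);
        return config;
    }
}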
Some files were not shown because too many files have changed in this diff.