5. Deployment Guide#
5.1. ONTP Deployment Guide#
5.1.1. ONTP Real Time Platform#
Deploy ontp-tsdb or ontp-tsdb-ml; see the docs at Metrics Database on how to deploy with docker.
Deploy ontp-mbus; see the docs at Metrics Message Bus on how to deploy with docker.
Deploy ontp-wire to the node that you want to collect network metrics from; see the docs at Network Capture on how to deploy with docker.
Deploy and configure your Grafana instance to read from the ontp-tsdb[-ml] instance.
Note
Download example dashboards to get started with viewing your network metrics from grafana.
5.1.2. Kafka/Splunk Stack#
- Do a mkdir splunk; mkdir connectors from your desired deployment directory
Copy the example splunk-default.yml into the splunk directory as default.yml (the docker compose file mounts ./splunk/default.yml into the container)
Download the splunk-kafka-connect-v2.1.0.jar from https://github.com/splunk/kafka-connect-splunk/releases/download/v2.1.0/splunk-kafka-connect-v2.1.0.jar and copy it into the connectors directory.
- Deploy kafka/splunk using docker compose
Copy and modify the docker-compose example yaml file for kafka/splunk
execute docker compose up -d
Add a new topic to kafka for storing network metrics
- Configure confluent kafka to send data to splunk
See the section below on “configuring Kafka to Send Data”
Deploy ontp-wire via docker to start collecting metrics and reporting network metrics
Note
Modify the ontp-wire configuration to send to your kafka instance and topic.
Configure Kafka to Send Data To Splunk As JSON
# Register the Splunk sink connector with the Kafka Connect REST API (port 8083,
# the "connect" service in the compose file). Uses JsonConverter, so records from
# the "NY1-Zone1" topic are forwarded to the Splunk HEC endpoint as JSON.
# NOTE(review): "splunk.hec.token" is masked below — substitute the HEC token from
# your Splunk defaults yaml, and keep the real token out of version control.
curl localhost:8083/connectors -X POST -H "Content-Type: application/json" -d '{
"name": "kafka-connect-splunk",
"config": {
"value.converter.schema.registry.url": "http://schema-registry:8081",
"value.converter.schemas.enable": "false",
"schemas.enable": "false",
"name": "kafka-connect-splunk",
"connector.class": "com.splunk.kafka.connect.SplunkSinkConnector",
"tasks.max": "3",
"value.converter": "org.apache.kafka.connect.json.JsonConverter",
"topics": "NY1-Zone1",
"splunk.hec.token": "************************************",
"splunk.hec.uri": "http://splunk:8099",
"splunk.hec.raw": "false",
"splunk.hec.ack.enabled": "false",
"splunk.indexes": "main",
"splunk.hec.ssl.validate.certs": "false",
"enable.timestamp.extraction": "false"
}
}'
Configure Kafka to Send Data To Splunk As String Data
# Register the Splunk sink connector with the Kafka Connect REST API (port 8083).
# Identical to the JSON variant except for "value.converter": StringConverter
# forwards each record value to the Splunk HEC endpoint as a raw string.
# NOTE(review): only one of the two variants can be registered at a time — both
# use the connector name "kafka-connect-splunk".
curl localhost:8083/connectors -X POST -H "Content-Type: application/json" -d '{
"name": "kafka-connect-splunk",
"config": {
"value.converter.schema.registry.url": "http://schema-registry:8081",
"value.converter.schemas.enable": "false",
"schemas.enable": "false",
"name": "kafka-connect-splunk",
"connector.class": "com.splunk.kafka.connect.SplunkSinkConnector",
"tasks.max": "3",
"value.converter": "org.apache.kafka.connect.storage.StringConverter",
"topics": "NY1-Zone1",
"splunk.hec.token": "************************************",
"splunk.hec.uri": "http://splunk:8099",
"splunk.hec.raw": "false",
"splunk.hec.ack.enabled": "false",
"splunk.indexes": "main",
"splunk.hec.ssl.validate.certs": "false",
"enable.timestamp.extraction": "false"
}
}'
5.1.3. Kafka / splunk Docker compose#
---
# Kafka (Confluent Platform 7.3.2) + Splunk single-host development stack.
# All services share the attachable bridge network "dev-kafka", so container
# DNS names (broker, schema-registry, connect, splunk, ...) resolve internally.
# NOTE(review): indentation was reconstructed — this page's extraction had
# flattened the YAML; verify against the original compose file.
version: '3.6'

networks:
  dev-kafka:
    driver: bridge
    attachable: true

# Named volumes persist Splunk state across container restarts.
volumes:
  splunk1-var:
  splunk1-etc:

services:
  zookeeper:
    image: confluentinc/cp-zookeeper:7.3.2
    hostname: zookeeper
    container_name: zookeeper
    networks:
      - dev-kafka
    ports:
      - "2181:2181"
    environment:
      ZOOKEEPER_CLIENT_PORT: 2181
      ZOOKEEPER_TICK_TIME: 2000
    volumes:
      # Keep container clock/timezone display in sync with the host.
      - /etc/localtime:/etc/localtime:ro

  broker:
    image: confluentinc/cp-server:7.3.2
    hostname: broker
    container_name: broker
    depends_on:
      - zookeeper
    networks:
      - dev-kafka
    extra_hosts:
      # NOTE(review): deployment-host-specific address — adjust for your host.
      - "host.docker.internal:192.168.1.20"
    ports:
      - "9092:9092"
      - "9101:9101"  # JMX
    environment:
      KAFKA_BROKER_ID: 1
      KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
      # broker:29092 is the in-network listener; localhost:9092 is for host clients.
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://broker:29092,PLAINTEXT_HOST://localhost:9092
      KAFKA_METRIC_REPORTERS: io.confluent.metrics.reporter.ConfluentMetricsReporter
      # Single-broker dev settings: replication factors forced to 1.
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
      KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
      KAFKA_CONFLUENT_LICENSE_TOPIC_REPLICATION_FACTOR: 1
      KAFKA_CONFLUENT_BALANCER_TOPIC_REPLICATION_FACTOR: 1
      KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
      KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
      KAFKA_JMX_PORT: 9101
      KAFKA_JMX_HOSTNAME: localhost
      KAFKA_CONFLUENT_SCHEMA_REGISTRY_URL: http://schema-registry:8081
      CONFLUENT_METRICS_REPORTER_BOOTSTRAP_SERVERS: broker:29092
      CONFLUENT_METRICS_REPORTER_TOPIC_REPLICAS: 1
      CONFLUENT_METRICS_ENABLE: 'true'
      CONFLUENT_SUPPORT_CUSTOMER_ID: 'anonymous'
    volumes:
      - /etc/localtime:/etc/localtime:ro

  schema-registry:
    image: confluentinc/cp-schema-registry:7.3.2
    hostname: schema-registry
    container_name: schema-registry
    depends_on:
      - broker
    networks:
      - dev-kafka
    extra_hosts:
      - "host.docker.internal:192.168.1.20"
    ports:
      - "8081:8081"
    environment:
      SCHEMA_REGISTRY_HOST_NAME: schema-registry
      SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: 'broker:29092'
      SCHEMA_REGISTRY_LISTENERS: http://0.0.0.0:8081
    volumes:
      - /etc/localtime:/etc/localtime:ro

  connect:
    image: cnfldemos/cp-server-connect-datagen:0.5.3-7.1.0
    hostname: connect
    container_name: connect
    depends_on:
      - broker
      - schema-registry
    networks:
      - dev-kafka
    extra_hosts:
      - "host.docker.internal:192.168.1.20"
    ports:
      - "8083:8083"  # Connect REST API (used by the curl registration commands)
    environment:
      CONNECT_BOOTSTRAP_SERVERS: 'broker:29092'
      CONNECT_REST_ADVERTISED_HOST_NAME: connect
      CONNECT_GROUP_ID: compose-connect-group
      CONNECT_CONFIG_STORAGE_TOPIC: docker-connect-configs
      CONNECT_CONFIG_STORAGE_REPLICATION_FACTOR: 1
      CONNECT_OFFSET_FLUSH_INTERVAL_MS: 10000
      CONNECT_OFFSET_STORAGE_TOPIC: docker-connect-offsets
      CONNECT_OFFSET_STORAGE_REPLICATION_FACTOR: 1
      CONNECT_STATUS_STORAGE_TOPIC: docker-connect-status
      CONNECT_STATUS_STORAGE_REPLICATION_FACTOR: 1
      CONNECT_KEY_CONVERTER: org.apache.kafka.connect.storage.StringConverter
      CONNECT_VALUE_CONVERTER: io.confluent.connect.avro.AvroConverter
      CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_URL: http://schema-registry:8081
      # CLASSPATH required due to CC-2422
      CLASSPATH: /usr/share/java/monitoring-interceptors/monitoring-interceptors-7.3.2.jar
      CONNECT_PRODUCER_INTERCEPTOR_CLASSES: "io.confluent.monitoring.clients.interceptor.MonitoringProducerInterceptor"
      CONNECT_CONSUMER_INTERCEPTOR_CLASSES: "io.confluent.monitoring.clients.interceptor.MonitoringConsumerInterceptor"
      # /usr/local/share/kafka/plugins is where the Splunk sink jar is mounted below.
      CONNECT_PLUGIN_PATH: "/usr/share/java,/usr/share/confluent-hub-components,/usr/local/share/kafka/plugins"
      CONNECT_LOG4J_LOGGERS: org.apache.zookeeper=ERROR,org.I0Itec.zkclient=ERROR,org.reflections=ERROR
    volumes:
      - /etc/localtime:/etc/localtime:ro
      - ./connectors:/usr/local/share/kafka/plugins:ro
      - /var/run/docker.sock:/var/run/docker.sock

  control-center:
    image: confluentinc/cp-enterprise-control-center:7.3.2
    hostname: control-center
    container_name: control-center
    depends_on:
      - broker
      - schema-registry
      - connect
      - ksqldb-server
    networks:
      - dev-kafka
    extra_hosts:
      - "host.docker.internal:192.168.1.20"
    ports:
      - "9021:9021"
    environment:
      CONTROL_CENTER_BOOTSTRAP_SERVERS: 'broker:29092'
      CONTROL_CENTER_CONNECT_CONNECT-DEFAULT_CLUSTER: 'connect:8083'
      CONTROL_CENTER_KSQL_KSQLDB1_URL: "http://ksqldb-server:8088"
      CONTROL_CENTER_KSQL_KSQLDB1_ADVERTISED_URL: "http://localhost:8088"
      CONTROL_CENTER_SCHEMA_REGISTRY_URL: "http://schema-registry:8081"
      CONTROL_CENTER_REPLICATION_FACTOR: 1
      CONTROL_CENTER_INTERNAL_TOPICS_PARTITIONS: 1
      CONTROL_CENTER_MONITORING_INTERCEPTOR_TOPIC_PARTITIONS: 1
      CONFLUENT_METRICS_TOPIC_REPLICATION: 1
      PORT: 9021
    volumes:
      - /etc/localtime:/etc/localtime:ro
      - /var/run/docker.sock:/var/run/docker.sock

  ksqldb-server:
    image: confluentinc/cp-ksqldb-server:7.3.2
    hostname: ksqldb-server
    container_name: ksqldb-server
    depends_on:
      - broker
      - connect
    networks:
      - dev-kafka
    extra_hosts:
      - "host.docker.internal:192.168.1.20"
    ports:
      - "8088:8088"
    environment:
      KSQL_CONFIG_DIR: "/etc/ksql"
      KSQL_BOOTSTRAP_SERVERS: "broker:29092"
      KSQL_HOST_NAME: ksqldb-server
      KSQL_LISTENERS: "http://0.0.0.0:8088"
      KSQL_CACHE_MAX_BYTES_BUFFERING: 0
      KSQL_KSQL_SCHEMA_REGISTRY_URL: "http://schema-registry:8081"
      KSQL_PRODUCER_INTERCEPTOR_CLASSES: "io.confluent.monitoring.clients.interceptor.MonitoringProducerInterceptor"
      KSQL_CONSUMER_INTERCEPTOR_CLASSES: "io.confluent.monitoring.clients.interceptor.MonitoringConsumerInterceptor"
      KSQL_KSQL_CONNECT_URL: "http://connect:8083"
      KSQL_KSQL_LOGGING_PROCESSING_TOPIC_REPLICATION_FACTOR: 1
      KSQL_KSQL_LOGGING_PROCESSING_TOPIC_AUTO_CREATE: 'true'
      KSQL_KSQL_LOGGING_PROCESSING_STREAM_AUTO_CREATE: 'true'
    volumes:
      - /etc/localtime:/etc/localtime:ro

  ksqldb-cli:
    image: confluentinc/cp-ksqldb-cli:7.3.2
    container_name: ksqldb-cli
    depends_on:
      - broker
      - connect
      - ksqldb-server
    networks:
      - dev-kafka
    entrypoint: /bin/sh
    tty: true
    volumes:
      - /etc/localtime:/etc/localtime:ro

  ksql-datagen:
    image: confluentinc/ksqldb-examples:7.3.2
    hostname: ksql-datagen
    container_name: ksql-datagen
    depends_on:
      - ksqldb-server
      - broker
      - schema-registry
      - connect
    networks:
      - dev-kafka
    extra_hosts:
      - "host.docker.internal:192.168.1.20"
    # Waits for broker and schema registry, then idles so the container stays up.
    command: "bash -c 'echo Waiting for Kafka to be ready... && \
              cub kafka-ready -b broker:29092 1 40 && \
              echo Waiting for Confluent Schema Registry to be ready... && \
              cub sr-ready schema-registry 8081 40 && \
              echo Waiting a few seconds for topic creation to finish... && \
              sleep 11 && \
              tail -f /dev/null'"
    environment:
      KSQL_CONFIG_DIR: "/etc/ksql"
      STREAMS_BOOTSTRAP_SERVERS: broker:29092
      STREAMS_SCHEMA_REGISTRY_HOST: schema-registry
      STREAMS_SCHEMA_REGISTRY_PORT: 8081
    volumes:
      - /etc/localtime:/etc/localtime:ro

  rest-proxy:
    image: confluentinc/cp-kafka-rest:7.3.2
    depends_on:
      - broker
      - schema-registry
    networks:
      - dev-kafka
    extra_hosts:
      - "host.docker.internal:192.168.1.20"
    ports:
      - "8082:8082"
    hostname: rest-proxy
    container_name: rest-proxy
    environment:
      KAFKA_REST_HOST_NAME: rest-proxy
      KAFKA_REST_BOOTSTRAP_SERVERS: 'broker:29092'
      KAFKA_REST_LISTENERS: "http://0.0.0.0:8082"
      KAFKA_REST_SCHEMA_REGISTRY_URL: 'http://schema-registry:8081'
    volumes:
      - /etc/localtime:/etc/localtime:ro

  splunk:
    # NOTE(review): unpinned image tag — pin a version for reproducible deploys.
    image: splunk/splunk
    hostname: splunk
    container_name: splunk
    environment:
      # NOTE(review): plaintext admin password in compose — dev only; move to a
      # secret store before any shared deployment.
      SPLUNK_PASSWORD: splunk9957
      SPLUNK_START_ARGS: "--accept-license"
    networks:
      - dev-kafka
    extra_hosts:
      - "host.docker.internal:192.168.1.20"
    ports:
      - "8000:8000"  # Splunk Web (http_port in the defaults yaml)
      - "8099:8099"  # HEC (splunk.hec.port in the defaults yaml)
    volumes:
      - /etc/localtime:/etc/localtime:ro
      # The defaults yaml from section 5.1.4, consumed at container start.
      - ./splunk/default.yml:/tmp/defaults/default.yml:ro
      - /var/run/docker.sock:/var/run/docker.sock
      - splunk1-var:/opt/splunk/var
      - splunk1-etc:/opt/splunk/etc
5.1.4. Splunk Defaults Yaml Configuration#
---
# Splunk Docker default.yml — consumed by splunk-ansible inside the container
# (the compose file mounts it at /tmp/defaults/default.yml).
# NOTE(review): indentation was reconstructed to the splunk-ansible default.yml
# layout — this page's extraction had flattened the YAML; verify against a
# generated default.yml before use.
ansible_connection: local
ansible_environment: {}
ansible_post_tasks: []
ansible_pre_tasks: []
cert_prefix: https
config:
  baked: default.yml
  defaults_dir: /tmp/defaults
  env:
    headers: null
    var: SPLUNK_DEFAULTS_URL
    verify: true
  host:
    headers: null
    url: null
    verify: true
  max_delay: 60
  max_retries: 3
  max_timeout: 1200
# Quoted: cron-style schedule string containing '*' and spaces.
dmc_asset_interval: '3,18,33,48 * * * *'
dmc_forwarder_monitoring: false
docker: true
es_ssl_enablement: '--ssl_enablement auto'
hide_password: false
java_download_url: null
java_update_version: null
java_version: null
retry_delay: 6
retry_num: 60
shc_sync_retry_num: 60
splunk:
  admin_user: admin
  allow_upgrade: true
  app_paths:
    default: /opt/splunk/etc/apps
    deployment: /opt/splunk/etc/deployment-apps
    httpinput: /opt/splunk/etc/apps/splunk_httpinput
    idxc: /opt/splunk/etc/master-apps
    shc: /opt/splunk/etc/shcluster/apps
  apps_location_local: []
  appserver:
    port: 8065
  asan: false
  auxiliary_cluster_masters: []
  build_url_bearer_token: null
  cluster_master_url: null
  connection_timeout: 0
  declarative_admin_password: false
  deployer_url: null
  deployment_client:
    name: null
  dfs:
    dfc_num_slots: 4
    dfw_num_slots: 10
    dfw_num_slots_enabled: false
    enable: false
    port: 9000
    spark_master_host: 127.0.0.1
    spark_master_webui_port: 8080
  disable_popups: false
  dsp:
    cert: null
    enable: false
    pipeline_desc: null
    pipeline_name: null
    pipeline_spec: null
    server: forwarders.scp.splunk.com:9997
    verify: false
  enable_service: false
  es:
    ssl_enablement: auto
  exec: /opt/splunk/bin/splunk
  group: splunk
  # HTTP Event Collector — port/ssl/token must match the compose port mapping
  # (8099) and the splunk.hec.uri/token used when registering the Kafka sink.
  hec:
    cert: null
    enable: true
    password: null
    port: 8099
    ssl: false
    # NOTE(review): static token committed to config — rotate and move to a
    # secret store for anything beyond local development.
    token: 42d6115c-d6ac-4934-8db4-cf54691c4c5a
  home: /opt/splunk
  http_enableSSL: false
  http_enableSSL_cert: null
  http_enableSSL_privKey: null
  http_enableSSL_privKey_password: null
  http_port: 8000
  idxc:
    discoveryPass4SymmKey: woDDsxFtLMKEwpcqC8O0KCXCrQxZWTLDs0LChSlpwp3CoQ==
    label: idxc_label
    pass4SymmKey: woDDsxFtLMKEwpcqC8O0KCXCrQxZWTLDs0LChSlpwp3CoQ==
    replication_factor: 3
    replication_port: 9887
    search_factor: 3
    secret: woDDsxFtLMKEwpcqC8O0KCXCrQxZWTLDs0LChSlpwp3CoQ==
  ignore_license: false
  kvstore:
    port: 8191
  launch: {}
  license_download_dest: /tmp/splunk.lic
  license_master_url: ''
  multisite_master_port: 8089
  multisite_replication_factor_origin: 2
  multisite_replication_factor_total: 3
  multisite_search_factor_origin: 1
  multisite_search_factor_total: 3
  opt: /opt
  pass4SymmKey: null
  password: BwE6BS5dMjfDpMOfwpN2w77DusOLLsORcV7Dt3/ClTLCsQ==
  pid: /opt/splunk/var/run/splunk/splunkd.pid
  root_endpoint: null
  s2s:
    ca: null
    cert: null
    enable: true
    password: null
    port: 9997
    ssl: false
  search_head_captain_url: null
  secret: null
  service_name: null
  set_search_peers: true
  shc:
    deployer_push_mode: null
    label: shc_label
    pass4SymmKey: wrXDgsOnwq7CkS/DuxHDqGHDt8OQPmzDtUPDp8OSwrwjYMODQzk=
    replication_factor: 3
    replication_port: 9887
    secret: wrXDgsOnwq7CkS/DuxHDqGHDt8OQPmzDtUPDp8OSwrwjYMODQzk=
  smartstore: null
  ssl:
    ca: null
    cert: null
    enable: true
    password: null
  svc_port: 8089
  tar_dir: splunk
  user: splunk
  wildcard_license: false
splunk_home_ownership_enforcement: true
splunkbase_password: null
splunkbase_token: null
splunkbase_username: null
wait_for_splunk_retry_num: 60