
kafka mlc
MitchellGerdisch committed Dec 18, 2023
1 parent a2b370a commit 8df2dc4
Showing 64 changed files with 1,029 additions and 706 deletions.
4 changes: 2 additions & 2 deletions Makefile
@@ -1,6 +1,6 @@
-VERSION := 2.0.1
+VERSION := 1.0.0

-PACK := k8sdatadog
+PACK := kafkacluster
ORG := pulumi-pequod
PROJECT := github.com/${ORG}/${PACK}

4 changes: 2 additions & 2 deletions README.md
@@ -1,5 +1,5 @@
-# Pequod K8s Datadog Multilanguage Component
-This multilanguage component is used to setup Datadog monitoring for K8s clusters.
+# Pequod Kafka Cluster Multilanguage Component
+This multilanguage component is used to deploy a Kafka cluster (using Confluent Cloud).

## Building and Publishing the MLC
Whenever the component code (under provider/cmd/pulumi-resource...) or schema.json, etc. has been updated, follow the process below to make the updated version available:
File renamed without changes.
42 changes: 0 additions & 42 deletions provider/cmd/pulumi-resource-k8sdatadog/k8sMonitor.ts

This file was deleted.

192 changes: 192 additions & 0 deletions provider/cmd/pulumi-resource-kafkacluster/confluentCluster.ts
@@ -0,0 +1,192 @@
import * as pulumi from "@pulumi/pulumi";
import * as confluent from "@pulumi/confluentcloud";

// Component resource to stand up Pequod's Kafka cluster in ConfluentCloud.

export interface ConfluentClusterArgs {
kafkaClusterName: string;
kafkaTopics: string[];
region?: string;
}

export class ConfluentCluster extends pulumi.ComponentResource {
public readonly envId: pulumi.Output<string>;
public readonly kafkaUrl: pulumi.Output<string>;

constructor(name: string, args: ConfluentClusterArgs, opts?: pulumi.ComponentResourceOptions) {

super("kafkacluster:resource:ConfluentCluster", name, args, opts);

const clusterRegion = args.region || "centralus";

// Create a Confluent environment, which is a container for the other Confluent resources.
const confluentEnvName = `${name}-environment`;
const env = new confluent.Environment(confluentEnvName, {
displayName: confluentEnvName
}, { parent: this });

// Create a Standard Kafka cluster with single-zone availability in the selected Azure region.
const cluster = new confluent.KafkaCluster(`${name}-${args.kafkaClusterName}`, {
displayName: args.kafkaClusterName,
availability: "SINGLE_ZONE",
cloud: "AZURE",
region: clusterRegion,
environment: {
id: env.id,
},
standard: {}
}, { parent: this });

// Create the admin-level service account used to create the Kafka topics and the ACLs that grant the producer and consumer accounts access.
// This app-manager account is similar to the "DBA" account in relational databases or the root account in Linux.
const serviceAccount = new confluent.ServiceAccount(`${name}-app-manager`, {
description: `Service account to manage the ${args.kafkaClusterName} Kafka cluster`,
}, { parent: this });

const roleBinding = new confluent.RoleBinding(`${name}-app-manager-kafka-cluster-admin`, {
principal: pulumi.interpolate`User:${serviceAccount.id}`,
roleName: "CloudClusterAdmin",
crnPattern: cluster.rbacCrn,
}, { parent: this });

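// API key for the app-manager account; the topics and ACLs below are created with these credentials.
// dependsOn ensures the CloudClusterAdmin role binding exists before the key is created.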
const managerApiKey = new confluent.ApiKey(`${name}-app-manager-kafka-api-key`, {
displayName: "app-manager-kafka-api-key",
description: `Kafka API Key that is managed by Pulumi stack, ${pulumi.getOrganization()}/${pulumi.getProject()}/${pulumi.getStack()}`,
owner: {
id: serviceAccount.id,
kind: serviceAccount.kind,
apiVersion: serviceAccount.apiVersion,
},
managedResource: {
id: cluster.id,
apiVersion: cluster.apiVersion,
kind: cluster.kind,
environment: {
id: env.id,
},
}
}, {
dependsOn: roleBinding,
parent: this
});

// Create a producer service account; the topic ACLs created below grant it write access.
const producerAccount = new confluent.ServiceAccount(`${name}-producer`, {
description: `Service account to produce to topics of ${args.kafkaClusterName} Kafka cluster`,
}, { parent: this });

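// API key that producer applications can use to authenticate to the cluster.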
const producerApiKey = new confluent.ApiKey(`${name}-producer-api-key`, {
owner: {
id: producerAccount.id,
kind: producerAccount.kind,
apiVersion: producerAccount.apiVersion,
},
managedResource: {
id: cluster.id,
apiVersion: cluster.apiVersion,
kind: cluster.kind,
environment: {
id: env.id,
},
},
}, { parent: this });

// Create a consumer service account that will read messages from the Kafka topics.
const consumerAccount = new confluent.ServiceAccount(`${name}-consumer`, {
description: `Service account to consume from topics of ${args.kafkaClusterName} Kafka cluster`,
}, { parent: this });

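// API key that consumer applications can use to authenticate to the cluster.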
const consumerApiKey = new confluent.ApiKey(`${name}-consumer-api-key`, {
owner: {
id: consumerAccount.id,
kind: consumerAccount.kind,
apiVersion: consumerAccount.apiVersion,
},
managedResource: {
id: cluster.id,
apiVersion: cluster.apiVersion,
kind: cluster.kind,
environment: {
id: env.id,
},
},
}, { parent: this });

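// Allow the consumer account to read from consumer groups whose IDs start with "confluent_cli_consumer_" (the prefix used by Confluent CLI consumers).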
new confluent.KafkaAcl(`${name}-consumer-read-group-acl`, {
kafkaCluster: {
id: cluster.id,
},
resourceType: "GROUP",
resourceName: "confluent_cli_consumer_",
patternType: "PREFIXED",
principal: pulumi.interpolate`User:${consumerAccount.id}`,
host: "*",
operation: "READ",
permission: "ALLOW",
restEndpoint: cluster.restEndpoint,
credentials: {
key: managerApiKey.id,
secret: managerApiKey.secret,
}
}, { parent: this });

// Create topics and manage permissions.
for (const kafkaTopic of args.kafkaTopics) {

// Create Kafka topic using the cluster admin service account credentials created above
const topic = new confluent.KafkaTopic(`${name}-${kafkaTopic}`, {
kafkaCluster: {
id: cluster.id,
},
topicName: kafkaTopic,
restEndpoint: cluster.restEndpoint,
credentials: {
key: managerApiKey.id,
secret: managerApiKey.secret,
},
}, { parent: this });

// Give the producer write permission to the topic.
new confluent.KafkaAcl(`${name}-${kafkaTopic}-app-producer-write`, {
kafkaCluster: {
id: cluster.id,
},
resourceType: "TOPIC",
resourceName: topic.topicName,
patternType: "LITERAL",
principal: pulumi.interpolate`User:${producerAccount.id}`,
host: "*",
operation: "WRITE",
permission: "ALLOW",
restEndpoint: cluster.restEndpoint,
credentials: {
key: managerApiKey.id,
secret: managerApiKey.secret,
}
}, { parent: this });

// Give the consumer read access to the topic.
new confluent.KafkaAcl(`${name}-${kafkaTopic}-consumer-read-topic-acl`, {
kafkaCluster: {
id: cluster.id,
},
resourceType: "TOPIC",
resourceName: topic.topicName,
patternType: "LITERAL",
principal: pulumi.interpolate`User:${consumerAccount.id}`,
host: "*",
operation: "READ",
permission: "ALLOW",
restEndpoint: cluster.restEndpoint,
credentials: {
key: managerApiKey.id,
secret: managerApiKey.secret,
}
}, { parent: this });
};

this.envId = env.id;
this.kafkaUrl = pulumi.interpolate`https://confluent.cloud/environments/${env.id}/clusters`;
this.registerOutputs({});
}
};
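For context, a minimal sketch of how this component class might be exercised directly from a Pulumi TypeScript program; the resource name, topic names, and region below are illustrative assumptions, not part of this commit.

```typescript
// Hypothetical test program; all values shown are illustrative only.
import { ConfluentCluster } from "./confluentCluster";

// Stand up a cluster with two topics; region is optional and defaults to "centralus".
const cluster = new ConfluentCluster("orders", {
    kafkaClusterName: "orders-cluster",
    kafkaTopics: ["orders", "inventory"],
    region: "eastus",
});

// Surface the component's outputs on the stack.
export const envId = cluster.envId;
export const kafkaUrl = cluster.kafkaUrl;
```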
@@ -2,8 +2,7 @@
"version": "${VERSION}",
"bin": "bin/index.js",
"dependencies": {
"@pulumi/kubernetes": "^4.0.0",
"@pulumi/pulumi": "^3.0.0"
"@pulumi/confluentcloud": "^1.6.0"
},
"devDependencies": {
"@types/node": "^10.0.0",
@@ -15,7 +15,7 @@
import * as pulumi from "@pulumi/pulumi";
import * as provider from "@pulumi/pulumi/provider";

-import { K8sMonitor, K8sMonitorArgs} from "./k8sMonitor";
+import { ConfluentCluster, ConfluentClusterArgs} from "./confluentCluster";

export class Provider implements provider.Provider {
constructor(readonly version: string, readonly schema: string) { }
@@ -25,23 +25,23 @@ export class Provider implements provider.Provider

// TODO: Add support for additional component resources here.
switch (type) {
case "k8sdatadog:index:K8sMonitor":
return await constructK8sMonitor(name, inputs, options);
case "kafkacluster:index:ConfluentCluster":
return await constructConfluentCluster(name, inputs, options);
default:
throw new Error(`unknown resource type ${type}`);
}
}
}

-async function constructK8sMonitor(name: string, inputs: pulumi.Inputs,
+async function constructConfluentCluster(name: string, inputs: pulumi.Inputs,
options: pulumi.ComponentResourceOptions): Promise<provider.ConstructResult> {

// Create the component resource.
-const k8sMonitor = new K8sMonitor(name, inputs as K8sMonitorArgs, options);
+const confluentCluster = new ConfluentCluster(name, inputs as ConfluentClusterArgs, options);

// Return the component resource's URN and outputs as its state.
return {
-urn: k8sMonitor.urn,
+urn: confluentCluster.urn,
state: {
},
};
@@ -15,6 +15,6 @@
"files": [
"index.ts",
"provider.ts",
"k8sMonitor.ts"
"confluentCluster.ts"
]
}
2 changes: 1 addition & 1 deletion provider/go.mod
@@ -1,4 +1,4 @@
-module github.com/pulumi/pulumi-pequod-k8sdatadog
+module github.com/pulumi/pulumi-pequod-kafkacluster

go 1.17

