From 309f11ed1ad775a0631fe53ccfc84d84ec652b88 Mon Sep 17 00:00:00 2001
From: Michael Edgar
Date: Mon, 10 Jun 2024 08:00:39 -0400
Subject: [PATCH] Support for OIDC, dex installation, and authorization configuration

Signed-off-by: Michael Edgar
---
 api/pom.xml | 4 +
 .../console/api/BrokersResource.java | 5 +
 .../streamshub/console/api/ClientFactory.java | 114 ++--
 .../console/api/ConsumerGroupsResource.java | 11 +
 .../console/api/KafkaClustersResource.java | 11 +-
 .../console/api/KafkaRebalancesResource.java | 7 +
 .../console/api/RecordsResource.java | 7 +
 .../client/ForbiddenExceptionHandler.java | 34 ++
 .../console/api/model/ConsumerGroup.java | 7 +-
 .../streamshub/console/api/model/Topic.java | 5 +-
 .../security/AuthorizationInterceptor.java | 47 ++
 .../console/api/security/Authorized.java | 18 +
 .../ConsoleAuthenticationMechanism.java | 403 ++++++++++++++
 .../api/security/ConsolePermission.java | 172 ++++++
 .../security/OidcTenantConfigResolver.java | 49 ++
 .../api/security/PermissionService.java | 68 +++
 .../api/security/ResourcePrivilege.java | 20 +
 .../security/SaslJaasConfigCredential.java | 40 ++
 .../api/service/ConsumerGroupService.java | 69 ++-
 .../api/service/KafkaClusterService.java | 8 +-
 .../console/api/service/RecordService.java | 21 +-
 .../api/service/TopicDescribeService.java | 510 ++++++++++++++++++
 .../console/api/service/TopicService.java | 483 ++---------------
 api/src/main/resources/application.properties | 8 +-
 .../console/config/ConsoleConfig.java | 11 +
 .../console/config/KafkaClusterConfig.java | 10 +
 .../config/security/GlobalSecurityConfig.java | 14 +
 .../console/config/security/OidcConfig.java | 42 ++
 .../console/config/security/Privilege.java | 24 +
 .../console/config/security/RoleConfig.java | 27 +
 .../console/config/security/RuleConfig.java | 47 ++
 .../config/security/SecurityConfig.java | 27 +
 .../config/security/SubjectConfig.java | 45 ++
 install/004-deploy-dex.sh | 37 ++
 install/_common.sh | 82 +++
 install/resources/dex/dex.yaml | 180 +++++++
 pom.xml | 11 +
 ui/api/api.ts | 5 +
 ui/api/kafka/actions.ts | 16 +-
 ui/app/[locale]/(authorized)/kafka/page.tsx | 2 -
 ui/app/[locale]/(public)/(home)/page.tsx | 28 +-
 ui/app/[locale]/layout.tsx | 5 -
 ui/app/api/auth/[...nextauth]/anonymous.ts | 1 -
 ui/app/api/auth/[...nextauth]/keycloak.ts | 144 -----
 ui/app/api/auth/[...nextauth]/oidc.ts | 182 +++++++
 ui/app/api/auth/[...nextauth]/route.ts | 61 ++-
 ui/app/api/auth/[...nextauth]/scram.ts | 2 -
 ui/components/ClustersTable.tsx | 28 +-
 ui/environment.d.ts | 3 -
 ui/middleware.ts | 13 +-
 ui/package-lock.json | 102 ++--
 ui/package.json | 2 +
 ui/utils/config.ts | 26 +
 ui/utils/env.ts | 8 -
 54 files changed, 2477 insertions(+), 829 deletions(-)
 create mode 100644 api/src/main/java/com/github/streamshub/console/api/errors/client/ForbiddenExceptionHandler.java
 create mode 100644 api/src/main/java/com/github/streamshub/console/api/security/AuthorizationInterceptor.java
 create mode 100644 api/src/main/java/com/github/streamshub/console/api/security/Authorized.java
 create mode 100644 api/src/main/java/com/github/streamshub/console/api/security/ConsoleAuthenticationMechanism.java
 create mode 100644 api/src/main/java/com/github/streamshub/console/api/security/ConsolePermission.java
 create mode 100644 api/src/main/java/com/github/streamshub/console/api/security/OidcTenantConfigResolver.java
 create mode 100644 api/src/main/java/com/github/streamshub/console/api/security/PermissionService.java
 create mode 100644
api/src/main/java/com/github/streamshub/console/api/security/ResourcePrivilege.java
 create mode 100644 api/src/main/java/com/github/streamshub/console/api/security/SaslJaasConfigCredential.java
 create mode 100644 api/src/main/java/com/github/streamshub/console/api/service/TopicDescribeService.java
 create mode 100644 common/src/main/java/com/github/streamshub/console/config/security/GlobalSecurityConfig.java
 create mode 100644 common/src/main/java/com/github/streamshub/console/config/security/OidcConfig.java
 create mode 100644 common/src/main/java/com/github/streamshub/console/config/security/Privilege.java
 create mode 100644 common/src/main/java/com/github/streamshub/console/config/security/RoleConfig.java
 create mode 100644 common/src/main/java/com/github/streamshub/console/config/security/RuleConfig.java
 create mode 100644 common/src/main/java/com/github/streamshub/console/config/security/SecurityConfig.java
 create mode 100644 common/src/main/java/com/github/streamshub/console/config/security/SubjectConfig.java
 create mode 100755 install/004-deploy-dex.sh
 create mode 100644 install/_common.sh
 create mode 100644 install/resources/dex/dex.yaml
 delete mode 100644 ui/app/api/auth/[...nextauth]/keycloak.ts
 create mode 100644 ui/app/api/auth/[...nextauth]/oidc.ts
 create mode 100644 ui/utils/config.ts

diff --git a/api/pom.xml b/api/pom.xml
index 2e7b7114c..fe32a787e 100644
--- a/api/pom.xml
+++ b/api/pom.xml
@@ -88,6 +88,10 @@
       <groupId>io.quarkus</groupId>
       <artifactId>quarkus-apicurio-registry-avro</artifactId>
     </dependency>
+    <dependency>
+      <groupId>io.quarkus</groupId>
+      <artifactId>quarkus-oidc</artifactId>
+    </dependency>
     <dependency>
       <groupId>io.smallrye.common</groupId>
       <artifactId>smallrye-common-annotation</artifactId>
diff --git a/api/src/main/java/com/github/streamshub/console/api/BrokersResource.java b/api/src/main/java/com/github/streamshub/console/api/BrokersResource.java
index 7109aba39..bcf004311 100644
--- a/api/src/main/java/com/github/streamshub/console/api/BrokersResource.java
+++ b/api/src/main/java/com/github/streamshub/console/api/BrokersResource.java
@@ -16,7 +16,10 @@
 import org.eclipse.microprofile.openapi.annotations.tags.Tag;

 import com.github.streamshub.console.api.model.ConfigEntry;
+import com.github.streamshub.console.api.security.Authorized;
+import com.github.streamshub.console.api.security.ResourcePrivilege;
 import com.github.streamshub.console.api.service.BrokerService;
+import com.github.streamshub.console.config.security.Privilege;

 @Path("/api/kafkas/{clusterId}/nodes")
 @Tag(name = "Kafka Cluster Resources")
@@ -32,6 +35,8 @@ public class BrokersResource {
     @APIResponse(responseCode = "404", ref = "NotFound")
     @APIResponse(responseCode = "500", ref = "ServerError")
     @APIResponse(responseCode = "504", ref = "ServerTimeout")
+    @Authorized
+    @ResourcePrivilege(Privilege.GET)
     public CompletionStage describeConfigs(
             @Parameter(description = "Cluster identifier")
             @PathParam("clusterId")
diff --git a/api/src/main/java/com/github/streamshub/console/api/ClientFactory.java b/api/src/main/java/com/github/streamshub/console/api/ClientFactory.java
index 87505bfaf..d85b85915 100644
--- a/api/src/main/java/com/github/streamshub/console/api/ClientFactory.java
+++ b/api/src/main/java/com/github/streamshub/console/api/ClientFactory.java
@@ -1,6 +1,5 @@
 package com.github.streamshub.console.api;

-import java.util.Base64;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
@@ -46,14 +45,13 @@
 import org.apache.kafka.common.config.SslConfigs;
 import org.apache.kafka.common.security.auth.SecurityProtocol;
 import org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule;
-import
org.apache.kafka.common.security.plain.PlainLoginModule; -import org.apache.kafka.common.security.scram.ScramLoginModule; import org.apache.kafka.common.serialization.ByteArrayDeserializer; import org.apache.kafka.common.serialization.StringSerializer; import org.eclipse.microprofile.config.Config; import org.jboss.logging.Logger; import com.fasterxml.jackson.databind.ObjectMapper; +import com.github.streamshub.console.api.security.SaslJaasConfigCredential; import com.github.streamshub.console.api.support.Holder; import com.github.streamshub.console.api.support.KafkaContext; import com.github.streamshub.console.api.support.TrustAllCertificateManager; @@ -66,6 +64,7 @@ import io.fabric8.kubernetes.client.informers.ResourceEventHandler; import io.fabric8.kubernetes.client.informers.SharedIndexInformer; import io.fabric8.kubernetes.client.informers.cache.Cache; +import io.quarkus.security.identity.SecurityIdentity; import io.strimzi.api.kafka.model.kafka.Kafka; import io.strimzi.api.kafka.model.kafka.KafkaClusterSpec; import io.strimzi.api.kafka.model.kafka.KafkaSpec; @@ -95,20 +94,11 @@ public class ClientFactory { public static final String SCRAM_SHA256 = "SCRAM-SHA-256"; public static final String SCRAM_SHA512 = "SCRAM-SHA-512"; - private static final String BEARER = "Bearer "; private static final String STRIMZI_OAUTH_CALLBACK = "io.strimzi.kafka.oauth.client.JaasClientOauthLoginCallbackHandler"; - private static final String SASL_OAUTH_CONFIG_TEMPLATE = OAuthBearerLoginModule.class.getName() - + " required" - + " oauth.access.token=\"%s\" ;"; - private static final String BASIC = "Basic "; - private static final String BASIC_TEMPLATE = "%s required username=\"%%s\" password=\"%%s\" ;"; - private static final String SASL_PLAIN_CONFIG_TEMPLATE = BASIC_TEMPLATE.formatted(PlainLoginModule.class.getName()); - private static final String SASL_SCRAM_CONFIG_TEMPLATE = BASIC_TEMPLATE.formatted(ScramLoginModule.class.getName()); - - static final String NO_SUCH_KAFKA_MESSAGE = "Requested Kafka cluster %s does not exist or is not configured"; + public static final String NO_SUCH_KAFKA_MESSAGE = "Requested Kafka cluster %s does not exist or is not configured"; private final Function noSuchKafka = - clusterName -> new NotFoundException(NO_SUCH_KAFKA_MESSAGE.formatted(clusterName)); + clusterId -> new NotFoundException(NO_SUCH_KAFKA_MESSAGE.formatted(clusterId)); @Inject Logger log; @@ -168,7 +158,7 @@ Map produceKafkaContexts(Function, Adm consoleConfig.getKafka().getClusters() .stream() .filter(c -> cachedKafkaResource(c).isEmpty()) - .filter(Predicate.not(KafkaClusterConfig::hasNamespace)) + //.filter(Predicate.not(KafkaClusterConfig::hasNamespace)) .forEach(clusterConfig -> putKafkaContext(contexts, clusterConfig, Optional.empty(), @@ -464,6 +454,7 @@ void disposeKafkaContexts(@Disposes Map contexts) { @Produces @RequestScoped public KafkaContext produceKafkaContext(Map contexts, + SecurityIdentity identity, UnaryOperator filter, Function, Admin> adminBuilder) { @@ -473,22 +464,28 @@ public KafkaContext produceKafkaContext(Map contexts, return KafkaContext.EMPTY; } - return Optional.ofNullable(contexts.get(clusterId)) - .map(ctx -> { - if (ctx.admin() == null) { - /* - * Admin may be null if credentials were not given in the - * configuration. The user must provide the login secrets - * in the request in that case. 
- */ - var adminConfigs = maybeAuthenticate(ctx, Admin.class); - var admin = adminBuilder.apply(adminConfigs); - return new KafkaContext(ctx, filter.apply(admin)); - } + KafkaContext ctx = contexts.get(clusterId); - return ctx; - }) - .orElseThrow(() -> noSuchKafka.apply(clusterId)); + if (ctx == null) { + throw noSuchKafka.apply(clusterId); + } + + if (identity.isAnonymous()) { + return ctx; + } + + if (ctx.admin() == null) { + /* + * Admin may be null if credentials were not given in the + * configuration. The user must provide the login secrets + * in the request in that case. + */ + var adminConfigs = maybeAuthenticate(identity, ctx, Admin.class); + var admin = adminBuilder.apply(adminConfigs); + return new KafkaContext(ctx, filter.apply(admin)); + } + + return ctx; } public void disposeKafkaContext(@Disposes KafkaContext context, Map contexts) { @@ -505,8 +502,8 @@ public void disposeKafkaContext(@Disposes KafkaContext context, Map consumerSupplier(KafkaContext context) { - var configs = maybeAuthenticate(context, Consumer.class); + public Consumer consumerSupplier(SecurityIdentity identity, KafkaContext context) { + var configs = maybeAuthenticate(identity, context, Consumer.class); return new KafkaConsumer<>( configs, @@ -520,8 +517,8 @@ public void disposeConsumer(@Disposes Consumer consumer) @Produces @RequestScoped - public Producer producerSupplier(KafkaContext context) { - var configs = maybeAuthenticate(context, Producer.class); + public Producer producerSupplier(SecurityIdentity identity, KafkaContext context) { + var configs = maybeAuthenticate(identity, context, Producer.class); return new KafkaProducer<>( configs, context.schemaRegistryContext().keySerializer(), @@ -532,13 +529,13 @@ public void disposeProducer(@Disposes Producer producer) producer.close(); } - Map maybeAuthenticate(KafkaContext context, Class clientType) { + Map maybeAuthenticate(SecurityIdentity identity, KafkaContext context, Class clientType) { Map configs = context.configs(clientType); if (configs.containsKey(SaslConfigs.SASL_MECHANISM) && !configs.containsKey(SaslConfigs.SASL_JAAS_CONFIG)) { configs = new HashMap<>(configs); - configureAuthentication(context.saslMechanism(clientType), configs); + configureAuthentication(identity, context.saslMechanism(clientType), configs); } return configs; @@ -697,23 +694,25 @@ void logConfig(String clientType, Map config) { } } - void configureAuthentication(String saslMechanism, Map configs) { + void configureAuthentication(SecurityIdentity identity, String saslMechanism, Map configs) { + SaslJaasConfigCredential credential = identity.getCredential(SaslJaasConfigCredential.class); + switch (saslMechanism) { case OAUTHBEARER: - configureOAuthBearer(configs); + configureOAuthBearer(credential, configs); break; case PLAIN: - configureBasic(configs, SASL_PLAIN_CONFIG_TEMPLATE); + configureBasic(credential, configs); break; case SCRAM_SHA256, SCRAM_SHA512: - configureBasic(configs, SASL_SCRAM_CONFIG_TEMPLATE); + configureBasic(credential, configs); break; default: throw new NotAuthorizedException("Unknown"); } } - void configureOAuthBearer(Map configs) { + void configureOAuthBearer(SaslJaasConfigCredential credential, Map configs) { log.trace("SASL/OAUTHBEARER enabled"); configs.putIfAbsent(SaslConfigs.SASL_LOGIN_CALLBACK_HANDLER_CLASS, STRIMZI_OAUTH_CALLBACK); @@ -721,39 +720,12 @@ void configureOAuthBearer(Map configs) { // May still cause warnings to be logged when token will expire in less than SASL_LOGIN_REFRESH_MIN_PERIOD_SECONDS. 
configs.putIfAbsent(SaslConfigs.SASL_LOGIN_REFRESH_BUFFER_SECONDS, "0"); - String jaasConfig = getAuthorization(BEARER) - .map(SASL_OAUTH_CONFIG_TEMPLATE::formatted) - .orElseThrow(() -> new NotAuthorizedException(BEARER.trim())); - - configs.put(SaslConfigs.SASL_JAAS_CONFIG, jaasConfig); + configs.put(SaslConfigs.SASL_JAAS_CONFIG, credential.value()); } - void configureBasic(Map configs, String template) { + void configureBasic(SaslJaasConfigCredential credential, Map configs) { log.trace("SASL/SCRAM enabled"); - - String jaasConfig = getBasicAuthentication() - .map(template::formatted) - .orElseThrow(() -> new NotAuthorizedException(BASIC.trim())); - - configs.put(SaslConfigs.SASL_JAAS_CONFIG, jaasConfig); - } - - Optional getBasicAuthentication() { - return getAuthorization(BASIC) - .map(Base64.getDecoder()::decode) - .map(String::new) - .filter(authn -> authn.indexOf(':') >= 0) - .map(authn -> new String[] { - authn.substring(0, authn.indexOf(':')), - authn.substring(authn.indexOf(':') + 1) - }) - .filter(userPass -> !userPass[0].isEmpty() && !userPass[1].isEmpty()); - } - - Optional getAuthorization(String scheme) { - return Optional.ofNullable(headers.getHeaderString(HttpHeaders.AUTHORIZATION)) - .filter(header -> header.regionMatches(true, 0, scheme, 0, scheme.length())) - .map(header -> header.substring(scheme.length())); + configs.put(SaslConfigs.SASL_JAAS_CONFIG, credential.value()); } private static final Pattern BOUNDARY_QUOTES = Pattern.compile("(^[\"'])|([\"']$)"); diff --git a/api/src/main/java/com/github/streamshub/console/api/ConsumerGroupsResource.java b/api/src/main/java/com/github/streamshub/console/api/ConsumerGroupsResource.java index 813c55d04..6ef2d56a7 100644 --- a/api/src/main/java/com/github/streamshub/console/api/ConsumerGroupsResource.java +++ b/api/src/main/java/com/github/streamshub/console/api/ConsumerGroupsResource.java @@ -36,11 +36,14 @@ import com.github.streamshub.console.api.model.ConsumerGroup; import com.github.streamshub.console.api.model.ConsumerGroupFilterParams; import com.github.streamshub.console.api.model.ListFetchParams; +import com.github.streamshub.console.api.security.Authorized; +import com.github.streamshub.console.api.security.ResourcePrivilege; import com.github.streamshub.console.api.service.ConsumerGroupService; import com.github.streamshub.console.api.support.ErrorCategory; import com.github.streamshub.console.api.support.FieldFilter; import com.github.streamshub.console.api.support.ListRequestContext; import com.github.streamshub.console.api.support.StringEnumeration; +import com.github.streamshub.console.config.security.Privilege; import io.xlate.validation.constraints.Expression; @@ -67,6 +70,8 @@ public class ConsumerGroupsResource { @APIResponseSchema(ConsumerGroup.ListResponse.class) @APIResponse(responseCode = "500", ref = "ServerError") @APIResponse(responseCode = "504", ref = "ServerTimeout") + @Authorized + @ResourcePrivilege(Privilege.LIST) public CompletionStage listConsumerGroups( @Parameter(description = "Cluster identifier") @PathParam("clusterId") @@ -132,6 +137,8 @@ public CompletionStage listConsumerGroups( @APIResponse(responseCode = "404", ref = "NotFound") @APIResponse(responseCode = "500", ref = "ServerError") @APIResponse(responseCode = "504", ref = "ServerTimeout") + @Authorized + @ResourcePrivilege(Privilege.GET) public CompletionStage describeConsumerGroup( @Parameter(description = "Cluster identifier") @PathParam("clusterId") @@ -200,6 +207,8 @@ public CompletionStage describeConsumerGroup( node = { 
"data", "id" }, payload = ErrorCategory.InvalidResource.class, validationAppliesTo = ConstraintTarget.PARAMETERS) + @Authorized + @ResourcePrivilege(Privilege.UPDATE) public CompletionStage patchConsumerGroup( @Parameter(description = "Cluster identifier") @PathParam("clusterId") @@ -244,6 +253,8 @@ public CompletionStage patchConsumerGroup( @Path("{groupId}") @DELETE @APIResponseSchema(responseCode = "204", value = Void.class) + @Authorized + @ResourcePrivilege(Privilege.DELETE) public CompletionStage deleteConsumerGroup( @Parameter(description = "Cluster identifier") @PathParam("clusterId") diff --git a/api/src/main/java/com/github/streamshub/console/api/KafkaClustersResource.java b/api/src/main/java/com/github/streamshub/console/api/KafkaClustersResource.java index ee260d9f9..b84951ac6 100644 --- a/api/src/main/java/com/github/streamshub/console/api/KafkaClustersResource.java +++ b/api/src/main/java/com/github/streamshub/console/api/KafkaClustersResource.java @@ -32,11 +32,14 @@ import com.github.streamshub.console.api.model.KafkaCluster; import com.github.streamshub.console.api.model.ListFetchParams; +import com.github.streamshub.console.api.security.Authorized; +import com.github.streamshub.console.api.security.ResourcePrivilege; import com.github.streamshub.console.api.service.KafkaClusterService; import com.github.streamshub.console.api.support.ErrorCategory; import com.github.streamshub.console.api.support.FieldFilter; import com.github.streamshub.console.api.support.ListRequestContext; import com.github.streamshub.console.api.support.StringEnumeration; +import com.github.streamshub.console.config.security.Privilege; import io.xlate.validation.constraints.Expression; @@ -63,6 +66,8 @@ public class KafkaClustersResource { @APIResponseSchema(KafkaCluster.KafkaClusterDataList.class) @APIResponse(responseCode = "500", ref = "ServerError") @APIResponse(responseCode = "504", ref = "ServerTimeout") + @Authorized + @ResourcePrivilege(Privilege.LIST) public Response listClusters( @QueryParam(KafkaCluster.FIELDS_PARAM) @DefaultValue(KafkaCluster.Fields.LIST_DEFAULT) @@ -121,6 +126,8 @@ public Response listClusters( @APIResponse(responseCode = "404", ref = "NotFound") @APIResponse(responseCode = "500", ref = "ServerError") @APIResponse(responseCode = "504", ref = "ServerTimeout") + @Authorized + @ResourcePrivilege(Privilege.GET) public CompletionStage describeCluster( @Parameter(description = "Cluster identifier") @PathParam("clusterId") @@ -194,6 +201,8 @@ public CompletionStage describeCluster( node = { "data", "id" }, payload = ErrorCategory.InvalidResource.class, validationAppliesTo = ConstraintTarget.PARAMETERS) + @Authorized + @ResourcePrivilege(Privilege.UPDATE) public Response patchCluster( @Parameter(description = "Cluster identifier") @PathParam("clusterId") @@ -205,7 +214,7 @@ public Response patchCluster( // Return all fields requestedFields.accept(Arrays.asList(KafkaCluster.Fields.DESCRIBE_DEFAULT.split(",\\s*"))); - var result = clusterService.patchCluster(clusterId, clusterData.getData()); + var result = clusterService.patchCluster(clusterData.getData()); var responseEntity = new KafkaCluster.KafkaClusterData(result); return Response.ok(responseEntity).build(); diff --git a/api/src/main/java/com/github/streamshub/console/api/KafkaRebalancesResource.java b/api/src/main/java/com/github/streamshub/console/api/KafkaRebalancesResource.java index a70026149..244fa7e90 100644 --- a/api/src/main/java/com/github/streamshub/console/api/KafkaRebalancesResource.java +++ 
b/api/src/main/java/com/github/streamshub/console/api/KafkaRebalancesResource.java @@ -31,11 +31,14 @@ import com.github.streamshub.console.api.model.KafkaRebalance; import com.github.streamshub.console.api.model.KafkaRebalanceFilterParams; import com.github.streamshub.console.api.model.ListFetchParams; +import com.github.streamshub.console.api.security.Authorized; +import com.github.streamshub.console.api.security.ResourcePrivilege; import com.github.streamshub.console.api.service.KafkaRebalanceService; import com.github.streamshub.console.api.support.ErrorCategory; import com.github.streamshub.console.api.support.FieldFilter; import com.github.streamshub.console.api.support.ListRequestContext; import com.github.streamshub.console.api.support.StringEnumeration; +import com.github.streamshub.console.config.security.Privilege; import io.xlate.validation.constraints.Expression; @@ -62,6 +65,8 @@ public class KafkaRebalancesResource { @APIResponseSchema(KafkaRebalance.RebalanceDataList.class) @APIResponse(responseCode = "500", ref = "ServerError") @APIResponse(responseCode = "504", ref = "ServerTimeout") + @Authorized + @ResourcePrivilege(Privilege.LIST) public Response listRebalances( @Parameter(description = "Cluster identifier") @PathParam("clusterId") @@ -158,6 +163,8 @@ public Response listRebalances( node = { "data", "id" }, payload = ErrorCategory.InvalidResource.class, validationAppliesTo = ConstraintTarget.PARAMETERS) + @Authorized + @ResourcePrivilege(Privilege.UPDATE) public Response patchRebalance( @Parameter(description = "Cluster identifier") @PathParam("clusterId") diff --git a/api/src/main/java/com/github/streamshub/console/api/RecordsResource.java b/api/src/main/java/com/github/streamshub/console/api/RecordsResource.java index a16498521..15120d582 100644 --- a/api/src/main/java/com/github/streamshub/console/api/RecordsResource.java +++ b/api/src/main/java/com/github/streamshub/console/api/RecordsResource.java @@ -35,11 +35,14 @@ import com.github.streamshub.console.api.model.KafkaRecord; import com.github.streamshub.console.api.model.RecordFilterParams; +import com.github.streamshub.console.api.security.Authorized; +import com.github.streamshub.console.api.security.ResourcePrivilege; import com.github.streamshub.console.api.service.RecordService; import com.github.streamshub.console.api.support.ErrorCategory; import com.github.streamshub.console.api.support.FieldFilter; import com.github.streamshub.console.api.support.KafkaUuid; import com.github.streamshub.console.api.support.StringEnumeration; +import com.github.streamshub.console.config.security.Privilege; @Path("/api/kafkas/{clusterId}/topics/{topicId}/records") @Tag(name = "Kafka Cluster Resources") @@ -72,6 +75,8 @@ public class RecordsResource { @APIResponse(responseCode = "404", ref = "NotFound") @APIResponse(responseCode = "500", ref = "ServerError") @APIResponse(responseCode = "504", ref = "ServerTimeout") + @Authorized + @ResourcePrivilege(Privilege.LIST) public CompletionStage consumeRecords( @Parameter(description = "Cluster identifier") @PathParam("clusterId") @@ -154,6 +159,8 @@ public CompletionStage consumeRecords( @APIResponse(responseCode = "404", ref = "NotFound") @APIResponse(responseCode = "500", ref = "ServerError") @APIResponse(responseCode = "504", ref = "ServerTimeout") + @Authorized + @ResourcePrivilege(Privilege.CREATE) public CompletionStage produceRecord( @Parameter(description = "Cluster identifier") @PathParam("clusterId") diff --git 
a/api/src/main/java/com/github/streamshub/console/api/errors/client/ForbiddenExceptionHandler.java b/api/src/main/java/com/github/streamshub/console/api/errors/client/ForbiddenExceptionHandler.java new file mode 100644 index 000000000..800249665 --- /dev/null +++ b/api/src/main/java/com/github/streamshub/console/api/errors/client/ForbiddenExceptionHandler.java @@ -0,0 +1,34 @@ +package com.github.streamshub.console.api.errors.client; + +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.ws.rs.ForbiddenException; +import jakarta.ws.rs.core.Response; +import jakarta.ws.rs.ext.Provider; + +import com.github.streamshub.console.api.model.ErrorResponse; +import com.github.streamshub.console.api.support.ErrorCategory; + +@Provider +@ApplicationScoped +public class ForbiddenExceptionHandler extends AbstractClientExceptionHandler { + + public ForbiddenExceptionHandler() { + super(ErrorCategory.NotAuthorized.class, "Insufficient permissions to resource or action", (String) null); + } + + @Override + public boolean handlesException(Throwable thrown) { + return thrown instanceof ForbiddenException; + } + + @Override + public Response toResponse(ForbiddenException exception) { + var responseBuilder = Response.status(category.getHttpStatus()) + .entity(new ErrorResponse(buildErrors(exception))); + + exception.getResponse().getHeaders().forEach((k, v) -> + responseBuilder.header(k, exception.getResponse().getHeaderString(k))); + + return responseBuilder.build(); + } +} \ No newline at end of file diff --git a/api/src/main/java/com/github/streamshub/console/api/model/ConsumerGroup.java b/api/src/main/java/com/github/streamshub/console/api/model/ConsumerGroup.java index 793f0adb7..851f1a1de 100644 --- a/api/src/main/java/com/github/streamshub/console/api/model/ConsumerGroup.java +++ b/api/src/main/java/com/github/streamshub/console/api/model/ConsumerGroup.java @@ -34,7 +34,8 @@ @JsonFilter("fieldFilter") public class ConsumerGroup { - public static final String FIELDS_PARAM = "fields[consumerGroups]"; + public static final String API_TYPE = "consumerGroups"; + public static final String FIELDS_PARAM = "fields[" + API_TYPE + "]"; public static final class Fields { public static final String STATE = "state"; @@ -118,7 +119,7 @@ public ConsumerGroupDocument(ConsumerGroup attributes) { payload = ErrorCategory.InvalidResource.class) @Expression( when = "self.type != null", - value = "self.type == 'consumerGroups'", + value = "self.type == '" + API_TYPE + "'", message = "resource type conflicts with operation", node = "type", payload = ErrorCategory.ResourceConflict.class) @@ -135,7 +136,7 @@ public ConsumerGroupResource(String id, String type, ConsumerGroup attributes) { * Used by list and describe */ public ConsumerGroupResource(ConsumerGroup attributes) { - super(attributes.groupId, "consumerGroups", attributes); + super(attributes.groupId, API_TYPE, attributes); if (attributes.errors != null) { addMeta("errors", attributes.errors); diff --git a/api/src/main/java/com/github/streamshub/console/api/model/Topic.java b/api/src/main/java/com/github/streamshub/console/api/model/Topic.java index 84ddd26ce..cac361886 100644 --- a/api/src/main/java/com/github/streamshub/console/api/model/Topic.java +++ b/api/src/main/java/com/github/streamshub/console/api/model/Topic.java @@ -33,7 +33,8 @@ @Schema(name = "Topic") public class Topic extends RelatableResource { - public static final String FIELDS_PARAM = "fields[topics]"; + public static final String API_TYPE = "topics"; + public static final String 
FIELDS_PARAM = "fields[" + API_TYPE + "]"; public static final class Fields { public static final String NAME = "name"; @@ -227,7 +228,7 @@ static class Relationships { } public Topic(String name, boolean internal, String id) { - super(id, "topics", new Attributes(name, internal), new Relationships()); + super(id, API_TYPE, new Attributes(name, internal), new Relationships()); } public static Topic fromTopicListing(org.apache.kafka.clients.admin.TopicListing listing) { diff --git a/api/src/main/java/com/github/streamshub/console/api/security/AuthorizationInterceptor.java b/api/src/main/java/com/github/streamshub/console/api/security/AuthorizationInterceptor.java new file mode 100644 index 000000000..c698e4dc5 --- /dev/null +++ b/api/src/main/java/com/github/streamshub/console/api/security/AuthorizationInterceptor.java @@ -0,0 +1,47 @@ +package com.github.streamshub.console.api.security; + +import jakarta.annotation.Priority; +import jakarta.enterprise.context.Dependent; +import jakarta.inject.Inject; +import jakarta.interceptor.AroundInvoke; +import jakarta.interceptor.Interceptor; +import jakarta.interceptor.InvocationContext; +import jakarta.ws.rs.ForbiddenException; +import jakarta.ws.rs.core.UriInfo; + +import org.jboss.logging.Logger; + +import io.quarkus.security.identity.SecurityIdentity; + +@Authorized +@Priority(1) +@Interceptor +@Dependent +public class AuthorizationInterceptor { + + @Inject + Logger logger; + + @Inject + SecurityIdentity securityIdentity; + + @Inject + UriInfo requestUri; + + @AroundInvoke + Object authorize(InvocationContext context) throws Exception { + ResourcePrivilege authz = context.getMethod().getAnnotation(ResourcePrivilege.class); + String resourcePath = requestUri.getPath().substring("/api/".length()); + var requiredPermission = new ConsolePermission(resourcePath, authz.value()); + boolean allow = securityIdentity.checkPermission(requiredPermission) + .subscribeAsCompletionStage() + .get(); + + if (!allow) { + throw new ForbiddenException("Access denied"); + } + + return context.proceed(); + } + +} diff --git a/api/src/main/java/com/github/streamshub/console/api/security/Authorized.java b/api/src/main/java/com/github/streamshub/console/api/security/Authorized.java new file mode 100644 index 000000000..4fc482335 --- /dev/null +++ b/api/src/main/java/com/github/streamshub/console/api/security/Authorized.java @@ -0,0 +1,18 @@ +package com.github.streamshub.console.api.security; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +import jakarta.interceptor.InterceptorBinding; + +/** + * Binding annotation to mark methods that should be intercepted by the + * {@link AuthorizationInterceptor}. 
+ */ +@InterceptorBinding +@Retention(RetentionPolicy.RUNTIME) +@Target({ ElementType.TYPE, ElementType.METHOD }) +public @interface Authorized { +} diff --git a/api/src/main/java/com/github/streamshub/console/api/security/ConsoleAuthenticationMechanism.java b/api/src/main/java/com/github/streamshub/console/api/security/ConsoleAuthenticationMechanism.java new file mode 100644 index 000000000..c28355b79 --- /dev/null +++ b/api/src/main/java/com/github/streamshub/console/api/security/ConsoleAuthenticationMechanism.java @@ -0,0 +1,403 @@ +package com.github.streamshub.console.api.security; + +import java.io.IOException; +import java.security.Permission; +import java.security.Principal; +import java.util.ArrayList; +import java.util.Base64; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import java.util.Set; +import java.util.function.BiFunction; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import java.util.stream.Stream; + +import jakarta.annotation.Priority; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.enterprise.inject.Alternative; +import jakarta.inject.Inject; +import jakarta.ws.rs.core.HttpHeaders; + +import org.apache.kafka.clients.admin.Admin; +import org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule; +import org.jboss.logging.Logger; +import org.jose4j.jwt.JwtClaims; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.github.streamshub.console.api.ClientFactory; +import com.github.streamshub.console.api.model.Error; +import com.github.streamshub.console.api.model.ErrorResponse; +import com.github.streamshub.console.api.support.ErrorCategory; +import com.github.streamshub.console.api.support.KafkaContext; +import com.github.streamshub.console.config.ConsoleConfig; +import com.github.streamshub.console.config.security.Privilege; +import com.github.streamshub.console.config.security.SecurityConfig; +import com.github.streamshub.console.config.security.SubjectConfig; + +import io.quarkus.oidc.runtime.OidcAuthenticationMechanism; +import io.quarkus.oidc.runtime.OidcJwtCallerPrincipal; +import io.quarkus.security.AuthenticationFailedException; +import io.quarkus.security.credential.Credential; +import io.quarkus.security.identity.IdentityProviderManager; +import io.quarkus.security.identity.SecurityIdentity; +import io.quarkus.security.identity.request.AnonymousAuthenticationRequest; +import io.quarkus.security.identity.request.AuthenticationRequest; +import io.quarkus.security.identity.request.TokenAuthenticationRequest; +import io.quarkus.security.identity.request.UsernamePasswordAuthenticationRequest; +import io.quarkus.security.runtime.QuarkusPrincipal; +import io.quarkus.security.runtime.QuarkusSecurityIdentity; +import io.quarkus.vertx.http.runtime.security.ChallengeData; +import io.quarkus.vertx.http.runtime.security.HttpAuthenticationMechanism; +import io.smallrye.mutiny.Uni; +import io.vertx.core.MultiMap; +import io.vertx.ext.web.RoutingContext; + +@Alternative +@Priority(1) +@ApplicationScoped +public class ConsoleAuthenticationMechanism implements HttpAuthenticationMechanism { + + public static final String OAUTHBEARER = OAuthBearerLoginModule.OAUTHBEARER_MECHANISM; + public static final String PLAIN = "PLAIN"; + public static final String SCRAM_SHA256 = "SCRAM-SHA-256"; + public static final String SCRAM_SHA512 = "SCRAM-SHA-512"; + + private static final String BEARER = "Bearer "; + private static final String 
BASIC = "Basic "; + + private static class Audit extends java.util.logging.Level { + private static final long serialVersionUID = 1L; + + Audit() { + super("AUDIT", java.util.logging.Level.INFO.intValue() - 1); + } + } + + private static final java.util.logging.Level AUDIT = new Audit(); + + private static final SecurityIdentity ANONYMOUS = QuarkusSecurityIdentity.builder() + .setAnonymous(true) + .setPrincipal(new QuarkusPrincipal("ANONYMOUS")) + .build(); + + @Inject + Logger log; + + @Inject + ObjectMapper mapper; + + @Inject + ConsoleConfig consoleConfig; + + @Inject + Map contexts; + + @Inject + OidcAuthenticationMechanism oidc; + + boolean oidcEnabled() { + return Objects.nonNull(consoleConfig.getSecurity().getOidc()); + } + + @Override + public Uni authenticate(RoutingContext context, IdentityProviderManager identityProviderManager) { + if (oidcEnabled()) { + return oidc.authenticate(context, identityProviderManager) + .map(identity -> { + if (identity != null) { + String clusterId = getClusterId(context); + var ctx = clusterId != null ? contexts.get(clusterId) : null; + return createIdentity(ctx, identity); + } + throw new AuthenticationFailedException(); + }); + } + + String clusterId = getClusterId(context); + + if (clusterId == null) { + return Uni.createFrom().item(createAnonymousIdentity(null)); + } + + var ctx = contexts.get(clusterId); + + if (ctx == null) { + return Uni.createFrom().item(createAnonymousIdentity(null)); + } + + String saslMechanism = ctx.saslMechanism(Admin.class); + + if (ctx.admin() != null || saslMechanism.isEmpty()) { + return Uni.createFrom().item(createAnonymousIdentity(ctx)); + } + + var identity = createIdentity(ctx, context.request().headers(), saslMechanism); + + if (identity != null) { + return Uni.createFrom().item(identity); + } + + return Uni.createFrom().failure(new AuthenticationFailedException()); + } + + @Override + public Uni sendChallenge(RoutingContext context) { + return getChallenge(context).map(challengeData -> { + if (challengeData == null) { + return false; + } + + var response = context.response(); + response.setStatusCode(challengeData.status); + + if (challengeData.headerName != null) { + response.headers().set(challengeData.headerName, challengeData.headerContent); + } + + try { + response.send(mapper.writeValueAsString(((PayloadChallengeData) challengeData).payload)); + } catch (IOException e) { + log.warnf(e, "Failed to serialize challenge response body: %s", e.getMessage()); + } + + return true; + }); + } + + @Override + public Uni getChallenge(RoutingContext context) { + if (oidcEnabled()) { + return oidc.getChallenge(context) + .map(data -> { + var category = ErrorCategory.get(ErrorCategory.NotAuthenticated.class); + Error error = category.createError("Authentication credentials missing or invalid", null, null); + var responseBody = new ErrorResponse(List.of(error)); + return new PayloadChallengeData(data, responseBody); + }); + } + + String clusterId = getClusterId(context); + + if (clusterId == null) { + return Uni.createFrom().nullItem(); + } + + var ctx = contexts.get(clusterId); + + if (ctx == null) { + return Uni.createFrom().nullItem(); + } + + String saslMechanism = ctx.saslMechanism(Admin.class); + String scheme = getAuthorizationScheme(saslMechanism); + ChallengeData challenge; + + if (scheme != null) { + var category = ErrorCategory.get(ErrorCategory.NotAuthenticated.class); + Error error = category.createError("Authentication credentials missing or invalid", null, null); + var responseBody = new 
ErrorResponse(List.of(error)); + challenge = new PayloadChallengeData(401, "WWW-Authenticate", scheme, responseBody); + } else { + log.warnf("Access not permitted to cluster %s with unknown SASL mechanism '%s'", + clusterId, saslMechanism); + var category = ErrorCategory.get(ErrorCategory.ResourceNotFound.class); + Error error = category.createError(ClientFactory.NO_SUCH_KAFKA_MESSAGE.formatted(clusterId), null, null); + var responseBody = new ErrorResponse(List.of(error)); + challenge = new PayloadChallengeData(404, null, null, responseBody); + } + + return Uni.createFrom().item(challenge); + } + + @Override + public Set> getCredentialTypes() { + if (oidcEnabled()) { + return oidc.getCredentialTypes(); + } + + return Set.of( + AnonymousAuthenticationRequest.class, + TokenAuthenticationRequest.class, + UsernamePasswordAuthenticationRequest.class + ); + } + + String getClusterId(RoutingContext context) { + Pattern p = Pattern.compile("/api/kafkas/([^/]+)(?:/.*)?"); + Matcher m = p.matcher(context.normalizedPath()); + if (m.matches()) { + return m.group(1); + } + return null; + } + + String getAuthorizationScheme(String saslMechanism) { + switch (saslMechanism) { + case OAUTHBEARER: + return BEARER.trim(); + case PLAIN, SCRAM_SHA256, SCRAM_SHA512: + return BASIC.trim(); + default: + return null; + } + } + + SecurityIdentity createAnonymousIdentity(KafkaContext ctx) { + return createIdentity(ctx, ANONYMOUS); + } + + SecurityIdentity createIdentity(KafkaContext ctx, SecurityIdentity source) { + var builder = QuarkusSecurityIdentity.builder(source); + addRoleChecker(ctx, builder, source.getPrincipal()); + return builder.build(); + } + + SecurityIdentity createIdentity(KafkaContext ctx, MultiMap headers, String saslMechanism) { + switch (saslMechanism) { + case OAUTHBEARER: + return createOAuthIdentity(ctx, headers); + case PLAIN: + return createBasicIdentity(ctx, headers, SaslJaasConfigCredential::forPlainLogin); + case SCRAM_SHA256, SCRAM_SHA512: + return createBasicIdentity(ctx, headers, SaslJaasConfigCredential::forScramLogin); + default: + return null; + } + } + + SecurityIdentity createOAuthIdentity(KafkaContext ctx, MultiMap headers) { + return getAuthorization(headers, BEARER) + .map(accessToken -> { + var builder = QuarkusSecurityIdentity.builder(); + builder.addCredential(SaslJaasConfigCredential.forOAuthLogin(accessToken)); + Principal principal; + + try { + var claims = JwtClaims.parse(accessToken); + principal = new OidcJwtCallerPrincipal(claims, null); + } catch (Exception e) { + log.infof("JWT access token could not be parsed: %s", e.getMessage()); + principal = new QuarkusPrincipal("UNKNOWN"); + } + + builder.setPrincipal(principal); + addRoleChecker(ctx, builder, principal); + return builder.build(); + }) + .orElse(null); + } + + SecurityIdentity createBasicIdentity(KafkaContext ctx, MultiMap headers, BiFunction credentialBuilder) { + return getBasicAuthentication(headers) + .map(userpass -> { + var builder = QuarkusSecurityIdentity.builder(); + var principal = new QuarkusPrincipal(userpass[0]); + builder.addCredential(credentialBuilder.apply(userpass[0], userpass[1])); + builder.setPrincipal(principal); + addRoleChecker(ctx, builder, principal); + return builder.build(); + }) + .orElse(null); + } + + void addRoleChecker(KafkaContext ctx, QuarkusSecurityIdentity.Builder builder, Principal principal) { + Stream globalSubjects = consoleConfig.getSecurity().getSubjects().stream(); + Stream clusterSubjects = Stream.empty(); + + if (ctx != null) { + clusterSubjects = 
ctx.clusterConfig().getSecurity().getSubjects().stream(); + } + + List roleNames = Stream.concat(clusterSubjects, globalSubjects) + .filter(sub -> Objects.isNull(sub.getIssuer()) /* or issuer matches `iss` claim */) + .filter(sub -> Objects.isNull(sub.getClaim()) /* only without OIDC */) + .filter(sub -> sub.getInclude().contains(principal.getName())) + .flatMap(sub -> sub.getRoleNames().stream()) + .distinct() + .toList(); + + Stream globalPermissions = getPermissions(consoleConfig.getSecurity(), roleNames, ""); + Stream clusterPermissions = Stream.empty(); + + if (ctx != null) { + clusterPermissions = getPermissions( + ctx.clusterConfig().getSecurity(), + roleNames, + "kafkas/" + ctx.clusterId() + '/' + ); + } + + List possessedPermissions = Stream.concat(globalPermissions, clusterPermissions).toList(); + + builder.addPermissionChecker(requiredPermission -> { + boolean allowed = possessedPermissions + .stream() + .anyMatch(possessed -> possessed.implies(requiredPermission)); + + String category = getClass().getPackageName() + (allowed ? ".ALLOW" : ".DENY"); + + java.util.logging.Logger.getLogger(category).log(AUDIT, () -> { + return String.format("Principal %s %s access to %s", principal.getName(), allowed ? "allowed" : "denied", requiredPermission); + }); + + return Uni.createFrom().item(allowed); + }); + } + + Stream getPermissions(SecurityConfig security, Collection roleNames, String resourcePrefix) { + return security.getRoles() + .stream() + .filter(role -> roleNames.contains(role.getName())) + .flatMap(role -> role.getRules().stream()) + .flatMap(rule -> { + List rulePermissions = new ArrayList<>(); + Privilege[] actions = rule.getPrivileges().toArray(Privilege[]::new); + + for (var resource : rule.getResources()) { + rulePermissions.add(new ConsolePermission( + resourcePrefix + resource, + rule.getResourceNames(), + actions + )); + } + + return rulePermissions.stream(); + }); + } + + Optional getBasicAuthentication(MultiMap headers) { + return getAuthorization(headers, BASIC) + .map(Base64.getDecoder()::decode) + .map(String::new) + .filter(authn -> authn.indexOf(':') >= 0) + .map(authn -> new String[] { + authn.substring(0, authn.indexOf(':')), + authn.substring(authn.indexOf(':') + 1) + }) + .filter(userPass -> !userPass[0].isEmpty() && !userPass[1].isEmpty()); + } + + Optional getAuthorization(MultiMap headers, String scheme) { + return Optional.ofNullable(headers.get(HttpHeaders.AUTHORIZATION)) + .filter(header -> header.regionMatches(true, 0, scheme, 0, scheme.length())) + .map(header -> header.substring(scheme.length())); + } + + static class PayloadChallengeData extends ChallengeData { + public final Object payload; + + public PayloadChallengeData(int status, CharSequence headerName, String headerContent, Object payload) { + super(status, headerName, headerContent); + this.payload = payload; + } + + public PayloadChallengeData(ChallengeData data, Object payload) { + super(data.status, data.headerName, data.headerContent); + this.payload = payload; + } + } +} diff --git a/api/src/main/java/com/github/streamshub/console/api/security/ConsolePermission.java b/api/src/main/java/com/github/streamshub/console/api/security/ConsolePermission.java new file mode 100644 index 000000000..406faa161 --- /dev/null +++ b/api/src/main/java/com/github/streamshub/console/api/security/ConsolePermission.java @@ -0,0 +1,172 @@ +package com.github.streamshub.console.api.security; + +import java.security.Permission; +import java.util.Collection; +import java.util.Collections; +import 
java.util.HashSet; +import java.util.Objects; +import java.util.Set; +import java.util.stream.Collectors; + +import com.github.streamshub.console.config.security.Privilege; + +public class ConsolePermission extends Permission { + + private static final long serialVersionUID = 1L; + public static final String ACTIONS_SEPARATOR = ","; + + private String resource; + private Collection resourceNames; + private final Set actions; + + public ConsolePermission(String resource, Privilege... actions) { + super("console"); + this.resource = resource; + this.resourceNames = Collections.emptySet(); + this.actions = checkActions(actions); + } + + public ConsolePermission(String resource, Collection resourceNames, Privilege... actions) { + super("console"); + this.resource = resource; + this.resourceNames = resourceNames; + this.actions = checkActions(actions); + } + + private static Set checkActions(Privilege[] actions) { + Set validActions = new HashSet<>(actions.length, 1); + for (Privilege action : actions) { + validActions.add(validateAndTrim(action, "Action")); + } + return Collections.unmodifiableSet(validActions); + } + + private static Privilege validateAndTrim(Privilege action, String paramName) { + if (action == null) { + throw new IllegalArgumentException(String.format("%s must not be null", paramName)); + } + + return action; + } + + public String resource() { + return resource; + } + + public ConsolePermission resource(String resource) { + this.resource = resource; + return this; + } + + public ConsolePermission resourceName(String resourceName) { + this.resourceNames = Collections.singleton(resourceName); + return this; + } + + @Override + public boolean implies(Permission other) { + if (other instanceof ConsolePermission requiredPermission) { + if (!getName().equals(requiredPermission.getName())) { + return false; + } + + return implies(requiredPermission); + } else { + return false; + } + } + + boolean implies(ConsolePermission requiredPermission) { + if (resourceDenied(requiredPermission)) { + return false; + } + + // actions are optional, however if at least one action was specified, + // an intersection of compared sets must not be empty + if (requiredPermission.actions.isEmpty()) { + // no required actions + return true; + } + + if (actions.isEmpty()) { + // no possessed actions + return false; + } + + if (actions.contains(Privilege.ALL)) { + // all actions possessed + return true; + } + + for (Privilege action : requiredPermission.actions) { + if (actions.contains(action)) { + // has at least one of required actions + return true; + } + } + + return false; + } + + boolean resourceDenied(ConsolePermission requiredPermission) { + if (!requiredPermission.resource.startsWith(resource)) { + return true; + } + + if (requiredPermission.resource.equals(resource)) { + if (!requiredPermission.resourceNames.isEmpty() + && !resourceNames.isEmpty() + && requiredPermission.resourceNames.stream().noneMatch(resourceNames::contains)) { + return true; + } + } else if (requiredPermission.resourceNames.isEmpty() && !resourceNames.isEmpty()) { + boolean matches = false; + for (String name : resourceNames) { + String fullName = resource + '/' + name; + if (fullName.equals(requiredPermission.resource)) { + matches = true; + } + } + if (!matches) { + return true; + } + } else { + return true; + } + + return false; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (!(obj instanceof ConsolePermission other)) { + return false; + } + + return 
getName().equals(other.getName()) + && resource.equals(other.resource) + && actions.equals(other.actions); + } + + @Override + public int hashCode() { + return Objects.hash(getName(), resource, actions); + } + + @Override + public String toString() { + return getName() + ":" + resource() + ":" + resourceNames + ":" + actions; + } + + /** + * @return null if no actions were specified, or actions joined together with the {@link #ACTIONS_SEPARATOR} + */ + @Override + public String getActions() { + return actions.isEmpty() ? null : actions.stream().map(Enum::name).collect(Collectors.joining(ACTIONS_SEPARATOR)); + } +} diff --git a/api/src/main/java/com/github/streamshub/console/api/security/OidcTenantConfigResolver.java b/api/src/main/java/com/github/streamshub/console/api/security/OidcTenantConfigResolver.java new file mode 100644 index 000000000..8c9542baf --- /dev/null +++ b/api/src/main/java/com/github/streamshub/console/api/security/OidcTenantConfigResolver.java @@ -0,0 +1,49 @@ +package com.github.streamshub.console.api.security; + +import java.time.Duration; +import java.util.List; + +import jakarta.annotation.PostConstruct; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.inject.Inject; + +import com.github.streamshub.console.config.ConsoleConfig; + +import io.quarkus.oidc.OidcRequestContext; +import io.quarkus.oidc.OidcTenantConfig; +import io.quarkus.oidc.TenantConfigResolver; +import io.smallrye.mutiny.Uni; +import io.vertx.ext.web.RoutingContext; + +/** + * This class is discovered and used by the Quarkus OIDC framework. The purpose + * is to create an OIDC tenant from the ConsoleConfig (sourced from YAML) that + * is provided to the console by the user directly or via the operator. + */ +@ApplicationScoped +public class OidcTenantConfigResolver implements TenantConfigResolver { + + @Inject + ConsoleConfig consoleConfig; + + OidcTenantConfig oidcConfig; + + @PostConstruct + void initialize() { + oidcConfig = new OidcTenantConfig(); + var oidc = consoleConfig.getSecurity().getOidc(); + + oidcConfig.setTenantId(oidc.getTenantId()); + oidcConfig.setDiscoveryEnabled(true); + oidcConfig.setAuthServerUrl(oidc.getAuthServerUrl()); + oidcConfig.setRoles(OidcTenantConfig.Roles.fromClaimPath(List.of("groups"))); + oidcConfig.getToken().setForcedJwkRefreshInterval(Duration.ofSeconds(5)); + } + + @Override + public Uni resolve(RoutingContext routingContext, + OidcRequestContext requestContext) { + return Uni.createFrom().item(oidcConfig); + } + +} diff --git a/api/src/main/java/com/github/streamshub/console/api/security/PermissionService.java b/api/src/main/java/com/github/streamshub/console/api/security/PermissionService.java new file mode 100644 index 000000000..ebc770ee8 --- /dev/null +++ b/api/src/main/java/com/github/streamshub/console/api/security/PermissionService.java @@ -0,0 +1,68 @@ +package com.github.streamshub.console.api.security; + +import java.util.List; +import java.util.Set; +import java.util.function.Function; +import java.util.function.Predicate; + +import jakarta.enterprise.context.RequestScoped; +import jakarta.inject.Inject; +import jakarta.ws.rs.ForbiddenException; + +import com.github.streamshub.console.api.model.ConsumerGroup; +import com.github.streamshub.console.api.model.KafkaRebalance; +import com.github.streamshub.console.api.model.KafkaRecord; +import com.github.streamshub.console.api.model.Topic; +import com.github.streamshub.console.api.support.KafkaContext; +import com.github.streamshub.console.config.security.Privilege; + +import 
io.quarkus.security.identity.SecurityIdentity; + +@RequestScoped +public class PermissionService { + + private static final Set KAFKA_SUBRESOURCES = Set.of( + ConsumerGroup.API_TYPE, + KafkaRebalance.API_TYPE, + // Records are a sub-resource of topics + Topic.API_TYPE + '/' + KafkaRecord.API_TYPE, + Topic.API_TYPE); + + @Inject + SecurityIdentity securityIdentity; + + @Inject + KafkaContext kafkaContext; + + private String resolveResource(String resource) { + if (KAFKA_SUBRESOURCES.contains(resource)) { + resource = "kafkas/" + kafkaContext.clusterId() + '/' + resource; + } + return resource; + } + + public Predicate permitted(String resource, Privilege privilege, Function name) { + ConsolePermission required = new ConsolePermission(resolveResource(resource), privilege); + + return (T item) -> { + required.resourceName(name.apply(item)); + return securityIdentity.checkPermissionBlocking(required); + }; + } + + public boolean permitted(String resource, Privilege privilege, String name) { + ConsolePermission required = new ConsolePermission(resolveResource(resource), List.of(name), privilege); + return securityIdentity.checkPermissionBlocking(required); + } + + public void assertPermitted(String resource, Privilege privilege, String name) { + if (!permitted(resource, privilege, name)) { + throw forbidden(resource, privilege, name); + } + } + + public ForbiddenException forbidden(String resource, Privilege privilege, String name) { + return new ForbiddenException("Access denied: resource={%s} privilege:{%s}, resourceName:{%s}" + .formatted(resource, privilege, name)); + } +} diff --git a/api/src/main/java/com/github/streamshub/console/api/security/ResourcePrivilege.java b/api/src/main/java/com/github/streamshub/console/api/security/ResourcePrivilege.java new file mode 100644 index 000000000..da592bb32 --- /dev/null +++ b/api/src/main/java/com/github/streamshub/console/api/security/ResourcePrivilege.java @@ -0,0 +1,20 @@ +package com.github.streamshub.console.api.security; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +import com.github.streamshub.console.config.security.Privilege; + +/** + * Method annotation used by the {@link AuthorizationInterceptor} to declare + * the privilege a principal must be granted to execute the annotated method. 
+ */ +@Retention(RetentionPolicy.RUNTIME) +@Target(ElementType.METHOD) +public @interface ResourcePrivilege { + + Privilege value() default Privilege.ALL; + +} diff --git a/api/src/main/java/com/github/streamshub/console/api/security/SaslJaasConfigCredential.java b/api/src/main/java/com/github/streamshub/console/api/security/SaslJaasConfigCredential.java new file mode 100644 index 000000000..be87105b5 --- /dev/null +++ b/api/src/main/java/com/github/streamshub/console/api/security/SaslJaasConfigCredential.java @@ -0,0 +1,40 @@ +package com.github.streamshub.console.api.security; + +import org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule; +import org.apache.kafka.common.security.plain.PlainLoginModule; +import org.apache.kafka.common.security.scram.ScramLoginModule; + +import io.quarkus.security.credential.Credential; + +public class SaslJaasConfigCredential implements Credential { + + private static final String SASL_OAUTH_CONFIG_TEMPLATE = OAuthBearerLoginModule.class.getName() + + " required" + + " oauth.access.token=\"%s\" ;"; + + private static final String BASIC_TEMPLATE = "%s required username=\"%%s\" password=\"%%s\" ;"; + private static final String SASL_PLAIN_CONFIG_TEMPLATE = BASIC_TEMPLATE.formatted(PlainLoginModule.class.getName()); + private static final String SASL_SCRAM_CONFIG_TEMPLATE = BASIC_TEMPLATE.formatted(ScramLoginModule.class.getName()); + + public static SaslJaasConfigCredential forOAuthLogin(String accessToken) { + return new SaslJaasConfigCredential(SASL_OAUTH_CONFIG_TEMPLATE.formatted(accessToken)); + } + + public static SaslJaasConfigCredential forPlainLogin(String username, String password) { + return new SaslJaasConfigCredential(SASL_PLAIN_CONFIG_TEMPLATE.formatted(username, password)); + } + + public static SaslJaasConfigCredential forScramLogin(String username, String password) { + return new SaslJaasConfigCredential(SASL_SCRAM_CONFIG_TEMPLATE.formatted(username, password)); + } + + private final String value; + + private SaslJaasConfigCredential(String value) { + this.value = value; + } + + public String value() { + return value; + } +} diff --git a/api/src/main/java/com/github/streamshub/console/api/service/ConsumerGroupService.java b/api/src/main/java/com/github/streamshub/console/api/service/ConsumerGroupService.java index 075c81e10..2c384e8f1 100644 --- a/api/src/main/java/com/github/streamshub/console/api/service/ConsumerGroupService.java +++ b/api/src/main/java/com/github/streamshub/console/api/service/ConsumerGroupService.java @@ -51,6 +51,7 @@ import com.github.streamshub.console.api.model.PartitionId; import com.github.streamshub.console.api.model.PartitionInfo; import com.github.streamshub.console.api.model.Topic; +import com.github.streamshub.console.api.security.PermissionService; import com.github.streamshub.console.api.support.ConsumerGroupValidation; import com.github.streamshub.console.api.support.FetchFilterPredicate; import com.github.streamshub.console.api.support.KafkaContext; @@ -58,6 +59,7 @@ import com.github.streamshub.console.api.support.ListRequestContext; import com.github.streamshub.console.api.support.UnknownTopicIdPatch; import com.github.streamshub.console.api.support.ValidationProxy; +import com.github.streamshub.console.config.security.Privilege; @ApplicationScoped public class ConsumerGroupService { @@ -88,7 +90,10 @@ public class ConsumerGroupService { KafkaContext kafkaContext; @Inject - TopicService topicService; + PermissionService permissionService; + + @Inject + TopicDescribeService topicService; 
@Inject ValidationProxy validationService; @@ -111,7 +116,10 @@ public CompletionStage> listConsumerGroups(String topicId, L .exceptionally(error -> { throw (RuntimeException) UnknownTopicIdPatch.apply(error, CompletionException::new); }) - .thenComposeAsync(unused -> listConsumerGroupMembership(List.of(topicId)), asyncExec) + .thenComposeAsync(topic -> { + permissionService.assertPermitted(Topic.API_TYPE, Privilege.GET, topic.name()); + return listConsumerGroupMembership(List.of(topicId)); + }, asyncExec) .thenComposeAsync(topicGroups -> { if (topicGroups.containsKey(topicId)) { return listConsumerGroups(topicGroups.get(topicId), includes, listSupport); @@ -120,7 +128,9 @@ public CompletionStage> listConsumerGroups(String topicId, L }, asyncExec); } - CompletionStage> listConsumerGroups(List groupIds, List includes, ListRequestContext listSupport) { + private CompletionStage> listConsumerGroups(List groupIds, + List includes, ListRequestContext listSupport) { + Admin adminClient = kafkaContext.admin(); Set states = listSupport.filters() @@ -144,9 +154,9 @@ CompletionStage> listConsumerGroups(List groupIds, L .toCompletionStage() .thenApply(groups -> groups.stream() .filter(group -> groupIds.isEmpty() || groupIds.contains(group.groupId())) - .map(ConsumerGroup::fromKafkaModel) - .toList()) - .thenApply(list -> list.stream() + .filter(permissionService.permitted(ConsumerGroup.API_TYPE, Privilege.LIST, ConsumerGroupListing::groupId)) + .map(ConsumerGroup::fromKafkaModel)) + .thenApply(groups -> groups .filter(listSupport) .map(listSupport::tally) .filter(listSupport::betweenCursors) @@ -154,7 +164,9 @@ CompletionStage> listConsumerGroups(List groupIds, L .dropWhile(listSupport::beforePageBegin) .takeWhile(listSupport::pageCapacityAvailable) .toList()) - .thenCompose(groups -> augmentList(adminClient, groups, includes)); + .thenComposeAsync( + groups -> augmentList(adminClient, groups, includes), + threadContext.currentContextExecutor()); } public CompletionStage describeConsumerGroup(String requestGroupId, List includes) { @@ -162,7 +174,9 @@ public CompletionStage describeConsumerGroup(String requestGroupI String groupId = preprocessGroupId(requestGroupId); return assertConsumerGroupExists(adminClient, groupId) - .thenCompose(nothing -> describeConsumerGroups(adminClient, List.of(groupId), includes)) + .thenComposeAsync( + nothing -> describeConsumerGroups(adminClient, List.of(groupId), includes), + threadContext.currentContextExecutor()) .thenApply(groups -> groups.get(groupId)) .thenApply(result -> result.getOrThrow(CompletionException::new)); } @@ -174,13 +188,17 @@ public CompletionStage>> listConsumerGroupMembership(Co .inStates(Set.of( ConsumerGroupState.STABLE, ConsumerGroupState.PREPARING_REBALANCE, - ConsumerGroupState.COMPLETING_REBALANCE))) + ConsumerGroupState.COMPLETING_REBALANCE, + ConsumerGroupState.EMPTY))) .valid() .toCompletionStage() - .thenApply(groups -> groups.stream().map(ConsumerGroup::fromKafkaModel).toList()) - .thenCompose(groups -> augmentList(adminClient, groups, List.of( + .thenApply(groups -> groups.stream() + .filter(permissionService.permitted(ConsumerGroup.API_TYPE, Privilege.LIST, ConsumerGroupListing::groupId)) + .map(ConsumerGroup::fromKafkaModel).toList()) + .thenComposeAsync(groups -> augmentList(adminClient, groups, List.of( ConsumerGroup.Fields.MEMBERS, - ConsumerGroup.Fields.OFFSETS))) + ConsumerGroup.Fields.OFFSETS)), + threadContext.currentContextExecutor()) .thenApply(list -> list.stream() .map(group -> Map.entry( group.getGroupId(), @@ -226,7 
+244,10 @@ CompletionStage assertConsumerGroupExists(Admin adminClient, String groupI .all() .toCompletionStage() .thenAccept(listing -> { - if (listing.stream().map(ConsumerGroupListing::groupId).noneMatch(groupId::equals)) { + if (listing.stream() + .filter(permissionService.permitted(ConsumerGroup.API_TYPE, Privilege.GET, ConsumerGroupListing::groupId)) + .map(ConsumerGroupListing::groupId) + .noneMatch(groupId::equals)) { throw new GroupIdNotFoundException("No such consumer group: " + groupId); } }); @@ -341,7 +362,7 @@ CompletionStage> alterConsumerGroupOffsets(Admin adminCl CompletionStage alterConsumerGroupOffsetsDryRun(Admin adminClient, String groupId, Map alterRequest) { - var pendingTopicsIds = fetchTopicIdMap(adminClient); + var pendingTopicsIds = fetchTopicIdMap(); return describeConsumerGroups(adminClient, List.of(groupId), Collections.emptyList()) .thenApply(groups -> groups.get(groupId)) @@ -387,7 +408,7 @@ CompletableFuture alterConsumerGroupOffsets(Admin adminClient, String grou return allOf(offsetResults.values()); } - Map> getListOffsetsResults( + private Map> getListOffsetsResults( Set partitions, ListOffsetsResult topicOffsetsResult) { @@ -434,7 +455,7 @@ public CompletionStage deleteConsumerGroup(String requestGroupId) { .toCompletionStage(); } - CompletionStage> augmentList(Admin adminClient, List list, List includes) { + private CompletionStage> augmentList(Admin adminClient, List list, List includes) { Map groups = list.stream().collect(Collectors.toMap(ConsumerGroup::getGroupId, Function.identity())); CompletableFuture describePromise; @@ -450,7 +471,7 @@ CompletionStage> augmentList(Admin adminClient, List list); } - void mergeDescriptions(ConsumerGroup group, Either description) { + private void mergeDescriptions(ConsumerGroup group, Either description) { if (description.isPrimaryEmpty()) { Throwable thrown = description.getAlternate(); Error error = new Error("Unable to describe consumer group", thrown.getMessage(), thrown); @@ -464,14 +485,14 @@ void mergeDescriptions(ConsumerGroup group, Either des } } - CompletionStage>> describeConsumerGroups( + private CompletionStage>> describeConsumerGroups( Admin adminClient, Collection groupIds, List includes) { Map> result = new LinkedHashMap<>(groupIds.size()); - var pendingTopicsIds = fetchTopicIdMap(adminClient); + var pendingTopicsIds = fetchTopicIdMap(); var pendingDescribes = adminClient.describeConsumerGroups(groupIds, new DescribeConsumerGroupsOptions() @@ -513,13 +534,13 @@ CompletionStage>> describeConsumerG }); } - CompletableFuture> fetchTopicIdMap(Admin adminClient) { - return topicService.listTopics(adminClient, true) + private CompletableFuture> fetchTopicIdMap() { + return topicService.listTopics(true) .thenApply(topics -> topics.stream() .collect(Collectors.toMap(TopicListing::name, l -> l.topicId().toString()))); } - CompletableFuture fetchOffsets(Admin adminClient, Map groups, Map topicIds) { + private CompletableFuture fetchOffsets(Admin adminClient, Map groups, Map topicIds) { var groupOffsetsRequest = groups.keySet() .stream() .collect(Collectors.toMap(Function.identity(), key -> ALL_GROUP_PARTITIONS)); @@ -571,7 +592,7 @@ CompletableFuture fetchOffsets(Admin adminClient, Map topicIds, Map> topicOffsets, Map groupOffsets, @@ -612,7 +633,7 @@ void addOffsets(ConsumerGroup group, } } - static String preprocessGroupId(String groupId) { + private static String preprocessGroupId(String groupId) { return "+".equals(groupId) ? 
"" : groupId; } } diff --git a/api/src/main/java/com/github/streamshub/console/api/service/KafkaClusterService.java b/api/src/main/java/com/github/streamshub/console/api/service/KafkaClusterService.java index 316a14d90..7a115174a 100644 --- a/api/src/main/java/com/github/streamshub/console/api/service/KafkaClusterService.java +++ b/api/src/main/java/com/github/streamshub/console/api/service/KafkaClusterService.java @@ -30,10 +30,12 @@ import com.github.streamshub.console.api.model.KafkaCluster; import com.github.streamshub.console.api.model.KafkaListener; import com.github.streamshub.console.api.model.Node; +import com.github.streamshub.console.api.security.PermissionService; import com.github.streamshub.console.api.support.Holder; import com.github.streamshub.console.api.support.KafkaContext; import com.github.streamshub.console.api.support.ListRequestContext; import com.github.streamshub.console.config.ConsoleConfig; +import com.github.streamshub.console.config.security.Privilege; import io.fabric8.kubernetes.api.model.ObjectMeta; import io.fabric8.kubernetes.client.KubernetesClient; @@ -89,6 +91,9 @@ public class KafkaClusterService { */ KafkaContext kafkaContext; + @Inject + PermissionService permissionService; + boolean listUnconfigured = false; Predicate includeAll = k -> listUnconfigured; @@ -120,6 +125,7 @@ public List listClusters(ListRequestContext listSupp .toList(); return Stream.concat(configuredClusters.values().stream(), otherClusters.stream()) + .filter(permissionService.permitted(KafkaCluster.API_TYPE, Privilege.LIST, KafkaCluster::getId)) .map(listSupport::tally) .filter(listSupport::betweenCursors) .sorted(listSupport.getSortComparator()) @@ -151,7 +157,7 @@ public CompletionStage describeCluster(List fields) { .thenApply(this::setManaged); } - public KafkaCluster patchCluster(String id, KafkaCluster cluster) { + public KafkaCluster patchCluster(KafkaCluster cluster) { Kafka resource = kafkaContext.resource(); if (resource != null) { diff --git a/api/src/main/java/com/github/streamshub/console/api/service/RecordService.java b/api/src/main/java/com/github/streamshub/console/api/service/RecordService.java index 45f4dceb5..125547999 100644 --- a/api/src/main/java/com/github/streamshub/console/api/service/RecordService.java +++ b/api/src/main/java/com/github/streamshub/console/api/service/RecordService.java @@ -4,7 +4,6 @@ import java.time.Instant; import java.util.ArrayList; import java.util.Arrays; -import java.util.Collection; import java.util.Collections; import java.util.Comparator; import java.util.HashMap; @@ -27,8 +26,6 @@ import jakarta.enterprise.context.ApplicationScoped; import jakarta.inject.Inject; -import org.apache.kafka.clients.admin.ListTopicsOptions; -import org.apache.kafka.clients.admin.TopicListing; import org.apache.kafka.clients.consumer.Consumer; import org.apache.kafka.clients.consumer.ConsumerRecord; import org.apache.kafka.clients.consumer.ConsumerRecords; @@ -37,7 +34,6 @@ import org.apache.kafka.clients.producer.RecordMetadata; import org.apache.kafka.common.PartitionInfo; import org.apache.kafka.common.TopicPartition; -import org.apache.kafka.common.Uuid; import org.apache.kafka.common.errors.InvalidPartitionsException; import org.apache.kafka.common.errors.UnknownTopicIdException; import org.apache.kafka.common.header.Header; @@ -72,6 +68,9 @@ public class RecordService { @Inject ThreadContext threadContext; + @Inject + TopicDescribeService topicService; + public CompletionStage> consumeRecords(String topicId, Integer partition, Long offset, 
@@ -227,18 +226,8 @@ Optional schemaMeta(JsonApiRelationship schemaRelationship, String key) } CompletionStage topicNameForId(String topicId) { - Uuid kafkaTopicId = Uuid.fromString(topicId); - - return kafkaContext.admin() - .listTopics(new ListTopicsOptions().listInternal(true)) - .listings() - .toCompletionStage() - .thenApply(Collection::stream) - .thenApply(listings -> listings - .filter(topic -> kafkaTopicId.equals(topic.topicId())) - .findFirst() - .map(TopicListing::name) - .orElseThrow(() -> noSuchTopic(topicId))); + return topicService.topicNameForId(topicId) + .thenApply(topic -> topic.orElseThrow(() -> noSuchTopic(topicId))); } void seekToTimestamp(Consumer consumer, List assignments, Instant timestamp) { diff --git a/api/src/main/java/com/github/streamshub/console/api/service/TopicDescribeService.java b/api/src/main/java/com/github/streamshub/console/api/service/TopicDescribeService.java new file mode 100644 index 000000000..1a69d4649 --- /dev/null +++ b/api/src/main/java/com/github/streamshub/console/api/service/TopicDescribeService.java @@ -0,0 +1,510 @@ +package com.github.streamshub.console.api.service; + +import java.time.Instant; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CompletionException; +import java.util.concurrent.CompletionStage; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Function; +import java.util.function.Predicate; +import java.util.regex.Pattern; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.inject.Inject; +import jakarta.inject.Named; + +import org.apache.kafka.clients.admin.Admin; +import org.apache.kafka.clients.admin.DescribeLogDirsOptions; +import org.apache.kafka.clients.admin.DescribeTopicsOptions; +import org.apache.kafka.clients.admin.ListOffsetsOptions; +import org.apache.kafka.clients.admin.ListOffsetsResult.ListOffsetsResultInfo; +import org.apache.kafka.clients.admin.ListTopicsOptions; +import org.apache.kafka.clients.admin.OffsetSpec; +import org.apache.kafka.clients.admin.TopicListing; +import org.apache.kafka.common.TopicCollection; +import org.apache.kafka.common.Uuid; +import org.apache.kafka.common.config.ConfigResource; +import org.eclipse.microprofile.context.ThreadContext; +import org.jboss.logging.Logger; + +import com.github.streamshub.console.api.model.Either; +import com.github.streamshub.console.api.model.Identifier; +import com.github.streamshub.console.api.model.OffsetInfo; +import com.github.streamshub.console.api.model.PartitionId; +import com.github.streamshub.console.api.model.PartitionInfo; +import com.github.streamshub.console.api.model.ReplicaLocalStorage; +import com.github.streamshub.console.api.model.Topic; +import com.github.streamshub.console.api.security.PermissionService; +import com.github.streamshub.console.api.support.KafkaContext; +import com.github.streamshub.console.api.support.KafkaOffsetSpec; +import com.github.streamshub.console.api.support.ListRequestContext; +import com.github.streamshub.console.api.support.UnknownTopicIdPatch; +import com.github.streamshub.console.config.security.Privilege; + +import io.fabric8.kubernetes.api.model.ObjectMeta; +import 
io.fabric8.kubernetes.client.KubernetesClient; +import io.strimzi.api.kafka.model.kafka.Kafka; +import io.strimzi.api.kafka.model.topic.KafkaTopic; + +@ApplicationScoped +class TopicDescribeService { + + private static final List DEFAULT_OFFSET_SPECS = + List.of(OffsetSpec.earliest(), OffsetSpec.latest(), OffsetSpec.maxTimestamp()); + private static final Predicate CONFIG_SORT = + Pattern.compile("^-?configs\\..+$").asMatchPredicate(); + private static final Set REQUIRE_DESCRIBE = Set.of( + Topic.Fields.PARTITIONS, + Topic.Fields.NUM_PARTITIONS, + Topic.Fields.AUTHORIZED_OPERATIONS, + Topic.Fields.TOTAL_LEADER_LOG_BYTES, + Topic.Fields.STATUS); + private static final Set REQUIRE_PARTITIONS = Set.of( + Topic.Fields.PARTITIONS, + Topic.Fields.NUM_PARTITIONS, + Topic.Fields.TOTAL_LEADER_LOG_BYTES, + Topic.Fields.STATUS); + + @Inject + Logger logger; + + /** + * ThreadContext of the request thread. This is used to execute asynchronous + * tasks to allow access to request-scoped beans such as an injected + * {@linkplain Admin Admin client} + */ + @Inject + ThreadContext threadContext; + + @Inject + KafkaContext kafkaContext; + + @Inject + @Named("KafkaTopics") + Map>> managedTopics; + + @Inject + KubernetesClient k8s; + + @Inject + PermissionService permissionService; + + @Inject + ConfigService configService; + + @Inject + ConsumerGroupService consumerGroupService; + + public CompletionStage> listTopics(List fields, String offsetSpec, ListRequestContext listSupport) { + List fetchList = new ArrayList<>(fields); + + if (listSupport.getSortEntries().stream().anyMatch(CONFIG_SORT)) { + fetchList.add(Topic.Fields.CONFIGS); + } + + Admin adminClient = kafkaContext.admin(); + final Map statuses = new HashMap<>(); + final AtomicInteger partitionCount = new AtomicInteger(0); + + listSupport.meta().put("summary", Map.of( + "statuses", statuses, + "totalPartitions", partitionCount)); + + return listTopics(true) + .thenApply(list -> list.stream().map(Topic::fromTopicListing).toList()) + .thenComposeAsync( + list -> augmentList(adminClient, list, fetchList, offsetSpec), + threadContext.currentContextExecutor()) + .thenApply(list -> list.stream() + .filter(listSupport) + .map(topic -> tallySummary(statuses, partitionCount, topic)) + .map(listSupport::tally) + .filter(listSupport::betweenCursors) + .sorted(listSupport.getSortComparator()) + .dropWhile(listSupport::beforePageBegin) + .takeWhile(listSupport::pageCapacityAvailable)) + .thenApplyAsync( + topics -> topics.map(this::setManaged).toList(), + threadContext.currentContextExecutor()); + } + + private Topic tallySummary(Map statuses, AtomicInteger partitionCount, Topic topic) { + statuses.compute(topic.status(), (k, v) -> v == null ? 
1 : v + 1); + + Integer numPartitions = topic.getAttributes().numPartitions(); + //numPartitions may be null if it was not included in the requested fields + if (numPartitions != null) { + partitionCount.addAndGet(numPartitions); + } + + return topic; + } + + CompletableFuture> listTopics(boolean listInternal) { + Admin adminClient = kafkaContext.admin(); + + return adminClient + .listTopics(new ListTopicsOptions().listInternal(listInternal)) + .listings() + .toCompletionStage() + .thenApplyAsync(topics -> topics.stream() + .filter(permissionService.permitted(Topic.API_TYPE, Privilege.LIST, TopicListing::name)) + .toList(), threadContext.currentContextExecutor()) + .toCompletableFuture(); + } + + CompletionStage> topicNameForId(String topicId) { + Uuid kafkaTopicId = Uuid.fromString(topicId); + + return listTopics(true) + .thenApply(listings -> listings.stream() + .filter(topic -> kafkaTopicId.equals(topic.topicId())) + .findFirst() + .map(TopicListing::name)); + } + + public CompletionStage describeTopic(String topicId, List fields, String offsetSpec) { + Admin adminClient = kafkaContext.admin(); + Uuid id = Uuid.fromString(topicId); + + CompletableFuture describePromise = describeTopics(adminClient, List.of(id), fields, offsetSpec) + .thenApply(result -> result.get(id)) + .thenApply(result -> result.getOrThrow(CompletionException::new)) + .thenApplyAsync(this::setManaged, threadContext.currentContextExecutor()) + .toCompletableFuture(); + + return describePromise.thenComposeAsync(topic -> { + var topics = Map.of(id, topic); + + return CompletableFuture.allOf( + maybeDescribeConfigs(adminClient, topics, fields), + maybeFetchConsumerGroups(topics, fields)) + .thenApply(nothing -> topic); + }, threadContext.currentContextExecutor()); + } + + Topic setManaged(Topic topic) { + topic.addMeta("managed", getManagedTopic(topic.name()) + .map(kafkaTopic -> Boolean.TRUE) + .orElse(Boolean.FALSE)); + return topic; + } + + Optional getManagedTopic(String topicName) { + return Optional.ofNullable(kafkaContext.resource()) + .map(Kafka::getMetadata) + .flatMap(kafkaMeta -> Optional.ofNullable(managedTopics.get(kafkaMeta.getNamespace())) + .map(clustersInNamespace -> clustersInNamespace.get(kafkaMeta.getName())) + .map(topicsInCluster -> topicsInCluster.get(topicName)) + .filter(this::isManaged)); + } + + boolean isManaged(KafkaTopic topic) { + return Optional.of(topic) + .map(KafkaTopic::getMetadata) + .map(ObjectMeta::getAnnotations) + .map(annotations -> annotations.getOrDefault("strimzi.io/managed", "true")) + .map(managed -> !"false".equals(managed)) + .orElse(true); + } + + CompletionStage> augmentList(Admin adminClient, List list, List fields, String offsetSpec) { + Map topics = list.stream().collect(Collectors.toMap(t -> Uuid.fromString(t.getId()), Function.identity())); + CompletableFuture configPromise = maybeDescribeConfigs(adminClient, topics, fields); + CompletableFuture describePromise = maybeDescribeTopics(adminClient, topics, fields, offsetSpec); + CompletableFuture consumerGroupPromise = maybeFetchConsumerGroups(topics, fields); + + return CompletableFuture.allOf(configPromise, describePromise, consumerGroupPromise) + .thenApply(nothing -> list); + } + + CompletableFuture maybeDescribeConfigs(Admin adminClient, Map topics, List fields) { + if (fields.contains(Topic.Fields.CONFIGS)) { + Map topicIds = new HashMap<>(); + List keys = topics.values().stream() + .map(topic -> { + topicIds.put(topic.name(), Uuid.fromString(topic.getId())); + return topic.name(); + }) + .map(name -> new 
ConfigResource(ConfigResource.Type.TOPIC, name)) + .toList(); + + return configService.describeConfigs(adminClient, keys) + .thenAccept(configs -> + configs.forEach((name, either) -> topics.get(topicIds.get(name)).addConfigs(either))) + .toCompletableFuture(); + } + + return CompletableFuture.completedFuture(null); + } + + private CompletableFuture maybeDescribeTopics(Admin adminClient, Map topics, List fields, String offsetSpec) { + if (REQUIRE_DESCRIBE.stream().anyMatch(fields::contains)) { + return describeTopics(adminClient, topics.keySet(), fields, offsetSpec) + .thenApply(descriptions -> { + descriptions.forEach((id, either) -> { + if (REQUIRE_PARTITIONS.stream().anyMatch(fields::contains)) { + topics.get(id).addPartitions(either); + } + if (fields.contains(Topic.Fields.AUTHORIZED_OPERATIONS)) { + topics.get(id).addAuthorizedOperations(either); + } + }); + + return null; + }) + .toCompletableFuture(); + } + + return CompletableFuture.completedFuture(null); + } + + private CompletableFuture maybeFetchConsumerGroups(Map topics, List fields) { + CompletionStage>> pendingConsumerGroups; + + if (fields.contains(Topic.Fields.CONSUMER_GROUPS)) { + var topicIds = topics.keySet().stream().map(Uuid::toString).toList(); + pendingConsumerGroups = consumerGroupService.listConsumerGroupMembership(topicIds); + } else { + pendingConsumerGroups = CompletableFuture.completedStage(Collections.emptyMap()); + } + + return pendingConsumerGroups.thenAccept(consumerGroups -> + consumerGroups.entrySet() + .stream() + .forEach(e -> { + Topic topic = topics.get(Uuid.fromString(e.getKey())); + var identifiers = e.getValue().stream().map(g -> new Identifier("consumerGroups", g)).toList(); + topic.consumerGroups().data().addAll(identifiers); + topic.consumerGroups().addMeta("count", identifiers.size()); + })) + .toCompletableFuture(); + } + + /* package */ CompletionStage>> describeTopics( + Admin adminClient, + Collection topicIds, + List fields, + String offsetSpec) { + + Map> result = new LinkedHashMap<>(topicIds.size()); + TopicCollection request = TopicCollection.ofTopicIds(topicIds); + DescribeTopicsOptions options = new DescribeTopicsOptions() + .includeAuthorizedOperations(fields.contains(Topic.Fields.AUTHORIZED_OPERATIONS)); + + var pendingDescribes = adminClient.describeTopics(request, options) + .topicIdValues() + .entrySet() + .stream() + .map(entry -> + entry.getValue().toCompletionStage().handleAsync((description, error) -> { + error = UnknownTopicIdPatch.apply(error, Function.identity()); + + if (error == null && !permissionService.permitted(Topic.API_TYPE, Privilege.GET, description.name())) { + error = permissionService.forbidden(Topic.API_TYPE, Privilege.GET, description.name()); + } + + result.put( + entry.getKey(), + Either.of(description, error, Topic::fromTopicDescription)); + return null; + }, threadContext.currentContextExecutor())) + .map(CompletionStage::toCompletableFuture) + .toArray(CompletableFuture[]::new); + + return CompletableFuture.allOf(pendingDescribes) + .thenCompose(nothing -> CompletableFuture.allOf( + listOffsets(adminClient, result, offsetSpec).toCompletableFuture(), + describeLogDirs(adminClient, result).toCompletableFuture() + )) + .thenApply(nothing -> result); + } + + private CompletionStage listOffsets(Admin adminClient, Map> topics, String offsetSpec) { + Map topicIds = new HashMap<>(topics.size()); + var onlineTopics = topics.entrySet() + .stream() + .filter(topic -> topic.getValue() + .getOptionalPrimary() + .map(Topic::partitionsOnline) + .orElse(false)) + 
.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + + var pendingOffsets = getRequestOffsetSpecs(offsetSpec) + .stream() + .map(reqOffsetSpec -> topicPartitionLeaders(onlineTopics, topicIds) + .keySet() + .stream() + .collect(Collectors.toMap(Function.identity(), ignored -> reqOffsetSpec))) + .flatMap(request -> listOffsets(adminClient, onlineTopics, topicIds, request)) + .map(CompletionStage::toCompletableFuture) + .toArray(CompletableFuture[]::new); + + return CompletableFuture.allOf(pendingOffsets); + } + + private List getRequestOffsetSpecs(String offsetSpec) { + List specs = new ArrayList<>(DEFAULT_OFFSET_SPECS); + + // Never null, defaults to latest + switch (offsetSpec) { // NOSONAR + case KafkaOffsetSpec.EARLIEST, KafkaOffsetSpec.LATEST, KafkaOffsetSpec.MAX_TIMESTAMP: + break; + default: + specs.add(OffsetSpec.forTimestamp(Instant.parse(offsetSpec).toEpochMilli())); + break; + } + + return specs; + } + + /** + * Build of map of {@linkplain PartitionId}s to the partition leader node ID. + * Concurrently, a map of topic names to topic identifiers is constructed to + * support cross referencing the {@linkplain PartitionId} keys (via + * {@linkplain PartitionId#topicId()}) back to the topic's {@linkplain Uuid}. + * This allows easy access of the topics located in the topics map provided to + * this method and is particularly useful for Kafka operations that still + * require topic name. + * + * @param topics map of topics (keyed by Id) + * @param topicIds map of topic names to topic Ids, modified by this method + * @return map of {@linkplain PartitionId}s to the partition leader node ID + */ + private Map topicPartitionLeaders(Map> topics, Map topicIds) { + return topics.entrySet() + .stream() + .filter(entry -> entry.getValue().isPrimaryPresent()) + .map(entry -> { + var topic = entry.getValue().getPrimary(); + topicIds.put(topic.name(), entry.getKey()); + return topic; + }) + .filter(topic -> topic.partitions().isPrimaryPresent()) + .flatMap(topic -> topic.partitions().getPrimary() + .stream() + .filter(PartitionInfo::online) + .map(partition -> { + var key = new PartitionId(topic.getId(), topic.name(), partition.getPartition()); + return Map.entry(key, partition.getLeaderId()); + })) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + } + + private String getOffsetKey(OffsetSpec spec) { + if (spec instanceof OffsetSpec.EarliestSpec) { + return KafkaOffsetSpec.EARLIEST; + } + if (spec instanceof OffsetSpec.LatestSpec) { + return KafkaOffsetSpec.LATEST; + } + if (spec instanceof OffsetSpec.MaxTimestampSpec) { + return KafkaOffsetSpec.MAX_TIMESTAMP; + } + return "timestamp"; + } + + private Stream> listOffsets( + Admin adminClient, + Map> topics, + Map topicIds, + Map request) { + + var kafkaRequest = request.entrySet() + .stream() + .map(e -> Map.entry(e.getKey().toKafkaModel(), e.getValue())) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + var result = adminClient.listOffsets(kafkaRequest, new ListOffsetsOptions() + .timeoutMs(5000)); + + return kafkaRequest.entrySet() + .stream() + .map(entry -> result.partitionResult(entry.getKey()) + .toCompletionStage() + .handle((offsetResult, error) -> { + addOffset(topics.get(topicIds.get(entry.getKey().topic())).getPrimary(), + entry.getKey().partition(), + getOffsetKey(entry.getValue()), + offsetResult, + error); + return null; + })); + + } + + private void addOffset(Topic topic, int partitionNo, String key, ListOffsetsResultInfo result, Throwable error) { + topic.partitions() + 
.getPrimary() + .stream() + .filter(partition -> partition.getPartition() == partitionNo) + .findFirst() + .ifPresent(partition -> partition.addOffset(key, either(result, error))); + } + + private Either either(ListOffsetsResultInfo result, Throwable error) { + Function transformer = offsetInfo -> { + Instant timestamp = offsetInfo.timestamp() != -1 ? Instant.ofEpochMilli(offsetInfo.timestamp()) : null; + return new OffsetInfo(offsetInfo.offset(), timestamp, offsetInfo.leaderEpoch().orElse(null)); + }; + + return Either.of(result, error, transformer); + } + + private CompletionStage describeLogDirs(Admin adminClient, Map> topics) { + Map topicIds = new HashMap<>(topics.size()); + + var topicPartitionReplicas = topicPartitionLeaders(topics, topicIds); + var nodeIds = topicPartitionReplicas.values().stream().distinct().toList(); + var logDirs = adminClient.describeLogDirs(nodeIds, new DescribeLogDirsOptions() + .timeoutMs(5000)) + .descriptions(); + + var pendingInfo = topicPartitionReplicas.entrySet() + .stream() + .map(e -> { + var topicPartition = e.getKey().toKafkaModel(); + int nodeId = e.getValue(); + var partitionInfo = topics.get(topicIds.get(topicPartition.topic())) + .getPrimary() + .partitions() + .getPrimary() + .stream() + .filter(p -> p.getPartition() == topicPartition.partition()) + .findFirst(); + + return logDirs.get(nodeId).toCompletionStage().handle((nodeLogDirs, error) -> { + if (error != null) { + partitionInfo.ifPresent(p -> p.setReplicaLocalStorage(nodeId, Either.ofAlternate(error))); + } else { + nodeLogDirs.values() + .stream() + .map(dir -> dir.replicaInfos()) + .map(replicas -> replicas.get(topicPartition)) + .filter(Objects::nonNull) + .map(org.apache.kafka.clients.admin.ReplicaInfo.class::cast) + .map(ReplicaLocalStorage::fromKafkaModel) + .forEach(replicaInfo -> partitionInfo.ifPresent(p -> p.setReplicaLocalStorage(nodeId, Either.of(replicaInfo)))); + } + + return null; + }); + }) + .map(CompletionStage::toCompletableFuture) + .toArray(CompletableFuture[]::new); + + return CompletableFuture.allOf(pendingInfo); + } + +} diff --git a/api/src/main/java/com/github/streamshub/console/api/service/TopicService.java b/api/src/main/java/com/github/streamshub/console/api/service/TopicService.java index 6d04de918..fd5f56303 100644 --- a/api/src/main/java/com/github/streamshub/console/api/service/TopicService.java +++ b/api/src/main/java/com/github/streamshub/console/api/service/TopicService.java @@ -1,92 +1,52 @@ package com.github.streamshub.console.api.service; -import java.time.Instant; import java.util.ArrayList; -import java.util.Collection; import java.util.Collections; -import java.util.HashMap; -import java.util.LinkedHashMap; import java.util.List; import java.util.Map; -import java.util.Objects; import java.util.Optional; -import java.util.Set; import java.util.concurrent.CompletableFuture; import java.util.concurrent.CompletionException; import java.util.concurrent.CompletionStage; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.function.Function; import java.util.function.Predicate; -import java.util.regex.Pattern; import java.util.stream.Collectors; import java.util.stream.IntStream; -import java.util.stream.Stream; import jakarta.enterprise.context.ApplicationScoped; import jakarta.inject.Inject; -import jakarta.inject.Named; import org.apache.kafka.clients.admin.Admin; import org.apache.kafka.clients.admin.CreatePartitionsOptions; import org.apache.kafka.clients.admin.CreateTopicsOptions; import 
org.apache.kafka.clients.admin.CreateTopicsResult; -import org.apache.kafka.clients.admin.DescribeLogDirsOptions; -import org.apache.kafka.clients.admin.DescribeTopicsOptions; -import org.apache.kafka.clients.admin.ListOffsetsOptions; -import org.apache.kafka.clients.admin.ListOffsetsResult.ListOffsetsResultInfo; -import org.apache.kafka.clients.admin.ListTopicsOptions; import org.apache.kafka.clients.admin.NewPartitionReassignment; -import org.apache.kafka.clients.admin.OffsetSpec; -import org.apache.kafka.clients.admin.TopicListing; import org.apache.kafka.common.TopicCollection; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.Uuid; import org.apache.kafka.common.config.ConfigResource; +import org.apache.kafka.common.errors.UnknownTopicIdException; import org.eclipse.microprofile.context.ThreadContext; import org.jboss.logging.Logger; -import com.github.streamshub.console.api.model.Either; -import com.github.streamshub.console.api.model.Identifier; import com.github.streamshub.console.api.model.NewTopic; -import com.github.streamshub.console.api.model.OffsetInfo; -import com.github.streamshub.console.api.model.PartitionId; -import com.github.streamshub.console.api.model.PartitionInfo; -import com.github.streamshub.console.api.model.ReplicaLocalStorage; import com.github.streamshub.console.api.model.Topic; import com.github.streamshub.console.api.model.TopicPatch; +import com.github.streamshub.console.api.security.PermissionService; import com.github.streamshub.console.api.support.KafkaContext; import com.github.streamshub.console.api.support.KafkaOffsetSpec; import com.github.streamshub.console.api.support.ListRequestContext; import com.github.streamshub.console.api.support.TopicValidation; -import com.github.streamshub.console.api.support.UnknownTopicIdPatch; import com.github.streamshub.console.api.support.ValidationProxy; +import com.github.streamshub.console.config.security.Privilege; -import io.fabric8.kubernetes.api.model.ObjectMeta; import io.fabric8.kubernetes.client.KubernetesClient; import io.strimzi.api.kafka.model.kafka.Kafka; -import io.strimzi.api.kafka.model.topic.KafkaTopic; import static org.apache.kafka.clients.admin.NewPartitions.increaseTo; @ApplicationScoped public class TopicService { - private static final List DEFAULT_OFFSET_SPECS = - List.of(OffsetSpec.earliest(), OffsetSpec.latest(), OffsetSpec.maxTimestamp()); - private static final Predicate CONFIG_SORT = - Pattern.compile("^-?configs\\..+$").asMatchPredicate(); - private static final Set REQUIRE_DESCRIBE = Set.of( - Topic.Fields.PARTITIONS, - Topic.Fields.NUM_PARTITIONS, - Topic.Fields.AUTHORIZED_OPERATIONS, - Topic.Fields.TOTAL_LEADER_LOG_BYTES, - Topic.Fields.STATUS); - private static final Set REQUIRE_PARTITIONS = Set.of( - Topic.Fields.PARTITIONS, - Topic.Fields.NUM_PARTITIONS, - Topic.Fields.TOTAL_LEADER_LOG_BYTES, - Topic.Fields.STATUS); - @Inject Logger logger; @@ -105,19 +65,19 @@ public class TopicService { KafkaContext kafkaContext; @Inject - @Named("KafkaTopics") - Map>> managedTopics; + KubernetesClient k8s; @Inject - KubernetesClient k8s; + PermissionService permissionService; @Inject ConfigService configService; @Inject - ConsumerGroupService consumerGroupService; + TopicDescribeService topicDescribe; public CompletionStage createTopic(NewTopic topic, boolean validateOnly) { + permissionService.assertPermitted(Topic.API_TYPE, Privilege.CREATE, topic.name()); Kafka kafka = kafkaContext.resource(); Admin adminClient = kafkaContext.admin(); @@ -156,77 +116,11 @@ public 
CompletionStage createTopic(NewTopic topic, boolean validateOnl } public CompletionStage> listTopics(List fields, String offsetSpec, ListRequestContext listSupport) { - List fetchList = new ArrayList<>(fields); - - if (listSupport.getSortEntries().stream().anyMatch(CONFIG_SORT)) { - fetchList.add(Topic.Fields.CONFIGS); - } - - Admin adminClient = kafkaContext.admin(); - final Map statuses = new HashMap<>(); - final AtomicInteger partitionCount = new AtomicInteger(0); - - listSupport.meta().put("summary", Map.of( - "statuses", statuses, - "totalPartitions", partitionCount)); - - return listTopics(adminClient, true) - .thenApply(list -> list.stream().map(Topic::fromTopicListing).toList()) - .thenComposeAsync( - list -> augmentList(adminClient, list, fetchList, offsetSpec), - threadContext.currentContextExecutor()) - .thenApply(list -> list.stream() - .filter(listSupport) - .map(topic -> tallySummary(statuses, partitionCount, topic)) - .map(listSupport::tally) - .filter(listSupport::betweenCursors) - .sorted(listSupport.getSortComparator()) - .dropWhile(listSupport::beforePageBegin) - .takeWhile(listSupport::pageCapacityAvailable)) - .thenApplyAsync( - topics -> topics.map(this::setManaged).toList(), - threadContext.currentContextExecutor()); - } - - Topic tallySummary(Map statuses, AtomicInteger partitionCount, Topic topic) { - statuses.compute(topic.status(), (k, v) -> v == null ? 1 : v + 1); - - Integer numPartitions = topic.getAttributes().numPartitions(); - //numPartitions may be null if it was not included in the requested fields - if (numPartitions != null) { - partitionCount.addAndGet(numPartitions); - } - - return topic; - } - - CompletableFuture> listTopics(Admin adminClient, boolean listInternal) { - return adminClient - .listTopics(new ListTopicsOptions().listInternal(listInternal)) - .listings() - .thenApply(topics -> topics.stream().toList()) - .toCompletionStage() - .toCompletableFuture(); + return topicDescribe.listTopics(fields, offsetSpec, listSupport); } public CompletionStage describeTopic(String topicId, List fields, String offsetSpec) { - Admin adminClient = kafkaContext.admin(); - Uuid id = Uuid.fromString(topicId); - - CompletableFuture describePromise = describeTopics(adminClient, List.of(id), fields, offsetSpec) - .thenApply(result -> result.get(id)) - .thenApply(result -> result.getOrThrow(CompletionException::new)) - .thenApplyAsync(this::setManaged, threadContext.currentContextExecutor()) - .toCompletableFuture(); - - return describePromise.thenComposeAsync(topic -> { - var topics = Map.of(id, topic); - - return CompletableFuture.allOf( - maybeDescribeConfigs(adminClient, topics, fields), - maybeFetchConsumerGroups(topics, fields)) - .thenApply(nothing -> topic); - }, threadContext.currentContextExecutor()); + return topicDescribe.describeTopic(topicId, fields, offsetSpec); } /** @@ -244,16 +138,37 @@ public CompletionStage patchTopic(String topicId, TopicPatch patch, boolea Kafka kafka = kafkaContext.resource(); return describeTopic(topicId, List.of(Topic.Fields.CONFIGS), KafkaOffsetSpec.LATEST) - .thenApply(topic -> validationService.validate(new TopicValidation.TopicPatchInputs(kafka, topic, patch))) + .thenApplyAsync(topic -> { + permissionService.assertPermitted(Topic.API_TYPE, Privilege.UPDATE, topic.name()); + return validationService.validate(new TopicValidation.TopicPatchInputs(kafka, topic, patch)); + }, threadContext.currentContextExecutor()) .thenApply(TopicValidation.TopicPatchInputs::topic) - .thenComposeAsync(topic -> getManagedTopic(topic.name()) + 
.thenComposeAsync(topic -> topicDescribe.getManagedTopic(topic.name()) .map(kafkaTopic -> patchManagedTopic()) .orElseGet(() -> patchUnmanagedTopic(topic, patch, validateOnly)), - threadContext.currentContextExecutor()); + threadContext.currentContextExecutor()); + } + + public CompletionStage deleteTopic(String topicId) { + Admin adminClient = kafkaContext.admin(); + Uuid id = Uuid.fromString(topicId); + + return topicDescribe.topicNameForId(topicId).thenComposeAsync(topicName -> { + if (topicName.isPresent()) { + permissionService.assertPermitted(Topic.API_TYPE, Privilege.DELETE, topicName.get()); + + return adminClient.deleteTopics(TopicCollection.ofTopicIds(List.of(id))) + .topicIdValues() + .get(id) + .toCompletionStage(); + } + + throw new UnknownTopicIdException("No such topic: " + topicId); + }, threadContext.currentContextExecutor()); } // Modifications disabled for now - CompletionStage patchManagedTopic(/*KafkaTopic topic, TopicPatch patch, boolean validateOnly*/) { + private CompletionStage patchManagedTopic(/*KafkaTopic topic, TopicPatch patch, boolean validateOnly*/) { return CompletableFuture.completedStage(null); // if (validateOnly) { // NOSONAR // return CompletableFuture.completedStage(null); @@ -282,7 +197,7 @@ CompletionStage patchManagedTopic(/*KafkaTopic topic, TopicPatch patch, bo // return CompletableFuture.runAsync(() -> k8s.resource(modifiedTopic).serverSideApply()); } - CompletionStage patchUnmanagedTopic(Topic topic, TopicPatch patch, boolean validateOnly) { + private CompletionStage patchUnmanagedTopic(Topic topic, TopicPatch patch, boolean validateOnly) { List> pending = new ArrayList<>(); pending.add(maybeCreatePartitions(topic, patch, validateOnly)); @@ -309,7 +224,7 @@ CompletionStage patchUnmanagedTopic(Topic topic, TopicPatch patch, boolean }); } - CompletableFuture maybeCreatePartitions(Topic topic, TopicPatch topicPatch, boolean validateOnly) { + private CompletableFuture maybeCreatePartitions(Topic topic, TopicPatch topicPatch, boolean validateOnly) { int currentNumPartitions = topic.partitions().getPrimary().size(); int newNumPartitions = Optional.ofNullable(topicPatch.numPartitions()).orElse(currentNumPartitions); @@ -326,7 +241,7 @@ CompletableFuture maybeCreatePartitions(Topic topic, TopicPatch topicPatch return CompletableFuture.completedFuture(null); } - CompletionStage createPartitions(String topicName, int totalCount, List> newAssignments, boolean validateOnly) { + private CompletionStage createPartitions(String topicName, int totalCount, List> newAssignments, boolean validateOnly) { Admin adminClient = kafkaContext.admin(); org.apache.kafka.clients.admin.NewPartitions newPartitions; @@ -345,7 +260,7 @@ CompletionStage createPartitions(String topicName, int totalCount, List
  • > maybeAlterPartitionAssignments(Topic topic, TopicPatch topicPatch) { + private List> maybeAlterPartitionAssignments(Topic topic, TopicPatch topicPatch) { int currentNumPartitions = topic.partitions().getPrimary().size(); var alteredAssignments = IntStream.range(0, currentNumPartitions) @@ -381,7 +296,7 @@ List> maybeAlterPartitionAssignments(Topic topic, TopicP .toList(); } - void logPartitionReassignments(Topic topic, + private void logPartitionReassignments(Topic topic, Map> alteredAssignments) { StringBuilder changes = new StringBuilder(); @@ -417,329 +332,11 @@ void logPartitionReassignments(Topic topic, changes); } - CompletableFuture maybeAlterConfigs(Topic topic, TopicPatch topicPatch, boolean validateOnly) { + private CompletableFuture maybeAlterConfigs(Topic topic, TopicPatch topicPatch, boolean validateOnly) { return Optional.ofNullable(topicPatch.configs()) .filter(Predicate.not(Map::isEmpty)) .map(configs -> configService.alterConfigs(ConfigResource.Type.TOPIC, topic.name(), configs, validateOnly) .toCompletableFuture()) .orElseGet(() -> CompletableFuture.completedFuture(null)); } - - public CompletionStage deleteTopic(String topicId) { - Admin adminClient = kafkaContext.admin(); - Uuid id = Uuid.fromString(topicId); - - return adminClient.deleteTopics(TopicCollection.ofTopicIds(List.of(id))) - .topicIdValues() - .get(id) - .toCompletionStage(); - } - - Topic setManaged(Topic topic) { - topic.addMeta("managed", getManagedTopic(topic.name()) - .map(kafkaTopic -> Boolean.TRUE) - .orElse(Boolean.FALSE)); - return topic; - } - - Optional getManagedTopic(String topicName) { - return Optional.ofNullable(kafkaContext.resource()) - .map(Kafka::getMetadata) - .flatMap(kafkaMeta -> Optional.ofNullable(managedTopics.get(kafkaMeta.getNamespace())) - .map(clustersInNamespace -> clustersInNamespace.get(kafkaMeta.getName())) - .map(topicsInCluster -> topicsInCluster.get(topicName)) - .filter(this::isManaged)); - } - - boolean isManaged(KafkaTopic topic) { - return Optional.of(topic) - .map(KafkaTopic::getMetadata) - .map(ObjectMeta::getAnnotations) - .map(annotations -> annotations.getOrDefault("strimzi.io/managed", "true")) - .map(managed -> !"false".equals(managed)) - .orElse(true); - } - - CompletionStage> augmentList(Admin adminClient, List list, List fields, String offsetSpec) { - Map topics = list.stream().collect(Collectors.toMap(t -> Uuid.fromString(t.getId()), Function.identity())); - CompletableFuture configPromise = maybeDescribeConfigs(adminClient, topics, fields); - CompletableFuture describePromise = maybeDescribeTopics(adminClient, topics, fields, offsetSpec); - CompletableFuture consumerGroupPromise = maybeFetchConsumerGroups(topics, fields); - - return CompletableFuture.allOf(configPromise, describePromise, consumerGroupPromise) - .thenApply(nothing -> list); - } - - CompletableFuture maybeDescribeConfigs(Admin adminClient, Map topics, List fields) { - if (fields.contains(Topic.Fields.CONFIGS)) { - Map topicIds = new HashMap<>(); - List keys = topics.values().stream() - .map(topic -> { - topicIds.put(topic.name(), Uuid.fromString(topic.getId())); - return topic.name(); - }) - .map(name -> new ConfigResource(ConfigResource.Type.TOPIC, name)) - .toList(); - - return configService.describeConfigs(adminClient, keys) - .thenAccept(configs -> - configs.forEach((name, either) -> topics.get(topicIds.get(name)).addConfigs(either))) - .toCompletableFuture(); - } - - return CompletableFuture.completedFuture(null); - } - - CompletableFuture maybeDescribeTopics(Admin adminClient, 
Map topics, List fields, String offsetSpec) { - if (REQUIRE_DESCRIBE.stream().anyMatch(fields::contains)) { - return describeTopics(adminClient, topics.keySet(), fields, offsetSpec) - .thenApply(descriptions -> { - descriptions.forEach((id, either) -> { - if (REQUIRE_PARTITIONS.stream().anyMatch(fields::contains)) { - topics.get(id).addPartitions(either); - } - if (fields.contains(Topic.Fields.AUTHORIZED_OPERATIONS)) { - topics.get(id).addAuthorizedOperations(either); - } - }); - - return null; - }) - .toCompletableFuture(); - } - - return CompletableFuture.completedFuture(null); - } - - CompletableFuture maybeFetchConsumerGroups(Map topics, List fields) { - CompletionStage>> pendingConsumerGroups; - - if (fields.contains(Topic.Fields.CONSUMER_GROUPS)) { - var topicIds = topics.keySet().stream().map(Uuid::toString).toList(); - pendingConsumerGroups = consumerGroupService.listConsumerGroupMembership(topicIds); - } else { - pendingConsumerGroups = CompletableFuture.completedStage(Collections.emptyMap()); - } - - return pendingConsumerGroups.thenAccept(consumerGroups -> - consumerGroups.entrySet() - .stream() - .forEach(e -> { - Topic topic = topics.get(Uuid.fromString(e.getKey())); - var identifiers = e.getValue().stream().map(g -> new Identifier("consumerGroups", g)).toList(); - topic.consumerGroups().data().addAll(identifiers); - topic.consumerGroups().addMeta("count", identifiers.size()); - })) - .toCompletableFuture(); - } - - CompletionStage>> describeTopics( - Admin adminClient, - Collection topicIds, - List fields, - String offsetSpec) { - - Map> result = new LinkedHashMap<>(topicIds.size()); - TopicCollection request = TopicCollection.ofTopicIds(topicIds); - DescribeTopicsOptions options = new DescribeTopicsOptions() - .includeAuthorizedOperations(fields.contains(Topic.Fields.AUTHORIZED_OPERATIONS)); - - var pendingDescribes = adminClient.describeTopics(request, options) - .topicIdValues() - .entrySet() - .stream() - .map(entry -> - entry.getValue().toCompletionStage().handle((description, error) -> { - error = UnknownTopicIdPatch.apply(error, Function.identity()); - - result.put( - entry.getKey(), - Either.of(description, error, Topic::fromTopicDescription)); - return null; - })) - .map(CompletionStage::toCompletableFuture) - .toArray(CompletableFuture[]::new); - - return CompletableFuture.allOf(pendingDescribes) - .thenCompose(nothing -> CompletableFuture.allOf( - listOffsets(adminClient, result, offsetSpec).toCompletableFuture(), - describeLogDirs(adminClient, result).toCompletableFuture() - )) - .thenApply(nothing -> result); - } - - CompletionStage listOffsets(Admin adminClient, Map> topics, String offsetSpec) { - Map topicIds = new HashMap<>(topics.size()); - var onlineTopics = topics.entrySet() - .stream() - .filter(topic -> topic.getValue() - .getOptionalPrimary() - .map(Topic::partitionsOnline) - .orElse(false)) - .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); - - var pendingOffsets = getRequestOffsetSpecs(offsetSpec) - .stream() - .map(reqOffsetSpec -> topicPartitionLeaders(onlineTopics, topicIds) - .keySet() - .stream() - .collect(Collectors.toMap(Function.identity(), ignored -> reqOffsetSpec))) - .flatMap(request -> listOffsets(adminClient, onlineTopics, topicIds, request)) - .map(CompletionStage::toCompletableFuture) - .toArray(CompletableFuture[]::new); - - return CompletableFuture.allOf(pendingOffsets); - } - - List getRequestOffsetSpecs(String offsetSpec) { - List specs = new ArrayList<>(DEFAULT_OFFSET_SPECS); - - // Never null, defaults to latest 
- switch (offsetSpec) { // NOSONAR - case KafkaOffsetSpec.EARLIEST, KafkaOffsetSpec.LATEST, KafkaOffsetSpec.MAX_TIMESTAMP: - break; - default: - specs.add(OffsetSpec.forTimestamp(Instant.parse(offsetSpec).toEpochMilli())); - break; - } - - return specs; - } - - /** - * Build of map of {@linkplain PartitionId}s to the partition leader node ID. - * Concurrently, a map of topic names to topic identifiers is constructed to - * support cross referencing the {@linkplain PartitionId} keys (via - * {@linkplain PartitionId#topicId()}) back to the topic's {@linkplain Uuid}. - * This allows easy access of the topics located in the topics map provided to - * this method and is particularly useful for Kafka operations that still - * require topic name. - * - * @param topics map of topics (keyed by Id) - * @param topicIds map of topic names to topic Ids, modified by this method - * @return map of {@linkplain PartitionId}s to the partition leader node ID - */ - Map topicPartitionLeaders(Map> topics, Map topicIds) { - return topics.entrySet() - .stream() - .filter(entry -> entry.getValue().isPrimaryPresent()) - .map(entry -> { - var topic = entry.getValue().getPrimary(); - topicIds.put(topic.name(), entry.getKey()); - return topic; - }) - .filter(topic -> topic.partitions().isPrimaryPresent()) - .flatMap(topic -> topic.partitions().getPrimary() - .stream() - .filter(PartitionInfo::online) - .map(partition -> { - var key = new PartitionId(topic.getId(), topic.name(), partition.getPartition()); - return Map.entry(key, partition.getLeaderId()); - })) - .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); - } - - String getOffsetKey(OffsetSpec spec) { - if (spec instanceof OffsetSpec.EarliestSpec) { - return KafkaOffsetSpec.EARLIEST; - } - if (spec instanceof OffsetSpec.LatestSpec) { - return KafkaOffsetSpec.LATEST; - } - if (spec instanceof OffsetSpec.MaxTimestampSpec) { - return KafkaOffsetSpec.MAX_TIMESTAMP; - } - return "timestamp"; - } - - Stream> listOffsets( - Admin adminClient, - Map> topics, - Map topicIds, - Map request) { - - var kafkaRequest = request.entrySet() - .stream() - .map(e -> Map.entry(e.getKey().toKafkaModel(), e.getValue())) - .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); - var result = adminClient.listOffsets(kafkaRequest, new ListOffsetsOptions() - .timeoutMs(5000)); - - return kafkaRequest.entrySet() - .stream() - .map(entry -> result.partitionResult(entry.getKey()) - .toCompletionStage() - .handle((offsetResult, error) -> { - addOffset(topics.get(topicIds.get(entry.getKey().topic())).getPrimary(), - entry.getKey().partition(), - getOffsetKey(entry.getValue()), - offsetResult, - error); - return null; - })); - - } - - void addOffset(Topic topic, int partitionNo, String key, ListOffsetsResultInfo result, Throwable error) { - topic.partitions() - .getPrimary() - .stream() - .filter(partition -> partition.getPartition() == partitionNo) - .findFirst() - .ifPresent(partition -> partition.addOffset(key, either(result, error))); - } - - Either either(ListOffsetsResultInfo result, Throwable error) { - Function transformer = offsetInfo -> { - Instant timestamp = offsetInfo.timestamp() != -1 ? 
Instant.ofEpochMilli(offsetInfo.timestamp()) : null; - return new OffsetInfo(offsetInfo.offset(), timestamp, offsetInfo.leaderEpoch().orElse(null)); - }; - - return Either.of(result, error, transformer); - } - - CompletionStage describeLogDirs(Admin adminClient, Map> topics) { - Map topicIds = new HashMap<>(topics.size()); - - var topicPartitionReplicas = topicPartitionLeaders(topics, topicIds); - var nodeIds = topicPartitionReplicas.values().stream().distinct().toList(); - var logDirs = adminClient.describeLogDirs(nodeIds, new DescribeLogDirsOptions() - .timeoutMs(5000)) - .descriptions(); - - var pendingInfo = topicPartitionReplicas.entrySet() - .stream() - .map(e -> { - var topicPartition = e.getKey().toKafkaModel(); - int nodeId = e.getValue(); - var partitionInfo = topics.get(topicIds.get(topicPartition.topic())) - .getPrimary() - .partitions() - .getPrimary() - .stream() - .filter(p -> p.getPartition() == topicPartition.partition()) - .findFirst(); - - return logDirs.get(nodeId).toCompletionStage().handle((nodeLogDirs, error) -> { - if (error != null) { - partitionInfo.ifPresent(p -> p.setReplicaLocalStorage(nodeId, Either.ofAlternate(error))); - } else { - nodeLogDirs.values() - .stream() - .map(dir -> dir.replicaInfos()) - .map(replicas -> replicas.get(topicPartition)) - .filter(Objects::nonNull) - .map(org.apache.kafka.clients.admin.ReplicaInfo.class::cast) - .map(ReplicaLocalStorage::fromKafkaModel) - .forEach(replicaInfo -> partitionInfo.ifPresent(p -> p.setReplicaLocalStorage(nodeId, Either.of(replicaInfo)))); - } - - return null; - }); - }) - .map(CompletionStage::toCompletableFuture) - .toArray(CompletableFuture[]::new); - - return CompletableFuture.allOf(pendingInfo); - } } diff --git a/api/src/main/resources/application.properties b/api/src/main/resources/application.properties index dfc2213b5..ecc61421a 100644 --- a/api/src/main/resources/application.properties +++ b/api/src/main/resources/application.properties @@ -1,7 +1,7 @@ quarkus.http.access-log.enabled=true quarkus.http.record-request-start-time=true # Default access-log pattern with `%u` removed. 
Due to the mixing of Quarkus and Vert.x authorization, the user authenticated cannot be obtained at this time -quarkus.http.access-log.pattern=%{REMOTE_HOST} %l "%{REQUEST_LINE}" %{RESPONSE_CODE} %{RESPONSE_TIME}ms %{BYTES_SENT} +quarkus.http.access-log.pattern=%{REMOTE_USER} %{REMOTE_HOST} %l "%{REQUEST_LINE}" %{RESPONSE_CODE} %{RESPONSE_TIME}ms %{BYTES_SENT} quarkus.http.access-log.exclude-pattern=(?:/health(/live|/ready|/started)?|/metrics) quarkus.http.non-application-root-path=${quarkus.http.root-path} quarkus.http.http2=false @@ -16,8 +16,8 @@ quarkus.http.cors.access-control-allow-credentials=true quarkus.http.header."Strict-Transport-Security".value=max-age=31536000 quarkus.http.auth.basic=false -#quarkus.http.auth.permission."oidc".policy=authenticated -#quarkus.http.auth.permission."oidc".paths=/api/* +quarkus.http.auth.permission."oidc".policy=permit +quarkus.http.auth.permission."oidc".paths=/api/* # See https://quarkus.io/guides/kafka-dev-services # Enable when using quarkus-kafka-client @@ -69,9 +69,11 @@ console.kafka.admin.default.api.timeout.ms=10000 ######## #%dev.quarkus.http.auth.proactive=false #%dev.quarkus.http.auth.permission."oidc".policy=permit +%dev.quarkus.tls.trust-all=true %dev.quarkus.kubernetes-client.trust-certs=true %dev.quarkus.log.category."io.vertx.core.impl.BlockedThreadChecker".level=OFF %dev.quarkus.log.category."com.github.streamshub.console".level=DEBUG +%dev.quarkus.log.category."io.quarkus.oidc.runtime".level=DEBUG # %dev.quarkus.apicurio-registry.devservices.enabled=true # %dev.apicurio.rest.client.disable-auto-basepath-append=true diff --git a/common/src/main/java/com/github/streamshub/console/config/ConsoleConfig.java b/common/src/main/java/com/github/streamshub/console/config/ConsoleConfig.java index 19d7ec94b..055c79662 100644 --- a/common/src/main/java/com/github/streamshub/console/config/ConsoleConfig.java +++ b/common/src/main/java/com/github/streamshub/console/config/ConsoleConfig.java @@ -7,6 +7,7 @@ import jakarta.validation.constraints.AssertTrue; import com.fasterxml.jackson.annotation.JsonIgnore; +import com.github.streamshub.console.config.security.GlobalSecurityConfig; import io.xlate.validation.constraints.Expression; @@ -25,6 +26,8 @@ public class ConsoleConfig { KubernetesConfig kubernetes = new KubernetesConfig(); + GlobalSecurityConfig security = new GlobalSecurityConfig(); + @Valid List schemaRegistries = new ArrayList<>(); @@ -45,6 +48,14 @@ public void setKubernetes(KubernetesConfig kubernetes) { this.kubernetes = kubernetes; } + public GlobalSecurityConfig getSecurity() { + return security; + } + + public void setSecurity(GlobalSecurityConfig security) { + this.security = security; + } + public List getSchemaRegistries() { return schemaRegistries; } diff --git a/common/src/main/java/com/github/streamshub/console/config/KafkaClusterConfig.java b/common/src/main/java/com/github/streamshub/console/config/KafkaClusterConfig.java index a676d80c6..c964f0516 100644 --- a/common/src/main/java/com/github/streamshub/console/config/KafkaClusterConfig.java +++ b/common/src/main/java/com/github/streamshub/console/config/KafkaClusterConfig.java @@ -6,6 +6,7 @@ import jakarta.validation.constraints.NotBlank; import com.fasterxml.jackson.annotation.JsonIgnore; +import com.github.streamshub.console.config.security.SecurityConfig; public class KafkaClusterConfig { @@ -14,6 +15,7 @@ public class KafkaClusterConfig { private String name; private String namespace; private String listener; + private SecurityConfig security = new 
SecurityConfig(); /** * Name of a configured schema registry that will be used to ser/des configurations * with this Kafka cluster. @@ -58,6 +60,14 @@ public void setNamespace(String namespace) { this.namespace = namespace; } + public SecurityConfig getSecurity() { + return security; + } + + public void setSecurity(SecurityConfig security) { + this.security = security; + } + public String getListener() { return listener; } diff --git a/common/src/main/java/com/github/streamshub/console/config/security/GlobalSecurityConfig.java b/common/src/main/java/com/github/streamshub/console/config/security/GlobalSecurityConfig.java new file mode 100644 index 000000000..32f41fcb8 --- /dev/null +++ b/common/src/main/java/com/github/streamshub/console/config/security/GlobalSecurityConfig.java @@ -0,0 +1,14 @@ +package com.github.streamshub.console.config.security; + +public class GlobalSecurityConfig extends SecurityConfig { + + private OidcConfig oidc; + + public OidcConfig getOidc() { + return oidc; + } + + public void setOidc(OidcConfig oidc) { + this.oidc = oidc; + } +} diff --git a/common/src/main/java/com/github/streamshub/console/config/security/OidcConfig.java b/common/src/main/java/com/github/streamshub/console/config/security/OidcConfig.java new file mode 100644 index 000000000..798fcee05 --- /dev/null +++ b/common/src/main/java/com/github/streamshub/console/config/security/OidcConfig.java @@ -0,0 +1,42 @@ +package com.github.streamshub.console.config.security; + +public class OidcConfig { + + private String tenantId = "streamshub-console"; + private String authServerUrl; + private String clientId; + private String clientSecret; + + public String getTenantId() { + return tenantId; + } + + public void setTenantId(String tenantId) { + this.tenantId = tenantId; + } + + public String getAuthServerUrl() { + return authServerUrl; + } + + public void setAuthServerUrl(String authServerUrl) { + this.authServerUrl = authServerUrl; + } + + public String getClientId() { + return clientId; + } + + public void setClientId(String clientId) { + this.clientId = clientId; + } + + public String getClientSecret() { + return clientSecret; + } + + public void setClientSecret(String clientSecret) { + this.clientSecret = clientSecret; + } + +} diff --git a/common/src/main/java/com/github/streamshub/console/config/security/Privilege.java b/common/src/main/java/com/github/streamshub/console/config/security/Privilege.java new file mode 100644 index 000000000..7ba1aebb2 --- /dev/null +++ b/common/src/main/java/com/github/streamshub/console/config/security/Privilege.java @@ -0,0 +1,24 @@ +package com.github.streamshub.console.config.security; + +import java.util.Locale; + +import com.fasterxml.jackson.annotation.JsonCreator; + +public enum Privilege { + + CREATE, + DELETE, + GET, + LIST, + UPDATE, + ALL; + + @JsonCreator + public static Privilege forValue(String value) { + if ("*".equals(value)) { + return ALL; + } + return valueOf(value.toUpperCase(Locale.ROOT)); + } + +} diff --git a/common/src/main/java/com/github/streamshub/console/config/security/RoleConfig.java b/common/src/main/java/com/github/streamshub/console/config/security/RoleConfig.java new file mode 100644 index 000000000..39ac83f96 --- /dev/null +++ b/common/src/main/java/com/github/streamshub/console/config/security/RoleConfig.java @@ -0,0 +1,27 @@ +package com.github.streamshub.console.config.security; + +import java.util.ArrayList; +import java.util.List; + +public class RoleConfig { + + private String name; + private List rules = new ArrayList<>(); + + 
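+    // Illustrative (assumed) YAML shape these security config types bind to;
+    // key names follow the field names, literal values are examples only:
+    //
+    //   security:
+    //     roles:
+    //       - name: developer
+    //         rules:
+    //           - resources: ["topics", "consumerGroups"]
+    //             privileges: ["GET", "LIST"]
+    //     subjects:
+    //       - claim: groups
+    //         include: ["team-a"]
+    //         roleNames: ["developer"]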
public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public List<RuleConfig> getRules() { + return rules; + } + + public void setRules(List<RuleConfig> rules) { + this.rules = rules; + } + +} diff --git a/common/src/main/java/com/github/streamshub/console/config/security/RuleConfig.java b/common/src/main/java/com/github/streamshub/console/config/security/RuleConfig.java new file mode 100644 index 000000000..5361d1aed --- /dev/null +++ b/common/src/main/java/com/github/streamshub/console/config/security/RuleConfig.java @@ -0,0 +1,47 @@ +package com.github.streamshub.console.config.security; + +import java.util.ArrayList; +import java.util.List; + +public class RuleConfig { + + /** + * Resources to which this rule applies (required) + */ + List<String> resources = new ArrayList<>(); + + /** + * Specific resource names to which this rule applies (optional) + */ + List<String> resourceNames = new ArrayList<>(); + + /** + * Privileges/actions that may be performed for subjects having this rule + */ + List<Privilege> privileges = new ArrayList<>(); + + public List<String> getResources() { + return resources; + } + + public void setResources(List<String> resources) { + this.resources = resources; + } + + public List<String> getResourceNames() { + return resourceNames; + } + + public void setResourceNames(List<String> resourceNames) { + this.resourceNames = resourceNames; + } + + public List<Privilege> getPrivileges() { + return privileges; + } + + public void setPrivileges(List<Privilege> privileges) { + this.privileges = privileges; + } + +} diff --git a/common/src/main/java/com/github/streamshub/console/config/security/SecurityConfig.java b/common/src/main/java/com/github/streamshub/console/config/security/SecurityConfig.java new file mode 100644 index 000000000..050c88f2f --- /dev/null +++ b/common/src/main/java/com/github/streamshub/console/config/security/SecurityConfig.java @@ -0,0 +1,27 @@ +package com.github.streamshub.console.config.security; + +import java.util.ArrayList; +import java.util.List; + +public class SecurityConfig { + + private List<SubjectConfig> subjects = new ArrayList<>(); + private List<RoleConfig> roles = new ArrayList<>(); + + public List<SubjectConfig> getSubjects() { + return subjects; + } + + public void setSubjects(List<SubjectConfig> subjects) { + this.subjects = subjects; + } + + public List<RoleConfig> getRoles() { + return roles; + } + + public void setRoles(List<RoleConfig> roles) { + this.roles = roles; + } + +} diff --git a/common/src/main/java/com/github/streamshub/console/config/security/SubjectConfig.java b/common/src/main/java/com/github/streamshub/console/config/security/SubjectConfig.java new file mode 100644 index 000000000..bb816184c --- /dev/null +++ b/common/src/main/java/com/github/streamshub/console/config/security/SubjectConfig.java @@ -0,0 +1,45 @@ +package com.github.streamshub.console.config.security; + +import java.util.ArrayList; +import java.util.List; + +public class SubjectConfig { + + private String issuer; + private String claim; + private List<String> include = new ArrayList<>(); + private List<String> roleNames = new ArrayList<>(); + + public String getIssuer() { + return issuer; + } + + public void setIssuer(String issuer) { + this.issuer = issuer; + } + + public String getClaim() { + return claim; + } + + public void setClaim(String claim) { + this.claim = claim; + } + + public List<String> getInclude() { + return include; + } + + public void setInclude(List<String> include) { + this.include = include; + } + + public List<String> getRoleNames() { + return roleNames; + } + + public void setRoleNames(List<String> roleNames) { + this.roleNames = roleNames; + } + +} diff --git a/install/004-deploy-dex.sh 
b/install/004-deploy-dex.sh new file mode 100755 index 000000000..089e2dad8 --- /dev/null +++ b/install/004-deploy-dex.sh @@ -0,0 +1,37 @@ +#!/bin/bash + +set -euo pipefail + +CONSOLE_INSTALL_PATH="$(cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P)" +RESOURCE_PATH=${CONSOLE_INSTALL_PATH}/resources/dex + +export NAMESPACE="${1?Please provide the deployment namespace}" +export CLUSTER_DOMAIN="${2?Please provide the base domain name for Kafka listener ingress}" + +source ${CONSOLE_INSTALL_PATH}/_common.sh + +${KUBE} create serviceaccount dex --dry-run=client -o yaml \ + | ${KUBE} apply -n ${NAMESPACE} -f - +${KUBE} annotate serviceaccount dex -n ${NAMESPACE} --dry-run=client "serviceaccounts.openshift.io/oauth-redirecturi.dex=https://console-dex.${CLUSTER_DOMAIN}/callback" -o yaml \ + | ${KUBE} apply -n ${NAMESPACE} -f - + +export STATIC_CLIENT_SECRET="$(tr -dc A-Za-z0-9 /dev/null 2>&1 ; then + ${KUBE} scale --replicas=0 deployment/dex -n ${NAMESPACE} +fi + +# Replace env variables +${YQ} '(.. | select(tag == "!!str")) |= envsubst(ne)' ${RESOURCE_PATH}/dex.yaml | ${KUBE} apply -n ${NAMESPACE} -f - + +if [ "$(${KUBE} api-resources --api-group=route.openshift.io -o=name)" != "" ] ; then + ${KUBE} patch ingress/console-dex-ingress -n ${NAMESPACE} --type=merge --patch '{"spec":{"ingressClassName":"openshift-default"}}' +fi \ No newline at end of file diff --git a/install/_common.sh b/install/_common.sh new file mode 100644 index 000000000..43c1ef3b3 --- /dev/null +++ b/install/_common.sh @@ -0,0 +1,82 @@ +RED='\033[0;31m' +GREEN='\033[0;32m' +NC='\033[0m' # No Color +INFO="[ \033[38;5;33mINFO${NC} ]" +WARN="[ \033[38;5;208mWARN${NC} ]" +ERROR="[ \033[38;5;196mERROR${NC} ]" + +KUBE="$(which oc 2>/dev/null || which kubectl 2>/dev/null)" || : + +if [ "${KUBE}" == "" ] ; then + echo -e "${ERROR} Neither 'oc' nor 'kubectl' command line utilities found on the PATH" + exit 1 +fi + +YQ="$(which yq 2>/dev/null)" || : + +if [ "${YQ}" == "" ] ; then + echo -e "${ERROR} 'yq' command line utility not found on the PATH" + exit 1 +fi + +if ${KUBE} get namespace/${NAMESPACE} >/dev/null 2>&1 ; then + echo -e "${INFO} Namespace '${NAMESPACE}' exists" +else + echo -e "${WARN} Namespace '${NAMESPACE}' not found... creating" + ${KUBE} create namespace ${NAMESPACE} >/dev/null + + if ${KUBE} get namespace/${NAMESPACE} >/dev/null 2>&1 ; then + echo -e "${INFO} Namespace '${NAMESPACE}' created" + else + echo -e "${WARN} Namespace '${NAMESPACE}' could not be created" + fi +fi + +OLM=$(${KUBE} get crd | grep operators.coreos.com) || : + +if [ "${OLM}" == "" ] && [ "${CI_CLUSTER:-}" == "" ] ; then + echo -e "${ERROR} Operator Lifecycle Manager not found, please install it. 
+ +$ operator-sdk olm install + +For more info please visit https://sdk.operatorframework.io/ +" + exit 1 +fi + +function fetch_available_packages { + local NAME_PATTERN="${1}" + + for pm in $(${KUBE} get packagemanifests -o name | grep -E '^packagemanifest\.packages\.operators\.coreos\.com/('"${NAME_PATTERN}"')$') ; do + ${KUBE} get $pm -o yaml | ${YQ} -o=json '{ + "name": .status.packageName, + "channel": .status.defaultChannel, + "catalogSource": .status.catalogSource, + "catalogSourceNamespace": .status.catalogSourceNamespace + }' + done | ${YQ} ea -p=json '[.]' | ${YQ} -o=csv | tail -n +2 +} + +function display_suggested_subscription { + local OPERATOR_NAME="${1}" + local NAME_PATTERN="${2}" + + local AVAILABLE_PKGS="$(fetch_available_packages "${NAME_PATTERN}")" + echo -e "${INFO} ${OPERATOR_NAME} may be installed by creating one of the following resources:" + COUNTER=0 + + while IFS=, read -r PKG_NAME PKG_CHANNEL PKG_CTLG_SRC PKG_CTLG_SRC_NS; do + COUNTER=$(( COUNTER + 1 )) + echo -e "${INFO} ----- Option ${COUNTER} -----" + echo "apiVersion: operators.coreos.com/v1alpha1 +kind: Subscription +metadata: + name: ${OPERATOR_NAME} + namespace: ${NAMESPACE} +spec: + name: ${PKG_NAME} + channel: ${PKG_CHANNEL} + source: ${PKG_CTLG_SRC} + sourceNamespace: ${PKG_CTLG_SRC_NS}" | ${YQ} + done < <(echo "${AVAILABLE_PKGS}") +} diff --git a/install/resources/dex/dex.yaml b/install/resources/dex/dex.yaml new file mode 100644 index 000000000..4583b59b7 --- /dev/null +++ b/install/resources/dex/dex.yaml @@ -0,0 +1,180 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: dex +rules: +- apiGroups: ["dex.coreos.com"] # API group created by dex + resources: ["*"] + verbs: ["*"] +- apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ["create"] # To manage its own resources, dex must be able to create customresourcedefinitions +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: dex +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: dex +subjects: +- kind: ServiceAccount + name: dex # Service account assigned to the dex pod, created above + namespace: ${NAMESPACE} # The namespace dex is running in +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: dex + name: dex +spec: + replicas: 1 + selector: + matchLabels: + app: dex + template: + metadata: + labels: + app: dex + spec: + serviceAccountName: dex # This is created above + volumes: + - name: config + configMap: + name: dex + items: + - key: config.yaml + path: config.yaml + - name: openshift-ca + configMap: + name: kube-root-ca.crt + items: + - key: ca.crt + path: openshift.pem + #- name: tls + # secret: + # secretName: dex.example.com.tls + containers: + - image: ghcr.io/dexidp/dex:v2.32.0 + name: dex + command: ["/usr/local/bin/dex", "serve", "/etc/dex/cfg/config.yaml"] + + ports: + - name: http + containerPort: 5556 + + volumeMounts: + - name: config + mountPath: /etc/dex/cfg + - name: openshift-ca + mountPath: /etc/ssl/openshift.pem + subPath: openshift.pem + #- name: tls + # mountPath: /etc/dex/tls + + env: + - name: OPENSHIFT_OAUTH_CLIENT_ID + valueFrom: + secretKeyRef: + name: console-dex-secrets + key: DEX_CLIENT_ID + - name: OPENSHIFT_OAUTH_CLIENT_SECRET + valueFrom: + secretKeyRef: + name: console-dex-secrets + key: DEX_CLIENT_SECRET + - name: DEX_STATIC_CLIENT_SECRET + valueFrom: + secretKeyRef: + name: console-dex-secrets + key: DEX_STATIC_CLIENT_SECRET + + readinessProbe: + httpGet: + 
path: /healthz + port: 5556 + scheme: HTTP +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: dex +data: + config.yaml: | + issuer: https://console-dex.${CLUSTER_DOMAIN} + storage: + type: kubernetes + config: + inCluster: true + web: + http: 0.0.0.0:5556 + #tlsCert: /etc/dex/tls/tls.crt + #tlsKey: /etc/dex/tls/tls.key + connectors: + - type: openshift + id: openshift + name: OpenShift + config: + # OpenShift API + issuer: https://api.crc.testing:6443 + # Credentials can be string literals or pulled from the environment. + clientID: $$OPENSHIFT_OAUTH_CLIENT_ID + clientSecret: $$OPENSHIFT_OAUTH_CLIENT_SECRET + redirectURI: https://console-dex.${CLUSTER_DOMAIN}/callback + # OpenShift root CA + rootCA: /etc/ssl/openshift.pem + # Communicate to OpenShift without validating SSL certificates + insecureCA: false + # Optional list of required groups a user must be a member of + groups: [] + oauth2: + skipApprovalScreen: true + + staticClients: + - id: streamshub-console + name: 'StreamsHub Console' + secret: ${STATIC_CLIENT_SECRET} + redirectURIs: + - 'http://127.0.0.1:5555/callback' + - 'http://localhost:3000/api/auth/callback/oidc' +--- +apiVersion: v1 +kind: Service +metadata: + name: dex +spec: + type: ClusterIP + ports: + - name: dex + port: 5556 + protocol: TCP + targetPort: 5556 + selector: + app: dex +--- +kind: Ingress +apiVersion: networking.k8s.io/v1 +metadata: + name: console-dex-ingress + annotations: + nginx.ingress.kubernetes.io/backend-protocol: HTTP + route.openshift.io/termination: edge +spec: + defaultBackend: + service: + name: dex + port: + number: 5556 + rules: + - host: console-dex.${CLUSTER_DOMAIN} + http: + paths: + - pathType: ImplementationSpecific + backend: + service: + name: dex + port: + number: 5556 diff --git a/pom.xml b/pom.xml index d8ca83973..f77d8f514 100644 --- a/pom.xml +++ b/pom.xml @@ -20,6 +20,7 @@ 0.43.0 0.15.0 2.6.2.Final + 3.7.1 3.0 @@ -75,6 +76,16 @@ + + org.apache.kafka + kafka-clients + ${kafka.version} + + + org.apache.kafka + kafka-metadata + ${kafka.version} + com.nimbusds diff --git a/ui/api/api.ts b/ui/api/api.ts index 52377c1eb..cccee22cc 100644 --- a/ui/api/api.ts +++ b/ui/api/api.ts @@ -28,3 +28,8 @@ export const ApiError = z.object({ }) .optional(), }); + +export const ApiErrorResponse = z.object({ + meta: z.object({}).nullable().optional(), + errors: z.array(ApiError), +}); diff --git a/ui/api/kafka/actions.ts b/ui/api/kafka/actions.ts index 58319be8b..82bd1f434 100644 --- a/ui/api/kafka/actions.ts +++ b/ui/api/kafka/actions.ts @@ -16,6 +16,7 @@ import { PrometheusDriver } from "prometheus-query"; import * as clusterPromql from "./cluster.promql"; import { values } from "./kpi.promql"; import * as topicPromql from "./topic.promql"; +import { ApiErrorResponse } from '@/api/api'; export type ClusterMetric = keyof typeof clusterPromql; export type TopicMetric = keyof typeof topicPromql; @@ -29,6 +30,7 @@ const prom = process.env.CONSOLE_METRICS_PROMETHEUS_URL const log = logger.child({ module: "kafka-api" }); export async function getKafkaClusters(): Promise { + log.trace("Enter getKafkaClusters()"); const sp = new URLSearchParams({ "fields[kafkas]": "name,namespace,kafkaVersion", sort: "name", @@ -37,17 +39,19 @@ export async function getKafkaClusters(): Promise { const url = `${process.env.BACKEND_URL}/api/kafkas?${kafkaClustersQuery}`; try { const res = await fetch(url, { - headers: { - Accept: "application/json", - "Content-Type": "application/json", - }, + headers: await getHeaders(), next: { revalidate: 30, }, }); const rawData = 
await res.json(); - log.trace(rawData, "getKafkaClusters response"); - return ClustersResponseSchema.parse(rawData).data; + if (res.status != 200) { + log.info(rawData, "getKafkaClusters response"); + throw new Error(ApiErrorResponse.parse(rawData).errors[0].detail); + } else { + log.trace(rawData, "getKafkaClusters response"); + return ClustersResponseSchema.parse(rawData).data; + } } catch (err) { log.error(err, "getKafkaClusters"); throw new Error("getKafkaClusters: couldn't connect with backend"); diff --git a/ui/app/[locale]/(authorized)/kafka/page.tsx b/ui/app/[locale]/(authorized)/kafka/page.tsx index 892b4f4c5..d18be9638 100644 --- a/ui/app/[locale]/(authorized)/kafka/page.tsx +++ b/ui/app/[locale]/(authorized)/kafka/page.tsx @@ -1,5 +1,3 @@ -import { getKafkaClusters } from "@/api/kafka/actions"; -import { RedirectOnLoad } from "@/components/Navigation/RedirectOnLoad"; import { redirect } from "@/i18n/routing"; export default function Page({}) { diff --git a/ui/app/[locale]/(public)/(home)/page.tsx b/ui/app/[locale]/(public)/(home)/page.tsx index afa357e0b..4fac05a9b 100644 --- a/ui/app/[locale]/(public)/(home)/page.tsx +++ b/ui/app/[locale]/(public)/(home)/page.tsx @@ -30,19 +30,39 @@ import { isProductizedBuild } from "@/utils/env"; import { getTranslations } from "next-intl/server"; import { Suspense } from "react"; import styles from "./home.module.css"; +import config from '@/utils/config'; +import { logger } from "@/utils/logger"; +import { getAuthOptions } from "@/app/api/auth/[...nextauth]/route"; +import { getServerSession } from "next-auth"; + +const log = logger.child({ module: "home" }); export default async function Home() { const t = await getTranslations(); + log.trace("fetching known Kafka clusters...") const allClusters = await getKafkaClusters(); + log.trace(`fetched ${allClusters?.length ?? 0} Kafka clusters`) const productName = t("common.product"); const brand = t("common.brand"); + log.trace("fetching configuration") + let cfg = await config(); + log.trace(`fetched configuration: ${cfg ? 'yes' : 'no'}`); + let oidcCfg = cfg?.security?.oidc ?? null; + let oidcEnabled = !!oidcCfg; + let username: string | undefined; + + if (oidcEnabled) { + const authOptions = await getAuthOptions(); + const session = await getServerSession(authOptions); + username = (session?.user?.name ?? session?.user?.email) ?? undefined; + } - if (allClusters.length === 1) { + if (allClusters.length === 1 && !oidcEnabled) { return ; } return ( - +
    @@ -76,8 +96,8 @@ export default async function Home() { - }> - + }> + diff --git a/ui/app/[locale]/layout.tsx b/ui/app/[locale]/layout.tsx index 58a945b8e..7953b5d78 100644 --- a/ui/app/[locale]/layout.tsx +++ b/ui/app/[locale]/layout.tsx @@ -1,5 +1,4 @@ import { getMessages, getTranslations } from "next-intl/server"; -import { useNow, useTimeZone } from "next-intl"; import { ReactNode } from "react"; import NextIntlProvider from "./NextIntlProvider"; import "../globals.css"; @@ -27,7 +26,3 @@ export async function generateMetadata({ title: t("title"), }; } - -// export function generateStaticParams() { -// return [{ locale: "en" }]; -// } diff --git a/ui/app/api/auth/[...nextauth]/anonymous.ts b/ui/app/api/auth/[...nextauth]/anonymous.ts index a8e95b652..55a29c5dd 100644 --- a/ui/app/api/auth/[...nextauth]/anonymous.ts +++ b/ui/app/api/auth/[...nextauth]/anonymous.ts @@ -1,4 +1,3 @@ -import { AuthOptions } from "next-auth"; import CredentialsProvider from "next-auth/providers/credentials"; import { Provider } from "next-auth/providers/index"; diff --git a/ui/app/api/auth/[...nextauth]/keycloak.ts b/ui/app/api/auth/[...nextauth]/keycloak.ts deleted file mode 100644 index 957868765..000000000 --- a/ui/app/api/auth/[...nextauth]/keycloak.ts +++ /dev/null @@ -1,144 +0,0 @@ -import { logger } from "@/utils/logger"; -import { AuthOptions, Session, TokenSet } from "next-auth"; -import { JWT } from "next-auth/jwt"; -import { Provider } from "next-auth/providers/index"; -import KeycloakProvider from "next-auth/providers/keycloak"; - -const log = logger.child({ module: "keycloak" }); - -export function makeOauthProvider( - clientId: string, - clientSecret: string, - issuer: string, -): Provider { - const provider = KeycloakProvider({ - clientId, - clientSecret, - issuer, - }); - - let _tokenEndpoint: string | undefined = undefined; - - async function getTokenEndpoint() { - if (provider && provider.wellKnown) { - const kc = await fetch(provider.wellKnown); - const res = await kc.json(); - _tokenEndpoint = res.token_endpoint; - } - return _tokenEndpoint; - } - - async function refreshToken(token: JWT): Promise { - try { - const tokenEndpoint = await getTokenEndpoint(); - if (!provider) { - log.error("Invalid Keycloak configuratio"); - throw token; - } - if (!tokenEndpoint) { - log.error("Invalid Keycloak wellKnow"); - throw token; - } - let tokenExpiration = new Date( - (typeof token?.expires_at === "number" ? token.expires_at : 0) * 1000, - ); - log.trace({ tokenExpiration }, "Token expiration"); - - if (Date.now() < tokenExpiration.getTime()) { - log.trace(token, "Token not yet expired"); - return token; - } else { - log.trace(token, "Token has expired"); - let refresh_token = - typeof token.refresh_token === "string" ? token.refresh_token : ""; - - const params = { - client_id: provider.options!.clientId, - client_secret: provider.options!.clientSecret, - grant_type: "refresh_token", - refresh_token: refresh_token, - }; - - log.trace( - { - url: tokenEndpoint, - }, - "Refreshing token", - ); - - const response = await fetch(tokenEndpoint, { - headers: { "Content-Type": "application/x-www-form-urlencoded" }, - body: new URLSearchParams(params), - method: "POST", - }); - - const refreshToken: TokenSet = await response.json(); - if (!response.ok) { - throw new Error(response.statusText); - } - log.trace(refreshToken, "Got refresh token"); - - let expires_in = - typeof refreshToken.expires_in === "number" - ? 
refreshToken.expires_in - : -1; - - const newToken: JWT = { - ...token, // Keep the previous token properties - access_token: refreshToken.access_token, - expires_at: Math.floor(Date.now() / 1000 + expires_in), - // Fall back to old refresh token, but note that - // many providers may only allow using a refresh token once. - refresh_token: refreshToken.refresh_token ?? token.refresh_token, - }; - log.trace(newToken, "New token"); - return newToken; - } - } catch (error: unknown) { - if (typeof error === "string") { - log.error({ message: error }, "Error refreshing access token"); - } else if (error instanceof Error) { - log.error(error, "Error refreshing access token"); - } else { - log.error("Unknown error refreshing access token"); - } - // The error property will be used client-side to handle the refresh token error - return { ...token, error: "RefreshAccessTokenError" as const }; - } - } - - return provider; - - // return { - // providers: [provider], - // callbacks: { - // async jwt({ token, account }: { token: JWT; account: any }) { - // // Persist the OAuth access_token and or the user id to the token right after signin - // if (account) { - // log.trace("account present, saving new token"); - // // Save the access token and refresh token in the JWT on the initial login - // return { - // access_token: account.access_token, - // expires_at: account.expires_at, - // refresh_token: account.refresh_token, - // email: token.email, - // name: token.name, - // picture: token.picture, - // sub: token.sub, - // }; - // } - // - // return refreshToken(token); - // }, - // async session({ session, token }: { session: Session; token: JWT }) { - // // Send properties to the client, like an access_token from a provider. - // log.trace(token, "Creating session from token"); - // return { - // ...session, - // error: token.error, - // accessToken: token.access_token, - // }; - // }, - // }, - // }; -} diff --git a/ui/app/api/auth/[...nextauth]/oidc.ts b/ui/app/api/auth/[...nextauth]/oidc.ts new file mode 100644 index 000000000..cff9b3b58 --- /dev/null +++ b/ui/app/api/auth/[...nextauth]/oidc.ts @@ -0,0 +1,182 @@ +import { logger } from "@/utils/logger"; +import { Session, TokenSet } from "next-auth"; +import { JWT } from "next-auth/jwt"; +import { OAuthConfig } from "next-auth/providers/index"; +import config from '@/utils/config'; + +const log = logger.child({ module: "oidc" }); + +class OpenIdConnect { + + provider: OAuthConfig | null; + + constructor( + authServerUrl: string | null, + clientId: string | null, + clientSecret: string | null + ) { + if (clientId && clientSecret && authServerUrl) { + this.provider = { + id: "oidc", + name: "OpenID Connect Provider", + type: "oauth", + clientId: clientId, + clientSecret: clientSecret, + wellKnown: `${authServerUrl}/.well-known/openid-configuration`, + authorization: { params: { scope: "openid email profile groups" } }, + idToken: true, + profile(profile) { + return { + id: profile.sub, + name: profile.name ?? 
profile.preferred_username, + email: profile.email, + image: profile.image, + } + }, + } + } else { + this.provider = null; + } + } + + isEnabled() { + return this.provider != null; + } + + async getTokenEndpoint() { + let _tokenEndpoint: string | undefined = undefined; + + if (this.provider?.wellKnown) { + log.debug(`wellKnown endpoint: ${this.provider.wellKnown}`); + const kc = await fetch(this.provider.wellKnown); + const res = await kc.json(); + _tokenEndpoint = res.token_endpoint; + } + + log.debug(`token endpoint: ${_tokenEndpoint}`); + + return _tokenEndpoint; + } + + async refreshToken(token: JWT): Promise { + if (this.provider == null) { + throw new Error("OIDC is not properly configured"); + } + + try { + let tokenExpiration = new Date( + (typeof token?.expires_at === "number" ? token.expires_at : 0) * 1000, + ); + log.trace({ tokenExpiration }, "Token expiration"); + + if (Date.now() < tokenExpiration.getTime()) { + log.trace(token, "Token not yet expired"); + return token; + } + log.trace(token, "Token has expired"); + let refresh_token = + typeof token.refresh_token === "string" ? token.refresh_token : ""; + + const params = { + client_id: this.provider.clientId!, + client_secret: this.provider.clientSecret!, + grant_type: "refresh_token", + refresh_token: refresh_token, + }; + + const tokenEndpoint = await this.getTokenEndpoint(); + + if (!tokenEndpoint) { + log.error("Invalid OIDC wellKnown"); + throw token; + } + + log.trace( + { + url: tokenEndpoint, + }, + "Refreshing token", + ); + + const response = await fetch(tokenEndpoint, { + headers: { "Content-Type": "application/x-www-form-urlencoded" }, + body: new URLSearchParams(params), + method: "POST", + }); + + const refreshToken: TokenSet = await response.json(); + if (!response.ok) { + throw new Error(response.statusText); + } + log.trace(refreshToken, "Got refresh token"); + + let expires_in = + typeof refreshToken.expires_in === "number" + ? refreshToken.expires_in + : -1; + + const newToken: JWT = { + ...token, // Keep the previous token properties + access_token: refreshToken.access_token, + expires_at: Math.floor(Date.now() / 1000 + expires_in), + // Fall back to old refresh token, but note that + // many providers may only allow using a refresh token once. + refresh_token: refreshToken.refresh_token ?? 
token.refresh_token, + }; + log.trace(newToken, "New token"); + return newToken; + } catch (error: unknown) { + if (typeof error === "string") { + log.error({ message: error }, "Error refreshing access token"); + } else if (error instanceof Error) { + log.error(error, "Error refreshing access token"); + } else { + log.error("Unknown error refreshing access token"); + } + // The error property will be used client-side to handle the refresh token error + return { ...token, error: "RefreshAccessTokenError" as const }; + } + } + + async jwt({ token, account }: { token: JWT, account: any }) { + // Persist the OAuth access_token and or the user id to the token right after signin + log.info("jwt callback invoked") + if (account) { + log.trace(`account ${JSON.stringify(account)} present, saving new token: ${JSON.stringify(token)}`); + // Save the access token and refresh token in the JWT on the initial login + return { + access_token: account.access_token, + expires_at: account.expires_at, + refresh_token: account.refresh_token, + email: token.email, + name: token.name, + picture: token.picture, + sub: token.sub, + }; + } + + return this.refreshToken(token); + }; + + async session({ session, token }: { session: Session, token: JWT }) { + // Send properties to the client, like an access_token from a provider. + log.trace(token, "Creating session from token"); + return { + ...session, + error: token.error, + accessToken: token.access_token, + authorization: `Bearer ${token.access_token}`, + }; + }; +} + + +export default async function oidcSource() { + let cfg = await config(); + let oidcCfg = cfg?.security?.oidc; + const authServerUrl: string | null = oidcCfg?.authServerUrl ?? null; + const clientId: string | null = oidcCfg?.clientId ?? null; + const clientSecret: string | null = oidcCfg?.clientSecret ?? null; + const oidcProvider = new OpenIdConnect(authServerUrl, clientId, clientSecret); + return oidcProvider; +}; diff --git a/ui/app/api/auth/[...nextauth]/route.ts b/ui/app/api/auth/[...nextauth]/route.ts index 812db7da4..4d3d88a93 100644 --- a/ui/app/api/auth/[...nextauth]/route.ts +++ b/ui/app/api/auth/[...nextauth]/route.ts @@ -1,37 +1,60 @@ import { getKafkaClusters } from "@/api/kafka/actions"; import { ClusterList } from "@/api/kafka/schema"; import { logger } from "@/utils/logger"; -import NextAuth, { AuthOptions } from "next-auth"; +import NextAuth, { AuthOptions, Session } from "next-auth"; +import { JWT } from "next-auth/jwt"; import { Provider } from "next-auth/providers/index"; import { NextRequest, NextResponse } from "next/server"; import { makeAnonymous } from "./anonymous"; import { makeOauthTokenProvider } from "./oauth-token"; import { makeScramShaProvider } from "./scram"; +import oidcSource from "./oidc"; const log = logger.child({ module: "auth" }); export async function getAuthOptions(): Promise { - // retrieve the authentication method required by the default Kafka cluster - const clusters = await getKafkaClusters(); - const providers = clusters.map(makeAuthOption); - log.trace({ providers }, "getAuthOptions"); - return { - providers, - callbacks: { - async jwt({ token, user }) { - if (user) { - token.authorization = user.authorization; + let providers: Provider[]; + log.info("fetching the oidcSource"); + let oidc = await oidcSource(); + + if (oidc.isEnabled()) { + log.info("OIDC is enabled"); + providers = [ oidc.provider! 
]; + return { + providers, + callbacks: { + async jwt({ token, account }: { token: JWT, account: any }) { + return oidc.jwt({ token, account }); + }, + async session({ session, token }: { session: Session, token: JWT }) { + return oidc.session({ session, token }); } - return token; - }, - async session({ session, token, user }) { - // Send properties to the client, like an access_token and user id from a provider. - session.authorization = token.authorization; + } + } + } else { + log.info("OIDC is disabled"); + // retrieve the authentication method required by the default Kafka cluster + const clusters = await getKafkaClusters(); + providers = clusters.map(makeAuthOption); + log.trace({ providers }, "getAuthOptions"); + return { + providers, + callbacks: { + async jwt({ token, user }) { + if (user) { + token.authorization = user.authorization; + } + return token; + }, + async session({ session, token, user }) { + // Send properties to the client, like an access_token and user id from a provider. + session.authorization = token.authorization; - return session; + return session; + }, }, - }, - }; + }; + } } function makeAuthOption(cluster: ClusterList): Provider { diff --git a/ui/app/api/auth/[...nextauth]/scram.ts b/ui/app/api/auth/[...nextauth]/scram.ts index 3bcbd5eb0..b24e6f8c9 100644 --- a/ui/app/api/auth/[...nextauth]/scram.ts +++ b/ui/app/api/auth/[...nextauth]/scram.ts @@ -1,5 +1,3 @@ -import { getKafkaClusters } from "@/api/kafka/actions"; -import { AuthOptions } from "next-auth"; import CredentialsProvider from "next-auth/providers/credentials"; import { Provider } from "next-auth/providers/index"; diff --git a/ui/components/ClustersTable.tsx b/ui/components/ClustersTable.tsx index 9fc35d60c..103d737a3 100644 --- a/ui/components/ClustersTable.tsx +++ b/ui/components/ClustersTable.tsx @@ -6,6 +6,7 @@ import { ResponsiveTable } from "@/components/Table"; import { Truncate } from "@/libs/patternfly/react-core"; import { TableVariant } from "@/libs/patternfly/react-table"; import { useTranslations } from "next-intl"; +import { Link } from "@/i18n/routing"; const columns = [ "name", @@ -13,14 +14,28 @@ const columns = [ "namespace", "authentication", "login", -] as const; +]; export function ClustersTable({ clusters, + authenticated, }: { clusters: ClusterList[] | undefined; + authenticated: boolean }) { const t = useTranslations(); + const columns = authenticated ? [ + "name", + "version", + "namespace", + ] as const : [ + "name", + "version", + "namespace", + "authentication", + "login", + ] as const; + return ( - + {authenticated + ? + + + : + } ); case "version": @@ -87,8 +107,8 @@ export function ClustersTable({ case "login": return ( - - Login to cluster + + { authenticated ? 
"View" : "Login to cluster" } ); diff --git a/ui/environment.d.ts b/ui/environment.d.ts index 06950d380..e6d03d648 100644 --- a/ui/environment.d.ts +++ b/ui/environment.d.ts @@ -3,9 +3,6 @@ namespace NodeJS { NEXTAUTH_URL: string; NEXTAUTH_SECRET: string; BACKEND_URL: string; - KEYCLOAK_CLIENTID?: string; - KEYCLOAK_CLIENTSECRET?: string; - NEXT_PUBLIC_KEYCLOAK_URL?: string; NEXT_PUBLIC_PRODUCTIZED_BUILD?: "true" | "false"; CONSOLE_METRICS_PROMETHEUS_URL?: string; LOG_LEVEL?: "fatal" | "error" | "warn" | "info" | "debug" | "trace"; diff --git a/ui/middleware.ts b/ui/middleware.ts index 8b5909822..c5850d9b5 100644 --- a/ui/middleware.ts +++ b/ui/middleware.ts @@ -3,6 +3,7 @@ import withAuth from "next-auth/middleware"; import createIntlMiddleware from "next-intl/middleware"; import { NextRequest, NextResponse } from "next/server"; +import consoleConfig from '@/utils/config'; import { logger } from "@/utils/logger"; const log = logger.child({ module: "middleware" }); @@ -24,7 +25,7 @@ const authMiddleware = withAuth( authorized: ({ token }) => token != null, }, pages: { - signIn: `/kafka/1/login`, + //signIn: `/kafka/1/login`, }, }, ) as any; @@ -44,16 +45,20 @@ const protectedPathnameRegex = RegExp( ); export default async function middleware(req: NextRequest) { + let cfg = await consoleConfig(); + let oidcCfg = cfg?.['security']?.['oidc']; + let oidcEnabled = !!oidcCfg; + const requestPath = req.nextUrl.pathname; - const isPublicPage = publicPathnameRegex.test(requestPath); - const isProtectedPage = protectedPathnameRegex.test(requestPath); + const isPublicPage = !oidcEnabled && publicPathnameRegex.test(requestPath); + const isProtectedPage = oidcEnabled || protectedPathnameRegex.test(requestPath); if (isPublicPage) { log.trace({ requestPath: requestPath }, "public page"); return intlMiddleware(req); } else if (isProtectedPage) { log.trace({ requestPath: requestPath }, "protected page"); - return (authMiddleware as any)(req); + return (authMiddleware)(req); } else { log.debug( { diff --git a/ui/package-lock.json b/ui/package-lock.json index 185f00980..b8a0eb11a 100644 --- a/ui/package-lock.json +++ b/ui/package-lock.json @@ -36,6 +36,7 @@ "eslint-import-resolver-typescript": "^3.6.3", "eslint-plugin-storybook": "^0.10.1", "iron-session": "^8.0.3", + "js-yaml": "^4.1.0", "lodash.groupby": "^4.6.0", "next": "^14.2.15", "next-auth": "^4.24.10", @@ -63,6 +64,7 @@ "@storybook/react": "^8.3.6", "@storybook/test": "^8.0.0", "@storybook/test-runner": "^0.19.1", + "@types/js-yaml": "^4.0.9", "pino-pretty": "^11.3.0", "playwright": "^1.45.2", "prettier": "^3.3.3", @@ -2502,11 +2504,6 @@ "url": "https://opencollective.com/eslint" } }, - "node_modules/@eslint/eslintrc/node_modules/argparse": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", - "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==" - }, "node_modules/@eslint/eslintrc/node_modules/globals": { "version": "13.24.0", "resolved": "https://registry.npmjs.org/globals/-/globals-13.24.0.tgz", @@ -2521,17 +2518,6 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/@eslint/eslintrc/node_modules/js-yaml": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", - "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", - "dependencies": { - "argparse": "^2.0.1" - }, - "bin": { - "js-yaml": "bin/js-yaml.js" - } - }, 
"node_modules/@eslint/eslintrc/node_modules/type-fest": { "version": "0.20.2", "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz", @@ -3150,6 +3136,15 @@ "node": ">=8" } }, + "node_modules/@istanbuljs/load-nyc-config/node_modules/argparse": { + "version": "1.0.10", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", + "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", + "dev": true, + "dependencies": { + "sprintf-js": "~1.0.2" + } + }, "node_modules/@istanbuljs/load-nyc-config/node_modules/find-up": { "version": "4.1.0", "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz", @@ -3163,6 +3158,19 @@ "node": ">=8" } }, + "node_modules/@istanbuljs/load-nyc-config/node_modules/js-yaml": { + "version": "3.14.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", + "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", + "dev": true, + "dependencies": { + "argparse": "^1.0.7", + "esprima": "^4.0.0" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, "node_modules/@istanbuljs/load-nyc-config/node_modules/locate-path": { "version": "5.0.0", "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz", @@ -7742,6 +7750,13 @@ "@types/istanbul-lib-report": "*" } }, + "node_modules/@types/js-yaml": { + "version": "4.0.9", + "resolved": "https://registry.npmjs.org/@types/js-yaml/-/js-yaml-4.0.9.tgz", + "integrity": "sha512-k4MGaQl5TGo/iipqb2UDG2UwjXziSWkh0uysQelTlJpX1qGlpUZYm8PnO4DxG1qBomtJUdYJ6qR6xdIah10JLg==", + "dev": true, + "license": "MIT" + }, "node_modules/@types/json-schema": { "version": "7.0.15", "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.15.tgz", @@ -8808,13 +8823,9 @@ "dev": true }, "node_modules/argparse": { - "version": "1.0.10", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", - "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", - "dev": true, - "dependencies": { - "sprintf-js": "~1.0.2" - } + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", + "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==" }, "node_modules/aria-query": { "version": "5.3.0", @@ -11879,11 +11890,6 @@ "url": "https://opencollective.com/eslint" } }, - "node_modules/eslint/node_modules/argparse": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", - "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==" - }, "node_modules/eslint/node_modules/globals": { "version": "13.24.0", "resolved": "https://registry.npmjs.org/globals/-/globals-13.24.0.tgz", @@ -11898,17 +11904,6 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/eslint/node_modules/js-yaml": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", - "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", - "dependencies": { - "argparse": "^2.0.1" - }, - "bin": { - "js-yaml": "bin/js-yaml.js" - } - }, "node_modules/eslint/node_modules/type-fest": { "version": "0.20.2", "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz", @@ -15616,13 +15611,12 @@ "integrity": 
"sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==" }, "node_modules/js-yaml": { - "version": "3.14.1", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", - "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", - "dev": true, + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", + "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", + "license": "MIT", "dependencies": { - "argparse": "^1.0.7", - "esprima": "^4.0.0" + "argparse": "^2.0.1" }, "bin": { "js-yaml": "bin/js-yaml.js" @@ -17516,12 +17510,6 @@ } } }, - "node_modules/postcss-loader/node_modules/argparse": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", - "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", - "dev": true - }, "node_modules/postcss-loader/node_modules/cosmiconfig": { "version": "9.0.0", "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-9.0.0.tgz", @@ -17548,18 +17536,6 @@ } } }, - "node_modules/postcss-loader/node_modules/js-yaml": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", - "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", - "dev": true, - "dependencies": { - "argparse": "^2.0.1" - }, - "bin": { - "js-yaml": "bin/js-yaml.js" - } - }, "node_modules/postcss-loader/node_modules/semver": { "version": "7.6.2", "resolved": "https://registry.npmjs.org/semver/-/semver-7.6.2.tgz", diff --git a/ui/package.json b/ui/package.json index 5fda3bae4..f731d23ea 100644 --- a/ui/package.json +++ b/ui/package.json @@ -41,6 +41,7 @@ "eslint-import-resolver-typescript": "^3.6.3", "eslint-plugin-storybook": "^0.10.1", "iron-session": "^8.0.3", + "js-yaml": "^4.1.0", "lodash.groupby": "^4.6.0", "next": "^14.2.15", "next-auth": "^4.24.10", @@ -68,6 +69,7 @@ "@storybook/react": "^8.3.6", "@storybook/test": "^8.0.0", "@storybook/test-runner": "^0.19.1", + "@types/js-yaml": "^4.0.9", "pino-pretty": "^11.3.0", "playwright": "^1.45.2", "prettier": "^3.3.3", diff --git a/ui/utils/config.ts b/ui/utils/config.ts new file mode 100644 index 000000000..16e98a132 --- /dev/null +++ b/ui/utils/config.ts @@ -0,0 +1,26 @@ +"use server"; + +import * as yaml from 'js-yaml'; +import { logger } from "@/utils/logger"; + +const log = logger.child({ module: "utils" }); + +export interface OidcConfig { + authServerUrl: string | null; + clientId: string | null; + clientSecret: string | null; +} + +export interface GlobalSecurityConfig { + oidc: OidcConfig | null; +} + +export interface ConsoleConfig { + security: GlobalSecurityConfig | null; +} + +export default async function config(): Promise { + const cfg = yaml.load(process.env.CONSOLE_CONFIG!) as ConsoleConfig; + log.trace("console configuration loaded"); + return cfg; +} diff --git a/ui/utils/env.ts b/ui/utils/env.ts index c413be6c4..8c716b2bd 100644 --- a/ui/utils/env.ts +++ b/ui/utils/env.ts @@ -3,14 +3,6 @@ export const isReadonly = (() => { return true; } - if ( - process.env.NEXT_PUBLIC_KEYCLOAK_URL && - process.env.KEYCLOAK_CLIENTID && - process.env.KEYCLOAK_CLIENTSECRET - ) { - return false; - } - return true; })();