@@ -152,7 +152,7 @@ metadata:
     categories: OpenShift Optional, Logging & Tracing
     certified: "false"
     containerImage: docker.io/grafana/loki-operator:0.8.0
-    createdAt: "2025-11-03T17:23:38Z"
+    createdAt: "2025-12-03T14:54:31Z"
     description: The Community Loki Operator provides Kubernetes native deployment
       and management of Loki and related logging components.
     features.operators.openshift.io/disconnected: "true"
@@ -1739,6 +1739,14 @@ spec:
           - create
           - get
           - update
+        - apiGroups:
+          - discovery.k8s.io
+          resources:
+          - endpointslices
+          verbs:
+          - get
+          - list
+          - watch
         - apiGroups:
           - loki.grafana.com
           resources:
@@ -152,7 +152,7 @@ metadata:
     categories: OpenShift Optional, Logging & Tracing
     certified: "false"
     containerImage: docker.io/grafana/loki-operator:0.8.0
-    createdAt: "2025-11-03T17:23:36Z"
+    createdAt: "2025-12-03T14:54:30Z"
     description: The Community Loki Operator provides Kubernetes native deployment
       and management of Loki and related logging components.
     operators.operatorframework.io/builder: operator-sdk-unknown
@@ -1719,6 +1719,14 @@ spec:
           - create
           - get
           - update
+        - apiGroups:
+          - discovery.k8s.io
+          resources:
+          - endpointslices
+          verbs:
+          - get
+          - list
+          - watch
         - apiGroups:
           - loki.grafana.com
           resources:
@@ -152,7 +152,7 @@ metadata:
     categories: OpenShift Optional, Logging & Tracing
     certified: "false"
     containerImage: quay.io/openshift-logging/loki-operator:0.1.0
-    createdAt: "2025-11-03T17:23:40Z"
+    createdAt: "2025-12-03T14:54:33Z"
     description: |
       The Loki Operator for OCP provides a means for configuring and managing a Loki stack for cluster logging.
       ## Prerequisites and Requirements
@@ -1724,6 +1724,14 @@ spec:
           - create
           - get
           - update
+        - apiGroups:
+          - discovery.k8s.io
+          resources:
+          - endpointslices
+          verbs:
+          - get
+          - list
+          - watch
         - apiGroups:
           - loki.grafana.com
           resources:
operator/config/rbac/role.yaml (8 additions, 0 deletions)
@@ -77,6 +77,14 @@ rules:
   - create
   - get
   - update
+- apiGroups:
+  - discovery.k8s.io
+  resources:
+  - endpointslices
+  verbs:
+  - get
+  - list
+  - watch
 - apiGroups:
   - loki.grafana.com
   resources:
operator/hack/deploy-noobaa-storage-secret.sh (60 additions, 0 deletions)
@@ -0,0 +1,60 @@
#!/usr/bin/env bash
#
# usage: deploy-noobaa-storage-secret.sh <bucket-claim-name>
#
# This script deploys a LokiStack object storage Secret holding the
# credentials to access the in-cluster NooBaa S3 endpoint
# (https://s3.openshift-storage.svc). The bucket name is read from the
# ConfigMap and the access credentials from the Secret that NooBaa
# creates for the ObjectBucketClaim, both looked up by the claim name
# in $NAMESPACE (default: openshift-logging).
#
# bucket-claim-name is the name of the ObjectBucketClaim whose ConfigMap
# and Secret hold the bucket name and credentials. The BUCKET_NAME,
# ACCESS_KEY_ID and SECRET_ACCESS_KEY environment variables can be set
# to override the values read from the cluster.
#

set -euo pipefail

readonly bucket_claim_name=${1-}

if [[ -z "${bucket_claim_name}" ]]; then
  echo "Provide a bucket claim name"
  exit 1
fi

readonly namespace=${NAMESPACE:-openshift-logging}

# Bucket name and static credentials from the bucket claim's ConfigMap and Secret.
bucket_name=${BUCKET_NAME:-$(oc -n "${namespace}" get configmap "${bucket_claim_name}" -o json | jq -r '.data.BUCKET_NAME')}
readonly bucket_name
access_key_id=${ACCESS_KEY_ID:-$(oc -n "${namespace}" get secret "${bucket_claim_name}" -o json | jq -r '.data.AWS_ACCESS_KEY_ID' | base64 -d)}
readonly access_key_id
secret_access_key=${SECRET_ACCESS_KEY:-$(oc -n "${namespace}" get secret "${bucket_claim_name}" -o json | jq -r '.data.AWS_SECRET_ACCESS_KEY' | base64 -d)}
readonly secret_access_key


create_secret_args=( \
  --from-literal=bucketnames="${bucket_name}" \
  --from-literal=access_key_id="${access_key_id}" \
  --from-literal=access_key_secret="${secret_access_key}" \
  --from-literal=endpoint="https://s3.openshift-storage.svc" \
)

kubectl -n "${namespace}" delete secret test --ignore-not-found=true
kubectl -n "${namespace}" create secret generic test "${create_secret_args[@]}"

cat << 'EOF'

Remember to update your LokiStack CR with:
spec:
  storage:
    tls:
      caName: openshift-service-ca.crt

EOF
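A typical invocation, assuming a NooBaa ObjectBucketClaim named loki-bucket (a hypothetical name) whose ConfigMap and Secret already exist, would be `NAMESPACE=openshift-logging ./operator/hack/deploy-noobaa-storage-secret.sh loki-bucket`; the script then prints the LokiStack TLS reminder shown above.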
@@ -130,6 +130,7 @@ type LokiStackReconciler struct {
 // +kubebuilder:rbac:groups=route.openshift.io,resources=routes,verbs=get;list;watch;create;update;delete
 // +kubebuilder:rbac:groups=cloudcredential.openshift.io,resources=credentialsrequests,verbs=get;list;watch;create;update;delete
 // +kubebuilder:rbac:groups=networking.k8s.io,resources=networkpolicies,verbs=get;list;watch;create;update;delete
+// +kubebuilder:rbac:groups=discovery.k8s.io,resources=endpointslices,verbs=get;list;watch

 // Reconcile is part of the main kubernetes reconciliation loop which aims to
 // move the current state of the cluster closer to the desired state.
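This kubebuilder marker is what controller-gen consumes when regenerating RBAC manifests, so the config/rbac/role.yaml hunk above is presumably the generated counterpart of this one-line change (assuming the operator follows the standard operator-sdk manifest-generation flow).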
operator/internal/handlers/internal/networkpolicy/service.go (175 additions, 0 deletions)
@@ -0,0 +1,175 @@
package networkpolicy

import (
    "context"
    "errors"
    "net/url"
    "strconv"
    "strings"

    "github.com/go-logr/logr"
    corev1 "k8s.io/api/core/v1"
    discoveryv1 "k8s.io/api/discovery/v1"
    "k8s.io/apimachinery/pkg/util/intstr"
    "sigs.k8s.io/controller-runtime/pkg/client"

    "github.com/grafana/loki/operator/internal/external/k8s"
    "github.com/grafana/loki/operator/internal/manifests/storage"
)

var (
    errInvalidServiceEndpoint = errors.New("couldn't parse target object storage service endpoint")
    errMissingEndpointSlices  = errors.New("no endpoint slices found for target object storage service")
    errMissingTargetPort      = errors.New("couldn't resolve target object storage service port to target Pod port")
    errMissingDefaultPort     = errors.New("couldn't resolve default ports to target ports")
)

// ServicePortToPodPort resolves the port of an in-cluster object storage
// service endpoint (for example NooBaa's s3.openshift-storage.svc) to the
// target Pod ports backing that service, so that a NetworkPolicy can allow
// egress to them. It returns an empty slice when the endpoint does not point
// at an in-cluster Service.
func ServicePortToPodPort(ctx context.Context, log logr.Logger, k k8s.Client, objStore storage.Options) ([]int32, error) {
    if objStore.S3 == nil {
        return []int32{}, nil
    }
    endpoint := objStore.S3.Endpoint

    // Check if the endpoint contains a Kubernetes Service DNS pattern
    if !strings.Contains(endpoint, ".svc") {
        return []int32{}, nil
    }

    serviceName, namespace, endpointPort, https := parseServiceEndpoint(endpoint)
    if serviceName == "" || namespace == "" {
        return []int32{}, errInvalidServiceEndpoint
    }

    // List EndpointSlices for the service using the standard label
    endpointSlices := &discoveryv1.EndpointSliceList{}
    if err := k.List(ctx, endpointSlices, client.InNamespace(namespace), client.MatchingLabels{discoveryv1.LabelServiceName: serviceName}); err != nil {
        log.Error(err, "failed to list endpoint slices for target object storage service", "service", serviceName, "namespace", namespace)
        return []int32{}, err
    }

    if len(endpointSlices.Items) == 0 {
        log.Error(errMissingEndpointSlices, "no endpoint slices found for target object storage service", "service", serviceName, "namespace", namespace)
        return []int32{}, errMissingEndpointSlices
    }

    // Case 1: Port specified in URL
    if endpointPort > 0 {
        // If the Service and the Pod share the same port, return it directly.
        for _, slice := range endpointSlices.Items {
            for _, p := range slice.Ports {
                if p.Port != nil && *p.Port == endpointPort {
                    return []int32{*p.Port}, nil
                }
            }
        }

        // The port is not in the EndpointSlices, so it is likely a service
        // port that needs to be resolved via the Service.
        service := &corev1.Service{}
        if err := k.Get(ctx, client.ObjectKey{Name: serviceName, Namespace: namespace}, service); err != nil {
            log.Error(err, "failed to get target object storage service", "service", serviceName, "namespace", namespace)
            return []int32{}, err
        }

        var targetPort int32
        for _, sp := range service.Spec.Ports {
            if sp.Port == endpointPort {
                targetPort = resolveServicePortToTarget(endpointSlices.Items, sp)
                if targetPort != 0 {
                    break
                }
            }
        }

        if targetPort == 0 {
            return []int32{}, errMissingTargetPort
        }

        return []int32{targetPort}, nil
    }

    // Case 2: No port specified - default to 443/80 and resolve their target ports
    service := &corev1.Service{}
    if err := k.Get(ctx, client.ObjectKey{Name: serviceName, Namespace: namespace}, service); err != nil {
        log.Error(err, "failed to get service for default port resolution", "service", serviceName, "namespace", namespace)
        return []int32{}, err
    }

    defaultPort := int32(80)
    if https {
        defaultPort = int32(443)
    }

    for _, sp := range service.Spec.Ports {
        if sp.Port == defaultPort {
            targetPort := resolveServicePortToTarget(endpointSlices.Items, sp)
            if targetPort == 0 {
                return []int32{}, errMissingDefaultPort
            }
            return []int32{targetPort}, nil
        }
    }

    return []int32{}, errMissingDefaultPort
}

// parseServiceEndpoint splits an in-cluster service endpoint such as
// "https://s3.openshift-storage.svc:443" into its service name, namespace,
// port (0 when absent), and whether the scheme is HTTPS.
func parseServiceEndpoint(endpoint string) (string, string, int32, bool) {
    https := strings.HasPrefix(endpoint, "https://")

    var host string
    var portStr string
    if strings.HasPrefix(endpoint, "http://") || strings.HasPrefix(endpoint, "https://") {
        parsedURL, err := url.Parse(endpoint)
        if err != nil {
            return "", "", 0, false
        }
        host = parsedURL.Hostname()
        portStr = parsedURL.Port()
    } else {
        // Bare hostname:port format
        host = endpoint
        if idx := strings.LastIndex(endpoint, ":"); idx != -1 {
            possiblePort := endpoint[idx+1:]
            if _, err := strconv.Atoi(possiblePort); err == nil {
                host = endpoint[:idx]
                portStr = possiblePort
            }
        }
    }

    parts := strings.Split(host, ".")
    if len(parts) < 3 {
        return "", "", 0, false
    }

    serviceName := parts[0]
    namespace := parts[1]

    var port int32
    if portStr != "" {
        p, err := strconv.Atoi(portStr)
        if err != nil {
            return "", "", 0, false
        }
        port = int32(p)
    }

    return serviceName, namespace, port, https
}
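
// For illustration: parseServiceEndpoint("https://s3.openshift-storage.svc:443")
// returns ("s3", "openshift-storage", 443, true), while the bare form
// "s3.openshift-storage.svc.cluster.local:80" returns
// ("s3", "openshift-storage", 80, false).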

// resolveServicePortToTarget maps a Service port to the matching Pod port
// advertised by the given EndpointSlices, returning 0 when no match exists.
func resolveServicePortToTarget(slices []discoveryv1.EndpointSlice, servicePort corev1.ServicePort) int32 {
    for _, slice := range slices {
        for _, p := range slice.Ports {
            if p.Port == nil {
                continue
            }
            switch servicePort.TargetPort.Type {
            case intstr.Int:
                if *p.Port == servicePort.TargetPort.IntVal {
                    return *p.Port
                }
            case intstr.String:
                if p.Name != nil && *p.Name == servicePort.TargetPort.StrVal {
                    return *p.Port
                }
            }
        }
    }
    return 0
}
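
To make the resolution path concrete, here is a test-style sketch (not part of this diff) that drives ServicePortToPodPort through Case 1 using controller-runtime's fake client. It assumes the operator's internal k8s.Client interface is satisfied by a controller-runtime client.Client and that storage.S3StorageConfig exposes the Endpoint field used above; the test and object names are illustrative.

package networkpolicy_test

import (
    "context"
    "testing"

    "github.com/go-logr/logr"
    corev1 "k8s.io/api/core/v1"
    discoveryv1 "k8s.io/api/discovery/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/util/intstr"
    "sigs.k8s.io/controller-runtime/pkg/client/fake"

    "github.com/grafana/loki/operator/internal/handlers/internal/networkpolicy"
    "github.com/grafana/loki/operator/internal/manifests/storage"
)

func TestServicePortToPodPort_ResolvesTargetPort(t *testing.T) {
    // Service port 443 forwards to Pod port 6443.
    svc := &corev1.Service{
        ObjectMeta: metav1.ObjectMeta{Name: "s3", Namespace: "openshift-storage"},
        Spec: corev1.ServiceSpec{
            Ports: []corev1.ServicePort{{Port: 443, TargetPort: intstr.FromInt(6443)}},
        },
    }

    // EndpointSlice advertising the backing Pod port, labeled with the
    // standard kubernetes.io/service-name label the resolver lists by.
    podPort := int32(6443)
    slice := &discoveryv1.EndpointSlice{
        ObjectMeta: metav1.ObjectMeta{
            Name:      "s3-x2x9k",
            Namespace: "openshift-storage",
            Labels:    map[string]string{discoveryv1.LabelServiceName: "s3"},
        },
        AddressType: discoveryv1.AddressTypeIPv4,
        Ports:       []discoveryv1.EndpointPort{{Port: &podPort}},
    }

    c := fake.NewClientBuilder().WithObjects(svc, slice).Build()

    // Assumption: the fake client satisfies the internal k8s.Client interface.
    ports, err := networkpolicy.ServicePortToPodPort(context.Background(), logr.Discard(), c,
        storage.Options{S3: &storage.S3StorageConfig{Endpoint: "https://s3.openshift-storage.svc:443"}})
    if err != nil {
        t.Fatal(err)
    }
    if len(ports) != 1 || ports[0] != 6443 {
        t.Fatalf("expected [6443], got %v", ports)
    }
}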