diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 526a8ea..be6e186 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -3,18 +3,20 @@ name: CI/CD Pipeline
permissions:
actions: read
contents: read
- security-events: write
on:
push:
- branches: [ main, develop ]
+ branches: [ main ]
pull_request:
branches: [ main ]
env:
- DOCKER_REGISTRY: otomato
- DOCKER_IMAGE: khook
- GO_VERSION: '1.21'
+ # Cache key components for better organization
+ CACHE_KEY_PREFIX: kagent-v2
+ BRANCH_CACHE_KEY: ${{ github.head_ref || github.ref_name }}
+ # Consistent builder configuration
+ BUILDX_BUILDER_NAME: kagent-builder-v0.23.0
+ BUILDX_VERSION: v0.23.0
jobs:
test:
@@ -26,8 +28,6 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v4
- with:
- go-version: ${{ env.GO_VERSION }}
- name: Cache Go modules
uses: actions/cache@v3
@@ -58,100 +58,31 @@ jobs:
flags: unittests
name: codecov-umbrella
- build:
- name: Build
- runs-on: ubuntu-latest
- needs: test
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
-
- - name: Set up Go
- uses: actions/setup-go@v4
- with:
- go-version: ${{ env.GO_VERSION }}
-
- - name: Build binary
- run: make build
-
- - name: Upload binary artifact
- uses: actions/upload-artifact@v4
- with:
- name: khook-controller
- path: bin/manager
-
docker:
- name: Docker Build and Push
+ services:
+ registry:
+ image: registry:2
+ ports:
+ - 5001:5000
+ name: Docker Build
runs-on: ubuntu-latest
- needs: [test, build]
- if: github.ref == 'refs/heads/main'
steps:
- name: Checkout code
uses: actions/checkout@v4
+ - name: Set up QEMU
+ uses: docker/setup-qemu-action@v3
+ with:
+ platforms: linux/amd64,linux/arm64
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
-
- - name: Login to Docker Hub
- uses: docker/login-action@v3
with:
- username: ${{ secrets.DOCKERHUB_USER }}
- password: ${{ secrets.DOCKERHUB_PASSWORD }}
-
- - name: Extract metadata
- id: meta
- uses: docker/metadata-action@v5
- with:
- images: ${{ env.DOCKER_REGISTRY }}/${{ env.DOCKER_IMAGE }}
- tags: |
- type=ref,event=branch
- type=ref,event=pr
- type=sha,prefix={{branch}}-
- type=raw,value=latest,enable={{is_default_branch}}
-
- - name: Build and push Docker image
- uses: docker/build-push-action@v5
- with:
- context: .
+ name: ${{ env.BUILDX_BUILDER_NAME }}
+ version: ${{ env.BUILDX_VERSION }}
platforms: linux/amd64,linux/arm64
- push: true
- tags: |
- ${{ env.DOCKER_REGISTRY }}/${{ env.DOCKER_IMAGE }}:${{ github.sha }}
- ${{ env.DOCKER_REGISTRY }}/${{ env.DOCKER_IMAGE }}:latest
- labels: ${{ steps.meta.outputs.labels }}
- cache-from: type=gha
- cache-to: type=gha,mode=max
-
- security:
- name: Security Scan
- runs-on: ubuntu-latest
- needs: test
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
-
- - name: Run Gosec Security Scanner
- uses: securego/gosec@master
- with:
- args: './...'
-
- - name: Run Trivy vulnerability scanner
- uses: aquasecurity/trivy-action@master
- with:
- scan-type: 'fs'
- scan-ref: '.'
- format: 'sarif'
- output: 'trivy-results.sarif'
-
- - name: Check for Trivy SARIF file
- run: |
- if [ ! -f "trivy-results.sarif" ]; then
- echo "Trivy results file not found!"
- exit 1
- fi
-
- - name: Upload Trivy scan results to GitHub Security tab
- uses: github/codeql-action/upload-sarif@v3
- if: always()
- with:
- sarif_file: 'trivy-results.sarif'
+      use: true
+ driver-opts: network=host
+ - name: Build Docker image
+ env:
+ DOCKER_BUILD_ARGS: --push --platform linux/amd64,linux/arm64
+ run: make docker-build
\ No newline at end of file
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 75e4d0d..c4653e2 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -3,102 +3,103 @@ name: Release
on:
push:
tags:
- - 'v*'
+ - "v*.*.*"
env:
- DOCKER_REGISTRY: otomato
- DOCKER_IMAGE: khook
- GO_VERSION: '1.21'
+ # Cache key components for better organization
+ CACHE_KEY_PREFIX: kagent-v2
+ BRANCH_CACHE_KEY: ${{ github.head_ref || github.ref_name }}
+ # Consistent builder configuration
+ BUILDX_BUILDER_NAME: kagent-builder-v0.23.0
+ BUILDX_VERSION: v0.23.0
+ DOCKER_REGISTRY: ghcr.io
+ DOCKER_REPO: kagent-dev/khook
jobs:
- release:
- name: Create Release
+ push-images:
+ services:
+ registry:
+ image: registry:2
+ ports:
+ - 5001:5000
+ name: Docker Build
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- with:
- fetch-depth: 0
- - name: Set up Go
- uses: actions/setup-go@v4
+ - name: Set up QEMU
+ uses: docker/setup-qemu-action@v3
with:
- go-version: ${{ env.GO_VERSION }}
-
- - name: Build binaries
- run: |
- make build
- # Build for multiple architectures
- GOOS=linux GOARCH=amd64 go build -o bin/manager-linux-amd64 cmd/main.go
- GOOS=linux GOARCH=arm64 go build -o bin/manager-linux-arm64 cmd/main.go
- GOOS=darwin GOARCH=amd64 go build -o bin/manager-darwin-amd64 cmd/main.go
- GOOS=darwin GOARCH=arm64 go build -o bin/manager-darwin-arm64 cmd/main.go
- GOOS=windows GOARCH=amd64 go build -o bin/manager-windows-amd64.exe cmd/main.go
-
+ platforms: linux/amd64,linux/arm64
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
+ with:
+ name: ${{ env.BUILDX_BUILDER_NAME }}
+ version: ${{ env.BUILDX_VERSION }}
+ platforms: linux/amd64,linux/arm64
+      use: true
+ driver-opts: network=host
- - name: Login to Docker Hub
+ - name: Login to GitHub Container Registry
uses: docker/login-action@v3
with:
- username: ${{ secrets.DOCKER_USERNAME }}
- password: ${{ secrets.DOCKER_PASSWORD }}
+ registry: ghcr.io
+ username: ${{ github.actor }}
+ password: ${{ secrets.GITHUB_TOKEN }}
+ - name: Build Docker image
+ env:
+ DOCKER_BUILD_ARGS: --push --platform linux/amd64,linux/arm64
+ run: make docker-build
- - name: Extract tag name
- id: tag
- run: echo "tag=${GITHUB_REF#refs/tags/}" >> $GITHUB_OUTPUT
+ push-helm-chart:
+ needs:
+ - push-images
+ runs-on: ubuntu-latest
+ permissions:
+ contents: read
+ packages: write
+ steps:
+ - name: 'Checkout GitHub Action'
+        uses: actions/checkout@v4
- - name: Build and push Docker image
- uses: docker/build-push-action@v5
+ - name: Login to GitHub Container Registry
+ uses: docker/login-action@v3
with:
- context: .
- platforms: linux/amd64,linux/arm64
- push: true
- tags: |
- ${{ env.DOCKER_REGISTRY }}/${{ env.DOCKER_IMAGE }}:${{ steps.tag.outputs.tag }}
- ${{ env.DOCKER_REGISTRY }}/${{ env.DOCKER_IMAGE }}:latest
- cache-from: type=gha
- cache-to: type=gha,mode=max
+ registry: ghcr.io
+ username: ${{ github.actor }}
+ password: ${{ secrets.GITHUB_TOKEN }}
+ - name: Build Helm chart
+ run: make helm-publish
- - name: Generate release manifests
- run: |
- mkdir -p release
- # Generate install manifest
- cat > release/install.yaml << EOF
- # KHook Controller Installation Manifest
- # Version: ${{ steps.tag.outputs.tag }}
- ---
- EOF
- cat config/crd/bases/kagent.dev_hooks.yaml >> release/install.yaml
- echo "---" >> release/install.yaml
- # Add RBAC and deployment manifests here when they exist
-
- # Generate CRD-only manifest
- cp config/crd/bases/kagent.dev_hooks.yaml release/crds.yaml
+ release:
+ needs:
+ - push-helm-chart
+ name: Create Release
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+ with:
+ fetch-depth: 0
- name: Create GitHub Release
- uses: softprops/action-gh-release@v1
+ uses: softprops/action-gh-release@v2
+ if: startsWith(github.ref, 'refs/tags/')
with:
- tag_name: ${{ steps.tag.outputs.tag }}
- name: Release ${{ steps.tag.outputs.tag }}
draft: false
prerelease: false
generate_release_notes: true
- files: |
- bin/manager-*
- release/install.yaml
- release/crds.yaml
- body: |
- ## Docker Images
-
- ```bash
- docker pull ${{ env.DOCKER_REGISTRY }}/${{ env.DOCKER_IMAGE }}:${{ steps.tag.outputs.tag }}
- ```
-
+ body: |
## Installation
```bash
- kubectl apply -f https://github.com/${{ github.repository }}/releases/download/${{ steps.tag.outputs.tag }}/install.yaml
+ helm install khook-crds oci://ghcr.io/kagent-dev/khook/helm/khook-crds \
+ --namespace kagent \
+ --create-namespace
+ helm install khook oci://ghcr.io/kagent-dev/khook/helm/khook \
+ --namespace kagent \
+ --create-namespace
```
## What's Changed
diff --git a/.gitignore b/.gitignore
index 1ed7684..3bca95b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -61,8 +61,8 @@ coverage.html
.env.*.local
# Helm charts
-charts/*/charts/
-charts/*/requirements.lock
+helm/*/charts/
+helm/*/requirements.lock
# Generated files
*.pb.go
diff --git a/Dockerfile b/Dockerfile
index e4e4055..5f468b6 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,21 +1,33 @@
# Build the manager binary
-FROM golang:1.24 AS builder
+ARG BUILDPLATFORM
+FROM --platform=$BUILDPLATFORM golang:1.24 AS builder
+ARG TARGETARCH
+ARG TARGETOS
+# This is used to print the build platform in the logs
+ARG BUILDPLATFORM
+
WORKDIR /workspace
# Copy the Go Modules manifests
COPY go.mod go.mod
COPY go.sum go.sum
+
# cache deps before building and copying source so that we don't need to re-download as much
# and so that source changes don't invalidate our downloaded layer
-RUN go mod download
+RUN --mount=type=cache,target=/root/go/pkg/mod,rw \
+ --mount=type=cache,target=/root/.cache/go-build,rw \
+ go mod download
# Copy the go source
COPY cmd/ cmd/
COPY api/ api/
COPY internal/ internal/
-# Build
-RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -a -o manager cmd/main.go
+ARG LDFLAGS
+RUN --mount=type=cache,target=/root/go/pkg/mod,rw \
+ --mount=type=cache,target=/root/.cache/go-build,rw \
+ echo "Building on $BUILDPLATFORM -> linux/$TARGETARCH" && \
+ CGO_ENABLED=0 GOOS=${TARGETOS:-linux} GOARCH=${TARGETARCH} go build -a -ldflags "$LDFLAGS" -o manager cmd/main.go
# Use distroless as minimal base image to package the manager binary
# Refer to https://github.com/GoogleContainerTools/distroless for more details
@@ -24,4 +36,11 @@ WORKDIR /
COPY --from=builder /workspace/manager .
USER 65532:65532
+ARG VERSION
+
+LABEL org.opencontainers.image.source=https://github.com/kagent-dev/khook
+LABEL org.opencontainers.image.description="Khook is the controller for running hooks for agents."
+LABEL org.opencontainers.image.authors="Kagent Creators 🤖"
+LABEL org.opencontainers.image.version="$VERSION"
+
ENTRYPOINT ["/manager"]
\ No newline at end of file
diff --git a/Makefile b/Makefile
index 989e068..a4b652b 100644
--- a/Makefile
+++ b/Makefile
@@ -1,9 +1,33 @@
# Image URL to use all building/pushing image targets
-IMG ?= kagent/hook-controller:latest
-DOCKER_REGISTRY ?= otomato
+
+# Image configuration
+DOCKER_REGISTRY ?= localhost:5001
+BASE_IMAGE_REGISTRY ?= ghcr.io
+DOCKER_REPO ?= kagent-dev/khook
+HELM_REPO ?= oci://ghcr.io/kagent-dev
+HELM_DIST_FOLDER ?= dist
+
+BUILD_DATE := $(shell date -u '+%Y-%m-%d')
+GIT_COMMIT := $(shell git rev-parse --short HEAD || echo "unknown")
+VERSION ?= $(shell git describe --tags --always 2>/dev/null | grep v || echo "v0.0.0-$(GIT_COMMIT)")
+
+# Local architecture detection to build for the current platform
+LOCALARCH ?= $(shell uname -m | sed 's/x86_64/amd64/' | sed 's/aarch64/arm64/')
+
+
+# Docker buildx configuration
+BUILDKIT_VERSION = v0.23.0
+export BUILDX_NO_DEFAULT_ATTESTATIONS=1
+BUILDX_BUILDER_NAME ?= kagent-builder-$(BUILDKIT_VERSION)
+
+DOCKER_BUILDER ?= docker buildx
+DOCKER_BUILD_ARGS ?= --push --platform linux/$(LOCALARCH)
+KIND_CLUSTER_NAME ?= kagent
+
DOCKER_IMAGE ?= khook
-GIT_HASH ?= $(shell git rev-parse --short HEAD)
-DOCKER_TAG ?= $(GIT_HASH)
+
+IMG ?= $(DOCKER_REGISTRY)/$(DOCKER_REPO)/$(DOCKER_IMAGE):$(VERSION)
+
# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set)
ifeq (,$(shell go env GOBIN))
@@ -51,33 +75,30 @@ test: fmt vet ## Run tests.
build: fmt vet ## Build manager binary.
go build -o bin/manager cmd/main.go
+.PHONY: generate
+generate: ## Generate code and manifests (CRDs, RBAC, webhooks)
+ $(shell go env GOPATH)/bin/controller-gen object:headerFile="hack/boilerplate.go.txt" paths="./api/..."
+ $(shell go env GOPATH)/bin/controller-gen crd:allowDangerousTypes=true paths="./api/..." output:crd:artifacts:config=config/crd/bases
+ cp config/crd/bases/kagent.dev_hooks.yaml helm/khook-crds/crds/kagent.dev_hooks.yaml
+
.PHONY: run
run: fmt vet ## Run a controller from your host.
go run ./cmd/main.go
.PHONY: docker-build
-docker-build: ## Build docker image with the manager.
- docker build -t ${IMG} .
-
-.PHONY: docker-push
-docker-push: ## Push docker image with the manager.
- docker push ${IMG}
+docker-build:
+ $(DOCKER_BUILDER) build --build-arg VERSION=$(VERSION) $(DOCKER_BUILD_ARGS) -t $(IMG) .
-.PHONY: docker-build-hash
-docker-build-hash: ## Build docker image with git hash tag.
- docker build -t $(DOCKER_REGISTRY)/$(DOCKER_IMAGE):$(DOCKER_TAG) .
- docker tag $(DOCKER_REGISTRY)/$(DOCKER_IMAGE):$(DOCKER_TAG) $(DOCKER_REGISTRY)/$(DOCKER_IMAGE):latest
-
-.PHONY: docker-push-hash
-docker-push-hash: docker-build-hash ## Build and push docker image with git hash tag to Docker Hub.
- docker push $(DOCKER_REGISTRY)/$(DOCKER_IMAGE):$(DOCKER_TAG)
- docker push $(DOCKER_REGISTRY)/$(DOCKER_IMAGE):latest
+##@ Deployment
-.PHONY: docker-login
-docker-login: ## Login to Docker Hub (requires DOCKER_USERNAME and DOCKER_PASSWORD env vars).
- @echo "$$DOCKER_PASSWORD" | docker login -u "$$DOCKER_USERNAME" --password-stdin
+.PHONY: create-kind-cluster
+create-kind-cluster:
+ bash ./scripts/kind/setup-kind.sh
+ bash ./scripts/kind/setup-metallb.sh
-##@ Deployment
+.PHONY: delete-kind-cluster
+delete-kind-cluster:
+ kind delete cluster --name $(KIND_CLUSTER_NAME)
.PHONY: install
install: ## Install CRDs into the K8s cluster specified in ~/.kube/config.
@@ -109,25 +130,43 @@ kustomize-build: ## Build kustomized manifests.
##@ Helm
+.PHONY: helm-cleanup
+helm-cleanup:
+ rm -f ./$(HELM_DIST_FOLDER)/*.tgz
+
+.PHONY: helm-version
+helm-version:
+ VERSION=$(VERSION) envsubst < helm/khook-crds/Chart-template.yaml > helm/khook-crds/Chart.yaml
+ VERSION=$(VERSION) envsubst < helm/khook/Chart-template.yaml > helm/khook/Chart.yaml
+ helm dependency update helm/khook
+ helm dependency update helm/khook-crds
+ helm package -d $(HELM_DIST_FOLDER) helm/khook-crds
+ helm package -d $(HELM_DIST_FOLDER) helm/khook
+
+.PHONY: helm-publish
+helm-publish: helm-version
+ helm push ./$(HELM_DIST_FOLDER)/khook-crds-$(VERSION).tgz $(HELM_REPO)/khook/helm
+ helm push ./$(HELM_DIST_FOLDER)/khook-$(VERSION).tgz $(HELM_REPO)/khook/helm
+
.PHONY: helm-lint
helm-lint: ## Lint Helm chart.
- helm lint charts/khook-controller
+ helm lint helm/khook
.PHONY: helm-template
-helm-template: ## Generate Helm templates.
- helm template khook charts/khook-controller
+helm-template: helm-version ## Generate Helm templates.
+ helm template khook helm/khook
.PHONY: helm-install
-helm-install: ## Install Helm chart.
- helm install khook charts/khook-controller \
+helm-install: helm-version ## Install Helm chart.
+ helm install khook helm/khook \
--namespace kagent \
--create-namespace
.PHONY: helm-upgrade
-helm-upgrade: ## Upgrade Helm chart.
- helm upgrade khook charts/khook-controller \
+helm-upgrade: helm-version ## Upgrade Helm chart.
+ helm upgrade khook helm/khook \
--namespace kagent
.PHONY: helm-uninstall
-helm-uninstall: ## Uninstall Helm chart.
+helm-uninstall: helm-version ## Uninstall Helm chart.
helm uninstall khook --namespace kagent
\ No newline at end of file
diff --git a/README.md b/README.md
index 7d546e0..6e8ff6b 100644
--- a/README.md
+++ b/README.md
@@ -2,8 +2,8 @@
-
-
+
+
@@ -68,14 +68,14 @@ For how agents respond with either a Message or a Task in A2A, see “Life of a
1. **Install via Helm (recommended)**:
```bash
- git clone https://github.com/antweiss/khook.git
+ git clone https://github.com/kagent-dev/khook.git
cd khook
# Install CRDs first
- helm install khook-crds ./charts/khook-crds \
+ helm install khook-crds ./helm/khook-crds \
--namespace kagent \
--create-namespace
# Install controller
- helm install khook ./charts/khook-controller \
+ helm install khook ./helm/khook \
--namespace kagent \
--create-namespace
```
@@ -83,11 +83,11 @@ For how agents respond with either a Message or a Task in A2A, see “Life of a
One-liner (no checkout):
```bash
TMP_DIR="$(mktemp -d)" && \
- git clone --depth 1 https://github.com/antweiss/khook.git "$TMP_DIR/khook" && \
- helm install khook-crds "$TMP_DIR/khook/charts/khook-crds" \
+ git clone --depth 1 https://github.com/kagent-dev/khook.git "$TMP_DIR/khook" && \
+ helm install khook-crds "$TMP_DIR/khook/helm/khook-crds" \
--namespace kagent \
--create-namespace && \
- helm install khook "$TMP_DIR/khook/charts/khook-controller" \
+ helm install khook "$TMP_DIR/khook/helm/khook" \
--namespace kagent \
--create-namespace && \
rm -rf "$TMP_DIR"
@@ -391,10 +391,10 @@ Health check endpoints are available on port 8081:
**Solutions**:
```bash
# Check controller logs
-kubectl logs -n kagent deployment/khook-controller
+kubectl logs -n kagent deployment/khook
# Verify RBAC permissions
-kubectl auth can-i get events --as=system:serviceaccount:kagent:khook-controller
+kubectl auth can-i get events --as=system:serviceaccount:kagent:khook
# Check hook status
kubectl describe hook your-hook-name
@@ -415,11 +415,11 @@ kubectl describe hook your-hook-name
kubectl get secret kagent-credentials -o yaml
# Test API connectivity from controller pod
-kubectl exec -n kagent deployment/khook-controller -- \
+kubectl exec -n kagent deployment/khook -- \
curl -H "Authorization: Bearer $KAGENT_API_KEY" $KAGENT_BASE_URL/health
# Check controller logs for API errors
-kubectl logs -n kagent deployment/khook-controller | grep "kagent-api"
+kubectl logs -n kagent deployment/khook | grep "kagent-api"
```
#### Events Not Being Deduplicated
@@ -437,10 +437,10 @@ kubectl logs -n kagent deployment/khook-controller | grep "kagent-api"
kubectl get pods -n kagent
# Verify leader election is working
-kubectl logs -n kagent deployment/khook-controller | grep "leader"
+kubectl logs -n kagent deployment/khook | grep "leader"
# Check system time synchronization
-kubectl exec -n kagent deployment/khook-controller -- date
+kubectl exec -n kagent deployment/khook -- date
```
#### High Memory Usage
@@ -461,7 +461,7 @@ kubectl get hooks -A -o jsonpath='{range .items[*]}{.metadata.name}: {.status.ac
kubectl top pod -n kagent
# Adjust resource limits
-kubectl patch deployment -n kagent khook-controller -p '{"spec":{"template":{"spec":{"containers":[{"name":"manager","resources":{"limits":{"memory":"512Mi"}}}]}}}}'
+kubectl patch deployment -n kagent khook -p '{"spec":{"template":{"spec":{"containers":[{"name":"manager","resources":{"limits":{"memory":"512Mi"}}}]}}}}'
```
### Debug Mode
@@ -469,14 +469,14 @@ kubectl patch deployment -n kagent khook-controller -p '{"spec":{"template":{"sp
Enable debug logging for detailed troubleshooting:
```bash
-kubectl set env deployment/khook-controller -n kagent LOG_LEVEL=debug
+kubectl set env deployment/khook -n kagent LOG_LEVEL=debug
```
### Support
For additional support:
-1. Check the [GitHub Issues](https://github.com/antweiss/khook/issues)
+1. Check the [GitHub Issues](https://github.com/kagent-dev/khook/issues)
2. Review the [troubleshooting guide](docs/troubleshooting.md)
3. Join the [Kagent community](https://community.kagent.dev)
@@ -493,7 +493,7 @@ For additional support:
1. **Clone the repository**:
```bash
- git clone https://github.com/antweiss/khook.git
+ git clone https://github.com/kagent-dev/khook.git
cd khook
```
diff --git a/api/v1alpha2/hook_types.go b/api/v1alpha2/hook_types.go
index fb85323..8c64713 100644
--- a/api/v1alpha2/hook_types.go
+++ b/api/v1alpha2/hook_types.go
@@ -29,10 +29,9 @@ type EventConfiguration struct {
// +kubebuilder:validation:Required
EventType string `json:"eventType"`
- // AgentId specifies the Kagent agent to call when this event occurs
+ // AgentRef specifies the Kagent agent to call when this event occurs
// +kubebuilder:validation:Required
- // +kubebuilder:validation:MinLength=1
- AgentId string `json:"agentId"`
+ AgentRef ObjectReference `json:"agentRef"`
// Prompt specifies the prompt template to send to the agent
// +kubebuilder:validation:Required
@@ -40,6 +39,19 @@ type EventConfiguration struct {
Prompt string `json:"prompt"`
}
+type ObjectReference struct {
+ // Name of the referent.
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MinLength=1
+ Name string `json:"name"`
+
+ // Namespace of the referent.
+ // If unspecified, the namespace of the Hook will be used.
+ // +kubebuilder:validation:Optional
+ Namespace *string `json:"namespace,omitempty"`
+}
+
// HookStatus defines the observed state of Hook
type HookStatus struct {
// ActiveEvents contains the list of currently active events
@@ -51,7 +63,7 @@ type HookStatus struct {
// Validate validates the Hook resource
func (h *Hook) Validate() error {
- if h.Spec.EventConfigurations == nil || len(h.Spec.EventConfigurations) == 0 {
+ if len(h.Spec.EventConfigurations) == 0 {
return fmt.Errorf("at least one event configuration is required")
}
@@ -82,17 +94,17 @@ func (h *Hook) validateEventConfiguration(config EventConfiguration, index int)
return fmt.Errorf("event configuration %d: invalid event type '%s', must be one of: pod-restart, pod-pending, oom-kill, probe-failed", index, config.EventType)
}
- // Validate AgentId
- if strings.TrimSpace(config.AgentId) == "" {
- return fmt.Errorf("event configuration %d: agentId cannot be empty", index)
+ // Validate AgentRef
+ if strings.TrimSpace(config.AgentRef.Name) == "" {
+ return fmt.Errorf("event configuration %d: agentRef.name cannot be empty", index)
}
- if len(config.AgentId) > 100 {
- return fmt.Errorf("event configuration %d: agentId too long: %d characters (max 100)", index, len(config.AgentId))
+ if len(config.AgentRef.Name) > 100 {
+ return fmt.Errorf("event configuration %d: agentId too long: %d characters (max 100)", index, len(config.AgentRef.Name))
}
// Validate agent ID format (alphanumeric, hyphens, underscores only)
- for _, r := range config.AgentId {
+ for _, r := range config.AgentRef.Name {
if !((r >= 'a' && r <= 'z') || (r >= 'A' && r <= 'Z') || (r >= '0' && r <= '9') || r == '-' || r == '_') {
return fmt.Errorf("event configuration %d: agentId contains invalid character '%c', only alphanumeric, hyphens, and underscores allowed", index, r)
}
@@ -382,7 +394,7 @@ func validateHook(hook *Hook) (admission.Warnings, error) {
}
// Validate agentId is not empty
- if strings.TrimSpace(config.AgentId) == "" {
+ if strings.TrimSpace(config.AgentRef.Name) == "" {
allErrs = append(allErrs, fmt.Sprintf("spec.eventConfigurations[%d].agentId: cannot be empty", i))
}
diff --git a/api/v1alpha2/hook_types_test.go b/api/v1alpha2/hook_types_test.go
index 22ba660..471add4 100644
--- a/api/v1alpha2/hook_types_test.go
+++ b/api/v1alpha2/hook_types_test.go
@@ -33,8 +33,10 @@ func TestHookValidation(t *testing.T) {
EventConfigurations: []EventConfiguration{
{
EventType: "pod-restart",
- AgentId: "agent-123",
- Prompt: "Pod has restarted",
+ AgentRef: ObjectReference{
+ Name: "agent-123",
+ },
+ Prompt: "Pod has restarted",
},
},
},
@@ -69,8 +71,10 @@ func TestHookDeepCopy(t *testing.T) {
EventConfigurations: []EventConfiguration{
{
EventType: "pod-restart",
- AgentId: "agent-123",
- Prompt: "Pod has restarted",
+ AgentRef: ObjectReference{
+ Name: "agent-123",
+ },
+ Prompt: "Pod has restarted",
},
},
},
diff --git a/charts/khook-crds/Chart.yaml b/charts/khook-crds/Chart.yaml
deleted file mode 100644
index 893405e..0000000
--- a/charts/khook-crds/Chart.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
-apiVersion: v2
-name: khook-crds
-description: CRDs for the Kagent Hook Controller (hooks.kagent.dev)
-type: application
-version: 0.1.0
-appVersion: "0.1.0"
-
diff --git a/cmd/main.go b/cmd/main.go
index 02811a8..309d579 100644
--- a/cmd/main.go
+++ b/cmd/main.go
@@ -14,10 +14,10 @@ import (
"sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
- kagentv1alpha2 "github.com/antweiss/khook/api/v1alpha2"
- kclient "github.com/antweiss/khook/internal/client"
- "github.com/antweiss/khook/internal/config"
- "github.com/antweiss/khook/internal/workflow"
+ kagentv1alpha2 "github.com/kagent-dev/khook/api/v1alpha2"
+ kclient "github.com/kagent-dev/khook/internal/client"
+ "github.com/kagent-dev/khook/internal/config"
+ "github.com/kagent-dev/khook/internal/workflow"
)
var (
diff --git a/config/crd/bases/kagent.dev_hooks.yaml b/config/crd/bases/kagent.dev_hooks.yaml
index 0998f95..de09607 100644
--- a/config/crd/bases/kagent.dev_hooks.yaml
+++ b/config/crd/bases/kagent.dev_hooks.yaml
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.18.0
+ controller-gen.kubebuilder.io/version: v0.19.0
name: hooks.kagent.dev
spec:
group: kagent.dev
@@ -45,11 +45,24 @@ spec:
items:
description: EventConfiguration defines a single event type configuration
properties:
- agentId:
- description: AgentId specifies the Kagent agent to call when
+ agentRef:
+ description: AgentRef specifies the Kagent agent to call when
this event occurs
- minLength: 1
- type: string
+ properties:
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ minLength: 1
+ type: string
+ namespace:
+ description: |-
+ Namespace of the referent.
+ If unspecified, the namespace of the Hook will be used.
+ type: string
+ required:
+ - name
+ type: object
eventType:
description: EventType specifies the type of Kubernetes event
to monitor
@@ -65,7 +78,7 @@ spec:
minLength: 1
type: string
required:
- - agentId
+ - agentRef
- eventType
- prompt
type: object
diff --git a/docs/deployment.md b/docs/deployment.md
index 821692a..5b09a52 100644
--- a/docs/deployment.md
+++ b/docs/deployment.md
@@ -18,7 +18,7 @@ This method uses Kubernetes native Kustomize for deployment.
#### 1. Clone the Repository
```bash
-git clone https://github.com/antweiss/khook.git
+git clone https://github.com/kagent-dev/khook.git
cd khook
```
@@ -29,11 +29,11 @@ cd khook
kubectl create namespace kagent --dry-run=client -o yaml | kubectl apply -f -
# Install CRDs first
-helm install khook-crds ./charts/kagent-hook-crds \
+helm install khook-crds ./helm/khook-crds \
--namespace kagent \
# Install controller
-helm install khook ./charts/khook-controller \
+helm install khook ./helm/khook \
--namespace kagent \
--create-namespace
```
@@ -223,7 +223,7 @@ make deploy
git pull origin main
# Upgrade release
-helm upgrade khook ./charts/khook-controller \
+helm upgrade khook ./helm/khook \
--namespace kagent
```
diff --git a/docs/installation.md b/docs/installation.md
index 70581f1..2ec0718 100644
--- a/docs/installation.md
+++ b/docs/installation.md
@@ -17,23 +17,23 @@ Install using the Helm charts from this repository (install CRDs first, then con
```bash
# Clone the repository
-git clone https://github.com/antweiss/khook.git
+git clone https://github.com/kagent-dev/khook.git
cd khook
# Create namespace (recommended to pre-create to avoid Helm ownership issues)
kubectl create namespace kagent --dry-run=client -o yaml | kubectl apply -f -
# Install CRDs
-helm install khook-crds ./charts/kagent-hook-crds \
+helm install khook-crds ./helm/khook-crds \
--namespace kagent \
# Install controller with default values
-helm install khook ./charts/khook-controller \
+helm install khook ./helm/khook \
--namespace kagent \
--create-namespace
# Optional: customize API URL and other values
-helm install khook ./charts/khook-controller \
+helm install khook ./helm/khook \
--namespace kagent \
--create-namespace \
--set kagent.apiUrl="https://api.kagent.dev"
@@ -42,17 +42,17 @@ helm install khook ./charts/khook-controller \
kubectl get pods -n kagent
```
-Chart location: charts/khook-controller (see repo tree).
+Chart location: helm/khook (see repo tree).
#### One-liner install
```bash
TMP_DIR="$(mktemp -d)" && \
- git clone --depth 1 https://github.com/antweiss/khook.git "$TMP_DIR/khook" && \
- helm install khook-crds "$TMP_DIR/khook/charts/khook-crds" \
+ git clone --depth 1 https://github.com/kagent-dev/khook.git "$TMP_DIR/khook" && \
+ helm install khook-crds "$TMP_DIR/khook/helm/khook-crds" \
--namespace kagent \
--create-namespace && \
- helm install khook "$TMP_DIR/khook/charts/khook-controller" \
+ helm install khook "$TMP_DIR/khook/helm/khook" \
--namespace kagent \
--create-namespace && \
rm -rf "$TMP_DIR"
@@ -64,7 +64,7 @@ For custom deployments or development:
```bash
# Clone the repository
-git clone https://github.com/antweiss/khook.git
+git clone https://github.com/kagent-dev/khook.git
cd khook
# Install CRDs
@@ -319,7 +319,7 @@ kubectl set env deployment/khook -n kagent LOG_LEVEL=debug
```bash
# From the cloned repository root
-helm upgrade khook ./charts/khook-controller \
+helm upgrade khook ./helm/khook \
--namespace kagent
```
@@ -327,10 +327,10 @@ helm upgrade khook ./charts/khook-controller \
```bash
# Update CRDs first
-kubectl apply -f https://github.com/antweiss/khook/releases/latest/download/crds.yaml
+kubectl apply -f https://github.com/kagent-dev/khook/releases/latest/download/crds.yaml
# Update controller
-kubectl apply -f https://github.com/antweiss/khook/releases/latest/download/install.yaml
+kubectl apply -f https://github.com/kagent-dev/khook/releases/latest/download/install.yaml
```
## Uninstallation
diff --git a/docs/kagent-integration.md b/docs/kagent-integration.md
index a770298..a684967 100644
--- a/docs/kagent-integration.md
+++ b/docs/kagent-integration.md
@@ -316,4 +316,4 @@ For integration issues:
1. **Check Controller Logs**: `kubectl logs -n kagent deployment/khook`
2. **Verify Kagent Controller**: `kubectl get pods -n kagent -l app=kagent-controller`
3. **Test Connectivity**: Use the health check commands above
-4. **GitHub Issues**: [https://github.com/antweiss/khook/issues](https://github.com/antweiss/khook/issues)
\ No newline at end of file
+4. **GitHub Issues**: [https://github.com/kagent-dev/khook/issues](https://github.com/kagent-dev/khook/issues)
\ No newline at end of file
diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md
index 744b04f..a452938 100644
--- a/docs/troubleshooting.md
+++ b/docs/troubleshooting.md
@@ -160,7 +160,7 @@ kubectl logs -n kagent deployment/khook | grep "leader"
3. **Clock skew issues:**
```bash
# Check system time on controller
- kubectl exec -n kagent deployment/khook-controller -- date
+ kubectl exec -n kagent deployment/khook -- date
# Compare with cluster time
kubectl get nodes -o jsonpath='{.items[0].status.conditions[?(@.type=="Ready")].lastTransitionTime}'
@@ -177,20 +177,20 @@ kubectl logs -n kagent deployment/khook | grep "leader"
```bash
# Monitor memory usage
-kubectl top pod -n kagent -l app=khook-controller
+kubectl top pod -n kagent -l app=khook
# Check active events across all hooks
kubectl get hooks -A -o jsonpath='{range .items[*]}{.metadata.name}: {.status.activeEvents}{"\n"}{end}'
# Check for memory leaks in logs
-kubectl logs -n kagent deployment/khook-controller | grep -i "memory\|leak\|gc"
+kubectl logs -n kagent deployment/khook | grep -i "memory\|leak\|gc"
```
**Solutions:**
1. **Increase resource limits:**
```bash
- kubectl patch deployment khook-controller -n kagent -p '{
+ kubectl patch deployment khook -n kagent -p '{
"spec": {
"template": {
"spec": {
@@ -210,7 +210,7 @@ kubectl logs -n kagent deployment/khook-controller | grep -i "memory\|leak\|gc"
2. **Clean up stale events:**
```bash
# Restart controller to clean up memory
- kubectl rollout restart deployment/khook-controller -n kagent
+ kubectl rollout restart deployment/khook -n kagent
```
### 5. Permission Denied Errors
@@ -224,12 +224,12 @@ kubectl logs -n kagent deployment/khook-controller | grep -i "memory\|leak\|gc"
```bash
# Check current permissions
-kubectl auth can-i get events --as=system:serviceaccount:kagent:khook-controller
-kubectl auth can-i update hooks --as=system:serviceaccount:kagent:khook-controller
+kubectl auth can-i get events --as=system:serviceaccount:kagent:khook
+kubectl auth can-i update hooks --as=system:serviceaccount:kagent:khook
# Verify ClusterRole and ClusterRoleBinding
-kubectl get clusterrole khook-controller -o yaml
-kubectl get clusterrolebinding khook-controller -o yaml
+kubectl get clusterrole khook -o yaml
+kubectl get clusterrolebinding khook -o yaml
```
**Solutions:**
@@ -241,7 +241,7 @@ kubectl get clusterrolebinding khook-controller -o yaml
2. **Verify service account:**
```bash
- kubectl get serviceaccount khook-controller -n kagent
+ kubectl get serviceaccount khook -n kagent
```
## Debug Mode
@@ -253,7 +253,7 @@ Enable debug logging for detailed troubleshooting:
kubectl set env deployment/kagent-hook-controller -n kagent LOG_LEVEL=debug
# Watch debug logs
-kubectl logs -n kagent deployment/khook-controller -f | grep DEBUG
+kubectl logs -n kagent deployment/khook -f | grep DEBUG
```
## Performance Issues
@@ -268,7 +268,7 @@ kubectl logs -n kagent deployment/khook-controller -f | grep DEBUG
1. **Increase controller resources:**
```bash
- kubectl patch deployment khook-controller -n kagent -p '{
+ kubectl patch deployment khook -n kagent -p '{
"spec": {
"template": {
"spec": {
@@ -316,7 +316,7 @@ Collect comprehensive logs for support:
```bash
# Controller logs
-kubectl logs -n kagent deployment/khook-controller --previous > controller-logs.txt
+kubectl logs -n kagent deployment/khook --previous > controller-logs.txt
# Hook status
kubectl get hooks -A -o yaml > hooks-status.yaml
diff --git a/examples/basic-pod-monitoring.yaml b/examples/basic-pod-monitoring.yaml
index 7ee5617..c7382e6 100644
--- a/examples/basic-pod-monitoring.yaml
+++ b/examples/basic-pod-monitoring.yaml
@@ -6,7 +6,7 @@ apiVersion: kagent.dev/v1alpha2
kind: Hook
metadata:
name: basic-pod-monitoring
- namespace: default
+ namespace: kagent
labels:
environment: development
monitoring-type: basic
@@ -14,7 +14,8 @@ spec:
eventConfigurations:
# Monitor pod restarts
- eventType: pod-restart
- agentId: kagent/k8s-agent
+ agentRef:
+ name: k8s-agent
prompt: |
AUTONOMOUS MODE: Pod {{.ResourceName}} restarted at {{.EventTime}}. Analyze and fix immediately.
@@ -28,7 +29,8 @@ spec:
# Monitor OOM kills
- eventType: oom-kill
- agentId: kagent/k8s-agent
+ agentRef:
+ name: k8s-agent
prompt: |
AUTONOMOUS MODE: OOM kill for {{.ResourceName}} at {{.EventTime}}. Analyze memory and optimize immediately.
diff --git a/examples/ci-cd-monitoring.yaml b/examples/ci-cd-monitoring.yaml
index 6fd877d..6178e33 100644
--- a/examples/ci-cd-monitoring.yaml
+++ b/examples/ci-cd-monitoring.yaml
@@ -14,7 +14,8 @@ spec:
eventConfigurations:
# Monitor pod pending issues in CI/CD environments
- eventType: pod-pending
- agentId: kagent/ci-cd-engineer
+ agentRef:
+ name: ci-cd-engineer
prompt: |
AUTONOMOUS MODE: CI/CD ALERT - Pod {{.ResourceName}} pending since {{.EventTime}}. Pipeline blocked.
@@ -29,7 +30,9 @@ spec:
# Monitor restarts in CI/CD environments
- eventType: pod-restart
- agentId: kagent/ci-cd-engineer
+ agentRef:
+ name: ci-cd-engineer
+ namespace: kagent
prompt: |
🔧 CI/CD ALERT: Build/test pod restarted
diff --git a/examples/development-monitoring.yaml b/examples/development-monitoring.yaml
index 02d9aa9..3e97d00 100644
--- a/examples/development-monitoring.yaml
+++ b/examples/development-monitoring.yaml
@@ -14,7 +14,9 @@ spec:
eventConfigurations:
# Development-friendly pod restart monitoring
- eventType: pod-restart
- agentId: kagent/dev-helper
+ agentRef:
+ name: dev-helper
+ namespace: kagent
prompt: |
AUTONOMOUS MODE: DEV ENV - Pod {{.ResourceName}} restarted at {{.EventTime}}. Debug and fix immediately.
@@ -29,7 +31,9 @@ spec:
# OOM kills in development
- eventType: oom-kill
- agentId: kagent/dev-helper
+ agentRef:
+ name: dev-helper
+ namespace: kagent
prompt: |
AUTONOMOUS MODE: DEV ENV - OOM kill for {{.ResourceName}} at {{.EventTime}}. Optimize memory immediately.
diff --git a/examples/multi-namespace-monitoring.yaml b/examples/multi-namespace-monitoring.yaml
index b10650a..5ff02f8 100644
--- a/examples/multi-namespace-monitoring.yaml
+++ b/examples/multi-namespace-monitoring.yaml
@@ -14,7 +14,9 @@ spec:
eventConfigurations:
# Staging environment monitoring with production-like analysis
- eventType: pod-restart
- agentId: kagent/staging-analyzer
+ agentRef:
+ name: staging-analyzer
+ namespace: kagent
prompt: |
AUTONOMOUS MODE: STAGING - Pod {{.ResourceName}} restarted at {{.EventTime}}. Analyze and fix immediately.
@@ -27,7 +29,9 @@ spec:
• Complete remediation without human approval
- eventType: probe-failed
- agentId: kagent/staging-analyzer
+ agentRef:
+ name: staging-analyzer
+ namespace: kagent
prompt: |
AUTONOMOUS MODE: STAGING - Health probe failed for {{.ResourceName}} at {{.EventTime}}. Fix immediately.
@@ -52,7 +56,9 @@ spec:
eventConfigurations:
# Test environment with focus on CI/CD pipeline issues
- eventType: pod-pending
- agentId: kagent/ci-cd-helper
+ agentRef:
+ name: ci-cd-helper
+ namespace: kagent
prompt: |
AUTONOMOUS MODE: TEST ENV - Pod {{.ResourceName}} pending since {{.EventTime}}. Resolve scheduling issue immediately.
diff --git a/examples/performance-monitoring.yaml b/examples/performance-monitoring.yaml
index b8b4db0..a36cd43 100644
--- a/examples/performance-monitoring.yaml
+++ b/examples/performance-monitoring.yaml
@@ -6,7 +6,7 @@ apiVersion: kagent.dev/v1alpha2
kind: Hook
metadata:
name: performance-monitoring
- namespace: production
+ namespace: performance
labels:
monitoring-type: performance
focus: optimization
@@ -14,7 +14,9 @@ spec:
eventConfigurations:
# Monitor OOM kills for performance optimization
- eventType: oom-kill
- agentId: kagent/performance-optimizer
+ agentRef:
+ name: performance-optimizer
+ namespace: kagent
prompt: |
AUTONOMOUS MODE: PERFORMANCE ALERT - OOM kill for {{.ResourceName}} at {{.EventTime}}. Optimize immediately.
@@ -29,7 +31,9 @@ spec:
# Monitor pod pending for resource allocation issues
- eventType: pod-pending
- agentId: kagent/capacity-planner
+ agentRef:
+ name: capacity-planner
+ namespace: kagent
prompt: |
AUTONOMOUS MODE: CAPACITY ALERT - Pod {{.ResourceName}} pending since {{.EventTime}}. Resolve scheduling issue.
@@ -44,7 +48,9 @@ spec:
# Monitor restarts for performance degradation patterns
- eventType: pod-restart
- agentId: kagent/reliability-engineer
+ agentRef:
+ name: reliability-engineer
+ namespace: kagent
prompt: |
AUTONOMOUS MODE: RELIABILITY ALERT - Pod {{.ResourceName}} restarted at {{.EventTime}}. Performance analysis required.
diff --git a/examples/production-monitoring.yaml b/examples/production-monitoring.yaml
index 52c4cad..34163bd 100644
--- a/examples/production-monitoring.yaml
+++ b/examples/production-monitoring.yaml
@@ -15,7 +15,9 @@ spec:
eventConfigurations:
# Critical: Pod restarts in production
- eventType: pod-restart
- agentId: kagent/incident-manager
+ agentRef:
+ name: incident-manager
+ namespace: kagent
prompt: |
🚨 PRODUCTION ALERT: Pod restart detected
@@ -58,7 +60,9 @@ spec:
# Pod stuck in pending state
- eventType: pod-pending
- agentId: kagent/scheduling-analyzer
+ agentRef:
+ name: scheduling-analyzer
+ namespace: kagent
prompt: |
⚠️ PRODUCTION ISSUE: Pod scheduling problem
@@ -99,7 +103,9 @@ spec:
# Health probe failures
- eventType: probe-failed
- agentId: kagent/health-checker
+ agentRef:
+ name: health-checker
+ namespace: kagent
prompt: |
🔍 PRODUCTION HEALTH ISSUE: Probe failure detected
@@ -141,7 +147,9 @@ spec:
# Critical: OOM kills in production
- eventType: oom-kill
- agentId: kagent/capacity-planner
+ agentRef:
+ name: capacity-planner
+ namespace: kagent
prompt: |
🚨 CRITICAL PRODUCTION ISSUE: OOM Kill Event
diff --git a/examples/security-monitoring.yaml b/examples/security-monitoring.yaml
index 37a6282..c8490d6 100644
--- a/examples/security-monitoring.yaml
+++ b/examples/security-monitoring.yaml
@@ -6,7 +6,7 @@ apiVersion: kagent.dev/v1alpha2
kind: Hook
metadata:
name: security-monitoring
- namespace: kube-system
+ namespace: security
labels:
monitoring-type: security
priority: critical
@@ -14,7 +14,9 @@ spec:
eventConfigurations:
# Monitor unexpected pod restarts that could indicate security issues
- eventType: pod-restart
- agentId: kagent/security-analyst
+ agentRef:
+ name: security-analyst
+ namespace: kagent
prompt: |
AUTONOMOUS MODE: SECURITY ALERT - Pod {{.ResourceName}} restarted at {{.EventTime}}. Security analysis required.
@@ -30,7 +32,9 @@ spec:
# Monitor probe failures that could indicate tampering
- eventType: probe-failed
- agentId: kagent/security-analyst
+ agentRef:
+ name: security-analyst
+ namespace: kagent
prompt: |
AUTONOMOUS MODE: SECURITY ALERT - Health probe failed for {{.ResourceName}} at {{.EventTime}}. Security implications.
diff --git a/go.mod b/go.mod
index 0f0c194..595ce6b 100644
--- a/go.mod
+++ b/go.mod
@@ -1,4 +1,4 @@
-module github.com/antweiss/khook
+module github.com/kagent-dev/khook
go 1.24.6
@@ -7,10 +7,11 @@ require (
github.com/kagent-dev/kagent/go v0.0.0-20250827151700-a9cc8a1f7d57
github.com/stretchr/testify v1.10.0
gopkg.in/yaml.v2 v2.4.0
- k8s.io/api v0.33.3
- k8s.io/apimachinery v0.33.4
- k8s.io/client-go v0.33.3
+ k8s.io/api v0.34.0
+ k8s.io/apimachinery v0.34.0
+ k8s.io/client-go v0.34.0
sigs.k8s.io/controller-runtime v0.21.0
+ trpc.group/trpc-go/trpc-a2a-go v0.2.3
)
require (
@@ -23,7 +24,7 @@ require (
github.com/evanphx/json-patch v5.6.0+incompatible // indirect
github.com/evanphx/json-patch/v5 v5.9.11 // indirect
github.com/fsnotify/fsnotify v1.9.0 // indirect
- github.com/fxamacker/cbor/v2 v2.8.0 // indirect
+ github.com/fxamacker/cbor/v2 v2.9.0 // indirect
github.com/glebarez/go-sqlite v1.21.2 // indirect
github.com/glebarez/sqlite v1.11.0 // indirect
github.com/go-logr/zapr v1.3.0 // indirect
@@ -34,7 +35,7 @@ require (
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang-jwt/jwt/v5 v5.2.2 // indirect
github.com/google/btree v1.1.3 // indirect
- github.com/google/gnostic-models v0.6.9 // indirect
+ github.com/google/gnostic-models v0.7.0 // indirect
github.com/google/go-cmp v0.7.0 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/jackc/pgpassfile v1.0.0 // indirect
@@ -54,7 +55,7 @@ require (
github.com/mailru/easyjson v0.9.0 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
- github.com/modern-go/reflect2 v1.0.2 // indirect
+ github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
@@ -70,6 +71,7 @@ require (
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.0 // indirect
go.yaml.in/yaml/v2 v2.4.2 // indirect
+ go.yaml.in/yaml/v3 v3.0.4 // indirect
golang.org/x/crypto v0.39.0 // indirect
golang.org/x/net v0.41.0 // indirect
golang.org/x/oauth2 v0.30.0 // indirect
@@ -87,7 +89,7 @@ require (
gorm.io/gorm v1.30.1 // indirect
k8s.io/apiextensions-apiserver v0.33.3 // indirect
k8s.io/klog/v2 v2.130.1 // indirect
- k8s.io/kube-openapi v0.0.0-20250610211856-8b98d1ed966a // indirect
+ k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b // indirect
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 // indirect
modernc.org/libc v1.22.5 // indirect
modernc.org/mathutil v1.5.0 // indirect
@@ -95,7 +97,6 @@ require (
modernc.org/sqlite v1.23.1 // indirect
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect
sigs.k8s.io/randfill v1.0.0 // indirect
- sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect
+ sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect
sigs.k8s.io/yaml v1.6.0 // indirect
- trpc.group/trpc-go/trpc-a2a-go v0.2.3 // indirect
)
diff --git a/go.sum b/go.sum
index e91bde5..3cfd1c1 100644
--- a/go.sum
+++ b/go.sum
@@ -20,8 +20,8 @@ github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjT
github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM=
github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
-github.com/fxamacker/cbor/v2 v2.8.0 h1:fFtUGXUzXPHTIUdne5+zzMPTfffl3RD5qYnkY40vtxU=
-github.com/fxamacker/cbor/v2 v2.8.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
+github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=
+github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
github.com/glebarez/go-sqlite v1.21.2 h1:3a6LFC4sKahUunAmynQKLZceZCOzUthkRkEAl9gAXWo=
github.com/glebarez/go-sqlite v1.21.2/go.mod h1:sfxdZyhQjTM2Wry3gVYWaW072Ri1WMdWJi0k6+3382k=
github.com/glebarez/sqlite v1.11.0 h1:wSG0irqzP6VurnMEpFGer5Li19RpIRi2qvQz++w0GMw=
@@ -46,9 +46,8 @@ github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeD
github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg=
github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
-github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw=
-github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw=
-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo=
+github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -105,8 +104,9 @@ github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8=
+github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/onsi/ginkgo/v2 v2.23.4 h1:ktYTpKJAVZnDT4VjxSbiBenUjmlL/5QkBEocaWXiQus=
@@ -158,8 +158,8 @@ go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI=
go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU=
-go.yaml.in/yaml/v3 v3.0.3 h1:bXOww4E/J3f66rav3pX3m8w6jDE4knZjGOw8b5Y6iNE=
-go.yaml.in/yaml/v3 v3.0.3/go.mod h1:tBHosrYAkRZjRAOREWbDnBXUf08JOwYq++0QNwQiWzI=
+go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
+go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
@@ -224,18 +224,18 @@ gorm.io/driver/postgres v1.6.0 h1:2dxzU8xJ+ivvqTRph34QX+WrRaJlmfyPqXmoGVjMBa4=
gorm.io/driver/postgres v1.6.0/go.mod h1:vUw0mrGgrTK+uPHEhAdV4sfFELrByKVGnaVRkXDhtWo=
gorm.io/gorm v1.30.1 h1:lSHg33jJTBxs2mgJRfRZeLDG+WZaHYCk3Wtfl6Ngzo4=
gorm.io/gorm v1.30.1/go.mod h1:8Z33v652h4//uMA76KjeDH8mJXPm1QNCYrMeatR0DOE=
-k8s.io/api v0.33.3 h1:SRd5t//hhkI1buzxb288fy2xvjubstenEKL9K51KBI8=
-k8s.io/api v0.33.3/go.mod h1:01Y/iLUjNBM3TAvypct7DIj0M0NIZc+PzAHCIo0CYGE=
+k8s.io/api v0.34.0 h1:L+JtP2wDbEYPUeNGbeSa/5GwFtIA662EmT2YSLOkAVE=
+k8s.io/api v0.34.0/go.mod h1:YzgkIzOOlhl9uwWCZNqpw6RJy9L2FK4dlJeayUoydug=
k8s.io/apiextensions-apiserver v0.33.3 h1:qmOcAHN6DjfD0v9kxL5udB27SRP6SG/MTopmge3MwEs=
k8s.io/apiextensions-apiserver v0.33.3/go.mod h1:oROuctgo27mUsyp9+Obahos6CWcMISSAPzQ77CAQGz8=
-k8s.io/apimachinery v0.33.4 h1:SOf/JW33TP0eppJMkIgQ+L6atlDiP/090oaX0y9pd9s=
-k8s.io/apimachinery v0.33.4/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM=
-k8s.io/client-go v0.33.3 h1:M5AfDnKfYmVJif92ngN532gFqakcGi6RvaOF16efrpA=
-k8s.io/client-go v0.33.3/go.mod h1:luqKBQggEf3shbxHY4uVENAxrDISLOarxpTKMiUuujg=
+k8s.io/apimachinery v0.34.0 h1:eR1WO5fo0HyoQZt1wdISpFDffnWOvFLOOeJ7MgIv4z0=
+k8s.io/apimachinery v0.34.0/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw=
+k8s.io/client-go v0.34.0 h1:YoWv5r7bsBfb0Hs2jh8SOvFbKzzxyNo0nSb0zC19KZo=
+k8s.io/client-go v0.34.0/go.mod h1:ozgMnEKXkRjeMvBZdV1AijMHLTh3pbACPvK7zFR+QQY=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
-k8s.io/kube-openapi v0.0.0-20250610211856-8b98d1ed966a h1:ZV3Zr+/7s7aVbjNGICQt+ppKWsF1tehxggNfbM7XnG8=
-k8s.io/kube-openapi v0.0.0-20250610211856-8b98d1ed966a/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8=
+k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOPolHyvO06MXG5TUIj2mNAA=
+k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts=
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y=
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
modernc.org/libc v1.22.5 h1:91BNch/e5B0uPbJFgqbxXuOnxBQjlS//icfQEGmvyjE=
@@ -250,12 +250,10 @@ sigs.k8s.io/controller-runtime v0.21.0 h1:CYfjpEuicjUecRk+KAeyYh+ouUBn4llGyDYytI
sigs.k8s.io/controller-runtime v0.21.0/go.mod h1:OSg14+F65eWqIu4DceX7k/+QRAbTTvxeQSNSOQpukWM=
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE=
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
-sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
-sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc=
-sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps=
-sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
+sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco=
+sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE=
sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs=
sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=
trpc.group/trpc-go/trpc-a2a-go v0.2.3 h1:fImDVqA6geyXAhK/4qKqITnDkBG+eAWJMBilzd7i0CM=
diff --git a/helm/.gitignore b/helm/.gitignore
new file mode 100644
index 0000000..ec92b06
--- /dev/null
+++ b/helm/.gitignore
@@ -0,0 +1,2 @@
+Chart.yaml
+Chart.lock
diff --git a/helm/khook-crds/Chart-template.yaml b/helm/khook-crds/Chart-template.yaml
new file mode 100644
index 0000000..4793748
--- /dev/null
+++ b/helm/khook-crds/Chart-template.yaml
@@ -0,0 +1,19 @@
+apiVersion: v2
+name: khook-crds
+description: CRDs for the Kagent Hook Controller (hooks.kagent.dev)
+type: application
+version: ${VERSION}
+appVersion: "${VERSION}"
+home: https://github.com/kagent-dev/khook
+sources:
+- https://github.com/kagent-dev/khook
+maintainers:
+- name: Kagent Team
+ email: support@kagent.dev
+keywords:
+- kubernetes
+- controller
+- automation
+- events
+- kagent
+- hooks
\ No newline at end of file
diff --git a/charts/khook-crds/README.md b/helm/khook-crds/README.md
similarity index 73%
rename from charts/khook-crds/README.md
rename to helm/khook-crds/README.md
index 92cdec1..edba861 100644
--- a/charts/khook-crds/README.md
+++ b/helm/khook-crds/README.md
@@ -9,7 +9,7 @@ This chart installs the CustomResourceDefinitions (CRDs) required by the Kagent
```bash
# From the repository root
-helm install khook-crds ./charts/kagent-hook-crds \
+helm install khook-crds ./helm/khook-crds \
--namespace kagent \
--create-namespace
```
@@ -17,7 +17,7 @@ helm install khook-crds ./charts/kagent-hook-crds \
Install the controller after CRDs are installed:
```bash
-helm install khook-controller ./charts/kagent-hook-controller \
+helm install khook ./helm/khook-controller \
--namespace kagent \
--create-namespace
```
@@ -25,6 +25,6 @@ helm install khook-controller ./charts/kagent-hook-controller \
## Uninstall
```bash
-helm uninstall khook-controller -n kagent
+helm uninstall khook -n kagent
helm uninstall khook-crds -n kagent
```
diff --git a/charts/khook-crds/crds/kagent.dev_hooks.yaml b/helm/khook-crds/crds/kagent.dev_hooks.yaml
similarity index 83%
rename from charts/khook-crds/crds/kagent.dev_hooks.yaml
rename to helm/khook-crds/crds/kagent.dev_hooks.yaml
index 0998f95..de09607 100644
--- a/charts/khook-crds/crds/kagent.dev_hooks.yaml
+++ b/helm/khook-crds/crds/kagent.dev_hooks.yaml
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.18.0
+ controller-gen.kubebuilder.io/version: v0.19.0
name: hooks.kagent.dev
spec:
group: kagent.dev
@@ -45,11 +45,24 @@ spec:
items:
description: EventConfiguration defines a single event type configuration
properties:
- agentId:
- description: AgentId specifies the Kagent agent to call when
+ agentRef:
+ description: AgentRef specifies the Kagent agent to call when
this event occurs
- minLength: 1
- type: string
+ properties:
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ minLength: 1
+ type: string
+ namespace:
+ description: |-
+ Namespace of the referent.
+ If unspecified, the namespace of the Hook will be used.
+ type: string
+ required:
+ - name
+ type: object
eventType:
description: EventType specifies the type of Kubernetes event
to monitor
@@ -65,7 +78,7 @@ spec:
minLength: 1
type: string
required:
- - agentId
+ - agentRef
- eventType
- prompt
type: object
diff --git a/charts/khook-crds/templates/namespace.yaml b/helm/khook-crds/templates/namespace.yaml
similarity index 100%
rename from charts/khook-crds/templates/namespace.yaml
rename to helm/khook-crds/templates/namespace.yaml
diff --git a/charts/khook-crds/values.yaml b/helm/khook-crds/values.yaml
similarity index 100%
rename from charts/khook-crds/values.yaml
rename to helm/khook-crds/values.yaml
diff --git a/charts/khook-controller/Chart.yaml b/helm/khook/Chart-template.yaml
similarity index 68%
rename from charts/khook-controller/Chart.yaml
rename to helm/khook/Chart-template.yaml
index f98be24..b17907a 100644
--- a/charts/khook-controller/Chart.yaml
+++ b/helm/khook/Chart-template.yaml
@@ -2,11 +2,11 @@ apiVersion: v2
name: khook
description: khook - a Kubernetes hook controller that integrates with Kagent (A2A)
type: application
-version: 0.1.0
-appVersion: "0.1.0"
-home: https://github.com/antweiss/khook
+version: ${VERSION}
+appVersion: "${VERSION}"
+home: https://github.com/kagent-dev/khook
sources:
-- https://github.com/antweiss/khook
+- https://github.com/kagent-dev/khook
maintainers:
- name: Kagent Team
email: support@kagent.dev
diff --git a/charts/khook-controller/README.md b/helm/khook/README.md
similarity index 91%
rename from charts/khook-controller/README.md
rename to helm/khook/README.md
index 68d5049..ffbf944 100644
--- a/charts/khook-controller/README.md
+++ b/helm/khook/README.md
@@ -14,9 +14,9 @@ This Helm chart deploys the Kagent Hook Controller, a Kubernetes controller that
```bash
# Clone the repository and install from local chart
-git clone https://github.com/antweiss/khook.git
+git clone https://github.com/kagent-dev/khook.git
cd khook
-helm install khook-controller ./charts/khook-controller \
+helm install khook ./helm/khook \
--namespace kagent \
--create-namespace \
# no API token required
@@ -25,7 +25,7 @@ helm install khook-controller ./charts/khook-controller \
### Install with Custom Values
```bash
-helm install khook-controller ./charts/khook-controller \
+helm install khook ./helm/khook \
--namespace kagent \
--create-namespace \
--values custom-values.yaml
@@ -65,7 +65,7 @@ The following table lists the configurable parameters and their default values:
### Basic Installation
```bash
-helm install khook-controller ./charts/khook-controller
+helm install khook ./helm/khook
```
### Production Installation with Monitoring
@@ -109,12 +109,12 @@ affinity:
- key: app.kubernetes.io/name
operator: In
values:
- - khook-controller
+ - khook
topologyKey: kubernetes.io/hostname
```
```bash
-helm install khook-controller ./charts/khook-controller \
+helm install khook ./helm/khook \
--namespace kagent \
--create-namespace \
--values production-values.yaml
@@ -169,8 +169,8 @@ The controller exposes metrics on port 8080 at `/metrics` endpoint. Key metrics
### Check Controller Status
```bash
-kubectl get pods -n kagent -l app.kubernetes.io/name=khook-controller
-kubectl logs -n kagent -l app.kubernetes.io/name=khook-controller
+kubectl get pods -n kagent -l app.kubernetes.io/name=khook
+kubectl logs -n kagent -l app.kubernetes.io/name=khook
```
### Verify Hook Resources
@@ -189,7 +189,7 @@ kubectl get events -n kagent --field-selector involvedObject.kind=Hook
## Uninstallation
```bash
-helm uninstall khook-controller -n kagent
+helm uninstall khook -n kagent
kubectl delete namespace kagent
```
diff --git a/charts/khook-controller/templates/NOTES.txt b/helm/khook/templates/NOTES.txt
similarity index 100%
rename from charts/khook-controller/templates/NOTES.txt
rename to helm/khook/templates/NOTES.txt
diff --git a/charts/khook-controller/templates/_helpers.tpl b/helm/khook/templates/_helpers.tpl
similarity index 100%
rename from charts/khook-controller/templates/_helpers.tpl
rename to helm/khook/templates/_helpers.tpl
diff --git a/charts/khook-controller/templates/configmap.yaml b/helm/khook/templates/configmap.yaml
similarity index 100%
rename from charts/khook-controller/templates/configmap.yaml
rename to helm/khook/templates/configmap.yaml
diff --git a/charts/khook-controller/templates/deployment.yaml b/helm/khook/templates/deployment.yaml
similarity index 100%
rename from charts/khook-controller/templates/deployment.yaml
rename to helm/khook/templates/deployment.yaml
diff --git a/charts/khook-controller/templates/namespace.yaml b/helm/khook/templates/namespace.yaml
similarity index 100%
rename from charts/khook-controller/templates/namespace.yaml
rename to helm/khook/templates/namespace.yaml
diff --git a/charts/khook-controller/templates/rbac.yaml b/helm/khook/templates/rbac.yaml
similarity index 100%
rename from charts/khook-controller/templates/rbac.yaml
rename to helm/khook/templates/rbac.yaml
diff --git a/charts/khook-controller/templates/service.yaml b/helm/khook/templates/service.yaml
similarity index 100%
rename from charts/khook-controller/templates/service.yaml
rename to helm/khook/templates/service.yaml
diff --git a/charts/khook-controller/templates/serviceaccount.yaml b/helm/khook/templates/serviceaccount.yaml
similarity index 100%
rename from charts/khook-controller/templates/serviceaccount.yaml
rename to helm/khook/templates/serviceaccount.yaml
diff --git a/charts/khook-controller/templates/servicemonitor.yaml b/helm/khook/templates/servicemonitor.yaml
similarity index 100%
rename from charts/khook-controller/templates/servicemonitor.yaml
rename to helm/khook/templates/servicemonitor.yaml
diff --git a/charts/khook-controller/values.yaml b/helm/khook/values.yaml
similarity index 94%
rename from charts/khook-controller/values.yaml
rename to helm/khook/values.yaml
index 430aaa7..b253e70 100644
--- a/charts/khook-controller/values.yaml
+++ b/helm/khook/values.yaml
@@ -1,4 +1,4 @@
-# Default values for khook-controller
+# Default values for khook
replicaCount: 1
image:
@@ -24,7 +24,7 @@ controller:
logFormat: "json"
leaderElection:
enabled: true
- resourceName: "khook-controller-leader-election"
+ resourceName: "khook-leader-election"
deduplication:
timeoutMinutes: 10
cleanupIntervalMinutes: 5
diff --git a/internal/client/kagent_client.go b/internal/client/kagent_client.go
index 8aee2fa..d6d2af1 100644
--- a/internal/client/kagent_client.go
+++ b/internal/client/kagent_client.go
@@ -3,14 +3,13 @@ package client
import (
"context"
"fmt"
- "reflect"
"strings"
"time"
- "github.com/antweiss/khook/internal/interfaces"
"github.com/go-logr/logr"
"github.com/kagent-dev/kagent/go/pkg/client"
"github.com/kagent-dev/kagent/go/pkg/client/api"
+ "github.com/kagent-dev/khook/internal/interfaces"
a2aclient "trpc.group/trpc-go/trpc-a2a-go/client"
"trpc.group/trpc-go/trpc-a2a-go/protocol"
)
@@ -134,15 +133,15 @@ func (c *Client) Authenticate() error {
func (c *Client) CallAgent(ctx context.Context, request interfaces.AgentRequest) (*interfaces.AgentResponse, error) {
// Create a session for this agent call
sessionName := fmt.Sprintf("hook-%s-%d", request.EventName, time.Now().Unix())
-
+ agentRefString := request.AgentRef.String()
sessionReq := &api.SessionRequest{
- AgentRef: &request.AgentId,
+ AgentRef: &agentRefString,
Name: &sessionName,
}
c.logger.Info("Creating session for agent call",
"sessionName", sessionName,
- "agentId", request.AgentId,
+ "agentId", request.AgentRef.String(),
"eventName", request.EventName)
sessionResp, err := c.clientSet.Session.CreateSession(ctx, sessionReq)
@@ -178,7 +177,7 @@ func (c *Client) CallAgent(ctx context.Context, request interfaces.AgentRequest)
}
// Use A2A SendMessage (POST). Provide a clean base URL with trailing slash; no query params.
- a2aURL := fmt.Sprintf("%s/api/a2a/%s/", c.config.BaseURL, request.AgentId)
+ a2aURL := fmt.Sprintf("%s/api/a2a/%s/", c.config.BaseURL, request.AgentRef.String())
a2a, err := a2aclient.NewA2AClient(a2aURL)
if err != nil {
return nil, fmt.Errorf("failed to create A2A client: %w", err)
@@ -197,27 +196,15 @@ func (c *Client) CallAgent(ctx context.Context, request interfaces.AgentRequest)
})
if err != nil {
c.logger.Error(err, "Failed to send message to agent",
- "agentId", request.AgentId,
+ "agentRef", request.AgentRef.String(),
"sessionId", sessionResp.Data.ID)
return nil, fmt.Errorf("failed to send A2A message: %w", err)
}
- // Best-effort check whether a Task was returned (per A2A Life of a Task)
- isTask := false
- if res != nil {
- rv := reflect.ValueOf(res)
- if rv.Kind() == reflect.Ptr {
- rv = rv.Elem()
- }
- if rv.IsValid() {
- if f := rv.FieldByName("Task"); f.IsValid() && !f.IsZero() {
- isTask = true
- }
- }
- }
+ _, isTask := res.Result.(*protocol.Task)
c.logger.Info("Agent accepted message via A2A",
- "agentId", request.AgentId,
+ "agentRef", request.AgentRef.String(),
"sessionId", sessionID,
"taskReturned", isTask)
@@ -228,7 +215,7 @@ func (c *Client) CallAgent(ctx context.Context, request interfaces.AgentRequest)
}
c.logger.Info("Agent call completed successfully",
- "agentId", request.AgentId,
+ "agentRef", request.AgentRef.String(),
"sessionId", response.RequestId)
return response, nil
diff --git a/internal/client/kagent_client_test.go b/internal/client/kagent_client_test.go
index bb1156e..4831359 100644
--- a/internal/client/kagent_client_test.go
+++ b/internal/client/kagent_client_test.go
@@ -5,9 +5,10 @@ import (
"testing"
"time"
- "github.com/antweiss/khook/internal/interfaces"
+ "github.com/kagent-dev/khook/internal/interfaces"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+ "k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/log"
)
@@ -66,7 +67,7 @@ func TestClient_CallAgent(t *testing.T) {
client := NewClient(config, logger)
request := interfaces.AgentRequest{
- AgentId: "test-agent",
+ AgentRef: types.NamespacedName{Name: "test-agent"},
Prompt: "Test prompt",
EventName: "pod-restart",
EventTime: time.Now(),
@@ -94,7 +95,7 @@ func TestClient_CallAgent(t *testing.T) {
defer cancel()
request := interfaces.AgentRequest{
- AgentId: "test-agent",
+ AgentRef: types.NamespacedName{Name: "test-agent"},
Prompt: "Test prompt",
EventName: "pod-restart",
EventTime: time.Now(),
diff --git a/internal/deduplication/manager.go b/internal/deduplication/manager.go
index 40c980f..fe57ae5 100644
--- a/internal/deduplication/manager.go
+++ b/internal/deduplication/manager.go
@@ -5,7 +5,8 @@ import (
"sync"
"time"
- "github.com/antweiss/khook/internal/interfaces"
+ "github.com/kagent-dev/khook/internal/interfaces"
+ "k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/log"
)
@@ -43,12 +44,12 @@ func (m *Manager) eventKey(event interfaces.Event) string {
}
// ShouldProcessEvent determines if an event should be processed based on deduplication logic
-func (m *Manager) ShouldProcessEvent(hookName string, event interfaces.Event) bool {
- logger := log.Log.WithName("dedup").WithValues("hook", hookName, "eventType", event.Type, "resource", event.ResourceName)
+func (m *Manager) ShouldProcessEvent(hookRef types.NamespacedName, event interfaces.Event) bool {
+ logger := log.Log.WithName("dedup").WithValues("hook", hookRef.String(), "eventType", event.Type, "resource", event.ResourceName)
m.mutex.RLock()
defer m.mutex.RUnlock()
- hookEventMap, exists := m.hookEvents[hookName]
+ hookEventMap, exists := m.hookEvents[hookRef.String()]
if !exists {
// No events for this hook, should process
logger.V(1).Info("No existing events for hook; will process")
@@ -83,28 +84,28 @@ func (m *Manager) ShouldProcessEvent(hookName string, event interfaces.Event) bo
}
// RecordEvent records an event in the deduplication storage
-func (m *Manager) RecordEvent(hookName string, event interfaces.Event) error {
- logger := log.Log.WithName("dedup").WithValues("hook", hookName, "eventType", event.Type, "resource", event.ResourceName)
+func (m *Manager) RecordEvent(hookRef types.NamespacedName, event interfaces.Event) error {
+ logger := log.Log.WithName("dedup").WithValues("hook", hookRef.String(), "eventType", event.Type, "resource", event.ResourceName)
m.mutex.Lock()
defer m.mutex.Unlock()
// Initialize hook event map if it doesn't exist
- if m.hookEvents[hookName] == nil {
- m.hookEvents[hookName] = make(map[string]*interfaces.ActiveEvent)
+ if m.hookEvents[hookRef.String()] == nil {
+ m.hookEvents[hookRef.String()] = make(map[string]*interfaces.ActiveEvent)
}
key := m.eventKey(event)
now := time.Now()
// Check if event already exists
- if existingEvent, exists := m.hookEvents[hookName][key]; exists {
+ if existingEvent, exists := m.hookEvents[hookRef.String()][key]; exists {
// Update existing event
existingEvent.LastSeen = now
existingEvent.Status = StatusFiring
logger.V(1).Info("Updated existing active event", "lastSeen", existingEvent.LastSeen)
} else {
// Create new event record
- m.hookEvents[hookName][key] = &interfaces.ActiveEvent{
+ m.hookEvents[hookRef.String()][key] = &interfaces.ActiveEvent{
EventType: event.Type,
ResourceName: event.ResourceName,
FirstSeen: now,
@@ -118,21 +119,21 @@ func (m *Manager) RecordEvent(hookName string, event interfaces.Event) error {
}
// MarkNotified marks that we successfully notified the agent for this event now
-func (m *Manager) MarkNotified(hookName string, event interfaces.Event) {
+func (m *Manager) MarkNotified(hookRef types.NamespacedName, event interfaces.Event) {
m.mutex.Lock()
defer m.mutex.Unlock()
- if m.hookEvents[hookName] == nil {
- m.hookEvents[hookName] = make(map[string]*interfaces.ActiveEvent)
+ if m.hookEvents[hookRef.String()] == nil {
+ m.hookEvents[hookRef.String()] = make(map[string]*interfaces.ActiveEvent)
}
key := m.eventKey(event)
now := time.Now()
- if ae, ok := m.hookEvents[hookName][key]; ok {
+ if ae, ok := m.hookEvents[hookRef.String()][key]; ok {
ae.LastNotifiedAt = &now
if ae.NotifiedAt == nil {
ae.NotifiedAt = &now
}
} else {
- m.hookEvents[hookName][key] = &interfaces.ActiveEvent{
+ m.hookEvents[hookRef.String()][key] = &interfaces.ActiveEvent{
EventType: event.Type,
ResourceName: event.ResourceName,
FirstSeen: now,
@@ -145,11 +146,11 @@ func (m *Manager) MarkNotified(hookName string, event interfaces.Event) {
}
// CleanupExpiredEvents removes events that have exceeded the timeout duration
-func (m *Manager) CleanupExpiredEvents(hookName string) error {
+func (m *Manager) CleanupExpiredEvents(hookRef types.NamespacedName) error {
m.mutex.Lock()
defer m.mutex.Unlock()
- hookEventMap, exists := m.hookEvents[hookName]
+ hookEventMap, exists := m.hookEvents[hookRef.String()]
if !exists {
// No events for this hook
return nil
@@ -174,18 +175,18 @@ func (m *Manager) CleanupExpiredEvents(hookName string) error {
// Clean up empty hook map
if len(hookEventMap) == 0 {
- delete(m.hookEvents, hookName)
+ delete(m.hookEvents, hookRef.String())
}
return nil
}
// GetActiveEvents returns all active events for a specific hook
-func (m *Manager) GetActiveEvents(hookName string) []interfaces.ActiveEvent {
+func (m *Manager) GetActiveEvents(hookRef types.NamespacedName) []interfaces.ActiveEvent {
m.mutex.RLock()
defer m.mutex.RUnlock()
- hookEventMap, exists := m.hookEvents[hookName]
+ hookEventMap, exists := m.hookEvents[hookRef.String()]
if !exists {
return []interfaces.ActiveEvent{}
}
@@ -204,8 +205,8 @@ func (m *Manager) GetActiveEvents(hookName string) []interfaces.ActiveEvent {
// GetActiveEventsWithStatus returns all active events with their current status
// This method handles status calculation without race conditions
-func (m *Manager) GetActiveEventsWithStatus(hookName string) []interfaces.ActiveEvent {
- activeEvents := m.GetActiveEvents(hookName)
+func (m *Manager) GetActiveEventsWithStatus(hookRef types.NamespacedName) []interfaces.ActiveEvent {
+ activeEvents := m.GetActiveEvents(hookRef)
now := time.Now()
for i := range activeEvents {
diff --git a/internal/deduplication/manager_test.go b/internal/deduplication/manager_test.go
index 914891e..437ea88 100644
--- a/internal/deduplication/manager_test.go
+++ b/internal/deduplication/manager_test.go
@@ -5,9 +5,10 @@ import (
"testing"
"time"
- "github.com/antweiss/khook/internal/interfaces"
+ "github.com/kagent-dev/khook/internal/interfaces"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+ "k8s.io/apimachinery/pkg/types"
)
func TestNewManager(t *testing.T) {
@@ -43,7 +44,7 @@ func TestShouldProcessEvent_NewEvent(t *testing.T) {
}
// New event should be processed
- shouldProcess := manager.ShouldProcessEvent("test-hook", event)
+ shouldProcess := manager.ShouldProcessEvent(types.NamespacedName{Name: "test-hook", Namespace: "default"}, event)
assert.True(t, shouldProcess)
}
@@ -58,11 +59,11 @@ func TestShouldProcessEvent_DuplicateWithinTimeout(t *testing.T) {
}
// Record the event first
- err := manager.RecordEvent("test-hook", event)
+ err := manager.RecordEvent(types.NamespacedName{Name: "test-hook", Namespace: "default"}, event)
require.NoError(t, err)
// Same event within timeout should not be processed
- shouldProcess := manager.ShouldProcessEvent("test-hook", event)
+ shouldProcess := manager.ShouldProcessEvent(types.NamespacedName{Name: "test-hook", Namespace: "default"}, event)
assert.False(t, shouldProcess)
}
@@ -77,16 +78,17 @@ func TestShouldProcessEvent_ExpiredEvent(t *testing.T) {
}
// Record the event
- err := manager.RecordEvent("test-hook", event)
+ err := manager.RecordEvent(types.NamespacedName{Name: "test-hook", Namespace: "default"}, event)
require.NoError(t, err)
// Manually set the event to be older than timeout
- hookEventMap := manager.hookEvents["test-hook"]
+ hookEventMap, exists := manager.hookEvents[types.NamespacedName{Name: "test-hook", Namespace: "default"}.String()]
+ require.True(t, exists)
key := manager.eventKey(event)
hookEventMap[key].FirstSeen = time.Now().Add(-EventTimeoutDuration - time.Minute)
// Expired event should be processed again
- shouldProcess := manager.ShouldProcessEvent("test-hook", event)
+ shouldProcess := manager.ShouldProcessEvent(types.NamespacedName{Name: "test-hook", Namespace: "default"}, event)
assert.True(t, shouldProcess)
}
@@ -100,11 +102,11 @@ func TestRecordEvent_NewEvent(t *testing.T) {
Timestamp: time.Now(),
}
- err := manager.RecordEvent("test-hook", event)
+ err := manager.RecordEvent(types.NamespacedName{Name: "test-hook", Namespace: "default"}, event)
require.NoError(t, err)
// Verify event was recorded
- activeEvents := manager.GetActiveEvents("test-hook")
+ activeEvents := manager.GetActiveEvents(types.NamespacedName{Name: "test-hook", Namespace: "default"})
assert.Equal(t, 1, len(activeEvents))
assert.Equal(t, "pod-restart", activeEvents[0].EventType)
assert.Equal(t, "test-pod", activeEvents[0].ResourceName)
@@ -122,19 +124,19 @@ func TestRecordEvent_UpdateExistingEvent(t *testing.T) {
}
// Record event first time
- err := manager.RecordEvent("test-hook", event)
+ err := manager.RecordEvent(types.NamespacedName{Name: "test-hook", Namespace: "default"}, event)
require.NoError(t, err)
- activeEvents := manager.GetActiveEvents("test-hook")
+ activeEvents := manager.GetActiveEvents(types.NamespacedName{Name: "test-hook", Namespace: "default"})
firstSeen := activeEvents[0].FirstSeen
// Wait a bit and record same event again
time.Sleep(10 * time.Millisecond)
- err = manager.RecordEvent("test-hook", event)
+ err = manager.RecordEvent(types.NamespacedName{Name: "test-hook", Namespace: "default"}, event)
require.NoError(t, err)
// Verify event was updated, not duplicated
- activeEvents = manager.GetActiveEvents("test-hook")
+ activeEvents = manager.GetActiveEvents(types.NamespacedName{Name: "test-hook", Namespace: "default"})
assert.Equal(t, 1, len(activeEvents))
assert.Equal(t, firstSeen, activeEvents[0].FirstSeen) // FirstSeen should not change
assert.True(t, activeEvents[0].LastSeen.After(firstSeen)) // LastSeen should be updated
@@ -151,15 +153,15 @@ func TestRecordEvent_MultipleHooks(t *testing.T) {
}
// Record same event for different hooks
- err := manager.RecordEvent("hook1", event)
+ err := manager.RecordEvent(types.NamespacedName{Name: "hook1", Namespace: "default"}, event)
require.NoError(t, err)
- err = manager.RecordEvent("hook2", event)
+ err = manager.RecordEvent(types.NamespacedName{Name: "hook2", Namespace: "default"}, event)
require.NoError(t, err)
// Verify both hooks have the event
- activeEvents1 := manager.GetActiveEvents("hook1")
- activeEvents2 := manager.GetActiveEvents("hook2")
+ activeEvents1 := manager.GetActiveEvents(types.NamespacedName{Name: "hook1", Namespace: "default"})
+ activeEvents2 := manager.GetActiveEvents(types.NamespacedName{Name: "hook2", Namespace: "default"})
assert.Equal(t, 1, len(activeEvents1))
assert.Equal(t, 1, len(activeEvents2))
@@ -185,23 +187,24 @@ func TestCleanupExpiredEvents(t *testing.T) {
}
// Record both events
- err := manager.RecordEvent("test-hook", recentEvent)
+ err := manager.RecordEvent(types.NamespacedName{Name: "test-hook", Namespace: "default"}, recentEvent)
require.NoError(t, err)
- err = manager.RecordEvent("test-hook", oldEvent)
+ err = manager.RecordEvent(types.NamespacedName{Name: "test-hook", Namespace: "default"}, oldEvent)
require.NoError(t, err)
// Manually age the old event
- hookEventMap := manager.hookEvents["test-hook"]
+ hookEventMap, exists := manager.hookEvents[types.NamespacedName{Name: "test-hook", Namespace: "default"}.String()]
+ require.True(t, exists)
oldKey := manager.eventKey(oldEvent)
hookEventMap[oldKey].FirstSeen = time.Now().Add(-EventTimeoutDuration - time.Minute)
// Cleanup expired events
- err = manager.CleanupExpiredEvents("test-hook")
+ err = manager.CleanupExpiredEvents(types.NamespacedName{Name: "test-hook", Namespace: "default"})
require.NoError(t, err)
// Verify only recent event remains
- activeEvents := manager.GetActiveEvents("test-hook")
+ activeEvents := manager.GetActiveEvents(types.NamespacedName{Name: "test-hook", Namespace: "default"})
assert.Equal(t, 1, len(activeEvents))
assert.Equal(t, "recent-pod", activeEvents[0].ResourceName)
}
@@ -210,7 +213,7 @@ func TestCleanupExpiredEvents_EmptyHook(t *testing.T) {
manager := NewManager()
// Cleanup non-existent hook should not error
- err := manager.CleanupExpiredEvents("non-existent-hook")
+ err := manager.CleanupExpiredEvents(types.NamespacedName{Name: "non-existent-hook", Namespace: "default"})
assert.NoError(t, err)
}
@@ -225,30 +228,31 @@ func TestCleanupExpiredEvents_AllEventsExpired(t *testing.T) {
}
// Record event
- err := manager.RecordEvent("test-hook", event)
+ err := manager.RecordEvent(types.NamespacedName{Name: "test-hook", Namespace: "default"}, event)
require.NoError(t, err)
// Age the event
- hookEventMap := manager.hookEvents["test-hook"]
+ hookEventMap, exists := manager.hookEvents[types.NamespacedName{Name: "test-hook", Namespace: "default"}.String()]
+ require.True(t, exists)
key := manager.eventKey(event)
hookEventMap[key].FirstSeen = time.Now().Add(-EventTimeoutDuration - time.Minute)
// Cleanup expired events
- err = manager.CleanupExpiredEvents("test-hook")
+ err = manager.CleanupExpiredEvents(types.NamespacedName{Name: "test-hook", Namespace: "default"})
require.NoError(t, err)
// Verify hook map is cleaned up
- _, exists := manager.hookEvents["test-hook"]
+ _, exists = manager.hookEvents[types.NamespacedName{Name: "test-hook", Namespace: "default"}.String()]
assert.False(t, exists)
- activeEvents := manager.GetActiveEvents("test-hook")
+ activeEvents := manager.GetActiveEvents(types.NamespacedName{Name: "test-hook", Namespace: "default"})
assert.Equal(t, 0, len(activeEvents))
}
func TestGetActiveEvents_EmptyHook(t *testing.T) {
manager := NewManager()
- activeEvents := manager.GetActiveEvents("non-existent-hook")
+ activeEvents := manager.GetActiveEvents(types.NamespacedName{Name: "non-existent-hook", Namespace: "default"})
assert.Equal(t, 0, len(activeEvents))
assert.NotNil(t, activeEvents) // Should return empty slice, not nil
}
@@ -272,27 +276,29 @@ func TestGetActiveEvents_WithExpiredEvents(t *testing.T) {
}
// Record both events
- err := manager.RecordEvent("test-hook", recentEvent)
+ err := manager.RecordEvent(types.NamespacedName{Name: "test-hook", Namespace: "default"}, recentEvent)
require.NoError(t, err)
- err = manager.RecordEvent("test-hook", oldEvent)
+ err = manager.RecordEvent(types.NamespacedName{Name: "test-hook", Namespace: "default"}, oldEvent)
require.NoError(t, err)
// Age the old event
- hookEventMap := manager.hookEvents["test-hook"]
+ hookEventMap, exists := manager.hookEvents[types.NamespacedName{Name: "test-hook", Namespace: "default"}.String()]
+ require.True(t, exists)
oldKey := manager.eventKey(oldEvent)
hookEventMap[oldKey].FirstSeen = time.Now().Add(-EventTimeoutDuration - time.Minute)
// Get active events with status (should mark old event as resolved)
- activeEvents := manager.GetActiveEventsWithStatus("test-hook")
+ activeEvents := manager.GetActiveEventsWithStatus(types.NamespacedName{Name: "test-hook", Namespace: "default"})
assert.Equal(t, 2, len(activeEvents))
// Find the events and check their status
var recentEventStatus, oldEventStatus string
for _, event := range activeEvents {
- if event.ResourceName == "recent-pod" {
+ switch event.ResourceName {
+ case "recent-pod":
recentEventStatus = event.Status
- } else if event.ResourceName == "old-pod" {
+ case "old-pod":
oldEventStatus = event.Status
}
}
@@ -319,16 +325,16 @@ func TestGetAllHookNames(t *testing.T) {
}
// Record events for different hooks
- err := manager.RecordEvent("hook1", event1)
+ err := manager.RecordEvent(types.NamespacedName{Name: "hook1", Namespace: "default"}, event1)
require.NoError(t, err)
- err = manager.RecordEvent("hook2", event2)
+ err = manager.RecordEvent(types.NamespacedName{Name: "hook2", Namespace: "default"}, event2)
require.NoError(t, err)
hookNames := manager.GetAllHookNames()
assert.Equal(t, 2, len(hookNames))
- assert.Contains(t, hookNames, "hook1")
- assert.Contains(t, hookNames, "hook2")
+ assert.Contains(t, hookNames, "default/hook1")
+ assert.Contains(t, hookNames, "default/hook2")
}
func TestGetEventCount(t *testing.T) {
@@ -351,15 +357,15 @@ func TestGetEventCount(t *testing.T) {
}
// Record events
- err := manager.RecordEvent("hook1", event1)
+ err := manager.RecordEvent(types.NamespacedName{Name: "hook1", Namespace: "default"}, event1)
require.NoError(t, err)
assert.Equal(t, 1, manager.GetEventCount())
- err = manager.RecordEvent("hook1", event2)
+ err = manager.RecordEvent(types.NamespacedName{Name: "hook1", Namespace: "default"}, event2)
require.NoError(t, err)
assert.Equal(t, 2, manager.GetEventCount())
- err = manager.RecordEvent("hook2", event1)
+ err = manager.RecordEvent(types.NamespacedName{Name: "hook2", Namespace: "default"}, event1)
require.NoError(t, err)
assert.Equal(t, 3, manager.GetEventCount())
}
@@ -385,19 +391,19 @@ func TestConcurrentAccess(t *testing.T) {
hookName := fmt.Sprintf("hook-%d", id)
// Record event
- err := manager.RecordEvent(hookName, event)
+ err := manager.RecordEvent(types.NamespacedName{Name: hookName, Namespace: "default"}, event)
assert.NoError(t, err)
// Check if should process
- shouldProcess := manager.ShouldProcessEvent(hookName, event)
+ shouldProcess := manager.ShouldProcessEvent(types.NamespacedName{Name: hookName, Namespace: "default"}, event)
assert.False(t, shouldProcess) // Should be false since we just recorded it
// Get active events
- activeEvents := manager.GetActiveEvents(hookName)
+ activeEvents := manager.GetActiveEvents(types.NamespacedName{Name: hookName, Namespace: "default"})
assert.Equal(t, 1, len(activeEvents))
// Cleanup
- err = manager.CleanupExpiredEvents(hookName)
+ err = manager.CleanupExpiredEvents(types.NamespacedName{Name: hookName, Namespace: "default"})
assert.NoError(t, err)
}(i)
}
@@ -424,7 +430,7 @@ func BenchmarkRecordEvent(b *testing.B) {
b.ResetTimer()
for i := 0; i < b.N; i++ {
- err := manager.RecordEvent("test-hook", event)
+ err := manager.RecordEvent(types.NamespacedName{Name: "test-hook", Namespace: "default"}, event)
if err != nil {
b.Fatal(err)
}
@@ -442,13 +448,13 @@ func BenchmarkShouldProcessEvent(b *testing.B) {
}
// Record event first
- err := manager.RecordEvent("test-hook", event)
+ err := manager.RecordEvent(types.NamespacedName{Name: "test-hook", Namespace: "default"}, event)
if err != nil {
b.Fatal(err)
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
- manager.ShouldProcessEvent("test-hook", event)
+ manager.ShouldProcessEvent(types.NamespacedName{Name: "test-hook", Namespace: "default"}, event)
}
}
diff --git a/internal/event/watcher.go b/internal/event/watcher.go
index 94db47f..e5b4807 100644
--- a/internal/event/watcher.go
+++ b/internal/event/watcher.go
@@ -14,7 +14,8 @@ import (
"k8s.io/client-go/kubernetes"
"sigs.k8s.io/controller-runtime/pkg/log"
- "github.com/antweiss/khook/internal/interfaces"
+ "github.com/kagent-dev/khook/api/v1alpha2"
+ "github.com/kagent-dev/khook/internal/interfaces"
)
// Watcher implements the EventWatcher interface
@@ -184,7 +185,7 @@ func (w *Watcher) WatchEvents(ctx context.Context) (<-chan interfaces.Event, err
}
// FilterEvent matches an event against hook configurations and returns matches
-func (w *Watcher) FilterEvent(event interfaces.Event, hooks []interface{}) []interfaces.EventMatch {
+func (w *Watcher) FilterEvent(event interfaces.Event, hooks []*v1alpha2.Hook) []interfaces.EventMatch {
var matches []interfaces.EventMatch
// This will be implemented when we have the actual hook processing logic
diff --git a/internal/event/watcher_test.go b/internal/event/watcher_test.go
index 59e7ece..8c7952b 100644
--- a/internal/event/watcher_test.go
+++ b/internal/event/watcher_test.go
@@ -12,7 +12,8 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes/fake"
- "github.com/antweiss/khook/internal/interfaces"
+ "github.com/kagent-dev/khook/api/v1alpha2"
+ "github.com/kagent-dev/khook/internal/interfaces"
)
func TestMapEventType(t *testing.T) {
@@ -135,7 +136,7 @@ func TestFilterEvent(t *testing.T) {
// For now, just test that FilterEvent returns empty matches
// This will be expanded when we implement the actual filtering logic
- hooks := []interface{}{}
+ hooks := []*v1alpha2.Hook{}
matches := watcher.FilterEvent(event, hooks)
// Should return empty matches for now
diff --git a/internal/interfaces/controller.go b/internal/interfaces/controller.go
index 425e508..8e5f950 100644
--- a/internal/interfaces/controller.go
+++ b/internal/interfaces/controller.go
@@ -4,16 +4,17 @@ import (
"context"
"time"
- "github.com/antweiss/khook/api/v1alpha2"
+ "github.com/kagent-dev/khook/api/v1alpha2"
"k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/types"
)
// ControllerManager orchestrates the controller lifecycle and watches
type ControllerManager interface {
Start(ctx context.Context) error
Stop() error
- AddHookWatch(hook interface{}) error
- RemoveHookWatch(hookName string) error
+ AddHookWatch(hook *v1alpha2.Hook) error
+ RemoveHookWatch(hookRef types.NamespacedName) error
}
// Event represents a Kubernetes event with relevant metadata
@@ -30,22 +31,21 @@ type Event struct {
// EventMatch represents a matched event with its corresponding hook configuration
type EventMatch struct {
- Hook interface{} `json:"hook"`
- Configuration interface{} `json:"configuration"`
- Event Event `json:"event"`
+ Hook *v1alpha2.Hook `json:"hook"`
+ Event Event `json:"event"`
}
// EventWatcher monitors Kubernetes events and filters them against hook configurations
type EventWatcher interface {
WatchEvents(ctx context.Context) (<-chan Event, error)
- FilterEvent(event Event, hooks []interface{}) []EventMatch
+ FilterEvent(event Event, hooks []*v1alpha2.Hook) []EventMatch
Start(ctx context.Context) error
Stop() error
}
// AgentRequest represents a request to the Kagent API
type AgentRequest struct {
- AgentId string `json:"agentId"`
+ AgentRef types.NamespacedName `json:"agentId"`
Prompt string `json:"prompt"`
EventName string `json:"eventName"`
EventTime time.Time `json:"eventTime"`
@@ -79,12 +79,12 @@ type ActiveEvent struct {
// DeduplicationManager implements event deduplication logic with timeout
type DeduplicationManager interface {
- ShouldProcessEvent(hookName string, event Event) bool
- RecordEvent(hookName string, event Event) error
- CleanupExpiredEvents(hookName string) error
- GetActiveEvents(hookName string) []ActiveEvent
- GetActiveEventsWithStatus(hookName string) []ActiveEvent
- MarkNotified(hookName string, event Event)
+ ShouldProcessEvent(hookRef types.NamespacedName, event Event) bool
+ RecordEvent(hookRef types.NamespacedName, event Event) error
+ CleanupExpiredEvents(hookRef types.NamespacedName) error
+ GetActiveEvents(hookRef types.NamespacedName) []ActiveEvent
+ GetActiveEventsWithStatus(hookRef types.NamespacedName) []ActiveEvent
+ MarkNotified(hookRef types.NamespacedName, event Event)
}
// EventRecorder handles Kubernetes event recording
@@ -96,14 +96,14 @@ type EventRecorder interface {
// StatusManager handles status updates and event recording for Hook resources
type StatusManager interface {
- UpdateHookStatus(ctx context.Context, hook interface{}, activeEvents []ActiveEvent) error
- RecordEventFiring(ctx context.Context, hook interface{}, event Event, agentId string) error
- RecordEventResolved(ctx context.Context, hook interface{}, eventType, resourceName string) error
- RecordError(ctx context.Context, hook interface{}, event Event, err error, agentId string) error
- RecordAgentCallSuccess(ctx context.Context, hook interface{}, event Event, agentId, requestId string) error
- RecordAgentCallFailure(ctx context.Context, hook interface{}, event Event, agentId string, err error) error
- RecordDuplicateEvent(ctx context.Context, hook interface{}, event Event) error
- GetHookStatus(ctx context.Context, hookName, namespace string) (*v1alpha2.HookStatus, error)
+ UpdateHookStatus(ctx context.Context, hook *v1alpha2.Hook, activeEvents []ActiveEvent) error
+ RecordEventFiring(ctx context.Context, hook *v1alpha2.Hook, event Event, agentRef types.NamespacedName) error
+ RecordEventResolved(ctx context.Context, hook *v1alpha2.Hook, eventType, resourceName string) error
+ RecordError(ctx context.Context, hook *v1alpha2.Hook, event Event, err error, agentRef types.NamespacedName) error
+ RecordAgentCallSuccess(ctx context.Context, hook *v1alpha2.Hook, event Event, agentRef types.NamespacedName, requestId string) error
+ RecordAgentCallFailure(ctx context.Context, hook *v1alpha2.Hook, event Event, agentRef types.NamespacedName, err error) error
+ RecordDuplicateEvent(ctx context.Context, hook *v1alpha2.Hook, event Event) error
+ GetHookStatus(ctx context.Context, hookRef types.NamespacedName) (*v1alpha2.HookStatus, error)
LogControllerStartup(ctx context.Context, version string, config map[string]interface{})
LogControllerShutdown(ctx context.Context, reason string)
}
diff --git a/internal/pipeline/integration_test.go b/internal/pipeline/integration_test.go
index b389473..c843697 100644
--- a/internal/pipeline/integration_test.go
+++ b/internal/pipeline/integration_test.go
@@ -9,14 +9,15 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/tools/record"
- "github.com/antweiss/khook/api/v1alpha2"
- "github.com/antweiss/khook/internal/deduplication"
- "github.com/antweiss/khook/internal/event"
- "github.com/antweiss/khook/internal/interfaces"
- "github.com/antweiss/khook/internal/status"
+ "github.com/kagent-dev/khook/api/v1alpha2"
+ "github.com/kagent-dev/khook/internal/deduplication"
+ "github.com/kagent-dev/khook/internal/event"
+ "github.com/kagent-dev/khook/internal/interfaces"
+ "github.com/kagent-dev/khook/internal/status"
)
// MockKagentClientForIntegration provides a simple mock for integration testing
@@ -35,7 +36,7 @@ func NewMockKagentClientForIntegration() *MockKagentClientForIntegration {
func (m *MockKagentClientForIntegration) CallAgent(ctx context.Context, request interfaces.AgentRequest) (*interfaces.AgentResponse, error) {
m.calls = append(m.calls, request)
- if response, exists := m.responses[request.AgentId]; exists {
+ if response, exists := m.responses[request.AgentRef.String()]; exists {
if response == nil {
return nil, errors.New("mock agent call failed")
}
@@ -46,7 +47,7 @@ func (m *MockKagentClientForIntegration) CallAgent(ctx context.Context, request
return &interfaces.AgentResponse{
Success: true,
Message: "Mock response",
- RequestId: "mock-request-" + request.AgentId,
+ RequestId: "mock-request-" + request.AgentRef.String(),
}, nil
}
@@ -54,8 +55,8 @@ func (m *MockKagentClientForIntegration) Authenticate() error {
return nil
}
-func (m *MockKagentClientForIntegration) SetResponse(agentId string, response *interfaces.AgentResponse) {
- m.responses[agentId] = response
+func (m *MockKagentClientForIntegration) SetResponse(agentRef types.NamespacedName, response *interfaces.AgentResponse) {
+ m.responses[agentRef.String()] = response
}
func (m *MockKagentClientForIntegration) GetCalls() []interfaces.AgentRequest {
@@ -91,8 +92,10 @@ func TestEventProcessingIntegration(t *testing.T) {
EventConfigurations: []v1alpha2.EventConfiguration{
{
EventType: "pod-restart",
- AgentId: "restart-agent",
- Prompt: "Pod {{.ResourceName}} restarted in {{.Namespace}}",
+ AgentRef: v1alpha2.ObjectReference{
+ Name: "restart-agent",
+ },
+ Prompt: "Pod {{.ResourceName}} restarted in {{.Namespace}}",
},
},
},
@@ -107,13 +110,17 @@ func TestEventProcessingIntegration(t *testing.T) {
EventConfigurations: []v1alpha2.EventConfiguration{
{
EventType: "pod-restart",
- AgentId: "multi-restart-agent",
- Prompt: "Multi-hook: Pod {{.ResourceName}} restarted",
+ AgentRef: v1alpha2.ObjectReference{
+ Name: "multi-restart-agent",
+ },
+ Prompt: "Multi-hook: Pod {{.ResourceName}} restarted",
},
{
EventType: "oom-kill",
- AgentId: "oom-agent",
- Prompt: "OOM kill detected for {{.ResourceName}}",
+ AgentRef: v1alpha2.ObjectReference{
+ Name: "oom-agent",
+ },
+ Prompt: "OOM kill detected for {{.ResourceName}}",
},
},
},
@@ -148,25 +155,25 @@ func TestEventProcessingIntegration(t *testing.T) {
// Verify first call (restart-agent)
call1 := calls[0]
- assert.Equal(t, "restart-agent", call1.AgentId)
+ assert.Equal(t, "restart-agent", call1.AgentRef.Name)
assert.Equal(t, "pod-restart", call1.EventName)
assert.Equal(t, "test-pod-1", call1.ResourceName)
assert.Contains(t, call1.Prompt, "Pod test-pod-1 restarted in default")
// Verify second call (multi-restart-agent)
call2 := calls[1]
- assert.Equal(t, "multi-restart-agent", call2.AgentId)
+ assert.Equal(t, "multi-restart-agent", call2.AgentRef.Name)
assert.Equal(t, "pod-restart", call2.EventName)
assert.Equal(t, "test-pod-1", call2.ResourceName)
assert.Contains(t, call2.Prompt, "Multi-hook: Pod test-pod-1 restarted")
// Verify deduplication state
- activeEvents1 := deduplicationManager.GetActiveEvents("default/pod-restart-hook")
+ activeEvents1 := deduplicationManager.GetActiveEvents(types.NamespacedName{Name: "pod-restart-hook", Namespace: "default"})
assert.Len(t, activeEvents1, 1)
assert.Equal(t, "pod-restart", activeEvents1[0].EventType)
assert.Equal(t, "test-pod-1", activeEvents1[0].ResourceName)
- activeEvents2 := deduplicationManager.GetActiveEvents("default/multi-event-hook")
+ activeEvents2 := deduplicationManager.GetActiveEvents(types.NamespacedName{Name: "multi-event-hook", Namespace: "default"})
assert.Len(t, activeEvents2, 1)
assert.Equal(t, "pod-restart", activeEvents2[0].EventType)
assert.Equal(t, "test-pod-1", activeEvents2[0].ResourceName)
@@ -220,7 +227,7 @@ func TestEventProcessingIntegration(t *testing.T) {
assert.Len(t, calls, 1, "Should call only the OOM agent")
call := calls[0]
- assert.Equal(t, "oom-agent", call.AgentId)
+ assert.Equal(t, "oom-agent", call.AgentRef.Name)
assert.Equal(t, "oom-kill", call.EventName)
assert.Equal(t, "test-pod-2", call.ResourceName)
assert.Contains(t, call.Prompt, "OOM kill detected for test-pod-2")
@@ -251,14 +258,14 @@ func TestEventProcessingIntegration(t *testing.T) {
// Test 5: Verify active events state
t.Run("VerifyActiveEventsState", func(t *testing.T) {
// Check active events for pod-restart-hook
- activeEvents1 := deduplicationManager.GetActiveEvents("default/pod-restart-hook")
+ activeEvents1 := deduplicationManager.GetActiveEvents(types.NamespacedName{Name: "pod-restart-hook", Namespace: "default"})
assert.Len(t, activeEvents1, 1)
assert.Equal(t, "pod-restart", activeEvents1[0].EventType)
assert.Equal(t, "test-pod-1", activeEvents1[0].ResourceName)
assert.Equal(t, "firing", activeEvents1[0].Status)
// Check active events for multi-event-hook
- activeEvents2 := deduplicationManager.GetActiveEvents("default/multi-event-hook")
+ activeEvents2 := deduplicationManager.GetActiveEvents(types.NamespacedName{Name: "multi-event-hook", Namespace: "default"})
assert.Len(t, activeEvents2, 2) // pod-restart and oom-kill
eventTypes := make(map[string]bool)
@@ -293,8 +300,10 @@ func TestEventProcessingWithErrors(t *testing.T) {
EventConfigurations: []v1alpha2.EventConfiguration{
{
EventType: "pod-restart",
- AgentId: "failing-agent",
- Prompt: "This will fail",
+ AgentRef: v1alpha2.ObjectReference{
+ Name: "failing-agent",
+ },
+ Prompt: "This will fail",
},
},
},
@@ -309,8 +318,10 @@ func TestEventProcessingWithErrors(t *testing.T) {
EventConfigurations: []v1alpha2.EventConfiguration{
{
EventType: "pod-restart",
- AgentId: "working-agent",
- Prompt: "This will work",
+ AgentRef: v1alpha2.ObjectReference{
+ Name: "working-agent",
+ },
+ Prompt: "This will work",
},
},
},
@@ -320,8 +331,8 @@ func TestEventProcessingWithErrors(t *testing.T) {
ctx := context.Background()
// Set up one agent to fail and one to succeed
- mockKagentClient.SetResponse("failing-agent", nil) // This will cause an error
- mockKagentClient.SetResponse("working-agent", &interfaces.AgentResponse{
+ mockKagentClient.SetResponse(types.NamespacedName{Name: "failing-agent", Namespace: "default"}, nil) // This will cause an error
+ mockKagentClient.SetResponse(types.NamespacedName{Name: "working-agent", Namespace: "default"}, &interfaces.AgentResponse{
Success: true,
Message: "Success",
RequestId: "working-request",
@@ -350,7 +361,7 @@ func TestEventProcessingWithErrors(t *testing.T) {
// Verify both agents were attempted
agentIds := make(map[string]bool)
for _, call := range calls {
- agentIds[call.AgentId] = true
+ agentIds[call.AgentRef.Name] = true
}
assert.True(t, agentIds["failing-agent"])
assert.True(t, agentIds["working-agent"])
diff --git a/internal/pipeline/processor.go b/internal/pipeline/processor.go
index 4d307a6..288962e 100644
--- a/internal/pipeline/processor.go
+++ b/internal/pipeline/processor.go
@@ -9,10 +9,11 @@ import (
"time"
"github.com/go-logr/logr"
+ "k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/log"
- "github.com/antweiss/khook/api/v1alpha2"
- "github.com/antweiss/khook/internal/interfaces"
+ "github.com/kagent-dev/khook/api/v1alpha2"
+ "github.com/kagent-dev/khook/internal/interfaces"
)
// Processor handles the complete event processing pipeline
@@ -70,7 +71,7 @@ func (p *Processor) ProcessEvent(ctx context.Context, event interfaces.Event, ho
"hook", match.Hook.Name,
"eventType", event.Type,
"resourceName", event.ResourceName,
- "agentId", match.Configuration.AgentId)
+ "agentRef", match.Configuration.AgentRef)
lastError = err
// Continue processing other matches even if one fails
continue
@@ -108,72 +109,84 @@ func (p *Processor) findEventMatches(event interfaces.Event, hooks []*v1alpha2.H
// processEventMatch processes a single event match through the complete pipeline
func (p *Processor) processEventMatch(ctx context.Context, match EventMatch) error {
- hookName := fmt.Sprintf("%s/%s", match.Hook.Namespace, match.Hook.Name)
+ hookRef := types.NamespacedName{
+ Namespace: match.Hook.Namespace,
+ Name: match.Hook.Name,
+ }
// Check deduplication - should we process this event?
- if !p.deduplicationManager.ShouldProcessEvent(hookName, match.Event) {
+ if !p.deduplicationManager.ShouldProcessEvent(hookRef, match.Event) {
p.logger.V(1).Info("Event ignored due to deduplication",
- "hook", hookName,
+ "hook", hookRef,
"eventType", match.Event.Type,
"resourceName", match.Event.ResourceName)
// Record that we ignored a duplicate event
if err := p.statusManager.RecordDuplicateEvent(ctx, match.Hook, match.Event); err != nil {
- p.logger.Error(err, "Failed to record duplicate event", "hook", hookName)
+ p.logger.Error(err, "Failed to record duplicate event", "hook", hookRef)
}
return nil
}
// Record the event in deduplication manager
- if err := p.deduplicationManager.RecordEvent(hookName, match.Event); err != nil {
+ if err := p.deduplicationManager.RecordEvent(hookRef, match.Event); err != nil {
return fmt.Errorf("failed to record event in deduplication manager: %w", err)
}
+ agentRefNs := match.Hook.Namespace
+ if match.Configuration.AgentRef.Namespace != nil {
+ agentRefNs = *match.Configuration.AgentRef.Namespace
+ }
+ agentRef := types.NamespacedName{
+ Name: match.Configuration.AgentRef.Name,
+ Namespace: agentRefNs,
+ }
+
// Record that the event is firing
- if err := p.statusManager.RecordEventFiring(ctx, match.Hook, match.Event, match.Configuration.AgentId); err != nil {
- p.logger.Error(err, "Failed to record event firing", "hook", hookName)
+ if err := p.statusManager.RecordEventFiring(ctx, match.Hook, match.Event, agentRef); err != nil {
+ p.logger.Error(err, "Failed to record event firing", "hook", hookRef)
// Continue processing even if status recording fails
}
// Create agent request with event context
- agentRequest := p.createAgentRequest(match)
+ agentRequest := p.createAgentRequest(match, agentRef)
// Call the Kagent agent
response, err := p.kagentClient.CallAgent(ctx, agentRequest)
if err != nil {
// Record the failure
- if statusErr := p.statusManager.RecordAgentCallFailure(ctx, match.Hook, match.Event, match.Configuration.AgentId, err); statusErr != nil {
- p.logger.Error(statusErr, "Failed to record agent call failure", "hook", hookName)
+ if statusErr := p.statusManager.RecordAgentCallFailure(ctx, match.Hook, match.Event, agentRef, err); statusErr != nil {
+ p.logger.Error(statusErr, "Failed to record agent call failure", "hook", hookRef)
}
- return fmt.Errorf("failed to call agent %s: %w", match.Configuration.AgentId, err)
+ return fmt.Errorf("failed to call agent %s: %w", agentRef.Name, err)
}
// Record successful agent call
- if err := p.statusManager.RecordAgentCallSuccess(ctx, match.Hook, match.Event, match.Configuration.AgentId, response.RequestId); err != nil {
- p.logger.Error(err, "Failed to record agent call success", "hook", hookName)
+ if err := p.statusManager.RecordAgentCallSuccess(ctx, match.Hook, match.Event, agentRef, response.RequestId); err != nil {
+ p.logger.Error(err, "Failed to record agent call success", "hook", hookRef)
// Continue even if status recording fails
}
// Mark event as notified to suppress re-sending within suppression window
- p.deduplicationManager.MarkNotified(hookName, match.Event)
+ p.deduplicationManager.MarkNotified(hookRef, match.Event)
p.logger.Info("Successfully processed event match",
- "hook", hookName,
+ "hook", hookRef,
"eventType", match.Event.Type,
"resourceName", match.Event.ResourceName,
- "agentId", match.Configuration.AgentId,
+ "agentRef", agentRef,
"requestId", response.RequestId)
return nil
}
// createAgentRequest creates an agent request from an event match
-func (p *Processor) createAgentRequest(match EventMatch) interfaces.AgentRequest {
+func (p *Processor) createAgentRequest(match EventMatch, agentRef types.NamespacedName) interfaces.AgentRequest {
// Expand prompt template with event context
prompt := p.expandPromptTemplate(match.Configuration.Prompt, match.Event)
return interfaces.AgentRequest{
- AgentId: match.Configuration.AgentId,
+ AgentRef: agentRef,
Prompt: prompt,
EventName: match.Event.Type,
EventTime: match.Event.Timestamp,
@@ -328,20 +341,23 @@ func (p *Processor) UpdateHookStatuses(ctx context.Context, hooks []*v1alpha2.Ho
p.logger.Info("Updating hook statuses", "hookCount", len(hooks))
for _, hook := range hooks {
- hookName := fmt.Sprintf("%s/%s", hook.Namespace, hook.Name)
+ hookRef := types.NamespacedName{
+ Namespace: hook.Namespace,
+ Name: hook.Name,
+ }
// Get active events for this hook with current status
- activeEvents := p.deduplicationManager.GetActiveEventsWithStatus(hookName)
+ activeEvents := p.deduplicationManager.GetActiveEventsWithStatus(hookRef)
// Update the hook status
if err := p.statusManager.UpdateHookStatus(ctx, hook, activeEvents); err != nil {
- p.logger.Error(err, "Failed to update hook status", "hook", hookName)
+ p.logger.Error(err, "Failed to update hook status", "hook", hookRef)
// Continue updating other hooks even if one fails
continue
}
p.logger.V(1).Info("Updated hook status",
- "hook", hookName,
+ "hook", hookRef,
"activeEventsCount", len(activeEvents))
}
@@ -353,10 +369,13 @@ func (p *Processor) CleanupExpiredEvents(ctx context.Context, hooks []*v1alpha2.
p.logger.V(1).Info("Cleaning up expired events", "hookCount", len(hooks))
for _, hook := range hooks {
- hookName := fmt.Sprintf("%s/%s", hook.Namespace, hook.Name)
+ hookRef := types.NamespacedName{
+ Namespace: hook.Namespace,
+ Name: hook.Name,
+ }
- if err := p.deduplicationManager.CleanupExpiredEvents(hookName); err != nil {
- p.logger.Error(err, "Failed to cleanup expired events", "hook", hookName)
+ if err := p.deduplicationManager.CleanupExpiredEvents(hookRef); err != nil {
+ p.logger.Error(err, "Failed to cleanup expired events", "hook", hookRef)
// Continue cleaning up other hooks even if one fails
continue
}
diff --git a/internal/pipeline/processor_test.go b/internal/pipeline/processor_test.go
index a2dc3a1..743488d 100644
--- a/internal/pipeline/processor_test.go
+++ b/internal/pipeline/processor_test.go
@@ -9,9 +9,10 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/types"
- "github.com/antweiss/khook/api/v1alpha2"
- "github.com/antweiss/khook/internal/interfaces"
+ "github.com/kagent-dev/khook/api/v1alpha2"
+ "github.com/kagent-dev/khook/internal/interfaces"
)
// Mock implementations for testing
@@ -24,7 +25,7 @@ func (m *MockEventWatcher) WatchEvents(ctx context.Context) (<-chan interfaces.E
return args.Get(0).(<-chan interfaces.Event), args.Error(1)
}
-func (m *MockEventWatcher) FilterEvent(event interfaces.Event, hooks []interface{}) []interfaces.EventMatch {
+func (m *MockEventWatcher) FilterEvent(event interfaces.Event, hooks []*v1alpha2.Hook) []interfaces.EventMatch {
args := m.Called(event, hooks)
return args.Get(0).([]interfaces.EventMatch)
}
@@ -43,33 +44,33 @@ type MockDeduplicationManager struct {
mock.Mock
}
-func (m *MockDeduplicationManager) ShouldProcessEvent(hookName string, event interfaces.Event) bool {
- args := m.Called(hookName, event)
+func (m *MockDeduplicationManager) ShouldProcessEvent(hookRef types.NamespacedName, event interfaces.Event) bool {
+ args := m.Called(hookRef, event)
return args.Bool(0)
}
-func (m *MockDeduplicationManager) RecordEvent(hookName string, event interfaces.Event) error {
- args := m.Called(hookName, event)
+func (m *MockDeduplicationManager) RecordEvent(hookRef types.NamespacedName, event interfaces.Event) error {
+ args := m.Called(hookRef, event)
return args.Error(0)
}
-func (m *MockDeduplicationManager) CleanupExpiredEvents(hookName string) error {
- args := m.Called(hookName)
+func (m *MockDeduplicationManager) CleanupExpiredEvents(hookRef types.NamespacedName) error {
+ args := m.Called(hookRef)
return args.Error(0)
}
-func (m *MockDeduplicationManager) GetActiveEvents(hookName string) []interfaces.ActiveEvent {
- args := m.Called(hookName)
+func (m *MockDeduplicationManager) GetActiveEvents(hookRef types.NamespacedName) []interfaces.ActiveEvent {
+ args := m.Called(hookRef)
return args.Get(0).([]interfaces.ActiveEvent)
}
-func (m *MockDeduplicationManager) GetActiveEventsWithStatus(hookName string) []interfaces.ActiveEvent {
- args := m.Called(hookName)
+func (m *MockDeduplicationManager) GetActiveEventsWithStatus(hookRef types.NamespacedName) []interfaces.ActiveEvent {
+ args := m.Called(hookRef)
return args.Get(0).([]interfaces.ActiveEvent)
}
-func (m *MockDeduplicationManager) MarkNotified(hookName string, event interfaces.Event) {
- m.Called(hookName, event)
+func (m *MockDeduplicationManager) MarkNotified(hookRef types.NamespacedName, event interfaces.Event) {
+ m.Called(hookRef, event)
}
type MockKagentClient struct {
@@ -93,43 +94,43 @@ type MockStatusManager struct {
mock.Mock
}
-func (m *MockStatusManager) UpdateHookStatus(ctx context.Context, hook interface{}, activeEvents []interfaces.ActiveEvent) error {
+func (m *MockStatusManager) UpdateHookStatus(ctx context.Context, hook *v1alpha2.Hook, activeEvents []interfaces.ActiveEvent) error {
args := m.Called(ctx, hook, activeEvents)
return args.Error(0)
}
-func (m *MockStatusManager) RecordEventFiring(ctx context.Context, hook interface{}, event interfaces.Event, agentId string) error {
- args := m.Called(ctx, hook, event, agentId)
+func (m *MockStatusManager) RecordEventFiring(ctx context.Context, hook *v1alpha2.Hook, event interfaces.Event, agentRef types.NamespacedName) error {
+ args := m.Called(ctx, hook, event, agentRef)
return args.Error(0)
}
-func (m *MockStatusManager) RecordEventResolved(ctx context.Context, hook interface{}, eventType, resourceName string) error {
+func (m *MockStatusManager) RecordEventResolved(ctx context.Context, hook *v1alpha2.Hook, eventType, resourceName string) error {
args := m.Called(ctx, hook, eventType, resourceName)
return args.Error(0)
}
-func (m *MockStatusManager) RecordError(ctx context.Context, hook interface{}, event interfaces.Event, err error, agentId string) error {
- args := m.Called(ctx, hook, event, err, agentId)
+func (m *MockStatusManager) RecordError(ctx context.Context, hook *v1alpha2.Hook, event interfaces.Event, err error, agentRef types.NamespacedName) error {
+ args := m.Called(ctx, hook, event, err, agentRef)
return args.Error(0)
}
-func (m *MockStatusManager) RecordAgentCallSuccess(ctx context.Context, hook interface{}, event interfaces.Event, agentId, requestId string) error {
- args := m.Called(ctx, hook, event, agentId, requestId)
+func (m *MockStatusManager) RecordAgentCallSuccess(ctx context.Context, hook *v1alpha2.Hook, event interfaces.Event, agentRef types.NamespacedName, requestId string) error {
+ args := m.Called(ctx, hook, event, agentRef, requestId)
return args.Error(0)
}
-func (m *MockStatusManager) RecordAgentCallFailure(ctx context.Context, hook interface{}, event interfaces.Event, agentId string, err error) error {
- args := m.Called(ctx, hook, event, agentId, err)
+func (m *MockStatusManager) RecordAgentCallFailure(ctx context.Context, hook *v1alpha2.Hook, event interfaces.Event, agentRef types.NamespacedName, err error) error {
+ args := m.Called(ctx, hook, event, agentRef, err)
return args.Error(0)
}
-func (m *MockStatusManager) RecordDuplicateEvent(ctx context.Context, hook interface{}, event interfaces.Event) error {
+func (m *MockStatusManager) RecordDuplicateEvent(ctx context.Context, hook *v1alpha2.Hook, event interfaces.Event) error {
args := m.Called(ctx, hook, event)
return args.Error(0)
}
-func (m *MockStatusManager) GetHookStatus(ctx context.Context, hookName, namespace string) (*v1alpha2.HookStatus, error) {
- args := m.Called(ctx, hookName, namespace)
+func (m *MockStatusManager) GetHookStatus(ctx context.Context, hookRef types.NamespacedName) (*v1alpha2.HookStatus, error) {
+ args := m.Called(ctx, hookRef)
if args.Get(0) == nil {
return nil, args.Error(1)
}
@@ -185,8 +186,10 @@ func TestProcessor_ProcessEvent_Success(t *testing.T) {
hook := createTestHook("test-hook", "default", []v1alpha2.EventConfiguration{
{
EventType: "pod-restart",
- AgentId: "test-agent",
- Prompt: "Handle pod restart for {{.ResourceName}}",
+ AgentRef: v1alpha2.ObjectReference{
+ Name: "test-agent",
+ },
+ Prompt: "Handle pod restart for {{.ResourceName}}",
},
})
@@ -196,9 +199,9 @@ func TestProcessor_ProcessEvent_Success(t *testing.T) {
ctx := context.Background()
// Setup expectations
- mockDeduplicationManager.On("ShouldProcessEvent", "default/test-hook", event).Return(true)
- mockDeduplicationManager.On("RecordEvent", "default/test-hook", event).Return(nil)
- mockStatusManager.On("RecordEventFiring", ctx, hook, event, "test-agent").Return(nil)
+ mockDeduplicationManager.On("ShouldProcessEvent", types.NamespacedName{Name: "test-hook", Namespace: "default"}, event).Return(true)
+ mockDeduplicationManager.On("RecordEvent", types.NamespacedName{Name: "test-hook", Namespace: "default"}, event).Return(nil)
+ mockStatusManager.On("RecordEventFiring", ctx, hook, event, types.NamespacedName{Name: "test-agent", Namespace: "default"}).Return(nil)
expectedResponse := &interfaces.AgentResponse{
Success: true,
@@ -206,13 +209,13 @@ func TestProcessor_ProcessEvent_Success(t *testing.T) {
RequestId: "test-request-id",
}
mockKagentClient.On("CallAgent", ctx, mock.MatchedBy(func(req interfaces.AgentRequest) bool {
- return req.AgentId == "test-agent" &&
+ return req.AgentRef.Name == "test-agent" &&
req.EventName == "pod-restart" &&
req.ResourceName == "test-pod"
})).Return(expectedResponse, nil)
- mockStatusManager.On("RecordAgentCallSuccess", ctx, hook, event, "test-agent", "test-request-id").Return(nil)
- mockDeduplicationManager.On("MarkNotified", "default/test-hook", event).Return()
+ mockStatusManager.On("RecordAgentCallSuccess", ctx, hook, event, types.NamespacedName{Name: "test-agent", Namespace: "default"}, "test-request-id").Return(nil)
+ mockDeduplicationManager.On("MarkNotified", types.NamespacedName{Name: "test-hook", Namespace: "default"}, event).Return()
// Execute
err := processor.ProcessEvent(ctx, event, hooks)
@@ -237,8 +240,10 @@ func TestProcessor_ProcessEvent_DuplicateEvent(t *testing.T) {
hook := createTestHook("test-hook", "default", []v1alpha2.EventConfiguration{
{
EventType: "pod-restart",
- AgentId: "test-agent",
- Prompt: "Handle pod restart",
+ AgentRef: v1alpha2.ObjectReference{
+ Name: "test-agent",
+ },
+ Prompt: "Handle pod restart",
},
})
@@ -248,7 +253,7 @@ func TestProcessor_ProcessEvent_DuplicateEvent(t *testing.T) {
ctx := context.Background()
// Setup expectations - event should be ignored due to deduplication
- mockDeduplicationManager.On("ShouldProcessEvent", "default/test-hook", event).Return(false)
+ mockDeduplicationManager.On("ShouldProcessEvent", types.NamespacedName{Name: "test-hook", Namespace: "default"}, event).Return(false)
mockStatusManager.On("RecordDuplicateEvent", ctx, hook, event).Return(nil)
// Execute
@@ -275,8 +280,10 @@ func TestProcessor_ProcessEvent_AgentCallFailure(t *testing.T) {
hook := createTestHook("test-hook", "default", []v1alpha2.EventConfiguration{
{
EventType: "pod-restart",
- AgentId: "test-agent",
- Prompt: "Handle pod restart",
+ AgentRef: v1alpha2.ObjectReference{
+ Name: "test-agent",
+ },
+ Prompt: "Handle pod restart",
},
})
@@ -287,11 +294,11 @@ func TestProcessor_ProcessEvent_AgentCallFailure(t *testing.T) {
agentError := errors.New("agent call failed")
// Setup expectations
- mockDeduplicationManager.On("ShouldProcessEvent", "default/test-hook", event).Return(true)
- mockDeduplicationManager.On("RecordEvent", "default/test-hook", event).Return(nil)
- mockStatusManager.On("RecordEventFiring", ctx, hook, event, "test-agent").Return(nil)
+ mockDeduplicationManager.On("ShouldProcessEvent", types.NamespacedName{Name: "test-hook", Namespace: "default"}, event).Return(true)
+ mockDeduplicationManager.On("RecordEvent", types.NamespacedName{Name: "test-hook", Namespace: "default"}, event).Return(nil)
+ mockStatusManager.On("RecordEventFiring", ctx, hook, event, types.NamespacedName{Name: "test-agent", Namespace: "default"}).Return(nil)
mockKagentClient.On("CallAgent", ctx, mock.AnythingOfType("interfaces.AgentRequest")).Return(nil, agentError)
- mockStatusManager.On("RecordAgentCallFailure", ctx, hook, event, "test-agent", agentError).Return(nil)
+ mockStatusManager.On("RecordAgentCallFailure", ctx, hook, event, types.NamespacedName{Name: "test-agent", Namespace: "default"}, agentError).Return(nil)
// Execute
err := processor.ProcessEvent(ctx, event, hooks)
@@ -317,16 +324,20 @@ func TestProcessor_ProcessEvent_MultipleHooks(t *testing.T) {
hook1 := createTestHook("hook1", "default", []v1alpha2.EventConfiguration{
{
EventType: "pod-restart",
- AgentId: "agent1",
- Prompt: "Agent 1 prompt",
+ AgentRef: v1alpha2.ObjectReference{
+ Name: "agent1",
+ },
+ Prompt: "Agent 1 prompt",
},
})
hook2 := createTestHook("hook2", "default", []v1alpha2.EventConfiguration{
{
EventType: "pod-restart",
- AgentId: "agent2",
- Prompt: "Agent 2 prompt",
+ AgentRef: v1alpha2.ObjectReference{
+ Name: "agent2",
+ },
+ Prompt: "Agent 2 prompt",
},
})
@@ -336,29 +347,29 @@ func TestProcessor_ProcessEvent_MultipleHooks(t *testing.T) {
ctx := context.Background()
// Setup expectations for both hooks
- mockDeduplicationManager.On("ShouldProcessEvent", "default/hook1", event).Return(true)
- mockDeduplicationManager.On("RecordEvent", "default/hook1", event).Return(nil)
- mockStatusManager.On("RecordEventFiring", ctx, hook1, event, "agent1").Return(nil)
+ mockDeduplicationManager.On("ShouldProcessEvent", types.NamespacedName{Name: "hook1", Namespace: "default"}, event).Return(true)
+ mockDeduplicationManager.On("RecordEvent", types.NamespacedName{Name: "hook1", Namespace: "default"}, event).Return(nil)
+ mockStatusManager.On("RecordEventFiring", ctx, hook1, event, types.NamespacedName{Name: "agent1", Namespace: "default"}).Return(nil)
- mockDeduplicationManager.On("ShouldProcessEvent", "default/hook2", event).Return(true)
- mockDeduplicationManager.On("RecordEvent", "default/hook2", event).Return(nil)
- mockStatusManager.On("RecordEventFiring", ctx, hook2, event, "agent2").Return(nil)
+ mockDeduplicationManager.On("ShouldProcessEvent", types.NamespacedName{Name: "hook2", Namespace: "default"}, event).Return(true)
+ mockDeduplicationManager.On("RecordEvent", types.NamespacedName{Name: "hook2", Namespace: "default"}, event).Return(nil)
+ mockStatusManager.On("RecordEventFiring", ctx, hook2, event, types.NamespacedName{Name: "agent2", Namespace: "default"}).Return(nil)
response1 := &interfaces.AgentResponse{Success: true, Message: "Success 1", RequestId: "req1"}
response2 := &interfaces.AgentResponse{Success: true, Message: "Success 2", RequestId: "req2"}
mockKagentClient.On("CallAgent", ctx, mock.MatchedBy(func(req interfaces.AgentRequest) bool {
- return req.AgentId == "agent1"
+ return req.AgentRef.Name == "agent1"
})).Return(response1, nil)
mockKagentClient.On("CallAgent", ctx, mock.MatchedBy(func(req interfaces.AgentRequest) bool {
- return req.AgentId == "agent2"
+ return req.AgentRef.Name == "agent2"
})).Return(response2, nil)
- mockStatusManager.On("RecordAgentCallSuccess", ctx, hook1, event, "agent1", "req1").Return(nil)
- mockStatusManager.On("RecordAgentCallSuccess", ctx, hook2, event, "agent2", "req2").Return(nil)
- mockDeduplicationManager.On("MarkNotified", "default/hook1", event).Return()
- mockDeduplicationManager.On("MarkNotified", "default/hook2", event).Return()
+ mockStatusManager.On("RecordAgentCallSuccess", ctx, hook1, event, types.NamespacedName{Name: "agent1", Namespace: "default"}, "req1").Return(nil)
+ mockStatusManager.On("RecordAgentCallSuccess", ctx, hook2, event, types.NamespacedName{Name: "agent2", Namespace: "default"}, "req2").Return(nil)
+ mockDeduplicationManager.On("MarkNotified", types.NamespacedName{Name: "hook1", Namespace: "default"}, event).Return()
+ mockDeduplicationManager.On("MarkNotified", types.NamespacedName{Name: "hook2", Namespace: "default"}, event).Return()
// Execute
err := processor.ProcessEvent(ctx, event, hooks)
@@ -383,8 +394,10 @@ func TestProcessor_ProcessEvent_NoMatchingHooks(t *testing.T) {
hook := createTestHook("test-hook", "default", []v1alpha2.EventConfiguration{
{
EventType: "oom-kill",
- AgentId: "test-agent",
- Prompt: "Handle OOM kill",
+ AgentRef: v1alpha2.ObjectReference{
+ Name: "test-agent",
+ },
+ Prompt: "Handle OOM kill",
},
})
@@ -433,7 +446,7 @@ func TestProcessor_UpdateHookStatuses(t *testing.T) {
// Create test data
hook := createTestHook("test-hook", "default", []v1alpha2.EventConfiguration{
- {EventType: "pod-restart", AgentId: "agent1", Prompt: "prompt1"},
+ {EventType: "pod-restart", AgentRef: v1alpha2.ObjectReference{Name: "agent1"}, Prompt: "prompt1"},
})
hooks := []*v1alpha2.Hook{hook}
@@ -450,7 +463,7 @@ func TestProcessor_UpdateHookStatuses(t *testing.T) {
ctx := context.Background()
// Setup expectations
- mockDeduplicationManager.On("GetActiveEventsWithStatus", "default/test-hook").Return(activeEvents)
+ mockDeduplicationManager.On("GetActiveEventsWithStatus", types.NamespacedName{Name: "test-hook", Namespace: "default"}).Return(activeEvents)
mockStatusManager.On("UpdateHookStatus", ctx, hook, activeEvents).Return(nil)
// Execute
@@ -473,14 +486,14 @@ func TestProcessor_CleanupExpiredEvents(t *testing.T) {
// Create test data
hook := createTestHook("test-hook", "default", []v1alpha2.EventConfiguration{
- {EventType: "pod-restart", AgentId: "agent1", Prompt: "prompt1"},
+ {EventType: "pod-restart", AgentRef: v1alpha2.ObjectReference{Name: "agent1"}, Prompt: "prompt1"},
})
hooks := []*v1alpha2.Hook{hook}
ctx := context.Background()
// Setup expectations
- mockDeduplicationManager.On("CleanupExpiredEvents", "default/test-hook").Return(nil)
+ mockDeduplicationManager.On("CleanupExpiredEvents", types.NamespacedName{Name: "test-hook", Namespace: "default"}).Return(nil)
// Execute
err := processor.CleanupExpiredEvents(ctx, hooks)
diff --git a/internal/status/README.md b/internal/status/README.md
index 9df182b..3779424 100644
--- a/internal/status/README.md
+++ b/internal/status/README.md
@@ -18,7 +18,7 @@ The Status Manager package provides comprehensive status management and reportin
import (
"k8s.io/client-go/tools/record"
"sigs.k8s.io/controller-runtime/pkg/client"
- "github.com/antweiss/khook/internal/status"
+ "github.com/kagent-dev/khook/internal/status"
)
// Create a new status manager
diff --git a/internal/status/manager.go b/internal/status/manager.go
index 2cbb7bf..9f3aed5 100644
--- a/internal/status/manager.go
+++ b/internal/status/manager.go
@@ -8,12 +8,13 @@ import (
"github.com/go-logr/logr"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/record"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/log"
- "github.com/antweiss/khook/api/v1alpha2"
- "github.com/antweiss/khook/internal/interfaces"
+ "github.com/kagent-dev/khook/api/v1alpha2"
+ "github.com/kagent-dev/khook/internal/interfaces"
)
// Manager handles status updates for Hook resources
@@ -33,11 +34,7 @@ func NewManager(client client.Client, recorder record.EventRecorder) *Manager {
}
// UpdateHookStatus updates the status of a Hook resource with active events
-func (m *Manager) UpdateHookStatus(ctx context.Context, hookInterface interface{}, activeEvents []interfaces.ActiveEvent) error {
- hook, ok := hookInterface.(*v1alpha2.Hook)
- if !ok {
- return fmt.Errorf("expected *v1alpha2.Hook, got %T", hookInterface)
- }
+func (m *Manager) UpdateHookStatus(ctx context.Context, hook *v1alpha2.Hook, activeEvents []interfaces.ActiveEvent) error {
m.logger.Info("Updating hook status",
"hook", hook.Name,
"namespace", hook.Namespace,
@@ -75,32 +72,24 @@ func (m *Manager) UpdateHookStatus(ctx context.Context, hookInterface interface{
}
// RecordEventFiring records that an event has started firing
-func (m *Manager) RecordEventFiring(ctx context.Context, hookInterface interface{}, event interfaces.Event, agentId string) error {
- hook, ok := hookInterface.(*v1alpha2.Hook)
- if !ok {
- return fmt.Errorf("expected *v1alpha2.Hook, got %T", hookInterface)
- }
+func (m *Manager) RecordEventFiring(ctx context.Context, hook *v1alpha2.Hook, event interfaces.Event, agentRef types.NamespacedName) error {
m.logger.Info("Recording event firing",
"hook", hook.Name,
"namespace", hook.Namespace,
"eventType", event.Type,
"resourceName", event.ResourceName,
- "agentId", agentId)
+ "agentRef", agentRef)
// Emit Kubernetes event for audit trail
m.recorder.Event(hook, corev1.EventTypeNormal, "EventFiring",
fmt.Sprintf("Event %s fired for resource %s, calling agent %s",
- event.Type, event.ResourceName, agentId))
+ event.Type, event.ResourceName, agentRef.Name))
return nil
}
// RecordEventResolved records that an event has been resolved
-func (m *Manager) RecordEventResolved(ctx context.Context, hookInterface interface{}, eventType, resourceName string) error {
- hook, ok := hookInterface.(*v1alpha2.Hook)
- if !ok {
- return fmt.Errorf("expected *v1alpha2.Hook, got %T", hookInterface)
- }
+func (m *Manager) RecordEventResolved(ctx context.Context, hook *v1alpha2.Hook, eventType, resourceName string) error {
m.logger.Info("Recording event resolved",
"hook", hook.Name,
"namespace", hook.Namespace,
@@ -116,75 +105,59 @@ func (m *Manager) RecordEventResolved(ctx context.Context, hookInterface interfa
}
// RecordError records an error that occurred during event processing
-func (m *Manager) RecordError(ctx context.Context, hookInterface interface{}, event interfaces.Event, err error, agentId string) error {
- hook, ok := hookInterface.(*v1alpha2.Hook)
- if !ok {
- return fmt.Errorf("expected *v1alpha2.Hook, got %T", hookInterface)
- }
+func (m *Manager) RecordError(ctx context.Context, hook *v1alpha2.Hook, event interfaces.Event, err error, agentRef types.NamespacedName) error {
m.logger.Error(err, "Recording event processing error",
"hook", hook.Name,
"namespace", hook.Namespace,
"eventType", event.Type,
"resourceName", event.ResourceName,
- "agentId", agentId)
+ "agentRef", agentRef)
// Emit Kubernetes event for error tracking
m.recorder.Event(hook, corev1.EventTypeWarning, "EventProcessingError",
fmt.Sprintf("Failed to process event %s for resource %s with agent %s: %v",
- event.Type, event.ResourceName, agentId, err))
+ event.Type, event.ResourceName, agentRef.Name, err))
return nil
}
// RecordAgentCallSuccess records a successful agent call
-func (m *Manager) RecordAgentCallSuccess(ctx context.Context, hookInterface interface{}, event interfaces.Event, agentId, requestId string) error {
- hook, ok := hookInterface.(*v1alpha2.Hook)
- if !ok {
- return fmt.Errorf("expected *v1alpha2.Hook, got %T", hookInterface)
- }
+func (m *Manager) RecordAgentCallSuccess(ctx context.Context, hook *v1alpha2.Hook, event interfaces.Event, agentRef types.NamespacedName, requestId string) error {
m.logger.Info("Recording successful agent call",
"hook", hook.Name,
"namespace", hook.Namespace,
"eventType", event.Type,
"resourceName", event.ResourceName,
- "agentId", agentId,
+ "agentRef", agentRef,
"requestId", requestId)
// Emit Kubernetes event for successful processing
m.recorder.Event(hook, corev1.EventTypeNormal, "AgentCallSuccess",
fmt.Sprintf("Successfully called agent %s for event %s on resource %s (request: %s)",
- agentId, event.Type, event.ResourceName, requestId))
+ agentRef.Name, event.Type, event.ResourceName, requestId))
return nil
}
// RecordAgentCallFailure records a failed agent call
-func (m *Manager) RecordAgentCallFailure(ctx context.Context, hookInterface interface{}, event interfaces.Event, agentId string, err error) error {
- hook, ok := hookInterface.(*v1alpha2.Hook)
- if !ok {
- return fmt.Errorf("expected *v1alpha2.Hook, got %T", hookInterface)
- }
+func (m *Manager) RecordAgentCallFailure(ctx context.Context, hook *v1alpha2.Hook, event interfaces.Event, agentRef types.NamespacedName, err error) error {
m.logger.Error(err, "Recording failed agent call",
"hook", hook.Name,
"namespace", hook.Namespace,
"eventType", event.Type,
"resourceName", event.ResourceName,
- "agentId", agentId)
+ "agentRef", agentRef)
// Emit Kubernetes event for failed processing
m.recorder.Event(hook, corev1.EventTypeWarning, "AgentCallFailure",
fmt.Sprintf("Failed to call agent %s for event %s on resource %s: %v",
- agentId, event.Type, event.ResourceName, err))
+ agentRef.Name, event.Type, event.ResourceName, err))
return nil
}
// RecordDuplicateEvent records that a duplicate event was ignored
-func (m *Manager) RecordDuplicateEvent(ctx context.Context, hookInterface interface{}, event interfaces.Event) error {
- hook, ok := hookInterface.(*v1alpha2.Hook)
- if !ok {
- return fmt.Errorf("expected *v1alpha2.Hook, got %T", hookInterface)
- }
+func (m *Manager) RecordDuplicateEvent(ctx context.Context, hook *v1alpha2.Hook, event interfaces.Event) error {
m.logger.Info("Recording duplicate event ignored",
"hook", hook.Name,
"namespace", hook.Namespace,
@@ -201,15 +174,14 @@ func (m *Manager) RecordDuplicateEvent(ctx context.Context, hookInterface interf
}
// GetHookStatus retrieves the current status of a Hook resource
-func (m *Manager) GetHookStatus(ctx context.Context, hookName, namespace string) (*v1alpha2.HookStatus, error) {
+func (m *Manager) GetHookStatus(ctx context.Context, hookRef types.NamespacedName) (*v1alpha2.HookStatus, error) {
hook := &v1alpha2.Hook{}
- key := client.ObjectKey{Name: hookName, Namespace: namespace}
+ key := client.ObjectKey{Name: hookRef.Name, Namespace: hookRef.Namespace}
if err := m.client.Get(ctx, key, hook); err != nil {
m.logger.Error(err, "Failed to get hook for status retrieval",
- "hook", hookName,
- "namespace", namespace)
- return nil, fmt.Errorf("failed to get hook %s/%s: %w", namespace, hookName, err)
+ "hook", hookRef)
+ return nil, fmt.Errorf("failed to get hook %s: %w", hookRef, err)
}
return &hook.Status, nil
diff --git a/internal/status/manager_test.go b/internal/status/manager_test.go
index 625a84c..47c85a6 100644
--- a/internal/status/manager_test.go
+++ b/internal/status/manager_test.go
@@ -10,12 +10,13 @@ import (
"github.com/stretchr/testify/require"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/record"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
- "github.com/antweiss/khook/api/v1alpha2"
- "github.com/antweiss/khook/internal/interfaces"
+ "github.com/kagent-dev/khook/api/v1alpha2"
+ "github.com/kagent-dev/khook/internal/interfaces"
)
func TestNewManager(t *testing.T) {
@@ -53,7 +54,7 @@ func TestUpdateHookStatus(t *testing.T) {
EventConfigurations: []v1alpha2.EventConfiguration{
{
EventType: "pod-restart",
- AgentId: "test-agent",
+ AgentRef: v1alpha2.ObjectReference{Name: "test-agent"},
Prompt: "test prompt",
},
},
@@ -81,7 +82,7 @@ func TestUpdateHookStatus(t *testing.T) {
EventConfigurations: []v1alpha2.EventConfiguration{
{
EventType: "pod-pending",
- AgentId: "test-agent",
+ AgentRef: v1alpha2.ObjectReference{Name: "test-agent"},
Prompt: "test prompt",
},
},
@@ -153,7 +154,7 @@ func TestRecordEventFiring(t *testing.T) {
manager := NewManager(fakeClient, fakeRecorder)
ctx := context.Background()
- err := manager.RecordEventFiring(ctx, hook, event, "test-agent")
+ err := manager.RecordEventFiring(ctx, hook, event, types.NamespacedName{Name: "test-agent", Namespace: "default"})
assert.NoError(t, err)
@@ -226,7 +227,7 @@ func TestRecordError(t *testing.T) {
manager := NewManager(fakeClient, fakeRecorder)
ctx := context.Background()
- err := manager.RecordError(ctx, hook, event, testError, "test-agent")
+ err := manager.RecordError(ctx, hook, event, testError, types.NamespacedName{Name: "test-agent", Namespace: "default"})
assert.NoError(t, err)
@@ -266,7 +267,7 @@ func TestRecordAgentCallSuccess(t *testing.T) {
manager := NewManager(fakeClient, fakeRecorder)
ctx := context.Background()
- err := manager.RecordAgentCallSuccess(ctx, hook, event, "test-agent", "req-123")
+ err := manager.RecordAgentCallSuccess(ctx, hook, event, types.NamespacedName{Name: "test-agent", Namespace: "default"}, "req-123")
assert.NoError(t, err)
@@ -308,7 +309,7 @@ func TestRecordAgentCallFailure(t *testing.T) {
manager := NewManager(fakeClient, fakeRecorder)
ctx := context.Background()
- err := manager.RecordAgentCallFailure(ctx, hook, event, "test-agent", testError)
+ err := manager.RecordAgentCallFailure(ctx, hook, event, types.NamespacedName{Name: "test-agent", Namespace: "default"}, testError)
assert.NoError(t, err)
@@ -392,7 +393,7 @@ func TestGetHookStatus(t *testing.T) {
manager := NewManager(fakeClient, fakeRecorder)
ctx := context.Background()
- status, err := manager.GetHookStatus(ctx, "test-hook", "default")
+ status, err := manager.GetHookStatus(ctx, types.NamespacedName{Name: "test-hook", Namespace: "default"})
assert.NoError(t, err)
assert.NotNil(t, status)
@@ -411,11 +412,11 @@ func TestGetHookStatusNotFound(t *testing.T) {
manager := NewManager(fakeClient, fakeRecorder)
ctx := context.Background()
- status, err := manager.GetHookStatus(ctx, "nonexistent-hook", "default")
+ status, err := manager.GetHookStatus(ctx, types.NamespacedName{Name: "nonexistent-hook", Namespace: "default"})
assert.Error(t, err)
assert.Nil(t, status)
- assert.Contains(t, err.Error(), "failed to get hook")
+ assert.Contains(t, err.Error(), "failed to get hook default/nonexistent-hook")
}
func TestLogControllerStartup(t *testing.T) {
diff --git a/internal/workflow/coordinator.go b/internal/workflow/coordinator.go
index be5165d..f70c444 100644
--- a/internal/workflow/coordinator.go
+++ b/internal/workflow/coordinator.go
@@ -7,10 +7,10 @@ import (
"github.com/go-logr/logr"
"sigs.k8s.io/controller-runtime/pkg/log"
- kagentv1alpha2 "github.com/antweiss/khook/api/v1alpha2"
- "github.com/antweiss/khook/internal/deduplication"
- "github.com/antweiss/khook/internal/interfaces"
- "github.com/antweiss/khook/internal/status"
+ kagentv1alpha2 "github.com/kagent-dev/khook/api/v1alpha2"
+ "github.com/kagent-dev/khook/internal/deduplication"
+ "github.com/kagent-dev/khook/internal/interfaces"
+ "github.com/kagent-dev/khook/internal/status"
"k8s.io/client-go/kubernetes"
"sigs.k8s.io/controller-runtime/pkg/client"
)
diff --git a/internal/workflow/hook_discovery.go b/internal/workflow/hook_discovery.go
index ff0e97f..606e5a5 100644
--- a/internal/workflow/hook_discovery.go
+++ b/internal/workflow/hook_discovery.go
@@ -6,7 +6,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
- kagentv1alpha2 "github.com/antweiss/khook/api/v1alpha2"
+ kagentv1alpha2 "github.com/kagent-dev/khook/api/v1alpha2"
)
// HookDiscoveryService handles cluster-wide discovery of Hook resources
diff --git a/internal/workflow/workflow_manager.go b/internal/workflow/workflow_manager.go
index db4e60f..96521ed 100644
--- a/internal/workflow/workflow_manager.go
+++ b/internal/workflow/workflow_manager.go
@@ -8,10 +8,10 @@ import (
"github.com/go-logr/logr"
"sigs.k8s.io/controller-runtime/pkg/log"
- kagentv1alpha2 "github.com/antweiss/khook/api/v1alpha2"
- "github.com/antweiss/khook/internal/event"
- "github.com/antweiss/khook/internal/interfaces"
- "github.com/antweiss/khook/internal/pipeline"
+ kagentv1alpha2 "github.com/kagent-dev/khook/api/v1alpha2"
+ "github.com/kagent-dev/khook/internal/event"
+ "github.com/kagent-dev/khook/internal/interfaces"
+ "github.com/kagent-dev/khook/internal/pipeline"
"k8s.io/client-go/kubernetes"
"sigs.k8s.io/controller-runtime/pkg/client"
)
@@ -131,7 +131,7 @@ func (wm *WorkflowManager) CalculateSignature(hooks []*kagentv1alpha2.Hook) stri
for _, h := range hooks {
cfgs := make([]string, 0, len(h.Spec.EventConfigurations))
for _, ec := range h.Spec.EventConfigurations {
- cfgs = append(cfgs, ec.EventType+"|"+ec.AgentId+"|"+ec.Prompt)
+ cfgs = append(cfgs, ec.EventType+"|"+ec.AgentRef.Name+"|"+ec.Prompt)
}
parts = append(parts, h.Namespace+"/"+h.Name+"@"+strings.Join(cfgs, ";"))
}
diff --git a/scripts/kind/kind-config.yaml b/scripts/kind/kind-config.yaml
new file mode 100644
index 0000000..fe76f8d
--- /dev/null
+++ b/scripts/kind/kind-config.yaml
@@ -0,0 +1,41 @@
+########################################################################
+# https://kind.sigs.k8s.io/docs/user/configuration/
+########################################################################
+kind: Cluster
+apiVersion: kind.x-k8s.io/v1alpha4
+name: kagent
+containerdConfigPatches:
+- |-
+ [plugins."io.containerd.grpc.v1.cri".registry]
+ config_path = "/etc/containerd/certs.d"
+# network configuration
+networking:
+ # WARNING: It is _strongly_ recommended that you keep this the default
+ # (127.0.0.1) for security reasons. However, it is possible to change this.
+ apiServerAddress: "127.0.0.1"
+ # By default, the API server listens on a random open port.
+ # You may choose a specific port but probably don't need to in most cases.
+ # Using a random port makes it easier to spin up multiple clusters.
+ # apiServerPort: 6443
+
+# this may be used to e.g. disable beta / alpha APIs.
+runtimeConfig:
+ "api/alpha": "false"
+
+# add to the apiServer certSANs the name of the docker (dind) service in order to be able to reach the cluster through it
+kubeadmConfigPatchesJSON6902:
+ - group: kubeadm.k8s.io
+ version: v1beta2
+ kind: ClusterConfiguration
+ patch: |
+ - op: add
+ path: /apiServer/certSANs/-
+ value: docker
+
+# this is the default configuration for nodes
+nodes:
+ - role: control-plane
+# Example of port mappings for the control-plane node
+# extraPortMappings:
+# - containerPort: 30950
+# hostPort: 30950
\ No newline at end of file
diff --git a/scripts/kind/setup-kind.sh b/scripts/kind/setup-kind.sh
new file mode 100755
index 0000000..164723c
--- /dev/null
+++ b/scripts/kind/setup-kind.sh
@@ -0,0 +1,70 @@
+#!/usr/bin/env bash
+
+set -o errexit
+set -o pipefail
+
+KIND_CLUSTER_NAME=${KIND_CLUSTER_NAME:-kagent}
+KIND_IMAGE_VERSION=${KIND_IMAGE_VERSION:-1.33.2}
+
+# 1. Create registry container unless it already exists
+reg_name='kind-registry'
+reg_port='5001'
+if [ "$(docker inspect -f '{{.State.Running}}' "${reg_name}" 2>/dev/null || true)" != 'true' ]; then
+ docker run \
+ -d --restart=always -p "127.0.0.1:${reg_port}:5000" --network bridge --name "${reg_name}" \
+ registry:2
+fi
+
+# 2. Create kind cluster with containerd registry config dir enabled
+#
+# NOTE: the containerd config patch is not necessary with images from kind v0.27.0+
+# It may enable some older images to work similarly.
+# If you're only supporting newer releases, you can just use `kind create cluster` here.
+#
+# See:
+# https://github.com/kubernetes-sigs/kind/issues/2875
+# https://github.com/containerd/containerd/blob/main/docs/cri/config.md#registry-configuration
+# See: https://github.com/containerd/containerd/blob/main/docs/hosts.md
+if kind get clusters | grep -qx "${KIND_CLUSTER_NAME}"; then
+ echo "Kind cluster '${KIND_CLUSTER_NAME}' already exists; skipping create."
+else
+ kind create cluster --name "${KIND_CLUSTER_NAME}" \
+ --config scripts/kind/kind-config.yaml \
+ --image="kindest/node:v${KIND_IMAGE_VERSION}"
+fi
+
+# 3. Add the registry config to the nodes
+#
+# This is necessary because localhost resolves to loopback addresses that are
+# network-namespace local.
+# In other words: localhost in the container is not localhost on the host.
+#
+# We want a consistent name that works from both ends, so we tell containerd to
+# alias localhost:${reg_port} to the registry container when pulling images
+REGISTRY_DIR="/etc/containerd/certs.d/localhost:${reg_port}"
+for node in $(kind get nodes --name "${KIND_CLUSTER_NAME}"); do
+ docker exec "${node}" mkdir -p "${REGISTRY_DIR}"
+ cat <