diff --git a/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/CalicoWindowsInstall.js b/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/CalicoWindowsInstall.js deleted file mode 100644 index e74ebaa1e4..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/CalicoWindowsInstall.js +++ /dev/null @@ -1,185 +0,0 @@ -import React from 'react'; - -import Admonition from '@theme/Admonition'; -import CodeBlock from '@theme/CodeBlock'; -import Link from '@docusaurus/Link'; - -import { prodname, prodnameWindows } from '../../variables'; - -function CalicoWindowsInstallFirstStep(props) { - if (props.networkingType === 'vxlan') { - return ( -
  • - Ensure that BGP is disabled. - -
  • - ); - } - - return ( -
  • -

    - Enable the BGP service on the Windows nodes. Install the RemoteAccess service using the following PowerShell - commands: -

    - - Install-WindowsFeature RemoteAccess{'\n'} - Install-WindowsFeature RSAT-RemoteAccess-PowerShell - {'\n'} - Install-WindowsFeature Routing - -

    Then restart the computer:

    - Restart-Computer -Force -

    before running:

    - Install-RemoteAccess -VpnType RoutingOnly -

    - Sometimes the remote access service fails to start automatically after install. To make sure it is running, run - the following command: -

    - Start-Service RemoteAccess -
  • - ); -} - -export default function CalicoWindowsInstall(props) { - return ( -
      - -
    1. -

      - Get the cluster's Kubernetes API server host and port, which will be used to update the {prodnameWindows}{' '} - config map. The API server host and port are required so that the {prodnameWindows} installation script can - create a kubeconfig file for {prodname} services. If your Windows nodes already have {prodnameWindows}{' '} - installed manually, skip this step. The installation script will use the API server host and port from your - node's existing kubeconfig file if the KUBERNETES_SERVICE_HOST and{' '} - KUBERNETES_SERVICE_PORT variables are not provided in the calico-windows-config{' '} - ConfigMap. -

      -

      First, make a note of the address of the API server:

      -
        -
      • -

        - If you have a single API server with a static IP address, you can use its IP address and port. The IP can - be found by running: -

        - kubectl get endpoints kubernetes -o wide -

        The output should look like the following, with a single IP address and port under "ENDPOINTS":

        - - {`NAME ENDPOINTS AGE -kubernetes 172.16.101.157:6443 40m`} - -

        - If there are multiple entries under "ENDPOINTS", then your cluster must have more than one API server. In - this case, use the appropriate load balancing option below for your cluster. -

        -
      • -
      • -

        - If using DNS load balancing (as used by kops), use the FQDN and port of the API server{' '} - - api.internal.{'<'}clustername{'>'} - - . -

        -
      • -
      • -

        - If you have multiple API servers with a load balancer in front, you should use the IP and port of the load - balancer. -

        -
      • - -

        - If your cluster uses a ConfigMap to configure kube-proxy, you can find the "right" way to - reach the API server by examining the config map. For example: -

        - - kubectl get configmap -n kube-system kube-proxy -o yaml | grep server{'\n'} - server: https://d881b853ae312e00302a84f1e346a77.gr7.us-west-2.eks.amazonaws.com - -

        - In this case, the server is d881b853ae312e00302a84f1e346a77.gr7.us-west-2.eks.amazonaws.com{' '} - and the port is 443 (the standard HTTPS port). -

        -
        -
      -
    2. -
    3. -

      - Create the kubernetes-services-endpoint ConfigMap with the Kubernetes API server - host and port (discovered in the previous step) used to create a kubeconfig file for {prodname} services. - {`kubectl apply -f - << EOF -kind: ConfigMap -apiVersion: v1 -metadata: - name: kubernetes-services-endpoint - namespace: tigera-operator -data: - KUBERNETES_SERVICE_HOST: "$\{APISERVER_ADDR\}" - KUBERNETES_SERVICE_PORT: "$\{APISERVER_PORT\}" -EOF`} - -

      -
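-      For illustration only, the two variables referenced in the ConfigMap above could be set from the example output shown earlier (replace these placeholder values with your own API server address and port):
-      <CodeBlock language='bash'>
-        {`export APISERVER_ADDR=172.16.101.157
-export APISERVER_PORT=6443`}
-      </CodeBlock>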
    4. -
    5. -

      - Get the Kubernetes service clusterIP range configured in your cluster. This must - match the service-cluster-ip-range used by kube-apiserver. -

      -
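-      One way to look this up on many clusters (assuming the kube-apiserver flags appear in the cluster dump) is:
-      <CodeBlock language='bash'>
-        kubectl cluster-info dump | grep -m 1 service-cluster-ip-range
-      </CodeBlock>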
    6. -
    7. -

      - Add the Kubernetes service CIDR (discovered in the previous step) and enable {prodnameWindows} on the Tigera Operator installation resource. -

      -

      - For example, with a Kubernetes service clusterIP range of 10.96.0.0/12: - {`kubectl patch installation default --type merge --patch='{"spec": {"serviceCIDRs": ["10.96.0.0/12"], "calicoNetwork": {"windowsDataplane": "HNS"}}}'`} -

      -
    8. -
    9. - Install kube-proxy on Windows nodes. -

      - Depending on your platform, kube-proxy may already be running on your Windows nodes; if so, skip this step. If kube-proxy is not running, you must install and run - kube-proxy on each of the Windows nodes in your cluster. Note: The - manifest provided in the kubernetes-sigs sig-windows-tools repository depends on the kubeconfig - provided by the kube-proxy ConfigMap in the kube-system namespace. -

      -

      - You must replace KUBE_PROXY_VERSION with your cluster's Kubernetes version in kube-proxy.yml to ensure the daemonset uses a kube-proxy Windows image that is compatible with your Kubernetes cluster. Use a command like the following to retrieve the YAML file, replace the version, and apply it: - {`curl -L https://raw.githubusercontent.com/kubernetes-sigs/sig-windows-tools/master/hostprocess/calico/kube-proxy/kube-proxy.yml | sed "s/KUBE_PROXY_VERSION/\/g" | kubectl apply -f -`} -

      -
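-      For example, on a hypothetical cluster running Kubernetes v1.28.2, the substituted command would look like this (the version string is illustrative only):
-      <CodeBlock language='bash'>
-        {`curl -L https://raw.githubusercontent.com/kubernetes-sigs/sig-windows-tools/master/hostprocess/calico/kube-proxy/kube-proxy.yml | sed "s/KUBE_PROXY_VERSION/v1.28.2/g" | kubectl apply -f -`}
-      </CodeBlock>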
    10. -
    11. -

      Monitor the installation.

      - The {prodnameWindows} HPC installation has two initContainers: uninstall-calico, which removes any previously (manually) installed {prodnameWindows} services, - and install-cni, which installs the required CNI binaries and configuration when using Calico CNI. - - kubectl logs -f -n calico-system -l k8s-app=calico-node-windows -c uninstall-calico{'\n'} - kubectl logs -f -n calico-system -l k8s-app=calico-node-windows -c install-cni{'\n'} - -

      - After these initContainers finish their execution, installation is complete. Next, the - {prodnameWindows} services are started in separate containers: -

      - - kubectl logs -f -n calico-system -l k8s-app=calico-node-windows -c node{'\n'} - kubectl logs -f -n calico-system -l k8s-app=calico-node-windows -c felix{'\n'} - {props.networkingType === 'windows-bgp' - ? `kubectl logs -f -n calico-system -l k8s-app=calico-node-windows -c confd` - : null} - -

      - The calico-node-windows pods will be ready after their containers finish initializing. -

      -
    12. -
    - ); -} diff --git a/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/CliConfigIntro.js b/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/CliConfigIntro.js deleted file mode 100644 index 930d8de8a3..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/CliConfigIntro.js +++ /dev/null @@ -1,53 +0,0 @@ -import React from 'react'; - -import Admonition from '@theme/Admonition'; -import CodeBlock from '@theme/CodeBlock'; -import Link from '@docusaurus/Link'; - -import { prodname, baseUrl } from '../../variables'; - -export default function CliConfigIntro(props) { - return ( - <> -

    - Many {props.cli} commands require access to the {prodname} datastore. In most circumstances,{' '} - {props.cli} cannot connect to the datastore by default. You can provide - {props.cli} with the information it needs in either of the following ways. -

    -
      -
    1. -

      - Configuration file: by default, {props.cli} will look for a configuration file - at /etc/calico/{props.cli}.cfg. You can override this using the --config option - with commands that require datastore access. The file can be in either YAML or JSON format. It must be valid - and readable by {props.cli}. A YAML example follows. -

      - - {`apiVersion: projectcalico.org/v3 -kind: CalicoAPIConfig -metadata: -spec: - datastoreType: "kdd" - ...`} - -
    2. -
    3. -

      - Environment variables: If {props.cli} cannot locate, read, or access a - configuration file, it will check a specific set of environment variables. -

      -
    4. -
    -
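-    As a minimal sketch of the environment-variable option, assuming the Kubernetes datastore (the kubeconfig path is a placeholder):
-    <CodeBlock language='bash'>
-      {`export DATASTORE_TYPE=kubernetes
-export KUBECONFIG=~/.kube/config`}
-    </CodeBlock>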

    - See the section that corresponds to your datastore type for a - full set of options and examples. -

    - - When running {props.cli} inside a container, any environment variables and configuration files must - be passed to the container so they are available to the process inside. It can be useful to keep a running - container (that sleeps) configured for your datastore, then it is possible to exec into the - container and have an already configured environment. - - - ); -} diff --git a/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/ComponentVersions.js b/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/ComponentVersions.js deleted file mode 100644 index 3e7ceb9b33..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/ComponentVersions.js +++ /dev/null @@ -1,64 +0,0 @@ -import React from 'react'; -import Link from '@docusaurus/Link'; -import Heading from '@theme/Heading'; - -import { toKebab } from '@site/src/components/utils/formatters'; - -import variables from '../../variables'; - -export default function ComponentVersions() { - const { prodname, version, downloadsurl } = variables; - - const releases = variables.releases.map((release) => { - return release; - }).filter(release => release); - - return ( - <> - {releases.map((release) => ( -
    - - Component versions for {variables.prodname} {release.title.startsWith('v') ? release.title.substring(1) : release.title} - -

    - {release.title !== 'master' && ( -

    - - Release archive - {' '} - with Kubernetes manifests. Based on Calico {releases[0].calico.minor_version}. -

    - )} - This release comprises the following components, and can be installed using{' '} - - {release['tigera-operator'].registry}/{release['tigera-operator'].image}: - {release['tigera-operator'].version} - -

    - - - - - - - - - {Object.keys(release.components).map((componentName) => ( - - - - - ))} - -
    ComponentVersion
    {componentName}{release.components[componentName].version}
    -
    - ))} - - ); -} diff --git a/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/ConfigureManagedCluster.js b/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/ConfigureManagedCluster.js deleted file mode 100644 index ab08a5de46..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/ConfigureManagedCluster.js +++ /dev/null @@ -1,122 +0,0 @@ -import React from 'react'; - -import CodeBlock from '@theme/CodeBlock'; -import Link from '@docusaurus/Link'; -import Heading from '@theme/Heading'; - -import { baseUrl } from '../../variables'; - -export default function ConfigureManagedCluster(props) { - const kubectlCmd = props.kubectlCmd || 'kubectl'; - - return ( - <> - - Create the connection manifest for your managed cluster - -

    - To connect the managed cluster to your management cluster, you need to create and apply a connection manifest. - You can create a connection manifest from the Manager UI in the management cluster or manually using{' '} - {kubectlCmd}. -

    - - Connect cluster - Manager UI - -
      -
    1. -

      - In the Manager UI left navbar, click Managed Clusters. -

      -
    2. -
    3. -

      - On the Managed Clusters page, click the button, Add Cluster. -

      -
    4. -
    5. -

      Give your cluster a name that is easily recognized in a list of managed clusters, and click Create Cluster.

      -
    6. -
    7. -

      Download the manifest.

      -
    8. -
    - - connect-cluster---kubectl - -

    - Choose a name for your managed cluster and then add it to your management cluster. The - following commands will create a manifest with the name of your managed cluster in your current directory. -

    -
      -
    1. -

      - First, decide on the name for your managed cluster. Because you will eventually have several managed - clusters, choose a name that can be easily recognized in a list of managed clusters. The name is also used - in steps that follow. -

      - export MANAGED_CLUSTER=my-managed-cluster -
    2. -
    3. -

      - Get the namespace in which the Tigera Operator is running in your managed cluster (in most cases this will - be tigera-operator): -

      - export MANAGED_CLUSTER_OPERATOR_NS=tigera-operator -
    4. -
    5. -

      - Add a managed cluster and save the manifest containing a{' '} - - ManagementClusterConnection - {' '} - and a Secret. -

      - - {`${kubectlCmd} -o jsonpath="{.spec.installationManifest}" > $MANAGED_CLUSTER.yaml create -f - < -
    6. -
    7. - Verify that the managementClusterAddr in the manifest is correct. -
    8. -
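-      For instance, a quick (hypothetical) way to inspect that field in the generated manifest is:
-      <CodeBlock language='bash'>
-        grep managementClusterAddr $MANAGED_CLUSTER.yaml
-      </CodeBlock>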
    - - Apply the connection manifest to your managed cluster - -
      -
    1. -

      - Apply the manifest that you modified in the step, - Add a managed cluster to the management cluster. -

      - {`${kubectlCmd} apply -f $MANAGED_CLUSTER.yaml`} -
    2. -
    3. -

      Monitor progress with the following command:

      - {`watch ${kubectlCmd} get tigerastatus`} - Wait until the management-cluster-connection and tigera-compliance show a status of{' '} - Available. -
    4. -
    -

    You have now successfully installed a managed cluster!

    - - ); -} diff --git a/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/EnvironmentFile.js b/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/EnvironmentFile.js deleted file mode 100644 index cd6eb04b03..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/EnvironmentFile.js +++ /dev/null @@ -1,58 +0,0 @@ -import React from 'react'; - -import Admonition from '@theme/Admonition'; -import CodeBlock from '@theme/CodeBlock'; -import Link from '@docusaurus/Link'; - -import { baseUrl } from '../../variables'; - -export default function EnvironmentFile(props) { - return ( - <> -

    - - Use the following guidelines and sample file to define the environment variables for starting Calico on the - host. For more help, see the{' '} - - {props.install === 'container' ? ( - {props.nodecontainer} configuration reference - ) : ( - Felix configuration reference - )} -

    -

    For the Kubernetes datastore set the following:

    - - - - - - - - - - - - - -
    VariableConfiguration guidance
    KUBECONFIGPath to kubeconfig file to access the Kubernetes API Server
    - {props.install === 'container' && ( - - If using certificates and keys, you will need to volume mount them into the container at the location - specified by the paths mentioned above. - - )} -

    - Sample EnvironmentFile - save to /etc/calico/calico.env -

    - - {`DATASTORE_TYPE=kubernetes -CALICO_NODENAME="" -NO_DEFAULT_POOLS="true" -CALICO_IP="" -CALICO_IP6="" -CALICO_AS="" -CALICO_NETWORKING_BACKEND=bird`} - - - ); -} diff --git a/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/GettingStartedInstallOnClustersKubernetesHelm.js b/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/GettingStartedInstallOnClustersKubernetesHelm.js deleted file mode 100644 index c220becbf3..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/GettingStartedInstallOnClustersKubernetesHelm.js +++ /dev/null @@ -1,185 +0,0 @@ -// Temporary component for "calico-enterprise/getting-started/install-on-clusters/kubernetes/helm.mdx" - -import React from 'react'; - -import CodeBlock from '@theme/CodeBlock'; -import Heading from '@theme/Heading'; - -import { chart_version_name, prodname, prodnamedash, version } from '../../variables'; - -export default function GettingStartedInstallOnClustersKubernetesHelm() { - return ( - <> - {renderCond1()} - - Customize the Helm chart - -

    - If you are installing on a cluster provisioned by EKS, GKE, AKS, or Mirantis Kubernetes Engine (MKE), or if you need - to customize TLS certificates, you must customize this Helm chart by creating a{' '} - values.yaml file. Otherwise, you can skip this step. -

    -
      -
    1. -

      - If you are installing on a cluster provisioned by EKS, GKE, AKS, or Mirantis Kubernetes Engine (MKE), set the{' '} - kubernetesProvider as described in the{' '} - Installation reference. For - example: -

      -
    2. - {`echo '{ installation: {kubernetesProvider: EKS }}' > values.yaml`} -

      - For an Azure AKS cluster with no Kubernetes CNI pre-installed, create values.yaml with the following - command: -

      - - {`cat > values.yaml < -
    3. -

      - Add any other customizations you require to values.yaml by running the following command. - For help, see Helm installation reference, - or helm docs. -

      -
    4. - {renderCond2()} -
    - - Install {prodname} - -
      -
    1. -

      - Configure a storage class for {prodname} -

      -
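-      As a rough sketch only, a storage class for the log storage component might look like the following; the class name and provisioner shown here are assumptions, so follow the linked storage guide for the values required by your platform:
-      <CodeBlock language='yaml'>
-        {`apiVersion: storage.k8s.io/v1
-kind: StorageClass
-metadata:
-  name: tigera-elasticsearch   # name assumed from the storage guide
-provisioner: kubernetes.io/aws-ebs   # pick the provisioner for your platform
-parameters:
-  type: gp2`}
-      </CodeBlock>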
    2. -
    3. -

      - Create the tigera-operator namespace: -

      -
      -            kubectl create namespace tigera-operator
      -          
      -
    4. -
    5. -

      - Install the Tigera {prodname} operator and custom resource definitions using the Helm chart, passing in - your image pull secrets. -

      - {renderCond3()} -
    6. -
    7. -

      - Monitor progress and wait until apiserver shows a status of Available, then proceed - to the next step. -

      - watch kubectl get tigerastatus/apiserver -
    8. -
    9. -

      Install your {prodname} license:

      - kubectl apply -f </path/to/license.yaml> -
    10. -
    11. -

      You can now monitor progress with the following command:

      - watch kubectl get tigerastatus -
    12. -

      Congratulations! You have now installed {prodname} using the Helm 3 chart.

      -
    - - ); - - function renderCond1() { - if (version === 'master') { - return ( -
      -
    1. -

      Install the Google cloud storage helm repo plugin:

      -
    2. - helm plugin install https://github.com/viglesiasce/helm-gcs.git -
    3. -

      Add the Calico helm repo:

      - helm repo add tigera gs://tigera-helm-charts -
    4. -
    - ); - } - - return ( - <> -
      -
    1. -

      Get the Helm chart:

      - - {`curl -O -L https://downloads.tigera.io/ee/charts/tigera-operator-${chart_version_name}.tgz`} - -
    2. -
    - - ); - } - - function renderCond2() { - if (version === 'master') { - return helm show values tigera/tigera-operator --version v0.0; - } - - return {`helm show values ./tigera-operator-${chart_version_name}.tgz`}; - } - - function renderCond3() { - if (version === 'master') { - return ( - <> - - {`helm install calico-enterprise tigera/tigera-operator --version v0.0 \\ ---set-file imagePullSecrets.tigera-pull-secret=,tigera-prometheus-operator.imagePullSecrets.tigera-pull-secret= \\ ---namespace tigera-operator`} - -

    - or if you created a values.yaml above: -

    - - {`helm install calico-enterprise tigera/tigera-operator --version v0.0 -f values.yaml \\ ---set-file imagePullSecrets.tigera-pull-secret=,tigera-prometheus-operator.imagePullSecrets.tigera-pull-secret= \\ ---namespace tigera-operator`} - - - ); - } - - return ( - <> - - {`helm install calico-enterprise tigera-operator-${chart_version_name}.tgz \\ ---set-file imagePullSecrets.tigera-pull-secret=,tigera-prometheus-operator.imagePullSecrets.tigera-pull-secret= \\ ---namespace tigera-operator`} - -

    - or if you created a values.yaml above: -

    - - {`helm install calico-enterprise tigera-operator-${chart_version_name}.tgz -f values.yaml \\ ---set-file imagePullSecrets.tigera-pull-secret=,tigera-prometheus-operator.imagePullSecrets.tigera-pull-secret= \\ ---namespace tigera-operator`} - - - ); - } -} diff --git a/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/InstallAKS.js b/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/InstallAKS.js deleted file mode 100644 index d23b6ec54d..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/InstallAKS.js +++ /dev/null @@ -1,376 +0,0 @@ -import React from 'react'; - -import CodeBlock from '@theme/CodeBlock'; -import Link from '@docusaurus/Link'; -import Admonition from '@theme/Admonition'; -import Heading from '@theme/Heading'; - -import ConfigureManagedCluster from './ConfigureManagedCluster'; -import { prodname, prodnamedash, baseUrl, filesUrl } from '../../variables'; - -export default function InstallAKS(props) { - return ( - <> - - Install with Azure CNI networking - -
      -
    1. -

      Install the Tigera Operator and custom resource definitions.

      - kubectl create -f {filesUrl}/manifests/tigera-operator.yaml -
    2. -
    3. -

      - Install the Prometheus operator and related custom resource definitions. The Prometheus operator will be - used to deploy Prometheus server and Alertmanager to monitor {prodname} metrics. -

      - - If you have an existing Prometheus operator in your cluster that you want to use, skip this step. To work - with {prodname}, your Prometheus operator must be v0.40.0 or higher. - - kubectl create -f {filesUrl}/manifests/tigera-prometheus-operator.yaml -
    4. -
    5. -

      Install your pull secret.

      -

      - If pulling images directly from quay.io/tigera, you will likely want to use the credentials - provided to you by your Tigera support representative. If using a private registry, use your private - registry credentials instead. -

      - - {`kubectl create secret generic tigera-pull-secret \\ - --type=kubernetes.io/dockerconfigjson -n tigera-operator \\ - --from-file=.dockerconfigjson=`} - -
    6. -
    7. -

      - Install any extra {prodname} resources needed at - cluster start using calicoctl. -

      -
    8. - {props.clusterType === 'managed' ? ( -
    9. -

      - Download the Tigera custom resources. For more information on configuration options available in this - manifest, see the installation reference. -

      - curl -O -L {filesUrl}/manifests/aks/custom-resources.yaml -

      - Remove the Manager custom resource from the manifest file. -

      - - {`apiVersion: operator.tigera.io/v1 -kind: Manager -metadata: - name: tigera-secure -spec: - # Authentication configuration for accessing the Tigera manager. - # Default is to use token-based authentication. - auth: - type: Token`} - -

      - Remove the LogStorage custom resource from the manifest file. -

      - - {`apiVersion: operator.tigera.io/v1 -kind: LogStorage -metadata: - name: tigera-secure -spec: - nodes: - count: 1`} - -

      Now apply the modified manifest.

      - kubectl create -f ./custom-resources.yaml -

      You can now monitor progress with the following command:

      - watch kubectl get tigerastatus -
    10. - ) : ( -
    11. -

      - Install the Tigera custom resources. For more information on configuration options available in this - manifest, see the installation reference. -

      - kubectl create -f {filesUrl}/manifests/aks/custom-resources.yaml -

      You can now monitor progress with the following command:

      - watch kubectl get tigerastatus -
    12. - )} -
    - {props.clusterType !== 'managed' ? ( -

    - Wait until the apiserver shows a status of Available, then proceed to{' '} - install the {prodname} license. -

    - ) : ( -

    - Wait until the apiserver shows a status of Available, then proceed to the next - section. -

    - )} - - Install with {prodname} networking - -
      -
    1. -

      - - Configure a storage class for {prodname} - - . -

      -
    2. -
    3. -

      Install the Tigera Operator and custom resource definitions.

      - kubectl create -f {filesUrl}/manifests/tigera-operator.yaml -
    4. -
    5. -

      - Install the Prometheus operator and related custom resource definitions. The Prometheus operator will be - used to deploy Prometheus server and Alertmanager to monitor {prodname} metrics. -

      - - If you have an existing Prometheus operator in your cluster that you want to use, skip this step. To work - with {prodname}, your Prometheus operator must be v0.40.0 or higher. - - kubectl create -f {filesUrl}/manifests/tigera-prometheus-operator.yaml -
    6. -
    7. -

      Install your pull secret.

      -

      - If pulling images directly from quay.io/tigera, you will likely want to use the credentials - provided to you by your Tigera support representative. If using a private registry, use your private - registry credentials instead. -

      - - {`kubectl create secret generic tigera-pull-secret \\ - --type=kubernetes.io/dockerconfigjson -n tigera-operator \\ - --from-file=.dockerconfigjson=`} - -
    8. -
    9. -

      - Install any extra {prodname} resources needed at - cluster start using calicoctl. -

      -
    10. - {props.clusterType === 'managed' ? ( -
    11. -

      - Download the Tigera custom resources. For more information on configuration options available in this - manifest, see the installation reference. -

      - curl -O -L {filesUrl}/manifests/aks/custom-resources-calico-cni.yaml -

      - Remove the Manager custom resource from the manifest file. -

      - - {`apiVersion: operator.tigera.io/v1 -kind: Manager -metadata: - name: tigera-secure -spec: - # Authentication configuration for accessing the Tigera manager. - # Default is to use token-based authentication. - auth: - type: Token`} - -

      - Remove the LogStorage custom resource from the manifest file. -

      - - {`apiVersion: operator.tigera.io/v1 -kind: LogStorage -metadata: - name: tigera-secure -spec: - nodes: - count: 1`} - -

      Now apply the modified manifest.

      - kubectl create -f ./custom-resources-calico-cni.yaml -

      You can now monitor progress with the following command:

      - watch kubectl get tigerastatus -
    12. - ) : ( -
    13. -

      - Install the Tigera custom resources. For more information on configuration options available in this - manifest, see the installation reference. -

      - kubectl create -f {filesUrl}/manifests/aks/custom-resources-calico-cni.yaml -

      You can now monitor progress with the following command:

      - watch kubectl get tigerastatus -
    14. - )} -
    - {props.clusterType === 'managed' ? ( -

    - Wait until the apiserver shows a status of Available, then proceed to the next - section. -

    - ) : ( -

    - Wait until the apiserver shows a status of Available, then proceed to{' '} - install the {prodname} license. -

    - )} - {(props.clusterType === 'standalone' || props.clusterType === 'management') && ( - <> - - Install the {prodname} license - -

    In order to use {prodname}, you must install the license provided to you by Tigera.

    - {`kubectl create -f `} -

    You can now monitor progress with the following command:

    - watch kubectl get tigerastatus - - )} - {props.clusterType === 'management' && ( - <> - - Create a management cluster - -

    - To control managed clusters from your central management plane, you must ensure it is reachable for - connections. The simplest way to get started (but not suitable for production scenarios) is to configure a{' '} - NodePort service to expose the management cluster. Note that the service must live within the{' '} - tigera-manager namespace. -

    -
      -
    1. -

      Create a service to expose the management cluster.

      -

      - The following example of a NodePort service may not be suitable for production and high availability. - For options, see{' '} - - Fine-tune multi-cluster management for production - - . -

      -

      Apply the following service manifest.

      - - {`kubectl create -f - < -
    2. -
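-      The manifest body is truncated above. As a non-authoritative sketch, a NodePort service in the tigera-manager namespace might look like this (the service name, selector, and port numbers are assumptions):
-      <CodeBlock language='yaml'>
-        {`apiVersion: v1
-kind: Service
-metadata:
-  name: tigera-manager-mcm      # assumed name
-  namespace: tigera-manager
-spec:
-  type: NodePort
-  selector:
-    k8s-app: tigera-manager     # assumed manager pod label
-  ports:
-    - port: 9449                # assumed management tunnel port
-      targetPort: 9449
-      nodePort: 30449
-      protocol: TCP`}
-      </CodeBlock>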
    3. -

      - Export the service port number, and the public IP or host of the management cluster. (Ex. - "example.com:1234" or "10.0.0.10:1234".) -

      - {`export MANAGEMENT_CLUSTER_ADDR=`} -
    4. -
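-      For example, using one of the sample addresses mentioned above:
-      <CodeBlock language='bash'>
-        export MANAGEMENT_CLUSTER_ADDR=10.0.0.10:1234
-      </CodeBlock>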
    5. -

      - Apply the{' '} - - ManagementCluster - {' '} - CR. -

      - - {`kubectl apply -f - < -
    6. -
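-      The resource body is truncated above. Based on the ManagementCluster reference linked in this step, a minimal example would look roughly like this (treat the field values as assumptions and consult the reference):
-      <CodeBlock language='yaml'>
-        {`apiVersion: operator.tigera.io/v1
-kind: ManagementCluster
-metadata:
-  name: tigera-secure
-spec:
-  address: $MANAGEMENT_CLUSTER_ADDR   # the host:port exported in the previous step`}
-      </CodeBlock>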
    - - Create an admin user and verify management cluster connection - -

    - To access resources in a managed cluster from the {prodname} Manager within the management cluster, the - logged-in user must have appropriate permissions defined in that managed cluster (clusterrole bindings). -

    -
      -
    1. -

      - Create an admin user called mcm-user in the default namespace with full permissions by - applying the following commands. -

      - - {`kubectl create sa mcm-user -kubectl create clusterrolebinding mcm-user-admin --serviceaccount=default:mcm-user --clusterrole=tigera-network-admin`} - -
    2. -
    3. -

      Get the login token for your new admin user, and log in to {prodname} Manager.

      - - {`kubectl get secret $(kubectl get serviceaccount mcm-user -o jsonpath='{range .secrets[*]}{.name}{"\n"}{end}' | grep token) -o go-template='{{.data.token | base64decode}}' && echo`} - -

      - In the top right banner, your management cluster is displayed as the first entry in the cluster - selection drop-down menu with the fixed name, management cluster. -

      - Cluster Created -
    4. -
    -

    You have successfully installed a management cluster.

    - - )} - {props.clusterType === 'managed' && ( - <> - - - Provide permissions to view the managed cluster - -

    - To access resources belonging to a managed cluster from the {prodname} Manager UI, the service or user - account used to log in must have appropriate permissions defined in the managed cluster. -

    -

    - Let's define admin-level permissions for the service account (mcm-user) we created to log in to - the Manager UI. Run the following command against your managed cluster. -

    - - kubectl create clusterrolebinding mcm-user-admin --serviceaccount=default:mcm-user - --clusterrole=tigera-network-admin - - - )} - - ); -} diff --git a/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/InstallEKS.js b/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/InstallEKS.js deleted file mode 100644 index afbd134d46..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/InstallEKS.js +++ /dev/null @@ -1,446 +0,0 @@ -import React from 'react'; - -import CodeBlock from '@theme/CodeBlock'; -import Link from '@docusaurus/Link'; -import Admonition from '@theme/Admonition'; -import Heading from '@theme/Heading'; - -import GeekDetails from '@site/src/components/partials/GeekDetails'; - -import ConfigureManagedCluster from './ConfigureManagedCluster'; -import { prodname, prodnamedash, baseUrl, filesUrl } from '../../variables'; - -export default function InstallEKS(props) { - return ( - <> - - Install EKS with Amazon VPC networking - -
      -
    1. -

      Install the Tigera Operator and custom resource definitions.

      - kubectl create -f {filesUrl}/manifests/tigera-operator.yaml -
    2. -
    3. -

      - Install the Prometheus operator and related custom resource definitions. The Prometheus operator will be - used to deploy Prometheus server and Alertmanager to monitor {prodname} metrics. -

      - - If you have an existing Prometheus operator in your cluster that you want to use, skip this step. To work - with {prodname}, your Prometheus operator must be v0.40.0 or higher. - - kubectl create -f {filesUrl}/manifests/tigera-prometheus-operator.yaml -
    4. -
    5. -

      Install your pull secret.

      -

      - If pulling images directly from quay.io/tigera, you will likely want to use the credentials - provided to you by your Tigera support representative. If using a private registry, use your private - registry credentials instead. -

      - - {`kubectl create secret generic tigera-pull-secret \\ - --type=kubernetes.io/dockerconfigjson -n tigera-operator \\ - --from-file=.dockerconfigjson=`} - -
    6. -
    7. -

      - Install any extra {prodname} resources needed at - cluster start using calicoctl. -

      -
    8. - {props.clusterType === 'managed' ? ( - <> -
    9. -

      - Download the Tigera custom resources. For more information on configuration options available in this - manifest, see the installation reference. -

      - curl -O -L {filesUrl}/manifests/eks/custom-resources.yaml -

      - Remove the Manager custom resource from the manifest file. -

      - - {`apiVersion: operator.tigera.io/v1 -kind: Manager -metadata: - name: tigera-secure -spec: - # Authentication configuration for accessing the Tigera manager. - # Default is to use token-based authentication. - auth: - type: Token`} - -

      - Remove the LogStorage custom resource from the manifest file. -

      - - {`apiVersion: operator.tigera.io/v1 -kind: LogStorage -metadata: - name: tigera-secure -spec: - nodes: - count: 1`} - -

      Now apply the modified manifest.

      - kubectl create -f ./custom-resources.yaml -
    10. -
    11. -

      Monitor progress with the following command:

      - watch kubectl get tigerastatus -

      - Wait until the apiserver shows a status of Available, then proceed to the next - section. -

      -
    12. - - ) : ( -
    13. -

      - Install the Tigera custom resources. For more information on configuration options available in this - manifest, see the installation reference. -

      - kubectl create -f {filesUrl}/manifests/eks/custom-resources.yaml -

      You can now monitor progress with the following command:

      - watch kubectl get tigerastatus -

      - Wait until the apiserver shows a status of Available, then proceed to{' '} - install the {prodname} license. -

      -
    14. - )} -
    - - Install EKS with Calico networking - -

    - {prodname} networking cannot currently be installed on the EKS control plane nodes. As a result, the control - plane nodes will not be able to initiate network connections to {prodname} pods. (This is a general limitation - of EKS's custom networking support, not specific to {prodname}.) As a workaround, trusted pods that require - control plane nodes to connect to them, such as those implementing admission controller webhooks, can include{' '} - hostNetwork:true in their pod spec. See the Kubernetes API{' '} - - pod spec - {' '} - definition for more information on this setting. -
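-    To illustrate the workaround, a trusted admission-webhook-style pod could opt into the host network as follows (the names and image are placeholders, not part of the original docs):
-    <CodeBlock language='yaml'>
-      {`apiVersion: v1
-kind: Pod
-metadata:
-  name: example-admission-webhook   # placeholder name
-spec:
-  hostNetwork: true                 # lets the EKS control plane reach this pod
-  containers:
-    - name: webhook
-      image: registry.example.com/webhook:latest   # placeholder image`}
-    </CodeBlock>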

    - - Create an EKS cluster - -

    - For these instructions, we will use eksctl to provision the cluster. However, you can use any of - the methods in{' '} - - Getting Started with Amazon EKS - -

    -

    - Before you get started, make sure you have downloaded and configured the{' '} - - necessary prerequisites - -

    -
      -
    1. -

      First, create an Amazon EKS cluster without any nodes.

      - eksctl create cluster --name my-calico-cluster --without-nodegroup -
    2. -
    3. -

      - Since this cluster will use {prodname} for networking, you must delete the aws-node daemon set - to disable AWS VPC networking for pods. -

      - kubectl delete daemonset -n kube-system aws-node -
    4. -
    - - Install {prodname} - -
      -
    1. -

      - - Configure a storage class for {prodname}. - -

      -
    2. -
    3. -

      Install the Tigera Operator and custom resource definitions.

      - kubectl create -f {filesUrl}/manifests/tigera-operator.yaml -
    4. -
    5. -

      - Install the Prometheus operator and related custom resource definitions. The Prometheus operator will be - used to deploy Prometheus server and Alertmanager to monitor {prodname} metrics. -

      - - If you have an existing Prometheus operator in your cluster that you want to use, skip this step. To work - with {prodname}, your Prometheus operator must be v0.40.0 or higher. - - kubectl create -f {filesUrl}/manifests/tigera-prometheus-operator.yaml -
    6. -
    7. -

      Install your pull secret.

      -

      - If pulling images directly from quay.io/tigera, you will likely want to use the credentials - provided to you by your Tigera support representative. If using a private registry, use your private - registry credentials instead. -

      - - {`kubectl create secret generic tigera-pull-secret \\ - --type=kubernetes.io/dockerconfigjson -n tigera-operator \\ - --from-file=.dockerconfigjson=`} - -
    8. -
    9. -

      - Install any extra {prodname} resources needed at - cluster start using calicoctl. -

      -
    10. -
    11. -

      - To configure {prodname} for use with the Calico CNI plugin, we must create an Installation{' '} - resource that has spec.cni.type: Calico. Install the{' '} - custom-resources-calico-cni.yaml manifest, which includes this configuration. For more - information on configuration options available in this manifest, see{' '} - the installation reference. -

      - {props.clusterType !== 'managed' && ( - kubectl create -f {filesUrl}/manifests/eks/custom-resources-calico-cni.yaml - )} -
    12. - {props.clusterType === 'managed' && ( - <> -
    13. -

      - Download the Tigera custom resources. For more information on configuration options available in this - manifest, see the installation reference. -

      - - curl -O -L {filesUrl}/manifests/eks/custom-resources-calico-cni.yaml - -

      - Remove the Manager custom resource from the manifest file. -

      - - {`apiVersion: operator.tigera.io/v1 -kind: Manager -metadata: - name: tigera-secure -spec: - # Authentication configuration for accessing the Tigera manager. - # Default is to use token-based authentication. - auth: - type: Token`} - -

      - Remove the LogStorage custom resource from the manifest file. -

      - - {`apiVersion: operator.tigera.io/v1 -kind: LogStorage -metadata: - name: tigera-secure -spec: - nodes: - count: 1`} - -

      Now apply the modified manifest.

      - {`kubectl create -f ./custom-resources-calico-cni.yaml`} -
    14. -
    15. -

      Monitor progress with the following command:

      - watch kubectl get tigerastatus -
    16. - - )} -
    17. -

      Finally, add nodes to the cluster.

      - - {`eksctl create nodegroup --cluster my-calico-cluster --node-type t3.xlarge --node-ami auto --max-pods-per-node 100`} - -
      -

      - Tip: Without the --max-pods-per-node option above, EKS will limit the{' '} - - number of pods based on node-type - - . See eksctl create nodegroup --help for the full set of node group options. -

      -
      -
    18. -
    19. -

      Monitor progress with the following command:

      - watch kubectl get tigerastatus -

      - Wait until the apiserver shows a status of Available, then proceed to the next - section. -

      -
    20. -
    - {(props.clusterType === 'standalone' || props.clusterType === 'management') && ( - <> - - Install the {prodname} license - -

    In order to use {prodname}, you must install the license provided to you by Tigera.

    - {`kubectl create -f `} -

    You can now monitor progress with the following command:

    - watch kubectl get tigerastatus - - )} - {props.clusterType === 'management' && ( - <> - - Create a management cluster - -

    - To control managed clusters from your central management plane, you must ensure it is reachable for - connections. The simplest way to get started (but not suitable for production scenarios) is to configure a{' '} - NodePort service to expose the management cluster. Note that the service must live within the{' '} - tigera-manager namespace. -

    -
      -
    1. -

      Create a service to expose the management cluster.

      -

      - The following example of a NodePort service may not be suitable for production and high availability. - For options, see{' '} - - Fine-tune multi-cluster management for production - - . -

      -

      Apply the following service manifest.

      - - {`kubectl create -f - < -
    2. -
    3. -

      - Export the service port number, and the public IP or host of the management cluster. (Ex. - "example.com:1234" or "10.0.0.10:1234".) -

      - {`export MANAGEMENT_CLUSTER_ADDR=`} -
    4. -
    5. -

      - Apply the{' '} - - ManagementCluster - {' '} - CR. -

      - - {`kubectl apply -f - < -
    6. -
    - - Create an admin user and verify management cluster connection - -

    - To access resources in a managed cluster from the {prodname} Manager within the management cluster, the - logged-in user must have appropriate permissions defined in that managed cluster (clusterrole bindings). -

    -
      -
    1. -

      - Create an admin user called mcm-user in the default namespace with full permissions by - applying the following commands. -

      - - {`kubectl create sa mcm-user -kubectl create clusterrolebinding mcm-user-admin --serviceaccount=default:mcm-user --clusterrole=tigera-network-admin`} - -
    2. -
    3. -

      Get the login token for your new admin user, and log in to {prodname} Manager.

      - - {`kubectl get secret $(kubectl get serviceaccount mcm-user -o jsonpath='{range .secrets[*]}{.name}{"\n"}{end}' | grep token) -o go-template='{{.data.token | base64decode}}' && echo`} - -

      - In the top right banner, your management cluster is displayed as the first entry in the cluster - selection drop-down menu with the fixed name, management cluster. -

      - Cluster Created -
    4. -
    -

    You have successfully installed a management cluster.

    - - )} - {props.clusterType === 'managed' && ( - <> - - - Provide permissions to view the managed cluster - -

    - To access resources belonging to a managed cluster from the {prodname} Manager UI, the service or user - account used to log in must have appropriate permissions defined in the managed cluster. -

    -

    - Let's define admin-level permissions for the service account (mcm-user) we created to log in to - the Manager UI. Run the following command against your managed cluster. -

    - - {`kubectl create clusterrolebinding mcm-user-admin --serviceaccount=default:mcm-user --clusterrole=tigera-network-admin`} - - - )} - - ); -} diff --git a/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/InstallGKE.js b/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/InstallGKE.js deleted file mode 100644 index 89ebe93449..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/InstallGKE.js +++ /dev/null @@ -1,262 +0,0 @@ -import React from 'react'; -import { If, When, Then, Else } from 'react-if'; - -import CodeBlock from '@theme/CodeBlock'; -import Link from '@docusaurus/Link'; -import Admonition from '@theme/Admonition'; -import Heading from '@theme/Heading'; - -import ConfigureManagedCluster from './ConfigureManagedCluster'; -import { prodname, baseUrl, filesUrl } from '../../variables'; - -export default function InstallGKE(props) { - return ( - <> - - Install {prodname} - -
      -
    1. -

      Install the Tigera Operator and custom resource definitions.

      - kubectl create -f {filesUrl}/manifests/tigera-operator.yaml -
    2. -
    3. -

      - Install the Prometheus operator and related custom resource definitions. The Prometheus operator will be - used to deploy Prometheus server and Alertmanager to monitor {prodname} metrics. -

      - - If you have an existing Prometheus operator in your cluster that you want to use, skip this step. To work - with {prodname}, your Prometheus operator must be v0.40.0 or higher. - - kubectl create -f {filesUrl}/manifests/tigera-prometheus-operator.yaml -
    4. -
    5. -

      Install your pull secret.

      -

      - If pulling images directly from quay.io/tigera, you will likely want to use the credentials - provided to you by your Tigera support representative. If using a private registry, use your private - registry credentials instead. -

      - - {`kubectl create secret generic tigera-pull-secret \\ - --type=kubernetes.io/dockerconfigjson -n tigera-operator \\ - --from-file=.dockerconfigjson=`} - -
    6. -
    7. -

      - Install any extra Calico resources needed at cluster - start using calicoctl. -

      -
    8. - - -
    9. -

      - Download the Tigera custom resources. For more information on configuration options available in this - manifest, see the installation reference. -

      - curl -O -L {filesUrl}/manifests/custom-resources.yaml -

      - Remove the Manager custom resource from the manifest file. -

      - - {`apiVersion: operator.tigera.io/v1 -kind: Manager -metadata: - name: tigera-secure -spec: - # Authentication configuration for accessing the Tigera manager. - # Default is to use token-based authentication. - auth: - type: Token`} - -

      - Remove the LogStorage custom resource from the manifest file. -

      - - {`apiVersion: operator.tigera.io/v1 -kind: LogStorage -metadata: - name: tigera-secure -spec: - nodes: - count: 1`} - -

      Now apply the modified manifest.

      - {`kubectl create -f ./custom-resources.yaml`} -

      You can now monitor progress with the following command:

      - watch kubectl get tigerastatus -

      - Wait until the apiserver shows a status of Available, then proceed to the next - section. -

      -
    10. -
      - -
    11. -

      - Install the Tigera custom resources. For more information on configuration options available in this - manifest, see the installation reference. -

      - kubectl create -f {filesUrl}/manifests/custom-resources.yaml -

      You can now monitor progress with the following command:

      - watch kubectl get tigerastatus -

      - Wait until the apiserver shows a status of Available, then proceed to the next - section. -

      -
    12. -
      -
      -
    - - - Install the {prodname} license - -

    In order to use {prodname}, you must install the license provided to you by Tigera.

    - {`kubectl create -f `} -

    You can now monitor progress with the following command:

    - watch kubectl get tigerastatus -
    - - - Create a management cluster - -

    - To control managed clusters from your central management plane, you must ensure it is reachable for - connections. The simplest way to get started (but not suitable for production scenarios) is to configure a{' '} - NodePort service to expose the management cluster. Note that the service must live within the{' '} - tigera-manager namespace. -

    -
      -
    1. -

      Create a service to expose the management cluster.

      -

      - The following example of a NodePort service may not be suitable for production and high availability. For - options, see{' '} - - Fine-tune multi-cluster management for production - - . -

      -

      Apply the following service manifest.

      - - {`kubectl create -f - < -
    2. -
    3. -

      - Export the service port number, and the public IP or host of the management cluster. (Ex. - "example.com:1234" or "10.0.0.10:1234".) -

      - {`export MANAGEMENT_CLUSTER_ADDR=`} -
    4. -
    5. -

      - Apply the{' '} - - ManagementCluster - {' '} - CR. -

      - - {`kubectl apply -f - < -
    6. -
    - - Create an admin user and verify management cluster connection - -

    - To access resources in a managed cluster from the {prodname} Manager within the management cluster, the - logged-in user must have appropriate permissions defined in that managed cluster (clusterrole bindings). -

    -
      -
    1. -

      - Create an admin user called mcm-user in the default namespace with full permissions by - applying the following commands. -

      - - {`kubectl create sa mcm-user -kubectl create clusterrolebinding mcm-user-admin --serviceaccount=default:mcm-user --clusterrole=tigera-network-admin`} - -
    2. -
    3. -

      Get the login token for your new admin user, and log in to {prodname} Manager.

      - - {`kubectl get secret $(kubectl get serviceaccount mcm-user -o jsonpath='{range .secrets[*]}{.name}{"\n"}{end}' | grep token) -o go-template='{{.data.token | base64decode}}' && echo`} - -

      - In the top right banner, your management cluster is displayed as the first entry in the cluster selection - drop-down menu with the fixed name, management cluster. -

      - Cluster Created -
    4. -
    -

    You have successfully installed a management cluster.

    -
    - -

    - -

    - - Provide permissions to view the managed cluster - -

    - To access resources belonging to a managed cluster from the {prodname} Manager UI, the service or user account - used to log in must have appropriate permissions defined in the managed cluster. -

    -

    - Let's define admin-level permissions for the service account (mcm-user) we created to log in to - the Manager UI. Run the following command against your managed cluster. -

    - - {`kubectl create clusterrolebinding mcm-user-admin --serviceaccount=default:mcm-user --clusterrole=tigera-network-admin`} - -
    - - ); -} diff --git a/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/InstallGeneric.js b/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/InstallGeneric.js deleted file mode 100644 index 72da35004d..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/InstallGeneric.js +++ /dev/null @@ -1,267 +0,0 @@ -import React from 'react'; -import { When } from 'react-if'; - -import Admonition from '@theme/Admonition'; -import CodeBlock from '@theme/CodeBlock'; -import Link from '@docusaurus/Link'; -import Heading from '@theme/Heading'; - -import ConfigureManagedCluster from './ConfigureManagedCluster'; -import { prodname, prodnamedash, baseUrl, filesUrl } from '../../variables'; - -export default function InstallGeneric(props) { - return ( - <> - - Install {prodname} - - -
  • - Configure storage for {prodname}. -
  • -
    -
      -
    1. -

      Install the Tigera Operator and custom resource definitions.

      - kubectl create -f {filesUrl}/manifests/tigera-operator.yaml -
    2. -
    3. -

      - Install the Prometheus operator and related custom resource definitions. The Prometheus operator will be - used to deploy Prometheus server and Alertmanager to monitor {prodname} metrics. -

      - - If you have an existing Prometheus operator in your cluster that you want to use, skip this step. To work - with {prodname}, your Prometheus operator must be v0.40.0 or higher. - - kubectl create -f {filesUrl}/manifests/tigera-prometheus-operator.yaml -
    4. -
    5. - Install your pull secret. -

      - If pulling images directly from quay.io/tigera, you will likely want to use the credentials - provided to you by your Tigera support representative. If using a private registry, use your private - registry credentials. -

      - - {`kubectl create secret generic tigera-pull-secret \\ - --type=kubernetes.io/dockerconfigjson -n tigera-operator \\ - --from-file=.dockerconfigjson=`} - -
    6. -
    7. - (Optional) If your cluster architecture requires any custom{' '} - {prodname} resources to function at startup, install them - now using calicoctl. -
    8. - -
    9. -

      - Download the Tigera custom resources. For more information on configuration options available in this - manifest, see the installation reference. -

      - curl -O -L {filesUrl}/manifests/custom-resources.yaml -

      - Remove the Manager custom resource from the manifest file. -

      - - {`apiVersion: operator.tigera.io/v1 -kind: Manager -metadata: - name: tigera-secure -spec: - # Authentication configuration for accessing the Tigera manager. - # Default is to use token-based authentication. - auth: - type: Token`} - -

      - Remove the LogStorage custom resource from the manifest file. -

      - - {`apiVersion: operator.tigera.io/v1 -kind: LogStorage -metadata: - name: tigera-secure -spec: - nodes: - count: 1`} - -

      Now apply the modified manifest.

      - kubectl create -f ./custom-resources.yaml -
    10. -
      - -
    11. -

      - Install the Tigera custom resources. For more information on configuration options available, see{' '} - the installation reference. -

      - kubectl create -f {filesUrl}/manifests/custom-resources.yaml -
    12. -
      -

      You can now monitor progress with the following command:

      - watch kubectl get tigerastatus -

      - Wait until the apiserver shows a status of Available, then proceed to the next - section. -

      -
    - - <> - - Install {prodname} license - -

    Install the {prodname} license provided to you by Tigera.

    - - kubectl create -f {'<'}/path/to/license.yaml{'>'} - -

    You can now monitor progress with the following command:

    - watch kubectl get tigerastatus - -
    - - <> - - Create a management cluster - -

    - To control managed clusters from your central management plane, you must ensure it is reachable for - connections. The simplest way to get started (but not suitable for production scenarios) is to configure a{' '} - NodePort service to expose the management cluster. Note that the service must live within the{' '} - tigera-manager namespace. -

    -
      -
    1. -

      - Create a service to expose the management cluster. The following example of a NodePort service may not - be suitable for production and high availability. For options, see{' '} - - Fine-tune multi-cluster management for production - - . Apply the following service manifest. -

      - - {`kubectl create -f - < -
    2. -
    3. -

      - Export the service port number, and the public IP or host of the management cluster. (Ex. - "example.com:1234" or "10.0.0.10:1234".) -

      - - export MANAGEMENT_CLUSTER_ADDR={'<'}your-management-cluster-addr{'>'} - -
    4. -
    5. -

      - Apply the{' '} - - ManagementCluster - {' '} - CR. -

      - - {`kubectl apply -f - < -
    6. -
    - - Create an admin user and verify management cluster connection - -

    - To access resources in a managed cluster from the {prodname} Manager within the management cluster, the - logged-in user must have appropriate permissions defined in that managed cluster (clusterrole bindings). -

    -
      -
    1. -

      - Create an admin user called mcm-user in the default namespace with full permissions by - applying the following commands. -

      - - kubectl create sa mcm-user{'\n'} - kubectl create clusterrolebinding mcm-user-admin --serviceaccount=default:mcm-user - --clusterrole=tigera-network-admin - -
    2. -
    3. -

      Get the login token for your new admin user, and log in to {prodname} Manager.

      - - {`kubectl get secret $(kubectl get serviceaccount mcm-user -o jsonpath='{range .secrets[*]}{.name}{"\n"}{end}' | grep token) -o go-template='{{.data.token | base64decode}}' && echo`} - -

      - In the top right banner, your management cluster is displayed as the first entry in the cluster - selection drop-down menu with the fixed name, management cluster. -

      -

      - Cluster Created -

      -
    4. -
    -

    You have successfully installed a management cluster.

    - -
    - - <> - - - Provide permissions to view the managed cluster - -

    - To access resources belonging to a managed cluster from the {prodname} Manager UI, the service or user - account used to log in must have appropriate permissions defined in the managed cluster. -

    -

    - Let's define admin-level permissions for the service account (mcm-user) we created to log - in to the Manager UI. Run the following command against your managed cluster. -

    - - kubectl create clusterrolebinding mcm-user-admin --serviceaccount=default:mcm-user - --clusterrole=tigera-network-admin - - -
    - - ); -} diff --git a/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/InstallOpenShift.js b/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/InstallOpenShift.js deleted file mode 100644 index 9fabee3170..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/InstallOpenShift.js +++ /dev/null @@ -1,662 +0,0 @@ -import React from 'react'; -import { When } from 'react-if'; - -import Admonition from '@theme/Admonition'; -import CodeBlock from '@theme/CodeBlock'; -import Link from '@docusaurus/Link'; -import Heading from '@theme/Heading'; - -import InstallOpenShiftManifests from './InstallOpenShiftManifests'; -import OpenShiftPullSecret from './OpenShiftPullSecret'; -import OpenShiftPrometheusOperator from './OpenShiftPrometheusOperator'; -import ConfigureManagedCluster from './ConfigureManagedCluster'; - -import { prodname, prodnameWindows, prodnamedash, rootDirWindows, baseUrl, filesUrl, tempFilesURL } from '../../variables'; - -export default function InstallOpenShift(props) { - return ( - <> - - Create a configuration file for the OpenShift installer - -

    - First, create a staging directory for the installation. This directory will contain the configuration file, - along with the cluster state files that the OpenShift installer will create: -

    - mkdir openshift-tigera-install && cd openshift-tigera-install -

    Now run the OpenShift installer to create a default configuration file:

    - openshift-install create install-config - - See the{' '} - - OpenShift installer documentation - {' '} - for more information about the installer and any configuration changes required for your platform. - -

    - After the installer finishes, your staging directory will contain the configuration file{' '} - install-config.yaml. -

    - - - Update the configuration file to use {prodname} - -

    - Override the OpenShift networking to use {prodname} and update the AWS instance types to meet the{' '} - system requirements: -

    - - sed -i 's/\(OpenShiftSDN\|OVNKubernetes\)/Calico/' install-config.yaml{'\n'} - - - -
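After the sed edit, the networking stanza of install-config.yaml should reference Calico. For orientation only, the relevant excerpt could look roughly like this (the CIDRs are whatever the installer generated for your cluster):

```yaml
networking:
  networkType: Calico        # changed from OpenShiftSDN/OVNKubernetes by the sed above
  clusterNetwork:
    - cidr: 10.128.0.0/14    # installer default; yours may differ
      hostPrefix: 23
  serviceNetwork:
    - 172.30.0.0/16
```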

    By default, openshift-installer creates 3 replicas. You can change this by modifying the cloud-provider section in install-config.yaml.

    -

    The following example changes the default deployment instance type and replica quantity.

    - - {`... - platform: - aws: - type: m5.xlarge - replicas: 2 - ...`} - -
    - - - Generate the install manifests - -

    Now generate the Kubernetes manifests using your configuration file:

    - openshift-install create manifests - -

    For OpenShift v4.16 or newer on AWS, configure AWS security groups to allow BGP, Typha, and IP-in-IP encapsulation traffic by editing the OpenShift cluster-api manifests.

    -

    Edit spec.network.cni.cniIngressRules in the cluster-api/02_infra-cluster.yaml file to add

    - -{` cniIngressRules: - (...) - - description: BGP (calico) - fromPort: 179 - protocol: tcp - toPort: 179 - - description: IP-in-IP (calico) - fromPort: -1 - protocol: "4" - toPort: -1 - - description: Typha (calico) - fromPort: 5473 - protocol: tcp - toPort: 5473`} - -
    - - - {/* For IPI hybrid clusters (Linux + Windows) we need to enable VXLAN and disable BGP */} - - <> -

    - Edit the Installation custom resource manifest manifests/01-cr-installation.yaml so that it - enables VXLAN and disables BGP. This is required for {prodnameWindows}: -

    - - {`apiVersion: operator.tigera.io/v1 -kind: Installation -metadata: - name: default -spec: - variant: Calico - calicoNetwork: - bgp: Disabled - ipPools: - - blockSize: 26 - cidr: 10.128.0.0/14 - encapsulation: VXLAN - natOutgoing: Enabled - nodeSelector: all()`} - - -
    - - Add an image pull secret - - - - - Provide additional configuration - -

    - To provide additional configuration during installation (for example, BGP configuration or peers), use a - Kubernetes ConfigMap with your desired {prodname} resources. If you do not need to provide additional - configuration, skip this section. -

    -

    - To include {prodname} resources during installation, edit{' '} - manifests/02-configmap-calico-resources.yaml in order to add your own configuration. -

    - -

    If you have a directory with the {prodname} resources, you can create the file with the command:

    - - {`kubectl create configmap -n tigera-operator calico-resources \\ - --from-file= --dry-run -o yaml \\ - > manifests/02-configmap-calico-resources.yaml`} - -

    - With recent versions of kubectl it is necessary to have a kubeconfig configured or add{' '} - --server='127.0.0.1:443' even though it is not used. -

    -
    - -

    - If you have provided a calico-resources configmap and the tigera-operator pod fails to come up - with Init:CrashLoopBackOff, check the output of the init-container with{' '} - kubectl logs -n tigera-operator -l k8s-app=tigera-operator -c create-initial-resources. -

    -
    - - - Create the cluster - -

    Start the cluster creation with the following command and wait for it to complete.

    - openshift-install create cluster - - - Create a storage class - -

    - {prodname} requires storage for logs and reports. Before finishing the installation, you must{' '} - create a StorageClass for {prodname}. -
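As a hedged example, on AWS a suitable StorageClass could look like the following; the tigera-elasticsearch name is what the log storage component expects, while the provisioner and parameters depend on your cluster:

```yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: tigera-elasticsearch       # name expected for Calico Enterprise log storage
provisioner: kubernetes.io/aws-ebs # swap for your cluster's storage provisioner
parameters:
  type: gp2
reclaimPolicy: Retain
allowVolumeExpansion: true
```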

    - - - <> - - Install the {prodname} license - -

    - In order to use {prodname}, you must install the license provided to you by your Tigera support representative. - Before applying the license, wait until the Tigera API server is ready with the following command: -

    - watch oc get tigerastatus -

    - Wait until the apiserver shows a status of Available. -

    -

    After the Tigera API server is ready, apply the license:

    - {'oc create -f '} - -
    - - - Install {prodname} resources - - - {/* OCP_ENTERPRISE_RESOURCES variable in Makefile needs to be updated for any addition or deletion of enterprise resources */} - - - <> -

    - Download the Tigera custom resources. For more information on configuration options available in this - manifest, see the installation reference. -

    - curl -O -L {filesUrl}/manifests/ocp/tigera-enterprise-resources.yaml -

    - Remove the Manager custom resource from the manifest file. -

    - - {`apiVersion: operator.tigera.io/v1 -kind: Manager -metadata: - name: tigera-secure -spec: - # Authentication configuration for accessing the Tigera manager. - # Default is to use token-based authentication. - auth: - type: Token`} - -

    - Remove the LogStorage custom resource from the manifest file. -

    - - {`apiVersion: operator.tigera.io/v1 -kind: LogStorage -metadata: - name: tigera-secure -spec: - nodes: - count: 1`} - -
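You can delete those two documents by hand in an editor. As a sketch, assuming yq v4 is installed, the same edit can be done in one pass:

```bash
# Drop the Manager and LogStorage documents from the multi-document manifest, keep the rest.
yq eval 'select(.kind != "Manager" and .kind != "LogStorage")' \
  tigera-enterprise-resources.yaml > tigera-enterprise-resources-trimmed.yaml
mv tigera-enterprise-resources-trimmed.yaml tigera-enterprise-resources.yaml
```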

    Now apply the modified manifest.

    - oc create -f ./tigera-enterprise-resources.yaml - -
    - - - <> -

    Apply the custom resources for enterprise features.

    - oc create -f {filesUrl}/manifests/ocp/tigera-enterprise-resources.yaml -
    - - - -

    You can now monitor progress with the following command:

    - watch oc get tigerastatus -

    - When it shows all components with status Available, proceed to the next step. -

    - -

    (Optional) Apply the full CRDs including descriptions.

    - oc apply --server-side --force-conflicts -f {filesUrl}/manifests/operator-crds.yaml - - - <> - - Create a management cluster - -

    - To control managed clusters from your central management plane, you must ensure it is reachable for - connections. The simplest way to get started (but not for production scenarios), is to configure a{' '} - NodePort service to expose the management cluster. Note that the service must live within the{' '} - tigera-manager namespace. -

    -
      -
    1. -

      - Create a service to expose the management cluster. The following example of a NodePort service may not - be suitable for production and high availability. For options, see{' '} - - Fine-tune multi-cluster management for production - - . Apply the following service manifest. -

      - - {`oc create -f - < -
    2. -
    3. -

      - Export the service port number and the public IP or host of the management cluster (for example, "example.com:1234" or "10.0.0.10:1234"). -

      - {'export MANAGEMENT_CLUSTER_ADDR='} -
    4. -
    5. -

      - Apply the{' '} - - ManagementCluster - {' '} - CR. -

      - - {`oc apply -f - < -
    6. -
    - - Create an admin user and verify management cluster connection - -

    - To access resources in a managed cluster from the {prodname} Manager within the management cluster, the - logged-in user must have appropriate permissions defined in that managed cluster (clusterrole bindings). -

    -
      -
    1. -

      - Create an admin user called mcm-user in the default namespace with full permissions by applying the following commands. -

      - - oc create sa mcm-user{'\n'} - oc create clusterrolebinding mcm-user-admin --serviceaccount=default:mcm-user - --clusterrole=tigera-network-admin - -
    2. -
    3. -

      Get the login token for your new admin user, and log in to {prodname} Manager.

      - - {`oc get secret $(oc get serviceaccount mcm-user -o jsonpath='{range .secrets[*]}{.name}{"\n"}{end}' | grep token) -o go-template='{{.data.token | base64decode}}' && echo`} - -

      - In the top right banner, your management cluster is displayed as the first entry in the cluster - selection drop-down menu with the fixed name, management cluster. -

      -

      - Cluster Created -

      -
    4. -
    -

    You have successfully installed a management cluster.

    - -
    - - - <> - - - Provide permissions to view the managed cluster - -

    - To access resources belonging to a managed cluster from the {prodname} Manager UI, the service or user - account used to log in must have appropriate permissions defined in the managed cluster. -

    -

    - Let's define admin-level permissions for the service account (mcm-user) we created to log - in to the Manager UI. Run the following command against your managed cluster. -

    - - oc create clusterrolebinding mcm-user-admin --serviceaccount=default:mcm-user - --clusterrole=tigera-network-admin - - -
    - - - - - - ); -} - -// Contains extra OpenShift installation instructions for hybrid Linux+Windows clusters. -function InstallOpenShiftWindows() { - return ( - <> - - Configure strict affinity - -

    - Next, install calicoctl and ensure strict - affinity is true: -

    - calicoctl ipam configure --strictaffinity=true - - Add Windows nodes to the cluster - -

    - Download the latest{' '} - - Windows Node Installer (WNI) - {' '} - binary wni that matches your OpenShift minor version. -

    - - For OpenShift 4.6, use the latest wni for OpenShift 4.5. A wni binary for OpenShift 4.6 is not published yet. - -

    - Next, determine the AMI ID corresponding to Windows Server 1903 (build 18317) or greater. wni defaults to using Windows Server 2019 (build 10.0.17763), which does not include WinDSR support. One way to do this is by searching for AMIs matching the string Windows_Server-1903-English-Core-ContainersLatest in the Amazon EC2 console. -

    -

    - Next, run wni to add a Windows node to your cluster. Replace AMI_ID, AWS_CREDENTIALS_PATH, - AWS_KEY_NAME and AWS_PRIVATE_KEY_PATH with your values: -

    - - {`chmod u+x wni -./wni aws create \\ - --image-id AMI_ID \\ - --kubeconfig openshift-tigera-install/auth/kubeconfig \\ - --credentials AWS_CREDENTIALS_PATH \\ - --credential-account default \\ - --instance-type m5a.large \\ - --ssh-key AWS_KEY_NAME \\ - --private-key AWS_PRIVATE_KEY_PATH`} - -

    An example of running the above steps:

    - - {`$ chmod u+x wni -$ ./wni aws create \ -> --kubeconfig openshift-tigera-install/auth/kubeconfig \\ -> --credentials ~/.aws/credentials \\ -> --credential-account default \\ -> --instance-type m5a.large \\ -> --ssh-key test-key \\ -> --private-key /home/user/.ssh/test-key.pem -2020/10/05 12:52:51 kubeconfig source: /home/user/openshift-tigera-install/auth/kubeconfig -2020/10/05 12:52:59 Added rule with port 5986 to the security groups of your local IP -2020/10/05 12:52:59 Added rule with port 22 to the security groups of your local IP -2020/10/05 12:52:59 Added rule with port 3389 to the security groups of your local IP -2020/10/05 12:52:59 Using existing Security Group: sg-06d1de22807d5dc48 -2020/10/05 12:57:30 External IP: 52.35.12.231 -2020/10/05 12:57:30 Internal IP: 10.0.90.193`} - - - Get the administrator password - -

    - The wni binary writes the instance details to the file windows-node-installer.json. An - example of the file: -

    - {`{"InstanceIDs":["i-02e13d4cc76c13c83"],"SecurityGroupIDs":["sg-0a777565d64e1d2ef"]}`} -

    - Use the instance ID from the file and the path of the private key used to create the instance to get the - Administrator user's password: -

    - - {'aws ec2 get-password-data --instance-id --priv-launch-key '} - - - - Install {prodnameWindows} - -
      -
    1. -

      Remote into the Windows node, open a Powershell window, and prepare the directory for Kubernetes files.

      - mkdir c:\k -
    2. -
    3. -

      - Copy the Kubernetes kubeconfig file (default location: openshift-tigera-install/auth/kubeconfig), to the - file c:\k\config. -

      -
    4. -
    5. -

      - Download the PowerShell script install-calico-windows.ps1. -

      - - Invoke-WebRequest {tempFilesURL}/scripts/install-calico-windows.ps1 -OutFile c:\install-calico-windows.ps1 - -
    6. -
    7. -

      - Run the installation script, replacing the Kubernetes version with the version corresponding to your version - of OpenShift. -

      - - { - 'c:\install-calico-windows.ps1 -KubeVersion -ServiceCidr 172.30.0.0/16 -DNSServerIPs 172.30.0.10' - } - -

      - Get the Kubernetes version with oc version and use only the major, minor, and patch version - numbers. For example from a cluster that returns: -

      - - {`$ oc version -Client Version: 4.5.3 -Server Version: 4.5.14 -Kubernetes Version: v1.18.3+5302882`} - -

      - You will use 1.18.3: -
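Putting that together with the ServiceCidr and DNSServerIPs values from the earlier step, the invocation for this example cluster would be:

```powershell
c:\install-calico-windows.ps1 -KubeVersion 1.18.3 -ServiceCidr 172.30.0.0/16 -DNSServerIPs 172.30.0.10
```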

      -
      -
    8. -
    9. -

      Install and start the kube-proxy service by executing the following PowerShell commands.

      - - {rootDirWindows}\kubernetes\install-kube-services.ps1 -service kube-proxy{'\n'} - Start-Service -Name kube-proxy - -
    10. -
    11. -

      Verify that the kube-proxy service is running.

      - Get-Service -Name kube-proxy -
    12. -
    - - Configure kubelet - -

    - From the Windows node, download the Windows Machine Config Bootstrapper wmcb.exe that matches your - OpenShift minor version from{' '} - - Windows Machine Config Bootstrapper releases - - . For example, for OpenShift 4.5.x: -

    - - curl https://github.com/openshift/windows-machine-config-bootstrapper/releases/download/v4.5.2-alpha/wmcb.exe -o c:\wmcb.exe - - - For OpenShift 4.6, use the latest wmcb.exe for OpenShift 4.5. A wmcb.exe binary for OpenShift 4.6 is not - published yet. - -

    - Next, we will download the worker.ign file from the API server: -

    - - {`$apiServer = c:\k\kubectl --kubeconfig c:\k\config get po -n openshift-kube-apiserver -l apiserver=true --no-headers -o custom-columns=":metadata.name" | select -first 1 -c:\k\kubectl --kubeconfig c:\k\config -n openshift-kube-apiserver exec $apiserver -- curl -ks https://localhost:22623/config/worker > c:\worker.ign -((Get-Content c:\worker.ign) -join "\`n") + "\`n" | Set-Content -NoNewline c:\worker.ign`} - -

    Next, we run wmcb to configure the kubelet:

    - - c:\wmcb.exe initialize-kubelet --ignition-file worker.ign --kubelet-path c:\k\kubelet.exe - - - The kubelet configuration installed by Windows Machine Config Bootstrapper includes{' '} - --register-with-taints="os=Windows:NoSchedule" which will require Windows pods to - tolerate that taint. - -

    - Next, we make a copy of the kubeconfig because wmcb.exe expects the kubeconfig to be the file{' '} - c:\k\kubeconfig. Then we configure kubelet to use Calico CNI: -

    - - cp c:\k\config c:\k\kubeconfig{'\n'} - c:\wmcb.exe configure-cni --cni-dir c:\k\cni --cni-config c:\k\cni\config\10-calico.conf - -

    Finally, clean up the additional files created on the Windows node:

    - rm c:\k\kubeconfig,c:\wmcb.exe,c:\worker.ign -

    Exit the remote session to the Windows node and return to a shell on a Linux node.

    -

    - We need to approve the CSRs generated by the kubelet's bootstrapping process. First, view the pending CSRs: -

    - oc get csr -

    For example:

    - - {`$ oc get csr -NAME AGE SIGNERNAME REQUESTOR CONDITION -csr-55brx 4m32s kubernetes.io/kube-apiserver-client-kubelet system:admin Approved,Issued -csr-bmnfd 4m30s kubernetes.io/kubelet-serving system:node:ip-10-0-45-102.us-west-2.compute.internal Pending -csr-hwl89 5m1s kubernetes.io/kube-apiserver-client-kubelet system:serviceaccount:openshift-machine-config-operator:node-bootstrapper Pending`} - -

    To approve the pending CSRs:

    - oc get csr -o name | xargs oc adm certificate approve -

    For example:

    - - {`$ oc get csr -o name | xargs oc adm certificate approve -certificatesigningrequest.certificates.k8s.io/csr-55brx approved -certificatesigningrequest.certificates.k8s.io/csr-bmnfd approved -certificatesigningrequest.certificates.k8s.io/csr-hwl89 approved`} - -

    Finally, wait a minute or so and get all nodes:

    - $ oc get node -owide -

    - If the Windows node registered itself successfully, it should appear in the list with a Ready status, ready to - run Windows pods! -

    - - ); -} diff --git a/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/InstallOpenShiftManifests.js b/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/InstallOpenShiftManifests.js deleted file mode 100644 index a2bb6a43c5..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/InstallOpenShiftManifests.js +++ /dev/null @@ -1,24 +0,0 @@ -import React from 'react'; - -import CodeBlock from '@theme/CodeBlock'; - -import { prodname, filesUrl } from '../../variables'; - -export default function InstallOpenShiftManifests(props) { - const uf = props.upgradeFrom; - const exclude1 = ' --exclude=01-cr-*'; - const exclude2 = ' --exclude=02-pull-secret.yaml'; - const flag1 = uf ? exclude1 : ''; - const flag2 = uf === 'Enterprise' ? exclude2 : ''; - - return ( - <> -

    Download the {prodname} manifests for OpenShift and add them to the generated manifests directory:

    - - {`mkdir calico -wget -qO- ${filesUrl}/manifests/ocp.tgz | tar xvz --strip-components=1 -C calico ${flag1}${flag2} -cp calico/* manifests/`} - - - ); -} diff --git a/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/InstallOpenshiftBeforeYouBegin.js b/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/InstallOpenshiftBeforeYouBegin.js deleted file mode 100644 index f8291a77eb..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/InstallOpenshiftBeforeYouBegin.js +++ /dev/null @@ -1,116 +0,0 @@ -import React from 'react'; - -import Link from '@docusaurus/Link'; -import GeekDetails from '@site/src/components/partials/GeekDetails'; - -import { prodname, prodnameWindows, baseUrl } from '../../variables'; - -export default function InstallOpenshiftBeforeYouBegin(props) { - return ( - <> -

    - CNI support -

    -

    Calico CNI for networking with {prodname} network policy

    -

    The geeky details of what you get:

    - -

    - Required -

    -
      -
    • -

      - A compatible OpenShift cluster -

      -

      - Your environment meets the {prodname}{' '} - - system requirements - -

      -
    • -
    • -

      - A{' '} - - RedHat account - {' '} - for the pull secret to provision an OpenShift cluster. -

      -
    • -
    • -

      - OpenShift command line interface from{' '} - - cloud.redhat.com - -

      -
    • -
    • -

      - Cluster meets the {prodname}{' '} - system requirements -

      -
    • -
    • -

      - If installing on AWS, a configured AWS account appropriate for OpenShift 4, with your AWS credentials set up. Note that the OpenShift installer supports a subset of AWS regions. -

      -
    • -
    • -

      - OpenShift installer and OpenShift command line interface from{' '} - cloud.redhat.com -

      -
    • -
    • -

      - A generated local SSH private key that is added to your ssh-agent -

      -
    • -
    • -

      - A Tigera license key and credentials -

      -
    • - {props.clusterOS === 'hybrid' && ( - <> -
    • The {prodnameWindows} installation zip archive, which you can get from your support representative.
    • -

      - Limitations -

      -

      - Due to an upstream issue, Windows - pods can only be run in specific namespaces if you disable SCC. To do this, label the namespace with{' '} - openshift.io/run-level: "1". -

      - - )} -
    - - ); -} diff --git a/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/MaintenanceClisCalicoqInstalling.js b/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/MaintenanceClisCalicoqInstalling.js deleted file mode 100644 index e5796989b7..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/MaintenanceClisCalicoqInstalling.js +++ /dev/null @@ -1,17 +0,0 @@ -// Temporary component for "calico-enterprise\maintenance\clis\calicoq\installing.mdx" - -import React from 'react'; - -import CodeBlock from '@theme/CodeBlock'; - -import { downloadsurl, releaseTitle, filesUrl, version } from '../../variables'; - -export default function MaintenanceClisCalicoqInstalling() { - const ver = version === 'master' ? version : releaseTitle; - const code = - ver === 'master' - ? `${filesUrl}/download/binaries/${ver}/calicoq` - : `curl -o calicoq -O -L ${downloadsurl}/ee/binaries/${ver}/calicoq`; - - return {code}; -} diff --git a/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/OpenShiftPrometheusOperator.js b/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/OpenShiftPrometheusOperator.js deleted file mode 100644 index 003e6e431b..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/OpenShiftPrometheusOperator.js +++ /dev/null @@ -1,28 +0,0 @@ -import React from 'react'; - -import Admonition from '@theme/Admonition'; -import CodeBlock from '@theme/CodeBlock'; -import Link from '@docusaurus/Link'; - -import { prodname, baseUrl, filesUrl } from '../../variables'; - -export default function OpenShiftPrometheusOperator(props) { - const createSecret = `oc create secret generic tigera-pull-secret \\ - --type=kubernetes.io/dockerconfigjson -n tigera-prometheus \\ - --from-file=.dockerconfigjson=\n`; - const notOSCodeBlock = props.upgradeFrom !== 'OpenSource' ? createSecret : ''; - - return ( - <> -

    Apply the {prodname} manifests for the Prometheus operator.

    - - Complete this step only if you are using the {prodname} Prometheus operator (including adding your own - Prometheus operator). Skip this step if you are using{' '} - BYO Prometheus that you manage yourself. - - {props.operation === 'install' - ? oc create -f {filesUrl}/manifests/ocp/tigera-prometheus-operator.yaml - : oc apply -f {filesUrl}/manifests/ocp/tigera-prometheus-operator.yaml} - - ); -} diff --git a/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/OpenShiftPullSecret.js b/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/OpenShiftPullSecret.js deleted file mode 100644 index c807e51edb..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/OpenShiftPullSecret.js +++ /dev/null @@ -1,20 +0,0 @@ -import React from 'react'; - -import CodeBlock from '@theme/CodeBlock'; - -export default function OpenShiftPullSecret() { - return ( - <> -

    - Update the contents of the secret with the image pull secret provided to you by your Tigera support representative. -

    -

    - For example, if the secret is located at ~/.docker/config.json, run the following commands. -

    - - {"SECRET=$(cat ~/.docker/config.json | tr -d '\\n\\r\\t ' | base64 -w 0)\n"} - {'sed -i "s/SECRET/${SECRET}/" manifests/02-pull-secret.yaml'} - - - ); -} diff --git a/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/PrivateRegistryImagePath.js b/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/PrivateRegistryImagePath.js deleted file mode 100644 index eb25118cb3..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/PrivateRegistryImagePath.js +++ /dev/null @@ -1,178 +0,0 @@ -import React from 'react'; - -import Admonition from '@theme/Admonition'; -import CodeBlock from '@theme/CodeBlock'; - -import { prodname, prodnamedash, prodnameWindows, registry, releases, tigeraOperator } from '../../variables'; - -export default function PrivateRegistryImagePath() { - const components = releases[0].components; - const componentsWithImage = Object.values(components).filter(filters.withImage); - - return ( - <> -

    - Push {prodname} images to your private registry image path -

    -

    - In order to install images from your private registry, you must first pull the images from Tigera's - registry, re-tag them with your own registry, and then push the newly tagged images to your own registry. -

    -
      -
    1. -

      Use the following commands to pull the required {prodname} images.

      -
    2. - - {`docker pull ${tigeraOperator.registry}/${tigeraOperator.image}:${tigeraOperator.version}\n`} - {componentsWithImage.filter(filters.isNotWindows).map(renderPullCommand).join('')} - - -
    3. -

      - Retag the images with the name of your private registry $PRIVATE_REGISTRY and{' '} - $IMAGE_PATH. -

      -
    4. - - {`docker tag ${tigeraOperator.registry}/${tigeraOperator.image}:${ - tigeraOperator.version - } $PRIVATE_REGISTRY/$IMAGE_PATH/${mapImageToImageName(tigeraOperator.image)}:${tigeraOperator.version}\n`} - {componentsWithImage.filter(filters.isNotWindows).map((component) => { - const registry = mapComponentToRegistry(component); - const imageName = mapImageToImageName(component.image); - - return ( - `docker tag ${registry}${component.image}:${component.version} $PRIVATE_REGISTRY/$IMAGE_PATH/${imageName}:${component.version}\n` - ); - }).join('')} - - -
    5. -

      Push the images to your private registry.

      -
    6. - - {`docker push $PRIVATE_REGISTRY/$IMAGE_PATH/${mapImageToImageName(tigeraOperator.image)}:${ - tigeraOperator.version - }`} - {componentsWithImage.filter(filters.isNotWindows).map((component) => { - const imageName = mapImageToImageName(component.image); - - return `docker push $PRIVATE_REGISTRY/$IMAGE_PATH/${imageName}:${component.version}\n`; - }).join('')} - - Do not push the private {prodname} images to a public registry. -
    7. -

      Use crane cp to copy the Windows images to your private registry.

      -
    8. -

      For hybrid Linux + Windows clusters, use crane cp on the following Windows images to copy them to your private registry.

      - - {componentsWithImage.filter(filters.isWindows).map((component) => { - const imageName = mapImageToImageName(component.image); - - return `crane cp ${registry}${component.image}:${component.version} $PRIVATE_REGISTRY/$IMAGE_PATH/${imageName}:${component.version}\n`; - }).join('')} - - - Do not crane cp the private {prodnameWindows} images to a public registry. -
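As a concrete illustration of what the templated commands above expand to for a single image, here is the operator image with a placeholder tag (substitute the version that matches your release):

```bash
# Pull from Tigera's registry, retag for your registry and image path, then push.
docker pull quay.io/tigera/operator:<version>
docker tag  quay.io/tigera/operator:<version> $PRIVATE_REGISTRY/$IMAGE_PATH/operator:<version>
docker push $PRIVATE_REGISTRY/$IMAGE_PATH/operator:<version>
```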
    - -

    - Run the operator using images from your private registry image path -

    -

    - Before applying tigera-operator.yaml, modify registry references to use your custom registry: -

    - - {`sed -ie "s?quay.io.*/?$PRIVATE_REGISTRY/$IMAGE_PATH/?" tigera-operator.yaml`} - - - {/* The second 'sed' should be removed once operator launches Prometheus & Alertmanager */} - -

    - Next, ensure that an image pull secret has been configured for your custom registry. Set the environment variable PRIVATE_REGISTRY_PULL_SECRET to the secret name. Then add the image pull secret to the operator deployment spec: -

    - - {`sed -ie "/serviceAccountName: tigera-operator/a \ imagePullSecrets:\\n\ - name: $PRIVATE_REGISTRY_PULL_SECRET" tigera-operator.yaml`} - -

    - If you are installing Prometheus operator as part of {prodname}, then before applying{' '} - tigera-prometheus-operator.yaml, modify registry references to use your custom registry: -

    - - {`sed -ie "s?quay.io.*/?$PRIVATE_REGISTRY/$IMAGE_PATH/?" tigera-prometheus-operator.yaml -sed -ie "/serviceAccountName: calico-prometheus-operator/a \ imagePullSecrets:\\n\ - name: $PRIVATE_REGISTRY_PULL_SECRET" tigera-prometheus-operator.yaml`} - - - {/* The second 'sed' should be removed once operator launches Prometheus & Alertmanager */} - -

    - Before applying custom-resources.yaml, modify registry references to use your custom registry: -

    - {`sed -ie "s?quay.io.*/?$PRIVATE_REGISTRY/$IMAGE_PATH/?" custom-resources.yaml`} - - {/* The second 'sed' should be removed once operator launches Prometheus & Alertmanager */} - -

    - For OpenShift, after downloading all manifests, modify the following to use your custom registry: -

    - - {`sed -ie "s?quay.io.*/?$PRIVATE_REGISTRY/$IMAGE_PATH/?" manifests/02-tigera-operator.yaml`} - - - - Add the image pull secret for your registry to the secret tigera-pull-secret - - -

    - Configure the operator to use images from your private registry image path. -

    -

    - Set the spec.registry and spec.imagePath fields of your Installation resource to the name of your custom registry. For example: -

    - - {`apiVersion: operator.tigera.io/v1 -kind: Installation -metadata: - name: default -spec: - variant: TigeraSecureEnterprise - imagePullSecrets: - - name: tigera-pull-secret - // highlight-next-line - registry: myregistry.com - // highlight-next-line - imagePath: my-image-path`} - - - ); - - function renderPullCommand(component) { - const registry = mapComponentToRegistry(component); - - return `docker pull ${registry}${component.image}:${component.version}\n`; - } - - - function mapComponentToRegistry(component) { - if (!component.registry) { - return registry; - } - - return `${component.registry}/`; - } - - function mapImageToImageName(image) { - const imageName = image.split('/').pop(); - - return imageName; - } -} - -const filters = { - withImage: (component) => !!component.image, - isWindows: (component) => component.image.includes('-windows'), - isNotWindows: (component) => !filters.isWindows(component), -}; diff --git a/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/PrivateRegistryRegular.js b/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/PrivateRegistryRegular.js deleted file mode 100644 index d12ee6e426..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/PrivateRegistryRegular.js +++ /dev/null @@ -1,152 +0,0 @@ -import React from 'react'; - -import Admonition from '@theme/Admonition'; -import CodeBlock from '@theme/CodeBlock'; -import Heading from '@theme/Heading'; - -import { tigeraOperator, prodname, prodnamedash, prodnameWindows, registry, releases } from '../../variables'; - -export default function PrivateRegistryRegular() { - const components = releases[0].components; - const componentsWithImage = Object.values(components).filter(filters.withImage); - - return ( - <> - - Push {prodname} images to your private registry - -

    - In order to install images from your private registry, you must first pull the images from Tigera's - registry, re-tag them with your own registry, and then push the newly tagged images to your own registry. -

    -
      -
    1. -

      Use the following commands to pull the required {prodname} images.

      -
    2. - - {`docker pull ${tigeraOperator.registry}/${tigeraOperator.image}:${tigeraOperator.version}\n`} - {componentsWithImage.filter(filters.isNotWindows).map(renderPullCommand).join('')} - - -
    3. -

      - Retag the images with the name of your private registry $PRIVATE_REGISTRY. -

      - - {`docker tag ${tigeraOperator.registry}/${tigeraOperator.image}:${tigeraOperator.version} $PRIVATE_REGISTRY/${tigeraOperator.image}:${tigeraOperator.version}\n`} - {componentsWithImage.filter(filters.isNotWindows).map((component) => { - const registry = mapComponentToRegistry(component); - - return ( - `docker tag ${registry}${component.image}:${component.version} $PRIVATE_REGISTRY/${component.image}:${component.version}\n` - ); - }).join('')} - -
    4. - -
    5. -

      Push the images to your private registry.

      - - {`docker push $PRIVATE_REGISTRY/${tigeraOperator.image}:${tigeraOperator.version}\n`} - {componentsWithImage.filter(filters.isNotWindows).map((component) => { - return `docker push $PRIVATE_REGISTRY/${component.image}:${component.version}\n`; - }).join('')} - - Do not push the private {prodname} images to a public registry. -
    6. -
    7. -

      Use crane cp to copy the Windows images to your private registry.

      -

      For hybrid Linux + Windows clusters, use crane cp on the following Windows images to copy them to your private registry.

      - - {componentsWithImage.filter(filters.isWindows).map((component) => { - const registry = mapComponentToRegistry(component); - const imageName = component.image.split('/').pop(); - - return `crane cp ${registry}${component.image}:${component.version} $PRIVATE_REGISTRY/$IMAGE_PATH/${imageName}:${component.version}\n`; - }).join('')} - - - Do not crane cp the private {prodnameWindows} images to a public registry. -
    8. -
    - - - Run the operator using images from your private registry - -

    - Before applying tigera-operator.yaml, modify registry references to use your custom registry: -

    - {`sed -ie "s?quay.io?$PRIVATE_REGISTRY?g" tigera-operator.yaml`} -

    - Next, ensure that an image pull secret has been configured for your custom registry. Set the environment variable PRIVATE_REGISTRY_PULL_SECRET to the secret name. Then add the image pull secret to the operator deployment spec: -

    - - {`sed -ie "/serviceAccountName: tigera-operator/a \ imagePullSecrets:\\n\ - name: $PRIVATE_REGISTRY_PULL_SECRET" tigera-operator.yaml`} - - {/* The second 'sed' should be removed once operator launches Prometheus & Alertmanager */} -

    - If you are installing Prometheus operator as part of {prodname}, then before applying{' '} - tigera-prometheus-operator.yaml, modify registry references to use your custom registry: -

    - - {`sed -ie "s?quay.io?$PRIVATE_REGISTRY?g" tigera-prometheus-operator.yaml -sed -ie "/serviceAccountName: calico-prometheus-operator/a \ imagePullSecrets:\\n\ - name: $PRIVATE_REGISTRY_PULL_SECRET" tigera-prometheus-operator.yaml`} - - {/* The second 'sed' should be removed once operator launches Prometheus & Alertmanager */} -

    - Before applying custom-resources.yaml, modify registry references to use your custom registry: -

    - sed -ie "s?quay.io?$PRIVATE_REGISTRY?g" custom-resources.yaml - {/* This step should be removed once operator launches Prometheus & Alertmanager */} - - - Configure the operator to use images from your private registry. - -

    - Set the spec.registry field of your Installation resource to the name of your custom registry. For - example: -

    - {`apiVersion: operator.tigera.io/v1 -kind: Installation -metadata: - name: default -spec: - variant: TigeraSecureEnterprise - imagePullSecrets: - - name: tigera-pull-secret - // highlight-next-line - registry: myregistry.com`} - - ); - - function renderPullCommand(component) { - const registry = mapComponentToRegistry(component); - - return `docker pull ${registry}${component.image}:${component.version}\n`; - } - - function mapComponentToRegistry(component) { - if (!component.registry) { - return registry; - } - - return `${component.registry}/`; - } -} - -const filters = { - withImage: (component) => !!component.image, - isWindows: (component) => component.image.includes('-windows'), - isNotWindows: (component) => !filters.isWindows(component), -}; diff --git a/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/ReleaseArchiveTable.js b/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/ReleaseArchiveTable.js deleted file mode 100644 index e302a5a62b..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/ReleaseArchiveTable.js +++ /dev/null @@ -1,38 +0,0 @@ -import React from 'react'; -import variables from '../../variables'; - -export default function ReleaseArchiveTable() { - const releases = variables.releases.map((release) => { - const tigeraOperatorVersion = release['tigera-operator'].version; - const releaseArchiveURL = `https://downloads.tigera.io/ee/archives/release-${release.title}-${tigeraOperatorVersion}.tgz`; - - return { - ...release, - tigeraOperatorVersion, - releaseArchiveURL - }; - }); - - return ( - <> - - - - - - - - - {releases.map((release) => ( - - - - - ))} - -
    Patch versionRelease archive link
    {release.title} - {release.releaseArchiveURL} -
    - - ); -} diff --git a/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/ReqsKernel.js b/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/ReqsKernel.js deleted file mode 100644 index 55b773426e..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/ReqsKernel.js +++ /dev/null @@ -1,69 +0,0 @@ -import React from 'react'; -import Admonition from '@theme/Admonition'; -import Heading from '@theme/Heading'; - -import { prodname } from '../../variables'; - -export default function ReqsKernel() { - return ( - <> - - Kernel Dependencies - - -

    If you are using one of the recommended distributions, you will already satisfy these.

    -
    -

    - Due to the large number of distributions and kernel versions out there, it’s hard to be precise about the names of the particular kernel modules that are required to run {prodname}. However, in general, you’ll need: -

    -
      -
    • -

      - The iptables modules (both the “legacy” and “nft” variants are supported). These are typically - broken up into many small modules, one for each type of match criteria and one for each type of action.{' '} - {prodname} requires: -

      -
        -
      • The “base” modules (including the IPv6 versions if IPv6 is enabled in your cluster).
      • -
      • - At least the following match criteria: set, rpfilter, addrtype,{' '} - comment, conntrack, icmp, tcp, udp,{' '} - ipvs, icmpv6 (if IPv6 is enabled in your kernel), mark,{' '} - multiport, rpfilter, sctp, ipvs (if using - kube-proxy in IPVS mode). -
      • -
      • - At least the following actions: REJECT, ACCEPT, DROP,{' '} - LOG. -
      • -
      -
    • -
    • -

      IP sets support.

      -
    • -
    • -

      Netfilter Conntrack support compiled in (with SCTP support if using SCTP).

      -
    • -
    • -

      - IPVS support if using kube-proxy in IPVS mode. -

      -
    • -
    • -

      - IPIP, VXLAN, Wireguard support, if using {prodname} networking in one of those modes. -

      -
    • -
    • -

      - eBPF (including the tc hook support) and XDP (if you want to use the eBPF data plane). -

      -
    • -
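If you want to sanity-check a node before installing, a rough sketch is below. Module names vary between distributions and some of this functionality may be built into the kernel rather than shipped as a module, so treat a "missing" result as a prompt to investigate rather than proof of a problem:

```bash
# Dry-run modprobe for a few modules Calico commonly relies on.
for mod in ip_set nf_conntrack xt_set xt_addrtype xt_comment ipip vxlan wireguard; do
  if modprobe -n "$mod" >/dev/null 2>&1; then
    echo "ok:      $mod"
  else
    echo "missing: $mod (may be built in, or may need installing)"
  fi
done
```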
    - - ); -} diff --git a/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/ReqsSys.js b/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/ReqsSys.js deleted file mode 100644 index 6a40775ab8..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/ReqsSys.js +++ /dev/null @@ -1,425 +0,0 @@ -import React from 'react'; - -import Admonition from '@theme/Admonition'; -import Link from '@docusaurus/Link'; -import Heading from '@theme/Heading'; -import CodeBlock from '@theme/CodeBlock'; - -import { orchestrators } from '@site/variables'; -import { prodname, baseUrl } from '../../variables'; - -function NodeRequirementsEnt(props) { - return ( - <> - - Node requirements - -
      -
    • -

      x86-64 or arm64 processor with at least 2 cores, 8 GB of RAM, and 20 GB of free disk space

      -
    • -
    • -

      - Linux kernel 5.10 or later with required dependencies. The - following distributions have the required kernel, its dependencies, and are known to work well with{' '} - {prodname} and {props.orch}. -

      -
        - {(props.orch === orchestrators.Kubernetes || props.orch === orchestrators.HostProtection) && ( - <> -
      • Ubuntu 20.04 and 22.04
      • -
      • RHEL 8 and 9
      • -
      • Debian 10
      • -
      • Azure Linux Container Host
      • - - )} - {props.orch === orchestrators.OpenShift && ( - <> -
      • Red Hat Enterprise Linux CoreOS
      • - - )} - {props.orch === orchestrators.OpenStack && ( - <> -
      • Ubuntu 20.04 and 22.04
      • -
      • CentOS 8
      • - - )} -
      -
    • -
    • -

      - If your node is running RHEL 8 or RHEL 9, you must install a specialized policy package before you install {prodname}. - With this package, {prodname} can use SELinux contexts in a series of rules that allow it to interact with persistent and ephemeral data in nonstandard host system locations. -

      -
        -
      • -

        If your node has RHEL 8 installed, then run the following command:

        - - {`dnf install https://downloads.tigera.io/ee/archives/calico-selinux-1.0-1.el8.noarch.rpm`} - -
      • -
      • -

        If your node has RHEL 9 installed, then run the following command:

        - - {`dnf install https://downloads.tigera.io/ee/archives/calico-selinux-1.0-1.el9.noarch.rpm`} - -
      • -
      -
    • -
    • -

      - {prodname} must be able to manage cali* - interfaces on the host. When IPIP is enabled (the default), - {prodname} also needs to be able to manage tunl* - interfaces. When VXLAN is enabled, {prodname} also needs to be able to manage the vxlan.calico{' '} - interface. -

      - -

      - Many Linux distributions, such as most of the above, include NetworkManager. By default, NetworkManager - does not allow - {prodname} to manage interfaces. If your nodes have NetworkManager, complete the steps in{' '} - - Preventing NetworkManager from controlling {prodname} interfaces - {' '} - before installing {prodname}. -

      -
      -
    • -
    • -

      - If your Linux distribution comes with firewalld or another iptables manager installed, it should be disabled, because these tools may interfere with rules added by {prodname} and result in unexpected behavior. -

      - -

      - If a host firewall is needed, it can be configured with {prodname} HostEndpoint and GlobalNetworkPolicy resources. For more information, see Security for hosts. -

      -
      -
    • -
    • -

      - In order to properly run Elasticsearch, nodes must be configured according to the{' '} - - Elasticsearch system configuration documentation. - -

      -
    • -
    • -

      - The Typha autoscaler requires a minimum number of Linux worker nodes based on the total number of schedulable nodes. -

      - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
      Total schedulable nodesRequired Linux nodes for Typha replicas
      11
      22
      33
      up to 2504
      up to 5005
      up to 10006
      up to 15007
      up to 20008
      2000 or more10
      -
    • -
    - - ); -} - -function NetworkRequirementsEnt(props) { - return ( - <> - - Network requirements - -

    - Ensure that your hosts and firewalls allow the necessary traffic based on your configuration. See{' '} - Component architecture to view the following - components. -

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - {props.orch === orchestrators.Kubernetes && ( - <> - - - - - - - - - - - - )} - {props.orch === orchestrators.OpenShift && ( - <> - - - - - - - - - - - - )} - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    ConfigurationHost(s)Port/protocol
    - {prodname} networking options - IP-in-IP (default)Protocol number 4
    BGPTCP 179
    VXLANUDP 4789
    WireguardUDP 51820 (default)
    IPv6 WireguardUDP 51821 (default)
    - Cluster scaling - Any {prodname} networking option above with Typha agents enabledTCP 5473 (default)
    - APIs - Kubernetes API (kube-apiserver) to access Kubernetes API datastoreOften TCP 443 or 6443*
    {prodname} API serverTCP 8080 and 5443 (default)
    - APIs - Kubernetes API (kube-apiserver) to access Kubernetes API datastoreOften TCP 443 or 8443*
    {prodname} API serverTCP 8080 and 5443 (default)
    - Nodes - calico-node (Felix, BIRD, confd)TCP 9090 (default)
    - Component metrics - Prometheus metricsTCP 9081 (default)
    Prometheus BGP metricsTCP 9900 (default)
    Prometheus API serviceTCP 9090 (default)
    Prometheus AlertmanagerTCP 9093 (default)
    - Logs and storage - Elasticsearch with fluentd datastoreTCP 9200 (default)
    Elasticsearch for cloud (ECK)TCP 9443 (default)
    Elasticsearch gatewayTCP 5444 (default)
    - Visibility and troubleshooting - KibanaTCP 5601 (default)
    Packet capture APITCP 8444 (default)
    {prodname} Manager UITCP 9443 (default)
    - Intrusion Detection System (IDS) - {prodname} intrusion detectionTCP 5443 (default)
    - Compliance - {prodname} complianceTCP 5443 (default)
    - Multi-cluster management - Additional port required for Manager UITCP 9449
    - Egress gateway - {prodname} egress gateway UDP 4790
    - {(props.orch === orchestrators.Kubernetes || props.orch === orchestrators.OpenShift) && ( - <> -

    - *{' '} - - The value passed to kube-apiserver using the --secure-port - flag. If you cannot locate this, check the targetPort value returned by   - kubectl get svc kubernetes -o yaml. - -

    - - )} - {props.orch === orchestrators.OpenStack && ( -

    - *{' '} - - If your compute hosts connect directly and don’t use IP-in-IP, you don’t need to allow IP-in-IP traffic. - -

    - )} - - ); -} - -function Privileges(props) { - return ( - <> - - Privilege requirements - -

    - Ensure that {prodname} has the CAP_SYS_ADMIN privilege. -

    -

    - The simplest way to provide the necessary privilege is to run {prodname} as root or in a privileged container. -
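"Privileged" here means the container spec carries a security context like the following; this is a generic Kubernetes illustration with placeholder names, not the actual {prodname} daemon set definition:

```yaml
# Generic illustration of a privileged container spec
containers:
  - name: calico-node
    image: example.com/calico-node:latest   # placeholder image
    securityContext:
      privileged: true   # runs the container with full capabilities, including CAP_SYS_ADMIN
```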

    - {props.orch === orchestrators.Kubernetes && ( - <> -

    - When installed as a Kubernetes daemon set, {prodname} meets this requirement by running as a privileged - container. This requires that the kubelet be allowed to run privileged containers. There are two ways this - can be achieved. -

    -
      -
    • - Specify --allow-privileged on the kubelet (deprecated). -
    • -
    • - Use a{' '} - pod security policy. -
    • -
    - - )} - - ); -} - -export default function ReqsSys(props) { - return ( - <> - - - - - ); -} diff --git a/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/UpgradeOperatorSimple.js b/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/UpgradeOperatorSimple.js deleted file mode 100644 index 0759b11a2c..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/UpgradeOperatorSimple.js +++ /dev/null @@ -1,250 +0,0 @@ -import React from 'react'; -import {Else, If, Then, When} from 'react-if'; - -import Admonition from '@theme/Admonition'; -import CodeBlock from '@theme/CodeBlock'; -import Link from '@docusaurus/Link'; - -import { baseUrl, filesUrl, tmpScriptsURL } from '../../variables'; - -export default function UpgradeOperatorSimple(props) { - return ( - <> -
      - -
    1. -

      - Switch the active operator to the one that will be installed to the new namespace. First, download the - helper script: -

      - curl -L -O {tmpScriptsURL}/scripts/switch-active-operator.sh -

      Then switch the active operator. This will deactivate the currently running operator.

      - - chmod a+x ./switch-active-operator.sh{'\n'} - ./switch-active-operator.sh tigera-operator-enterprise - -
    2. -
      -
    3. -

      Download the new manifests for Tigera Operator.

      - - {props.provider === 'AKS' - ? `curl -L -o tigera-operator.yaml ${filesUrl}/manifests/aks/tigera-operator-upgrade.yaml` - : `curl -L -O ${filesUrl}/manifests/tigera-operator.yaml`} - -
    4. - -
    5. -

      Download the new manifests for Prometheus operator.

      - - If you have an existing Prometheus operator in your cluster that you want to use, skip this step. To work - with Calico Enterprise, your Prometheus operator must be v0.40.0 or higher. - - curl -L -O {filesUrl}/manifests/tigera-prometheus-operator.yaml -
    6. - -
    7. -

      - If you previously{' '} - - installed using a private registry - - , you will need to{' '} - - push the new images{' '} - - and then{' '} - - update the manifest - {' '} - downloaded in the previous step. -

      -
    8. - -
    9. -

      Apply the manifest for Tigera Operator.

      - kubectl apply --server-side --force-conflicts -f tigera-operator.yaml - - - If you intend to update any operator.tigera.io or projectcalico.org resources to use new fields available in the update, make sure you make those changes after applying tigera-operator.yaml. - - -
    10. - -
    11. -

      If you downloaded the manifests for Prometheus operator from the earlier step, then apply them now.

      - - kubectl apply --server-side --force-conflicts -f tigera-prometheus-operator.yaml - -
    12. - - -
    13. -

      Install your pull secret.

      -

      - {' '} - If pulling images directly from quay.io/tigera, you will likely want to use the credentials - provided to you by your Tigera support representative. If using a private registry, use your private - registry credentials instead. -

      - - {`kubectl create secret generic tigera-pull-secret \\ - --type=kubernetes.io/dockerconfigjson -n ${ - props.provider === 'AKS' ? 'tigera-operator-enterprise' : 'tigera-operator' - } \\ - --from-file=.dockerconfigjson=`} - -
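For example, if the pull secret file is at ~/.docker/config.json (the same illustrative location used in the pull-secret step earlier) and the operator runs in the tigera-operator namespace:

```bash
kubectl create secret generic tigera-pull-secret \
  --type=kubernetes.io/dockerconfigjson -n tigera-operator \
  --from-file=.dockerconfigjson=$HOME/.docker/config.json
```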
    14. -
      - - - - -
    15. -

      Download the custom resources manifest.

      - - curl -L -o custom-resources.yaml {filesUrl}/manifests/aks/custom-resources-upgrade-from-calico.yaml - -
    16. - -
    17. -

      - If you are{' '} - - installing using a private registry - - , you will need to update the manifest downloaded in the previous step. Update the spec.registry, spec.imagePath, and spec.imagePrefix fields of the installation resource with the registry name, image path, and image prefix of your private registry. -

      -
    18. - -
    19. -

      - Apply the Tigera custom resources manifest. For more information on configuration options available in this - manifest, see the installation reference. -

      - kubectl apply -f custom-resources.yaml -
    20. -
      - -
    21. -

      - Install the Tigera custom resources. For more information on configuration options available in this - manifest, see the installation reference. -

      - - {props.provider === 'EKS' - ? `kubectl apply -f ${filesUrl}/manifests/eks/custom-resources-upgrade-from-calico.yaml` - : `kubectl apply -f ${filesUrl}/manifests/custom-resources-upgrade-from-calico.yaml`} - -
    22. -
      -
      -
      - - - <> -
    23. -

      If your cluster has OIDC login configured, follow these steps:

      -

      a. Save a copy of your Manager for reference.

      - {'kubectl get manager tigera-secure -o yaml > manager.yaml'} -

      b. Remove the deprecated fields from your Manager resource.

      - {`kubectl patch manager tigera-secure --type merge -p '{"spec": null}'`} -

      - c. If you are currently using v3.2 and are using OIDC with Kibana, verify that you have the following resources in your cluster: -

      - - kubectl get authentication tigera-secure{'\n'} - kubectl get secret tigera-oidc-credentials -n tigera-operator - -

      - If both of these resources are present, you can continue with the next step. Otherwise, use the - instructions to{' '} - - configure an identity provider - {' '} - to configure OIDC. -

      -

      - d. Follow{' '} - - configure an identity provider - - . -

      -
    24. -
    25. -

      - If your cluster is a management cluster using v3.1 or older, apply a{' '} - - ManagementCluster{' '} - - CR to your cluster. -

      - - {`kubectl apply -f - < -
    26. -
    27. -

      - If your cluster is v3.7 or older, apply a new{' '} - Monitor - CR to your cluster. -

      - - {`kubectl apply -f - < -
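The heredoc body was truncated here; the Monitor resource is small, and a sketch of what gets applied looks like this (assuming the default resource name used elsewhere in these docs):

```bash
kubectl apply -f - <<EOF
apiVersion: operator.tigera.io/v1
kind: Monitor
metadata:
  name: tigera-secure
EOF
```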
    28. -
    29. -

      - If your cluster is v3.16 or older, apply a new{' '} - PolicyRecommendation - CR to your cluster. -

      - - {`kubectl apply -f - < -
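Likewise, a sketch of the PolicyRecommendation resource applied in this step (again assuming the default name):

```bash
kubectl apply -f - <<EOF
apiVersion: operator.tigera.io/v1
kind: PolicyRecommendation
metadata:
  name: tigera-secure
EOF
```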
    30. -
    31. -

      You can monitor progress with the following command:

      - watch kubectl get tigerastatus - - If there are any problems you can use kubectl get tigerastatus -o yaml to get more details. - -
    32. -
    33. -

      - If your cluster includes egress gateways, follow the{' '} - - egress gateway upgrade instructions - - . -

      -
    34. - -
      -
    - - ); -} diff --git a/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_calicoctl-version.mdx b/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_calicoctl-version.mdx deleted file mode 100644 index 5ae136c92d..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_calicoctl-version.mdx +++ /dev/null @@ -1,7 +0,0 @@ -| Field | Value | -| ------------------- | ------------------------------------------------------ | -| `Client Version` | Version of `calicoctl` | -| `Build date` | Time and date of `calicoctl` build | -| `Git commit` | Git commit number of `calicoctl` | -| `Cluster Version`\* | Version number of `$[nodecontainer]` and $[prodname] | -| `Cluster Type`\* | Other details about the cluster | diff --git a/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_cli-config-datastore.mdx b/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_cli-config-datastore.mdx deleted file mode 100644 index 74400ed2e3..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_cli-config-datastore.mdx +++ /dev/null @@ -1,97 +0,0 @@ -## Default configuration - -By default, calicoctl will attempt to read from the Kubernetes API using the default kubeconfig located at `$(HOME)/.kube/config`. - -If the default kubeconfig does not exist, or you would like to specify alternative API access information, you can do so using the following configuration options. - -## Complete list of Kubernetes API connection configuration - -| Configuration file option | Environment variable | Description | Schema | -| ------------------------- | -------------------- | --------------------------------------------------------------------------------------------------------- | ---------------------- | -| `datastoreType` | `DATASTORE_TYPE` | Indicates the datastore to use. [Default: `kubernetes`] | `kubernetes`, `etcdv3` | -| `kubeconfig` | `KUBECONFIG` | When using the Kubernetes datastore, the location of a kubeconfig file to use, e.g. /path/to/kube/config. | string | -| `k8sAPIEndpoint` | `K8S_API_ENDPOINT` | Location of the Kubernetes API. Not required if using kubeconfig. [Default: `https://kubernetes-api:443`] | string | -| `k8sCertFile` | `K8S_CERT_FILE` | Location of a client certificate for accessing the Kubernetes API, e.g., `/path/to/cert`. | string | -| `k8sKeyFile` | `K8S_KEY_FILE` | Location of a client key for accessing the Kubernetes API, e.g., `/path/to/key`. | string | -| `k8sCAFile` | `K8S_CA_FILE` | Location of a CA for accessing the Kubernetes API, e.g., `/path/to/ca`. | string | -| `k8sToken` | `K8S_TOKEN` | Token to be used for accessing the Kubernetes API. | string | - -:::note - -The `kubeconfig` file specifies the user whose privileges are used. We recommend -giving only trusted administrators the permission to modify $[prodname] -Custom Resource Definitions. -All environment variables may also be prefixed with `"CALICO_"`, for -example `"CALICO_DATASTORE_TYPE"` and `"CALICO_KUBECONFIG"` etc. may be used. -This is useful if the non-prefixed names clash with existing environment -variables defined on your system. 
- -::: - -## Examples - -### Kubernetes command line - -```bash -DATASTORE_TYPE=kubernetes KUBECONFIG=~/.kube/config calicoctl get nodes -``` - -### Example configuration file - -```yaml -apiVersion: projectcalico.org/v3 -kind: CalicoAPIConfig -metadata: -spec: - datastoreType: 'kubernetes' - kubeconfig: '/path/to/.kube/config' -``` - -### Example using environment variables - -```bash -export DATASTORE_TYPE=kubernetes -export KUBECONFIG=~/.kube/config -calicoctl get workloadendpoints -``` - -And using `CALICO_` prefixed names: - -```bash -export CALICO_DATASTORE_TYPE=kubernetes -export CALICO_KUBECONFIG=~/.kube/config -calicoctl get workloadendpoints -``` - -With multiple `kubeconfig` files: - -```bash -export DATASTORE_TYPE=kubernetes -export KUBECONFIG=~/.kube/main:~/.kube/auxy -calicoctl get --context main workloadendpoints -calicoctl get --context auxy workloadendpoints -``` - -## Checking the configuration - -Here is a simple command to check that the installation and configuration is -correct. - -```bash -calicoctl get nodes -``` - -A correct setup will yield a list of the nodes that have registered. If an -empty list is returned you are either pointed at the wrong datastore or no -nodes have registered. If an error is returned then attempt to correct the -issue then try again. - -## Next steps - -Now you are ready to read and configure most aspects of $[prodname]. You can -find the full list of commands in the -[Command Reference](../../reference/clis/calicoctl/overview.mdx). - -The full list of resources that can be managed, including a description of each, -can be found in the -[Resource Definitions](../../reference/resources/overview.mdx). diff --git a/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_create-kubeconfig.mdx b/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_create-kubeconfig.mdx deleted file mode 100644 index a22ffb6a45..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_create-kubeconfig.mdx +++ /dev/null @@ -1,67 +0,0 @@ -1. Create a service account - - ```bash - SA_NAME=my-host - kubectl create serviceaccount $SA_NAME -n calico-system -o yaml - ``` - -1. Create a secret for the service account - - :::note - - This step is needed if your Kubernetes cluster is version v1.24 or above. Prior to Kubernetes v1.24, this secret is created automatically. - - ::: - - ```bash - kubectl apply -f - < - - clusters: - - cluster: - certificate-authority-data: - server: - name: - - contexts: - - context: - cluster: my-cluster - user: my-host - name: my-host - - current-context: my-cluster - ``` - - Take the cluster information from an already existing kubeconfig. diff --git a/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_default-install.mdx b/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_default-install.mdx deleted file mode 100644 index e458061512..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_default-install.mdx +++ /dev/null @@ -1,10 +0,0 @@ -| **Initial install** | **You get...** | -| ------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| Cluster | A working cluster of $[prodname] nodes with verified connectivity. | -| Networking | Default networking implementation based on your provider and platform.
    Default IP pool/CIDR range. | -| Tiered network policy | Tiered network policy components.
    Existing Kubernetes clusters are put in the default tier.
    $[prodname] components are secured with network policy. | -| User interface | The $[prodname] web console user interface (with a default of “no access outside the cluster”). | -| Logs and data | All nodes configured for log data collection using fluentd. Log storage for a single node.
    One instance of Elasticsearch and Kibana. | -| Threat prevention and detection | Components and dashboards for tracing and blocking suspicious activity. | -| Compliance reporting | Components and dashboards to assess Kubernetes workloads and environments for regulatory compliance to enforce controls, and generate audit and evidence data. | -| Scaling | Built-in node scaling using Typha. | diff --git a/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_determine-ipam.mdx b/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_determine-ipam.mdx deleted file mode 100644 index f44121d82a..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_determine-ipam.mdx +++ /dev/null @@ -1,9 +0,0 @@ -If you are not sure which IPAM your cluster is using, the way to tell depends on install method. - -The IPAM plugin can be queried on the default Installation resource. - -``` -kubectl get installation default -o go-template --template {{.spec.cni.ipam.type}} -``` - -If your cluster is using Calico IPAM, the above command should return a result of `Calico`. diff --git a/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_docker-container-service.mdx b/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_docker-container-service.mdx deleted file mode 100644 index f5bc82f817..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_docker-container-service.mdx +++ /dev/null @@ -1,70 +0,0 @@ -import NonClusterReadOnlyStep from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_non-cluster-read-only-step.mdx'; -import EnvironmentFile from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/EnvironmentFile'; - -This section describes how to run `$[nodecontainer]` as a Docker container. - - - -### Step 2: Create environment file - - - -### Step 3: Configure the init system - -Use an init daemon (like systemd or upstart) to start the $[nodecontainer] image as a service using the EnvironmentFile values. - -Sample systemd service file: `$[noderunning].service` - -```shell -[Unit] -Description=$[noderunning] -After=docker.service -Requires=docker.service - -[Service] -EnvironmentFile=/etc/calico/calico.env -ExecStartPre=-/usr/bin/docker rm -f $[noderunning] -ExecStart=/usr/bin/docker run --net=host --privileged \ - --name=$[noderunning] \ - -e NODENAME=${CALICO_NODENAME} \ - -e IP=${CALICO_IP} \ - -e IP6=${CALICO_IP6} \ - -e CALICO_NETWORKING_BACKEND=${CALICO_NETWORKING_BACKEND} \ - -e AS=${CALICO_AS} \ - -e NO_DEFAULT_POOLS=${NO_DEFAULT_POOLS} \ - -e DATASTORE_TYPE=${DATASTORE_TYPE} \ - -e KUBECONFIG=${KUBECONFIG} \ - -v /var/log/calico:/var/log/calico \ - -v /var/lib/calico:/var/lib/calico \ - -v /var/run/calico:/var/run/calico \ - -v /run/docker/plugins:/run/docker/plugins \ - -v /lib/modules:/lib/modules \ - -v /etc/pki:/pki \ - $[registry]$[componentImage.cnxNode] /bin/calico-node -felix - -ExecStop=-/usr/bin/docker stop $[noderunning] - -Restart=on-failure -StartLimitBurst=3 -StartLimitInterval=60s - -[Install] -WantedBy=multi-user.target -``` - -Upon start, the systemd service: - -- Confirms Docker is installed under the `[Unit]` section -- Gets environment variables from the environment file above -- Removes existing `$[nodecontainer]` container (if it exists) -- Starts `$[nodecontainer]` - -The script also stops the `$[nodecontainer]` container when the service is stopped. 
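As a minimal usage sketch (assuming the unit file above is saved as `/etc/systemd/system/$[noderunning].service` and the environment file exists at `/etc/calico/calico.env`), you would typically reload systemd, then enable and start the service:

```bash
# Reload systemd so it picks up the new unit file
sudo systemctl daemon-reload

# Start the service now and have it start again at boot
sudo systemctl enable --now $[noderunning].service

# Confirm the container came up and inspect its logs
sudo systemctl status $[noderunning].service
sudo docker logs $[noderunning]
```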
- -:::note - -Depending on how you've installed Docker, the name of the Docker service -under the `[Unit]` section may be different (such as `docker-engine.service`). -Be sure to check this before starting the service. - -::: diff --git a/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_domain-names.mdx b/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_domain-names.mdx deleted file mode 100644 index 0c62a87bc4..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_domain-names.mdx +++ /dev/null @@ -1,16 +0,0 @@ -When a configured domain name has no wildcard (`*`), it matches exactly that domain name. For example: - -- `microsoft.com` -- `tigera.io` - -With a single asterisk in any part of the domain name, it matches 1 or more path components at that position. For example: - -- `*.google.com` matches `www.google.com` and `www.ipv6.google.com`, but not `google.com` -- `www.*.com` matches `www.sun.com` and `www.apple.com`, but not `www.com` -- `update.*.mycompany.com` matches `update.tools.mycompany.com`, `update.secure.suite.mycompany.com`, and so on - -**Not** supported are: - -- Multiple wildcards in the same domain, for example: `*.*.mycompany.com` -- Asterisks that are not the entire component, for example: `www.g*.com` -- More general wildcards, such as regular expressions diff --git a/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_ebpf-value.mdx b/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_ebpf-value.mdx deleted file mode 100644 index fe2072d88f..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_ebpf-value.mdx +++ /dev/null @@ -1,12 +0,0 @@ -The eBPF data plane mode has several advantages over standard Linux networking pipeline mode: - -- It scales to higher throughput. -- It uses less CPU per GBit. -- It has native support for Kubernetes services (without needing kube-proxy) that: - - - Reduces first packet latency for packets to services. - - Preserves external client source IP addresses all the way to the pod. - - Supports DSR (Direct Server Return) for more efficient service routing. - - Uses less CPU than kube-proxy to keep the data plane in sync. - -To learn more and see performance metrics from our test environment, see the blog, [Introducing the Calico eBPF data plane](https://www.projectcalico.org/introducing-the-calico-ebpf-dataplane/). diff --git a/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_endpointport.mdx b/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_endpointport.mdx deleted file mode 100644 index 05dde33612..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_endpointport.mdx +++ /dev/null @@ -1,15 +0,0 @@ -An EndpointPort associates a name with a particular TCP/UDP/SCTP port of the endpoint, allowing it to -be referenced as a named port in [policy rules](../../reference/resources/networkpolicy.mdx#entityrule). - -| Field | Description | Accepted Values | Schema | Default | -| -------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------- | ------ | ------- | -| name | The name to attach to this port, allowing it to be referred to in [policy rules](../../reference/resources/networkpolicy.mdx#entityrule). Names must be unique within an endpoint. | | string | | -| protocol | The protocol of this named port. 
| `TCP`, `UDP`, `SCTP` | string | | -| port | The workload port number. | `1`-`65535` | int | | - -:::note - -On their own, EndpointPort entries don't result in any change to the connectivity of the port. -They only have an effect if they are referred to in policy. - -::: diff --git a/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_entityrule.mdx b/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_entityrule.mdx deleted file mode 100644 index d12a16b6d4..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_entityrule.mdx +++ /dev/null @@ -1,82 +0,0 @@ -import DomainNames from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_domain-names.mdx'; - -Entity rules specify the attributes of the source or destination of a packet that must match for the rule as a whole -to match. Packets can be matched on combinations of: - -- Identity of the source/destination, by using [Selectors](#selectors) or by specifying a particular - Kubernetes `Service`. Selectors can match [workload endpoints](../../reference/resources/workloadendpoint.mdx), - [host endpoints](../../reference/resources/hostendpoint.mdx) and ([namespaced](../../reference/resources/networkset.mdx) or - [global](../../reference/resources/globalnetworkset.mdx)) network sets. -- Source/destination IP address, protocol and port. - -If the rule contains multiple match criteria (for example, an IP and a port) then all match criteria must match -for the rule as a whole to match a packet. - -| Field | Description | Accepted Values | Schema | Default | -| ----------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------- | ------------------------------------------- | ------- | -| nets | Match packets with IP in any of the listed CIDRs. | List of valid IPv4 CIDRs or list of valid IPv6 CIDRs (IPv4 and IPv6 CIDRs shouldn't be mixed in one rule) | list of cidrs | -| notNets | Negative match on CIDRs. Match packets with IP not in any of the listed CIDRs. | List of valid IPv4 CIDRs or list of valid IPv6 CIDRs (IPv4 and IPv6 CIDRs shouldn't be mixed in one rule) | list of cidrs | -| selector | Positive match on selected endpoints. If a `namespaceSelector` is also defined, the set of endpoints this applies to is limited to the endpoints in the selected namespaces. | Valid selector | [selector](#selector) | | -| notSelector | Negative match on selected endpoints. If a `namespaceSelector` is also defined, the set of endpoints this applies to is limited to the endpoints in the selected namespaces. | Valid selector | [selector](#selector) | | -| namespaceSelector | Positive match on selected namespaces. If specified, only workload endpoints in the selected Kubernetes namespaces are matched. Matches namespaces based on the labels that have been applied to the namespaces. 
Defines the scope that selectors will apply to, if not defined then selectors apply to the NetworkPolicy's namespace. Match a specific namespace by name using the `projectcalico.org/name` label. Select the non-namespaced resources like GlobalNetworkSet(s), host endpoints to which this policy applies by using `global()` selector. | Valid selector | [selector](#selector) | | -| ports | Positive match on the specified ports | | list of [ports](#ports) | | -| domains | Positive match on [domain names](#exact-and-wildcard-domain-names). | List of [exact or wildcard domain names](#exact-and-wildcard-domain-names) | list of strings | -| notPorts | Negative match on the specified ports | | list of [ports](#ports) | | -| serviceAccounts | Match endpoints running under service accounts. If a `namespaceSelector` is also defined, the set of service accounts this applies to is limited to the service accounts in the selected namespaces. | | [ServiceAccountMatch](#serviceaccountmatch) | | -| services | Match the specified service(s). If specified on egress rule destinations, no other selection criteria can be set. If specified on ingress rule sources, only positive or negative matches on ports can be specified. | | [ServiceMatch](#servicematch) | | - -:::note - -You cannot mix IPv4 and IPv6 CIDRs in a single rule using `nets` or `notNets`. If you need to match both, create 2 rules. - -::: - -#### Selector performance in EntityRules - -When rendering policy into the data plane, $[prodname] must identify the endpoints that match the selectors -in all active rules. This calculation is optimized for certain common selector types. -Using the optimized selector types reduces CPU usage (and policy rendering time) by orders of magnitude. -This becomes important at high scale (hundreds of active rules, hundreds of thousands of endpoints). - -The optimized operators are as follows: - -- `label == "value"` -- `label in { 'v1', 'v2' }` -- `has(label)` -- ` && ` is optimized if **either** `` or `` is - optimized. - -The following perform like `has(label)`. All endpoints with the label will be scanned to find matches: - -- `label contains 's'` -- `label starts with 's'` -- `label ends with 's'` - -The other operators, and in particular, `all()`, `!`, `||` and `!=` are not optimized. - -Examples: - -- `a == 'b'` - optimized -- `a == 'b' && has(c)` - optimized -- `a == 'b' || has(c)` - **not** optimized due to use of `||` -- `c != 'd'` - **not** optimized due to use of `!=` -- `!has(a)` - **not** optimized due to use of `!` -- `a == 'b' && c != 'd'` - optimized, `a =='b'` is optimized so `a == 'b' && ` is optimized. -- `c != 'd' && a == 'b'` - optimized, `a =='b'` is optimized so ` && a == 'b'` is optimized. - -### Exact and wildcard domain names - -The `domains` field is only valid for egress Allow rules. It restricts the -rule to apply only to traffic to one of the specified domains. If this field is specified, the -parent [Rule](#rule)'s `action` must be `Allow`, and `nets` and `selector` must both be left empty. - - - -:::note - -$[prodname] implements policy for domain names by learning the -corresponding IPs from DNS, then programming rules to allow those IPs. This means that -if multiple domain names A, B and C all map to the same IP, and there is domain-based -policy to allow A, traffic to B and C will be allowed as well. 
- -::: diff --git a/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_httpmatch.mdx b/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_httpmatch.mdx deleted file mode 100644 index 613bb97c3d..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_httpmatch.mdx +++ /dev/null @@ -1,23 +0,0 @@ -An HTTPMatch matches attributes of an HTTP request. The presence of an HTTPMatch clause on a Rule will cause that rule to only match HTTP traffic. Other application layer protocols will not match the rule. - -Example: - -```yaml -http: - methods: ['GET', 'PUT'] - paths: - - exact: '/projects/calico' - - prefix: '/users' -``` - -| Field | Description | Schema | -| ------- | -------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------- | -| methods | Match HTTP methods. Case sensitive. [Standard HTTP method descriptions.](https://www.w3.org/Protocols/rfc2616/rfc2616-sec9.html) | list of strings | -| paths | Match HTTP paths. Case sensitive. | list of [HTTPPathMatch](#httppathmatch) | - -### HTTPPathMatch - -| Syntax | Example | Description | -| ------ | ------------------- | ------------------------------------------------------------------------------- | -| exact | `exact: "/foo/bar"` | Matches the exact path as written, not including the query string or fragments. | -| prefix | `prefix: "/keys"` | Matches any path that begins with the given prefix. | diff --git a/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_icmp.mdx b/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_icmp.mdx deleted file mode 100644 index 1adb456472..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_icmp.mdx +++ /dev/null @@ -1,4 +0,0 @@ -| Field | Description | Accepted Values | Schema | Default | -| ----- | ------------------- | -------------------- | ------- | ------- | -| type | Match on ICMP type. | Can be integer 0-254 | integer | -| code | Match on ICMP code. | Can be integer 0-255 | integer | diff --git a/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_ipnat.mdx b/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_ipnat.mdx deleted file mode 100644 index 9cfa2fb904..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_ipnat.mdx +++ /dev/null @@ -1,6 +0,0 @@ -IPNAT contains a single NAT mapping for a WorkloadEndpoint resource. - -| Field | Description | Accepted Values | Schema | Default | -| ---------- | ------------------------------------------- | ------------------ | ------ | ------- | -| internalIP | The internal IP address of the NAT mapping. | A valid IP address | string | | -| externalIP | The external IP address. | A valid IP address | string | | diff --git a/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_license.mdx b/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_license.mdx deleted file mode 100644 index 930ba1df31..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_license.mdx +++ /dev/null @@ -1,30 +0,0 @@ -### How long does it take to get a new $[prodname] license? - -After you submit a sales purchase order to Tigera, 1-2 days. - -### Is there a grace period? - -Yes, there is a grace period of 30 days (as of April 2022). - -### Does the web console display license expiration? - -Yes. 
The license indicator in the web console (top right banner) turns red when the license expires. - -![expiration](/img/calico-enterprise/expiration.png) - -### What happens when a license expires or is invalid? - -:::caution - -Although users can still log in to the web console, your deployment is no longer operational. All policy enforcement stops, except for policies in the default tier. In most cases, you will experience broken connectivity (depending on your policies in the default tier). $[prodname] stops reporting flow logs, DNS logs, and $[prodname] metrics, which affects other UI elements like Service Graph and dashboards. Although some elements may appear to work, actions are not saved, and you should regard your deployment as non-functional. We recommend that you proactively manage your license to avoid disruption. - -::: - -### Do licenses cover free upgrades in $[prodname]? - -Yes. - -### How do I get information about my license? Monitor the expiration date? - -- [Prometheus!](../../operations/monitor/metrics/license-agent.mdx). -- Use `kubectl` to get [license key information](../../reference/resources/licensekey.mdx#viewing-information-about-your-license-key) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_non-cluster-binary-install.mdx b/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_non-cluster-binary-install.mdx deleted file mode 100644 index f575d514e8..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_non-cluster-binary-install.mdx +++ /dev/null @@ -1,149 +0,0 @@ -import NonClusterReadOnlyStep from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_non-cluster-read-only-step.mdx'; -import EnvironmentFile from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/EnvironmentFile'; - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - - - -### Step 2: Download and extract the binary - -This step requires Docker, but it can be run from any machine with Docker installed. It doesn't have to be the host you will run it on (i.e your laptop is fine). - -1. Use the following command to download the $[nodecontainer] image. - - ```bash - docker pull $[registry]$[componentImage.cnxNode] - ``` - -1. Confirm that the image has loaded by typing `docker images`. - - ``` - REPOSITORY TAG IMAGE ID CREATED SIZE - $[registry]$[releases.0.components.cnx-node.image] $[releases.0.components.cnx-node.version] e07d59b0eb8a 2 minutes ago 42MB - ``` - -1. Create a temporary $[nodecontainer] container. - - ```bash - docker create --name container $[registry]$[componentImage.cnxNode] - ``` - -1. Copy the calico-node binary from the container to the local file system. - - ```bash - docker cp container:/bin/calico-node $[nodecontainer] - ``` - -1. Delete the temporary container. - - ```bash - docker rm container - ``` - -1. Set the extracted binary file to be executable. - - ```bash - chmod +x $[nodecontainer] - chown root:root $[nodecontainer] - ``` - -### Step 3: Copy the `calico-node` binary - -Copy the binary from Step 2 to the target machine, using any means (`scp`, `ftp`, USB stick, etc.). - -### Step 4: Create environment file - - - -### Step 5: Start Felix - -There are a few ways to start Felix: create a startup script, or manually configure Felix. - - - - -Felix should be started at boot by your init system and the init system -**must** be configured to restart Felix if it stops. 
Felix relies on -that behavior for certain configuration changes. - -If your distribution uses systemd, then you could use the following unit file: - -```bash -[Unit] -Description=Calico Felix agent -After=syslog.target network.target - -[Service] -User=root -EnvironmentFile=/etc/calico/calico.env -ExecStartPre=/usr/bin/mkdir -p /var/run/calico -ExecStart=/usr/local/bin/$[nodecontainer] -felix -KillMode=process -Restart=on-failure -LimitNOFILE=32000 - -[Install] -WantedBy=multi-user.target -``` - -Or, for upstart: - -```bash -description "Felix (Calico agent)" -author "Project Calico Maintainers " - -start on stopped rc RUNLEVEL=[2345] -stop on runlevel [!2345] - -limit nofile 32000 32000 - -respawn -respawn limit 5 10 - -chdir /var/run - -pre-start script - mkdir -p /var/run/calico - chown root:root /var/run/calico -end script - -exec /usr/local/bin/$[nodecontainer] -felix -``` - -**Start Felix** - -After you've configured Felix, start it via your init system. - -```bash -service calico-felix start -``` - - - - -Configure Felix by creating a file at `/kubernetes/calico/felix.cfg`. -See [Felix configuration](../../reference/component-resources/node/felix/configuration.mdx) for help with environment variables. - -:::note - -Felix tries to detect whether IPv6 is available on your platform but -the detection can fail on older (or more unusual) systems. If Felix -exits soon after startup with `ipset` or `iptables` errors try -setting the `Ipv6Support` setting to `false`. - -::: - -Next, configure Felix to interact with a Kubernetes datastore. You -must set the `DatastoreType` setting to `kubernetes`. You must also set the environment variable `CALICO_KUBECONFIG` -to point to a valid kubeconfig for your kubernetes cluster and `CALICO_NETWORKING_BACKEND` to `none`. - -:::note - -For the Kubernetes datastore, Felix works in policy-only mode. Even though pod networking is -disabled on the baremetal host Felix is running on, policy can still be used to secure the host. - -::: - - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_non-cluster-read-only-step.mdx b/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_non-cluster-read-only-step.mdx deleted file mode 100644 index 95b7b72361..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_non-cluster-read-only-step.mdx +++ /dev/null @@ -1,21 +0,0 @@ -import CreateKubeconfig from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_create-kubeconfig.mdx'; - -### Step 1: (Optional) Configure access for the non-cluster-host - -To run Calico Node as a container, it will need a kubeconfig. You can skip this step if you already have a kubeconfig ready to use. - - - -Run the following two commands to create a cluster role with read-only access and a corresponding cluster role binding. - -```bash -kubectl apply -f $[filesUrl]/manifests/non-cluster-host-clusterrole.yaml -kubectl create clusterrolebinding $SA_NAME --serviceaccount=calico-system:$SA_NAME --clusterrole=non-cluster-host-read-only -``` - -:::note - -We include examples for systemd, but the commands can be -applied to other init daemons such as upstart. 
- -::: diff --git a/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_persistent-storage-terms.mdx b/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_persistent-storage-terms.mdx deleted file mode 100644 index 5f0038ef6d..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_persistent-storage-terms.mdx +++ /dev/null @@ -1,17 +0,0 @@ -### Persistent volume - -Used by pods to persist storage within the cluster. Combined with **persistent volume claims**, pods can persist data across restarts and rescheduling. - -### Persistent volume claim - -Used by pods to request and mount storage volumes. The claim specifies the volume requirements for the request: size, access rights, and storage class. - -### Dynamic provisioner - -Provisions types of persistent volumes on demand. Although most managed public-cloud clusters provide a dynamic provisioner using cloud-specific storage APIs (for example, Amazon EBS or Google persistent disks), not all clusters have a dynamic provisioner. - -When a pod makes a persistent volume claim from a storage class that uses a dynamic provisioner, the volume is automatically created. If the storage class does not use a dynamic provisioner (for example the local storage class), the volumes must be created in advance. For help, see the [Kubernetes documentation](https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/). - -### Storage class - -The storage provided by the cluster. Storage classes can be used with dynamic provisioners to automatically provision persistent volumes on demand, or with manually-provisioned persistent volumes. Different storage classes provide different service levels. diff --git a/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_ports.mdx b/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_ports.mdx deleted file mode 100644 index 4517f59290..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_ports.mdx +++ /dev/null @@ -1,33 +0,0 @@ -$[prodname] supports the following syntaxes for expressing ports. - -| Syntax | Example | Description | -| --------- | ---------- | ------------------------------------------------------------------- | -| int | 80 | The exact (numeric) port specified | -| start:end | 6040:6050 | All (numeric) ports within the range start ≤ x ≤ end | -| string | named-port | A named port, as defined in the ports list of one or more endpoints | - -An individual numeric port may be specified as a YAML/JSON integer. A port range or -named port must be represented as a string. For example, this would be a valid list of ports: - -```yaml -ports: [8080, '1234:5678', 'named-port'] -``` - -#### Named ports - -Using a named port in an `EntityRule`, instead of a numeric port, gives a layer of indirection, -allowing for the named port to map to different numeric values for each endpoint. - -For example, suppose you have multiple HTTP servers running as workloads; some exposing their HTTP -port on port 80 and others on port 8080. In each workload, you could create a named port called -`http-port` that maps to the correct local port. Then, in a rule, you could refer to the name -`http-port` instead of writing a different rule for each type of server. - -:::note - -Since each named port may refer to many endpoints (and $[prodname] has to expand a named port into -a set of endpoint/port combinations), using a named port is considerably more expensive in terms -of CPU than using a simple numeric port. 
We recommend that they are used sparingly, only where -the extra indirection is required. - -::: diff --git a/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_rule.mdx b/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_rule.mdx deleted file mode 100644 index bf7042888f..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_rule.mdx +++ /dev/null @@ -1,46 +0,0 @@ -A single rule matches a set of packets and applies some action to them. When multiple rules are specified, they -are executed in order. - -| Field | Description | Accepted Values | Schema | Default | -| ----------- | ------------------------------------------ | ------------------------------------------------------------ | ----------------------------- | ------- | -| metadata | Per-rule metadata. | | [RuleMetadata](#rulemetadata) | | -| action | Action to perform when matching this rule. | `Allow`, `Deny`, `Log`, `Pass` | string | | -| protocol | Positive protocol match. | `TCP`, `UDP`, `ICMP`, `ICMPv6`, `SCTP`, `UDPLite`, `1`-`255` | string \| integer | | -| notProtocol | Negative protocol match. | `TCP`, `UDP`, `ICMP`, `ICMPv6`, `SCTP`, `UDPLite`, `1`-`255` | string \| integer | | -| icmp | ICMP match criteria. | | [ICMP](#icmp) | | -| notICMP | Negative match on ICMP. | | [ICMP](#icmp) | | -| ipVersion | Positive IP version match. | `4`, `6` | integer | | -| source | Source match parameters. | | [EntityRule](#entityrule) | | -| destination | Destination match parameters. | | [EntityRule](#entityrule) | | -| http | Match HTTP request parameters. Application layer policy must be enabled to use this field.| | [HTTPMatch](#httpmatch) | | - -After a `Log` action, processing continues with the next rule; `Allow` and `Deny` are immediate -and final and no further rules are processed. - -An `action` of `Pass` in a `NetworkPolicy` or `GlobalNetworkPolicy` will skip over the remaining policies and jump to the -first [profile](../../reference/resources/profile.mdx) assigned to the endpoint, applying the policy configured in the -profile; if there are no Profiles configured for the endpoint the default applied action is `Deny`. - -### RuleMetadata - -Metadata associated with a specific rule (rather than the policy as a whole). The contents of the metadata does not affect how a rule is interpreted or enforced; it is -simply a way to store additional information for use by operators or applications that interact with $[prodname]. - -| Field | Description | Schema | Default | -| ----------- | ----------------------------------- | ----------------------- | ------- | -| annotations | Arbitrary non-identifying metadata. | map of string to string | | - -Example: - -```yaml -metadata: - annotations: - app: database - owner: devops -``` - -Annotations follow the -[same rules as Kubernetes for valid syntax and character set](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/#syntax-and-character-set). - -On Linux with the iptables data plane, rule annotations are rendered as comments in the form `-m comment --comment "="` on the iptables rule(s) that correspond -to the $[prodname] rule. 
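For illustration only (the selectors, port, and annotation values below are made up, not part of this include), a complete rule combining several of the fields above might look like this inside a policy's `egress` list:

```yaml
egress:
  - action: Allow
    protocol: TCP
    metadata:
      annotations:
        owner: devops
    source:
      selector: app == 'backend'
    destination:
      selector: app == 'database'
      ports: [5432]
```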
diff --git a/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_selector-scopes.mdx b/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_selector-scopes.mdx deleted file mode 100644 index 9d9fbc8c54..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_selector-scopes.mdx +++ /dev/null @@ -1,20 +0,0 @@ -Understanding scopes and the `all()` and `global()` operators: selectors have a scope of resources -that they are matched against, which depends on the context in which they are used. For example: - -- The `nodeSelector` in an `IPPool` selects over `Node` resources. - -- The top-level selector in a `NetworkPolicy` selects over the workloads _in the same namespace_ as the - `NetworkPolicy`. -- The top-level selector in a `GlobalNetworkPolicy` doesn't have the same restriction, it selects over all endpoints - including namespaced `WorkloadEndpoint`s and non-namespaced `HostEndpoint`s. - -- The `namespaceSelector` in a `NetworkPolicy` (or `GlobalNetworkPolicy`) _rule_ selects over the labels on namespaces - rather than workloads. - -- The `namespaceSelector` determines the scope of the accompanying `selector` in the entity rule. If no `namespaceSelector` - is present then the rule's `selector` matches the default scope for that type of policy. (This is the same namespace - for `NetworkPolicy` and all endpoints/network sets for `GlobalNetworkPolicy`) -- The `global()` operator can be used (only) in a `namespaceSelector` to change the scope of the main `selector` to - include non-namespaced resources such as [GlobalNetworkSet](../../reference/resources/globalnetworkset.mdx). - This allows namespaced `NetworkPolicy` resources to refer to global non-namespaced resources, which would otherwise - be impossible. diff --git a/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_selectors.mdx b/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_selectors.mdx deleted file mode 100644 index 52baf960ae..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_selectors.mdx +++ /dev/null @@ -1,50 +0,0 @@ -A label selector is an expression which either matches or does not match a resource based on its labels. - -$[prodname] label selectors support a number of operators, which can be combined into larger expressions -using the boolean operators and parentheses. - -| Expression | Meaning | -| ------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| **Logical operators** | -| `( )` | Matches if and only if `` matches. (Parentheses are used for grouping expressions.) | -| `! ` | Matches if and only if `` does not match. **Tip:** `!` is a special character at the start of a YAML string, if you need to use `!` at the start of a YAML string, enclose the string in quotes. | -| ` && ` | "And": matches if and only if both ``, and, `` matches | -| \ || \ | "Or": matches if and only if either ``, or, `` matches. | -| **Match operators** | -| `all()` | Match all in-scope resources. To match _no_ resources, combine this operator with `!` to form `!all()`. | -| `global()` | Match all non-namespaced resources. Useful in a `namespaceSelector` to select global resources such as global network sets. | -| `k == 'v'` | Matches resources with the label 'k' and value 'v'. 
| -| `k != 'v'` | Matches resources without label 'k' or with label 'k' and value _not_ equal to `v` | -| `has(k)` | Matches resources with label 'k', independent of value. To match pods that do not have label `k`, combine this operator with `!` to form `!has(k)` | -| `k in { 'v1', 'v2' }` | Matches resources with label 'k' and value in the given set | -| `k not in { 'v1', 'v2' }` | Matches resources without label 'k' or with label 'k' and value _not_ in the given set | -| `k contains 's'` | Matches resources with label 'k' and value containing the substring 's' | -| `k starts with 's'` | Matches resources with label 'k' and value starting with the substring 's' | -| `k ends with 's'` | Matches resources with label 'k' and value ending with the substring 's' | - -Operators have the following precedence: - -- **Highest**: all the match operators -- Parentheses `( ... )` -- Negation with `!` -- Conjunction with `&&` -- **Lowest**: Disjunction with `||` - -For example, the expression - -``` -! has(my-label) || my-label starts with 'prod' && role in {'frontend','business'} -``` - -Would be "bracketed" like this: - -``` -((!(has(my-label)) || ((my-label starts with 'prod') && (role in {'frontend','business'})) -``` - -It would match: - -- Any resource that did not have label "my-label". -- Any resource that both: - - Has a value for `my-label` that starts with "prod", and, - - Has a role label with value either "frontend", or "business". diff --git a/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_serviceaccountmatch.mdx b/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_serviceaccountmatch.mdx deleted file mode 100644 index c3aff9c184..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_serviceaccountmatch.mdx +++ /dev/null @@ -1,6 +0,0 @@ -A ServiceAccountMatch matches service accounts in an EntityRule. - -| Field | Description | Schema | -| -------- | ------------------------------- | --------------------- | -| names | Match service accounts by name | list of strings | -| selector | Match service accounts by label | [selector](#selector) | diff --git a/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_servicematch.mdx b/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_servicematch.mdx deleted file mode 100644 index 2d47fed02c..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_servicematch.mdx +++ /dev/null @@ -1,6 +0,0 @@ -A ServiceMatch matches a service in an EntityRule. - -| Field | Description | Schema | -| --------- | ------------------------ | ------ | -| name | The service's name. | string | -| namespace | The service's namespace. | string | diff --git a/calico-enterprise_versioned_docs/version-3.19-2/about/calico-product-editions.mdx b/calico-enterprise_versioned_docs/version-3.19-2/about/calico-product-editions.mdx deleted file mode 100644 index 2d14b64cd4..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/about/calico-product-editions.mdx +++ /dev/null @@ -1,95 +0,0 @@ ---- -description: Describes Tigera products and provides a feature comparison table. ---- - -import { CheckIcon } from '@chakra-ui/icons'; - -# Tigera product comparison - -## Calico Open Source -The base product that comprises both Calico Enterprise and Calico Cloud. It provides the core networking and network policy features. 
- -![calico-open-source](/img/calico/calico-open-source.svg) - -## Calico Enterprise - -Includes the Calico Open Source core networking and network policy, but adds advanced features for networking, network policy, visibility and troubleshooting, threat defense, and compliance reports. - -![calico-enterprise](/img/calico/calico-enterprise.svg) - -## Calico Cloud - -The SaaS version of Calico Enterprise. It adds Image Assurance to scan and detect vulnerabilities in images, and container threat defense to detect malware. It also adds onboarding tutorials, and eliminates the cost to manage Elasticsearch logs and storage that comes with Calico Enterprise. - -![calico-cloud](/img/calico/calico-cloud.svg) - -## Best fit - -What is the best fit for you? It depends on your needs. The following table provides a high-level comparison. - -| Product | Cost and support | Best fit | -| ------------------------- | ------------------------------------------------------------ | ------------------------------------------------------------ | -| Calico Open Source | Free, community-supported | **Users** who want best-in-class networking and network policy capabilities for Kubernetes without any costs. | -| Calico Enterprise | Paid subscription | **Enterprise teams** who need full control to customize their networking security deployment to meet regulatory and compliance requirements for Kubernetes at scale. Teams who want Tigera Customer Support for day-zero to production best practices, custom training and workshops, and Solution Architects to customize solutions. | -| Calico Cloud | Free trial with hands-on training from Customer Support, then pay-as-you-go with self-service training. Also offered as an annual subscription. | **Small teams** who need to manage the full spectrum of compliance in a web-based console for novice users:
    - Secure clusters, pods, and applications
    - Scan images for vulnerabilities
    - Web-based UI for visibility to troubleshoot Kubernetes
    - Detect and mitigate threats
    - Run compliance reports

    **Enterprise teams** who want to scale their Calico Enterprise on-premises deployments by providing more self-service to developers. |
-## Product comparison by feature
-
-| | Calico Open Source | Calico Cloud | Calico Enterprise |
-| ------------------------------------------------------------ | ------------------------ | ------------------------ | ------------------------ |
-| **Networking** | | | |
-| High-performance, scalable pod networking | <CheckIcon /> | <CheckIcon /> | <CheckIcon /> |
-| Advanced IP address management | <CheckIcon /> | <CheckIcon /> | <CheckIcon /> |
-| Direct infrastructure peering without the overlay | <CheckIcon /> | <CheckIcon /> | <CheckIcon /> |
-| Dual ToR peering | | <CheckIcon /> | <CheckIcon /> |
-| Egress gateway | | <CheckIcon /> | <CheckIcon /> |
-| Multiple Calico networks on a pod | | <CheckIcon /> | <CheckIcon /> |
-| **Apps, pods, clusters** | | | |
-| Seamless support with Kubernetes network policy | <CheckIcon /> | <CheckIcon /> | <CheckIcon /> |
-| Label-based (identity-aware) policy | <CheckIcon /> | <CheckIcon /> | <CheckIcon /> |
-| Namespace and cluster-wide scope | <CheckIcon /> | <CheckIcon /> | <CheckIcon /> |
-| Global default deny policy design | <CheckIcon /> | <CheckIcon /> | <CheckIcon /> |
-| Application layer policy | <CheckIcon /> | <CheckIcon /> | <CheckIcon /> |
-| Policy for services | <CheckIcon /> | <CheckIcon /> | <CheckIcon /> |
-| Web UI | | <CheckIcon /> | <CheckIcon /> |
-| Onboarding tutorials and lab cluster | | <CheckIcon /> | |
-| DNS/FQDN-based policy | | <CheckIcon /> | <CheckIcon /> |
-| Hierarchical tiered network policy | | <CheckIcon /> | <CheckIcon /> |
-| Policy recommendations | | <CheckIcon /> | <CheckIcon /> |
-| Preview and staged network policy | | <CheckIcon /> | <CheckIcon /> |
-| Policy integration for third-party firewalls | | <CheckIcon /> | <CheckIcon /> |
-| Network sets to limit IP ranges for egress and ingress traffic to workloads | <CheckIcon /> | <CheckIcon /> | <CheckIcon /> |
-| **Data** | | | |
-| Data-in-transit encryption for pod traffic using WireGuard | <CheckIcon /> | <CheckIcon /> | <CheckIcon /> |
-| SIEM integration | | <CheckIcon /> | <CheckIcon /> |
-| **Non-cluster hosts** | <CheckIcon /> | <CheckIcon /> | <CheckIcon /> |
-| Restrict traffic to/from hosts and VMs using network policy | <CheckIcon /> | | <CheckIcon /> |
-| Automatic host endpoints | <CheckIcon /> | | <CheckIcon /> |
-| Secure Kubernetes nodes with host endpoints managed by Calico | <CheckIcon /> | <CheckIcon /> | <CheckIcon /> |
-| Apply policy to host-forwarded traffic | <CheckIcon /> | <CheckIcon /> | <CheckIcon /> |
-| **Data plane** | | | |
-| eBPF | <CheckIcon /> | <CheckIcon /> | <CheckIcon /> |
-| iptables | <CheckIcon /> | <CheckIcon /> | <CheckIcon /> |
-| Windows HNS | <CheckIcon /> | <CheckIcon /> | <CheckIcon /> |
-| VPP | <CheckIcon /> | | |
-| **Observability and troubleshooting** | | | |
-| Application level observability and troubleshooting | | <CheckIcon /> | <CheckIcon /> |
-| Service Graph | | <CheckIcon /> | <CheckIcon /> |
-| Packet capture | | <CheckIcon /> | <CheckIcon /> |
-| Elasticsearch logs (flow, l7, audit, bgp, dns, events) | | <CheckIcon /> | <CheckIcon /> |
-| Alerts | | <CheckIcon /> | <CheckIcon /> |
-| Kibana DNS dashboards | | <CheckIcon /> | <CheckIcon /> |
-| Traffic Flow Visualizer | | <CheckIcon /> | <CheckIcon /> |
-| **Multi-cluster management** | | <CheckIcon /> | <CheckIcon /> |
-| Multi-cluster management | | | <CheckIcon /> |
-| Federated identity and services | | <CheckIcon /> | <CheckIcon /> |
-| **Threat defense** | | | |
-| Container threat detection | | <CheckIcon /> | |
-| Workload-centric Web Application Firewall (WAF) | | <CheckIcon /> | <CheckIcon /> |
-| Honeypods to see intruder activity | | <CheckIcon /> | <CheckIcon /> |
-| Add threatfeeds to trace suspicious network flows | | <CheckIcon /> | <CheckIcon /> |
-| **Reports** | | | |
-| Compliance reports | | <CheckIcon /> | <CheckIcon /> |
-| CIS benchmark reports | | <CheckIcon /> | <CheckIcon /> |
-| **Monitor Calico components** | | | |
-| Prometheus | <CheckIcon /> | <CheckIcon /> | <CheckIcon /> |
    | \ No newline at end of file diff --git a/calico-enterprise_versioned_docs/version-3.19-2/about/index.mdx b/calico-enterprise_versioned_docs/version-3.19-2/about/index.mdx deleted file mode 100644 index 03cf870c82..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/about/index.mdx +++ /dev/null @@ -1,49 +0,0 @@ ---- -description: A high-level description of Calico Enterprise. ---- - -# About Calico Enterprise - -## What is $[prodname]? - -$[prodname] is a security solution with full-stack observability for cloud-native applications running on containers and Kubernetes. Built upon the Calico CNI and network policy, $[prodname] works across all multi-cloud and hybrid environments with any combination of VMs, containers, Kubernetes, cloud instances, hosts, and bare metal servers. - -![calico-enterprise](/img/calico/calico-enterprise.svg) - -## Best fit - -The best fit for $[prodname] is **enterprise teams** who need full control to customize their networking security deployment to meet regulatory and compliance requirements for Kubernetes at scale. - -## Key features - -| Feature | Highlights | -| -------------------------------------------------- | ------------------------------------------------------------ | -| Web UI for observability and troubleshooting
    | *Observability*
    • Single UI for all enterprise teams to observe traffic, troubleshoot logs, get alerts, manage policy lifecycle (preview, stage, enforce), and generate compliance reports.
    • Service Graph to visualize traffic to/from a cluster
    • Dashboards | -| | *Troubleshooting*
    • Elasticsearch logs (flow, L7, DNS, audit) with workload identity context
    • Packet capture
    • SIEM integration (Syslog, Splunk, or Amazon S3) | -| Threat defense | • Global alerts
    • Workload-based Web Application Firewall (WAF)
    • Deep packet inspection (DPI) on selected workloads | -| Multi-cluster management | • Unified management plane to manage clusters and workloads running on different infrastructures and using different Kubernetes distributions
    • Federated endpoints for policy-writing efficiency
    • Federated services to extend and automate endpoint sharing
    • Federated Prometheus metrics | -| Logs and compliance reports | • Out-of-the-box support for PCI DSS, SOC 2, HIPAA, GDPR, NIST, and custom frameworks
    • Out-of-the-box CIS benchmarks for Kubernetes compliance reports
    • Pre-defined and custom compliance reports for audit reporting (on-demand or scheduled)
    • Auditor-ready cluster compliance history | -| Advanced Calico networking | • WireGuard pod-to-pod and host-to-host encryption
    • Egress gateways to identify the source of traffic at the namespace or pod level when it leaves a Kubernetes cluster to communicate to external resources to avoid opening up a larger set of IP addresses.
    • Dual top-of-rack (ToR) peering for redundant, active-active network path for business-critical cluster applications (for example, streaming and AI/ML applications) | -| Advanced Calico networking policy | • Policy recommendations to isolate namespaces with network policy
    • Tiered policy
    • Stage and preview impacts on traffic before enforcing policy
    • Network sets to reuse and scale sets of IP addresses used in policies
    • DNS policy
    • Application layer policy with Envoy as daemonset
    • Auto host endpoints
    • Policy integration with Fortinet and AWS firewalls | - -For a detailed list of $[prodname] features, see [Tigera product comparison](../about/calico-product-editions.mdx) - -## Going into production with $[prodname] - -It is not easy navigating the cultural shifts that come with adopting Kubernetes. Tigera's **Customer Success** has spent many years working with enterprise companies in highly-regulated industries to understand the sticking points that stall going into production. Common hurdles seen during pre-production are: - -- Troubleshooting in Kubernetes across teams (cluster and pod failures, apps failures, and security breaches/attacks) -- Writing policy with granular security controls for workloads -- Ensuring security team requirements are met while allowing developer self-service with guardrails -- Implementing compliance controls - -Tigera's Customer Success has invested heavily in custom and self-service training to address these obstacles. Guided by their best-practices-to-production workflows, you can keep progressing and join the growing list of companies who are in production with $[prodname]. - -- [Services, education, and support](https://www.tigera.io/customer-success/) -- To get a personalized demo based on your requirements, upgrade from Calico Open Source, partner with us, or if you have a question, see [Contact us](https://tigera.io/contact) - -## Need more info? - -- [Calico Enterprise support and compatibility](../getting-started/compatibility.mdx) -- To install $[prodname] on a cluster in 15 minutes, see [Quickstart](../getting-started/install-on-clusters/kubernetes/quickstart.mdx) -- To see if Calico Cloud is a better fit, start a [Calico Cloud Free Tier trial](https://www.calicocloud.io/home) \ No newline at end of file diff --git a/calico-enterprise_versioned_docs/version-3.19-2/compliance/compliance-reports-cis.mdx b/calico-enterprise_versioned_docs/version-3.19-2/compliance/compliance-reports-cis.mdx deleted file mode 100644 index 8f3d67b336..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/compliance/compliance-reports-cis.mdx +++ /dev/null @@ -1,188 +0,0 @@ ---- -description: Configure reports to assess compliance for all assets in a Kubernetes cluster. ---- - -# Configure CIS benchmark reports - -## Big picture - -Use the $[prodname] Kubernetes CIS benchmark report to assess compliance for all assets in a Kubernetes cluster. - -## Value - -A standard requirement for an organization’s security and compliance posture is to assess your Kubernetes clusters against CIS benchmarks. The $[prodname] Kubernetes CIS benchmark report provides this comprehensive view into your Kubernetes clusters while strengthening your threat detection capability by looking beyond networking data. - -## Concepts - -### Default settings and configuration - -During $[prodname] installation, each node starts a pod named, `compliance-benchmarker`. A preconfigured Kubernetes CIS benchmark report is generated every hour. You can view the report in **Compliance**, **Compliance Reports**, download it to .csv format. - -To schedule the CIS benchmark report or change settings, use the **global report** resource. Global reports are configured as YAML files and are applied using `kubectl`. - -### Best practices - -We recommend that you review the CIS benchmark best practices for securing cluster component configurations here: [CIS benchmarks downloads](https://learn.cisecurity.org/benchmarks). 
- -## Before you begin - -**Limitations** - -CIS benchmarks runs only on nodes where $[prodname] is running. This limitation may exclude control plane nodes in some managed cloud platforms (AKS, EKS, GKE). Because the user has limited control over installation of control plane nodes in managed cloud platforms, these reports may have limited use for cloud users. - -## How to - -- [Configure and schedule CIS benchmark reports](#configure-and-schedule-cis-benchmark-reports) -- [View report generation status](#view-report-generation-status) -- [Review and address CIS benchmark results](#review-and-address-cis-benchmark-results) -- [Manually run reports](#manually-run-reports) -- [Troubleshooting](#troubleshooting) - -### Configure and schedule CIS benchmark reports - -Verify that the `compliance-benchmarker` is running and the `cis-benchmark` report type is installed. - -```bash -kubectl get -n tigera-compliance daemonset compliance-benchmarker -kubectl get globalreporttype cis-benchmark -``` - -In the following example, we use a **GlobalReport** with CIS benchmark fields to schedule and filter results. The report is scheduled to run at midnight of the next day (in UTC), and the benchmark items 1.1.4 and 1.2.5 will be omitted from the results. - -| **Fields** | **Description** | -| -------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| schedule | The start and end time of the report using [crontab format](https://en.wikipedia.org/wiki/Cron). To allow for archiving, reports are generated approximately 30 minutes after the end time. A single report is limited to a maximum of two per hour. | -| highThreshold | **Optional**. Integer percentage value that determines the lower limit of passing tests to consider a node as healthy. Default: 100 | -| medThreshold | **Optional**. Integer percentage value that determines the lower limit of passing tests to consider a node as unhealthy. Default: 50 | -| includeUnscoredTests | **Optional**. Boolean value that when false, applies a filter to exclude tests that are marked as “Unscored” by the CIS benchmark standard. If true, the tests will be included in the report. Default: true | -| numFailedTests | **Optional**. Integer value that sets the number of tests to display in the Top-failed Tests section of the CIS benchmark report. Default: 5 | -| resultsFilter | **Optional**. An include or exclude filter to apply on the test results that will appear on the report. | - -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalReport -metadata: - name: daily-cis-results - labels: - deployment: production -spec: - reportType: cis-benchmark - schedule: 0 0 * * * - cis: - highThreshold: 100 - medThreshold: 50 - includeUnscoredTests: true - numFailedTests: 5 - resultsFilters: - - benchmarkSelection: { kubernetesVersion: '1.13' } - exclude: ['1.1.4', '1.2.5'] -``` - -### View report generation status - -To view the status of a report, you must use the `kubectl` command. For example: - -```bash -kubectl get globalreports.projectcalico.org daily-cis-results -o yaml -``` - -In a report, the job status types are: - -- **lastScheduledReportJob**: - The most recently scheduled job for generating the report. Because reports are scheduled in order, the “end time” of - this report will be the “start time” of the next scheduled report. 
-- **activeReportJobs**: - Default = allows up to 5 concurrent report generation jobs. -- **lastFailedReportJobs**: - Default = keeps the 3 most recent failed jobs and deletes older ones. A single report generation job will be retried - up to 6 times (by default) before it is marked as failed. -- **lastSuccessfulReportJobs**: - Default = keeps the 2 most recent successful jobs and deletes older ones. - -#### Change the default report generation time - -By default, reports are generated 30 minutes after the end of the report, to ensure all of the audit data is archived. -(However, this gap does not affect the data collected “start/end time” for a report.) - -You can adjust the time for audit data for cases like initial report testing, to demo a report, or when manually -creating a report that is not counted in global report status. - -To change the delay, go to the installation manifest, and uncomment and set the environment variable -`TIGERA_COMPLIANCE_JOB_START_DELAY`. Specify value as a [Duration string][parse-duration]. - -### Review and address CIS benchmark results - -We recommend the following approach to CIS benchmark reports results: - -1. Download the Kubernetes CIS benchmarks and export your full CIS benchmark results in .csv format. -1. In the compliance dashboard, review the "Top-Failed Tests" section to identify which tests are the most problematic. -1. Cross-reference the top-failed tests to identify which nodes are failing that test. -1. Look up those tests in the [Kubernetes benchmark document](https://downloads.cisecurity.org/#/) and follow the remediation steps to resolve the failure. -1. Discuss with your infrastructure and security team if this remediation is viable within your organization. -1. If so, update your nodes with the fix and ensure that the test passes on the next generation of the report. -1. If the fix is not viable but is an acceptable risk to take within the organization, configure the report specification to exclude that test index so that it no longer appears in the report. -1. If the fix is not viable and not an acceptable risk to take on, keep the failing test within the report so that your team is reminded to address the issue as soon as possible. - -### Manually run reports - -You can manually run reports at any time. For example, run a manual report: - -- To specify a different start/end time -- If a scheduled report fails - -$[prodname] GlobalReport schedules Kubernetes Jobs which create a single-run pod to generate a report and store it in Elasticsearch. Because you need to run manual reports as a pod, you need higher permissions: allow `create` access for pods in namespace `tigera-compliance` using the `tigera-compliance-reporter` service account. - -To manually run a report: - -1. Download the pod template corresponding to your installation method. - **Operator** - - For management and standalone clusters: - - ```bash - curl -O $[filesUrl]/manifests/compliance-reporter-pod.yaml - ``` - - For managed clusters: - - ```bash - curl $[filesUrl]/manifests/compliance-reporter-pod-managed.yaml -o compliance-reporter-pod.yaml - ``` - -1. Edit the template as follows: - - - Edit the pod name if required. - - If you are using your own docker repository, update the container image name with your repo and image tag. - - Set the following environments according to the instructions in the downloaded manifest: - - `TIGERA_COMPLIANCE_REPORT_NAME` - - `TIGERA_COMPLIANCE_REPORT_START_TIME` - - `TIGERA_COMPLIANCE_REPORT_END_TIME` - -1. 
Apply the updated manifest, and query the status of the pod to ensure it completes. - Upon completion, the report is available in the web console. - - ```bash - # Apply the compliance report pod - kubectl apply -f compliance-reporter-pod.yaml - # Query the status of the pod - kubectl get pod -n=tigera-compliance - ``` - -:::note - -Manually-generated reports do not appear in GlobalReport status. - -::: - -### Troubleshooting - -**Problem**: Compliance reports can fail to generate if the `compliance-benchmarker` component cannot find the required `kubelet` or `kubectl` binaries to determine the Kubernetes version running on the cluster. - -**Solution or workaround**: If a node is running within a container (not running `kubelet` as a binary), make sure the `kubectl` binary is available in the `/usr/bin` directory. - -## Additional resources - -- For details on configuring and scheduling reports, see [Global reports](../reference/resources/globalreport.mdx) -- For other predefined compliance reports, see [Compliance reports](../reference/resources/compliance-reports/index.mdx) - -[parse-duration]: https://golang.org/pkg/time/#ParseDuration diff --git a/calico-enterprise_versioned_docs/version-3.19-2/compliance/encrypt-cluster-pod-traffic.mdx b/calico-enterprise_versioned_docs/version-3.19-2/compliance/encrypt-cluster-pod-traffic.mdx deleted file mode 100644 index 48fd5063c7..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/compliance/encrypt-cluster-pod-traffic.mdx +++ /dev/null @@ -1,251 +0,0 @@ ---- -description: Enable WireGuard for state-of-the-art cryptographic security between pods for Calico Enterprise clusters. ---- - -# Encrypt data in transit - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - -## Big picture - -Enable WireGuard to secure on the wire in-cluster pod traffic in a $[prodname] cluster. - -## Value - -When this feature is enabled, $[prodname] automatically creates and manages WireGuard tunnels between nodes providing transport-level security for inter-node, in-cluster pod traffic. WireGuard provides [formally verified](https://www.wireguard.com/formal-verification/) secure and [performant tunnels](https://www.wireguard.com/performance/) without any specialized hardware. For a deep dive in to WireGuard implementation, see this [white paper](https://www.wireguard.com/papers/wireguard.pdf). - -## Concepts - -### About WireGuard - -$[prodname] supports encryption for both inter-node pod traffic, and inter-node, host-network traffic. Because $[prodname] is not implemented using a sidecar, traffic is not encrypted for the full journey from one pod to another; traffic is only encrypted on the host-to-host portion of the journey. Though there is unencrypted traffic between the host-to-pod portion of the journey, attackers cannot easily intercept this traffic. To intercept the unencrypted traffic, they would need root access to the node. - -$[prodname] supports WireGuard encryption for both IPv4 and IPv6 traffic. 
You can enable traffic independently using parameters in the FelixConfiguration resource: - -- `wireguardEnabled` - enables encrypting IPv4 traffic over an IPv4 underlay network -- `wireguardEnabledV6` - enables encrypting IPv6 traffic over an IPv6 underlay network - -## Before you begin - -**Terminology** - - - Inter-node pod traffic: Traffic leaving a pod from one node destined to a pod on another node - - Inter-node, host-network traffic: traffic generated by the node itself or a host-networked-pod destined to another node or host-networked-pod - - Same-node pod traffic: Traffic between pods on the same node - -**Supported encryption** - -- Inter-node pod traffic: IPv4 only -- Inter-node, host-network traffic, IPv4/IPv6: supported only on managed clusters deployed on EKS and AKS - -**Unsupported** - -- Encrypted same-node pod traffic -- GKE -- Using your own custom keys to encrypt traffic - -**Required** - -- On all nodes in the cluster that you want to participate in $[prodname] encryption, verify that the operating system(s) on the nodes are [installed with WireGuard](https://www.wireguard.com/install/). - - :::note - - Some node operating systems do not support WireGuard, or do not have it installed by default. Enabling $[prodname] WireGuard encryption does not require all nodes to be installed with WireGuard. However, traffic to or from a node that does not have WireGuard installed, will not be encrypted. - - ::: - -- IP addresses for every node in the cluster. This is required to establish secure tunnels between the nodes. $[prodname] can automatically do this using [IP autodetection methods](../networking/ipam/ip-autodetection.mdx). - -## How to - -- [Install WireGuard](#install-wireguard) -- [Enable WireGuard for a cluster](#enable-wireguard-for-a-cluster) -- [Verify encryption is enabled](#verify-encryption-is-enabled) -- [Enable WireGuard statistics](#enable-wireguard-statistics) -- [View WireGuard statistics](#view-wireguard-statistics) -- [Disable WireGuard for an individual node](#disable-wireguard-for-an-individual-node) -- [Disable WireGuard for a cluster](#disable-wireguard-for-a-cluster) - -### Install WireGuard - -WireGuard is included in Linux 5.6+ kernels, and has been backported to earlier Linux kernels in some Linux distributions. - -Install WireGuard on cluster nodes using [instructions for your operating system](https://www.wireguard.com/install/). Note that you may need to reboot your nodes after installing WireGuard to make the kernel modules available on your system. - -Use the following instructions for these platforms that are not listed on the WireGuard installation page, before proceeding to [enabling WireGuard](#enable-wireguard-for-a-cluster). - - - - -To install WireGuard on the default Amazon Machine Image (AMI): - -```bash -sudo yum install kernel-devel-`uname -r` -y -sudo yum install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm -y -sudo curl -o /etc/yum.repos.d/jdoss-wireguard-epel-7.repo https://copr.fedorainfracloud.org/coprs/jdoss/wireguard/repo/epel-7/jdoss-wireguard-epel-7.repo -sudo yum install wireguard-dkms wireguard-tools -y -``` - - - - -AKS cluster nodes run Ubuntu with a kernel that has WireGuard installed already, so there is no manual installation required. - - - - -To install WireGuard for OpenShift: - -1. 
Install requirements: - - - [CoreOS Butane](https://coreos.github.io/butane/getting-started/) - - [Openshift CLI](https://docs.openshift.com/container-platform/4.2/cli_reference/openshift_cli/getting-started-cli.html) - -1. Download and configure the tools needed for kmods. - -```bash -FAKEROOT=$(mktemp -d) -git clone https://github.com/tigera/kmods-via-containers -cd kmods-via-containers -make install FAKEROOT=${FAKEROOT} -cd .. -git clone https://github.com/tigera/kvc-wireguard-kmod -cd kvc-wireguard-kmod -make install FAKEROOT=${FAKEROOT} -cd .. -``` - -1. Configure/edit `${FAKEROOT}/root/etc/kvc/wireguard-kmod.conf`. - - a. You must then set the URLs for the `KERNEL_CORE_RPM`, `KERNEL_DEVEL_RPM` and `KERNEL_MODULES_RPM` packages in the conf file `$FAKEROOT/etc/kvc/wireguard-kmod.conf`. Obtain copies for `kernel-core`, `kernel-devel`, and `kernel-modules` rpms from [RedHat Access](https://access.redhat.com/downloads/content/package-browser) and host it in an http file server that is reachable by your OCP workers. - - b. For help configuring `kvc-wireguard-kmod/wireguard-kmod.conf` and WireGuard version to kernel version compatibility, see the [kvc-wireguard-kmod README file](https://github.com/tigera/kvc-wireguard-kmod#quick-config-variables-guide). - -1. Get RHEL Entitlement data from your own RHEL8 system from a host in your cluster. - - ```bash - tar -czf subs.tar.gz /etc/pki/entitlement/ /etc/rhsm/ /etc/yum.repos.d/redhat.repo - ``` - -1. Copy the `subs.tar.gz` file to your workspace and then extract the contents using the following command. - - ```bash - tar -x -C ${FAKEROOT}/root -f subs.tar.gz - ``` - -1. Transpile your machine config using [CoreOS Butane](https://coreos.github.io/butane/getting-started/). - - ```bash - cd kvc-wireguard-kmod - make ignition FAKEROOT=${FAKEROOT} > mc-wg.yaml - ``` - -1. With the KUBECONFIG set for your cluster, run the following command to apply the MachineConfig which will install WireGuard across your cluster. - ```bash - oc create -f mc-wg.yaml - ``` - - - - -### Enable WireGuard for a cluster - -Enable IPv4 WireGuard encryption across all the nodes using the following command. - -```bash -kubectl patch felixconfiguration default --type='merge' -p '{"spec":{"wireguardEnabled":true}}' -``` - -Enable IPv6 WireGuard encryption across all the nodes using the following command. - -```bash -kubectl patch felixconfiguration default --type='merge' -p '{"spec":{"wireguardEnabledV6":true}}' -``` - -To enable both IPv4 and IPv6 WireGuard encryption across all the nodes, use the following command. - -```bash -kubectl patch felixconfiguration default --type='merge' -p '{"spec":{"wireguardEnabled":true,"wireguardEnabledV6":true}}' -``` - -For OpenShift, add the Felix configuration with WireGuard enabled [under custom resources](../getting-started/install-on-clusters/openshift/installation.mdx#provide-additional-configuration). - -:::note - -The above command can be used to change other WireGuard attributes. For a list of other WireGuard parameters and configuration evaluation, see the [Felix configuration](../reference/resources/felixconfig.mdx#felix-configuration-definition). - -::: - -We recommend that you review and modify the MTU used by $[prodname] networking when WireGuard is enabled to increase network performance. Follow the instructions in the [Configure MTU to maximize network performance](../networking/configuring/mtu.mdx) guide to set the MTU to a value appropriate for your network. 
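-
-As a concrete illustration, the following sketch sets an explicit MTU for the WireGuard interface using the same patch mechanism shown above. The `wireguardMTU` field and the value `1440` (a 1500-byte underlay minus roughly 60 bytes of WireGuard IPv4 overhead) are assumptions for this example; consult the MTU guide linked above for the value that fits your network.
-
-```bash
-# Illustrative only: set the WireGuard interface MTU explicitly.
-# 1440 assumes a 1500-byte underlay MTU and ~60 bytes of WireGuard (IPv4) overhead.
-kubectl patch felixconfiguration default --type='merge' -p '{"spec":{"wireguardMTU":1440}}'
-```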
-
-### Verify encryption is enabled
-
-To verify that the nodes are configured for WireGuard encryption, check the node status set by Felix using `kubectl`. For example:
-
-```bash
-kubectl get node -o yaml
-...
-kind: Node
-metadata:
-  annotations:
-    projectcalico.org/WireguardPublicKey: jlkVyQYooZYzI2wFfNhSZez5eWh44yfq1wKVjLvSXgY=
-...
-```
-
-### Enable WireGuard statistics
-
-Since v3.11.1, WireGuard statistics are enabled automatically by the WireGuard settings described above; no separate step is required.
-
-### View WireGuard statistics
-
-To view WireGuard statistics in the web console, you must turn them on in the Dashboard. From the left navbar, click **Dashboard**, and then click the Layout Settings icon.
-
-![Wireguard Dashboard Toggle](/img/calico-enterprise/wireguard/stats-toggle.png)
-
-#### WireGuard Dashboard toggle
-
-When viewing WireGuard statistics, you might wonder why the charts in the web console Dashboard show more ingress traffic than egress when all of the traffic stays within the cluster. The charts can show a difference of about 1% for the following reasons:
-
-- Sampling time. The statistics are generated a few microseconds apart.
-- Packet loss. If a node resends a lost packet, the sending node counts the packet twice, but the receiver counts it only once.
-- Averaging/smoothing. The statistics are smoothed out over a few seconds.
-
-### Disable WireGuard for an individual node
-
-To disable WireGuard on a specific node with WireGuard installed, modify the node-specific Felix configuration. For example, to turn off encryption for pod traffic on node `my-node`, use the following command. This command disables WireGuard for both IPv4 and IPv6; adjust it if you want to disable only one IP version:
-
-```bash
-cat <<EOF | kubectl apply -f -
-apiVersion: projectcalico.org/v3
-kind: FelixConfiguration
-metadata:
-  name: node.my-node
-spec:
-  wireguardEnabled: false
-  wireguardEnabledV6: false
-EOF
-```
-
-### Disable WireGuard for a cluster
-
-To disable WireGuard across all nodes, set the same parameters back to `false` on the default Felix configuration:
-
-```bash
-kubectl patch felixconfiguration default --type='merge' -p '{"spec":{"wireguardEnabled":false,"wireguardEnabledV6":false}}'
-```
\ No newline at end of file
diff --git a/calico-enterprise_versioned_docs/version-3.19-2/compliance/overview.mdx b/calico-enterprise_versioned_docs/version-3.19-2/compliance/overview.mdx
deleted file mode 100644
index a714a650c0..0000000000
--- a/calico-enterprise_versioned_docs/version-3.19-2/compliance/overview.mdx
+++ /dev/null
@@ -1,374 +0,0 @@
----
-description: Get the reports for regulatory compliance on Kubernetes workloads and environments.
----
-
-# Schedule and run compliance reports
-
-## Big picture
-
-Schedule and run compliance reports to assess Kubernetes workloads and environments for regulatory compliance.
-
-## Value
-
-Compliance tools that rely on periodic snapshots do not provide accurate assessments of Kubernetes workloads against your compliance standards. The $[prodname] compliance dashboard and reports provide a complete inventory of regulated workloads, along with evidence of enforcement of network controls for these workloads. Additionally, audit reports are available to see changes to any network security controls.
- -## Concepts - -### Compliance reports at a glance - -Compliance report are based on archived flow logs and audit logs for all of your $[prodname] resources, plus any audit logs you've configured for Kubernetes resources in the Kubernetes API server: - -- Pods -- Host endpoints -- Service accounts -- Namespaces -- Kubernetes service endpoints -- Global network sets -- Calico and Kubernetes network policies -- Global network policies - -Compliance reports provide the following high-level information: - -- **Protection** - - - Endpoints explicitly protected using ingress or egress policy - - Endpoints with Envoy enabled - -- **Policies and services** - - - Policies and services associated with endpoints - - Policy audit logs - -- **Traffic** - - Allowed ingress/egress traffic to/from namespaces - - Allowed ingress/egress traffic to/from the internet - -![compliance-reporting](/img/calico-enterprise/compliance-reporting.png) - -## Before you begin - -**Unsupported** - -- AKS -- GKE -- OpenShift -- TKG - -**Required** - -- Ensure that all nodes in your Kubernetes clusters are time-synchronized using NTP or similar (for accurate audit log timestamps) - -- [Configure audit logs for Kubernetes resources](../observability/elastic/audit-overview.mdx) - - You must configure audit logs for Kubernetes resources through the Kubernetes API to get a complete view of all resources. - -## How To - -- [Configure report permissions](#configure-report-permissions) -- [Configure and schedule reports](#configure-and-schedule-reports) -- [View report generation status](#view-report-generation-status) -- [Run reports](#run-reports) - -### Configure report permissions - -Report permissions are granted using the standard Kubernetes RBAC based on ClusterRole and ClusterRoleBindings. The following table outlines the required RBAC verbs for each resource type for a specific user actions. - -| **Action** | **globalreporttypes** | **globalreports** | **globalreports/status** | -| ------------------------------------------------------- | ------------------------------- | --------------------------------- | ------------------------ | -| Manage reports (create/modify/delete) | | \* | get | -| View status of report generation through kubectl | | get | get | -| List the generated reports and summary status in the UI | | list + get (for required reports) | | -| Export the generated reports from the UI | get (for the particular report) | get (for required reports) | | - -The following sample manifest creates RBAC for three users: Paul, Candice and David. - -- Paul has permissions to create/modify/delete the report schedules and configuration, but does not have permission to export generated reports from the UI. -- Candice has permissions to list and export generated reports from the UI, but cannot modify the report schedule or configuration. -- David has permissions to list and export generated `dev-inventory` reports from the UI, but cannot list or download other reports, nor modify the report - schedule or configuration. 
- -```yaml -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: tigera-compliance-manage-report-config -rules: - - apiGroups: ['projectcalico.org'] - resources: ['globalreports'] - verbs: ['*'] - - apiGroups: ['projectcalico.org'] - resources: ['globalreports/status'] - verbs: ['get', 'list', 'watch'] - ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: tigera-compliance-manage-report-config -subjects: - - kind: User - name: paul - apiGroup: rbac.authorization.k8s.io -roleRef: - kind: ClusterRole - name: tigera-compliance-manage-report-config - apiGroup: rbac.authorization.k8s.io - ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: tigera-compliance-list-download-all-reports -rules: - - apiGroups: ['projectcalico.org'] - resources: ['globalreports'] - verbs: ['get', 'list'] - - apiGroups: ['projectcalico.org'] - resources: ['globalreporttypes'] - verbs: ['get'] - ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: tigera-compliance-list-download-all-reports -subjects: - - kind: User - name: candice - apiGroup: rbac.authorization.k8s.io -roleRef: - kind: ClusterRole - name: tigera-compliance-list-download-all-reports - apiGroup: rbac.authorization.k8s.io - ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: tigera-compliance-list-download-dev-inventory -rules: - - apiGroups: ['projectcalico.org'] - resources: ['globalreports'] - verbs: ['list'] - - apiGroups: ['projectcalico.org'] - resources: ['globalreports'] - verbs: ['get'] - resourceNames: ['dev-inventory'] - - apiGroups: ['projectcalico.org'] - resources: ['globalreporttypes'] - verbs: ['get'] - resourceNames: ['dev-inventory'] - ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: tigera-compliance-list-download-dev-inventory -subjects: - - kind: User - name: david - apiGroup: rbac.authorization.k8s.io -roleRef: - kind: ClusterRole - name: tigera-compliance-list-download-dev-inventory - apiGroup: rbac.authorization.k8s.io -``` - -### Configure and schedule reports - -To configure and schedule a compliance report, create a [GlobalReport](../reference/resources/globalreport.mdx) with the following information. - -| **Fields** | **Description** | -| --------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| name | Unique name for your report. | -| reportType | One of the following predefined report types: `inventory`, `network-access`, `policy-audit`. | -| schedule | The start and end time of the report using [crontab format](https://en.wikipedia.org/wiki/Cron). To allow for archiving, reports are generated approximately 30 minutes after the end time. A single report is limited to a maximum of two per hour. | -| endpoints | **Optional**. For inventory and network-access reports, specifies the endpoints to include in the report. For the policy-audit report, restricts audit logs to include only policies that apply to the selected endpoints. If not specified, the report includes all endpoints and audit logs. | -| jobNodeSelector | **Optional**. Limits report generation jobs to specific nodes. | -| suspend | **Optional**. Suspends report generation. 
All in-flight reports will complete, and future scheduled reports are suspended. | - -:::note - -GlobalReports can only be configured using kubectl (not calicoctl); and they cannot be edited in the Tigera -Secure EE the web console. - -::: - -The following sections provide sample schedules for the predefined reports. - -### Weekly reports, all endpoints - -The following report schedules weekly inventory reports for _all_ endpoints. The jobs that create the reports will run -on the infrastructure nodes (e.g. nodetype == 'infrastructure'). - -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalReport -metadata: - name: weekly-full-inventory -spec: - reportType: inventory - schedule: 0 0 * * 0 - jobNodeSelector: - nodetype: infrastructure -``` - -### Daily reports, selected endpoints - -The following report schedules daily inventory reports for production endpoints (e.g. deployment == ‘production’). - -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalReport -metadata: - name: daily-production-inventory -spec: - reportType: inventory - endpoints: - selector: deployment == 'production' - schedule: 0 0 * * * -``` - -### Hourly reports, endpoints in named namespaces - -The following report schedules hourly network-access reports for the accounts department endpoints, that are -specified using the namespace names: **payable**, **collections** and **payroll**. - -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalReport -metadata: - name: hourly-accounts-networkaccess -spec: - reportType: network-access - endpoints: - namespaces: - names: ['payable', 'collections', 'payroll'] - schedule: 0 * * * * -``` - -### Daily reports, endpoints in selected namespaces - -The following report schedules daily network-access reports for the accounts department with endpoints specified using -a namespace selector. - -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalReport -metadata: - name: daily-accounts-networkaccess -spec: - reportType: network-access - endpoints: - namespaces: - selector: department == 'accounts' - schedule: 0 0 * * * -``` - -### Monthly reports, endpoints for named service accounts in named namespaces - -The following schedules monthly audit reports. The audited policy is restricted to policy that applies to -widgets/controller endpoints specified by the namespace **widgets** and service account **controller**. - -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalReport -metadata: - name: monthly-widgets-controller-tigera-policy-audit -spec: - reportType: policy-audit - schedule: 0 0 1 * * - endpoints: - serviceAccounts: - names: ['controller'] - namespaces: - names: ['widgets'] -``` - -### View report generation status - -To view the status of a report, you must use the `kubectl` command. For example: - -```bash -kubectl get globalreports.projectcalico.org daily-inventory.p -o yaml -``` - -In a report, the job status types are: - -- **lastScheduledReportJob**: - The most recently scheduled job for generating the report. Because reports are scheduled in order, the “end time” of - this report will be the “start time” of the next scheduled report. -- **activeReportJobs**: - Default = allows up to 5 concurrent report generation jobs. -- **lastFailedReportJobs**: - Default = keeps the 3 most recent failed jobs and deletes older ones. A single report generation job will be retried - up to 6 times (by default) before it is marked as failed. -- **lastSuccessfulReportJobs**: - Default = keeps the 2 most recent successful jobs and deletes older ones. 
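-
-If you only need a quick summary instead of the full YAML, you can pull individual status fields with `jsonpath`. The following is a sketch that assumes a report named `daily-production-inventory` (as in the example above) and that the status fields listed above are populated on your cluster:
-
-```bash
-# Show the most recent successful report generation jobs
-kubectl get globalreports.projectcalico.org daily-production-inventory \
-  -o jsonpath='{.status.lastSuccessfulReportJobs}{"\n"}'
-
-# Show any report generation jobs that are currently active
-kubectl get globalreports.projectcalico.org daily-production-inventory \
-  -o jsonpath='{.status.activeReportJobs}{"\n"}'
-```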
- -### Change the default report generation time - -By default, reports are generated 30 minutes after the end of the report, to ensure all of the audit data is archived. -(However, this gap does not affect the data collected “start/end time” for a report.) - -You can adjust the time for audit data for cases like initial report testing, to demo a report, or when manually -creating a report that is not counted in global report status. - -To change the delay, go to the installation manifest, and uncomment and set the environment -`TIGERA_COMPLIANCE_JOB_START_DELAY`. Specify value as a [Duration string][parse-duration]. - -### Run reports - -You can run reports at any time to specify a different start/end time, and if a scheduled report fails. - -$[prodname] GlobalReport schedules Kubernetes Jobs, which create a single-run pod to generate a report and store it -in Elasticsearch. Because you need to run reports as a pod, you need higher permissions: allow `create` access -access for pods in namespace `tigera-compliance` using the `tigera-compliance-reporter` service account. - -To run a report on demand: - -1. Download the pod template corresponding to your installation method. - - For management and standalone clusters: - - ```bash - curl -O $[filesUrl]/manifests/compliance-reporter-pod.yaml - ``` - - For managed clusters: - - ```bash - curl $[filesUrl]/manifests/compliance-reporter-pod-managed.yaml -o compliance-reporter-pod.yaml - ``` - -1. Edit the template as follows: - - Edit the pod name if required. - - If you are using your own docker repository, update the container image name with your repo and image tag. - - Set the following environments according to the instructions in the downloaded manifest: - - `TIGERA_COMPLIANCE_REPORT_NAME` - - `TIGERA_COMPLIANCE_REPORT_START_TIME` - - `TIGERA_COMPLIANCE_REPORT_END_TIME` -1. Apply the updated manifest, and query the status of the pod to ensure it completes. - Upon completion, the report is available in the $[prodname] web console. - - ```bash - # Apply the compliance report pod - kubectl apply -f compliance-reporter-pod.yaml - - # Query the status of the pod - kubectl get pod -n tigera-compliance - ``` - -:::note - -Manually-generated reports do not appear in GlobalReport status. - -::: - -## Additional resources - -- For details on configuring and scheduling reports, see [Global reports](../reference/resources/globalreport.mdx) -- For report field descriptions, see [Compliance reports](../reference/resources/compliance-reports/index.mdx) -- [CIS benchmarks](compliance-reports-cis.mdx) - -[parse-duration]: https://golang.org/pkg/time/#ParseDuration diff --git a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/bare-metal/about.mdx b/calico-enterprise_versioned_docs/version-3.19-2/getting-started/bare-metal/about.mdx deleted file mode 100644 index dcab1b2658..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/bare-metal/about.mdx +++ /dev/null @@ -1,97 +0,0 @@ ---- -description: Install Calico network policy so you can secure hosts and VMs that aren't part of a Kubernetes cluster. 
---- - -# Install network policy on non-cluster hosts and VMs - -import DockerContainerService from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_docker-container-service.mdx'; - -import NonClusterBinaryInstall from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_non-cluster-binary-install.mdx'; - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - -## Big picture - -Secure non-cluster hosts and VMs by installing $[prodname] network policy. - -## Value - -Not all hosts in your environment run pods/workloads. You may have physical machines or legacy applications that you cannot move into a Kubernetes cluster, but still need to securely communicate with pods in your cluster. $[prodname] lets you enforce policy on these **non-cluster hosts** using the same robust $[prodname] network policy that you use for pods. This solution can also be used to protect bare metal/physical servers that run Kubernetes clusters instead of VMs. - -## Concepts - -### Non-cluster hosts and host endpoints - -A non-cluster host is a computer that is running an application that is _not part of a Kubernetes cluster_. But you can protect hosts using the same $[prodname] network policy that you use for your Kubernetes cluster. In the following diagram, the Kubernetes cluster is running full $[prodname] with networking (for pod-to-pod communications) and network policy; the non-cluster host uses $[prodname] network policy only for host protection. - -![non-cluster-host](/img/calico-enterprise/non-cluster-host.png) - -For non-cluster hosts, you can secure host interfaces using **host endpoints**. Host endpoints can have labels, and work the same as labels on pods/workload endpoints. The advantage is that you can write network policy rules to apply to both workload endpoints and host endpoints using label selectors; where each selector can refer to the either type (or be a mix of the two). For example, you can write a cluster-wide policy for non-cluster hosts that is immediately applied to every host. - -To learn how to restrict traffic to/from hosts and VMs using $[prodname] network policy, see [Protect hosts](../../network-policy/hosts/protect-hosts.mdx). - -## Before you begin - -**CNI support** - -Calico CNI for networking with $[prodname] network policy - -The geeky details of what you get: - - - -**Required** - -- Kubernetes API datastore is up and running and is accessible from the host - - If $[prodname] is installed on a cluster, you already have a datastore. - -- Non-cluster host meets $[prodname] [system requirements](requirements.mdx) - - Ensure that your node OS includes the `ipset` and `conntrack` kernel dependencies - - Install Docker if you are using container install option (rather than binary install) - -## How to - -- [Configure hosts to communicate with your Kubernetes cluster](#configure-hosts-to-communicate-with-your-kubernetes-cluster) - - - - - - - - - -**Additional Requirements** - -1. Verify that Docker is installed. -1. Configure container to start at boot time. - The `$[nodecontainer]` container should be started at boot time by your init system and the init system must be configured to restart it if stopped. $[prodname] relies on that behavior for certain configuration changes. - - - - - - -### Configure hosts to communicate with your Kubernetes cluster - -Using $[prodname] network policy-only mode, you must ensure that the non-cluster host can directly communicate with your Kubernetes cluster. 
Here are some vendor tips: - -**AWS** - -- For hosts to communicate with your Kubernetes cluster, the node must be in the same VPC as nodes in your Kubernetes cluster, and must use the AWS VPC CNI plugin (used by default in EKS). -- The Kubernetes cluster security group needs to allow traffic from your host endpoint. Make sure that an inbound rule is set so that traffic from your host endpoint node is allowed. -- For a non-cluster host to communicate with an EKS cluster, the correct IAM roles must be configured. -- You also need to provide authentication to your Kubernetes cluster using [aws-iam-authenticator](https://docs.aws.amazon.com/eks/latest/userguide/install-aws-iam-authenticator.html) and the [aws cli](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-install.html) - -**GKE** - -For hosts to communicate with your Kubernetes cluster directly, you must make the host directly reachable/routable; this is not set up by default with the VPC native network routing. - -## Additional resources - -- [Protect hosts](../../network-policy/hosts/protect-hosts.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/bare-metal/index.mdx b/calico-enterprise_versioned_docs/version-3.19-2/getting-started/bare-metal/index.mdx deleted file mode 100644 index d53454ee53..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/bare-metal/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: Install Calico Enterprise on hosts to secure host communications. -hide_table_of_contents: true ---- - -# Non-cluster hosts - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/bare-metal/requirements.mdx b/calico-enterprise_versioned_docs/version-3.19-2/getting-started/bare-metal/requirements.mdx deleted file mode 100644 index 657c21aea0..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/bare-metal/requirements.mdx +++ /dev/null @@ -1,12 +0,0 @@ ---- -description: Review node requirements for installing Calico Enterprise on non-cluster hosts. ---- - -import ReqsSys from "@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/ReqsSys"; -import ReqsKernel from "@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/ReqsKernel"; - -# System requirements - - - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/compatibility.mdx b/calico-enterprise_versioned_docs/version-3.19-2/getting-started/compatibility.mdx deleted file mode 100644 index 0c97ba0d29..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/compatibility.mdx +++ /dev/null @@ -1,114 +0,0 @@ ---- -description: Lists versions of Calico Enterprise and Kubernetes for each platform. ---- - -# Support and compatibility - -## Supported platforms - -The following list shows the platforms supported in this release. If you're working with a version older than these, consult the [documentation archive](https://docs.tigera.io/archive) or contact Support. 
- -- [AKS](#aks) -- [EKS](#eks) -- [GKE](#gke) -- [kOps on AWS](#kops-on-aws) -- [Kubernetes-kubeadm](#kubernetes-kubeadm) -- [MKE](#mke) -- [OpenShift](#openshift) -- [RKE](#rke) -- [RKE2](#rke2) -- [TKG](#tkg) - -### Supported $[prodname] features - -If your platform is listed below, the features in this release will work for your platform unless an individual feature topic explicitly calls out a vendor or $[prodname] limitation. - -Note that all Windows feature limitations are described in [Windows limitations](install-on-clusters/windows-calico/limitations.mdx), and are not called out in individual Linux topics. - -## AKS - -Kubernetes version support aligns with [upstream Kubernetes](#kubernetes-kubeadm) to the latest version if available. - -| $[prodname] version | $[prodname] support | -| ----------------------- | ---------------------------------------------------------------------------------------------------------------------------------------- | -| 3.17 to current release | - $[prodname] CNI with network policy
    - Azure CNI with $[prodname] network policy
    - Azure CNI with $[prodname] network policy | - -## EKS - -Kubernetes version support aligns with [upstream Kubernetes](#kubernetes-kubeadm) to the latest version if available. - -| $[prodname] version | $[prodname] support | -| ----------------------- | ------------------------------------------------------------------------------------ | -| 3.17 to current release | - $[prodname] CNI with network policy
    - AWS CNI with $[prodname] network policy | - -## GKE - -Kubernetes version support aligns with [upstream Kubernetes](#kubernetes-kubeadm) to the latest version if available. - -| $[prodname] version | $[prodname] support | -| ----------------------- | ----------------------------------------- | -| 3.17 to current release | - GKE CNI with $[prodname] network policy | - -## kOps on AWS - -| $[prodname] version | kOps and Kubernetes versions | $[prodname] support | -| ------------------- | ---------------------------- | ------------------------------------------------------------------------------------ | -| 3.19 | 1.28 - 1.30 | - $[prodname] CNI with network policy
    - AWS CNI with $[prodname] network policy | -| 3.18 | 1.26 - 1.28 | - $[prodname] CNI with network policy
    - AWS CNI with $[prodname] network policy | -| 3.17 | 1.25 - 1.26 | - $[prodname] CNI with network policy
    - AWS CNI with $[prodname] network policy | - -## Kubernetes-kubeadm - -| $[prodname] version | Kubernetes/kubeadm versions | $[prodname] support | -| ------------------- | --------------------------- | ----------------------------------- | -| 3.19 | 1.28 - 1.30 | $[prodname] CNI with network policy | -| 3.18 | 1.26 - 1.28 | $[prodname] CNI with network policy | -| 3.17 | 1.25 - 1.27 | $[prodname] CNI with network policy | - -## MKE - -| $[prodname] version | MKE version | $[prodname] support | Kubernetes versions | -| ------------------- | ----------- | ----------------------------------- | ------------------- | -| 3.19 | MKE 3.7 | $[prodname] CNI with network policy | 1.27 | -| 3.18 | MKE 3.7 | $[prodname] CNI with network policy | 1.27 | -| 3.17 | MKE 3.7 | $[prodname] CNI with network policy | 1.27 | - -## OpenShift - -| $[prodname] version | OpenShift versions for Kubernetes | $[prodname] support | -| ------------------- | --------------------------------- | ----------------------------------- | -| 3.19 | 4.14 - 4.16 | $[prodname] CNI with network policy | -| 3.18 | 4.12 - 4.14 | $[prodname] CNI with network policy | -| 3.17 | 4.12 - 4.13 | $[prodname] CNI with network policy | - -## RKE - -| $[prodname] version | RKE version | $[prodname] support | Kubernetes versions | -| ------------------- | ----------- | ----------------------------------- | ------------------- | -| 3.19 | 1.5 - 1.6 | $[prodname] CNI with network policy | 1.28 - 1.30 | -| 3.18 | 1.4 | $[prodname] CNI with network policy | 1.26 | -| 3.17 | 1.4 | $[prodname] CNI with network policy | 1.25 | - -## RKE2 - -| $[prodname] version | $[prodname] support | Kubernetes versions | -| ------------------- | ----------------------------------- | ------------------- | -| 3.19 | $[prodname] CNI with network policy | 1.28 - 1.30 | -| 3.18 | $[prodname] CNI with network policy | 1.26 - 1.28 | -| 3.17 | $[prodname] CNI with network policy | 1.25 - 1.26 | - -## TKG - -| $[prodname] version | TKG version | $[prodname] support | Kubernetes versions | -| ------------------- | ----------- | ------------------------------------- | ------------------- | -| 3.19 | 2.4 | $[prodname] CNI with network policy | 1.27 | -| 3.18 | 2.4 | $[prodname] CNI with network policy | 1.27 | -| 3.17 | N/A | $[prodname] 3.17 does not support TKG | N/A | - -## Supported browsers - -The following list shows the browsers supported by $[prodname] in this release. - -- Chrome -- Safari -- Firefox diff --git a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/index.mdx b/calico-enterprise_versioned_docs/version-3.19-2/getting-started/index.mdx deleted file mode 100644 index 8c8f609d31..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/index.mdx +++ /dev/null @@ -1,77 +0,0 @@ ---- -description: Install Calico Enterprise on nodes and hosts for popular orchestrators, and install the calicoctl command line interface (CLI) tool. ---- - -import { DocCardLink, DocCardLinkLayout } from '/src/___new___/components'; - -# Install Calico Enterprise - -Requirements and guides for installing Calico Enterprise on Kubernetes clusters and non-cluster hosts.. 
- -## Getting started - - - - - - -## Installing - - - - - - - - - - - - - - - - - - - - -## Installing from a private registry - - - - - - -## Installing on Windows - - - - - - - - - - - - -## Upgrading - - - - - - - - - - - -## Non-cluster hosts - - - - - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/aks.mdx b/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/aks.mdx deleted file mode 100644 index 4f67ad0546..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/aks.mdx +++ /dev/null @@ -1,87 +0,0 @@ ---- -description: Install Calico Enterprise for an AKS cluster. ---- - -import InstallAKS from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/InstallAKS.js'; - -# Microsoft Azure Kubernetes Service (AKS) - -## Big picture - -Install $[prodname] on an AKS managed Kubernetes cluster. - -## Before you begin - -**CNI support** - -- Calico CNI for networking with $[prodname] network policy - - The geeky details of what you get: - - - -- Azure CNI networking with $[prodname] network policy - - The geeky details of what you get: - - - -- Azure CNI with overlay networking with $[prodname] network policy - - The geeky details of what you get: - - - -**Recommended** - -- Set suggested value for maximum number of pods per node - - It is recommended to set the maximum pods per node to be at least 60 for use with Calico Enterprise. The default value in AKS is 30. If you need to increase the number of pods per node, see [Configure maximum pods per node](https://docs.microsoft.com/en-us/azure/aks/configure-azure-cni#configure-maximum-pods-per-node). - -**Required** - -- A [compatible AKS cluster](../compatibility.mdx#aks) - - - To use the Calico CNI, you must configure the AKS cluster with [Bring your own CNI](https://docs.microsoft.com/en-us/azure/aks/use-byo-cni?tabs=azure-cli) - - To use the Azure CNI, see [Azure CNI networking](https://docs.microsoft.com/en-us/azure/aks/configure-azure-cni) - - To use the Azure CNI with overlay networking, see [Azure CNI with Overlay](https://learn.microsoft.com/en-us/azure/aks/azure-cni-overlay) - -- Cluster is not using a Kubernetes reconciler - - If your cluster has an existing version of $[prodname] installed, verify that the cluster is not managed by any kind of Kubernetes reconciler. For example, if addon-manager exists, there will be an annotation called, `addonmanager.kubernetes.io/mode` on either of the following resources (if the resources exist): - - - `tigera-operator` deployment in the `tigera-operator` namespace - - `calico-node` daemonset in the `kube-system` namespace - -- User account has IAM permissions - - Verify your user account has IAM permissions to create Kubernetes ClusterRoles, ClusterRoleBindings, Deployments, Service Accounts, and Custom Resource Definitions. The easiest way to grant permissions is to assign the "Kubernetes Service Cluster Admin Role” to your user account. For help, see [AKS access control](https://docs.microsoft.com/en-us/azure/aks/control-kubeconfig-access). - -- Cluster meets [system requirements](requirements.mdx) - -- A [Tigera license key and credentials](calico-enterprise.mdx) - -- [Install kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) - -1. [Option A: Install with Azure CNI networking](#install-with-azure-cni-networking) -1. [Option B: Install with Calico networking](#install-with-calico-enterprise-networking) -1. 
[Install the $[prodname] license](#install-the-calico-enterprise-license) - - - -## Next steps - -- [Configure access to the $[prodname] web console](../../operations/cnx/access-the-manager.mdx) -- [Video: Everything you need to know about Kubernetes networking on Azure](https://www.projectcalico.org/everything-you-need-to-know-about-kubernetes-networking-on-azure/) -- [Get started with Kubernetes network policy](../../network-policy/get-started/kubernetes-network-policy.mdx) -- [Get started with $[prodname] network policy](../../network-policy/beginners/calico-network-policy.mdx) -- [Enable default deny for Kubernetes pods](../../network-policy/beginners/kubernetes-default-deny.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/aws.mdx b/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/aws.mdx deleted file mode 100644 index 9af075cce9..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/aws.mdx +++ /dev/null @@ -1,155 +0,0 @@ ---- -description: Install Calico Enterprise with a self-managed Kubernetes cluster using kOps on AWS. ---- - -# kOps on AWS - -## Big picture - -Install $[prodname] with a self-managed Kubernetes cluster using Kubernetes Operations (kOps) on AWS. kOps is a cluster management tool that provisions cluster VMs and installs Kubernetes. It is a good default choice for most because it gives you access to all $[prodname] [flexible and powerful networking features](../../networking/index.mdx). However, other options may work better for your environment. - -## Before you begin - -**CNI support** - -- Calico CNI for networking with $[prodname] network policy - - The geeky details of what you get: - - {' '} - -- AWS VPC CNI for networking with $[prodname] network policy - - The geeky details of what you get: - - - -**Required** - -- A [compatible kOps cluster](../compatibility.mdx#kops-on-aws) -- A [Tigera license key and credentials](calico-enterprise.mdx) -- Cluster meets [system requirements](requirements.mdx) -- [Install kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) -- [Install AWS CLI tools](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-install.html) - -## How to - -Select one of the following installation paths: - -- [Install Calico Enterprise networking and network policy](#install-calico-enterprise-networking-and-network-policy) -- [Install Amazon VPC networking with Calico Enterprise network policy](#install-amazon-vpc-networking-with-calico-enterprise-network-policy) - -### Install Calico Enterprise networking and network policy - -To use kOps to create a cluster with $[prodname] networking and network policy: - -1. [Install kOps](https://kops.sigs.k8s.io/install/) on your workstation. -1. [Set up your environment for AWS](https://kops.sigs.k8s.io/getting_started/aws/) . -1. Be sure to [set up an S3 state store](https://kops.sigs.k8s.io/getting_started/aws/#cluster-state-storage) and export its name: - - ```bash - export KOPS_STATE_STORE=s3:// - ``` - -1. Configure kOps to use $[prodname] for networking. - Create a cluster with kOps using the `--networking cni` flag. For example: - - ``` - kops create cluster \ - --zones us-west-2a \ - --networking cni \ - - ``` - - :::note - - The name of the cluster must be chosen as a valid DNS name belonging to the root user. It can either be a subdomain of an existing domain name or a subdomain which can be configured on AWS Route 53 service. 
- - ::: - - Or, you can add `cni` to your cluster config. Run `kops update cluster --name=` and set the following networking configuration. - - ```yaml - networking: - cni: {} - ``` - - :::note - - Setting the `--networking cni` flag delegates the installation of the CNI to the user for a later stage. - - ::: - -1. The provisioned kOps cluster will assign its own set of pod network CIDR in the kube-proxy instance different than the one $[prodname] expects. To set the cluster cidr for the kube-proxy to match the one expected by $[prodname] edit the cluster config `kops edit cluster ` and add the `kubeProxy` config with the `clusterCIDR` expected by the default $[prodname] installation. - - ```yaml noValidation - spec: - ... - kubeProxy: - clusterCIDR: 192.168.0.0/16 - ``` - - :::note - - For more advanced pod networking CIDR configuration, the requirement is to have `ipPools` CIDR set by the $[prodname] installation to match cluster CIDR set in kube-proxy. Calico's `ipPools` setting is obtainable in the Installation resource `kubectl get installation -o yaml` and can be configured by editing the operator manifest found in the [install instructions for $[prodname]](kubernetes/generic-install.mdx). - - ::: - -1. The default size of the provisioned instance groups for the cluster might not be sufficient for the full installation of kubernetes and $[prodname]. To increase the size of the instance groups run `kops edit ig --name ` and edit the following fields accordingly. - - ```yaml noValidation - spec: - ... - machineType: t3.medium - maxSize: 1 - minSize: 1 - ``` - - The name of the instance groups can be obtained from `kops get instancegroups --name `. - -1. Once your cluster has been configured run `kops update cluster --name=` to preview the changes. Then the same command with `--yes` option (ie. `kops update cluster --name= --yes`) to commit the changes to AWS to create the cluster. It may take 10 to 15 minutes for the cluster to be fully created. - - :::note - - Once the cluster has been created, the `kubectl` command should be pointing to the newly created cluster. By default `kops>=1.19` does not update `kubeconfig` to include the cluster certificates, accesses to the cluster through `kubectl` must be configured. - - ::: - -1. Validate that nodes are created. - - ```bash - kubectl get nodes - ``` - - The above should return the status of the nodes in the `Not Ready` state. - -1. KOps does not install any CNI when the flag `--networking cni` or `spec.networking: cni {}` is used. In this case the user is expected to install the CNI separately. - To Install $[prodname] follow the [install instructions for $[prodname]](kubernetes/generic-install.mdx). - -1. Finally, to delete your cluster once finished, run `kops delete cluster --yes`. - -You can further customize the $[prodname] install with [options listed in the kops documentation](https://kops.sigs.k8s.io/networking/#calico-example-for-cni-and-network-policy). - -### Install Amazon VPC networking with Calico Enterprise network policy - -You can use Amazon’s VPC CNI plugin for networking, and $[prodname] for network policy. The advantage of this approach is that pods are assigned IP addresses associated with Elastic Network Interfaces on worker nodes. The IPs come from the VPC network pool, and therefore do not require NAT to access resources outside the Kubernetes cluster. 
- -Set your kOps cluster configuration to: - -```yaml -networking: - amazonvpc: {} -``` - -After the cluster is up and ready, [Install $[prodname]](kubernetes/generic-install.mdx). - -## Next steps - -- [Video: Everything you need to know about Kubernetes pod networking on AWS](https://www.projectcalico.org/everything-you-need-to-know-about-kubernetes-pod-networking-on-aws/) -- [Try out $[prodname] network policy](../../network-policy/beginners/calico-network-policy.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/calico-enterprise.mdx b/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/calico-enterprise.mdx deleted file mode 100644 index a83827d79b..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/calico-enterprise.mdx +++ /dev/null @@ -1,42 +0,0 @@ ---- -description: Get a license to install Calico Enterprise. ---- - -# Get a license - -## Get private registry credentials and license key - -Contact your Tigera support representative and get the following files: - -**Private registry credentials** - -Get the Docker configuration file, (`config.json`) that contains a robot account token to retrieve the $[prodname] images from the private Tigera repository. For example: - -```json -{ - "auths": { - "quay.io": { - "auth": "", - "email": "" - } - } -} -``` - -**License key** - -Get a license key file, `-license.yaml`. For example: - -```yaml -apiVersion: projectcalico.org/v3 -kind: LicenseKey -metadata: - creationTimestamp: null - name: default -spec: - certificate: | - -----BEGIN CERTIFICATE----- - MII...n5 - -----END CERTIFICATE----- - token: eyJ...zaQ -``` diff --git a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/docker-enterprise.mdx b/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/docker-enterprise.mdx deleted file mode 100644 index 1d69103ef5..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/docker-enterprise.mdx +++ /dev/null @@ -1,131 +0,0 @@ ---- -description: Install Calico Enterprise on an MKE cluster. ---- - -# Mirantis Kubernetes Engine (MKE) - -## Big picture - -Install $[prodname] on a Mirantis Kubernetes Engine (MKE) cluster (formerly Docker Enterprise). - -## Before you begin - -**CNI support** - -Calico CNI for networking with $[prodname] network policy: - -The geeky details of what you get: - - - -**Required** - -- A [compatible MKE cluster](../compatibility.mdx#mke) with: - - - A minimum of three nodes for non-production deployments - - CNI flag set to unmanaged, `--unmanaged-cni` so UCP does not install the default $[prodname] networking plugin - - For help, see [Docker Enterprise](https://docs.docker.com/), and [Docker EE Best Practices and Design Considerations](https://docs.mirantis.com/docker-enterprise/v3.0/dockeree-ref-arch/deploy-manage/best-practices-design.html) - -- Install UCP control plane to access the cluster using [Docker Universal Control Plane CLI-Based Access](https://dockerlabs.collabnix.com/advanced/Docker-UCP-overview.html). After installing the control plane, enable the option "Allow all authenticated users, including service accounts, to schedule on all nodes, including UCP managers and DTR nodes." 
- -- Cluster meets [system requirements](requirements.mdx) - -- A [Tigera license key and credentials](calico-enterprise.mdx) - -- Install [Install kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) - -## How to - -- [Install $[prodname]](#install-calico-enterprise) -- [Install the $[prodname] license](#install-the-calico-enterprise-license) - -### Install $[prodname] - -1. [Configure a storage class for $[prodname]](../../operations/logstorage/create-storage.mdx). - -1. Configure Tigera Operator and Calico CNI plugin role bindings for Docker EE. - - ```bash - kubectl create clusterrolebinding tigera-operator-cluster-admin -n tigera-operator \ - --clusterrole cluster-admin --serviceaccount tigera-operator:tigera-operator - kubectl create clusterrolebinding calico-cni-plugin-cluster-admin -n calico-system \ - --clusterrole cluster-admin --serviceaccount calico-system:calico-cni-plugin - ``` - -1. Install the Tigera Operator and custom resource definitions. - - ```bash - kubectl create -f $[filesUrl]/manifests/tigera-operator.yaml - ``` - -1. Install the Prometheus operator and related custom resource definitions. The Prometheus operator will be used to deploy Prometheus server and Alertmanager to monitor $[prodname] metrics. - - :::note - - If you have an existing Prometheus operator in your cluster that you want to use, skip this step. To work with $[prodname], your Prometheus operator must be v0.40.0 or higher. - - ::: - - ```bash - kubectl create -f $[filesUrl]/manifests/tigera-prometheus-operator.yaml - ``` - -1. Install your pull secret. - - If pulling images directly from `quay.io/tigera`, you will likely want to use the credentials provided to you by your Tigera support representative. If using a private registry, use your private registry credentials instead. - - ```bash - kubectl create secret generic tigera-pull-secret \ - --type=kubernetes.io/dockerconfigjson -n tigera-operator \ - --from-file=.dockerconfigjson= - ``` - -1. Install any extra [$[prodname] resources](../../reference/resources/index.mdx) needed at cluster start using [calicoctl](../../reference/clis/calicoctl/overview.mdx). - -1. Install the Tigera custom resources. For more information on configuration options available in this manifest, see [the installation reference](../../reference/installation/api.mdx). - - ```bash - kubectl create -f $[filesUrl]/manifests/custom-resources.yaml - ``` - - Monitor progress with the following command: - - ```bash - watch kubectl get tigerastatus - ``` - - Wait until the `apiserver` shows a status of `Available`, then proceed to the next section. - -### Install the $[prodname] license - -To use $[prodname], you must install the license provided to you by Tigera. - -```bash -kubectl create -f -``` - -Monitor progress with the following command: - -```bash -watch kubectl get tigerastatus -``` - -## Next steps - -**Recommended** - -- [Configure access to the $[prodname] web console](../../operations/cnx/access-the-manager.mdx) -- [Authentication quickstart](../../operations/cnx/authentication-quickstart.mdx) -- [Configure an external identity provider](../../operations/cnx/configure-identity-provider.mdx) - -**Recommended - Networking** - -- The default networking uses IP-in-IP with BGP routing. For all networking options, see [Determine best networking option](../../networking/determine-best-networking.mdx). 
- -**Recommended - Security** - -- [Get started with $[prodname] tiered network policy](../../network-policy/policy-tiers/tiered-policy.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/eks.mdx b/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/eks.mdx deleted file mode 100644 index 383827ac07..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/eks.mdx +++ /dev/null @@ -1,59 +0,0 @@ ---- -description: Enable Calico network policy in EKS. ---- - -import InstallEKS from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/InstallEKS'; - -# Amazon Elastic Kubernetes Service (EKS) - -## Big picture - -Install $[prodname] on an EKS managed Kubernetes cluster. - -## Before you begin - -**CNI support** - -- Calico CNI for networking with $[prodname] network policy - - The geeky details of what you get by default: - - - -- AWS CNI networking with $[prodname] network policy - - The geeky details of what you get by default: - - - -**Required** - -* You have a [compatible EKS cluster](../compatibility.mdx#eks). -* Your cluster meets the [system requirements](requirements.mdx). -* You [disabled network policy for the AWS VPC CNI](https://docs.aws.amazon.com/eks/latest/userguide/network-policy-disable.html). -* You have a [Tigera license key and credentials](calico-enterprise.mdx). -* You [installed kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) on your workstation. - -## How to - -1. [Option A: Install with Amazon VPC networking](#install-eks-with-amazon-vpc-networking) -1. [Option B: Install with Calico CNI networking](#install-eks-with-calico-networking) -1. [Install the $[prodname] license](#install-the-calico-enterprise-license) - - - -## Next steps - -- [Configure access to the $[prodname] web console](../../operations/cnx/access-the-manager.mdx) - -**Recommended** - -- [Video: Everything you need to know about Kubernetes pod networking on AWS](https://www.projectcalico.org/everything-you-need-to-know-about-kubernetes-pod-networking-on-aws/) -- [Get started with $[prodname] network policy](../../network-policy/beginners/calico-network-policy.mdx) -- [Enable default deny for Kubernetes pods](../../network-policy/beginners/kubernetes-default-deny.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/gke.mdx b/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/gke.mdx deleted file mode 100644 index 0d2b5631c7..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/gke.mdx +++ /dev/null @@ -1,69 +0,0 @@ ---- -description: Enable Calico network policy in GKE. ---- - -import InstallGKE from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/InstallGKE'; - -# Google Kubernetes Engine (GKE) - -## Big picture - -Install $[prodname] on a GKE managed Kubernetes cluster. - -{/* CE supports only GKE CNI, even though OS/Calico supports Calico CNI. Verified with Shaun, Casey, and Sujeet in 3.15. So docs will continue to be misaligned so be careful during merges. Note that OS support of Calico CNI is only in v1 of the data plane; in v2, Cillium is the supported CNI. Another misalignment is that CE install docs list all platforms supported; OS/Calico lists of supported CNIs based on an opinionated view. 
*/} - -## Before you begin - -**CNI support** - -GKE CNI with $[prodname] network policy: - -The geeky details of what you get: - - - -**Required** - -- A [compatible GKE cluster](../compatibility.mdx#gke) - -- Cluster has these Networking settings: - - - Intranode visibility is enabled - - Network policy is disabled - - Dataplane V2 is disabled - - GKE control plane access to TCP ports 5443, 8080 and 9090 - The GKE control plane must be able to access the $[prodname] API server which runs with pod networking on TCP ports 5443 and 8080, and the $[prodname] Prometheus server which runs with pod networking on TCP port 9090. For multi-zone clusters and clusters with the "master IP range" configured, you will need to add a GCP firewall rule to allow access to those ports from the control plane nodes. - -- User account has IAM permissions - - Verify your user account has IAM permissions to create Kubernetes ClusterRoles, ClusterRoleBindings, Deployments, Service Accounts, and Custom Resource Definitions. The easiest way to grant permissions is to assign the "Kubernetes Service Cluster Admin Role” to your user account. For help, see [GKE access control](https://cloud.google.com/kubernetes-engine/docs/how-to/role-based-access-control). - - :::note - - By default, GCP users often have permissions to create basic Kubernetes resources (such as Pods and Services) but lack the permissions to create ClusterRoles and other admin resources. Even if you can create basic resources, it's worth verifying that you can create admin resources before continuing. - - ::: - -- Cluster meets [system requirements](requirements.mdx) - -- A [Tigera license key and credentials](calico-enterprise.mdx#get-private-registry-credentials-and-license-key) - -- [Install kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) - -## How to - -1. [Install $[prodname]](#install-calico-enterprise) -1. [Install the $[prodname] license](#install-the-calico-enterprise-license) - - - -## Next steps - -- [Configure access to the $[prodname] web console](../../operations/cnx/access-the-manager.mdx) -- [Video: Everything you need to know about Kubernetes networking on Google cloud](https://www.projectcalico.org/everything-you-need-to-know-about-kubernetes-networking-on-google-cloud/) -- [Get started with Kubernetes network policy](../../network-policy/get-started/kubernetes-network-policy.mdx) -- [Get started with $[prodname] network policy](../../network-policy/beginners/calico-network-policy.mdx) -- [Enable default deny for Kubernetes pods](../../network-policy/beginners/kubernetes-default-deny.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/index.mdx b/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/index.mdx deleted file mode 100644 index 06c79a8ccb..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: Install Calico Enterprise on clusters. 
-hide_table_of_contents: true ---- - -# Install on clusters - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/kubernetes/generic-install.mdx b/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/kubernetes/generic-install.mdx deleted file mode 100644 index f35b6c2a57..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/kubernetes/generic-install.mdx +++ /dev/null @@ -1,53 +0,0 @@ ---- -description: Install Calico Enterprise on a kubeadm-provisioned Kubernetes cluster for on-premises deployments. ---- - -import InstallGeneric from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/InstallGeneric'; - -# Standard - -## Big picture - -Install $[prodname] on a deployed Kubernetes/kubeadm cluster for on-premises deployments. - -## Before you begin - -**CNI support** - -Calico CNI for networking with $[prodname] network policy: - -The geeky details of what you get: - - - -**Required** - -- A [compatible Kubernetes cluster](../../compatibility.mdx#kubernetes-kubeadm) -- Cluster meets [system requirements](../requirements.mdx) -- A [Tigera license key and credentials](../calico-enterprise.mdx) - -## How to - -- [Install $[prodname]](#install-calico-enterprise) -- [Install $[prodname] license](#install-calico-enterprise-license) - - - -## Next steps - -**Recommended** - -- [Configure access to the $[prodname] web console](../../../operations/cnx/access-the-manager.mdx) -- [Authentication quickstart](../../../operations/cnx/authentication-quickstart.mdx) -- [Configure your own identity provider](../../../operations/cnx/configure-identity-provider.mdx) - -**Recommended - Networking** - -- The default networking uses IP in IP encapsulation with BGP routing. For all networking options, see [Determine best networking option](../../../networking/determine-best-networking.mdx). - -**Recommended - Security** - -- [Get started with $[prodname] tiered network policy](../../../network-policy/policy-tiers/tiered-policy.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/kubernetes/helm.mdx b/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/kubernetes/helm.mdx deleted file mode 100644 index 1d6965df12..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/kubernetes/helm.mdx +++ /dev/null @@ -1,126 +0,0 @@ ---- -description: Install Calico Enterprise using Helm application package manager. ---- - -# Helm - -import CodeBlock from '@theme/CodeBlock'; - -## Big picture - -Install $[prodname] on a Kubernetes cluster using Helm 3. - -## Value - -Helm charts are a way to package up an application for Kubernetes (similar to `apt` or `yum` for operating systems). Helm is also used by tools like ArgoCD to manage applications in a cluster, taking care of install, upgrade (and rollback if needed), etc. - -## Before you begin - -**Required** - -- Install Helm 3 -- `kubeconfig` is configured to work with your cluster (check by running `kubectl get nodes`) -- [Credentials for the Tigera private registry and a license key](../calico-enterprise.mdx) - -## Concepts - -### Operator based installation - -In this guide, you install the Tigera Calico operator and custom resource definitions using the Helm 3 chart. 
The Tigera Operator provides lifecycle management for $[prodname] exposed via the Kubernetes API defined as a custom resource definition. - -## How to - -### Download the Helm chart - -```bash -helm repo add tigera-ee https://downloads.tigera.io/ee/charts -helm repo update -helm pull tigera-ee/tigera-operator --version $[releaseTitle] -``` - -### Prepare the Installation Configuration - -You **must** provide the desired configuration for your cluster via the `values.yaml`, otherwise installation will use the default settings based on the auto-detected provider. -The configurations you need to provide depends on your cluster's settings and your desired state. - -Some important configurations you might need to provide to the installer (via `values.yaml`) includes (but not limited to): _kubernetesProvider_, _cni type_, or if you need to customize _TLS certificates_. - -Here are some examples for updating `values.yaml` with your configurations: - -Example 1. Providing `kubernetesProvider`: if you are installing on a cluster installed by EKS, set the `kubernetesProvider` as described in the [Installation reference](../../../reference/installation/api.mdx#provider) - - ```bash - echo '{ installation: {kubernetesProvider: EKS }}' > values.yaml - ``` - -Example 2. Providing custom settings in `values.yaml` for Azure AKS cluster with no Kubernetes CNI pre-installed: - - ```bash - cat > values.yaml <,tigera-prometheus-operator.imagePullSecrets.tigera-pull-secret= \ - --set-file licenseKeyContent= \ - --namespace tigera-operator --create-namespace - ``` - - or if you created a `values.yaml` above: - - ```bash - helm install $[prodnamedash] tigera-operator-$[chart_version_name].tgz -f values.yaml \ - --set-file imagePullSecrets.tigera-pull-secret=,tigera-prometheus-operator.imagePullSecrets.tigera-pull-secret= \ - --set-file licenseKeyContent= \ - --namespace tigera-operator --create-namespace - ``` -1. You can now monitor progress with the following command: - - ```bash - watch kubectl get tigerastatus - ``` - - Congratulations! You have now installed $[prodname] using the Helm 3 chart. - -## Next steps - -**Multicluster Management** - -- [Create a $[prodname] management cluster](../../../multicluster/set-up-multi-cluster-management/standard-install/create-a-management-cluster.mdx) -- [Create a $[prodname] managed cluster](../../../multicluster/set-up-multi-cluster-management/standard-install/create-a-managed-cluster.mdx) - -**Recommended** - -- [Configure access to the $[prodname] web console](../../../operations/cnx/access-the-manager.mdx) -- [Authentication quickstart](../../../operations/cnx/authentication-quickstart.mdx) -- [Configure your own identity provider](../../../operations/cnx/configure-identity-provider.mdx) - -**Recommended - Networking** - -- The default networking is IP in IP encapsulation using BGP routing. For all networking options, see [Determine best networking option](../../../networking/determine-best-networking.mdx). 
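
Configuration changes after the initial install (including networking settings exposed through the chart's `installation` values) can be rolled out with `helm upgrade`. A minimal sketch, assuming the release name and namespace used in the install step above; the paths are placeholders, and the pull-secret and license flags are repeated so the upgrade does not drop those values:

```bash
# Re-apply the chart after editing values.yaml; mirror the flags from the install step.
helm upgrade $[prodnamedash] tigera-operator-$[chart_version_name].tgz -f values.yaml \
  --set-file imagePullSecrets.tigera-pull-secret=<path-to-pull-secret>,tigera-prometheus-operator.imagePullSecrets.tigera-pull-secret=<path-to-pull-secret> \
  --set-file licenseKeyContent=<path-to-license-file> \
  --namespace tigera-operator
```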
- -**Recommended - Security** - -- [Get started with $[prodname] tiered network policy](../../../network-policy/policy-tiers/tiered-policy.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/kubernetes/index.mdx b/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/kubernetes/index.mdx deleted file mode 100644 index 404f9daa82..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/kubernetes/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: Get Calico up and running in your Kubernetes cluster. -hide_table_of_contents: true ---- - -# Kubernetes - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/kubernetes/options-install.mdx b/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/kubernetes/options-install.mdx deleted file mode 100644 index 6d69002ac0..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/kubernetes/options-install.mdx +++ /dev/null @@ -1,123 +0,0 @@ ---- -description: Learn about API-driven installation and how to customize your installation configuration. ---- - -# Options for installing Calico Enterprise - -import DefaultInstall from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_default-install.mdx'; - -## Big picture - -Understand the options for installing $[prodname]. - -## Value - -Determine the right install method for your deployment, how to customize your installation configuration, and explains concepts important for integrating $[prodname] into your configuration management. - -## Concepts - -### API-driven installation - -The Tigera Operator APIs (**operator.tigera.io**), let you define a $[prodname] cluster using declarative states. The APIs in the operator.tigera.io group define the desired state for your installation, and provide status reporting and visibility into the health of $[prodname]. The Tigera Operator makes sure the cluster’s state matches your desired state. These APIs can be configured at install-time, and can also be modified on a running cluster to adjust configuration. - -### Available APIs - -Each API in the operator.tigera.io API group configures the installation of a different $[prodname] subsystem. The resources that you deploy and the settings you choose for each, depends on your cluster(s). For a detailed look at the available APIs, see the [installation API reference](../../../reference/installation/api.mdx). - -| **Resource** | **Description** | -| ------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| **Installation** | The foundation of every deployment is the **Installation** resource. It deploys core networking and network policy enforcement features to the cluster, and is required for all other resources to function. | -| **APIServer** | Installs the Tigera API server, which enables access to the full set of [$[prodname] APIs](../../../reference/installation/api.mdx). | -| **LogStorage** | Configures installation of storage for $[prodname] flow and DNS logs. 
| -| **LogCollector** | Configures collection of those logs from the cluster, including optional configuration to backup logs in additional stores. | -| **Compliance** | Configures the $[prodname] compliance reporting feature. | -| **IntrusionDetection** | Configures the $[prodname] intrusion detection feature. | -| **Manager** | Installs the $[prodname] web console. | -| **ManagementClusterConnection** | Configures a connection to the management cluster for managed clusters. | -| **TigeraStatus** | Displays conditions for a component (available, progressing, or degraded). | - -### Additional configuration - -In addition to the above APIs, some product features are configured through additional Kubernetes Secrets and Kubernetes ConfigMaps. For details, see the relevant documentation for each feature. - -### Customize over time - -The out-of-the-box $[prodname] installation gives you a working cluster with reasonable defaults with minimal post-installation tasks. - - - -You do not have to customize everything during initial install. As you progress through each stage of implementing $[prodname] leading to production (following diagram), you will gradually customize $[prodname] resources and automate cluster deployment. For example, when you move to **2 - Pre-production**, you may switch from standalone clusters (default) to multi-cluster management for centralization and scaling. - -![customize install](/img/calico-enterprise/customize-install.png) - -### API-driven installation options - -For most users, we recommend the standard installation, which uses a Kubernetes operator to guide and manage the installation. -For exceptional circumstances, $[prodname] components can be installed using the following methods: - -**Install directly on non-cluster hosts** - -Although $[prodname] requires a Kubernetes control plane to function, you may want to install the node components on [non-cluster hosts](../../bare-metal/index.mdx) to consistently secure all of your infrastructure. - -**Install on Kubernetes using Helm** - -If your deployment requires Helm charts, we provide a helm chart that installs $[prodname] using the Tigera Operator. The Helm `values.yaml` includes sections that correspond directly to the operator.tigera.io APIs for installing the product. In the following example, the installation value is piped into the **spec** field of the **installation API**. - -**values.yaml** - -```yaml -installation: - variant: TigeraSecureEnterprise - registry: gcr.io/mycorp -``` - -**installation.yaml** - -```yaml -apiVersion: operator.tigera.io/v1 -kind: Installation -metadata: - name: default -spec: - variant: TigeraSecureEnterprise - registry: gcr.io/mycorp -``` - -### Installation steps - -The following table shows the conceptual stages of a $[prodname] install. - -:::note - -Details in the following steps will vary for platforms; for example, OpenShift automatically orchestrates the execution of the steps through tooling. 
- -::: - -| **Steps** | **Resources/files/APIs** | -| --------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| 1 - Install the necessary APIs using CustomResourceDefinitions, and install the necessary operators | This step installs CustomResourceDefinitions (CRDs) into the cluster to enable the necessary API endpoints in the Kubernetes API server. It also installs the Tigera Operator and the Prometheus operator which implements those APIs.

    **Tip**: For most platforms, this step is accomplished by installing `tigera-operator.yaml`. However, note that OpenShift requires all resources to be in their own file, and so `tigera-operator.yaml` is split among many files for OpenShift installs. | -| 2 - Install any required $[prodname] resources | Depending on your cluster, you may want to configure certain features of $[prodname] at install-time. For example, you may require cluster-specific BGP configurations. You can create any install-time configuration using `calicoctl`.

    **Tip**: On OpenShift clusters, this is done automatically by placing configuration in a Kubernetes ConfigMap. | -| 3 - Install $[prodname] | Install $[prodname] by creating instances of the **operator.tigera.io** resources discussed above. The Tigera Operator reads this configuration and installs the necessary components, verifying that everything is working along the way. | - -## Frequently asked questions - -**Are new $[prodname] features always delivered in major or minor release of the Tigera Operator?** - -Usually, but not always. Sometimes new features are delivered in a patch version of the Tigera Operator. - -**How do I find the version of the Tigera Operator for troubleshooting?** - -```bash -kubectl exec -n tigera-operator -l name=tigera-operator -- operator --version -``` - -**Which CLI do I use to customize and configure $[prodname]?** - -You can use `kubectl` for all resources in the operator.tigera.io/v1 API group. - -For projectcalico.org/v3 APIs, you can use `kubectl` as long as the Tigera API server is running. Before the Tigera API server is running (for example, during product installation), you must use `calicoctl` to configure these APIs. - -## Next steps - -- To get started with installation, see [Install on clusters](../../install-on-clusters/index.mdx) -- To upgrade from a non-operator installation method, see [Upgrade](../../upgrading/upgrading-enterprise/kubernetes-upgrade-tsee/operator.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/kubernetes/quickstart.mdx b/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/kubernetes/quickstart.mdx deleted file mode 100644 index 08c38f46b9..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/kubernetes/quickstart.mdx +++ /dev/null @@ -1,185 +0,0 @@ ---- -description: Install Calico Enterprise on a single-host Kubernetes cluster for testing or development. ---- - -# Quickstart for Calico Enterprise on Kubernetes - -## Big picture - -Install $[prodname] on a single-host Kubernetes cluster in approximately 15 minutes. - -To deploy a cluster suitable for production, see [$[prodname] on Kubernetes](../kubernetes/index.mdx). - -## Before you begin - -**CNI support** - -Calico CNI for networking with $[prodname] network policy: - -The geeky details of what you get: - - - -**Required** - -A Linux host that meets the following requirements. - -- x86-64 -- 2CPU -- 12GB RAM -- 50GB free disk space -- Ubuntu Server 18.04 -- Internet access -- [Sufficient virtual memory](https://www.elastic.co/guide/en/elasticsearch/reference/current/vm-max-map-count.html) - -## How to - -- [Install Kubernetes](#install-kubernetes) -- [Install $[prodname]](#install-calico-enterprise) -- [Install the $[prodname] license](#install-the-calico-enterprise-license) -- [Log in to the $[prodname] web console](#log-in-to-calico-enterprise-manager) - -### Install Kubernetes - -1. [Follow the Kubernetes instructions to install kubeadm](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/). For a compatible version for this release, see [Support and compatibility](../../compatibility.mdx#kubernetes-kubeadm). - - :::note - - After installing kubeadm, do not power down or restart - the host. Instead, continue directly to the next step. - - ::: - -1. As a regular user with sudo privileges, open a terminal on the host that you installed kubeadm on. - -1. 
Initialize the control plane using the following command. - - ```bash - sudo kubeadm init --pod-network-cidr=192.168.0.0/16 \ - --apiserver-cert-extra-sans=127.0.0.1 - ``` - - :::note - - If 192.168.0.0/16 is already in use within your network you must select a different pod network - CIDR, replacing 192.168.0.0/16 in the above command. - - ::: - -1. Execute the following commands to configure kubectl (also returned by `kubeadm init`). - - ```bash - mkdir -p $HOME/.kube - sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config - sudo chown $(id -u):$(id -g) $HOME/.kube/config - ``` - -1. Remove the taint from the control plane to allow Kubernetes to schedule pods on the control plane node. - - ```bash - kubectl taint nodes --all node-role.kubernetes.io/control-plane- - ``` - -### Install $[prodname] - -1. [Configure a storage class for $[prodname].](../../../operations/logstorage/create-storage.mdx) - -1. Install the Tigera Operator and custom resource definitions. - - ```bash - kubectl create -f $[filesUrl]/manifests/tigera-operator.yaml - ``` - -1. Install the Prometheus operator and related custom resource definitions. The Prometheus operator will be used to deploy Prometheus server and Alertmanager to monitor $[prodname] metrics. - - :::note - - If you have an existing Prometheus operator in your cluster that you want to use, skip this step. To work with $[prodname], your Prometheus operator must be v0.40.0 or higher. - - ::: - - ```bash - kubectl create -f $[filesUrl]/manifests/tigera-prometheus-operator.yaml - ``` - -1. Install your pull secret. - - If pulling images directly from `quay.io/tigera`, you will likely want to use the credentials provided to you by your Tigera support representative. If using a private registry, use your private registry credentials instead. - - ```bash - kubectl create secret generic tigera-pull-secret \ - --type=kubernetes.io/dockerconfigjson -n tigera-operator \ - --from-file=.dockerconfigjson= - ``` - -1. Install the Tigera custom resources. For more information on configuration options available in this manifest, see [the installation reference](../../../reference/installation/api.mdx). - - ```bash - kubectl create -f $[filesUrl]/manifests/custom-resources.yaml - ``` - - Monitor progress with the following command: - - ```bash - watch kubectl get tigerastatus - ``` - - Wait until the `apiserver` shows a status of `Available`, then proceed to the next section. - -### Install the $[prodname] license - -To use $[prodname], you must install the license provided to you by Tigera. - -```bash -kubectl create -f -``` - -Monitor progress with the following command: - -```bash -watch kubectl get tigerastatus -``` - -When all components show a status of `Available`, proceed to the next section. - -### Log in to the $[prodname] web console - -1. Create network admin user "Jane". - - ```bash - kubectl create sa jane -n default - kubectl create clusterrolebinding jane-access --clusterrole tigera-network-admin --serviceaccount default:jane - ``` - -1. Create a login token for use with the $[prodname] UI. - - ``` - kubectl create token jane --duration=24h - ``` - - Copy the `token` from the above command to your clipboard for use in the next step. - - :::note - - The token created above will expire after 24 hours. - - ::: - -1. Set up a channel from your local computer to the $[prodname] UI. - - ```bash - kubectl port-forward -n tigera-manager svc/tigera-manager 9443 - ``` - - Visit https://localhost:9443/ to log in to the $[prodname] UI. 
Use the `token` from the previous step to authenticate. - -Congratulations! You now have a single-host Kubernetes cluster with $[prodname]. - -## Next steps - -- By default, your cluster networking uses IP in IP encapsulation with BGP routing. To review other networking options, - see [Determine best networking option](../../../networking/determine-best-networking.mdx). -- [Get started with $[prodname] tiered network policy](../../../network-policy/policy-tiers/tiered-policy.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/openshift/index.mdx b/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/openshift/index.mdx deleted file mode 100644 index c44af96ba3..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/openshift/index.mdx +++ /dev/null @@ -1,12 +0,0 @@ ---- -description: Install Calico on OpenShift for networking and network policy. - -hide_table_of_contents: true ---- - -# OpenShift - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/openshift/installation.mdx b/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/openshift/installation.mdx deleted file mode 100644 index f3e733e9fa..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/openshift/installation.mdx +++ /dev/null @@ -1,47 +0,0 @@ ---- -description: Install Calico Enterprise on an OpenShift 4 cluster. ---- - -import InstallOpenshiftBeforeYouBegin from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/InstallOpenshiftBeforeYouBegin'; -import InstallOpenShift from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/InstallOpenShift'; - -# Install Calico Enterprise on OpenShift - -## Big picture - -Install an OpenShift 4 cluster with $[prodname]. - -This guide augments the applicable steps in the [OpenShift documentation](https://cloud.redhat.com/openshift/install) to install $[prodname]. - -## Before you begin - - - -## How to - -1. [Create a configuration file for the OpenShift installer](#create-a-configuration-file-for-the-openshift-installer) -1. [Update the configuration file to use $[prodname]](#update-the-configuration-file-to-use-calico-enterprise) -1. [Generate the install manifests](#generate-the-install-manifests) -1. [Add an image pull secret](#add-an-image-pull-secret) -1. [Provide additional configuration](#provide-additional-configuration) -1. [Create the cluster](#create-the-cluster) -1. [Create a storage class](#create-a-storage-class) -1. [Install the $[prodname] license](#install-the-calico-enterprise-license) - - - -## Next steps - -**Recommended** - -- [Configure access to the $[prodname] web console](../../../operations/cnx/access-the-manager.mdx) -- [Authentication quickstart](../../../operations/cnx/authentication-quickstart.mdx) -- [Configure your own identity provider](../../../operations/cnx/configure-identity-provider.mdx) - -**Recommended - Networking** - -- The default networking uses IP in IP encapsulation with BGP routing. For all networking options, see [Determine best networking option](../../../networking/determine-best-networking.mdx). 
- -**Recommended - Security** - -- [Get started with $[prodname] tiered network policy](../../../network-policy/policy-tiers/tiered-policy.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/openshift/requirements.mdx b/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/openshift/requirements.mdx deleted file mode 100644 index 6f78e5c86c..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/openshift/requirements.mdx +++ /dev/null @@ -1,16 +0,0 @@ ---- -description: Review requirements for using OpenShift with Calico Enterprise. ---- - -import ReqsSys from "@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/ReqsSys"; -import ReqsKernel from "@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/ReqsKernel"; - -# System requirements - -## OpenShift requirements - -See [OpenShift Container Platform](https://docs.openshift.com/container-platform/4.15/welcome/index.html). - - - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/private-registry/index.mdx b/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/private-registry/index.mdx deleted file mode 100644 index 872170535c..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/private-registry/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: Install Calico Enterprise using a private registry. -hide_table_of_contents: true ---- - -# Install from a private registry - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/private-registry/private-registry-image-path.mdx b/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/private-registry/private-registry-image-path.mdx deleted file mode 100644 index b79627983b..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/private-registry/private-registry-image-path.mdx +++ /dev/null @@ -1,43 +0,0 @@ ---- -description: Install and configure Calico Enterprise using an image path in a private registry. ---- - -import PrivateRegistryImagePath from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/PrivateRegistryImagePath'; - -# Install from an image path in a private registry - -## Big picture - -Move Calico Enterprise container images to an image path in a private registry and configure $[prodname] to pull images from it. - -## Value - -Install Calico Enterprise in clusters where pulling from third party private repos is not an option, and all images are desired to be part of a single directory in the private registry. - -## Concepts - -A **container image registry** (often referred to as a **registry**) is a service where container images are pushed to, stored, and pulled from. A registry is said to be "private" if it requires users authenticate before accessing images. - -An **image path** is a directory in the private registry that contains images required to install $[prodname]. - -An **image pull secret** is used in Kubernetes to deploy container images from a private container image registry. - -## Before you begin... 
- -- Configure pull access to your private registry -- [Configure pull access to Tigera's private container registry](../calico-enterprise.mdx#get-private-registry-credentials-and-license-key). -- Use the [Crane command](https://github.com/google/go-containerregistry/blob/main/cmd/crane/README.md) to manage the $[prodnameWindows] images, as using `docker pull` commands for these images may not work depending on the operating system and version in which the commands are run. - -## How to - -- [Push $[prodname] images to your private registry](#push-calico-enterprise-images-to-your-private-registry-image-path) -- [Run the operator using images from your private registry](#run-the-operator-using-images-from-your-private-registry-image-path) -- [Configure the operator to use images from your private registry](#configure-the-operator-to-use-images-from-your-private-registry-image-path) - - - -:::note - -See [the Installation resource reference page](../../../reference/installation/api.mdx) for more information on the `imagePullSecrets`, `registry` and `imagePath` fields. - -::: diff --git a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/private-registry/private-registry-regular.mdx b/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/private-registry/private-registry-regular.mdx deleted file mode 100644 index c03ff3ea50..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/private-registry/private-registry-regular.mdx +++ /dev/null @@ -1,47 +0,0 @@ ---- -description: Install and configure Calico Enterprise in a private registry. ---- - -import PrivateRegistryRegular from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/PrivateRegistryRegular'; - -# Install from a private registry - -## Big picture - -Move Calico Enterprise container images to a private registry and configure $[prodname] to pull images from it. - -## Value - -Install Calico Enterprise in clusters where pulling from third party private repos is not an option, such as air-gapped clusters, or clusters with bandwidth constraints or security constraints. - -## Concepts - -A **container image registry** (often referred to as a **registry**) is a service where container images are pushed to, stored, and pulled from. A registry is said to be "private" if it requires users authenticate before accessing images. - -An **image pull secret** is used in Kubernetes to deploy container images from a private container image registry. - -## Before you begin... - -- Configure pull access to your private registry -- [Configure pull access to Tigera's private container registry](../calico-enterprise.mdx#get-private-registry-credentials-and-license-key). -- Use the [Crane command](https://github.com/google/go-containerregistry/blob/main/cmd/crane/README.md) to manage the $[prodnameWindows] images, as using `docker pull` commands for these images may not work depending on the operating system and version in which the commands are run. 
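
For reference, mirroring a single image with `crane` looks like the sketch below. The registry name is illustrative, and the actual list of images and tags to copy comes from the push step that follows.

```bash
# Copy one image from Tigera's registry to an illustrative private registry.
# crane copies the full manifest list, which is why it is preferred over
# docker pull for images that include Windows variants.
crane cp quay.io/tigera/<image>:<tag> registry.example.com/tigera/<image>:<tag>
```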
- -## How to - -- [Push $[prodname] images to your private registry](#push-calico-enterprise-images-to-your-private-registry) -- [Run the operator using images from your private registry](#run-the-operator-using-images-from-your-private-registry) -- [Configure the operator to use images from your private registry](#configure-the-operator-to-use-images-from-your-private-registry) - - - -:::note - -See [Install from an image path in a private registry](private-registry-image-path.mdx#big-picture) page for more information on installing using a private registry image path. - -::: - -:::note - -See [the Installation resource reference page](../../../reference/installation/api.mdx) for more information on the `imagePullSecrets` and `registry` fields. - -::: diff --git a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/rancher-ui.mdx b/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/rancher-ui.mdx deleted file mode 100644 index 89a058135f..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/rancher-ui.mdx +++ /dev/null @@ -1,120 +0,0 @@ ---- -description: Install Calico Enterprise on a RKE2 cluster using the Rancher UI. ---- - -# Rancher UI - -## Big picture - -Install $[prodname] on RKE2 using the Rancher UI (Rancher Manager). - -## Before you begin - -:::note - -To install $[prodname] using Rancher UI, you must provision a base RKE2 cluster with Calico Open Source, then upgrade to $[prodname]. This is required because Rancher UI does not provide an option to set the RKE2 CNI value as `none`, which is required to install a non-default CNI like $[prodname]. - -::: - -**CNI support** - -Calico CNI for networking with $[prodname] network policy: - -The geeky details of what you get: - - - -**Required** - -- A compatible cluster that can host the Rancher Manager with v2.6.5 or later - - For help, see [Rancher](https://ranchermanager.docs.rancher.com/). - -- A [Tigera license key and credentials](calico-enterprise.mdx). - -- Rancher's `kubectl` shell with access to provisioned cluster. - -## How to - -- [Prepare a Calico Open Source cluster](#prepare-a-calico-open-source-cluster) -- [Upgrade to $[prodname]](#install-calico-enterprise) - -### Prepare a Calico Open Source cluster - -1. [Provision an RKE2 cluster using Calico as the CNI and default config options](https://ranchermanager.docs.rancher.com/pages-for-subheaders/launch-kubernetes-with-rancher). -2. Validate that the RKE2 cluster is set up and running. -3. In Rancher UI, open a `kubectl` shell for the cluster, and perform the next steps. -4. Annotate the Calico Helm chart with `helmcharts.helm.cattle.io/unmanaged=true`. (This avoids Rancher resetting the CNI to Calico when the RKE2 cluster is shut down or upgraded.) - - ```bash - kubectl annotate helmchart -n kube-system rke2-calico helmcharts.helm.cattle.io/unmanaged=true && \ - kubectl annotate helmchart -n kube-system rke2-calico-crd helmcharts.helm.cattle.io/unmanaged=true - ``` - -5. SSH to all the control plane nodes and rename `rke2-calico.yaml` in the `/var/lib/rancher/rke2/server/manifests/` directory to `rke2-calico.yaml.skip`. - - ```bash - sudo mv /var/lib/rancher/rke2/server/manifests/rke2-calico.yaml /var/lib/rancher/rke2/server/manifests/rke2-calico.yaml.skip - ``` - -6. Patch the Calico `Installation` resource to remove the image path prefix. 
- - ```bash - kubectl patch installation default --type='json' -p='[{"op": "remove", "path": "/spec/imagePath"},{"op": "remove", "path": "/spec/imagePrefix"}]' - ``` - -7. Create ClusterRole and ClusterRoleBinding to allow Tigera Operator to update Pod Security Admission. - - ```bash - kubectl create -f - < - -**Required** - -- A [compatible RKE cluster](../compatibility.mdx#rke) - - For help, see [Rancher Kubernetes Engine cluster](https://rancher.com/docs/rke/latest/en/). Note that RKE2 is a different Kubernetes distribution and [documented separately](rke2.mdx). - -- Configure your cluster for $[prodname] CNI - - - Create a [Cluster Config File](https://ranchermanager.docs.rancher.com/reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration#rke-cluster-config-file-reference). In the config file under `network`, set the [network plugin](https://rancher.com/docs/rke/latest/en/config-options/add-ons/network-plugins/) to `plugin: none`. - - :::note - - You cannot use the Rancher UI to set the RKE CNI set to "none". - - ::: - -- Cluster meets [system requirements](requirements.mdx) - -- A [Tigera license key and credentials](calico-enterprise.mdx). - -- A `kubectl` environment with access to your cluster - - Use [Rancher kubectl Shell](https://ranchermanager.docs.rancher.com/how-to-guides/new-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig#accessing-clusters-with-kubectl-shell-in-the-rancher-ui) for access - - Ensure you have the [Kubeconfig file that was generated when you created the cluster](https://rancher.com/docs/rke/latest/en/installation/#save-your-files). -- If using a Kubeconfig file locally, [install and set up the Kubectl CLI tool](https://kubernetes.io/docs/tasks/tools/install-kubectl/). - -## How to - -- [Install $[prodname]](#install-calico-enterprise) -- [Install the $[prodname] license](#install-the-calico-enterprise-license) - -### Install $[prodname] - -1. [Configure a storage class for $[prodname].](../../operations/logstorage/create-storage.mdx). - -1. Install the Tigera Operator and custom resource definitions. - - ```bash - kubectl create -f $[filesUrl]/manifests/tigera-operator.yaml - ``` - -1. Install the Prometheus operator and related custom resource definitions. The Prometheus operator is used to deploy Prometheus server and Alertmanager to monitor $[prodname] metrics. - - :::note - - If you have an existing Prometheus operator in your cluster that you want to use, skip this step. To work with $[prodname], your Prometheus operator must be v0.40.0 or higher. - - ::: - - ```bash - kubectl create -f $[filesUrl]/manifests/tigera-prometheus-operator.yaml - ``` - -1. Install your pull secret. - - If pulling images directly from `quay.io/tigera`, you will likely want to use the credentials provided to you by your Tigera support representative. If using a private registry, use your private registry credentials instead. - - ```bash - kubectl create secret generic tigera-pull-secret \ - --type=kubernetes.io/dockerconfigjson -n tigera-operator \ - --from-file=.dockerconfigjson= - ``` - -1. Install any extra [$[prodname] resources](../../reference/resources/index.mdx) needed at cluster start using [calicoctl](../../reference/clis/calicoctl/overview.mdx). - -1. Install the Tigera custom resources. For more information on configuration options available in this manifest, see [the installation reference](../../reference/installation/api.mdx). 
- - ```bash - kubectl create -f $[filesUrl]/manifests/custom-resources.yaml - ``` - - Monitor progress with the following command: - - ```bash - watch kubectl get tigerastatus - ``` - - Wait until the `apiserver` shows a status of `Available`, then proceed to the next section. - -### Install the $[prodname] license - -```bash -kubectl create -f -``` - -Monitor progress with the following command: - -```bash -watch kubectl get tigerastatus -``` - -## Next steps - -**Recommended** - -- [Configure access to the $[prodname] web console](../../operations/cnx/access-the-manager.mdx) -- [Authentication quickstart](../../operations/cnx/authentication-quickstart.mdx) -- [Configure your own identity provider](../../operations/cnx/configure-identity-provider.mdx) - -**Recommended - Networking** - -- The default networking uses IP in IP encapsulation with BGP routing. For all networking options, see [Determine best networking option](../../networking/determine-best-networking.mdx). - -**Recommended - Security** - -- [Get started with $[prodname] tiered network policy](../../network-policy/policy-tiers/tiered-policy.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/requirements.mdx b/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/requirements.mdx deleted file mode 100644 index 798ee4eb8d..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/requirements.mdx +++ /dev/null @@ -1,40 +0,0 @@ ---- -description: Review requirements to install Calico Enterprise networking and network policy. ---- - -import ReqsSys from "@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/ReqsSys"; -import ReqsKernel from "@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/ReqsKernel"; - -# System requirements - - - -## CNI plug-in enabled - -$[prodname] must be installed as a CNI plugin in the container runtime. - -This installation must use the Kubernetes default CNI configuration directory (`/etc/cni/net.d`) and binary directory (`/opt/cni/bin`). - -## Other network providers - -Generally, you cannot use $[prodname] together with another network provider. - -Notable exceptions include certain platform-specific CNIs, such as the [AWS VPC CNI](https://github.com/aws/amazon-vpc-cni-k8s/blob/master/README.md) and [Azure VNET CNI](https://github.com/Azure/azure-container-networking/blob/master/docs/cni.md) plugins. - -If you're working with a cluster that already uses another CNI, you cannot migrate to $[prodname]. - -## Supported kube-proxy modes - -$[prodname] supports the following kube-proxy modes: - -- `iptables` (default) - -### IP pool configuration - -The IP range selected for pod IP addresses cannot overlap with any other -IP ranges in your network, including: - -- The Kubernetes service cluster IP range -- The range from which host IPs are allocated - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/rke2.mdx b/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/rke2.mdx deleted file mode 100644 index beb4cbc014..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/rke2.mdx +++ /dev/null @@ -1,125 +0,0 @@ ---- -description: Install Calico Enterprise on an RKE2 cluster. ---- - -# RKE2 - -## Big picture - -Install $[prodname] on RKE2 (RKE Government) clusters. 
- -## Before you begin - -**CNI support** - -Calico CNI for networking with $[prodname] network policy: - -The geeky details of what you get: - - - -**Required** - -- A [compatible RKE2 cluster](../compatibility.mdx#rke2) with 2.6.5 or later - - For help, see [Rancher Kubernetes Engine cluster](https://rancher.com/docs/rke/latest/en/). - -- [Configure cluster with no CNI plugin](https://docs.rke2.io/install/configuration) using any of these methods: - - - RKE2 CLI: `--cni none` - - Install script: `RKE2_CNI=none` - - [Configuration file](https://docs.rke2.io/install/configuration#configuration-file): `cni: none` - -- Cluster meets [system requirements](requirements.mdx) - -- A [Tigera license key and credentials](calico-enterprise.mdx). - -- A `kubectl` environment with access to your cluster - - Ensure you have the [Kubeconfig file that was generated when you created the cluster](https://docs.rke2.io/cluster_access). - -- If using a Kubeconfig file locally, [install and set up the Kubectl CLI tool](https://kubernetes.io/docs/tasks/tools/install-kubectl/). - -## How to - -- [Install $[prodname]](#install-calico-enterprise) -- [Install the $[prodname] license](#install-the-calico-enterprise-license) - -### Install $[prodname] - -1. [Configure a storage class for $[prodname].](../../operations/logstorage/create-storage.mdx). - -1. Install the Tigera Operator and custom resource definitions. - - ```bash - kubectl create -f $[filesUrl]/manifests/tigera-operator.yaml - ``` - -1. Install the Prometheus operator and related custom resource definitions. The Prometheus operator is used to deploy Prometheus server and Alertmanager to monitor $[prodname] metrics. - - :::note - - If you have an existing Prometheus operator in your cluster that you want to use, skip this step. To work with $[prodname], your Prometheus operator must be v0.40.0 or higher. - - ::: - - ```bash - kubectl create -f $[filesUrl]/manifests/tigera-prometheus-operator.yaml - ``` - -1. Install your pull secret. - - If pulling images directly from `quay.io/tigera`, you can use the credentials provided to you by your Tigera support representative. If using a private registry, use your private registry credentials instead. - - ```bash - kubectl create secret generic tigera-pull-secret \ - --type=kubernetes.io/dockerconfigjson -n tigera-operator \ - --from-file=.dockerconfigjson= - ``` - -1. Install any extra [Calico resources](../../reference/resources/index.mdx) needed at cluster start using [calicoctl](../../reference/clis/calicoctl/overview.mdx). - -1. Install the Tigera custom resources. For more information on configuration options available, see [the installation reference](../../reference/installation/api.mdx). - - ```bash - kubectl create -f $[filesUrl]/manifests/rancher/custom-resources-rke2.yaml - ``` - - Monitor progress with the following command: - - ```bash - watch kubectl get tigerastatus - ``` - - Wait until the `apiserver` shows a status of `Available`, then proceed to the next section. 
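
If you would rather script this wait than watch interactively, the condition can be polled directly. A sketch, assuming the TigeraStatus resource reports the standard `Available` condition:

```bash
# Block until the Tigera API server component reports Available (up to 10 minutes).
kubectl wait --for=condition=Available tigerastatus/apiserver --timeout=10m
```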
- -### Install the $[prodname] license - -```bash -kubectl create -f -``` - -Monitor progress with the following command: - -```bash -watch kubectl get tigerastatus -``` - -## Next steps - -**Recommended** - -- [Configure access to the $[prodname] web console](../../operations/cnx/access-the-manager.mdx) -- [Authentication quickstart](../../operations/cnx/authentication-quickstart.mdx) -- [Configure your own identity provider](../../operations/cnx/configure-identity-provider.mdx) - -**Recommended - Networking** - -- The default networking uses VXLAN encapsulation with BGP routing. For all networking options, see [Determine best networking option](../../networking/determine-best-networking.mdx). - -**Recommended - Security** - -- [Get started with $[prodname] tiered network policy](../../network-policy/policy-tiers/tiered-policy.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/tkg.mdx b/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/tkg.mdx deleted file mode 100644 index 9f4937d055..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/tkg.mdx +++ /dev/null @@ -1,59 +0,0 @@ ---- -description: Install Calico Enterprise on Tanzu Kubernetes Grid. ---- - -import InstallGeneric from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/InstallGeneric'; - -# Tanzu Kubernetes Grid (TKG) - -## Big picture - -Install $[prodname] on a Tanzu Kubernetes Grid cluster. - -## Before you begin - -**CNI support** - -Calico CNI for networking with $[prodname] network policy: - -The geeky details of what you get: - - - -**Required** - -- A [compatible TKG cluster](../compatibility.mdx#tkg) -- Configure your cluster for $[prodname] CNI - The workload cluster must be configured with `CNI: none`. When the workload cluster is bootstrapped, the nodes will be in a `NotReady` state until $[prodname] is installed. For more information, see [Tanzu networking](https://docs.vmware.com/en/VMware-Tanzu-Kubernetes-Grid/1.4/vmware-tanzu-kubernetes-grid-14/GUID-tanzu-k8s-clusters-networking.html) and [Tanzu configuration file reference](https://docs.vmware.com/en/VMware-Tanzu-Kubernetes-Grid/1.4/vmware-tanzu-kubernetes-grid-14/GUID-tanzu-config-reference.html). - -- Cluster meets the [system requirements](requirements.mdx) -- A [Tigera license key and credentials](calico-enterprise.mdx) -- If using AWS, EC2 instances must be configured to belong to a separate SecurityGroup with ingress rules: - - Calico (BGP) TCP 179 - - Calico (Typha) TCP 5473 - - Web console Prometheus metrics TCP 9081 - - Web console BGP metrics TCP 9900 - -## How to - -- [Install $[prodname]](#install-calico-enterprise) -- [Install $[prodname] license](#install-calico-enterprise-license) - - - -## Next steps - -**Recommended** - -- [Configure access to the $[prodname] web console](../../operations/cnx/access-the-manager.mdx) - -**Recommended - Networking** - -- The default networking uses IP in IP encapsulation with BGP routing. For all networking options, see [Determine best networking option](../../networking/determine-best-networking.mdx). 
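
Because the default networking relies on BGP, it can be useful to confirm that BGP sessions between nodes are established once the cluster is up. A sketch, run directly on a node (not inside a pod), assuming `calicoctl` is installed there:

```bash
# Show this node's BGP peers and whether each session is Established.
sudo calicoctl node status
```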
- -**Recommended - Security** - -- [Get started with $[prodname] tiered network policy](../../network-policy/policy-tiers/tiered-policy.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/windows-calico/demo.mdx b/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/windows-calico/demo.mdx deleted file mode 100644 index 509d90570e..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/windows-calico/demo.mdx +++ /dev/null @@ -1,630 +0,0 @@ ---- -description: An interactive demo to show how to apply basic network policy to pods in a Calico Enterprise for Windows cluster. ---- - -# Basic policy demo - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - -This guide provides a simple demo to illustrate basic pod-to-pod connectivity and the application of network policy in a $[prodnameWindows] cluster. We will create client and server pods on Linux and Windows nodes, verify connectivity between the pods, and then we'll apply a basic network policy to isolate pod traffic. - -## Prerequisites - -To run this demo, you will need a [$[prodnameWindows] cluster](operator.mdx) with -Windows Server 1809 (build 17763.1432 August 2020 update or newer). More recent versions of Windows Server can be used with a change to the demo manifests, namely the version tags of the Windows images, e.g.: `servercore:ltsc2022` instead of `servercore:1809`. - -:::note - -Windows Server 1809 (build older than 17763.1432) do not support [direct server return](https://techcommunity.microsoft.com/t5/networking-blog/direct-server-return-dsr-in-a-nutshell/ba-p/693710). This means that policy support is limited to only pod IP addresses. - -::: - - - - -## Create pods on Linux nodes - -First, create a client (busybox) and server (nginx) pod on the Linux nodes: - -```bash -kubectl apply -f - < 80 -``` - -To combine both of the above steps: - -```bash -kubectl exec -n calico-demo busybox -- nc -vz $(kubectl get po porter -n calico-demo -o 'jsonpath={.status.podIP}') 80 -``` - -If the connection from the busybox pod to the porter pod succeeds, we will get output similar to the following: - -``` -192.168.40.166 (192.168.40.166:80) open -``` - -Now let's verify that the powershell pod can reach the nginx pod: - -```bash -kubectl exec -n calico-demo pwsh -- powershell Invoke-WebRequest -Uri http://$(kubectl get po nginx -n calico-demo -o 'jsonpath={.status.podIP}') -UseBasicParsing -TimeoutSec 5 -``` - -If the connection succeeds, we will get output similar to: - -``` -StatusCode : 200 -StatusDescription : OK -Content : - - - Welcome to nginx! - - <... -... -``` - -Finally, let's verify that the powershell pod can reach the porter pod: - -```bash -kubectl exec -n calico-demo pwsh -- powershell Invoke-WebRequest -Uri http://$(kubectl get po porter -n calico-demo -o 'jsonpath={.status.podIP}') -UseBasicParsing -TimeoutSec 5 -``` - -If that succeeds, we will see something like: - -``` -StatusCode : 200 -StatusDescription : OK -Content : This is a $[prodnameWindows] demo. -RawContent : HTTP/1.1 200 OK - Content-Length: 49 - Content-Type: text/plain; charset=utf-8 - Date: Fri, 21 Aug 2020 22:45:46 GMT - - This is a $[prodnameWindows] demo. 
-Forms : -Headers : {[Content-Length, 49], [Content-Type, text/plain; - charset=utf-8], [Date, Fri, 21 Aug 2020 22:45:46 GMT]} -Images : {} -InputFields : {} -Links : {} -ParsedHtml : -RawContentLength : 49 -``` - -## Apply policy to the Windows client pod - -Now let's apply a basic network policy that allows only the busybox pod to reach the porter pod. - -```bash -kubectl apply -f - < - - -## Installing kubectl on Windows - -To run the commands in this demo you need the Windows version of kubectl installed and add it to the system path. -[Install kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl-windows/) and move the kubectl binary to **c:\k**. - -Add `c:\k` to the system path - -1. Open a PowerShell window as Administrator - - ```powershell - $env:Path += ";C:\k" - ``` - -1. Close all PowerShell windows. - -## Create pods on Linux nodes - -First, create a client (busybox) and server (nginx) pod on the Linux nodes. - -### Create a YAML file policy-demo-linux.yaml using your favorite editor on Windows - -```yaml -apiVersion: v1 -kind: Namespace -metadata: - name: calico-demo - ---- -apiVersion: v1 -kind: Pod -metadata: - labels: - app: busybox - name: busybox - namespace: calico-demo -spec: - containers: - - args: - - /bin/sh - - -c - - sleep 360000 - image: busybox:1.28 - imagePullPolicy: Always - name: busybox - nodeSelector: - kubernetes.io/os: linux - ---- -apiVersion: v1 -kind: Pod -metadata: - labels: - app: nginx - name: nginx - namespace: calico-demo -spec: - containers: - - name: nginx - image: nginx:1.8 - ports: - - containerPort: 80 - nodeSelector: - kubernetes.io/os: linux -``` - -### Apply the policy-demo-linux.yaml file to the Kubernetes cluster - -1. Open a PowerShell window. -1. Use `kubectl` to apply the `policy-demo-linux.yaml` configuration. - -```powershell -kubectl apply -f policy-demo-linux.yaml -``` - -## Create pods on Window nodes - -Next, we’ll create a client (pwsh) and server (porter) pod on the Windows nodes. -:::note - -The pwsh and porter pod manifests below use images based on mcr.microsoft.com/windows/servercore:1809. If you are using a more recent Windows Server version, update the manifests to use a servercore image that matches your Windows Server version. - -::: - -### Create the policy-demo-windows.yaml using your favorite editor on Windows - -```yaml -apiVersion: v1 -kind: Pod -metadata: - name: pwsh - namespace: calico-demo - labels: - app: pwsh -spec: - containers: - - name: pwsh - image: mcr.microsoft.com/windows/servercore:1809 - args: - - powershell.exe - - -Command - - 'Start-Sleep 360000' - imagePullPolicy: IfNotPresent - nodeSelector: - kubernetes.io/os: windows ---- -apiVersion: v1 -kind: Pod -metadata: - name: porter - namespace: calico-demo - labels: - app: porter -spec: - containers: - - name: porter - image: calico/porter:1809 - ports: - - containerPort: 80 - env: - - name: SERVE_PORT_80 - value: This is a $[prodnameWindows] demo. - imagePullPolicy: IfNotPresent - nodeSelector: - kubernetes.io/os: windows -``` - -### Apply the policy-demo-windows.yaml file to the Kubernetes cluster - -1. Open a PowerShell window. -1. Use `kubectl` to apply the `policy-demo-windows.yaml` configuration - -```powershell -kubectl apply -f policy-demo-windows.yaml -``` - -### Verify four pods have been created and are running - -:::note - -Launching the Windows pods is going to take some time depending on your network download speed. - -::: - -1. Open a powershell window. -1. Using `kubectl` to list the pods in the `calico-demo` namespace. 
- -```powershell -kubectl get pods --namespace calico-demo -``` - -You should see something like the below - -```output -NAME READY STATUS RESTARTS AGE -busybox 1/1 Running 0 4m14s -nginx 1/1 Running 0 4m14s -porter 0/1 ContainerCreating 0 74s -pwsh 0/1 ContainerCreating 0 2m9s -``` - -Repeat the command every few minutes until the output shows all 4 pods in the Running state. - -```output -NAME READY STATUS RESTARTS AGE -busybox 1/1 Running 0 7m24s -nginx 1/1 Running 0 7m24s -porter 1/1 Running 0 4m24s -pwsh 1/1 Running 0 5m19s -``` - -### Check connectivity between pods on Linux and Windows nodes - -Now that client and server pods are running on both Linux and Windows nodes, let’s verify that client pods on Linux nodes can reach server pods on Windows nodes. - -1. Open a PowerShell window. -1. Using `kubectl` to determine the porter pod IP address: - - ```powershell - kubectl get pod porter --namespace calico-demo -o 'jsonpath={.status.podIP}' - ``` - -1. Log into the busybox pod and try reaching the porter pod on port 80. Replace the `` tag with the IP address returned from the previous command. - - ```powershell - kubectl exec --namespace calico-demo busybox -- nc -vz 80 - ``` - - :::note - - You can also combine both of the above steps: - - ::: - - ```powershell - kubectl exec --namespace calico-demo busybox -- nc -vz $(kubectl get pod porter --namespace calico-demo -o 'jsonpath={.status.podIP}') 80 - ``` - - If the connection from the busybox pod to the porter pod succeeds, you will get output similar to the following: - - ```powershell - 192.168.40.166 (192.168.40.166:80) open - ``` - - :::note - - The IP addresses returned will vary depending on your environment setup. - - ::: - -1. Now you can verify that the pwsh pod can reach the nginx pod: - - ```powershell - kubectl exec --namespace calico-demo pwsh -- powershell Invoke-WebRequest -Uri http://$(kubectl get po nginx -n calico-demo -o 'jsonpath={.status.podIP}') -UseBasicParsing -TimeoutSec 5 - ``` - - If the connection succeeds, you will see output similar to: - - ``` - StatusCode : 200 - StatusDescription : OK - Content : - - - Welcome to nginx! - - <... - ``` - -1. Verify that the pwsh pod can reach the porter pod: - - ```powershell - kubectl exec --namespace calico-demo pwsh -- powershell Invoke-WebRequest -Uri http://$(kubectl get po porter -n calico-demo -o 'jsonpath={.status.podIP}') -UseBasicParsing -TimeoutSec 5 - ``` - - If that succeeds, you will see something like: - - ``` - StatusCode : 200 - StatusDescription : OK - Content : This is a Calico Enterprise for Windows demo. - RawContent : HTTP/1.1 200 OK - Content-Length: 49 - Content-Type: text/plain; charset=utf-8 - Date: Fri, 21 Aug 2020 22:45:46 GMT - - This is a Calico Enterprise for Windows demo. - Forms : - Headers : {[Content-Length, 49], [Content-Type, text/plain; - charset=utf-8], [Date, Fri, 21 Aug 2020 22:45:46 GMT]} - Images : {} - InputFields : {} - Links : {} - ParsedHtml : - RawContentLength : 49 - - ``` - -You have now verified that communication is possible between all pods in the application. - -## Apply policy to the Windows client pod - -In a real world deployment you would want to make sure only pods that are supposed to communicate with each other, are actually allowed to do so. - -To achieve this you will apply a basic network policy which allows only the busybox pod to reach the porter pod. 
- -### Create the network-policy.yaml file using your favorite editor on Windows - -```yaml -apiVersion: networking.k8s.io/v1 -kind: NetworkPolicy -metadata: - name: allow-busybox - namespace: calico-demo -spec: - podSelector: - matchLabels: - app: porter - policyTypes: - - Ingress - ingress: - - from: - - podSelector: - matchLabels: - app: busybox - ports: - - protocol: TCP - port: 80 -``` - -### Apply the network-policy.yaml file - -1. Open a PowerShell window. -1. Use `kubectl` to apply the network-policy.yaml file. - -```powershell -kubectl apply -f network-policy.yaml -``` - -### Verify the policy is in effect - -With the policy in place, the busybox pod should still be able to reach the porter pod: -:::note - -We will be using the combined command line from earlier in this chapter. - -::: - -```powershell -kubectl exec --namespace calico-demo busybox -- nc -vz $(kubectl get po porter -n calico-demo -o 'jsonpath={.status.podIP}') 80 -``` - -However, the pwsh pod will not able to reach the porter pod: - -```powershell -kubectl exec --namespace calico-demo pwsh -- powershell Invoke-WebRequest -Uri http://$(kubectl get po porter -n calico-demo -o 'jsonpath={.status.podIP}') -UseBasicParsing -TimeoutSec 5 -``` - -The request times out with a message like the below: - -```powershell -Invoke-WebRequest : The operation has timed out. -At line:1 char:1 -+ Invoke-WebRequest -Uri http://192.168.40.166 -UseBasicParsing -Timeout ... -+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - + CategoryInfo : InvalidOperation: (System.Net.HttpWebRequest:Htt -pWebRequest) [Invoke-WebRequest], WebException - + FullyQualifiedErrorId : WebCmdletWebResponseException,Microsoft.PowerShell.Commands.InvokeWebRequestCommand -command terminated with exit code 1 -``` - -## Wrap up - -In this demo we’ve configured pods on Linux and Windows nodes, verified basic pod connectivity, and tried a basic network policy to isolate pod to pod traffic. -As the final step you can clean up all of the demo resources: - -1. Open a PowerShell window. - -```powershell -kubectl delete namespace calico-demo -``` - - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/windows-calico/dnspolicy.mdx b/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/windows-calico/dnspolicy.mdx deleted file mode 100644 index c3a3f01d55..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/windows-calico/dnspolicy.mdx +++ /dev/null @@ -1,45 +0,0 @@ ---- -description: Configure DNS policy for Calico Enterprise for Windows workloads. ---- - -# Configure DNS policy for workloads - -:::note - -This feature is tech preview. Tech preview features may be subject to significant changes before they become GA. - -::: - -## Big picture - -Configure $[prodnameWindows] domain based policy (DNS policy) for Windows workloads in Kubernetes clusters. - -## Value - -Using domain names in policies to identify services outside of the cluster is often operationally simpler and more robust than using IP -addresses. In particular, they are useful when an external service does not map to a well known set of static IP addresses. - -## Before you begin - -**Limitations** - -$[prodname] supports DNS policy on Windows with these limitations: - -- It could take up to 5 seconds for the first TCP SYN packet to go through, for a connection to a DNS domain name. This is because DNS policies are dynamically programmed. 
The first TCP packet could be dropped since there is no policy to allow it until $[prodnameWindows] detects domain IPs from DNS response and programs DNS policy rules. The Windows TCPIP stack will send SYN again after TCP Retransmission timeout (RTO) if previous SYN has been dropped. -- Some runtime libraries do not honour DNS TTL. Instead, they manage their own DNS cache which has a different TTL value for DNS entries. On .NET Framework, the value to control DNS TTL is ServicePointManager.DnsRefreshTimeout which has default value of 120 seconds - [DNS refresh timeout](https://docs.microsoft.com/en-us/dotnet/api/system.net.servicepointmanager.dnsrefreshtimeout). It is important that $[prodnameWindows] uses a longer TTL value than the one used by the application, so that DNS policy will be in place when the application is making outbound connections. The configuration item “WindowsDNSExtraTTL” should have a value bigger than the maximum value of DNS TTL used by the runtime libraries for your applications. -- Due to the limitations of Windows container networking, a policy update could have an impact on performance. Programming DNS policy may result in more policy updates. Setting “WindowsDNSExtraTTL” to a bigger number will reduce the performance impact. - -## How to - -$[prodnameWindows] DNS policy is enabled by default and is configured the same way as [DNS policy for Linux](../../../network-policy/domain-based-policy.mdx). Use the following Windows-specific parameters for specifying the file to preserve learned DNS information and extra TTL in addition to DNS TTL. - -### Felix configurations - -| Field | Description | Accepted Values | Schema | Default | -| ------------------- | ------------------------------------------------------------------------------------------------------------------------------------- | --------------- | ------ | --------------------------------------- | -| WindowsDNSCacheFile | Specify the name of the file that $[prodnameWindows] uses to preserve learned DNS information when restarting. | string | string | `c:\\TigeraCalico\\felix-dns-cache.txt` | -| WindowsDNSExtraTTL | Specify extra time in seconds to keep IPs and alias names that are learned from DNS, in addition to each name or IP's advertised TTL. | int | int | `120` | - -## Additional resources - -- [DNS policy](../../../network-policy/domain-based-policy.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/windows-calico/flowlogs.mdx b/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/windows-calico/flowlogs.mdx deleted file mode 100644 index 2f96802c7d..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/windows-calico/flowlogs.mdx +++ /dev/null @@ -1,47 +0,0 @@ ---- -description: Configure flow logs for Calico Enterprise for Windows workloads. ---- - -# Configure flow logs for workloads - -## Big picture - -Configure $[prodnameWindows] flow log data for visibility and troubleshooting Windows workloads in Kubernetes clusters. - -## Value - -$[prodnameWindows] includes a fully-integrated deployment of Elasticsearch to collect flow log data that drives -key features like Flow Visualizer, metrics in the dashboard and Policy Board, policy automation and testing features, and security. 
- -## Before you begin - -**Limitations** - -$[prodnameWindows] provides the same support for flow logs as Linux, with these exceptions: - -- No packet/bytes stats for denied traffic -- No DNS stats -- No HTTP stats -- No RuleTrace for tiers -- No BGP logs -- No support for syslog archiving - -## How to - -$[prodnameWindows] flow logs are enabled and configured the same way as [Flow logs for Linux](../../../observability/elastic/overview.mdx). Use the following Windows-specific parameters for specifying file directories and paths. - -### Felix configurations - -| Field | Description | Accepted Values | Schema | Default | -| ------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------- | ------ | ------------------------------------------- | -| windowsFlowLogsFileDirectory | Set the directory where flow logs files are stored on Windows nodes. This parameter takes effect only when `flowLogsFileEnabled` is set to `true`. | string | string | `c:\\TigeraCalico\\flowlogs` | -| windowsFlowLogsPositionFilePath | Specify the position of the external pipeline that reads flow logs on Windows nodes. This parameter takes effect only when `FlowLogsDynamicAggregationEnabled` is set to `true`. | string | string | `c:\\TigeraCalico\\flowlogs\\flows.log.pos` | -| windowsStatsDumpFilePath | Specify the position of the file used for dumping flow log statistics on Windows nodes. Note this is an internal setting that you should not need to modify. | string | string | `c:\\TigeraCalico\\stats\\dump` | - -## Additional resources - -- [Configure RBAC for Elasticsearch logs](../../../observability/elastic/rbac-elasticsearch.mdx) -- [Configure flow log aggregation](../../../observability/elastic/flow/aggregation.mdx) -- [Log storage recommendations](../../../operations/logstorage/log-storage-recommendations.mdx) -- [Archive logs](../../../observability/elastic/archive-storage.mdx) -- [Log collection options](../../../reference/installation/api.mdx#logcollectorspec) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/windows-calico/index.mdx b/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/windows-calico/index.mdx deleted file mode 100644 index aaaf632b9b..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/windows-calico/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: Install and configure Calico Enterprise for Windows. -hide_table_of_contents: true ---- - -# Calico Enterprise for Windows - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/windows-calico/limitations.mdx b/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/windows-calico/limitations.mdx deleted file mode 100644 index e3d868681d..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/windows-calico/limitations.mdx +++ /dev/null @@ -1,208 +0,0 @@ ---- -description: Review limitations before starting installation. 
---- - -# Limitations and known issues - -## $[prodnameWindows] feature limitations - -| Feature | Unsupported in this release | -| ------------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| Platforms | - TKG
    - GKE | -| Install and upgrade | - Non-cluster hosts
    - Typha component for scaling (Linux-based feature) | -| Networking | - Overlay mode with BGP peering
    - IP in IP overlay with BGP routing
    - Cross-subnet support and MTU setting for VXLAN
    - IPv6 and dual stack
    - Dual-ToR
    - Service advertisement
    - Multiple networks to pods |
-| Policy                         | - Staged network policy
    - Firewall integrations
    - Policy for hosts (host endpoints, including automatic host endpoints)
    - Tiered policy: TKG, GKE, AKS
    - WAF integration
    - AWS firewall integration
    - Fortinet integration | -| Visibility and troubleshooting | - Packet capture
    - DNS logs
    - iptables logs
    - L7 logs | -| Threat defense | - Honeypods
    - DPI
    - Performance hotspots
    - WAF | -| Multi-cluster management | - Multi-cluster management federated identity endpoints and services
    - Federated endpoint identity and services | -| Compliance and security | - CIS benchmark and other reports
    - WireGuard encryption for pod-to-pod traffic and host-to-host traffic |
-| Data plane                     | - eBPF is a Linux-based feature |
-
-## $[prodname] BGP networking limitations
-
-If you are using $[prodname] with BGP, note these current limitations with Windows.
-
-| Feature                  | Limitation |
-| ------------------------ | ---------- |
-| IP mobility/borrowing    | $[prodname] IPAM allocates IPs to hosts in blocks for aggregation purposes.
    If the IP pool is full, nodes can also "borrow" IPs from another node's block. In BGP terms, the borrower then advertises a more specific "/32" route for the borrowed IP and traffic for that IP is only routed to the borrowing host.

    Windows nodes do not support this borrowing mechanism; they will not borrow IPs even if the IP pool is full and they mark their blocks so that Linux nodes will not borrow from them. | -| IPs reserved for Windows | $[prodname] IPAM allocates IPs in CIDR blocks. Due to networking requirements on Windows, four IPs per Windows node-owned block must be reserved for internal purposes.

    For example, with the default block size of /26, each block contains 64 IP addresses, of which 4 are reserved for Windows, leaving 60 for pod networking.

    To reduce the impact of these reservations, a larger block size can be configured at the IP pool scope (before any pods are created). | -| Single IP block per host | $[prodname] IPAM is designed to allocate blocks of IPs (default size /26) to hosts on demand. While the $[prodname] CNI plugin was written to do the same, kube-proxy for Windows currently only supports a single IP block per host.

    To work around the default limit of one /26 per host, there are some options:

    - Use $[prodname] BGP networking with the kubernetes datastore. In that mode, $[prodname] IPAM is not used and the CNI host-local IPAM plugin is used with the node's Pod CIDR.

    To allow multiple IPAM blocks per host (at the expense of kube-proxy compatibility), set the `windows_use_single_network` flag to `false` in the `cni.conf.template` before installing $[prodname]. Changing that setting after pods are networked is not recommended because it may leak HNS endpoints. | -| IP-in-IP overlay | $[prodname]'s IPIP overlay mode cannot be used in clusters that contain Windows nodes because Windows does not support IP-in-IP. | -| NAT-outgoing | $[prodname] IP pools support a "NAT outgoing" setting with the following behaviour:

    - Traffic between $[prodname] workloads (in any IP pools) is not NATted.
    - Traffic leaving the configured IP pools is NATted if the workload has an IP within an IP pool that has NAT outgoing enabled. $[prodnameWindows] honors the above setting but it is only applied at pod creation time. If the IP pool configuration is updated after a pod is created, the pod's traffic will continue to be NATted (or not) as before. NAT policy for newly-networked pods will honor the new configuration. $[prodnameWindows] automatically adds the host itself and its subnet to the NAT exclusion list. This behaviour can be disabled by setting flag `windows_disable_host_subnet_nat_exclusion` to `true` in `cni.conf.template` before running the install script. | -| Service IP advertisement | This $[prodname] feature is not supported on Windows. | - -### Check your network configuration - -If you are using a networking type that requires layer 2 reachability (such as $[prodname] with a BGP mesh and no peering to your fabric), you can check that your network has layer 2 reachability as follows: - -On each of your nodes, check the IP network of the network adapter that you plan to use for pod networking. For example, on Linux, assuming your network adapter is eth0, you can run: - -``` -$ ip addr show eth0 - 2: eth0: mtu 1500 qdisc pfifo_fast state UP group default qlen 1000 - - link/ether 00:0c:29:cb:c8:19 brd ff:ff:ff:ff:ff:ff - inet 192.168.171.136/24 brd 192.168.171.255 scope - - global eth0 - valid_lft forever preferred_lft forever - inet6 fe80::20c:29ff:fecb:c819/64 scope - link - - valid_lft forever preferred_lft - forever -``` - -In this case, the IPv4 is 192.168.171.136/24; which, after applying the /24 mask gives 192.168.171.0/24 for the IP network. - -Similarly, on Windows, you can run - -``` -PS C:\> ipconfig - -Windows IP Configuration - -Ethernet adapter vEthernet (Ethernet 2): - - Connection-specific DNS Suffix . : - us-west-2.compute.internal Link-local IPv6 Address . . . . - . : fe80::6d10:ccdd:bfbe:bce2%15 IPv4 Address. . . . . . . - . . . . : 172.20.41.103 Subnet Mask . . . . . . . . . . . - : 255.255.224.0 Default Gateway . . . . . . . . . : - 172.20.32.1 - -``` - -In this case, the IPv4 address is 172.20.41.103 and the mask is represented as bytes 255.255.224.0 rather than CIDR notation. Applying the mask, we get a network address 172.20.32.0/19. - -Because the Linux node has network 192.168.171.136/24 and the Windows node has a different network, 172.20.32.0/19, they are unlikely to be on the same layer 2 network. - -## VXLAN networking limitations - -Because of differences between the Linux and Windows data plane feature sets, the following $[prodname] features are not supported on Windows. - -| Feature | Limitation | -| ------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| IPs reserved for Windows | $[prodname] IPAM allocates IPs in CIDR blocks. Due to networking requirements on Windows, four IPs per Windows node-owned block must be reserved for internal purposes.

    For example, with the default block size of /26, each block contains 64 IP addresses, of which 4 are reserved for Windows, leaving 60 for pod networking.

    To reduce the impact of these reservations, a larger block size can be configured at the IP pool scope (before any pods are created). | -| Single IP block per host | $[prodname] IPAM is designed to allocate blocks of IPs (default size /26) to hosts on demand. While the $[prodname] CNI plugin was written to do the same, kube-proxy currently only supports a single IP block per host.
    To allow multiple IPAM blocks per host (at the expense of kube-proxy compatibility), set the `windows_use_single_network` flag to `false` in the `cni.conf.template` before installing $[prodname]. Changing that setting after pods are networked is not recommended because it may leak HNS endpoints. | - -## Routes are lost in cloud providers - -If you create a Windows host with a cloud provider (AWS for example), the creation of the vSwitch at $[prodname] install time can remove the cloud provider's metadata route. If your application relies on the metadata service, you may need to examine the routing table before and after installing $[prodname] to reinstate any lost routes. - -## VXLAN limitations - -**VXLAN support** - -- Windows 1903 build 18317 and above -- Windows 1809 build 17763 and above - -**Configuration updates** - -Certain configuration changes will not be honored after the first pod is networked. This is because Windows does not currently support updating the VXLAN subnet parameters after the network is created so updating those parameters requires the node to be drained: - -One example is the VXLAN VNI setting. To change such parameters: - -- Drain the node of all pods -- Delete the $[prodname] HNS network: - - ```powershell - Import-Module -DisableNameChecking $[rootDirWindows]\libs\hns\hns.psm1 - Get-HNSNetwork | ? Name -EQ "$[prodname]" | Remove-HNSNetwork - ``` - -- Update the configuration in `config.ps1`, run `uninstall-calico.ps1` and then `install-calico.ps1` to regenerate the CNI configuration. - -## Pod-to-pod connections are dropped with TCP reset packets - -Restarting Felix or changes to policy (including changes to endpoints referred to in policy) can cause pod-to-pod connections to be dropped with TCP reset packets when one of the following occurs: - -- The policy that applies to a pod is updated -- Some ingress or egress policy that applies to a pod contains selectors and the set of endpoints that those selectors match changes - -Felix must reprogram the HNS ACL policy attached to the pod. This reprogramming can cause TCP resets. Microsoft has confirmed this is a HNS issue, and they are investigating. - -## Service ClusterIPs incompatible with selectors on pod IPs in network policy - -**Windows 1809 prior to build 17763.1432** - -On Windows nodes, kube-proxy unconditionally applies source NAT to traffic from local pods to service ClusterIPs. This means that, at the destination pod, where policy is applied, the traffic appears to come from the source host rather than the source pod. In turn, this means that a network policy with a source selector matching the source pod will not match the expected traffic. - -## Network policy and using selectors - -Under certain conditions, relatively simple $[prodname] policies can require significant Windows data plane resources, that can cause significant CPU and memory usage, and large policy programming latency. - -We recommend avoiding policies that contain rules with both a source and destination selector. The following is an example of a policy that would be inefficient. 
The policy applies to all workloads, and it only allows traffic from workloads labeled as clients to workloads labeled as servers: - -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalNetworkPolicy -metadata: - name: calico-dest-selector -spec: - selector: all() - order: 500 - ingress: - - action: Allow - destination: - selector: role == "webserver" - source: - selector: role == "client" -``` - -Because the policy applies to all workloads, it will be rendered once per workload (even if the workload is not labeled as a server), and then the selectors will be expanded into many individual data plane rules to capture the allowed connectivity. - -Here is a much more efficient policy that still allows the same traffic: - -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalNetworkPolicy -metadata: - name: calico-dest-selector -spec: - selector: role == "webserver" - order: 500 - ingress: - - action: Allow - source: - selector: role == "client" -``` - -The destination selector is moved into the policy selector, so this policy is only rendered for workloads that have the `role: webserver` label. In addition, the rule is simplified so that it only matches on the source of the traffic. Depending on the number of webserver pods, this change can reduce the data plane resource usage by several orders of magnitude. - -## Network policy with tiers - -Because of the way the Windows data plane handles rules, the following limitations are required to avoid performance issues: - -- Tiers: maximum of 5 -- `pass` rules: maximum of 10 per tier -- If each tier contains a large number of rules, and has pass rules, you may need to reduce the number of tiers further. - -## Flow log limitations - -$[prodname] supports flow logs with these limitations: - -- No packet/bytes stats for denied traffics -- Inaccurate `num_flows_started` and `num_flows_completed` stats with VXLAN networking -- No DNS stats -- No Http stats -- No RuleTrace for tiers -- No BGP logs - -## DNS Policy limitations - -:::note - -DNS Policy is a tech preview feature. Tech preview features may be subject to significant changes before they become GA. - -::: - -$[prodname] supports DNS policy on Windows with these limitations: - -- It could take up to 5 seconds for the first TCP SYN packet to go through, for a connection to a DNS domain name. This is because DNS policies are dynamically programmed. The first TCP packet could be dropped since there is no policy to allow it until $[prodnameWindows] detects domain IPs from DNS response and programs DNS policy rules. The Windows TCPIP stack will send SYN again after TCP Retransmission timeout (RTO) if previous SYN has been dropped. -- Some runtime libraries do not honour DNS TTL. Instead, they manage their own DNS cache which has a different TTL value for DNS entries. On .NET Framework, the value to control DNS TTL is ServicePointManager.DnsRefreshTimeout which has default value of 120 seconds - [DNS refresh timeout](https://docs.microsoft.com/en-us/dotnet/api/system.net.servicepointmanager.dnsrefreshtimeout). It is important that $[prodnameWindows] uses a longer TTL value than the one used by the application, so that DNS policy will be in place when the application is making outbound connections. The configuration item “WindowsDNSExtraTTL” should have a value bigger than the maximum value of DNS TTL used by the runtime libraries for your applications. -- Due to the limitations of Windows container networking, a policy update could have an impact on performance. 
Programming DNS policy may result in more policy updates. Setting “WindowsDNSExtraTTL” to a bigger number will reduce the performance impact. - -## Next steps - -- [Install $[prodnameWindows] using operator](operator.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/windows-calico/operator.mdx b/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/windows-calico/operator.mdx deleted file mode 100644 index 5e4e152053..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/windows-calico/operator.mdx +++ /dev/null @@ -1,120 +0,0 @@ ---- -description: Install Calico Enterprise for Windows on a Kubernetes cluster for testing or development. ---- - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; -import CalicoWindowsInstall from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/CalicoWindowsInstall'; - -# Install using Operator - -## Big picture - -Install $[prodnameWindows] on your Kubernetes cluster using the Tigera Operator. - -## Concepts - -$[prodnameWindows] is a hybrid implementation that requires a Linux cluster for $[prodname] components and Linux workloads, and Windows nodes for Windows workloads. - -## Before you begin - -Review the [Linux requirements](../requirements.mdx) and the [$[prodnameWindows] requirements](requirements.mdx). - -Before beginning, setup a $[prodname] cluster on Linux nodes and provision Windows machines. - -## How to - -- [Configure strict affinity for clusters using $[prodname] networking](#configure-strict-affinity-for-clusters-using-calico-networking) -- [Install $[prodnameWindows] using the operator](#install-calico-enterprise-for-windows-using-the-operator) - -### Configure strict affinity for clusters using $[prodname] networking - -For Linux control nodes using $[prodname] networking, strict affinity must be set to `true`. -This is required to prevent Linux nodes from borrowing IP addresses from Windows nodes: - -```bash -kubectl patch ipamconfigurations default --type merge --patch='{"spec": {"strictAffinity": true}}' -``` - -### Install $[prodnameWindows] using the operator - -With Kubernetes v1.22, there is a new Windows container type called **HostProcess containers (HPC)** that can run directly on the host with access to the host network namespace, -storage, and devices. With this feature, $[prodnameWindows] can now be installed and managed using Kubernetes resources such as Daemonsets and ConfigMaps, -instead of needing to configure and install $[prodnameWindows] manually on each node. Using this installation method, the $[prodnameWindows] -services are no longer registered on the host. Instead, the services are run directly within HostProcess containers. - -#### HPC requirements - -In addition to the [$[prodnameWindows] requirements](requirements.mdx), -this installation method has [additional requirements](https://kubernetes.io/docs/tasks/configure-pod-container/create-hostprocess-pod/): - -- Kubernetes v1.22+ -- Enable HostProcess containers support. For Kubernetes v1.22, see [here](https://kubernetes.io/docs/tasks/configure-pod-container/create-hostprocess-pod/#before-you-begin). For Kubernetes v1.23+, HostProcess containers are enabled by default. -- [containerd](https://kubernetes.io/docs/setup/production-environment/container-runtimes/#containerd) v1.6+ -- The Windows nodes have joined the cluster. 
- -To install containerd on the Windows node and configure the containerd service: - -```powershell -Invoke-WebRequest $[windowsScriptsURL]/Install-Containerd.ps1 -OutFile c:\Install-Containerd.ps1 -c:\Install-Containerd.ps1 -ContainerDVersion 1.6.22 -skipHypervisorSupportCheck -CNIConfigPath "c:/etc/cni/net.d" -CNIBinPath "c:/opt/cni/bin" -``` - -If you have an existing $[prodnameWindows] installation using the manual method, your Windows nodes may have already joined the cluster. - -To join a Windows node to a cluster provisioned with kubeadm: - -- Install kubeadm and kubelet binaries and install the kubelet service - -```powershell -Invoke-WebRequest $[windowsScriptsURL]/PrepareNode.ps1 -OutFile c:\PrepareNode.ps1 -c:\PrepareNode.ps1 -KubernetesVersion v1.26.6 -``` - -- Run kubeadm on a control plane host and copy the join command - -```bash -kubeadm token create --print-join-command -``` - -- Edit the join command by updating the kubeadm.exe path to `c:\k\kubeadm.exe`. -- If using a Kubernetes version earlier than v1.25, also append `--cri-socket "npipe:////./pipe/containerd-containerd"` to the join command. - An example join command: - -``` -c:\k\kubeadm.exe join 172.16.101.139:6443 --token v8w2jt.jmc45acn85dbll1e --discovery-token-ca-cert-hash sha256:d0b7040a704d8deb805ba1f29f56bbc7cea8af6aafa78137a9338a62831739b4 --cri-socket "npipe:////./pipe/containerd-containerd" -``` - -- Run the join command on the Windows node. Shortly after it completes successfully, the Windows node will appear in `kubectl get nodes`. - The new node's status will be NotReady since the Calico CNI has not yet been installed. - -#### Migrate from $[prodnameWindows] manual installation to operator installation - -The HPC installation in this guide is operator-based and will automatically migrate your Windows nodes that have $[prodnameWindows] installed using the manual installation method. This installation process will uninstall any existing $[prodnameWindows] services and overwrite the $[prodnameWindows] installation files with those included in the `tigera/cni-windows` and `tigera/cnx-node-windows` images. If `kubelet` was installed using `$[rootDirWindows]\kubernetes\install-kube-services.ps1`, this service will remain installed. If the `kube-proxy` service was also installed using `$[rootDirWindows]\kubernetes\install-kube-services.ps1`, it will be stopped and uninstalled by the `calico-node-windows` HPC initContainers. It is recommended to run `kube-proxy-windows` as hostprocess container images using the [manifest provided in the kubernetes-sigs sig-windows-tools repository](https://raw.githubusercontent.com/kubernetes-sigs/sig-windows-tools/master/hostprocess/calico/kube-proxy/kube-proxy.yml). - -:::note - -Before proceeding, take note of the configuration parameters in `$[rootDirWindows]\config.ps1`. These configuration parameters will be needed during the install. - -::: - -#### Operator installation - - - - - - - - - - - - - - -Congratulations! You now have a Kubernetes cluster with $[prodnameWindows] and a Linux control node. - -## Next steps - -You can now use the $[prodname] Linux-based docs site for your documentation. Before you continue, review the [Limitations and known issues](limitations.mdx) to understand the features (and sections of documentation) that do not apply to Windows. 
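-
-As a quick check after the operator install completes, you can confirm that the Windows nodes are networked. This is a minimal sketch: it assumes an operator-managed installation (components in the `calico-system` namespace, status reported through `tigerastatus`) and uses the standard `kubernetes.io/os` node label, so adjust the names if your cluster differs.
-
-```powershell
-# All Tigera operator components should eventually report Available
-kubectl get tigerastatus
-
-# Windows nodes should move from NotReady to Ready once $[prodnameWindows] is running
-kubectl get nodes -l kubernetes.io/os=windows
-
-# Pods in calico-system scheduled on the Windows nodes should reach Running
-kubectl get pods -n calico-system -o wide
-```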
diff --git a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/windows-calico/rancher.mdx b/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/windows-calico/rancher.mdx deleted file mode 100644 index eafd5bec5b..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/windows-calico/rancher.mdx +++ /dev/null @@ -1,114 +0,0 @@ ---- -description: Install Calico Enterprise for Windows on RKE. ---- - -# Install Calico Enterprise for Windows on RKE - -## Big picture - -Install $[prodnameWindows] on Rancher Kubernetes Engine (RKE). - -## Before you begin - -**Supported networking** - -- BGP with no encapsulation -- VXLAN - -**Required** - -- A [compatible RKE cluster](../../compatibility.mdx#rke) - -- An RKE cluster provisioned with [no network plugin](https://rancher.com/docs/rke/latest/en/config-options/add-ons/network-plugins#disabling-deployment-of-a-network-plug-in) - -- One or more Windows nodes that meet the [requirements](requirements.mdx). - -## How to - -The following steps will outline the installation of $[prodname] networking on the RKE cluster, then the installation of $[prodnameWindows] on the Windows nodes. - -1. Install the Tigera Operator and custom resource definitions. - - ```bash - kubectl create -f $[filesUrl]/manifests/tigera-operator.yaml - ``` - -1. Download the necessary Installation custom resources. - - ```bash - wget $[filesUrl]/manifests/custom-resources.yaml - ``` - -1. Update the `calicoNetwork` options, ensuring that the correct pod CIDR is set. (Rancher uses `10.42.0.0/16` by default.) - Below are sample installations for VXLAN and BGP networking using the default Rancher pod CIDR: - - **VXLAN** - - ```yaml - apiVersion: operator.tigera.io/v1 - kind: Installation - metadata: - name: default - spec: - # Configures Calico networking. - calicoNetwork: - bgp: Disabled - ipPools: - - blockSize: 26 - cidr: 10.42.0.0/16 - encapsulation: VXLAN - natOutgoing: Enabled - nodeSelector: all() - ``` - - **BGP** - - ```yaml - apiVersion: operator.tigera.io/v1 - kind: Installation - metadata: - name: default - spec: - # Configures Calico networking. - calicoNetwork: - ipPools: - - blockSize: 26 - cidr: 10.42.0.0/16 - encapsulation: None - natOutgoing: Enabled - nodeSelector: all() - ``` - - :::note - - For more information on configuration options available in this manifest, see [the installation reference](../../../reference/installation/api.mdx). - - ::: - -1. Apply the updated custom resources: - - ```bash - kubectl create -f custom-resources.yaml - ``` - -1. Configure strict affinity: - - ```bash - kubectl patch ipamconfigurations default --type merge --patch='{"spec": {"strictAffinity": true}}' - ``` - -1. Finally, install $[prodnameWindows]. For an operator installation, follow the [operator guide](operator.mdx). - For VXLAN clusters, follow the instructions under the "Kubernetes VXLAN" tab. For BGP clusters, follow the instructions under the "Kubernetes BGP" tab. - - :::note - - For Rancher default values for service CIDR and DNS cluster IP, see the [Rancher kube-api service options](https://rancher.com/docs/rke/latest/en/config-options/services/#kubernetes-api-server-options). - - ::: - -1. Check the status of the nodes with `kubectl get nodes`. If you see that the Windows node has the status `Ready`, then you have a $[prodnameWindows] on RKE cluster ready for Linux and Windows workloads! 
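-
-For example, the following commands can be used to confirm the result. This is a minimal sketch; the `default` IPAMConfiguration name matches the strict affinity step above, and `kubernetes.io/os` is the standard node label.
-
-```powershell
-# The Windows node should report Ready once $[prodnameWindows] is installed
-kubectl get nodes -l kubernetes.io/os=windows
-
-# spec.strictAffinity should still be true
-kubectl get ipamconfigurations default -o yaml
-```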
- -## Next steps - -- [Try the basic policy demo](demo.mdx) -- [Secure pods with $[prodname] network policy](../../../network-policy/beginners/calico-network-policy.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/windows-calico/requirements.mdx b/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/windows-calico/requirements.mdx deleted file mode 100644 index aebd81a4eb..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/windows-calico/requirements.mdx +++ /dev/null @@ -1,104 +0,0 @@ ---- -description: Review requirements for Calico Enterprise for Windows. ---- - -# Requirements - -## What's supported in this release - -✓ Install: - -- Operator install for Kubernetes clusters using hostprocess containers (HPC) on Windows nodes - -✓ Platforms: Kubernetes, OpenShift, RKE, EKS, AKS - -✓ Networking: - -- Kubernetes, on-premises: Calico CNI with BGP or VXLAN -- OpenShift: Calico CNI with BGP or VXLAN -- Rancher Kubernetes Engine: Calico CNI with BGP or VXLAN -- EKS: VPC CNI -- AKS: Azure CNI - -## Requirements - -Because the Kubernetes and $[prodname] control components do not run on Windows yet, a hybrid Linux/Windows cluster is required. - -### CNI and networking options - -The following table summarizes the networking options and considerations. - -| Networking | Components | **Value/Content** | -| ------------------ | ------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| $[prodname] BGP | Windows CNI plugin:

    calico.exe

    Linux: $[prodname] for policy and networking | $[prodname]'s native networking approach, supports:
    - Auto-configured node-to-node BGP mesh over an L2 fabric
    - Peering with external routers for an L3 fabric
    - $[prodname] IPAM and IP aggregation (with some limitations)
    - Route reflectors **Note**: Windows nodes cannot act as route reflectors.
    - Kubernetes API datastore driver

    **AWS users**: If running on AWS, you must disable the source/dest check on your EC2 instances so that hosts can forward traffic on behalf of pods. | -| $[prodname] VXLAN | Windows CNI plugin:
    calico.exe

    Linux: $[prodname] for policy and networking | $[prodname]'s VXLAN overlay, supports:

    - VXLAN overlay, which can traverse most networks.
    - Auto-configured node-to-node routing
    - $[prodname] IPAM and IP aggregation (with some limitations)
    - Kubernetes API datastore driver
    **Note**: VXLAN runs on UDP port 4789 (this is the only port supported by Windows), so remember to open that port between your $[prodname] hosts in any firewalls / security groups. |
-| Cloud provider | Windows CNI plugin: win-bridge.exe

    Linux: $[prodname] policy-only | A useful fallback, particularly if you have a Kubernetes cloud provider that automatically installs inter-host routes. $[prodname] has been tested with the standard **win-bridge.exe** CNI plugin so it should work with any networking provider that ultimately uses win-bridge.exe to network the pod (such as the Azure CNI plugin and cloud provider). | - -:::note - -If Calico CNI with VXLAN is used, BGP must be disabled. See the [installation reference](../../../reference/installation/api.mdx#bgpoption). - -::: - -### Kubernetes version - -For Kubernetes versions for your platform, see [Support and compatibility](../../compatibility.mdx). - -When using Operator install and Windows hostprocess containers (HPC), see [here for the additional requirements](operator.mdx#hpc-requirements). - -### Linux platform requirements - -- At least four Linux Kubernetes worker nodes to run $[prodname]'s cluster-wide components that meets [Linux system requirements](../requirements.mdx), and is installed with $[prodname] v3.5.0+ -- Must not be running in eBPF mode -- VXLAN or BGP without encapsulation is supported if using $[prodname] CNI. IPIP ($[prodname]'s default encapsulation mode) is not supported. Use the following command to turn off IPIP. - - ```bash - kubectl patch felixconfiguration default -p '{"spec":{"ipipEnabled":false}}' - ``` - -- If using $[prodname] IPAM, strict affinity of IPAM configuration must be set to `true`. - - ```bash - kubectl patch ipamconfigurations default --type merge --patch='{"spec": {"strictAffinity": true}}' - ``` - -:::note - -For operator-managed Linux $[prodname] clusters, three Linux worker nodes are required to meet high-availability requirements for Typha. - -::: - -### Windows platform requirements - -- Windows versions: - - - Windows Server 1809 (build 17763.1432 or later) - - Windows Server 2022 (build 20348.169 or later) - - :::note - - Windows Server version support differs for each Kubernetes version. Review the [Windows OS Version Support](https://kubernetes.io/docs/setup/production-environment/windows/intro-windows-in-kubernetes/#windows-os-version-support) table for the Windows Server versions supported by each Kubernetes version. - - ::: - -#### Operator install requirements - -- Kubernetes v1.22+ -- Enable HostProcess containers support. For Kubernetes v1.22, see [here](https://kubernetes.io/docs/tasks/configure-pod-container/create-hostprocess-pod/#before-you-begin). For Kubernetes v1.23+, HostProcess containers are enabled by default. -- [containerd](https://kubernetes.io/docs/setup/production-environment/container-runtimes/#containerd) v1.6.0+ -- The Windows nodes have joined the cluster. -- See [this section of the operator install guide for example commands](operator.mdx#hpc-requirements). - -### EKS requirements - -- The VPC controllers must be installed to run Windows pods. -- An instance role on the Windows instance must have permissions to get `namespaces` and get `secrets` in the calico-system namespace (or kube-system namespace if you are using a non operator-managed $[prodname] installation.) - -### AKS requirements - -- $[prodnameWindows] can be enabled only on newly created clusters. 
-- Non-HPC $[prodnameWindows] is available with Kubernetes version 1.20 or later - -## Next steps - -[Install Calico for Windows using operator](operator.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/windows-calico/troubleshoot.mdx b/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/windows-calico/troubleshoot.mdx deleted file mode 100644 index e1206d6465..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/install-on-clusters/windows-calico/troubleshoot.mdx +++ /dev/null @@ -1,207 +0,0 @@ ---- -description: Help for troubleshooting Calico Enterprise for Windows issues. ---- - -# Troubleshoot Calico Enterprise for Windows - -## Useful troubleshooting commands - -**Examine the HNS network(s)** - -When using the $[prodname] CNI plugin, each $[prodname] IPAM block (or the single podCIDR in host-local IPAM mode), is represented as a HNS l2bridge network. Use the following command to inspect the networks. - -```powershell -ipmo -DisableNameChecking $[rootDirWindows]\libs\hns\hns.psm1 -Get-HNSNetwork -``` - -**Examine pod endpoints** - -Use the following command to view the HNS endpoints on the system. There should be one HNS endpoint per pod networked with $[prodname]: - -```powershell -ipmo -DisableNameChecking $[rootDirWindows]\libs\hns\hns.psm1 -Get-HNSEndpoint -``` - -## Troubleshoot - -### kubectl exec fails with timeout for Windows pods - -Ensure that the Windows firewall (and any network firewall or cloud security group) allows traffic to the host on port 10250. - -### kubelet fails to register, complains of node not found in logs - -This can be caused by a mismatch between a cloud provider (such as the AWS cloud provider) and the configuration of the node. For example, the AWS cloud provider requires that the node has a nodename matching its private domain name. - -### After initializing $[prodnameWindows], AWS metadata server is no longer reachable - -This is a known Windows issue that Microsoft is working on. The route to the metadata server is lost when the vSwitch is created. As a workaround, manually add the route back by running: - -```powershell -New-NetRoute -DestinationPrefix 169.254.169.254/32 -InterfaceIndex -``` - -Where `` is the index of the "vEthernet (Ethernet 2)" device as shown by - -```powershell -Get-NetAdapter -``` - -### Installation stalls at "Waiting for $[prodname] initialization to finish" - -This can be caused by Window's Execution protection feature. Exit the install using Ctrl-C, unblock the scripts, run `uninstall-calico.ps1`, followed by `install-calico.ps1`. - -### Windows Server 2019 insider preview: after rebooting a node, $[prodnameWindows] fails to start, the tigera-node.err.log file contains errors - -After rebooting the Windows node, pods fail to schedule, and the kubelet log has CNI errors like "timed out waiting for interface matching the management IP (169.254.57.5) of network" (where the IP address may vary but will always be a 169.254.x.x address). To workaround: - -- Stop and then start $[prodnameWindows] using the `stop-calico.ps1` and `start-calico.ps1` scripts -- Sometimes the HNS network picks up a temporary self-assigned address at start-of-day and it does not get refreshed when the correct IP becomes known. Rebooting the node a second time often resolves the problem. 
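-
-A minimal sketch of that workaround is shown below. It assumes the scripts live in the default $[rootDirWindows] installation directory; adjust the paths if you installed elsewhere.
-
-```powershell
-# Stop and then restart the $[prodnameWindows] services
-$[rootDirWindows]\stop-calico.ps1
-$[rootDirWindows]\start-calico.ps1
-
-# Inspect the HNS network; if its management IP is still a 169.254.x.x address,
-# reboot the node a second time
-ipmo -DisableNameChecking $[rootDirWindows]\libs\hns\hns.psm1
-Get-HNSNetwork
-```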
- -### Invoke-Webrequest fails with TLS errors - -The error, "The request was aborted: Could not create SSL/TLS secure channel", often means that Windows does not support TLS v1.2 (which is required by many websites) by default. To enable TLS v1.2, run the following command: - -```powershell -[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12 -``` - -### Kubelet persistently fails to contact the API server - -If kubelet is already running when $[prodnameWindows] is installed, the creation of the container vSwitch can cause kubelet to lose its connection and then persistently fail to reconnect to the API server. -To resolve this, restart kubelet after installing $[prodnameWindows]. - -### No connectivity between pods on Linux and Windows nodes - -If using AWS, check that the source/dest check is disabled on the interfaces assigned to your nodes. This allows nodes to forward traffic on behalf of local pods. -In AWS, the "Change Source/Dest. Check" option can be found on the Actions menu for a selected network interface. - -If using $[prodname] networking, check that the $[prodname] IP pool you are using has IPIP mode disabled (set to "Never). IPIP is not supported on Windows. To check the IP pool, you can use `calicoctl`: - -```bash -calicoctl get ippool -o yaml -``` - -Example output of an IP pool with IPIP disabled: - -```yaml -apiVersion: projectcalico.org/v3 -items: - - apiVersion: projectcalico.org/v3 - kind: IPPool - metadata: - creationTimestamp: 2018-11-26T15:37:39Z - name: default-ipv4-ippool - resourceVersion: '172' - uid: 34db7316-f191-11e8-ad7d-02850eebe6c4 - spec: - blockSize: 26 - cidr: 192.168.0.0/16 - disabled: true - ipipMode: Never - natOutgoing: true -``` - -### Felix starts, but does not output logs - -By default, Felix waits to connect to the datastore before logging (in case the datastore configuration intentionally disables logging). To start logging at startup, update the [FELIX_LOGSEVERITYSCREEN environment variable](../../../reference/component-resources/node/felix/configuration.mdx#general-configuration) to "info" or "debug" level. - -### $[prodname] BGP mode: connectivity issues, Linux calico/node pods report unready - -Check the detailed health output that shows which health check failed: - -``` -kubectl describe pod -n calico-system -``` - -:::note - -Use namespace `kube-system` instead of `calico-system` if your Calico installation is non operator-managed. - -::: - -If the health check reports a BGP peer failure, check the IP address of the peer is either an -expected IP of a node or an external BGP peer. If the IP of the failed peering is a Windows node: - -- Check that the node is up a reachable over IP -- Check that the RemoteAccess service is installed and running: - - ```powershell - Get-Service | ? Name -EQ RemoteAccess - ``` - -- Check the logs for the confd service in the configured log directory for errors - (default $[rootDirWindows]\logs). - -**Examine BGP state on a Windows host** - -The Windows BGP router exposes its configuration and state as PowerShell commandlets. 
- -**To show BGP peers**: - -```powershell -Get-BgpPeer -``` - -Example output: - -``` -PeerName LocalIPAddress PeerIPAddress PeerASN OperationMode ConnectivityStatus --------- -------------- ------------- ------- ------------- ------------------ -Mesh_172_20_48_43 172.20.55.101 172.20.48.43 64512 Mixed Connected -Mesh_172_20_51_170 172.20.55.101 172.20.51.170 64512 Mixed Connected -Mesh_172_20_54_3 172.20.55.101 172.20.54.3 64512 Mixed Connected -Mesh_172_20_58_252 172.20.55.101 172.20.58.252 64512 Mixed Connected -For an established peering, the ConnectivityStatus column should be "Connected". -``` - -**To examine routes learned from other hosts**: - -```powershell -Get-BgpRouteInformation -Type all -``` - -Example output: - -``` -DestinationNetwork NextHop LearnedFromPeer State LocalPref MED ------------------- ------- --------------- ----- --------- --- -10.243.128.192/26 172.20.58.252 Mesh_172_20_58_252 Best 100 -10.244.115.128/26 172.20.48.43 Mesh_172_20_48_43 Best 100 -10.244.128.192/26 172.20.58.252 Mesh_172_20_58_252 Best 100 -``` - -For active routes, the State should show as "Best". Routes with State equal to "Unresolved" -indicate that the BGP router could not resolve a route to the peer and the route will not be -used. This can occur if the networking state changes after the BGP router is started; -restarting the BGP router may solve the problem: - -```powershell -Restart-Service RemoteAccess -``` - -To see the routes being exported by this host: - -```powershell -(Get-BgpCustomRoute).Network -``` - -Example output: - -``` -10.243.214.152/29 -10.243.214.160/29 -10.243.214.168/29 -10.244.42.0/26 -``` - -### Nodeports on Linux do not pass connections on to Windows pods - -Check that your linux nodes are NOT running in eBPF mode. This can be checked by running: - -``` -kubectl get felixconfiguration default -o yaml -``` - -Check that `bpfEnabled=false` (or is not present at all in the felixconfiguration) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/manifest-archive.mdx b/calico-enterprise_versioned_docs/version-3.19-2/getting-started/manifest-archive.mdx deleted file mode 100644 index 62499c2396..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/manifest-archive.mdx +++ /dev/null @@ -1,108 +0,0 @@ ---- -description: Install an older patch release of Calico Enterprise. ---- - -# Install a patch release - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; -import ReleaseArchiveTable from "../_includes/components/ReleaseArchiveTable"; - -Installing the most recent patch version of $[prodname] $[version] ensures that you have the latest bug fixes and improvements. -But you can always install, or downgrade to, a previous patch version. -Installing an older patch can help if you encounter problems with the latest patch, or if you're required to install only a specific patch version. - -## Before you begin - -This feature is: - -- Available in 3.0 and later -- Not available for Helm with operator - -## How to - -1. Download the release archive for your patch version from the following table: - - - -1. Untar the **release-vx.y.z-vx.y.z.tgz** to a local directory. - - ```bash - tar xzvf release-vx.y.z-vx.y.z.tgz - ``` - - - - -In the patch release archive, navigate to the `manifests` folder. - -1. Follow the [quickstart installation](install-on-clusters/kubernetes/quickstart.mdx), making the following changes: - - 1. Install Tigera Operator and custom resource definitions. 
- - ```bash - kubectl create -f /manifests/tigera-operator.yaml - ``` - - 2. If you are not using an existing Prometheus operator, install it. - - ```bash - kubectl create -f /manifests/tigera-prometheus-operator.yaml - ``` - - 3. Install Tigera custom resources. - - ```bash - kubectl create -f /manifests/custom-resources.yaml - ``` - - :::note - - For platforms like AKS or EKS, you must modify the command to be platform specific. - EKS example: `kubectl create -f /manifests/eks/custom-resources.yaml` - - ::: - - - - -In the patch release archive, navigate to the `ocp-manifests` folder which contains three folders `install-manifests`, -`enterprise-resources`, and `upgrade-manifests`. - -- `install-manifests` contains all the manifests needed for minimal OCP cluster. -- `enterprise-resources` contains the $[prodname] resources. -- `upgrade-manifests` folder contains all the manifests needed for upgrading $[prodname]. - -1. Create the cluster by following [the standard installation](install-on-clusters/openshift/installation.mdx), with the following caveat: - - 1. After the Kubernetes manifests directory is generated, copy the files from `install-manifests` instead of downloading the manifests. - - :::note - - Before creating the cluster, be sure to add an image pull secret in `install-manifests/02-pull-secret.yaml` - - ::: - -1. Install $[prodname] resources: - - ```bash - cd /ocp-manifests/enterprise-resources && oc create -f - ``` - - - - -In the patch release archive, there are additional manifests relating to specific features. - -**Examples** - -To apply the patch release for threat defense features. - -```bash -cd /manifests/threatdef && kubectl create -f ejr-vpn.yaml.yaml -cd /manifests/threatdef && kubectl create -f tor-exit-feed.yaml -``` - - - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/upgrading/index.mdx b/calico-enterprise_versioned_docs/version-3.19-2/getting-started/upgrading/index.mdx deleted file mode 100644 index 20bc2723b1..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/upgrading/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: Upgrade to a newer version of Calico. -hide_table_of_contents: true ---- - -# Upgrade - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/upgrading/upgrading-calico-to-calico-enterprise/index.mdx b/calico-enterprise_versioned_docs/version-3.19-2/getting-started/upgrading/upgrading-calico-to-calico-enterprise/index.mdx deleted file mode 100644 index a7ab5ec08d..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/upgrading/upgrading-calico-to-calico-enterprise/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: Upgrade from Calico to Calico Enterprise. 
-hide_table_of_contents: true ---- - -# Upgrade from Calico to Calico Enterprise - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/upgrading/upgrading-calico-to-calico-enterprise/upgrade-to-tsee-openshift.mdx b/calico-enterprise_versioned_docs/version-3.19-2/getting-started/upgrading/upgrading-calico-to-calico-enterprise/upgrade-to-tsee-openshift.mdx deleted file mode 100644 index c5c1f22083..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/upgrading/upgrading-calico-to-calico-enterprise/upgrade-to-tsee-openshift.mdx +++ /dev/null @@ -1,103 +0,0 @@ ---- -description: Steps to upgrade from open source Calico to Calico Enterprise on OpenShift. ---- - -import InstallOpenShiftManifests from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/InstallOpenShiftManifests'; -import OpenShiftPullSecret from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/OpenShiftPullSecret'; -import OpenShiftPrometheusOperator from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/OpenShiftPrometheusOperator'; - -# Upgrade from Calico to Calico Enterprise on OpenShift - -## Prerequisites - -Ensure that your Kubernetes cluster is running with open source Calico on the latest release using an operator-based -installation. If not, follow the [Calico upgrade documentation](/calico/latest/operations/upgrading/openshift-upgrade) -before continuing. - -Ensure your Kubernetes cluster is using the Kubernetes datastore. If you are using an `etcdv3` datastore, or the cluster doesn't have a datastore, contact Tigera Support to upgrade the datastore. - -Your Kubernetes cluster must not be running in production. Operator-based upgrades from open source Calico are not recommended for production clusters due to limited testing. Also, upgrades are not tested with open source Calico prior to v3.15. - -If your cluster already has $[prodname] installed, follow the [Upgrading $[prodname] from an earlier release guide](../upgrading-enterprise/openshift-upgrade.mdx) instead. - -## Prepare your cluster for the upgrade - -Calico Enterprise creates default-deny policies for all Calico and Tigera namespaces, including calico-system. If you deploy workloads into the calico-system namespace, you must create policy that allows the required traffic for your workloads prior to upgrade. - -## Upgrade Calico to $[prodname] - -### Before you begin - -**Required** - -- [Configure a storage class for $[prodname].](../../../operations/logstorage/create-storage.mdx) - -### Install $[prodname] - -**Download the new manifests** - -Make the manifests directory. - -```bash -mkdir manifests -``` - - - -**Add an image pull secret** - - - -> (Optional) If your cluster architecture requires any custom [$[prodname] resources](../../../reference/resources/index.mdx) to function at startup, install them now using [calicoctl](../../../reference/clis/calicoctl/overview.mdx). - -**Install $[prodname]** - -1. Apply the Tigera Operators and custom resource definitions. - - ```bash - oc apply --server-side --force-conflicts -f manifests/ - ``` - -2. Optional: If your cluster architecture requires any custom [Calico resources](../../../reference/resources/index.mdx) to function at startup, install them now using [calicoctl](../../../reference/clis/calicoctl/overview.mdx). - -3. 
Create the custom resources for $[prodname] features, see [the installation reference](../../../reference/installation/api.mdx). - - ```bash - oc apply -f $[filesUrl]/manifests/ocp/tigera-enterprise-resources.yaml - ``` - -4. Patch installation. - - ```bash - oc patch installations.operator.tigera.io default --type merge -p '{"spec":{"variant":"TigeraSecureEnterprise","imagePullSecrets":[{"name":"tigera-pull-secret"}]}}' - ``` - -5. You can now monitor the upgrade progress with the following command: - - ```bash - watch oc get tigerastatus - ``` - -Wait until the `apiserver` shows a status of `Available`, then proceed to the next section. - -:::note - -To troubleshoot problems, use `oc get tigerastatus -o yaml`. - -::: - -### Install the $[prodname] license - -Install the $[prodname] license provided to you by Tigera. - -``` -oc create -f
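# Illustrative only: substitute the path to the license file provided by Tigera, for example:
# oc create -f ./tigera-enterprise-license.yaml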
    -``` - - - -You can now monitor progress with the following command: - -``` -watch oc get tigerastatus -``` diff --git a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/upgrading/upgrading-calico-to-calico-enterprise/upgrade-to-tsee/helm.mdx b/calico-enterprise_versioned_docs/version-3.19-2/getting-started/upgrading/upgrading-calico-to-calico-enterprise/upgrade-to-tsee/helm.mdx deleted file mode 100644 index 38e3c520c8..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/upgrading/upgrading-calico-to-calico-enterprise/upgrade-to-tsee/helm.mdx +++ /dev/null @@ -1,90 +0,0 @@ ---- -description: Upgrade to Calico Enterprise from Calico installed with Helm. ---- - -# Upgrade Calico to Calico Enterprise installed with Helm - -import CodeBlock from '@theme/CodeBlock'; - -:::note - -All upgrades in $[prodname] are free with a valid license. - -::: - -## Prepare your cluster for the upgrade - -Calico Enterprise creates default-deny policies for all Calico and Tigera namespaces, including calico-system. If you deploy workloads into the calico-system namespace, you must create policy that allows the required traffic for your workloads prior to upgrade. - -## Upgrade from Calico to $[prodname] - -:::note - -The following steps assume the Calico deployment is installed on `tigera-operator` namespace. Replace with valid namespace otherwise. - -::: - -1. Get the Helm chart - - - {'$[version]' === 'master' - ? (`gsutil cp gs://tigera-helm-charts/tigera-operator-v0.0.tgz`) - : (`curl -O -L $[downloadsurl]/ee/charts/tigera-operator-$[chart_version_name].tgz`) - } - - -1. Install the $[prodname] custom resource definitions. - - ```bash - kubectl apply --server-side --force-conflicts -f $[filesUrl]/manifests/operator-crds.yaml - kubectl create -f $[filesUrl]/manifests/prometheus-operator-crds.yaml - kubectl create -f $[filesUrl]/manifests/eck-operator-crds.yaml - ``` - -1. [Configure a storage class for $[prodname]](../../../../operations/logstorage/create-storage.mdx) - -1. Run the Helm upgrade command for `tigera-operator`: - - - {'$[version]' === 'master' - ? ( - `helm upgrade calico tigera-operator-v0.0.tgz \\ - --set-file imagePullSecrets.tigera-pull-secret=,tigera-prometheus-operator.imagePullSecrets.tigera-pull-secret= \\ - --namespace tigera-operator` - ) - : ( - `helm upgrade calico tigera-operator-$[chart_version_name].tgz \\ - --set-file imagePullSecrets.tigera-pull-secret=,tigera-prometheus-operator.imagePullSecrets.tigera-pull-secret= \\ - --namespace tigera-operator` - ) - } - - -1. Wait until the `apiserver` shows a status of `Available`, then proceed to the next section. You can monitor progress with the following command: - - ```bash - watch kubectl get tigerastatus/apiserver - ``` - -1. Install your $[prodname] license. - - ```bash - kubectl create -f
    - ``` - -1. Monitor progress, wait until all components show a status of `Available`, then proceed to the next step. - - ```bash - watch kubectl get tigerastatus - ``` - - :::note - - If there are any problems you can use `kubectl get tigerastatus -o yaml` to get more details. - - ::: - -## Next steps - -- [Configure access to the $[prodname] web console](../../../../operations/cnx/access-the-manager.mdx) -- [Authentication quickstart](../../../../operations/cnx/authentication-quickstart.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/upgrading/upgrading-calico-to-calico-enterprise/upgrade-to-tsee/index.mdx b/calico-enterprise_versioned_docs/version-3.19-2/getting-started/upgrading/upgrading-calico-to-calico-enterprise/upgrade-to-tsee/index.mdx deleted file mode 100644 index 7dc0a75945..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/upgrading/upgrading-calico-to-calico-enterprise/upgrade-to-tsee/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: Upgrade to Calico Enterprise from Calico installed with Helm. -hide_table_of_contents: true ---- - -# Kubernetes - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/upgrading/upgrading-calico-to-calico-enterprise/upgrade-to-tsee/standard.mdx b/calico-enterprise_versioned_docs/version-3.19-2/getting-started/upgrading/upgrading-calico-to-calico-enterprise/upgrade-to-tsee/standard.mdx deleted file mode 100644 index a96a9cb29e..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/upgrading/upgrading-calico-to-calico-enterprise/upgrade-to-tsee/standard.mdx +++ /dev/null @@ -1,101 +0,0 @@ ---- -description: Steps to upgrade from open source Calico to Calico Enterprise. ---- - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; -import UpgradeOperatorSimple from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/UpgradeOperatorSimple'; - -# Upgrade from Calico to Calico Enterprise - -## Prerequisites - -Ensure that your Kubernetes cluster is running with open source Calico on the latest release using an operator-based -installation. If not, follow the [Calico upgrade documentation](/calico/latest/operations/upgrading/kubernetes-upgrade) -before continuing. - -$[prodname] only supports clusters with a Kubernetes datastore. -Please contact Tigera Support for assistance upgrading a cluster with an `etcdv3` datastore. - -If your cluster already has $[prodname] installed, follow the [Upgrading $[prodname] from an earlier release guide](../../upgrading-enterprise/kubernetes-upgrade-tsee/index.mdx) instead. - -For hybrid Linux and Windows clusters, ensure that your Windows nodes have at least 4 cores, 8GB RAM. - -## Prepare your cluster for the upgrade - -$[prodname] creates default-deny policies for all Calico and Tigera namespaces, including calico-system. If you deploy workloads into the calico-system namespace, you must create policy that allows the required traffic for your workloads prior to upgrade. - -## Upgrade Calico to $[prodname] - -:::note - -GKE upgrades from open source Calico are not currently supported. 
- -::: - -### Before you begin - -**Required** - -- [Configure a storage class for $[prodname].](../../../../operations/logstorage/create-storage.mdx) - -### Install $[prodname] - - - - - - - - - - - - - - -:::note - -The following instructions are only for AKS clusters that use a Kubernetes reconciler. For AKS clusters with a self-managed Calico installation (including AKS clusters with Calico CNI), or a cluster that is not using a reconciler, follow the upgrade steps under the **Kubernetes** tab. - -::: - -These upgrade instructions will upgrade your AKS clusters with Azure CNI and an AKS-managed Calico installation. - - - - - - -Wait until the `apiserver` shows a status of `Available`, then proceed to the next section. - -:::note - -If there are any problems you can use `kubectl get tigerastatus -o yaml` to get more details. - -::: - -### Install the $[prodname] license - -To use $[prodname], you must install the license provided to you by Tigera. - -``` -kubectl create -f
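# Example only; point this at the license file you received from Tigera, such as:
# kubectl create -f ./license.yaml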
    -``` - -You can now monitor progress with the following command: - -``` -watch kubectl get tigerastatus -``` - -### Next steps - -- [Configure access to the $[prodname] web console](../../../../operations/cnx/access-the-manager.mdx) -- [Authentication quickstart](../../../../operations/cnx/authentication-quickstart.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/upgrading/upgrading-enterprise/index.mdx b/calico-enterprise_versioned_docs/version-3.19-2/getting-started/upgrading/upgrading-enterprise/index.mdx deleted file mode 100644 index ea2e81741f..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/upgrading/upgrading-enterprise/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: Upgrade to a newer version of Calico Enterprise. -hide_table_of_contents: true ---- - -# Upgrade Calico Enterprise - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/upgrading/upgrading-enterprise/kubernetes-upgrade-tsee/helm.mdx b/calico-enterprise_versioned_docs/version-3.19-2/getting-started/upgrading/upgrading-enterprise/kubernetes-upgrade-tsee/helm.mdx deleted file mode 100644 index ee3ef42a43..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/upgrading/upgrading-enterprise/kubernetes-upgrade-tsee/helm.mdx +++ /dev/null @@ -1,117 +0,0 @@ ---- -description: Upgrade to a newer version of Calico Enterprise installed with Helm. ---- - -# Upgrade Calico Enterprise installed with Helm - -import CodeBlock from '@theme/CodeBlock'; - -:::note - -All upgrades in $[prodname] are free with a valid license. - -::: - -## Prerequisites - -- Verify that your Kubernetes cluster is using Helm - - ```bash - kubectl get tigerastatus - ``` - - If the result is successful, then your installation is using Helm. - -## Prepare your cluster for the upgrade - -During the upgrade the controller that manages Elasticsearch is updated. Because of this, the $[prodname] LogStorage -CR is temporarily removed during upgrade. Features that depend on LogStorage are temporarily unavailable, among which -are the dashboards in the web console. Data ingestion is temporarily paused and will continue when the LogStorage is -up and running again. - -To retain data from your current installation (optional), ensure that the currently mounted persistent volumes -have their reclaim policy set to [retain data](https://kubernetes.io/docs/tasks/administer-cluster/change-pv-reclaim-policy/). -Retaining data is only recommended for users that use a valid Elastic license. Trial licenses can get invalidated during -the upgrade. - -If your cluster has Windows nodes and uses custom TLS certificates for log storage then, prior to upgrade, prepare and apply new certificates for [log storage](../../../../operations/comms/log-storage-tls.mdx) that include the required service DNS names. - -### Upgrade OwnerReferences - -If you do not use OwnerReferences on resources in the projectcalico.org/v3 API group, you can skip this section. - -Starting in $[prodname] v3.19, a change in the way UIDs are generated for projectcalico.org/v3 resources requires that you update any OwnerReferences -that refer to projectcalico.org/v3 resources as an owner. After upgrade, the UID for all projectcalico.org/v3 resources will be changed, resulting in any -owned resources being garbage collected by Kubernetes. - -1. 
Remove any OwnerReferences from resources in your cluster that have `apiGroup: projectcalico.org/v3`. -1. Perform the upgrade normally. -1. Add new OwnerReferences to your resources referencing the new UID. - -### Default Deny - -$[prodname] creates a default-deny for the calico-system namespace. If you deploy workloads into the calico-system namespace, you must create policy that allows the required traffic for your workloads prior to upgrade. - -## Upgrade from 3.17 or later - -:::note - -These steps differ based on your cluster type. If you are unsure of your cluster type, look at the field `clusterManagementType` when you run `kubectl get installation -o yaml` before you proceed. - -::: - -1. Get the Helm chart - - - {'$[version]' === 'master' - ? (`gsutil cp gs://tigera-helm-charts/tigera-operator-v0.0.tgz`) - : (`curl -O -L $[downloadsurl]/ee/charts/tigera-operator-$[chart_version_name].tgz`) - } - - -1. Install the $[prodname] custom resource definitions. - - ```bash - kubectl apply --server-side --force-conflicts -f $[filesUrl]/manifests/operator-crds.yaml - kubectl apply --server-side --force-conflicts -f $[filesUrl]/manifests/prometheus-operator-crds.yaml - kubectl apply --server-side --force-conflicts -f $[filesUrl]/manifests/eck-operator-crds.yaml - ``` - -1. Run the Helm upgrade command for `tigera-operator` and make sure to either update `values.yaml` with your configuration or use custom `values.yaml` file: - - - {'$[version]' === 'master' - ? ( - `helm upgrade calico-enterprise --values= tigera-operator-v0.0.tgz \\ - --set-file imagePullSecrets.tigera-pull-secret=,tigera-prometheus-operator.imagePullSecrets.tigera-pull-secret= \\ - --set-file licenseKeyContent= \\ - --namespace tigera-operator` - ) - : ( - `helm upgrade calico-enterprise --values= tigera-operator-$[chart_version_name].tgz \\ - --set-file imagePullSecrets.tigera-pull-secret=,tigera-prometheus-operator.imagePullSecrets.tigera-pull-secret= \\ - --set-file licenseKeyContent= \\ - --namespace tigera-operator` - ) - } - - - :::note - - If you previously upgraded from open source to enterprise, your installation name will be `calico`. Substitute the command - above with `helm upgrade calico ...` instead. - - ::: - - 1. You can monitor progress with the following command: - - ```bash - watch kubectl get tigerastatus - ``` - -:::note - -Make sure you have the `CNI Type` defined in your `values.yaml` file, especially if it is different from the default `CNI` type. -If there are any problems you can use `kubectl get tigerastatus -o yaml` to get more details. - -::: diff --git a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/upgrading/upgrading-enterprise/kubernetes-upgrade-tsee/index.mdx b/calico-enterprise_versioned_docs/version-3.19-2/getting-started/upgrading/upgrading-enterprise/kubernetes-upgrade-tsee/index.mdx deleted file mode 100644 index f7c25471c4..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/upgrading/upgrading-enterprise/kubernetes-upgrade-tsee/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: Upgrade from an earlier release of Calico Enterprise using Kubernetes. 
-hide_table_of_contents: true ---- - -# Kubernetes - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/upgrading/upgrading-enterprise/kubernetes-upgrade-tsee/operator.mdx b/calico-enterprise_versioned_docs/version-3.19-2/getting-started/upgrading/upgrading-enterprise/kubernetes-upgrade-tsee/operator.mdx deleted file mode 100644 index 67eaf09748..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/upgrading/upgrading-enterprise/kubernetes-upgrade-tsee/operator.mdx +++ /dev/null @@ -1,116 +0,0 @@ ---- -description: Upgrading from an earlier release of Calico Enterprise with the operator. ---- - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; -import UpgradeOperatorSimple from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/UpgradeOperatorSimple'; - -# Upgrade Calico Enterprise installed with the operator - -:::note - -All upgrades in $[prodname] are free with a valid license. - -::: - -## Upgrades paths - -You can upgrade your cluster to a maximum of **two releases** from your existing version. For example, if you are on version 3.15, you can upgrade to 3.16, or you can upgrade directly to 3.17. However, you cannot upgrade beyond **two releases**; upgrading from 3.15 to 3.18 (three releases) is not supported. - -If you are several versions behind where you want to be, you must go through each group of two releases to get there. For example, if you are on version 3.16, and you want to get to 3.19, you can upgrade to 3.18, then upgrade from 3.18 directly to 3.19. - -:::note - -Always check the [Release Notes](../../../../release-notes/index.mdx) for exceptions; limitations can override the above pattern. - -::: - -## Prerequisites - -Verify that your Kubernetes cluster is using a version of $[prodname] installed with the operator, by running -`kubectl get tigerastatus`. If the result is successful, then your installation is using the operator. - -If your cluster is on a version earlier than 2.6 or does not use the operator, contact Tigera support to upgrade. - -If your cluster has a Calico installation, contact Tigera support to upgrade. - -## Prepare your cluster for the upgrade - -During the upgrade the controller that manages Elasticsearch is updated. Because of this, the $[prodname] LogStorage -CR is temporarily removed during upgrade. Features that depend on LogStorage are temporarily unavailable, among which -are the dashboards in the web console. Data ingestion is temporarily paused and will continue when the LogStorage is -up and running again. - -To retain data from your current installation (optional), ensure that the currently mounted persistent volumes -have their reclaim policy set to [retain data](https://kubernetes.io/docs/tasks/administer-cluster/change-pv-reclaim-policy/). -Retaining data is only recommended for users that use a valid Elastic license. Trial licenses can get invalidated during -the upgrade. - -### Upgrade OwnerReferences - -If you do not use OwnerReferences on resources in the projectcalico.org/v3 API group, you can skip this section. - -Starting in $[prodname] v3.19, a change in the way UIDs are generated for projectcalico.org/v3 resources requires that you update any OwnerReferences -that refer to projectcalico.org/v3 resources as an owner. 
After upgrade, the UID for all projectcalico.org/v3 resources will be changed, resulting in any -owned resources being garbage collected by Kubernetes. - -1. Remove any OwnerReferences from resources in your cluster that have `apiGroup: projectcalico.org/v3`. -1. Perform the upgrade normally. -1. Add new OwnerReferences to your resources referencing the new UID. - -### Default Deny - -$[prodname] creates a default-deny for the calico-system namespace. If you deploy workloads into the calico-system namespace, you must create policy that allows the required traffic for your workloads prior to upgrade. - -### Windows - -If your cluster has Windows nodes and uses custom TLS certificates for log storage, prior to upgrade, prepare and apply new certificates for [log storage](../../../../operations/comms/log-storage-tls.mdx) that include the required service DNS names. - -For AKS only, upgrades to a newer version will automatically upgrade $[prodnameWindows]. During the upgrade, Windows nodes will be tainted so new pods will not be scheduled until the upgrade of the node has finished. The $[prodnameWindows] upgrade status can be monitored with: `kubectl get tigerastatus calico -oyaml`. - -### Multi-cluster management - -For $[prodname], upgrading multi-cluster management setups must include updating all managed and management clusters. - -:::note - -These steps differ based on your cluster type. If you are unsure of your cluster type, look at the field `clusterManagementType` when you run `kubectl get installation -o yaml` before you proceed. - -::: - -## Upgrade Calico Enterprise - - - - - - - - - -:::note - -If $[prodname] was installed directly onto the AKS cluster, follow the upgrade instructions in the **Kubernetes** tab. - -::: - -These AKS upgrade instructions apply only to AKS clusters which were upgraded to -$[prodname] from Calico. Check whether the cluster was upgraded from Calico by -checking the namespace for the active operator: - -```bash -kubectl get configmap -n calico-system active-operator -oyaml | grep active-namespace -``` - -If the `active-namespace` is `tigera-operator-enterprise`, then the cluster was -upgraded from Calico. - - - - - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/upgrading/upgrading-enterprise/openshift-upgrade.mdx b/calico-enterprise_versioned_docs/version-3.19-2/getting-started/upgrading/upgrading-enterprise/openshift-upgrade.mdx deleted file mode 100644 index 0fdc55c714..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/getting-started/upgrading/upgrading-enterprise/openshift-upgrade.mdx +++ /dev/null @@ -1,187 +0,0 @@ ---- -description: Upgrade to a newer version of Calico Enterprise installed with OpenShift. ---- - -import InstallOpenShiftManifests from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/InstallOpenShiftManifests'; -import OpenShiftPrometheusOperator from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/OpenShiftPrometheusOperator'; - -# Upgrade Calico Enterprise installed with OpenShift - -:::note - -All upgrades in $[prodname] are free with a valid license. - -::: - -## Upgrades paths - -You can upgrade your cluster to a maximum of **two releases** from your existing version. For example, if you are on version 3.15, you can upgrade to 3.16, or you can upgrade directly to 3.17. However, you cannot upgrade beyond **two releases**; upgrading from 3.15 to 3.18 (three releases) is not supported. 
- -If you are several versions behind where you want to be, you must go through each group of two releases to get there. For example, if you are on version 3.16, and you want to get to 3.19, you can upgrade to 3.18, then upgrade from 3.18 directly to 3.19. - -:::note - -Always check the [Release Notes](../../../release-notes/index.mdx) for exceptions; limitations can override the above pattern. - -::: - -## Prerequisites - -Ensure that your $[prodname] OpenShift cluster is running a supported version of [OpenShift Container Platform](../../compatibility.mdx#openshift), and the $[prodname] operator version is v1.2.4 or greater. - -:::note - -You can check if you are running the operator by checking for the existence of the operator namespace -with `oc get ns tigera-operator` or issuing `oc get tigerastatus`; a successful return means your installation is -using the operator. - -::: - -## Prepare your cluster for the upgrade - -During upgrade, the $[prodname] LogStorage CR is temporarily removed so Elasticsearch can be upgraded. Features -that depend on LogStorage are temporarily unavailable, including dashboards in the web console. Data ingestion is paused -temporarily, but resumes when the LogStorage is up and running again. - -To retain data from your current installation (optional), ensure that the currently mounted persistent volumes -have their reclaim policy set to [retain data](https://kubernetes.io/docs/tasks/administer-cluster/change-pv-reclaim-policy/). -Data retention is recommended only for users that have a valid Elasticsearch license. (Trial licenses can be invalidated -during upgrade). - -### Upgrade OwnerReferences - -If you do not use OwnerReferences on resources in the projectcalico.org/v3 API group, you can skip this section. - -Starting in $[prodname] v3.19, a change in the way UIDs are generated for projectcalico.org/v3 resources requires that you update any OwnerReferences -that refer to projectcalico.org/v3 resources as an owner. After upgrade, the UID for all projectcalico.org/v3 resources will be changed, resulting in any -owned resources being garbage collected by Kubernetes. - -1. Remove any OwnerReferences from resources in your cluster that have `apiGroup: projectcalico.org/v3`. -1. Perform the upgrade normally. -1. Add new OwnerReferences to your resources referencing the new UID. - -### Default Deny - -$[prodname] creates a default-deny for the calico-system namespace. If you deploy workloads into the calico-system namespace, you must create policy that allows the required traffic for your workloads prior to upgrade. - -### Windows - -If your cluster has Windows nodes and uses custom TLS certificates for log storage, prior to upgrade, prepare and apply new certificates for [log storage](../../../operations/comms/log-storage-tls.mdx) that include the required service DNS names. - -### Multi-cluster management - -For $[prodname] v3.5 and v3.7, upgrading multi-cluster management setups must include updating all managed and management clusters. - -### Egress gateway - -If your cluster has egress gateway in a non-default namespace, ensure the namespace is privileged by adding the following to the namespace: - -##### Label -``` -openshift.io/run-level: "0" -pod-security.kubernetes.io/enforce: privileged -pod-security.kubernetes.io/enforce-version: latest -``` -##### Annotation -``` -security.openshift.io/scc.podSecurityLabelSync: "false" -``` - -## Download the new manifests - -Make a manifests directory. 
- -```bash -mkdir manifests -``` - - - -# Upgrade from 3.0 or later - -:::note - -The steps differ based on your cluster type. If you are unsure of your cluster type, look at the field `clusterManagementType` when you run `oc get installation -o yaml` before you proceed. - -::: - -1. Apply the updated manifests. - - ```bash - oc apply --server-side --force-conflicts -f manifests/ - ``` - -1. - -1. If your cluster is a management cluster, apply a [ManagementCluster](../../../reference/installation/api.mdx#managementcluster) - CR to your cluster. - - ```bash - oc apply -f - < - ``` - -1. Apply the [ManagementCluster](../reference/installation/api.mdx) CR. - - ```bash - kubectl apply -f - < $MANAGED_CLUSTER.yaml create -f - < -n tigera-manager - ``` -1. Delete all managed cluster resources in your cluster. - - ```bash - kubectl delete managedcluster --all - ``` - - :::note - - Although installation automatically cleans up credentials for managed clusters, the management cluster Elasticsearch still retains managed cluster data based on the retention value that you specify in your [LogStorage](../reference/installation/api.mdx). - - ::: - -### Change a managed cluster to a standalone cluster - -1. Ensure a persistent volume is provisioned to store log data for the standalone cluster. - See [Configure storage for logs and reports](../operations/logstorage/create-storage.mdx). - -1. Remove the `ManagementClusterConnection` from your cluster and delete the managed cluster connection secret. - - ```bash - kubectl delete managementclusterconnection tigera-secure - kubectl delete secret tigera-managed-cluster-connection -n tigera-operator - ``` - - :::note - - If the operator in your managed cluster is running in a different namespace, use that namespace in the `kubectl delete secret...` command. - - ::: - - Follow the following step to also disconnect your managed cluster from your management cluster. - - 1. In your management cluster, delete the `ManagedCluster` resource for your managed cluster. Note that deleting the managed cluster does not result in loss of data. Whenever you reconnect your cluster using the same name, existing data is available again. - ```bash - kubectl delete managedcluster - ``` - -1. Install the Tigera custom resources. - For more information, see [the installation reference](../reference/installation/api.mdx). - ```bash - kubectl apply -f $[filesUrl]/manifests/custom-resources.yaml - ``` -1. Monitor the progress with the following command: - ```bash - watch kubectl get tigerastatus - ``` - When all components show a status of `Available`, go to the next step. -1. Remove your managed cluster from the **management cluster**. - ```bash - kubectl delete managedcluster - ``` diff --git a/calico-enterprise_versioned_docs/version-3.19-2/multicluster/federation/aws.mdx b/calico-enterprise_versioned_docs/version-3.19-2/multicluster/federation/aws.mdx deleted file mode 100644 index 631b562f86..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/multicluster/federation/aws.mdx +++ /dev/null @@ -1,68 +0,0 @@ ---- -description: A sample configuration of Calico Enterprise federated endpoint identity and federated services for an AWS cluster. ---- - -# Cluster mesh example for clusters in AWS - -## Big picture - -A sample configuration for cluster mesh using AWS clusters. - -## Tutorial - -**Set up** - -The cluster is installed on real hardware where node and pod IPs are routable, using an edge VPN router to peer with the AWS cluster. 
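The on-premises side of this peering is typically expressed as a $[prodname] BGPPeer resource that points at the edge VPN router. The following is an illustrative sketch only; the peer IP and AS number are placeholders rather than values taken from this environment:

```yaml
apiVersion: projectcalico.org/v3
kind: BGPPeer
metadata:
  name: edge-vpn-router
spec:
  # Hypothetical address and ASN of the edge VPN router.
  peerIP: 10.0.0.1
  asNumber: 64512
```

The diagram and configuration notes below describe where this peering fits in the overall topology.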
- -![A diagram showing the key configuration requirements setting up an AWS cluster (using AWS VPN CNI) peering with an on-premise cluster.](/img/calico-enterprise/federation/aws-rcc.svg) - -**Calico Enterprise configuration** - -- IP pool resource is configured for the on-premise IP assignment with IPIP is disabled -- BGP peering to the VPN router -- A Remote Cluster Configuration resource references the AWS cluster -- Service discovery of the AWS cluster services uses the Calico Enterprise Federated Services Controller - -**Notes** - -- If VPN Router is configured as a route reflector for the on-premise cluster, you would: - - Configure the default BGP Configuration resource to disable node-to-node mesh - - Configure a global BGP Peer resource to peer with the VPN router -- If the IP Pool has `Outgoing NAT` enabled, then you must add an IP Pool covering the AWS cluster VPC with disabled set to `true`. When set to `true` the pool is not used for IP allocations, and SNAT is not performed for traffic to the AWS cluster. - -**AWS configuration** - -- A VPC CIDR is chosen that does not overlap with the on-premise IP ranges. -- There are 4 subnets within the VPC, split across two AZs (for availability) such that each AZ has a public and private subnet. In this particular example, the split of responsibility is: - - The private subnet is used for node and pod IP allocation - - The public subnet is used to home a NAT gateway for pod-to-internet traffic. -- The VPC is peered to an on-premise network using a VPN. This is configured as a VPN gateway for the AWS side, and a classic VPN for the customer side. BGP is used for route distribution. -- Routing table for private subnet has: - - "propagate" set to "true" to ensure BGP-learned routes are distributed - - Default route to the NAT gateway for public internet traffic - - Local VPC traffic -- Routing table for public subnet has default route to the internet gateway. -- Security group for the worker nodes has: - - Rule to allow traffic from the peered networks - - Other rules required for settings up VPN peering (refer to the AWS docs for details). - -To automatically create a Network Load Balancer (NLB) for the AWS deployment, we apply a service with the correct annotation. - -```yaml -apiVersion: v1 -kind: Service -metadata: - annotations: - service.beta.kubernetes.io/aws-load-balancer-type: nlb - name: nginx-external -spec: - externalTrafficPolicy: Local - ports: - - name: http - port: 80 - protocol: TCP - targetPort: 80 - selector: - run: nginx - type: LoadBalancer -``` diff --git a/calico-enterprise_versioned_docs/version-3.19-2/multicluster/federation/index.mdx b/calico-enterprise_versioned_docs/version-3.19-2/multicluster/federation/index.mdx deleted file mode 100644 index a505a53f50..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/multicluster/federation/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: Steps to configure cluster mesh. 
-hide_table_of_contents: true ---- - -# Federation and multi-cluster networking - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/multicluster/federation/kubeconfig.mdx b/calico-enterprise_versioned_docs/version-3.19-2/multicluster/federation/kubeconfig.mdx deleted file mode 100644 index a7f3daaa8d..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/multicluster/federation/kubeconfig.mdx +++ /dev/null @@ -1,474 +0,0 @@ ---- -description: Configure a local cluster to pull endpoint data from a remote cluster. ---- - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - -# Configure federated endpoint identity and multi-cluster networking - -## Big picture - -Configure a cluster to federate endpoint identities and establish cross-cluster connectivity. - -## Value - -Secure cross-cluster traffic with identity-aware policy, and leverage $[prodname] to establish the required cross-cluster networking. - -## Concepts - -### Local and remote clusters - -Each cluster in the cluster mesh can act as both a local and remote cluster. - -- Local clusters are configured to retrieve endpoint and routing data from remote clusters (via RemoteClusterConfiguration) -- Remote clusters authorize local clusters to retrieve endpoint and routing data - -### Remote endpoint identity and policy - -Typically, policy can only reference the endpoint identity (e.g. pod labels) of local endpoints. Federated endpoint identity enables local policy rules to reference remote endpoint identities. - -### RemoteClusterConfiguration -RemoteClusterConfiguration is the resource that configures a local cluster to sync resources from a remote cluster. It primarily describes how a local cluster establishes that connection to the remote cluster through which resources are synced. - -The resources synced through this connection enable the local cluster to reference remote endpoint identity and establish cross-cluster overlay routes. - -RemoteClusterConfiguration creates this connection in one direction. If you want identity-aware policy on both sides (i.e. both clusters) of a connection, or you want $[prodname] to establish cross-cluster overlay networking, you need to create a RemoteClusterConfiguration for both directions. - -### kubeconfig files -Each cluster in the cluster mesh should have a dedicated kubeconfig file used by other clusters in the mesh to connect and authenticate. - -## Before you begin -**Required** -- [Install $[prodname]](../../getting-started/install-on-clusters/kubernetes/index.mdx) -- [Ensure pod IP routability](#ensure-pod-ip-routability) - -## How to -- [Create kubeconfig files](#create-kubeconfig-files) -- [Create RemoteClusterConfiguration](#create-remoteclusterconfigurations) -- [Validate federation and multi-cluster networking](#validate-federation-and-multi-cluster-networking) -- [Create remote-identity-aware network policy](#create-remote-identity-aware-network-policy) -- [Troubleshoot](#troubleshoot) -- [Configure IP pool resources](#configure-ip-pool-resources) - -### Ensure pod IP routability -Federation of workload endpoint identities requires [Pod IP routability](./overview#pod-ip-routability) between clusters. If your clusters are using a supported overlay networking mode, $[prodname] can automatically meet this requirement when clusters are connected. 
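As a quick check of which overlay mode (if any) each cluster currently uses, you can inspect its IP pools. This is a sketch that assumes `kubectl` is pointed at the cluster in question and that the $[prodname] API server is serving `projectcalico.org/v3` resources:

```bash
kubectl get ippools.projectcalico.org \
  -o custom-columns='NAME:.metadata.name,CIDR:.spec.cidr,VXLAN:.spec.vxlanMode,IPIP:.spec.ipipMode'
```

Repeat this in each cluster; the reported CIDRs must not overlap if you plan to rely on $[prodname] multi-cluster networking, as described below.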
- -#### $[prodname] multi-cluster networking -$[prodname] can automatically extend the overlay networking in your clusters to establish pod IP routes across clusters and thus meet the requirement for Pod IP routability. Only VXLAN overlay is supported at this time. - -Ensure the following requirements are met if utilizing $[prodname] multi-cluster networking to achieve pod IP routability: -- All nodes in the cluster mesh must be able to establish connections to each other via their private IP, and must have unique node names. -- VXLAN must be enabled on participating IP pools in all clusters, and these IP pool CIDRs must not overlap. -- `routeSource` and `vxlan*` FelixConfiguration values must be aligned across clusters, and traffic on the `vxlanPort` must be allowed between nodes in the cluster mesh. -- RemoteClusterConfigurations must be established in both directions for cluster pairs in the cluster mesh. -- CNI must be Calico. - -With these requirements met, multi-cluster networking will be automatically established when RemoteClusterConfigurations are created. - -#### Other networking configurations -Alternatively, you can meet the requirement for Pod IP routability by configuring $[prodname] with BGP or with VPC routing to establish unencapsulated Pod IP routes in your environment. - -:::caution -If you have already configured federated endpoint identity without multi-cluster networking, and you wish to switch to using multi-cluster networking, you should note that the steps below are intended for establishing new RemoteClusterConfigurations. You may wish to consult the [switch to multi-cluster networking](#switch-to-multi-cluster-networking) section. -::: - -### Create kubeconfig files - -Create a kubeconfig file, for each cluster, that will be used by other clusters to connect and authenticate themselves. - -**For each** cluster in the cluster mesh, utilizing an existing kubeconfig with administrative privileges, follow these steps: - -1. Create the ServiceAccount used by remote clusters for authentication: - - ```bash - kubectl apply -f $[filesUrl]/manifests/federation-remote-sa.yaml - ``` - -1. If RBAC is enabled, create the ClusterRole and ClusterRoleBinding used by remote clusters for authorization: - - ```bash - kubectl apply -f $[filesUrl]/manifests/federation-rem-rbac-kdd.yaml - ``` - -1. Create the kubeconfig file: - - Open a file in your favorite editor. Consider establishing a naming scheme unique to each cluster, e.g. `kubeconfig-app-a`. - - Paste the following into the file - we will replace the templated values with data retrieved in following steps. - ```yaml - apiVersion: v1 - kind: Config - users: - - name: tigera-federation-remote-cluster - user: - token: - clusters: - - name: tigera-federation-remote-cluster - cluster: - certificate-authority-data: - server: - contexts: - - name: tigera-federation-remote-cluster-ctx - context: - cluster: tigera-federation-remote-cluster - user: tigera-federation-remote-cluster - current-context: tigera-federation-remote-cluster-ctx - ``` - -1. 
Retrieve the ServiceAccount token: - - #### If using Kubernetes ≥ 1.24 - - Create the ServiceAccount token: - ```bash - kubectl apply -f - <` with it's value: - ```bash - kubectl describe secret tigera-federation-remote-cluster -n kube-system - ``` - - #### If using Kubernetes < 1.24 - - Retrieve the ServiceAccount token value and replace `` with it's value: - ```bash - kubectl describe secret -n kube-system $(kubectl get serviceaccounts tigera-federation-remote-cluster -n kube-system -o jsonpath='{.secrets[0].name}') - ``` - -1. Retrieve and save the certificate authority and server data: - - Run the following command: - ```bash - kubectl config view --flatten --minify - ``` - Replace `` and `` with `certificate-authority-data` and `server` values respectively. - -1. Verify that the `kubeconfig` file works: - - Issue a command like the following to validate the kubeconfig file can be used to connect to the current cluster and access resources: - ```bash - kubectl --kubeconfig=kubeconfig-app-a get nodes - ``` - -### Create RemoteClusterConfigurations -We'll now create the RemoteClusterConfigurations that establish synchronization between clusters. This enables remote-identity aware policy, federated services, and can establish multi-cluster networking. - - - - -In this setup, the cluster mesh will be configured to meet the pod IP routability requirement by establishing routes between clusters using [$[prodname] multi-cluster networking](#calico-enterprise-multi-cluster-networking). - -**For each pair** of clusters in the cluster mesh (e.g. cluster A and cluster B): - -1. In cluster A, create a secret that contains the kubeconfig for cluster B: - - Determine the namespace (``) for the secret to replace in all steps. - The simplest method to create a secret for a remote cluster is to use the `kubectl` command because it correctly encodes the data and formats the file. - ```bash - kubectl create secret generic remote-cluster-secret-name -n \ - --from-literal=datastoreType=kubernetes \ - --from-file=kubeconfig= - ``` - -1. If RBAC is enabled in cluster A, create a Role and RoleBinding for $[prodname] to use to access the secret that contains the kubeconfig for cluster B: - ```bash - kubectl create -f - < - rules: - - apiGroups: [""] - resources: ["secrets"] - verbs: ["watch", "list", "get"] - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - metadata: - name: remote-cluster-secret-access - namespace: - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: remote-cluster-secret-access - subjects: - - kind: ServiceAccount - name: calico-typha - namespace: calico-system - EOF - ``` - -1. Create the RemoteClusterConfiguration in cluster A: - - Within the RemoteClusterConfiguration, we specify the secret used to access cluster B, and the overlay routing mode which toggles the establishment of cross-cluster overlay routes. - ```bash - kubectl create -f - < - kind: Secret - syncOptions: - overlayRoutingMode: Enabled - EOF - ``` - -1. [Validate](#check-remote-cluster-connection) the that the remote cluster connection can be established. - -1. Repeat the above steps, switching cluster A and cluster B. - -After completing the above steps for all cluster pairs in the cluster mesh, your clusters should now be ready to utilize remote-identity-aware policy and federated services, along with multi-cluster networking if requirements were met. 
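As a quick sanity check before moving on, you can confirm that each cluster in a pair now has a RemoteClusterConfiguration for its peer. This is a sketch, assuming kubeconfig contexts named `cluster-a` and `cluster-b`; substitute your own context names:

```bash
kubectl --context cluster-a get remoteclusterconfigurations.projectcalico.org
kubectl --context cluster-b get remoteclusterconfigurations.projectcalico.org
```

Each command should list one entry per remote cluster configured on that side.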
- - - - -In this setup, the cluster mesh will rely on the [underlying network](#other-networking-configurations) to meet the pod IP routability requirement. - -**For each pair** of clusters in the cluster mesh (e.g. cluster A and cluster B): - -1. In cluster A, create a secret that contains the kubeconfig for cluster B: - - Determine the namespace (``) for the secret to replace in all steps. - The simplest method to create a secret for a remote cluster is to use the `kubectl` command because it correctly encodes the data and formats the file. - ```bash - kubectl create secret generic remote-cluster-secret-name -n \ - --from-literal=datastoreType=kubernetes \ - --from-file=kubeconfig= - ``` - -1. If RBAC is enabled in cluster A, create a Role and RoleBinding for $[prodname] to use to access the secret that contains the kubeconfig for cluster B: - ```bash - kubectl create -f - < - rules: - - apiGroups: [""] - resources: ["secrets"] - verbs: ["watch", "list", "get"] - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - metadata: - name: remote-cluster-secret-access - namespace: - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: remote-cluster-secret-access - subjects: - - kind: ServiceAccount - name: calico-typha - namespace: calico-system - EOF - ``` - -1. Create the RemoteClusterConfiguration in cluster A: - - Within the RemoteClusterConfiguration, we specify the secret used to access cluster B, and the overlay routing mode which toggles the establishment of cross-cluster overlay routes. - ```bash - kubectl create -f - < - kind: Secret - syncOptions: - overlayRoutingMode: Disabled - EOF - ``` - -1. If you have no IP pools in cluster A with NAT-outgoing enabled, skip this step. - - Otherwise, if you have IP pools in cluster A with NAT-outgoing enabled, and workloads in that pool will egress to workloads in cluster B, you need to instruct $[prodname] to not perform NAT on traffic destined for IP pools in cluster B. - - You can achieve this by creating a disabled IP pool in cluster A for each CIDR in cluster B. This IP pool should have NAT-outgoing disabled. For example: - - ```yaml - apiVersion: projectcalico.org/v3 - kind: IPPool - metadata: - name: clusterB-main-pool - spec: - cidr: - disabled: true - ``` - -1. [Validate](#check-remote-cluster-connection) the that the remote cluster connection can be established. - -1. Repeat the above steps, switching cluster A and cluster B. - -After completing the above steps for all cluster pairs in the cluster mesh, your clusters should now be ready to utilize remote-identity-aware policy and federated services. - - - - -:::caution - This tutorial sets up RemoteClusterConfigurations in both directions. This is required for $[prodname] to manage multi-cluster networking, and also ensures you can write identity-aware policy on both sides of a cross-cluster connection. Unidirectional connections can be made at your own discretion. -::: - -### Switch to multi-cluster networking -The steps above assume that you are configuring both federated endpoint identity and multi-cluster networking for the first time. If you already have federated endpoint identity, and want to use multi-cluster networking, follow these steps: - -1. Validate that all [requirements](#calico-enterprise-multi-cluster-networking) for multi-cluster networking have been met. -2. Update the ClusterRole in each cluster in the cluster mesh using the RBAC manifest found in [Create kubeconfig files](#create-kubeconfig-files) -3. 
In all RemoteClusterConfigurations, set `Spec.OverlayRoutingMode` to `Enabled`. -4. Verify that all RemoteClusterConfigurations are bidirectional (in both directions for each cluster pair) using these [instructions](#create-remoteclusterconfigurations). -5. If you had previously created disabled IP pools to prevent NAT outgoing from applying to remote cluster destinations, those disabled IP pools are no longer needed when using multi-cluster networking and must be deleted. - -### Validate federated endpoint identity & multi-cluster networking -#### Validate RemoteClusterConfiguration and federated endpoint identity -##### Check remote cluster connection -You can validate in a local cluster that Typha has synced to the remote cluster through the [Prometheus metrics for Typha](../../reference/component-resources/typha/prometheus#metric-reference). - -Alternatively, you can check the Typha logs for remote cluster connection status. Run the following command: -```bash -kubectl logs deployment/calico-typha -n calico-system | grep "Sending in-sync update" -``` -You should see an entry for each RemoteClusterConfiguration in the local cluster. - -If either output contains unexpected results, proceed to the [troubleshooting](#troubleshoot) section. - -#### Validate multi-cluster networking -If all requirements were met for $[prodname] to establish multi-cluster networking, you can test the functionality by establishing a connection from a pod in a local cluster to the IP of a pod in a remote cluster. Ensure that there is no policy in either cluster that may block this connection. - -If the connection fails, proceed to the [troubleshooting](#troubleshoot) section. - -### Create remote-identity-aware network policy -With federated endpoint identity and routing between clusters established, you can now use labels to reference endpoints on a remote cluster in local policy rules, rather than referencing them by IP address. - -The main policy selector still refers only to local endpoints; and that selector chooses which local endpoints to apply the policy. -However, rule selectors can now refer to both local and remote endpoints. - -In the following example, cluster A (an application cluster) has a network policy that governs outbound connections to cluster B (a database cluster). -```yaml -apiversion: projectcalico.org/v3 -kind: NetworkPolicy -metadata: - name: default.app-to-db - namespace: myapp -spec: - # The main policy selector selects endpoints from the local cluster only. - selector: app == 'backend-app' - tier: default - egress: - - destination: - # Rule selectors can select endpoints from local AND remote clusters. - selector: app == 'postgres' - protocol: TCP - ports: [5432] - action: Allow -``` - -### Troubleshoot -#### Troubleshoot RemoteClusterConfiguration and federated endpoint identity - -##### Verify configuration -For each impacted remote cluster pair (between cluster A and cluster B): -1. Retrieve the kubeconfig from the secret stored in cluster A. Manually verify that it can be used to connect to cluster B. - ```bash - kubectl get secret -n remote-cluster-secret-name -o=jsonpath="{.data.kubeconfig}" | base64 -d > verify_kubeconfig_b - kubectl --kubeconfig=verify_kubeconfig_b get nodes - ``` - This validates that the credentials used by Typha to connect to cluster B's API server are stored in the correct location and provide sufficient access. 
- - The command above should yield a result like the following: - ``` - NAME STATUS ROLES AGE VERSION - clusterB-master Ready master 7d v1.27.0 - clusterB-worker-1 Ready worker 7d v1.27.0 - clusterB-worker-2 Ready worker 7d v1.27.0 - ``` - - If you do not see the nodes of cluster B listed in response to the command above, verify that you [created](#create-kubeconfig-files) the kubeconfig for cluster B correctly, and that you [stored](#create-remoteclusterconfigurations) it in cluster A correctly. - - If you do see the nodes of cluster B listed in response to the command above, you can run this test (or a similar test) on a node in cluster A to verify that cluster A nodes can connect to the API server of cluster B. - -2. Validate that the Typha service account in Cluster A is authorized to retrieve the kubeconfig secret for cluster B. - ```bash - kubectl auth can-i list secrets --namespace --as=system:serviceaccount:calico-system:calico-typha - ``` - - This command should yield the following output: - ``` - yes - ``` - - If the command does not return this output, verify that you correctly [configured RBAC](#create-remoteclusterconfigurations) in cluster A. - -3. Repeat the above, switching cluster A to cluster B. - -##### Check logs -Validate that querying Typha logs yield the expected result outlined in the [validation](#validate-federated-endpoint-identity--multi-cluster-networking) section. - -If the Typha logs do not yield the expected result, review the warning or error-related logs in `typha` or `calico-node` for insights. - -###### calicoq -[calicoq](../../operations/clis/calicoq/installing) can be used to emulate the connection that Typha will make to remote clusters. Use the following command: -```bash -calicoq eval "all()" -``` -If all remote clusters are accessible, calicoq returns something like the following. Note the remote cluster prefixes: there should be endpoints prefixed with the name of each RemoteClusterConfiguration in the local cluster. -``` -Endpoints matching selector all(): - Workload endpoint remote-cluster-1/host-1/k8s/kube-system.kube-dns-5fbcb4d67b-h6686/eth0 - Workload endpoint remote-cluster-1/host-2/k8s/kube-system.cnx-manager-66c4dbc5b7-6d9xv/eth0 - Workload endpoint host-a/k8s/kube-system.kube-dns-5fbcb4d67b-7wbhv/eth0 - Workload endpoint host-b/k8s/kube-system.cnx-manager-66c4dbc5b7-6ghsm/eth0 -``` - -If this command fails, the error messages returned by the command may provide insight into where issues are occurring. - -#### Troubleshoot multi-cluster networking -##### Basic validation -* Ensure that RemoteClusterConfiguration and federated endpoint identity are [functioning correctly](#validate-federated-endpoint-identity--multi-cluster-networking) -* Verify that you have met the [prerequisites](#calico-enterprise-multi-cluster-networking) for multi-cluster networking -* If you had previously set up RemoteClusterConfigurations without multi-cluster networking, and are upgrading to use the feature, review the [switching considerations](#switch-to-multi-cluster-networking) -* Verify that traffic between clusters is not being denied by network policy - -##### Check overlayRoutingMode -Ensure that `overlayRoutingMode` is set to `"Enabled"` on all RemoteClusterConfigurations. 
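One way to check this across all RemoteClusterConfigurations in a cluster is shown below; it assumes `kubectl` can reach the `projectcalico.org/v3` API served by the $[prodname] API server:

```bash
kubectl get remoteclusterconfigurations.projectcalico.org \
  -o custom-columns='NAME:.metadata.name,OVERLAY_ROUTING:.spec.syncOptions.overlayRoutingMode'
```

Any entry that does not report `Enabled` will not have overlay routes established for that remote cluster.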
- -If overlay routing is successfully enabled, you can view the logs of a Typha instance using: -```bash -kubectl logs deployment/calico-typha -n calico-system -``` - -You should see an output for each connected remote cluster that looks like this: -``` -18:49:35.394 [INFO][14] wrappedcallbacks.go 443: Creating syncer for RemoteClusterConfiguration(my-cluster) -18:49:35.394 [INFO][14] watchercache.go 186: Full resync is required ListRoot="/calico/ipam/v2/assignment/" -18:49:35.395 [INFO][14] watchercache.go 186: Full resync is required ListRoot="/calico/resources/v3/projectcalico.org/workloadendpoints" -18:49:35.396 [INFO][14] watchercache.go 186: Full resync is required ListRoot="/calico/resources/v3/projectcalico.org/hostendpoints" -18:49:35.396 [INFO][14] watchercache.go 186: Full resync is required ListRoot="/calico/resources/v3/projectcalico.org/profiles" -18:49:35.396 [INFO][14] watchercache.go 186: Full resync is required ListRoot="/calico/resources/v3/projectcalico.org/nodes" -18:49:35.397 [INFO][14] watchercache.go 186: Full resync is required ListRoot="/calico/resources/v3/projectcalico.org/ippools" -``` - -If you do not see the each of the resource types above, overlay routing was not successfully enabled in your cluster. Verify that you followed the [setup](#create-remoteclusterconfigurations) correctly for overlay routing, and that the cluster is using a version of $[prodname] that supports multi-cluster networking. - -###### Check logs -Warning or error logs in `typha` or `calico-node` may provide insight into where issues are occurring. - -## Next steps - -[Configure federated services](services-controller.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/multicluster/federation/overview.mdx b/calico-enterprise_versioned_docs/version-3.19-2/multicluster/federation/overview.mdx deleted file mode 100644 index 777537a648..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/multicluster/federation/overview.mdx +++ /dev/null @@ -1,54 +0,0 @@ ---- -description: Configure a cluster mesh for cross-cluster endpoints sharing, cross-cluster connectivity, and cross-cluster service discovery. ---- - -# Overview - -## Big picture - -Secure cross-cluster connections with identity-aware network policy, and federate services for cross-cluster service discovery. - -Utilize $[prodname] to establish cross-cluster connectivity. - -## Value - -At some point in your Kubernetes journey, you may have applications that need to access services and workloads running in another cluster. - -By default, pods can only communicate with pods within the same cluster. Additionally, services and network policy only select pods from within the same cluster. $[prodname] can help overcome these barriers by forming a cluster mesh the following features: -- **Federated endpoint identity** - - Allow a local Kubernetes cluster to include the workload endpoints (pods) and host endpoints of a remote cluster in the calculation of local network policies applied on each node of the local cluster. - -- **Federated services** - - Enable a local Kubernetes Service to populate with Endpoints selected from both local cluster and remote cluster Services. - -- **Multi-cluster networking** - - Establish an overlay network between clusters to provide cross-cluster connectivity with $[prodname]. - -## Concepts - -### Pod IP routability - -$[prodname] cluster mesh is implemented at Kubernetes at the network layer, based on pod IPs. 
- -Taking advantage of federated workload endpoint identity and federated services requires that pod IPs are routable between clusters. This is because identity-aware network policy requires source and destination pod IPs to be preserved to establish pod identity. Additionally, the Endpoint IPs of pods selected by a federated Service must be routable in order for that Service to be of value. - -You can utilize $[prodname] multi-cluster networking to establish pod IP routability between clusters via overlay. Alternatively, you can manually set up pod IP routability between clusters without encapsulation (e.g. VPC routing, BGP routing). - -### Federated endpoint identity - -Federated endpoint identity in a cluster mesh allows a local Kubernetes cluster to include the workload endpoints (pods) and host endpoints of a remote cluster in the calculation of the local policies for each node, e.g. Cluster A network policy allows its application pods to talk to database pods in Cluster B. - -This feature does not _federate network policies_; policies from a remote cluster are not applied to the endpoints on the local cluster, and the policy from the local cluster is rendered only locally and applied to the local endpoints. - -### Federated services - -Federated services in a cluster mesh works with federated endpoint identity, providing cross-cluster service discovery for a local cluster. If you have an existing service discovery mechanism, this feature is optional. - -Federated services use the Tigera Federated Services Controller to federate all Kubernetes endpoints (workload and host endpoints) across all of the clusters. The Federated Services Controller accesses service and endpoints data in the remote clusters directly through the Kubernetes API. - -## Next steps - -[Configure remote-aware policy and multi-cluster networking](kubeconfig.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/multicluster/federation/services-controller.mdx b/calico-enterprise_versioned_docs/version-3.19-2/multicluster/federation/services-controller.mdx deleted file mode 100644 index 31206db2c2..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/multicluster/federation/services-controller.mdx +++ /dev/null @@ -1,209 +0,0 @@ ---- -description: Configure a federated service for cross-cluster service discovery for local clusters. ---- - -# Configure federated services - -## Big picture - -Configure local clusters to discover services across multiple clusters. - -## Value - -Use cluster mesh and federated services discovery along with federated endpoint identity to extend and automate endpoints sharing. (Optional if you have your own service discovery mechanism.) - -## Concepts - -### Federated services - -A federated service (also called a backing service), is a set of services with consolidated endpoints. $[prodname] discovers services across a cluster mesh (both local cluster and remote clusters) and creates a "federated service" on the local cluster that encompasses all of the individual services. - -Federated services are managed by the Tigera Federated Service Controller, which monitors and maintains endpoints for each locally-federated service. The controller does not change configuration on remote clusters. - -A federated service looks similar to a regular Kubernetes service, but instead of using a pod selector, it uses an annotation. 
For example: - -```yaml -apiVersion: v1 -kind: Service -metadata: - name: my-app-federated - namespace: default - annotations: - federation.tigera.io/serviceSelector: run == "my-app" -spec: - ports: - - name: my-app-ui - port: 8080 - protocol: TCP - type: ClusterIP -``` - -| Annotation | Description | -| -------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `federation.tigera.io/serviceSelector` | Required field that specifies the services used in the federated service. Format is a standard $[prodname] selector (i.e. the same as $[prodname] policy resources) and selects services based on their labels. The selector annotation selects services, not pods.

    Only services in the same namespace as the federated service are included. This implies namespace names across clusters are linked (this is a basic premise of federated endpoint identity).

    If the value is incorrectly specified, the service is not federated and endpoint data is removed from the service. View the warning logs in the controller for any issues processing this value. | - -**Syntax and rules** - -- Services that you specify in the federated service must be in the same namespace or they are ignored. A basic assumption of federated endpoint identity is that namespace names are linked across clusters. -- If you specify a `spec.Selector` in a federated service, the service is not federated. -- You cannot federate another federated service. If a service has a federated services annotation, it is not included as a backing service of another federated service. -- The target port number in the federated service ports is not used. - -**Match services using a label** - -You can also match services using a label. The label is implicitly added to each service, but it does not appear in `kubectl` when viewing the service. - -| Label | Description | -| ---------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `federation.tigera.io/remoteClusterName` | Label added to all remote services that correspond to the Remote Cluster Configuration name for the remote cluster. Use this label to restrict the clusters selected by the services. **Note**: The label is not added for services in the local cluster. | - -**About endpoints** - -- Do not manually create or manage endpoints resources; let the Tigera controller do all of the work. User updates to endpoint resources are ignored. -- Endpoints are selected only when the service port name and protocol in the federated service matches the port name and protocol in the backing service. -- Endpoint data configured in the federated service is slightly modified from the original data of the backing service. For backing services on remote clusters, the `targetRef.name` field in the federated service adds the ``. For example, `/`. - -## Before you begin - -**Required** - -- [Configure federated endpoint identity](kubeconfig.mdx) - -## How to - -- [Create service resources](#create-service-resources) -- [Create a federated service](#create-a-federated-service) -- [Access a federated service](#access-a-federated-service) - -### Create service resources - -On each cluster in the mesh that is providing a particular service, create your service resources as you normal would with the following requirements: - -- Services must all be in the same namespace. -- Configure each service with a common label key and value to identify the common set of services across your clusters (for example, `run=my-app`). - -Kubernetes manages the service by populating the service endpoints from the pods that match the selector configured in the service spec. - -### Configure a federated service - -1. On a cluster that needs to access the federated set of pods that are running an application, create a - service on that cluster leaving the `spec selector` blank. -1. Set the `federation.tigera.io/serviceSelector` annotation to be a $[prodname] selector that selects the previously-configured services using the matching label match (for example, `run == "my-app"`). 
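Once the annotation is in place, you can confirm that the controller is populating the service by listing its endpoints. For example, for a federated service named `my-app-federated` (a hypothetical name) in the `default` namespace:

```bash
# Endpoints should list pod IPs from every backing service, local and remote
kubectl get endpoints my-app-federated -n default -o wide
```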
- -The Federated Services Controller manages this service, populating the service endpoints from all of the services that match the service selector configured in the annotation. - -### Access a federated service - -Any application can access the federated service using the local DNS name for that service. The simplest way to access a federated service is through its corresponding DNS name. - -By default, Kubernetes adds DNS entries to access a service locally. For a service called `my-svc` in the namespace -`my-namespace`, the following DNS entry would be added to access the service within the local cluster: - -``` -my-svc.my-namespace.svc.cluster.local -``` - -DNS lookup for this name returns the fixed ClusterIP address assigned for the federated service. The ClusterIP is translated in iptables to one of the federated service endpoint IPs, and is load balanced across all of the endpoints. - -## Tutorial - -### Create a service - -In the following example, the remote cluster defines the following service. - -```yaml -apiVersion: v1 -kind: Service -metadata: - labels: - run: my-app - name: my-app - namespace: default -spec: - selector: - run: my-app - ports: - - name: my-app-ui - port: 80 - protocol: TCP - targetPort: 9000 - - name: my-app-console - port: 81 - protocol: TCP - targetPort: 9001 - type: ClusterIP -``` - -This service definition exposes two ports for the application `my-app`. One port for accessing a UI, and the other for accessing a management console. The service specifies a Kubernetes selector, which means the endpoints for this service are automatically populated by Kubernetes from matching pods within the services own cluster. - -### Create a federated service - -To create a federated service on your local cluster that federates the web access port for both the local and remote service, you would create a service resource on your local cluster as follows: - -```yaml -apiVersion: v1 -kind: Service -metadata: - name: my-app-federated - namespace: default - annotations: - federation.tigera.io/serviceSelector: run == "my-app" -spec: - ports: - - name: my-app-ui - port: 8080 - protocol: TCP - type: ClusterIP -``` - -The `spec.selector` is not specified so it will not be managed by Kubernetes. Instead, we use a `federation.tigera.io/selector` annotation, indicating that this is a federated service managed by the Federated Services Controller. - -The controller matches the `my-app` services (matching the run label) on both the local and remote clusters, and consolidates endpoints from the `my-app-ui` TCP port for both of those services. Because the federated service does not specify the `my-app-console` port, the controller does not include these endpoints in the federated service. - -The endpoints data for the federated service is similar to the following. Note that the name of the remote cluster is included in `targetRef.name`. 
- -```yaml -apiVersion: v1 -kind: Endpoints -metadata: - creationTimestamp: 2018-07-03T19:41:38Z - annotations: - federation.tigera.io/serviceSelector: run == "my-app" - name: my-app-federated - namespace: default - resourceVersion: '701812' - selfLink: /api/v1/namespaces/default/endpoints/my-app-federated - uid: 1a0427e8-7ef9-11e8-a24c-0259d75c6290 -subsets: - - addresses: - - ip: 192.168.93.12 - nodeName: node1.localcluster.tigera.io - targetRef: - kind: Pod - name: my-app-59cf48cdc7-frf2t - namespace: default - resourceVersion: '701655' - uid: 19f5e914-7ef9-11e8-a24c-0259d75c6290 - ports: - - name: my-app-ui - port: 80 - protocol: TCP - - addresses: - - ip: 192.168.0.28 - nodeName: node1.remotecluster.tigera.io - targetRef: - kind: Pod - name: remotecluster/my-app-7b6f758bd5-ctgbh - namespace: default - resourceVersion: '701648' - uid: 19e2c841-7ef9-11e8-a24c-0259d75c6290 - ports: - - name: my-app-ui - port: 80 - protocol: TCP -``` - -## Additional resources - -- [Cluster mesh example for AWS](aws.mdx) -- [Federated service controller](../../reference/component-resources/kube-controllers/configuration.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/multicluster/fine-tune-deployment.mdx b/calico-enterprise_versioned_docs/version-3.19-2/multicluster/fine-tune-deployment.mdx deleted file mode 100644 index eb6345bde4..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/multicluster/fine-tune-deployment.mdx +++ /dev/null @@ -1,154 +0,0 @@ ---- -description: Review your multi-cluster management deployment to ensure it is ready for production. ---- - -# Fine-tune multi-cluster management - -## Big picture - -Fine-tune your multi-cluster management deployment for production. - -## How to - -- [Review log storage collection and retention](#review-log-storage-collection-and-retention) -- [Review service type for the management cluster](#review-service-type-for-the-management-cluster) -- [Review user permissions](#review-user-permissions) -- [Review user permissions for managed cluster log data](#review-user-permissions-for-managed-cluster-log-data) -- [Filter log data for a managed cluster in Kibana](#filter-log-data-for-a-managed-cluster-in-kibana) - -### Review log storage collection and retention - -Because the management cluster stores all log data across your managed clusters, choose a size to accommodate your anticipated data volume. See [Adjust log storage size](../operations/logstorage/adjust-log-storage-size.mdx). - -### Review service type for the management cluster - -In the [Install multi-cluster management guide](./set-up-multi-cluster-management/standard-install/create-a-management-cluster.mdx), we used a `NodePort` service because it was the quickest way to expose the management cluster. But, there are drawbacks to using `NodePort` services, described in [Defining a Service in Kubernetes](https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service). For production and high availability, choose a type of service that is scalable. We have tested both `NodePort` and `LoadBalancer` services. For both, a security rule/firewall rule is needed to allow connections to the management cluster. 
- -The configuration for your service (regardless of type) should obey the following requirements: - -- Uses TCP protocol -- Maps to port 9449 on the Manager (web console) pod -- Exists within the `tigera-manager` namespace -- Uses label selector `k8s-app: tigera-manager` - -The following is an example of a valid `LoadBalancer` service: - -```yaml -apiVersion: v1 -kind: Service -metadata: - name: tigera-manager-mcm - namespace: tigera-manager -spec: - type: LoadBalancer - ports: - - port: 9449 - protocol: TCP - targetPort: 9449 - selector: - k8s-app: tigera-manager -``` - -:::note - -Using a LoadBalancer may require additional steps, depending on how you provisioned your Kubernetes cluster. - -::: - -:::note - -If you previously set up a management cluster with a service, don’t forget to update the IP address in each managed clusters, by editing the `ManagementClusterConnection` [manifest that you downloaded](./set-up-multi-cluster-management/standard-install/create-a-managed-cluster.mdx) and apply it, or use `kubectl edit managementclusterconnection tigera-secure`. - -::: - -### Review user permissions - -In the [Install multi-cluster management guide](./set-up-multi-cluster-management/standard-install/create-a-management-cluster.mdx), we created a user with full admin-level permissions in both the management and managed cluster. In a production environment you will want to define narrow permissions for your users. - -When defining roles and permissions across your clusters, make note of the following: - -- All users that log in to the $[prodname] web console must use a valid service account or user account in the management cluster. -- When the management cluster performs actions on a managed cluster, it passes the user ID of the current logged in user to the managed cluster for authorization. As a requirement, the user must have the same username defined across the management cluster and managed clusters. A user can have different permissions for accessing resources in each managed cluster, as defined by Kubernetes Role and ClusterRole objects, but the username used in the corresponding RoleBinding and ClusterRoleBinding objects must always match what is in the management cluster. - -### Review user permissions for managed cluster log data - -Log data across all managed clusters is stored in a centralized Elasticsearch within the management cluster. You can use [Kubernetes RBAC roles and cluster roles](https://kubernetes.io/rbac/) to define granular access to cluster log data. For example, using the RBAC rule syntax, you can define rules to control access to specific log types or specific clusters by using the resources and resourceNames list fields. - -$[prodname] log data is stored within Elasticsearch indexes. The indexes have the following naming scheme: - -```bash -.. -``` - -A standalone cluster uses the cluster name cluster for Elasticsearch indexes. This is also the name used by a management cluster. For a managed cluster, its cluster name is the value chosen by the user at the time of registration, through the $[prodname] web console. - -To restrict to a specific cluster or subset of clusters use, resources. To restrict to a specific log type use, resourceNames. The following are valid cluster types: - -- “flows” -- “audit” -- “audit_ee” -- “audit_kube” -- “events” -- “dns” -- "l7" - -Let’s look at some examples for defining RBAC rules within a role or cluster role to restrict access to log data by log type and cluster name. 
- -The rule below allows access to log types flow and DNS for a single cluster with the name app-cluster. - -```yaml -- apiGroups: ['lma.tigera.io'] - resources: ['app-cluster'] - resourceNames: ['flows', 'dns'] - verbs: ['get'] -``` - -:::note - -The apiGroups will always be `lma.tigera.io`. The verbs will always be get. -The rule below allows access to any cluster for log types flow, DNS and audit. - -::: - -```yaml -- apiGroups: ['lma.tigera.io'] - resources: ['*'] - resourceNames: ['flows', 'dns', 'audit'] - verbs: ['get'] -``` - -The rule below allows access to any cluster for all log types. - -```yaml -- apiGroups: ['lma.tigera.io'] - resources: ['*'] - resourceNames: ['*'] - verbs: ['get'] -``` - -### Filter log data for a managed cluster in Kibana - -1. Log in to the $[prodname] web console. -1. In the left navigation, click Kibana and log in to the Kibana dashboard. -1. Navigate to the Discovery view and filter logs by managed cluster indexes. -1. Select a type of log (audit, dns, events, flow). -1. From the Available Fields section in the side panel, select the `_index` field. - - ![Kibana Cluster Indexes](/img/calico-enterprise/mcm/mcm-kibana.png) - -In the example above, the selected log type (flow logs) has the index prefix, `tigera_secure_ee_flows` and two cluster indexes available: - -- Index: tigera_secure_ee_flows.cluster.20200207 -- Index: tigera_secure_ee_flows.app-cluster-1.20200207 - -:::note - -The management cluster has a default cluster name to identify indexes. When filtering logs for the management cluster, use the cluster name: `cluster`. - -::: - -To filter log data by a given managed cluster you can include the filter criteria `_index: ..*` to your query when executing a search through the Kibana UI. - -# Additional resources - -- [ManagementClusterConnection resource reference](../reference/installation/api.mdx#managementclusterconnection) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/multicluster/index.mdx b/calico-enterprise_versioned_docs/version-3.19-2/multicluster/index.mdx deleted file mode 100644 index 40c70c3b65..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/multicluster/index.mdx +++ /dev/null @@ -1,40 +0,0 @@ ---- -description: Calico Enterprise features for scaling to production. -hide_table_of_contents: true ---- - -import { DocCardLink, DocCardLinkLayout } from '/src/___new___/components'; - -# Multi-cluster management and federation - -With multi-cluster management, you can centralize control of multiple Kubernetes clusters in a single management plane, with federated endpoint identity, federated services, and multi-cluster networking. - -## Setting up multi-cluster management - - - - - - -## Setting up multi-cluster management using Helm - - - - - - -## Cluster mesh - - - - - - - - -## Advanced - - - - - \ No newline at end of file diff --git a/calico-enterprise_versioned_docs/version-3.19-2/multicluster/set-up-multi-cluster-management/helm-install/create-a-managed-cluster-helm.mdx b/calico-enterprise_versioned_docs/version-3.19-2/multicluster/set-up-multi-cluster-management/helm-install/create-a-managed-cluster-helm.mdx deleted file mode 100644 index dd6c5cd6be..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/multicluster/set-up-multi-cluster-management/helm-install/create-a-managed-cluster-helm.mdx +++ /dev/null @@ -1,172 +0,0 @@ ---- -description: Install Calico Enterprise managed cluster using Helm application package manager. 
---- - -# Create a Calico Enterprise managed cluster - -import CodeBlock from '@theme/CodeBlock'; - -## Big picture - -Create a $[prodname] managed cluster that you can control from your management cluster using Helm 3. - -## Value - -Helm charts are a way to package up an application for Kubernetes (similar to `apt` or `yum` for operating systems). Helm is also used by tools like ArgoCD to manage applications in a cluster, taking care of install, upgrade (and rollback if needed), etc. - -## Before you begin - -**Required** - -- Install Helm 3 -- `kubeconfig` is configured to work with your cluster (check by running `kubectl get nodes`) -- [Credentials for the Tigera private registry and a license key](../../../getting-started/install-on-clusters/calico-enterprise.mdx) - -## Concepts - -### Operator-based installation - -In this guide, you install the Tigera Calico operator and custom resource definitions using the Helm 3 chart. The Tigera Operator provides lifecycle management for $[prodname] exposed via the Kubernetes API defined as a custom resource definition. - -## How to - -### Download the Helm chart - -```bash -helm repo add tigera-ee https://downloads.tigera.io/ee/charts -helm repo update -helm pull tigera-ee/tigera-operator --version $[releaseTitle] -``` - -### Prepare the Installation Configuration - -You **must** provide the desired configuration for your cluster via the `values.yaml`, otherwise installation will use the default settings based on the auto-detected provider. -The configurations you need to provide depends on your cluster's settings and your desired state. - -Some important configurations you might need to provide to the installer (via `values.yaml`) includes (but not limited to): _kubernetesProvider_, _cni type_, or if you need to customize _TLS certificates_. - -Here are some examples for updating `values.yaml` with your configurations: - -Example 1. Providing `kubernetesProvider`: if you are installing on a cluster installed by EKS, set the `kubernetesProvider` as described in the [Installation reference](../../../reference/installation/api.mdx#provider) - - ```bash - echo '{ installation: {kubernetesProvider: EKS }}' > values.yaml - ``` - -Example 2. Providing custom settings in `values.yaml` for Azure AKS cluster with no Kubernetes CNI pre-installed: - - ```bash - cat > values.yaml < - ``` - -1. Export the management cluster certificate and managed cluster certificate and key. - - If you haven't already done so, generate the base64 encoded CRT and KEY for this managed cluster: - - ```bash - openssl genrsa 2048 | base64 -w 0 > my-managed-cluster.key.base64 - openssl req -new -key <(base64 -d my-managed-cluster.key.base64) -subj "/CN=my-managed-cluster" | \ - openssl x509 -req -signkey <(base64 -d my-managed-cluster.key.base64) -days 365 | base64 -w 0 > my-managed-cluster.crt.base64 - ``` - - Get the MANAGEMENT_CLUSTER_CRT by running the following command on the management cluster: - - ```bash - kubectl get secret -n tigera-operator $(kubectl get managementcluster tigera-secure -o jsonpath='{.spec.tls.secretName}') -o jsonpath='{.data.tls\.crt}' > management-cluster.crt.base64 - ``` - - Export the managed cluster variables: - - ```bash - export MANAGEMENT_CLUSTER_CRT=$(cat management-cluster.crt.base64) - export MANAGED_CLUSTER_CRT=$(cat my-managed-cluster.crt.base64) - export MANAGED_CLUSTER_KEY=$(cat my-managed-cluster.key.base64) - ``` - -1. 
Append the management cluster context to your `values.yaml`: - - ```bash - echo " - managementClusterConnection: - enabled: true - managementClusterAddress: $MANAGEMENT_CLUSTER_ADDR - management: - tls: - crt: $MANAGEMENT_CLUSTER_CRT - managed: - tls: - crt: $MANAGED_CLUSTER_CRT - key: $MANAGED_CLUSTER_KEY" >> values.yaml - ``` - -1. Install the Tigera Operator and custom resource definitions using the Helm 3 chart: - - - {'$[version]' === 'master' - ? `helm install $[prodnamedash] tigera/tigera-operator --version tigera-operator-v0.0 -f values.yaml \\ ---set-file imagePullSecrets.tigera-pull-secret=,tigera-prometheus-operator.imagePullSecrets.tigera-pull-secret= \\ ---set-file licenseKeyContent= \\ ---set logStorage.enabled=false --set manager.enabled=false \\ ---namespace tigera-operator --create-namespace` - : `helm install $[prodnamedash] tigera-operator-$[chart_version_name].tgz -f values.yaml \\ ---set-file imagePullSecrets.tigera-pull-secret=,tigera-prometheus-operator.imagePullSecrets.tigera-pull-secret= \\ ---set-file licenseKeyContent= \\ ---set logStorage.enabled=false --set manager.enabled=false \\ ---namespace tigera-operator --create-namespace`} - - -1. You can now monitor progress with the following command: - - ```bash - watch kubectl get tigerastatus - ``` - -#### Provide permissions to view the managed cluster - - To access resources belonging to a managed cluster from the $[prodname] web console, the service or user account used to log in must have appropriate permissions defined in the managed cluster. - -Define admin-level permissions for the service account `mcm-user` we created to log in to the web console. Run the following command against your managed cluster. - - ```bash - kubectl create clusterrolebinding mcm-user-admin --clusterrole=tigera-network-admin --serviceaccount=default:mcm-user - ``` - - Congratulations! You have now installed $[prodname] for a managed cluster using the Helm 3 chart. - -## Next steps - -**Recommended** - -- [Configure access to the $[prodname] web console](../../../operations/cnx/access-the-manager.mdx) -- [Authentication quickstart](../../../operations/cnx/authentication-quickstart.mdx) -- [Configure your own identity provider](../../../operations/cnx/configure-identity-provider.mdx) - -**Recommended - Networking** - -- The default networking is IP in IP encapsulation using BGP routing. For all networking options, see [Determine best networking option](../../../networking/determine-best-networking.mdx). - -**Recommended - Security** - -- [Get started with $[prodname] tiered network policy](../../../network-policy/policy-tiers/tiered-policy.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/multicluster/set-up-multi-cluster-management/helm-install/create-a-management-cluster-helm.mdx b/calico-enterprise_versioned_docs/version-3.19-2/multicluster/set-up-multi-cluster-management/helm-install/create-a-management-cluster-helm.mdx deleted file mode 100644 index d669ec33f6..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/multicluster/set-up-multi-cluster-management/helm-install/create-a-management-cluster-helm.mdx +++ /dev/null @@ -1,304 +0,0 @@ ---- -description: Install Calico Enterprise management cluster using Helm application package manager. 
---- - -# Create a Calico Enterprise management cluster - -import CodeBlock from '@theme/CodeBlock'; -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - -## Big picture - -Create a $[prodname] management cluster to manage multiple clusters from a single management plane using Helm 3. - -## Value - -Helm charts are a way to package up an application for Kubernetes (similar to `apt` or `yum` for operating systems). Helm is also used by tools like ArgoCD to manage applications in a cluster, taking care of install, upgrade (and rollback if needed), etc. - -## Before you begin - -**Required** - -- Install Helm 3 -- `kubeconfig` is configured to work with your cluster (check by running `kubectl get nodes`) -- [Credentials for the Tigera private registry and a license key](../../../getting-started/install-on-clusters/calico-enterprise.mdx) - -## Concepts - -### Operator-based installation - -In this guide, you install the Tigera Calico operator and custom resource definitions using the Helm 3 chart. The Tigera Operator provides lifecycle management for $[prodname] exposed via the Kubernetes API defined as a custom resource definition. - -## How to - -### Get the Helm chart - - - {'$[version]' === 'master' - ? `helm repo add tigera gs://tigera-helm-charts -helm repo update -helm pull tigera/tigera-operator --version $[releaseTitle]` - : `helm repo add tigera-ee https://downloads.tigera.io/ee/charts -helm repo update -helm pull tigera-ee/tigera-operator --version $[releaseTitle]`} - - -### Prepare the Installation Configuration - -You **must** provide the desired configuration for your cluster via the `values.yaml`, otherwise installation will use the default settings based on the auto-detected provider. -The configurations you need to provide depends on your cluster's settings and your desired state. - -Some important configurations you might need to provide to the installer (via `values.yaml`) includes (but not limited to): _kubernetesProvider_, _cni type_, or if you need to customize _TLS certificates_. - -Here are some examples for updating `values.yaml` with your configurations: - -Example 1. Providing `kubernetesProvider`: if you are installing on a cluster installed by EKS, set the `kubernetesProvider` as described in the [Installation reference](../../../reference/installation/api.mdx#provider) - - ```bash - echo '{ installation: {kubernetesProvider: EKS }}' > values.yaml - ``` - -Example 2. Providing custom settings in `values.yaml` for Azure AKS cluster with no Kubernetes CNI pre-installed: - - ```bash - cat > values.yaml < - - -To install a $[prodname] [management](create-a-management-cluster-helm#value) cluster with Helm, using a NodePort service: - -1. [Configure a storage class for Calico Enterprise](../../../operations/logstorage/create-storage). - -1. Export the service node port number - - ```bash - export EXT_SERVICE_NODE_PORT=30449 - ``` - - Export the public address or host of the management cluster. (Ex. "example.com:1234" or "10.0.0.10:1234".) - - ```bash - export MANAGEMENT_CLUSTER_ADDR=:$EXT_SERVICE_NODE_PORT - ``` - -1. Export one or more managed clusters. 
- - Generate the base64 encoded CRT and KEY for a managed cluster: - - ```bash - openssl genrsa 2048 | base64 -w 0 > my-managed-cluster.key.base64 - openssl req -new -key <(base64 -d my-managed-cluster.key.base64) -subj "/CN=my-managed-cluster" | \ - openssl x509 -req -signkey <(base64 -d my-managed-cluster.key.base64) -days 365 | base64 -w 0 > my-managed-cluster.crt.base64 - ``` - - Export the managed cluster variables: - - ```bash - export MANAGED_CLUSTER_NAME=my-managed-cluster - export MANAGED_CLUSTER_OPERATOR_NAMESPACE=tigera-operator - export MANAGED_CLUSTER_CERTIFICATE=$(cat my-managed-cluster.crt.base64) - ``` - -1. Append the management cluster context to your `values.yaml`: - - ```bash - echo " - managementCluster: - enabled: true - address: $MANAGEMENT_CLUSTER_ADDR - service: - enabled: true - annotations: - type: NodePort - port: 9449 - targetPort: 9449 - protocol: TCP - nodePort: $EXT_SERVICE_NODE_PORT - - managedClusters: - enabled: true - clusters: - - name: $MANAGED_CLUSTER_NAME - operatorNamespace: $MANAGED_CLUSTER_OPERATOR_NAMESPACE - certificate: $MANAGED_CLUSTER_CERTIFICATE" >> values.yaml - ``` - -1. Install the Tigera Operator and custom resource definitions using the Helm 3 chart: - - - {'$[version]' === 'master' - ? `helm install $[prodnamedash] tigera/tigera-operator --version tigera-operator-v0.0 -f values.yaml \\ ---set-file imagePullSecrets.tigera-pull-secret=,tigera-prometheus-operator.imagePullSecrets.tigera-pull-secret= \\ ---set-file licenseKeyContent= \\ ---namespace tigera-operator --create-namespace` - : `helm install $[prodnamedash] tigera-operator-$[chart_version_name].tgz -f values.yaml \\ ---set-file imagePullSecrets.tigera-pull-secret=,tigera-prometheus-operator.imagePullSecrets.tigera-pull-secret= \\ ---set-file licenseKeyContent= \\ ---namespace tigera-operator --create-namespace`} - - -1. You can now monitor progress with the following command: - - ```bash - watch kubectl get tigerastatus - ``` - - - - -To install a $[prodname] [management](create-a-management-cluster-helm#value) cluster with Helm, using a LoadBalancer service: - -#### Meet cloud provider requirements - -Ensure that you have met the requirements for your cloud provider to provision a load balancer in your environment. - -For example, if you are using EKS, you must meet the requirements defined in [create a network load balancer for AWS](https://docs.aws.amazon.com/eks/latest/userguide/network-load-balancing.html) - -#### Install the management cluster - -1. [Configure a storage class for Calico Enterprise](../../../operations/logstorage/create-storage). - -1. Export one or more managed clusters. - - Generate the base64 encoded CRT and KEY for a managed cluster: - - ```bash - openssl genrsa 2048 | base64 -w 0 > my-managed-cluster.key.base64 - openssl req -new -key <(base64 -d my-managed-cluster.key.base64) -subj "/CN=my-managed-cluster" | \ - openssl x509 -req -signkey <(base64 -d my-managed-cluster.key.base64) -days 365 | base64 -w 0 > my-managed-cluster.crt.base64 - ``` - - Export the managed cluster variables: - - ```bash - export MANAGED_CLUSTER_NAME=my-managed-cluster - export MANAGED_CLUSTER_OPERATOR_NAMESPACE=tigera-operator - export MANAGED_CLUSTER_CERTIFICATE=$(cat my-managed-cluster.crt.base64) - ``` - -1. 
Append the management cluster context to your `values.yaml`: - - ```bash - echo " - managementCluster: - enabled: true - service: - enabled: true - annotations: - type: LoadBalancer - port: 9449 - targetPort: 9449 - protocol: TCP - - managedClusters: - enabled: true - clusters: - - name: $MANAGED_CLUSTER_NAME - operatorNamespace: $MANAGED_CLUSTER_OPERATOR_NAMESPACE - certificate: $MANAGED_CLUSTER_CERTIFICATE" >> values.yaml - ``` - - If you are using EKS, make sure your management cluster has the following annotations: - ```yaml - managementCluster: - service: - annotations: - - key: service.beta.kubernetes.io/aws-load-balancer-type - value: "external" - - key: service.beta.kubernetes.io/aws-load-balancer-nlb-target-type - value: "instance" - - key: service.beta.kubernetes.io/aws-load-balancer-scheme - value: "internet-facing" - ``` - -1. Install the Tigera Operator and custom resource definitions using the Helm 3 chart: - - - {'$[version]' === 'master' - ? `helm install $[prodnamedash] tigera/tigera-operator --version tigera-operator-v0.0 -f values.yaml \\ ---set-file imagePullSecrets.tigera-pull-secret=,tigera-prometheus-operator.imagePullSecrets.tigera-pull-secret= \\ ---set-file licenseKeyContent= \\ ---namespace tigera-operator --create-namespace` - : `helm install $[prodnamedash] tigera-operator-$[chart_version_name].tgz -f values.yaml \\ ---set-file imagePullSecrets.tigera-pull-secret=,tigera-prometheus-operator.imagePullSecrets.tigera-pull-secret= \\ ---set-file licenseKeyContent= \\ ---namespace tigera-operator --create-namespace`} - - -1. You can now monitor progress with the following command: - - ```bash - watch kubectl get tigerastatus - ``` - -#### Update the ManagementCluster address - -1. Export the service port number - - ```bash - export EXT_LB_PORT= - ``` - - Export the public address or host of the management cluster, in this case the load-balancer's external IP (Ex. "example.com:1234" or "10.0.0.10:1234".) - - ```bash - export MANAGEMENT_CLUSTER_ADDR=:$EXT_LB_PORT - ``` - - Replace the `address` field in the ManagementCluster resource. - - ```bash - kubectl patch managementcluster tigera-secure --type merge -p "{\"spec\":{\"address\":\"${MANAGEMENT_CLUSTER_ADDR}\"}}" - ``` - - - - -#### Create an admin user and verify management cluster connection - -To access resources in a managed cluster from the $[prodname] web console within the management cluster, the logged-in user must have appropriate permissions defined in that managed cluster (clusterrole bindings). - -Create an admin user, `mcm-user`, in the default namespace with full permissions, and token. - - ```bash - kubectl create sa mcm-user - kubectl create clusterrolebinding mcm-user-admin --clusterrole=tigera-network-admin --serviceaccount=default:mcm-user - kubectl create token mcm-user -n default - ``` - - Use the generated token, to connect to the UI. In the top right banner in the UI, your management cluster is displayed as the first entry in the cluster selection drop-down menu with the fixed name, `management cluster`. - - Congratulations! You have now installed $[prodname] for a management cluster using the Helm 3 chart. 
- -## Next steps - -**Recommended** - -- [Configure access to the $[prodname] web console](../../../operations/cnx/access-the-manager.mdx) -- [Authentication quickstart](../../../operations/cnx/authentication-quickstart.mdx) -- [Configure your own identity provider](../../../operations/cnx/configure-identity-provider.mdx) - -**Recommended - Networking** - -- The default networking is IP in IP encapsulation using BGP routing. For all networking options, see [Determine best networking option](../../../networking/determine-best-networking.mdx). - -**Recommended - Security** - -- [Get started with $[prodname] tiered network policy](../../../network-policy/policy-tiers/tiered-policy.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/multicluster/set-up-multi-cluster-management/helm-install/index.mdx b/calico-enterprise_versioned_docs/version-3.19-2/multicluster/set-up-multi-cluster-management/helm-install/index.mdx deleted file mode 100644 index 32706b82e6..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/multicluster/set-up-multi-cluster-management/helm-install/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: Steps to configure management and managed clusters using Helm. -hide_table_of_contents: true ---- - -# Helm and multi-cluster management - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/multicluster/set-up-multi-cluster-management/index.mdx b/calico-enterprise_versioned_docs/version-3.19-2/multicluster/set-up-multi-cluster-management/index.mdx deleted file mode 100644 index 383decd626..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/multicluster/set-up-multi-cluster-management/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: Steps to configure management and managed clusters. -hide_table_of_contents: true ---- - -# Multi-cluster management - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/multicluster/set-up-multi-cluster-management/standard-install/create-a-managed-cluster.mdx b/calico-enterprise_versioned_docs/version-3.19-2/multicluster/set-up-multi-cluster-management/standard-install/create-a-managed-cluster.mdx deleted file mode 100644 index 0b29b28293..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/multicluster/set-up-multi-cluster-management/standard-install/create-a-managed-cluster.mdx +++ /dev/null @@ -1,72 +0,0 @@ ---- -description: Create a Calico Enterprise managed cluster that you can control from you management cluster. 
---- - -# Create a Calico Enterprise managed cluster - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; -import InstallGeneric from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/InstallGeneric'; -import InstallGKE from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/InstallGKE'; -import InstallEKS from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/InstallEKS'; -import InstallAKS from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/InstallAKS'; -import InstallOpenShift from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/InstallOpenShift'; - -## Big picture - -Create a $[prodname] managed cluster that you can control from your management cluster. - -## Value - -Managing standalone clusters and multiple instances of Elasticsearch is not onerous when you first install $[prodname]. -As you move to production with 300+ clusters, it is not scalable; you need centralized cluster management and log storage. -With $[prodname] multi-cluster management, you can securely connect multiple clusters from different cloud providers -in a single management plane, and control user access using RBAC. This architecture also supports federation of network -policy resources across clusters, and lays the foundation for a “single pane of glass.” - -## Before you begin... - -**Required** - -- A [Calico Enterprise management cluster](create-a-management-cluster.mdx) -- A [$[prodname] pull secret](../../../getting-started/install-on-clusters/calico-enterprise.mdx) - -## How to - -### Create a managed cluster - -Follow these steps in the cluster you intend to use as the managed cluster. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -## Next steps - -- When you are ready to fine-tune your multi-cluster management deployment for production, see [Fine-tune multi-cluster management](../../fine-tune-deployment.mdx) -- To change an existing $[prodname] standalone cluster to a management or managed cluster, see [Change cluster types](../../change-cluster-type.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/multicluster/set-up-multi-cluster-management/standard-install/create-a-management-cluster.mdx b/calico-enterprise_versioned_docs/version-3.19-2/multicluster/set-up-multi-cluster-management/standard-install/create-a-management-cluster.mdx deleted file mode 100644 index 65ce051693..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/multicluster/set-up-multi-cluster-management/standard-install/create-a-management-cluster.mdx +++ /dev/null @@ -1,121 +0,0 @@ ---- -description: Create a Calico Enterprise management cluster to manage multiple clusters from a single management plane. ---- - -# Create a Calico Enterprise management cluster - -## Big picture - -Create a $[prodname] management cluster to manage multiple clusters from a single management plane. - -## Value - -Managing standalone clusters and multiple instances of Elasticsearch is not onerous when you first install $[prodname]. But as you move to production with 300+ clusters, it is not scalable; you need centralized cluster management and log storage. With $[prodname] multi-cluster management, you can securely connect multiple clusters from different cloud providers in a single management plane, and control user access using RBAC. 
This architecture also supports federation of network policy resources across clusters, and lays the foundation for a “single pane of glass.” - -## Before you begin... - -**Required** - -- A Calico Enterprise cluster, see [here](../../../getting-started/install-on-clusters/index.mdx) for help -- A reachable, public IP address for the management cluster - -## How to - -### Create a management cluster - -To control managed clusters from your central management plane, you must ensure it is reachable for connections. The simplest way to get started (but not for production scenarios), is to configure a `NodePort` service to expose the management cluster. Note that the service must live within the `tigera-manager` namespace. - -1. Create a service to expose the management cluster. - The following example of a NodePort service may not be suitable for production and high availability. For options, see [Fine-tune multi-cluster management for production](../../fine-tune-deployment.mdx). - Apply the following service manifest. - - ```bash - kubectl create -f - < - ``` -1. Apply the [ManagementCluster](../../../reference/installation/api.mdx#managementcluster) CR. - - ```bash - kubectl apply -f - < diff --git a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/application-layer-policies/alp-tutorial.mdx b/calico-enterprise_versioned_docs/version-3.19-2/network-policy/application-layer-policies/alp-tutorial.mdx deleted file mode 100644 index df4cd12889..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/application-layer-policies/alp-tutorial.mdx +++ /dev/null @@ -1,169 +0,0 @@ ---- -description: Learn how to apply ALP to your workloads and control ingress traffic. ---- - -# Application layer policy tutorial - -This tutorial shows how to use $[prodname] application layer policy to restrict ingress traffic for applications and microservices. - -### Install the demo application - -We will use a simple microservice application to demonstrate $[prodname] -application layer policy. The [YAO Bank](https://github.com/projectcalico/yaobank) application creates a -customer-facing web application, a microservice that serves up account -summaries, and an [etcd](https://github.com/coreos/etcd) datastore. - -```bash -kubectl apply -f $[tutorialFilesURL]/10-yaobank.yaml -``` -```bash -namespace/yaobank configured -service/database created -serviceaccount/database created -deployment.apps/database created -service/summary created -serviceaccount/summary created -deployment.apps/summary created -service/customer created -serviceaccount/customer created -deployment.apps/customer created -``` - -:::note - -You can also -[view the manifest in your browser](/files/10-yaobank.yaml). - -::: - -Verify that the application pods have been created and are ready. -```bash - kubectl rollout status deploy/summary deploy/customer deploy/database -``` - -When the demo application is displayed, you will see three pods. - -``` -NAME READY STATUS RESTARTS AGE -customer-2809159614-qqfnx 3/3 Running 0 21h -database-1601951801-m4w70 3/3 Running 0 21h -summary-2817688950-g1b3n 3/3 Running 0 21h -``` - -## Set up -- A $[prodname] cluster is running with application layer policy enabled -- Cluster has three microservices: customer, database, summary -- The customer web service should not have access to the backend database, but should have access to clients outside the cluster - -Imagine what would happen if an attacker were to gain control of the customer web pod in our -application. 
Let's simulate this by executing a remote shell inside that pod. - -```bash -kubectl exec -ti customer- -c customer -- bash -``` - -Notice that from here, we get direct access to the backend database. For example, we can list all the entries in the database like this: - -```bash -curl http://database:2379/v2/keys?recursive=true | python -m json.tool -``` - -(Piping to `python -m json.tool` nicely formats the output.) - -## Apply application layer policy - -In this step, we get the application layer policy YAML and apply it. Note that the policy scope is cluster-wide. - -With a $[prodname] policy, you can mitigate risks to the banking application. - -```bash -wget $[tutorialFilesURL]/30-policy.yaml -kubectl create -f 30-policy.yaml -``` - -Let's examine this policy piece by piece. First, notice that an application layer policy looks like a regular $[prodname] global network policy. -The only difference you'll see is the ability to use the application layer policy parameters in global network policy. Another important difference -is that you'll see HTTP traffic flows in the web console in features like Service Graph. - -Next, there are three policy objects, one for each microservice. - -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalNetworkPolicy -metadata: - name: customer -spec: - selector: app == 'customer' - ingress: - - action: Allow - http: - methods: ['GET'] - egress: - - action: Allow -``` - -The first policy protects the customer web app. Because this application is customer-facing, we do not -restrict what can communicate with it. We do, however, restrict its communications to HTTP `GET` -requests. - -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalNetworkPolicy -metadata: - name: summary -spec: - selector: app == 'summary' - ingress: - - action: Allow - source: - serviceAccounts: - names: ['customer'] - egress: - - action: Allow -``` - -The second policy protects the account summary microservice. We know the only consumer of this -service is the customer web app, so we restrict the source of incoming connections to the service -account for the customer web app. - -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalNetworkPolicy -metadata: - name: database -spec: - selector: app == 'database' - ingress: - - action: Allow - source: - serviceAccounts: - names: ["summary"] - egress: - - action: Allow - -``` - -The third policy protects the database. Only the summary microservice should have direct access to -the database. - -### Verify the policy is working - -Let's verify our policy is working as intended. First, return to your browser and refresh to -ensure policy enforcement has not broken the application. - -Next, return to the customer web app. Recall that we simulated an attacker gaining control of that -pod by executing a remote shell inside it. - -```bash -kubectl exec -ti customer- -c customer bash -``` - -Repeat our attempt to access the database. - -```bash -curl -I http://database:2379/v2/keys?recursive=true -``` - -We omitted the JSON formatting because we do not expect to get a valid JSON response. This -time we should get a `403 Forbidden` response. Only the account summary microservice has database -access according to our policy. 
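If you want to return the demo application to its original, unrestricted state, remove the policies created in this tutorial:

```bash
# Deletes the three GlobalNetworkPolicy objects (customer, summary, database)
kubectl delete -f 30-policy.yaml
```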
diff --git a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/application-layer-policies/alp.mdx b/calico-enterprise_versioned_docs/version-3.19-2/network-policy/application-layer-policies/alp.mdx deleted file mode 100644 index 1712514b04..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/application-layer-policies/alp.mdx +++ /dev/null @@ -1,68 +0,0 @@ ---- -description: Enforce application layer policies in your cluster to configure access controls based on L7 attributes. ---- - -# Enable and enforce application layer policies - -Application layer policies let you configure access controls based on L7 attributes. - -## Before you begin - -### Unsupported -- $[prodname] implements application layer policy using Envoy as a DaemonSet. This means you cannot use application layer policy alongside a service mesh like Istio. -- GKE - -### Limitations -- Application layer policy supports restricting only ingress traffic -- Support for L7 attributes are limited to HTTP method and URL exact/prefix path -- Supported protocols are limited to TCP-based protocols (for example, HTTP, HTTPS, or gRPC) -- You can control application layer policies only at the cluster level (not per namespace) - -## How to -- [Enable application layer policies](#enable-application-layer-policies) -- [Enforce application layer policies for ingress traffic](#enforce-application-layer-policies-for-ingress-traffic) -- [Disable application layer policies](#disable-application-layer-policies) - -### Enable application layer policies (ALP) -In the ApplicationLayer custom resource, set the `applicationLayerPolicy` field to Enabled. - -```yaml -apiVersion: operator.tigera.io/v1 -kind: ApplicationLayer -metadata: - name: tigera-secure -spec: - applicationLayerPolicy: Enabled - -``` - -### Enforce application layer policies for ingress traffic - -You can restrict ingress traffic using HTTP match criteria using Global network policy. -For a list of all HTTP match parameters, see [Global network policy](/reference/resources/globalnetworkpolicy.mdx). - -In the following example, the trading app is allowed ingress traffic only for HTTP GET requests that match the exact path /projects/calico, or that begins with the prefix, /users. - -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalNetworkPolicy -metadata: - name: customer -spec: - selector: app == 'tradingapp' - ingress: - - action: Allow - http: - methods: ["GET"] - paths: - - exact: "/projects/calico" - - prefix: "/users" - egress: - - action: Allow -``` -### Disable application layer policies - -To disable the policies, do one of the following steps: - - Set the `applicationLayerPolicy` field in the `ApplicationLayer` custom resource to `Disabled`. - - Remove the `applicationLayerPolicy` field entirely. - - Delete the ApplicationLayer` custom resource. diff --git a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/application-layer-policies/index.mdx b/calico-enterprise_versioned_docs/version-3.19-2/network-policy/application-layer-policies/index.mdx deleted file mode 100644 index 914f5afde2..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/application-layer-policies/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: Use application layer policies to restrict ingress traffic based on HTTP attributes. 
-hide_table_of_contents: true ---- - -# Application layer policies to control ingress traffic - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/beginners/calico-labels.mdx b/calico-enterprise_versioned_docs/version-3.19-2/network-policy/beginners/calico-labels.mdx deleted file mode 100644 index b2a96e8349..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/beginners/calico-labels.mdx +++ /dev/null @@ -1,124 +0,0 @@ ---- -description: Calico Enterprise automatic labels for use with resources. ---- - -# Calico Enterprise automatic labels - -As a convenience, $[prodname] provides immutable labels that are used for specific resources when evaluating selectors in policies. The labels make it easier to match resources in common ways (such as matching a namespace by name). - -## Labels for matching namespaces - -The label `projectcalico.org/name` is set to the name of the namespace. This allows for matching namespaces by name when using a `namespaceSelector` field. - -For example, the following GlobalNetworkPolicy applies to workloads with label, `color: red` in namespaces named, `"foo"` and `"bar"`. The policy allows ingress traffic to port 8080 from all workloads in a third namespace named, `"baz"`: - -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalNetworkPolicy -metadata: - name: foo-and-bar -spec: - namespaceSelector: projectcalico.org/name in {"foo", "bar"} - selector: color == "red" - types: - - Ingress - ingress: - - action: Allow - source: - namespaceSelector: projectcalico.org/name == "baz" - destination: - ports: - - 8080 -``` - -Be aware that the default values for `namespaceSelector` for NetworkPolicy and GlobalNetworkPolicy are different. For example: - -**In a network policy**, - - ```yaml - namespaceSelector: - selector: foo == "bar" - ``` -means "resources in the same namespace as the network policy that matches foo == 'bar'". - -**In a global network policy**, - - ```yaml - namespaceSelector: - selector: foo == "bar" - ``` -means "resources in any namespace and non-namespaced resources that match foo == 'bar'". - -Further, - - ```yaml - namespaceSelector: projectcalico.org/name == "some-namespace" - selector: foo == "bar" - ``` -is equivalent to: - - ```yaml - namespaceSelector: - selector: (foo == "bar") && (projectcalico.org/namespace == "some-namespace") - ``` - -### Labels for matching service accounts - -Similarly, the `projectcalico.org/name` label is applied to ServiceAccounts and allows for matching by name in a `serviceAccountSelector`. - -### Kubernetes labels for matching namespaces - -Kubernetes also has [automatic labeling](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/#automatic-labelling), for example `kubernetes.io/metadata.name`. The Kubernetes namespace label serves the same purpose and can be used in the same way as the $[prodname] label. The `projectcalico.org/name` label predates the automatic Kubernetes label. 
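For example, the earlier `foo-and-bar` policy could be written with the automatic Kubernetes label instead of `projectcalico.org/name`, with the same effect:

```yaml
apiVersion: projectcalico.org/v3
kind: GlobalNetworkPolicy
metadata:
  name: foo-and-bar
spec:
  # Same namespaces as before, matched by the Kubernetes automatic label
  namespaceSelector: kubernetes.io/metadata.name in {"foo", "bar"}
  selector: color == "red"
  types:
    - Ingress
  ingress:
    - action: Allow
      source:
        namespaceSelector: kubernetes.io/metadata.name == "baz"
      destination:
        ports:
          - 8080
```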
- -## Labels for matching workload endpoints - -WorkloadEndpoints (which represent Pods in Kubernetes, or VM instances in OpenStack), receive several automatic labels: - -* `projectcalico.org/orchestrator` is applied to all WorkloadEndpoints and allows Kubernetes Pods to be distinguished from OpenStack VM instances, and from HostEndpoints (which do not have the label): - -* `has(projectcalico.org/orchestrator)` matches only WorkloadEndpoints -* `projectcalico.org/orchestrator == "k8s"` matches only Kubernetes Pods - -* For WorkloadEndpoints that represent Kubernetes Pods, `projectcalico.org/namespace` contains the name of the pod's namespace. `projectcalico.org/namespace` predates the addition of `namespaceSelector` fields to GlobalNetworkPolicies; it serves the same purpose as the `projectcalico.org/name` label in a `namespaceSelector` field. The following GlobalNetworkPolicy is exactly equivalent to the example shown in the Namespaces section: - -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalNetworkPolicy -metadata: - name: foo-and-bar -spec: - selector: projectcalico.org/namespace in {"foo", "bar"} && color == "red" - types: - - Ingress - ingress: - - action: Allow - source: - selector: projectcalico.org/namespace == "baz" - destination: - ports: - - 8080 -``` - -### Labels for matching workload endpoints with multiple networks - -If you enable [multiple networks](../../networking/configuring/multiple-networks), the following automatic labels are available: - -- `projectcalico.org/network` - Name of the network specified in the NetworkAttachmentDefinition -- `projectcalico.org/network-namespace` - Namespace of the network -- `projectcalico.org/network-interface` - Network interface for the WorkloadEndpoint - -## Labels for matching host endpoints - -[Automatic HostEndpoints](../../network-policy/hosts/kubernetes-nodes) use the following label to differentiate them from regular HostEndpoints: - -- `projectcalico.org/created-by: calico-kube-controllers` - -## Use the correct selector with labels in policies - -$[prodname] labels must be used with the correct selector or the policy will not work as designed (and there are no error messages in the web console or when applying the YAML). - -| Calico label | Usage requirements | Use in these resources... | -| --------------------------- | ------------------------------------------------------------ | ------------------------------------------------------------ | -| `projectcalico.org/name` | Use with a **namespaceSelector** or **serviceAccountSelector**. | - Network policy
    - Staged network policy

    Namespaced resources that apply only to workload endpoint resources in the namespace.
    | -| `projectcalico.org/namespace` | Use only with selectors.

    Use the label as the label name, and a namespace name as the value to compare against (for example, `projectcalico.org/namespace == "default"`). | - Global network policy
    - Staged global network policy

    Cluster-wide (non-namespaced) resources that apply to workload endpoint resources in all namespaces, and to host endpoint resources. | - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/beginners/calico-network-policy.mdx b/calico-enterprise_versioned_docs/version-3.19-2/network-policy/beginners/calico-network-policy.mdx deleted file mode 100644 index 2cf2ce35cd..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/beginners/calico-network-policy.mdx +++ /dev/null @@ -1,253 +0,0 @@ ---- -description: Create your first Calico network policies. Shows the rich features using sample policies that extend native Kubernetes network policy. ---- - -# Get started with Calico network policy - -## Big picture - -Enforce which network traffic that is allowed or denied using rules in Calico network policy. - -## Value - -### Extends Kubernetes network policy - -Calico network policy provides a richer set of policy capabilities than Kubernetes including: policy ordering/priority, deny rules, and more flexible match rules. While Kubernetes network policy applies only to pods, Calico network policy can be applied to multiple types of endpoints including pods, VMs, and host interfaces. - -### Write once, works everywhere - -No matter which cloud provider you use now, adopting Calico network policy means you write the policy once and it is portable. If you move to a different cloud provider, you don’t need to rewrite your Calico network policy. Calico network policy is a key feature to avoid cloud provider lock-in. - -### Works seamlessly with Kubernetes network policies - -You can use Calico network policy in addition to Kubernetes network policy, or exclusively. For example, you could allow developers to define Kubernetes network policy for their microservices. For broader and higher-level access controls that developers cannot override, you could allow only security or Ops teams to define Calico network policies. - -## Concepts - -### Endpoints - -Calico network policies apply to **endpoints**. In Kubernetes, each pod is a Calico endpoint. However, Calico can support other kinds of endpoints. There are two types of Calico endpoints: **workload endpoints** (such as a Kubernetes pod or OpenStack VM) and **host endpoints** (an interface or group of interfaces on a host). - -### Namespaced and global network policies - -**Calico network policy** is a namespaced resource that applies to pods/containers/VMs in that namespace. - -```yaml -apiVersion: projectcalico.org/v3 -kind: NetworkPolicy -metadata: - name: allow-tcp-port-6379 - namespace: production -``` - -**Calico global network policy** is a non-namespaced resource and can be applied to any kind of endpoint (pods, VMs, host interfaces) independent of namespace. - -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalNetworkPolicy -metadata: - name: allow-tcp-port-6379 -``` - -Because global network policies use **kind: GlobalNetworkPolicy**, they are grouped separately from **kind: NetworkPolicy**. For example, global network policies will not be returned from `kubectl get networkpolicy.p`, and are rather returned from `kubectl get globalnetworkpolicy`. - -### Ingress and egress - -Each network policy rule applies to either **ingress** or **egress** traffic. From the point of view of an endpoint (pod, VM, host interface), **ingress** is incoming traffic to the endpoint, and **egress** is outgoing traffic from the endpoint. 
In a Calico network policy, you create ingress and egress rules independently (egress, ingress, or both). - -You can specify whether policy applies to ingress, egress, or both using the **types** field. If you do not use the types field, Calico defaults to the following values. - -| Ingress rule present? | Egress rule present? | Value | -| :-------------------: | :------------------: | :-------------: | -| No | No | Ingress | -| Yes | No | Ingress | -| No | Yes | Egress | -| Yes | Yes | Ingress, Egress | - -### Network traffic behaviors: deny and allow - -The Kubernetes network policy specification defines the following behavior: - -- If no network policies apply to a pod, then all traffic to/from that pod is allowed. -- If one or more network policies apply to a pod containing ingress rules, then only the ingress traffic specifically allowed by those policies is allowed. -- If one or more network policies apply to a pod containing egress rules, then only the egress traffic specifically allowed by those policies is allowed. - -For compatibility with Kubernetes, **Calico network policy** follows the same behavior for Kubernetes pods. For other endpoint types (VMs, host interfaces), Calico network policy is default deny. That is, only traffic specifically allowed by network policy is allowed, even if no network policies apply to the endpoint. - -## Before you begin - -`calicoctl` must be **installed** and **configured** before use. `calicoctl` will use Kubernetes as the datastore. You can find more information on how to configure `calicoctl` in the following link: - -- [Configure `calicoctl`](../../operations/clis/calicoctl/configure/overview.mdx) - -## How to - -- [Control traffic to/from endpoints in a namespace](#control-traffic-tofrom-endpoints-in-a-namespace) -- [Control traffic to/from endpoints independent of namespace](#control-traffic-tofrom-endpoints-independent-of-namespace) -- [Control traffic to/from endpoints using IP addresses or CIDR ranges](#control-traffic-tofrom-endpoints-using-ip-addresses-or-cidr-ranges) -- [Apply network policies in specific order](#apply-network-policies-in-specific-order) -- [Generate logs for specific traffic](#generate-logs-for-specific-traffic) - -### Control traffic to/from endpoints in a namespace - -In the following example, ingress traffic to endpoints in the **namespace: production** with label **color: red** is allowed, only if it comes from a pod in the same namespace with **color: blue**, on port **6379**. - -```yaml -apiVersion: projectcalico.org/v3 -kind: NetworkPolicy -metadata: - name: allow-tcp-port-6379 - namespace: production -spec: - selector: color == 'red' - ingress: - - action: Allow - protocol: TCP - source: - selector: color == 'blue' - destination: - ports: - - 6379 -``` - -To allow ingress traffic from endpoints in other namespaces, use a **namespaceSelector** in the policy rule. A namespaceSelector matches namespaces based on the labels that are applied in the namespace. In the following example, ingress traffic is allowed from endpoints in namespaces that match **shape == circle**. 
- -```yaml -apiVersion: projectcalico.org/v3 -kind: NetworkPolicy -metadata: - name: allow-tcp-port-6379 - namespace: production -spec: - selector: color == 'red' - ingress: - - action: Allow - protocol: TCP - source: - selector: color == 'blue' - namespaceSelector: shape == 'circle' - destination: - ports: - - 6379 -``` - -### Control traffic to/from endpoints independent of namespace - -The following Calico network policy is similar to the previous example, but uses **kind: GlobalNetworkPolicy** so it applies to all endpoints, regardless of namespace. - -In the following example, incoming TCP traffic to any pods with label **color: red** is denied if it comes from a pod with **color: blue**. - -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalNetworkPolicy -metadata: - name: deny-blue -spec: - selector: color == 'red' - ingress: - - action: Deny - protocol: TCP - source: - selector: color == 'blue' -``` - -As with **kind: NetworkPolicy**, you can allow or deny ingress traffic from endpoints in specific namespaces using a namespaceSelector in the policy rule: - -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalNetworkPolicy -metadata: - name: deny-circle-blue -spec: - selector: color == 'red' - ingress: - - action: Deny - protocol: TCP - source: - selector: color == 'blue' - namespaceSelector: shape == 'circle' -``` - -### Control traffic to/from endpoints using IP addresses or CIDR ranges - -Instead of using a selector to define which traffic is allowed to/from the endpoints in a network policy, you can also specify an IP block in CIDR notation. - -In the following example, outgoing traffic is allowed from pods with the label **color: red** if it goes to an IP address in the **1.2.3.4/24** CIDR block. - -```yaml -apiVersion: projectcalico.org/v3 -kind: NetworkPolicy -metadata: - name: allow-egress-external - namespace: production -spec: - selector: color == 'red' - types: - - Egress - egress: - - action: Allow - destination: - nets: - - 1.2.3.0/24 -``` - -### Apply network policies in specific order - -To control the order/sequence of applying network policies, you can use the **order** field (with precedence from the lowest value to highest). Defining policy **order** is important when you include both **action: allow** and **action: deny** rules that may apply to the same endpoint. - -In the following example, the policy **allow-cluster-internal-ingress** (order: 10) will be applied before the **policy drop-other-ingress** (order: 20). - -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalNetworkPolicy -metadata: - name: drop-other-ingress -spec: - order: 20 - #...deny policy rules here... -``` - -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalNetworkPolicy -metadata: - name: allow-cluster-internal-ingress -spec: - order: 10 - #...allow policy rules here... -``` - -### Generate logs for specific traffic - -In the following example, incoming TCP traffic to an application is denied, and each connection attempt is logged to syslog. - -```yaml -apiVersion: projectcalico.org/v3 -kind: NetworkPolicy -metadata: - name: allow-tcp-port-6379 - namespace: production -spec: - selector: role == 'database' - types: - - Ingress - - Egress - ingress: - - action: Log - protocol: TCP - source: - selector: role == 'frontend' - - action: Deny - protocol: TCP - source: - selector: role == 'frontend' -``` -### Create policy for established connections - -Policies are immediately applied to any new connections. 
However, for existing connections that are already open, the policy changes will only take effect after the connection has been reestablished. This means that any ongoing sessions may not immediately reflect policy changes until they are initiated again. - -## Additional resources - -- For additional Calico network policy features, see [Calico network policy](../../reference/resources/networkpolicy.mdx) and [Calico global network policy](../../reference/resources/globalnetworkpolicy.mdx) -- For an alternative to using IP addresses or CIDRs in policy, see [Network sets](../../reference/resources/networkset.mdx) -- For details on how to stage network policy, see [Staged network policies](../staged-network-policies.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/beginners/index.mdx b/calico-enterprise_versioned_docs/version-3.19-2/network-policy/beginners/index.mdx deleted file mode 100644 index 93dbc972f0..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/beginners/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: Learn how to create your first Calico Enterprise network policy. -hide_table_of_contents: true ---- - -# Calico Enterprise network policy for beginners - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/beginners/kubernetes-default-deny.mdx b/calico-enterprise_versioned_docs/version-3.19-2/network-policy/beginners/kubernetes-default-deny.mdx deleted file mode 100644 index 527f3f5b07..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/beginners/kubernetes-default-deny.mdx +++ /dev/null @@ -1,149 +0,0 @@ ---- -description: Create a default deny network policy so pods that are missing policy are not allowed traffic until appropriate network policy is defined. ---- - -# Enable a default deny policy for Kubernetes pods - -## Big picture - -Enable a default deny policy for Kubernetes pods using Kubernetes or $[prodname] network policy. - -## Value - -A **default deny** network policy provides an enhanced security posture so pods without policy (or incorrect policy) are not allowed traffic until appropriate network policy is defined. - -## Features - -This how-to guide uses the following $[prodname] features: - -- **NetworkPolicy** -- **GlobalNetworkPolicy** - -## Concepts - -### Default deny/allow behavior - -**Default allow** means all traffic is allowed by default, unless otherwise specified. **Default deny** means all traffic is denied by default, unless explicitly allowed. **Kubernetes pods are default allow**, unless network policy is defined to specify otherwise. - -For compatibility with Kubernetes, **$[prodname] network policy** enforcement follows the standard convention for Kubernetes pods: - -- If no network policies apply to a pod, then all traffic to/from that pod is allowed. -- If one or more network policies apply to a pod with type ingress, then only the ingress traffic specifically allowed by those policies is allowed. -- If one or more network policies apply to a pod with type egress, then only the egress traffic specifically allowed by those policies is allowed. - -For other endpoint types (VMs, host interfaces), the default behavior is to deny traffic. Only traffic specifically allowed by network policy is allowed, even if no network policies apply to the endpoint. 
- -## How to - -- [Create a default deny network policy](#crate-a-default-deny-network-policy) -- [Create a global default deny network policy](#create-a-global-default-deny-network-policy) - -### Create a default deny network policy - -Immediately after installation, a best practice is to create a namespaced default deny network policy to secure pods without policy or incorrect policy until you can put policies in place and test them. - -In the following example, we create a $[prodname] default deny **NetworkPolicy** for all workloads in the namespace, **engineering**. - -```yaml -apiVersion: projectcalico.org/v3 -kind: NetworkPolicy -metadata: - name: default-deny - namespace: engineering -spec: - selector: all() - types: - - Ingress - - Egress -``` - -Here's an equivalent default deny **Kubernetes network policy** for all pods in the namespace, **engineering** - -```yaml -apiVersion: networking.k8s.io/v1 -kind: NetworkPolicy -metadata: - name: default-deny - namespace: engineering -spec: - podSelector: {} - policyTypes: - - Ingress - - Egress -``` - -### Create a global default deny policy - -A default deny policy ensures that unwanted traffic (ingress and egress) is denied by default without you having to remember default deny/allow behavior of Kubernetes and $[prodname] policies. This policy can also help mitigate risks of lateral malicious attacks. - -#### Best practice #1: Allow, stage, then deny - -We recommend that you create a global default deny policy after you complete writing policy for the traffic that you want to allow. The following steps summarizes the best practice to test and lock down the cluster to block unwanted traffic: - -1. Create a global default deny policy and test it in a staging environment. (The policy will show all the traffic that would be blocked if it were converted into a deny.) -1. Create network policies to individually allow the traffic shown as blocked in step 1 until no connections are denied. -1. Enforce the global default deny policy. - -#### Best practice #2: Keep the scope to non-system pods - -A global default deny policy applies to the entire cluster including all workloads in all namespaces, hosts (computers that run the hypervisor for VMs or container runtime for containers), including Kubernetes control plane and $[prodname] control plane nodes and pods. - -For this reason, the best practice is to create a global default deny policy for **non-system pods** as shown in the following example. - -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalNetworkPolicy -metadata: - name: deny-app-policy -spec: - namespaceSelector: kubernetes.io/metadata.name not in {"calico-system", "kube-public", "kube-system", "tigera-operator", "tigera-system"} - types: - - Ingress - - Egress - egress: - # allow all namespaces to communicate to DNS pods - - action: Allow - protocol: UDP - destination: - selector: 'k8s-app == "kube-dns"' - ports: - - 53 - - action: Allow - protocol: TCP - destination: - selector: 'k8s-app == "kube-dns"' - ports: - - 53 -``` - -Note the following: - -- Even though we call this policy "global default deny", the above policy is not explicitly denying traffic. By selecting the traffic with the `namespaceSelector` but not specifying an allow, the traffic is denied after all other policy is evaluated. This design also makes it unnecessary to ensure any specific order (priority) for the default-deny policy. 
-- Allowing access to `kube-dns` simplifies per-pod policies because you don't need to duplicate the DNS rules in every policy -- This policy uses a negative selector for the `spec.namespaceselector` field to exclude control plane namespaces. - In this example, those namespaces include the `calico-system`, `kube-public`, `kube-system`, `tigera-operator`, and `tigera-system` namespaces. - Because your installation may have different components, make sure to check what you have installed before implementing a similar policy. - -In a staging environment, verify that the policy does not block any necessary traffic before enforcing it. - -### Don't try this! - -The following policy works and looks fine on the surface. But as described in Best practices #2, the policy is too broad in scope and could break your cluster. Therefore, we do not recommend adding this type of policy, even if you have verified allowed traffic in your staging environment. - -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalNetworkPolicy -metadata: - name: default.default-deny -spec: - tier: default - selector: all() - types: - - Ingress - - Egress -``` - -## Additional resources - -- [Network policy](../../reference/resources/networkpolicy.mdx) -- [Global network policy](../../reference/resources/globalnetworkpolicy.mdx) \ No newline at end of file diff --git a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/beginners/policy-rules/external-ips-policy.mdx b/calico-enterprise_versioned_docs/version-3.19-2/network-policy/beginners/policy-rules/external-ips-policy.mdx deleted file mode 100644 index d2e5390952..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/beginners/policy-rules/external-ips-policy.mdx +++ /dev/null @@ -1,112 +0,0 @@ ---- -description: Limit egress and ingress traffic using IP address either directly within Calico network policy or managed as Calico network sets. ---- - -# Use external IPs or networks rules in policy - -## Big picture - -Use $[prodname] network policy to limit traffic to/from external non-$[prodname] workloads or networks. - -## Value - -Modern applications often integrate with third-party APIs and SaaS services that live outside Kubernetes clusters. To securely enable access to those integrations, network security teams must be able to limit IP ranges for egress and ingress traffic to workloads. This includes using IP lists or ranges to deny-list bad actors or embargoed countries. - -Using $[prodname] network policy, you can define IP addresses/CIDRs directly in policy to limit traffic to external networks. Or using $[prodname] network sets, you can easily scale out by using the same set of IPs in multiple policies. - -## Concepts - -### IP addresses/CIDRs - -IP addresses and CIDRs can be specified directly in both Kubernetes and $[prodname] network policy rules. $[prodname] network policy supports IPV4 and IPV6 CIDRs. - -### Network sets - -A **network set** resource is an arbitrary set of IP subnetworks/CIDRs that can be matched by standard label selectors in Kubernetes or $[prodname] network policy. This is useful to reference a set of IP addresses using a selector from a namespaced network policy resource. It is typically used when you want to scale/reuse the same set of IP addresses in policy. - -A **global network set** resource is similar, but can be selected only by $[prodname] global network policies. 
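As a minimal sketch of the namespaced variant (the resource name, namespace, CIDRs, and label below are illustrative), a NetworkSet groups a set of CIDRs under a label that policy rules can then select:

```yaml
apiVersion: projectcalico.org/v3
kind: NetworkSet
metadata:
  name: external-apis
  namespace: production
  labels:
    role: external-apis
spec:
  nets:
    - 198.51.100.0/28
    - 203.0.113.10/32
```

A rule in a NetworkPolicy in the same namespace can then match these addresses with a selector such as `role == 'external-apis'`, rather than repeating the CIDRs in every policy.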
- -## How to - -- [Limit traffic to or from external networks, IPs in network policy](#limit-traffic-to-or-from-external-networks-ips-in-network-policy) -- [Limit traffic to or from external networks, global network set](#limit-traffic-to-or-from-external-networks-global-network-set) - -### Limit traffic to or from external networks, IPs in network policy - -In the following example, a $[prodname] NetworkPolicy allows egress traffic from pods with the label **color: red**, if it goes to an IP address in the 192.0.2.0/24 CIDR block. - -```yaml -apiVersion: projectcalico.org/v3 -kind: NetworkPolicy -metadata: - name: allow-egress-external - namespace: production -spec: - selector: color == 'red' - types: - - Egress - egress: - - action: Allow - destination: - nets: - - 192.0.2.0/24 -``` - -### Limit traffic to or from external networks, global network set - -In this example, we use a $[prodname] **GlobalNetworkSet** and reference it in a **GlobalNetworkPolicy**. - -In the following example, a $[prodname] **GlobalNetworkSet** deny-lists the CIDR ranges 192.0.2.55/32 and 203.0.113.0/24: - -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalNetworkSet -metadata: - name: ip-protect - labels: - ip-deny-list: 'true' -spec: - nets: - - 192.0.2.55/32 - - 203.0.113.0/24 -``` - -Next, we create two $[prodname] **GlobalNetworkPolicy** objects. The first is a high “order” policy that allows traffic as a default for things that don’t match our second policy, which is low “order” and uses the **GlobalNetworkSet** label as a selector to deny ingress traffic (IP-deny-list in the previous step). In the label selector, we also include the term **!has(projectcalico.org/namespace)**, which prevents this policy from matching pods or NetworkSets that also have this label. To more quickly enforce the denial of forwarded traffic to the host at the packet level, use the **doNotTrack** and **applyOnForward** options. - -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalNetworkPolicy -metadata: - name: forward-default-allow -spec: - selector: apply-ip-protect == 'true' - order: 1000 - doNotTrack: true - applyOnForward: true - types: - - Ingress - ingress: - - action: Allow ---- -apiVersion: projectcalico.org/v3 -kind: GlobalNetworkPolicy -metadata: - name: ip-protect -spec: - selector: apply-ip-protect == 'true' - order: 0 - doNotTrack: true - applyOnForward: true - types: - - Ingress - ingress: - - action: Deny - source: - selector: ip-deny-list == 'true' && !has(projectcalico.org/namespace) -``` - -## Additional resources - -- To understand how to use global network sets to mitigate common threats, see [Defend against DoS attacks](../../extreme-traffic/defend-dos-attack.mdx) -- [Global network sets](../../../reference/resources/globalnetworkset.mdx) -- [Global network policy](../../../reference/resources/globalnetworkpolicy.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/beginners/policy-rules/icmp-ping.mdx b/calico-enterprise_versioned_docs/version-3.19-2/network-policy/beginners/policy-rules/icmp-ping.mdx deleted file mode 100644 index 09ba250e91..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/beginners/policy-rules/icmp-ping.mdx +++ /dev/null @@ -1,130 +0,0 @@ ---- -description: Control where ICMP/ping is used by creating a Calico network policy to allow and deny ICMP/ping messages for workloads and host endpoints. 
---- - -# Use ICMP/ping rules in policy - -## Big picture - -Use $[prodname] network policy to allow and deny ICMP/ping messages. - -## Value - -The **Internet Control Message Protocol (ICMP)** provides valuable network diagnostic functions, but it can also be used maliciously. Attackers can use -it to learn about your network, or for DoS attacks. Using $[prodname] network policy, you can control where ICMP is used. For example, you can: - -- Allow ICMP ping, but only for workloads, host endpoints (or both) -- Allow ICMP for pods launched by operators for diagnostic purposes, but block other uses -- Temporarily enable ICMP to diagnose a problem, then disable it after the problem is resolved -- Deny/allow ICMPv4 and/or ICMPv6 - -## Concepts - -### ICMP packet type and code - -$[prodname] network policy also lets you deny and allow ICMP traffic based on specific types and codes. For example, you can specify ICMP type 5, code 2 to match specific ICMP redirect packets. - -For details, see [ICMP type and code](https://en.wikipedia.org/wiki/Internet_Control_Message_Protocol#Control_messages). - -## How to - -- [Deny all ICMP, all workloads and host endpoints](#deny-all-icmp-all-workloads-and-host-endpoints) -- [Allow ICMP ping, all workloads and host endpoints](#allow-icmp-ping-all-workloads-and-host-endpoints) -- [Allow ICMP matching protocol type and code, all Kubernetes pods](#allow-icmp-matching-protocol-type-and-code-all-kubernetes-pods) - -### Deny all ICMP, all workloads and host endpoints - -In this example, we introduce a "deny all ICMP" **GlobalNetworkPolicy**. - -This policy **selects all workloads and host endpoints**. It enables a default deny for all workloads and host endpoints, in addition to the explicit ICMP deny rules specified in the policy. - -If your ultimate goal is to allow some traffic, have your regular "allow" policies in place before applying a global deny-all ICMP traffic policy. - -In this example, all workloads and host endpoints are blocked from sending or receiving **ICMPv4** and **ICMPv6** messages. - -If **ICMPv6** messages are not used in your deployment, it is still good practice to deny them specifically as shown below. - -In any "deny-all" $[prodname] network policy, be sure to specify a lower order (**order:200**) than regular policies that might allow traffic. - -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalNetworkPolicy -metadata: - name: block-icmp -spec: - order: 200 - selector: all() - types: - - Ingress - - Egress - ingress: - - action: Deny - protocol: ICMP - - action: Deny - protocol: ICMPv6 - egress: - - action: Deny - protocol: ICMP - - action: Deny - protocol: ICMPv6 -``` - -### Allow ICMP ping, all workloads and host endpoints - -In this example, workloads and host endpoints can receive **ICMPv4 type 8** and **ICMPv6 type 128** ping requests that come from other workloads and host endpoints. - -All other traffic may be allowed by other policies. If traffic is not explicitly allowed, it will be denied by default. - -The policy applies only to **ingress** traffic. (Egress traffic is not affected, and default deny is not enforced for egress.) 
- -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalNetworkPolicy -metadata: - name: allow-ping-in-cluster -spec: - selector: all() - types: - - Ingress - ingress: - - action: Allow - protocol: ICMP - source: - selector: all() - icmp: - type: 8 Ping request - - action: Allow - protocol: ICMPv6 - source: - selector: all() - icmp: - type: 128 Ping request -``` - -### Allow ICMP matching protocol type and code, all Kubernetes pods - -In this example, only Kubernetes pods that match the selector **projectcalico.org/orchestrator == 'kubernetes'** are allowed to receive ICMPv4 **code: 1 host unreachable** messages. - -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalNetworkPolicy -metadata: - name: allow-host-unreachable -spec: - selector: projectcalico.org/orchestrator == 'kubernetes' - types: - - Ingress - ingress: - - action: Allow - protocol: ICMP - icmp: - type: 3 Destination unreachable - code: 1 Host unreachable -``` - -## Additional resources - -For more on the ICMP match criteria, see: - -- [Global network policy](../../../reference/resources/globalnetworkpolicy.mdx) -- [Network policy](../../../reference/resources/networkpolicy.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/beginners/policy-rules/index.mdx b/calico-enterprise_versioned_docs/version-3.19-2/network-policy/beginners/policy-rules/index.mdx deleted file mode 100644 index c035f8e7de..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/beginners/policy-rules/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: Control traffic to/from endpoints using Calico network policy rules. -hide_table_of_contents: true ---- - -# Policy rules - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/beginners/policy-rules/namespace-policy.mdx b/calico-enterprise_versioned_docs/version-3.19-2/network-policy/beginners/policy-rules/namespace-policy.mdx deleted file mode 100644 index 3591aa1b17..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/beginners/policy-rules/namespace-policy.mdx +++ /dev/null @@ -1,89 +0,0 @@ ---- -description: Use namespaces and namespace selectors in Calico network policy to group or separate resources. Use network policies to allow or deny traffic to/from pods that belong to specific namespaces. ---- - -# Use namespace rules in policy - -## Big picture - -Use $[prodname] network policies to reference pods in other namespaces. - -## Value - -Kubernetes namespaces let you group/separate resources to meet a variety of use cases. For example, you can use namespaces to separate development, production, and QA environments, or allow different teams to use the same cluster. You can use namespace selectors in $[prodname] network policies to allow or deny traffic to/from pods in specific namespaces. - -## How to - -- [Control traffic to/from endpoints in a namespace](#control-traffic-tofrom-endpoints-in-a-namespace) -- [Use Kubernetes RBAC to control namespace label assignment](#use-kubernetes-rbac-to-control-namespace-label-assignment) - -### Control traffic to/from endpoints in a namespace - -In the following example, ingress traffic is allowed to endpoints in the **namespace: production** with label **color: red**, and only from a pod in the same namespace with **color: blue**, on **port 6379**. 
- -```yaml -apiVersion: projectcalico.org/v3 -kind: NetworkPolicy -metadata: - name: allow-tcp-6379 - namespace: production -spec: - selector: color == 'red' - ingress: - - action: Allow - protocol: TCP - source: - selector: color == 'blue' - destination: - ports: - - 6379 -``` - -To allow ingress traffic from endpoints in other namespaces, use a **namespaceSelector** in the policy rule. A namespaceSelector matches one or more namespaces based on the labels that are applied on the namespace. In the following example, ingress traffic is also allowed from endpoints with **color: blue** in namespaces with **shape: circle**. - -```yaml -apiVersion: projectcalico.org/v3 -kind: NetworkPolicy -metadata: - name: allow-tcp-6379 - namespace: production -spec: - selector: color == 'red' - ingress: - - action: Allow - protocol: TCP - source: - selector: color == 'blue' - namespaceSelector: shape == 'circle' - destination: - ports: - - 6379 -``` - -### Use Kubernetes RBAC to control namespace label assignment - -Network policies can be applied to endpoints using selectors that match labels on the endpoint, the endpoint's namespace, or the endpoint's service account. By applying selectors based on the endpoint's namespace, you can use Kubernetes RBAC to control which users can assign labels to namespaces. This allows you to separate groups who can deploy pods from those who can assign labels to namespaces. - -In the following example, users in the development environment can communicate only with pods that have a namespace labeled, `environment == "development"`. - -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalNetworkPolicy -metadata: - name: restrict-development-access -spec: - namespaceSelector: 'environment == "development"' - ingress: - - action: Allow - source: - namespaceSelector: 'environment == "development"' - egress: - - action: Allow - destination: - namespaceSelector: 'environment == "development"' -``` - -## Additional resources - -- For more network policies, see [Network policy](../../../reference/resources/networkpolicy.mdx) -- To apply policy to all namespaces, see [Global network policy](../../../reference/resources/globalnetworkpolicy.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/beginners/policy-rules/policy-rules-overview.mdx b/calico-enterprise_versioned_docs/version-3.19-2/network-policy/beginners/policy-rules/policy-rules-overview.mdx deleted file mode 100644 index 8bd83d6dde..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/beginners/policy-rules/policy-rules-overview.mdx +++ /dev/null @@ -1,22 +0,0 @@ ---- -description: Define network connectivity for Calico endpoints using policy rules and label selectors. ---- - -# Basic rules - -## Big picture - -Use Calico policy rules and label selectors that match Calico endpoints (pods, OpenStack VMs, and host interfaces) to define network connectivity. - -## Value - -Using label selectors to identify the endpoints (pods, OpenStack VMs, host interfaces) that a policy applies to, or that should be selected by policy rules, means you can define policy without knowing the IP addresses of the endpoints. This is ideal for handling dynamic workloads with ephemeral IPs (such as Kubernetes pods). 
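As a minimal sketch of the pattern (the labels, namespace, and port here are illustrative, not from this page), a policy that selects endpoints by label and allows traffic from other labelled endpoints needs no IP addresses at all:

```yaml
apiVersion: projectcalico.org/v3
kind: NetworkPolicy
metadata:
  name: allow-backend-to-frontend
  namespace: demo
spec:
  # Applies to endpoints labelled app == 'frontend'
  selector: app == 'frontend'
  ingress:
    - action: Allow
      protocol: TCP
      source:
        # Allow only from endpoints labelled app == 'backend'
        selector: app == 'backend'
      destination:
        ports:
          - 8080
```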
- -## How to - -Read [Get started with Calico policy](../calico-network-policy.mdx) and [Kubernetes policy](../../get-started/kubernetes-network-policy.mdx), which cover all the basics of using label selectors in policies to select endpoints the policies apply to, or in policy rules. - -## Additional resources - -- [Global network policy](../../../reference/resources/globalnetworkpolicy.mdx) -- [Network policy](../../../reference/resources/networkpolicy.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/beginners/policy-rules/service-accounts.mdx b/calico-enterprise_versioned_docs/version-3.19-2/network-policy/beginners/policy-rules/service-accounts.mdx deleted file mode 100644 index dec9f56e77..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/beginners/policy-rules/service-accounts.mdx +++ /dev/null @@ -1,116 +0,0 @@ ---- -description: Use Kubernetes service accounts in policies to validate cryptographic identities and/or manage RBAC controlled high-priority rules across teams. ---- - -# Use service accounts rules in policy - -## Big picture - -Use $[prodname] network policy to allow/deny traffic for Kubernetes service accounts. - -## Value - -Using $[prodname] network policy, you can leverage Kubernetes service accounts with RBAC for flexible control over how policies are applied in a cluster. For example, the security team can have RBAC permissions to: - -- Control which service accounts the developer team can use within a namespace -- Write high-priority network policies for those service accounts (that the developer team cannot override) - -The network security team can maintain full control of security, while selectively allowing developer operations where it makes sense. - -## Concepts - -### Use smallest set of permissions required - -Operations on service accounts are controlled by RBAC, so you can grant permissions only to trusted entities (code and/or people) to create, modify, or delete service accounts. To perform any operation in a workload, clients are required to authenticate with the Kubernetes API server. - -If you do not explicitly assign a service account to a pod, it uses the default ServiceAccount in the namespace. - -You should not grant broad permissions to the default service account for a namespace. If an application needs access to the Kubernetes API, create separate service accounts with the smallest set of permissions required. - -### Service account labels - -Like all other Kubernetes objects, service accounts have labels. You can use labels to create ‘groups’ of service accounts. $[prodname] network policy lets you select workloads by their service account using: - -- An exact match on service account name -- A service account label selector expression - -## Before you begin... - -Configure unique Kubernetes service accounts for your applications. - -## How to - -- [Limit ingress traffic for workloads by service account name](#limit-ingress-traffic-for-workloads-by-service-account-name) -- [Limit ingress traffic for workloads by service account label](#limit-ingress-traffic-for-workloads-by-service-account-label) -- [Use Kubernetes RBAC to control service account label assignment](#use-kubernetes-rbac-to-control-service-account-label-assignment) - -### Limit ingress traffic for workloads by service account name - -In the following example, ingress traffic is allowed from any workload whose service account matches the names **api-service** or **user-auth-service**. 
- -```yaml -apiVersion: projectcalico.org/v3 -kind: NetworkPolicy -metadata: - name: demo-calico - namespace: prod-engineering -spec: - ingress: - - action: Allow - source: - serviceAccounts: - names: - - api-service - - user-auth-service - selector: 'app == "db"' -``` - -### Limit ingress traffic for workloads by service account label - -In the following example, ingress traffic is allowed from any workload whose service account matches the label selector, **app == web-frontend**. - -```yaml -apiVersion: projectcalico.org/v3 -kind: NetworkPolicy -metadata: - name: allow-web-frontend - namespace: prod-engineering -spec: - ingress: - - action: Allow - source: - serviceAccounts: - selector: 'app == "web-frontend"' - selector: 'app == "db"' -``` - -### Use Kubernetes RBAC to control service account label assignment - -Network policies can be applied to endpoints using selectors that match labels on the endpoint, the endpoint's namespace, or the endpoint's service account. By applying selectors based on the endpoint's service account, you can use Kubernetes RBAC to control which users can assign labels to service accounts. This allows you to separate groups who can deploy pods from those who can assign labels to service accounts. - -In the following example, pods with an intern service account can communicate only with pods with service accounts labeled, `role: intern`. - -```yaml -apiVersion: projectcalico.org/v3 -kind: NetworkPolicy -metadata: - name: restrict-intern-access - namespace: prod-engineering -spec: - serviceAccountSelector: 'role == "intern"' - ingress: - - action: Allow - source: - serviceAccounts: - selector: 'role == "intern"' - egress: - - action: Allow - destination: - serviceAccounts: - selector: 'role == "intern"' -``` - -## Additional resources - -- [Network policy](../../../reference/resources/networkpolicy.mdx) -- [Global network policy](../../../reference/resources/globalnetworkpolicy.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/beginners/policy-rules/service-policy.mdx b/calico-enterprise_versioned_docs/version-3.19-2/network-policy/beginners/policy-rules/service-policy.mdx deleted file mode 100644 index 09b3d19378..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/beginners/policy-rules/service-policy.mdx +++ /dev/null @@ -1,119 +0,0 @@ ---- -description: Use Kubernetes Service names in policy rules. ---- - -# Use service rules in policy - -## Big picture - -Use $[prodname] network policy to allow/deny traffic for Kubernetes services. - -## Value - -Using $[prodname] network policy, you can leverage Kubernetes Service names to easily define access to Kubernetes services. Using service names in policy enables you to: - -- Allow or deny access to the Kubernetes API service. -- Reference port information already declared by the application, making it easier to keep policy up-to-date as application requirements change. - -## How to - -- [Allow access to the Kubernetes API for a specific namespace](#allow-access-to-the-kubernetes-api-for-a-specific-namespace) -- [Allow access to Kubernetes DNS for the entire cluster](#allow-access-to-kubernetes-dns-for-the-entire-cluster) -- [Allow access from a specified service](#allow-access-from-a-specified-service) - -### Allow access to the Kubernetes API for a specific namespace - -In the following example, egress traffic is allowed to the `kubernetes` service in the `default` namespace for all pods in the namespace `my-app`. 
This service is the typical -access point for the Kubernetes API server. - -```yaml -apiVersion: projectcalico.org/v3 -kind: NetworkPolicy -metadata: - name: allow-api-access - namespace: my-app -spec: - selector: all() - egress: - - action: Allow - destination: - services: - name: kubernetes - namespace: default -``` - -Endpoint addresses and ports to allow will be automatically detected from the service. - -### Allow access to Kubernetes DNS for the entire cluster - -In the following example, a GlobalNetworkPolicy is used to select all pods in the cluster to apply a rule which ensures -all pods can access the Kubernetes DNS service. - -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalNetworkPolicy -metadata: - name: allow-kube-dns -spec: - selector: all() - egress: - - action: Allow - destination: - services: - name: kube-dns - namespace: kube-system -``` - -:::note - -This policy also enacts a default-deny behavior for all pods, so make sure any other required application traffic is allowed by a policy. - -::: - -## Allow access from a specified service - -In the following example, ingress traffic is allowed from the `frontend-service` service in the `frontend` namespace for all pods in the namespace `backend`. -This allows all pods that back the `frontend-service` service to send traffic to all pods in the `backend` namespace. - -```yaml -apiVersion: projectcalico.org/v3 -kind: NetworkPolicy -metadata: - name: allow-frontend-service-access - namespace: backend -spec: - selector: all() - ingress: - - action: Allow - source: - services: - name: frontend-service - namespace: frontend -``` - -We can also further specify the ports that the `frontend-service` service is allowed to access. The following example limits access from the `frontend-service` -service to port 80. - -```yaml -apiVersion: projectcalico.org/v3 -kind: NetworkPolicy -metadata: - name: allow-frontend-service-access - namespace: backend -spec: - selector: all() - ingress: - - action: Allow - protocol: TCP - source: - services: - name: frontend-service - namespace: frontend - destination: - ports: [80] -``` - -## Additional resources - -- [Network policy](../../../reference/resources/networkpolicy.mdx) -- [Global network policy](../../../reference/resources/globalnetworkpolicy.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/beginners/services/index.mdx b/calico-enterprise_versioned_docs/version-3.19-2/network-policy/beginners/services/index.mdx deleted file mode 100644 index 9a8084c99b..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/beginners/services/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: Apply Calico policy to Kubernetes node ports, and to services that are exposed externally as cluster IPs. -hide_table_of_contents: true ---- - -# Policy for Kubernetes services - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/beginners/services/kubernetes-node-ports.mdx b/calico-enterprise_versioned_docs/version-3.19-2/network-policy/beginners/services/kubernetes-node-ports.mdx deleted file mode 100644 index f172e8b072..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/beginners/services/kubernetes-node-ports.mdx +++ /dev/null @@ -1,135 +0,0 @@ ---- -description: Restrict access to Kubernetes node ports using Calico Enterprise global network policy. 
Follow the steps to secure the host, the node ports, and the cluster. ---- - -# Apply Calico Enterprise policy to Kubernetes node ports - -## Big picture - -Restrict access to node ports to specific external clients. - -## Value - -Exposing services to external clients using node ports is a standard Kubernetes feature. However, if you want to restrict access to node ports to specific external clients, you need to use Calico global network policy. - -## Concepts - -### Network policy with preDNAT field - -In a Kubernetes cluster, kube-proxy will DNAT a request to the node's port and IP address to one of the pods that backs the service. For Calico global network policy to both allow normal ingress cluster traffic and deny other general ingress traffic, it must take effect before DNAT. To do this, you simply add a **preDNAT** field to a Calico global network policy. The preDNAT field: - -- Applies before DNAT -- Applies only to ingress rules -- Enforces all ingress traffic through a host endpoint, regardless of destination - The destination can be a locally hosted pod, a pod on another node, or a process running on the host. - -## Before you begin... - -For services that you want to expose to external clients, configure Kubernetes services with type **NodePort**. - -## How to - -To securely expose a Kubernetes service to external clients, you must implement all of the following steps. - -- [Allow cluster ingress traffic, but deny general ingress traffic](#allow-cluster-ingress-traffic-but-deny-general-ingress-traffic) -- [Allow local host egress traffic](#allow-local-host-egress-traffic) -- [Create host endpoints with appropriate network policy](#create-host-endpoints-with-appropriate-network-policy) -- [Allow ingress traffic to specific node ports](#allow-ingress-traffic-to-specific-node-ports) - -### Allow cluster ingress traffic but deny general ingress traffic - -In the following example, we create a global network policy to allow cluster ingress traffic (**allow-cluster-internal-ingress**): for the nodes’ IP addresses (**1.2.3.4/16**), and for pod IP addresses assigned by Kubernetes (**100.100.100.0/16**). By adding a preDNAT field, Calico global network policy is applied before regular DNAT on the Kubernetes cluster. - -In this example, we use the **selector: has(kubernetes-host)** -- so the policy is applicable to any endpoint with a **kubernetes-host** label (but you can easily specify particular nodes). - -Finally, when you specify a preDNAT field, you must also add the **applyOnForward: true** field. - -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalNetworkPolicy -metadata: - name: allow-cluster-internal-ingress-only -spec: - order: 20 - preDNAT: true - applyOnForward: true - ingress: - - action: Allow - source: - nets: [1.2.3.4/16, 100.100.100.0/16] - - action: Deny - selector: has(kubernetes-host) -``` - -### Allow local host egress traffic - -We also need a global network policy to allow egress traffic through each node's external interface. Otherwise, when we define host endpoints for those interfaces, no egress traffic will be allowed from local processes (except for traffic that is allowed by the [Failsafe rules](../../../reference/host-endpoints/failsafe.mdx). 
- -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalNetworkPolicy -metadata: - name: allow-outbound-external -spec: - order: 10 - egress: - - action: Allow - selector: has(kubernetes-host) -``` - -### Create host endpoints with appropriate network policy - -In this example, we assume that you have already defined Calico host endpoints with network policy that is appropriate for the cluster. (For example, you wouldn’t want a host endpoint with a “default deny all traffic to/from this host” network policy because that is counter to the goal of allowing/denying specific traffic.) For help, see [host endpoints](../../../reference/resources/hostendpoint.mdx). - -All of our previously-defined global network policies have a selector that makes them applicable to any endpoint with a **kubernetes-host label**; so we will include that label in our definitions. For example, for **eth0** on **node1**. - -```yaml -apiVersion: projectcalico.org/v3 -kind: HostEndpoint -metadata: - name: node1-eth0 - labels: - kubernetes-host: ingress -spec: - interfaceName: eth0 - node: node1 - expectedIPs: - - INSERT_IP_HERE -``` - -When creating each host endpoint, replace `INSERT_IP_HERE` with the IP address on eth0. The `expectedIPs` field is required so that any selectors within ingress or egress rules can properly match the host endpoint. - -### Allow ingress traffic to specific node ports - -Now we can allow external access to the node ports by creating a global network policy with the preDNAT field. In this example, **ingress traffic is allowed** for any host endpoint with **port: 31852**. - -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalNetworkPolicy -metadata: - name: allow-nodeport -spec: - preDNAT: true - applyOnForward: true - order: 10 - ingress: - - action: Allow - protocol: TCP - destination: - selector: has(kubernetes-host) - ports: [31852] - selector: has(kubernetes-host) -``` - -To make the NodePort accessible only through particular nodes, give the nodes a particular label. For example: - -```yaml -nodeport-external-ingress: true -``` - -Then, use **nodeport-external-ingress: true** as the selector of the **allow-nodeport** policy, instead of **has(kubernetes-host)**. - -## Additional resources - -- [Global network policy](../../../reference/resources/globalnetworkpolicy.mdx) -- [Host endpoints](../../../reference/resources/hostendpoint.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/beginners/services/services-cluster-ips.mdx b/calico-enterprise_versioned_docs/version-3.19-2/network-policy/beginners/services/services-cluster-ips.mdx deleted file mode 100644 index fa7297d48f..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/beginners/services/services-cluster-ips.mdx +++ /dev/null @@ -1,193 +0,0 @@ ---- -description: Expose Kubernetes service cluster IPs over BGP using Calico Enterprise, and restrict who can access them using Calico Enterprise network policy. ---- - -# Apply Calico Enterprise policy to services exposed externally as cluster IPs - -## Big picture - -Control access to services exposed through clusterIPs that are advertised outside the cluster using BGP. 
- -## Value - -$[prodname] network policy uses standard Kubernetes Services that allow you to expose services within clusters to external clients in the following ways: - -- [Apply policy to Kubernetes nodeports](kubernetes-node-ports.mdx) -- Using cluster IPs over BGP (described in this article) - -## Concepts - -### Advertise cluster IPs outside the cluster - -A **cluster IP** is a virtual IP address that represents a Kubernetes Service. Kube Proxy on each host translates the clusterIP into a pod IP for one of the pods backing the service, acting as a reverse proxy and load balancer. - -Cluster IPs were originally designed for use within the Kubernetes cluster. $[prodname] allows you to advertise Cluster IPs externally -- so external clients can use them to access services hosted inside the cluster. This means that $[prodname] ingress policy can be applied at **one or both** of the following locations: - -- Host interface, when the traffic destined for the clusterIP first ingresses the cluster -- Pod interface of the backend pod - -### Traffic routing: local versus cluster modes - -$[prodname] implements [Kubernetes service external traffic policy](https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip), which controls whether external traffic is routed to node-local or cluster-wide endpoints. The following table summarizes key differences between these settings. The default is **cluster mode**. - -| **Service setting** | **Traffic is load balanced...** | **Pros and cons** | **Required service type** | -| ------------------------------------------- | --------------------------------------------------- | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -| **externalTrafficPolicy: Cluster**(default) | Across all nodes in the cluster | Equal distribution of traffic among all pods running a service.

    Possible unnecessary network hops between nodes for ingress external traffic. When packets are rerouted to pods on another node, traffic is SNAT’d (source network address translation).

    Destination pod can see the proxying node’s IP address rather than the actual client IP. | **ClusterIP** | -| **externalTrafficPolicy: Local** | Across the nodes with the endpoints for the service | Avoids extra hops, so it is better for apps that ingress a lot of external traffic.

    Traffic is not SNAT’d so actual client IPs are preserved.

    Traffic distributed among pods running a service may be imbalanced. | **LoadBalancer** (for cloud providers), or **NodePort** (for node’s static port) | - -## Before you begin... - -[Configure Calico to advertise cluster IPs over BGP](../../../networking/configuring/advertise-service-ips.mdx). - -## How to - -Selecting which mode to use depends on your goals and resources. At an operational level, **local mode** simplifies policy, but load balancing may be uneven in certain scenarios. **Cluster mode** requires more work to manage clusterIPs, SNAT, and create policies that reference specific IP addresses, but you always get even load balancing. - -- [Secure externally exposed cluster IPs, local mode](#secure-externally-exposed-cluster-ips-local-mode) -- [Secure externally exposed cluster IPs, cluster mode](#secure-externally-exposed-cluster-ips-cluster-mode) - -### Secure externally exposed cluster IPs, local mode - -Using **local mode**, the original source address of external traffic is preserved, and you can define policy directly using standard $[prodname] network policy. - -1. Create $[prodname] **NetworkPolicies** or **GlobalNetworkPolicies** that select the same set of pods as your Kubernetes Service. -1. Add rules to allow the external traffic. -1. If desired, add rules to allow in-cluster traffic. - -### Secure externally exposed cluster IPs, cluster mode - -In the following steps, we define **GlobalNetworkPolicy** and **HostEndpoints**. - -#### Step 1: Verify Kubernetes Service manifest - -Ensure that your Kubernetes Service manifest explicitly lists the clusterIP; do not allow Kubernetes to automatically assign the clusterIP because you need it for your policies in the following steps. - -#### Step 2: Create global network policy at the host interface - -In this step, you create a **GlobalNetworkPolicy** that selects all **host endpoints**. It controls access to the cluster IP, and prevents unauthorized clients from outside the cluster from accessing it. The hosts then forwards only authorized traffic. - -**Set policy to allow external traffic for cluster IPs** - -Add rules to allow the external traffic for each clusterIP. The following example allows connections to two cluster IPs. Make sure you add **applyOnForward** and **preDNAT** rules. - -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalNetworkPolicy -metadata: - name: allow-cluster-ips -spec: - selector: k8s-role == 'node' - types: - - Ingress - applyOnForward: true - preDNAT: true - ingress: - # Allow 50.60.0.0/16 to access Cluster IP A - - action: Allow - source: - nets: - - 50.60.0.0/16 - destination: - nets: - - 10.20.30.40/32 Cluster IP A - # Allow 70.80.90.0/24 to access Cluster IP B - - action: Allow - source: - nets: - - 70.80.90.0/24 - destination: - nets: - - 10.20.30.41/32 Cluster IP B -``` - -**Add a rule to allow traffic destined for the pod CIDR** - -Without this rule, normal pod-to-pod traffic is blocked because the policy applies to forwarded traffic. - -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalNetworkPolicy -metadata: - name: allow-to-pods -spec: - selector: k8s-role == 'node' - types: - - Ingress - applyOnForward: true - preDNAT: true - ingress: - # Allow traffic forwarded to pods - - action: Allow - destination: - nets: - - 192.168.0.0/16 Pod CIDR -``` - -**Add a rule to allow traffic destined for all host endpoints** - -Or, you can add rules that allow specific host traffic including Kubernetes and $[prodname]. Without this rule, normal host traffic is blocked. 
- -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalNetworkPolicy -metadata: - name: allow-traffic-hostendpoints -spec: - selector: k8s-role == 'node' - types: - - Ingress - ingress: - # Allow traffic to the node (not nodePorts, TCP) - - action: Allow - protocol: TCP - destination: - selector: k8s-role == 'node' - notPorts: ["30000:32767"] # nodePort range - # Allow traffic to the node (not nodePorts, UDP) - - action: Allow - protocol: UDP - destination: - selector: k8s-role == 'node' - notPorts: ["30000:32767"] # nodePort range -``` - -#### Step 3: Create a global network policy that selects pods - -In this step, you create a **GlobalNetworkPolicy** that selects the **same set of pods as your Kubernetes Service**. Add rules that allow host endpoints to access the service ports. - -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalNetworkPolicy -metadata: - name: allow-nodes-svc-a -spec: - selector: k8s-svc == 'svc-a' - types: - - Ingress - ingress: - - action: Allow - protocol: TCP - source: - selector: k8s-role == 'node' - destination: - ports: [80, 443] - - action: Allow - protocol: UDP - source: - selector: k8s-role == 'node' - destination: - ports: [80, 443] -``` - -#### Step 4: (Optional) Create network policies or global network policies that allow in-cluster traffic to access the service - -#### Step 5: Create HostEndpoints - -Create HostEndpoints for the interface of each host that will receive traffic for the clusterIPs. Be sure to label them so they are selected by the policy in Step 2 (Add a rule to allow traffic destined for the pod CIDR), and the rules in Step 3. - -In the previous example policies, the label **k8s-role: node** is used to identify these HostEndpoints. - -## Additional resources - -- [Enable service IP advertisement](../../../networking/configuring/advertise-service-ips.mdx) -- [Defend against DoS attacks](../../extreme-traffic/defend-dos-attack.mdx) -- [Global network policy](../../../reference/resources/globalnetworkpolicy.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/beginners/simple-policy-cnx.mdx b/calico-enterprise_versioned_docs/version-3.19-2/network-policy/beginners/simple-policy-cnx.mdx deleted file mode 100644 index 92646307e5..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/beginners/simple-policy-cnx.mdx +++ /dev/null @@ -1,329 +0,0 @@ ---- -description: Learn the extra features of Calico Enterprise that make it so important for production environments. ---- - -# Calico Enterprise for Kubernetes demo - -This guide is a variation of the [simple policy demo](../get-started/kubernetes-policy-basic.mdx) intended to introduce the extra features of $[prodname] to people already familiar with Project Calico for Kubernetes. - -It requires a Kubernetes cluster configured with Calico networking and $[prodname], and expects that you have `kubectl` configured to interact with the cluster. - -You can quickly and easily obtain such a cluster by following one of the -[installation guides](../../getting-started/install-on-clusters/kubernetes/index.mdx), -or by [upgrading an existing cluster](../../getting-started/upgrading/upgrading-calico-to-calico-enterprise/upgrade-to-tsee/index.mdx). - -The key steps in moving to $[prodname] are to change to the $[prodname] version of calico-node, update its configuration, download [calicoq](../../reference/clis/calicoq/index.mdx), and deploy Prometheus. 
- -This guide assumes that you have installed all the $[prodname] components from the -guides above and that your cluster consists of the following nodes: - -- k8s-node1 -- k8s-node2 -- k8s-master - -Where you see references to these in the text below, substitute for your actual node names. You can find what nodes are on your cluster with `kubectl get nodes` - -## Configure Namespaces - -This guide will deploy pods in a Kubernetes namespace. Let's create the `Namespace` object for this guide. - -``` -kubectl create ns policy-demo -``` - -## Create demo pods - -We'll use Kubernetes `Deployment` objects to easily create pods in the namespace. - -1. Create some nginx pods in the `policy-demo` namespace. - - ```shell - kubectl create deployment --namespace=policy-demo nginx --image=nginx - ``` - -1. Expose them through a service. - - ```shell - kubectl expose --namespace=policy-demo deployment nginx --port=80 - ``` - -1. Ensure the nginx service is accessible. - - ```shell - kubectl run --namespace=policy-demo access --rm -ti --image busybox /bin/sh - ``` - - This should open up a shell session inside the `access` pod, as shown below. - - ``` - Waiting for pod policy-demo/access-472357175-y0m47 to be running, status is Pending, pod ready: false - - If you don't see a command prompt, try pressing enter. - - / # - ``` - -1. From inside the `access` pod, attempt to reach the `nginx` service. - - ```shell - wget -q nginx -O - - ``` - - You should see a response from `nginx`. Great! Our service is accessible. You can exit the pod now. - -1. Inspect the network policies using calicoq. The `host` command displays - information about the policies for endpoints on a given host. - - :::note - - calicoq complements calicoctl by inspecting the - dynamic aspects of $[prodname] Policy: in particular displaying the endpoints actually affected by policies, - and the policies that actually apply to endpoints. - The full calicoq documentation is [here](../../reference/clis/calicoq/index.mdx). - - ::: - - ``` - DATASTORE_TYPE=kubernetes calicoq host k8s-node1 - ``` - - You should see the following output. - - ``` - Policies and profiles for each endpoint on host "k8s-node1": - - Workload endpoint k8s/tigera-prometheus.alertmanager-calico-node-alertmanager-0/eth0 - Policies: - Policy "tigera-prometheus/knp.default.calico-node-alertmanager" (order 1000; selector "(projectcalico.org/orchestrator == 'k8s' && alertmanager == 'calico-node-alertmanager' && app == 'alertmanager') && projectcalico.org/namespace == 'tigera-prometheus'") - Policy "tigera-prometheus/knp.default.calico-node-alertmanager-mesh" (order 1000; selector "(projectcalico.org/orchestrator == 'k8s' && alertmanager == 'calico-node-alertmanager' && app == 'alertmanager') && projectcalico.org/namespace == 'tigera-prometheus'") - Policy "tigera-prometheus/knp.default.default-deny" (order 1000; selector "(projectcalico.org/orchestrator == 'k8s') && projectcalico.org/namespace == 'tigera-prometheus'") - Profiles: - Profile "kns.tigera-prometheus" - Rule matches: - Policy "tigera-prometheus/knp.default.calico-node-alertmanager-mesh" inbound rule 1 source match; selector "(projectcalico.org/namespace == 'tigera-prometheus') && (projectcalico.org/orchestrator == 'k8s' && app in { 'alertmanager' } && alertmanager in { 'calico-node-alertmanager' })" - - ... 
- - Workload endpoint k8s/policy-demo.nginx-8586cf59-5bxvh/eth0 - Policies: - Profiles: - Profile "kns.policy-demo" - ``` - - For each workload endpoint, the `Policies:` section lists the policies that - apply to that endpoint, in the order they apply. calicoq displays both - $[prodname] Policies and Kubernetes NetworkPolicies, although this - example focuses on the latter. The `Rule matches:` section lists the - policies that match that endpoint in their rules, in other words that have - rules that deny or allow that endpoint as a packet source or destination. - - Focusing on the - `k8s/tigera-prometheus.alertmanager-calico-node-alertmanager-0/eth0` endpoint: - - - The first two policies are defined in the monitor-calico.yaml manifest. - The selectors here have been translated from the original NetworkPolicies to - the $[prodname] format (note the addition of the namespace test). - - - The third policy and the following profile are created automatically by the - policy controller. - -1. Use kubectl to see the detail of any particular policy or profile. For - example, for the `kns.policy-demo` profile, which defines default behavior for - pods in the `policy-demo` namespace: - - ```shell - kubectl get profile kns.policy-demo -o yaml - ``` - - You should see the following output. - - ```yaml - apiVersion: projectcalico.org/v3 - kind: Profile - metadata: - creationTimestamp: '2022-01-06T21:32:05Z' - name: kns.policy-demo - resourceVersion: 435026/ - uid: 75dd2ed4-d3a6-41ca-a106-db073bfa946a - spec: - egress: - - action: Allow - destination: {} - source: {} - ingress: - - action: Allow - destination: {} - source: {} - labelsToApply: - pcns.projectcalico.org/name: policy-demo - ``` - - Alternatively, you may also use the $[prodname] web console to inspect and view information and metrics associated with policies, endpoints, and nodes. - -## Enable isolation - -Let's turn on isolation in our policy-demo namespace. $[prodname] will then prevent connections to pods in this namespace. - -Running the following command creates a NetworkPolicy which implements a default deny behavior for all pods in the `policy-demo` namespace. - -```shell -kubectl create -f - < - -### Workload and host endpoints - -Policy with domain names can be enforced on workload or host endpoints. When a policy with domain names applies to a workload endpoint, it -allows that workload to connect out to the specified domains. When policy with domain names applies to a host endpoint, it allows clients -directly on the relevant host (including any host-networked workloads) to connect out to the specified domains. - -### Trusted DNS servers - -$[prodname] trusts DNS information only from its list of DNS trusted servers. Using trusted DNS servers to back domain names in -policy, prevents a malicious workload from using IPs returned by a fake DNS server to hijack domain names in policy rules. - -By default, $[prodname] trusts the Kubernetes cluster’s DNS service (kube-dns or CoreDNS). For workload endpoints, these -out-of-the-box defaults work with standard Kubernetes installs, so normally you won’t change them. For host endpoints you will need to add -the IP addresses that the cluster nodes use for DNS resolution. - -## Before you begin - -**Not supported** - -DNS policy is not supported at egress of egress gateway pods. Domain-based rules will either never match in -that hook, or, they may match intermittently. Intermittent matches occur when a pod on the same node as the -egress gateway pod happens to make a matching DNS query. 
This is because the DNS-to-IP cache used to render -the policy is shared node-wide. - -## How to - -You can specify allowed domain names directly in a **global network policy** or **namespaced network policy**, or specify domain names in a **global network set** (and then -reference the global network set in a global network policy). - -- [Use domain names in a global network policy](#use-domain-names-in-a-global-network-policy) -- [Use domain names in a namespaced network policy](#use-domain-names-in-a-namespaced-network-policy) -- [Use domain names in a global network set, reference the set in a global network policy](#use-domain-names-in-a-global-network-set) - -### Best practice - -Use a **global network set** when the same set of domains needs to be referenced in multiple policies, or when you want the allowed -destinations to be a mix of domains and IPs from global network sets, or IPs from workload endpoints and host endpoints. By using a single -destination selector in a global network set, you can potentially match all of these resources. - -### Use domain names in a global network policy - -In this method, you create a **GlobalNetworkPolicy** with egress rules with `action: Allow` and a `destination.domains` field specifying the -domain names to which egress traffic is allowed. - -In the following example, the first rule allows DNS traffic, and the second rule allows connections outside the cluster to domains -**api.alice.com** and **\*.example.com** (which means `.example.com`, such as **bob.example.com**). - -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalNetworkPolicy -metadata: - name: allow-egress-to-domains -spec: - order: 1 - selector: my-pod-label == 'my-value' - types: - - Egress - egress: - - action: Allow - protocol: UDP - destination: - ports: - - 53 - - dns - - action: Allow - destination: - domains: - - api.alice.com - - '*.example.com' -``` - -### Use domain names in a namespaced network policy - -In this method, you create a **NetworkPolicy** with egress rules with `action: Allow` and a `destination.domains` field specifying the -domain names to which egress traffic is allowed. - -In the following example, the first rule allows DNS traffic, and the second rule allows connections outside the cluster to domains -**api.alice.com** and **\*.example.com** (which means `.example.com`, such as **bob.example.com**). - -```yaml -apiVersion: projectcalico.org/v3 -kind: NetworkPolicy -metadata: - name: allow-egress-to-domains - namespace: rollout-test -spec: - order: 1 - selector: my-pod-label == 'my-value' - types: - - Egress - egress: - - action: Allow - protocol: UDP - destination: - ports: - - 53 - - dns - - action: Allow - destination: - domains: - - api.alice.com - - '*.example.com' -``` - -The difference between this and the **GlobalNetworkPolicy** example is that this namespaced NetworkPolicy can only grant egress access, to the specified domains, to workload endpoints in the `rollout-test` namespace. - -### Use domain names in a global network set - -In this method, you create a **GlobalNetworkSet** with the allowed destination domain names in the `allowedEgressDomains` field. Then, -you create a **GlobalNetworkPolicy** with a `destination.selector` that matches that GlobalNetworkSet. - -In the following example, the allowed egress domains (`api.alice.com` and `*.example.com`) are specified in the GlobalNetworkSet. 
- -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalNetworkSet -metadata: - name: allowed-domains-1 - labels: - color: red -spec: - allowedEgressDomains: - - api.alice.com - - '*.example.com' -``` - -Then, you reference the global network set in a **GlobalNetworkPolicy** using a destination label selector. - -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalNetworkPolicy -metadata: - name: allow-egress-to-domain -spec: - order: 1 - selector: my-pod-label == 'my-value' - types: - - Egress - egress: - - action: Allow - destination: - selector: color == 'red' -``` - -### Use domain names in a network set - -In this method, you create a **NetworkSet** with the allowed destination domain names in the `allowedEgressDomains` field. Then, -you create a **NetworkPolicy** with a `destination.selector` that matches that NetworkSet. - -In the following example, the allowed egress domains (`api.alice.com` and `*.example.com`) are specified in the NetworkSet. - -```yaml -apiVersion: projectcalico.org/v3 -kind: NetworkSet -metadata: - name: allowed-domains-1 - namespace: rollout-test - labels: - color: red -spec: - allowedEgressDomains: - - api.alice.com - - '*.example.com' -``` - -Then, you reference the network set in a **NetworkPolicy** using a destination label selector. - -```yaml -apiVersion: projectcalico.org/v3 -kind: NetworkPolicy -metadata: - name: allow-egress-to-domain - namespace: rollout-test -spec: - order: 1 - selector: my-pod-label == 'my-value' - types: - - Egress - egress: - - action: Allow - destination: - selector: color == 'red' -``` - -### Tuning performance and latency - -$[prodname] supports different DNS policy modes with different peformance and latency implications. -The default mode for the iptables and nftables data planes is `DelayDeniedPacket`. -The eBPF data plane defaults to `Inline` on kernels 5.17 (or RedHat 5.14) and newer and to `NoDelay` on older kernels. -The Windows data plane support only `NodeDelay` mode. - -With `NoDelay` mode, new pods sometimes fail to connect to domains that are allowed by a DNS policy. -In these cases, the pod tries to connect to a domain before $[prodname] has finished processing the DNS policy that allows the connection. -As soon as the processing is complete, the pod is able to connect. - -For more information, see [DNSPolicyMode](../reference/resources/felixconfig#dnspolicymode) and [BPFDNSPolicyMode](../reference/resources/felixconfig#bpfdnspolicymode). - -## Additional resources - -To change the default DNS trusted servers, use the [DNSTrustedServers parameter](../reference/component-resources/node/felix/configuration.mdx). - -For more detail about the relevant resources, see -[GlobalNetworkSet](../reference/resources/globalnetworkset.mdx), -[GlobalNetworkPolicy](../reference/resources/globalnetworkpolicy.mdx), -[NetworkPolicy](../reference/resources/networkpolicy.mdx) -and -[NetworkSet](../reference/resources/networkset.mdx). diff --git a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/extreme-traffic/defend-dos-attack.mdx b/calico-enterprise_versioned_docs/version-3.19-2/network-policy/extreme-traffic/defend-dos-attack.mdx deleted file mode 100644 index 43b9618d00..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/extreme-traffic/defend-dos-attack.mdx +++ /dev/null @@ -1,107 +0,0 @@ ---- -description: Define DoS mitigation rules in Calico Enterprise policy to quickly drop connections when under attack. 
Learn how rules use eBPF and XDP, including hardware offload when available. ---- - -# Defend against DoS attacks - -## Big picture - -Calico automatically enforces specific types of deny-list policies at the earliest possible point in the packet processing pipeline, including offloading to NIC hardware whenever possible. - -## Value - -During a DoS attack, a cluster can receive massive numbers of connection requests from attackers. The faster these connection requests are dropped, the less flooding and overloading to your hosts. When you define DoS mitigation rules in Calico network policy, Calico enforces the rules as efficiently as possible to minimize the impact. - -## Concepts - -### Earliest packet processing - -The earliest point in the packet processing pipeline that packets can be dropped, depends on the Linux kernel version and the capabilities of the NIC driver and NIC hardware. Calico automatically uses the fastest available option. - -| Processed by... | Used by Calico if... | Performance | -| --------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------- | -| NIC hardware | The NIC supports **XDP offload** mode. | Fastest | -| NIC driver | The NIC driver supports **XDP native** mode. | Faster | -| Kernel | The kernel supports **XDP generic mode** and Calico is configured to explicitly use it. This mode is rarely used and has no performance benefits over iptables raw mode below. To enable, see [Felix Configuration](../../reference/resources/felixconfig.mdx). | Fast | -| Kernel | If none of the modes above are available, **iptables raw** mode is used. | Fast | - -:::note - -XDP modes require Linux kernel v4.16 or later. - -::: - -## How to - -The high-level steps to defend against a DoS attack are: - -- [Step 1: Create host endpoints](#step-1-create-host-endpoints) -- [Step 2: Add CIDRs to deny-list in a global network set](#step-2-add-cidrs-to-deny-list-in-a-global-network-set) -- [Step 3: Create deny incoming traffic global network policy](#step-3-create-deny-incoming-traffic-global-network-policy) - -### Best practice - -The following steps walk through the above required steps, assuming no prior configuration is in place. A best practice is to proactively do these steps before an attack (create the host endpoints, network policy, and global network set). In the event of a DoS attack, you can quickly respond by just adding the CIDRs that you want to deny-list to the global network set. - -### Step 1: Create host endpoints - -First, you create the HostEndpoints corresponding to the network interfaces where you want to enforce DoS mitigation rules. In the following example, the HostEndpoint secures the interface named **eth0** with IP **10.0.0.1** on node **jasper**. - -```yaml -apiVersion: projectcalico.org/v3 -kind: HostEndpoint -metadata: - name: production-host - labels: - apply-dos-mitigation: 'true' -spec: - interfaceName: eth0 - node: jasper - expectedIPs: ['10.0.0.1'] -``` - -### Step 2: Add CIDRs to deny-list in a global network set - -Next, you create a Calico **GlobalNetworkset**, adding the CIDRs that you want to deny-list. 
In the following example, the global network set deny-lists the CIDR ranges **1.2.3.4/32** and **5.6.0.0/16**: - -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalNetworkSet -metadata: - name: dos-mitigation - labels: - dos-deny-list: 'true' -spec: - nets: - - '1.2.3.4/32' - - '5.6.0.0/16' -``` - -### Step 3: Create deny incoming traffic global network policy - -Finally, create a Calico GlobalNetworkPolicy adding the GlobalNetworkSet label (**dos-deny-list** in the previous step) as a selector to deny ingress traffic. To more quickly enforce the denial of forwarded traffic to the host at the packet level, use the **doNotTrack** and **applyOnForward** options. - -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalNetworkPolicy -metadata: - name: dos-mitigation -spec: - selector: apply-dos-mitigation == 'true' - doNotTrack: true - applyOnForward: true - types: - - Ingress - ingress: - - action: Deny - source: - selector: dos-deny-list == 'true' -``` - -## Additional resources - -- [Global network sets](../../reference/resources/globalnetworkset.mdx) -- [Global network policy](../../reference/resources/globalnetworkpolicy.mdx) -- [Create a host endpoint](../../reference/resources/hostendpoint.mdx) -- [Introduction to XDP](https://www.iovisor.org/technology/xdp) -- [Advanced XDP documentation](https://prototype-kernel.readthedocs.io/en/latest/networking/XDP/index.html) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/extreme-traffic/high-connection-workloads.mdx b/calico-enterprise_versioned_docs/version-3.19-2/network-policy/extreme-traffic/high-connection-workloads.mdx deleted file mode 100644 index aa42a7b1c7..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/extreme-traffic/high-connection-workloads.mdx +++ /dev/null @@ -1,89 +0,0 @@ ---- -description: Create a Calico network policy rule to bypass Linux conntrack for traffic to workloads that experience extremely large number of connections. ---- - -# Enable extreme high-connection workloads - -## Big picture - -Use a $[prodname] network policy rule to bypass Linux conntrack for traffic to workloads that experience extremely large number of connections. - -## Value - -When the number of connections on a node exceeds the number of connections that Linux conntrack can track, connections can be rejected or dropped. $[prodname] network policy can be used to selectively bypass Linux conntrack for traffic to/from these types of workloads. - -## Concepts - -### Linux conntrack - -Connection tracking (“conntrack”) is a core feature of the Linux kernel’s networking stack. It allows the kernel to keep track of all logical network connections or flows, and thereby identify all of the packets that make up each flow so they can be handled consistently together. Conntrack is an essential part of the mainline Linux network processing pipeline, normally improving performance, and enabling NAT and stateful access control. - -### Extreme high-connection workloads - -Some niche workloads handling extremely high number of simultaneous connections, or very high rate of short lived connections, can exceed the maximum number of connections Linux conntrack is able to track. One real world example of such a workload is an extreme scale memcached server handling 50k+ connections per second. 
- -### $[prodname] doNotTrack network policy - -The $[prodname] global network policy option, **doNotTrack**, indicates to apply the rules in the policy before connection tracking, and that packets allowed by these rules should not be tracked. The policy is applied early in the Linux packet processing pipeline, before any regular network policy rules, and independent of the policy order field. - -Unlike normal network policy rules, doNotTrack network policy rules are stateless, meaning you must explicitly specify rules to allow return traffic that would normally be automatically allowed by conntrack. For example, for a server on port 999, the policy must include an ingress rule allowing inbound traffic to port 999, and an egress rule to allow outbound traffic from port 999. - -In a doNotTrack policy: - -- Ingress rules apply to all incoming traffic through a host endpoint, regardless of where the traffic is going -- Egress rules apply only to traffic that is sent from the host endpoint (not a local workload) - -Finally, you must add an **applyOnForward: true expression** for a **doNotTrack policy** to work. - -## Before you begin... - -Before creating a **doNotTrack** network policy, read this [blog](https://www.tigera.io/blog/when-linux-conntrack-is-no-longer-your-friend/) to understand use cases, benefits, and trade offs. - -## How to - -### Bypass connection traffic for high connection server - -In the following example, a memcached server pod with **hostNetwork: true** was scheduled on the node memcached-node-1. We create a HostEndpoint for the node. Next, we create a GlobalNetwork Policy with symmetrical rules for ingress and egress with doNotTrack and applyOnForward set to true. - -```yaml -apiVersion: projectcalico.org/v3 -kind: HostEndpoint -metadata: - name: memcached-node-1-eth0 - labels: - memcached: server -spec: - interfaceName: eth0 - node: memcached-node-1 - expectedIPs: - - 10.128.0.162 ---- -apiVersion: projectcalico.org/v3 -kind: GlobalNetworkPolicy -metadata: - name: memcached-server -spec: - selector: memcached == 'server' - applyOnForward: true - doNotTrack: true - ingress: - - action: Allow - protocol: TCP - source: - selector: memcached == 'client' - destination: - ports: - - 12211 - egress: - - action: Allow - protocol: TCP - source: - ports: - - 12211 - destination: - selector: memcached == 'client' -``` - -## Additional resources - -[Global network policy](../../reference/resources/globalnetworkpolicy.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/extreme-traffic/index.mdx b/calico-enterprise_versioned_docs/version-3.19-2/network-policy/extreme-traffic/index.mdx deleted file mode 100644 index 65e6316852..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/extreme-traffic/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: Use Calico network policy early in the Linux packet processing pipeline to handle extreme traffic scenarios. 
-hide_table_of_contents: true ---- - -# Policy for extreme traffic - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/get-started/about-kubernetes-egress.mdx b/calico-enterprise_versioned_docs/version-3.19-2/network-policy/get-started/about-kubernetes-egress.mdx deleted file mode 100644 index a1d5a48793..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/get-started/about-kubernetes-egress.mdx +++ /dev/null @@ -1,103 +0,0 @@ ---- -description: Learn why you should restrict egress traffic and how to do it. ---- - -# Kubernetes egress - -:::note - -This guide provides educational material that is not specific to $[prodname]. - -::: - -In this guide you will learn: - -- What is Kubernetes egress? -- Why should you restrict egress traffic and how can you do it? -- What is "NAT outgoing" and when is it used? -- What is an egress gateway, and why might you want to use one? - -## What is Kubernetes egress? - -In this guide we are using the term Kubernetes egress to describe connections being made from pods to anything outside of the cluster. - -In contrast to ingress traffic, where Kubernetes has the [Ingress](about-kubernetes-ingress.mdx) -resource type to help manage the traffic, there is no Kubernetes Egress resource. Instead, how the egress traffic is -handled at a networking level is determined by the Kubernetes network implementation / CNI plugin being used by the -cluster. In addition, if a service mesh is being used, this can add egress behaviors on top of those the -network implementation provides. - -There are three areas of behavior worth understanding for egress traffic, so you can choose a networking and/or service -mesh setup that best suits your needs: - -- Restricting egress traffic -- Outgoing NAT behavior -- Egress gateways - -## Restricting egress traffic - -It's a common security requirement and best practice to restrict outgoing connections from the cluster. This is normally -achieved using [Network Policy](about-network-policy.mdx) to define egress rules for each -microservice, often in conjunction with a [default deny](about-network-policy.mdx#default-deny) -policy that ensures outgoing connections are denied by default, until a policy is defined to explicitly allow specific -traffic. - -One limitation when using Kubernetes Network Policy to restrict access to specific external resources, is that the external -resources need to be specified as IP addresses (or IP address ranges) within the policy rules. If the IP addresses -associated with an external resource change, then every policy that referenced those IP addresses needs to be updated with -the new IP addresses. This limitation can be circumvented using $[prodname] [Use external IPs or networks rules in policy](../beginners/policy-rules/external-ips-policy.mdx), or [DNS policy](../domain-based-policy.mdx) in policy rules. - -Note in addition to everything mentioned so far, perimeter firewalls can also be used to restrict outgoing connections, -for example to allow connections only to particular external IP address ranges, or external services. However, since -perimeter firewalls typically cannot distinguish individual pods, the rules apply equally to all pods in the cluster. -This provides some defense in depth, but cannot replace the requirement for network policy. 
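
As a rough illustration of restricting egress with standard Kubernetes network policy (the namespace, labels, and CIDR below are invented for this example, not taken from the guides above), a policy that only lets a workload reach cluster DNS and one external address range might look like this:

```yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: restrict-egress-example
  namespace: example-ns
spec:
  # Applies to pods labelled app: example-client in this namespace
  podSelector:
    matchLabels:
      app: example-client
  policyTypes:
    - Egress
  egress:
    # Allow DNS lookups to the cluster DNS service
    - to:
        - namespaceSelector: {}
          podSelector:
            matchLabels:
              k8s-app: kube-dns
      ports:
        - protocol: UDP
          port: 53
    # Allow HTTPS to a single external address range
    - to:
        - ipBlock:
            cidr: 203.0.113.0/24
      ports:
        - protocol: TCP
          port: 443
```

Because no other egress rules are listed, all other outgoing connections from the selected pods are denied once the policy is in place, which is the default-deny pattern described above.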
- -## NAT outgoing - -Network Address Translation ([NAT](https://en.wikipedia.org/wiki/Network_address_translation)) is the process of mapping an IP address in a packet -to a different IP address as the packet passes through the device performing the NAT. Depending on the use case, NAT can -apply to the source or destination IP address, or to both addresses. - -In the context of Kubernetes egress, NAT is used to allow pods to connect to services outside of the cluster if the pods -have IP addresses that are not routable outside of the cluster (for example, if the pod network is an overlay). - -For example, if a pod in an overlay network attempts to connect to an IP address outside of the cluster, then the -node hosting the pod uses SNAT (Source Network Address Translation) to map the non-routable source IP address of the -packet to the node's IP address before forwarding on the packet. The node then maps response packets coming in the -opposite direction back to the original pod IP address, so packets flow end-to-end in both directions, with neither -pod or external service being aware the mapping is happening. - -In most clusters this NAT behavior is configured statically across the whole of the cluster. When using -$[prodname], the NAT behavior can be configured at a more granular level for particular address ranges using [IP pools](../../reference/resources/ippool.mdx) -. This effectively allows the scope of "non-routable" to be more -tightly defined than just "inside the cluster vs outside the cluster", which can be useful in some enterprise deployment -scenarios. - -## Egress gateways - -Another approach to Kubernetes egress is to route all outbound connections via one or more egress gateways. The gateways -SNAT (Source Network Address Translation) the connections so the external service being connected to sees the connection -as coming from the egress gateway. The main use case is to improve security, either with the egress gateway performing a -direct security role in terms of what connections it allows, or in conjunction with perimeter firewalls (or other -external entities). For example, so that perimeter firewalls see the connections coming from well known IP -addresses (the egress gateways) rather than from dynamic pod IP addresses they don't understand. - -Egress gateways are not a native concept in Kubernetes itself, but are implemented by some Kubernetes network -implementations and some service meshes. For example, $[prodname] provides egress gateway functionality, plus the -ability to map namespaces (or even individual pods) to specific egress gateways. Perimeter firewalls (or other external -security entities) can then effectively provide per namespace security controls, even though they do not have visibility -to dynamic pod IP addresses. - -As an alternative approach to egress gateways, $[prodname] allows you to control pod IP address ranges based on -namespace, or node, or even at the individual pod level. Assuming no outgoing NAT is required, this provides a very -simple way for perimeter firewalls (or other external security entities) to integrate with Kubernetes for both ingress -and egress traffic. (Note that this approach relies on having enough address space available to sensibly assign IP -address ranges, for example to each namespace, so it can lead to IP address range exhaustion challenges for large scale -deployments. In these scenarios, using egress gateways is likely to be a better option.) 
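
To make the per-pool NAT behavior mentioned above more concrete, the following sketch shows a $[prodname] IPPool that enables outgoing NAT for one address range only (the pool name and CIDR are illustrative, not a recommendation):

```yaml
apiVersion: projectcalico.org/v3
kind: IPPool
metadata:
  name: example-nat-pool
spec:
  # Pods allocated addresses from this range are SNATed when they
  # connect to destinations outside all configured IP pools.
  cidr: 10.65.0.0/16
  natOutgoing: true
```

A second pool with `natOutgoing: false` could then be used for workloads whose addresses are routable outside the cluster, so only overlay-addressed pods are NATed.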
- -## Additional resources - -- [Use external IPs or networks rules in policy](../beginners/policy-rules/external-ips-policy.mdx) -- [Restrict a pod to use an IP address in a specific range](../../networking/ipam/legacy-firewalls.mdx) -- [Assign IP addresses based on topology](../../networking/ipam/assign-ip-addresses-topology.mdx) -- [Egress gateways](../../networking/egress/index.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/get-started/about-kubernetes-ingress.mdx b/calico-enterprise_versioned_docs/version-3.19-2/network-policy/get-started/about-kubernetes-ingress.mdx deleted file mode 100644 index 1bf12e3c24..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/get-started/about-kubernetes-ingress.mdx +++ /dev/null @@ -1,136 +0,0 @@ ---- -description: Learn the different ingress implementations and how ingress and policy interact. ---- - -# Kubernetes ingress - -:::note - -This guide provides educational material -that is not specific to $[prodname]. - -::: - -In this guide you will learn: - -- What is Kubernetes ingress? -- Why use ingress? -- What are the differences between different ingress implementations? -- How do ingress and network policy interact? -- How do ingress and services fit together under the covers? - -## What is Kubernetes ingress? - -Kubernetes Ingress builds on top of Kubernetes [Services](about-kubernetes-services.mdx) to provide -load balancing at the application layer, mapping HTTP and HTTPS requests with particular domains or URLs to Kubernetes -services. Ingress can also be used to terminate SSL / TLS before load balancing to the service. - -The details of how Ingress is implemented depend on which [Ingress Controller](https://kubernetes.io/docs/concepts/services-networking/ingress-controllers/) you are using. The Ingress -Controller is responsible for monitoring Kubernetes [Ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/) resources and provisioning / configuring one -or more ingress load balancers to implement the desired load balancing behavior. - -Unlike Kubernetes services, which are handled at the network layer (L3-4), ingress load balancers operate at the -application layer (L5-7). Incoming connections are terminated at the load balancer so it can inspect the individual HTTP / -HTTPS requests. The requests are then forwarded via separate connections from the load balancer to the chosen service -backing pods. As a result, network policy applied to the backing pods can restrict access to only allow connections from the load -balancer, but cannot restrict access to specific original clients. - -## Why use Kubernetes Ingress? - -Given that Kubernetes [Services](about-kubernetes-services.mdx) already provide a mechanism for load -balancing access to services from outside of the cluster, why might you want to use Kubernetes Ingress? - -The mainline use case is if you have multiple HTTP / HTTPS services that you want to expose through a single external IP -address, perhaps with each service having a different URL path, or perhaps as multiple different domains. This is a lot -simpler from a client configuration point of view than exposing each service outside of the cluster using Kubernetes -Services, which would give each service a separate external IP address. - -If, on the other hand, your application architecture is fronted by a single "front end" microservice then Kubernetes -Services likely already meet your needs. 
In this case you might prefer to not add Ingress to the picture, both from a -simplicity point of view, and potentially also so you can more easily restrict access to specific clients using network -policy. In effect, your "front end" microservice already plays the role of Kubernetes Ingress, in a way that is not that -dissimilar to [in-cluster ingress](#in-cluster-ingress-solutions) solutions discussed below. - -## Types of Ingress solutions - -Broadly speaking there are two types of ingress solutions: - -- In-cluster ingress - where ingress load balancing is performed by pods within the cluster itself. -- External ingress - where ingress load balancing is implemented outside of the cluster by - appliances or cloud provider capabilities. - -### In-cluster ingress solutions - -In-cluster ingress solutions use software load balancers running in pods within the cluster itself. There are many -different ingress controllers to consider that follow this pattern, including for example the NGINX ingress controller. - -The advantages of this approach are that you can: - -- horizontally scale your ingress solution up to the limits of Kubernetes -- choose the ingress controller that best suits your specific needs, for example, with particular load balancing - algorithms, or security options. - -To get your ingress traffic to the in-cluster ingress pods, the ingress pods are normally exposed externally as a -Kubernetes service, so you can use any of the standard ways of accessing the service from outside of the cluster. A -common approach is use an external network load balancer or service IP advertisement, with `externalTrafficPolicy:local`. -This minimizes the number of network hops, and retains the client source IP address, which allows network policy to be used -to restrict access to the ingress pods to particular clients if desired. - -![In-cluster ingress](/img/calico-enterprise/ingress-in-cluster.svg) - -### External ingress solutions - -External ingress solutions use application load balancers outside of the cluster. The exact details and -features depend on which ingress controller you are using, but most cloud providers include an ingress controller that -automates the provisioning and management of the cloud provider's application load balancers to provide ingress. - -The advantages of this type of ingress solution is that your cloud provider handles the operational complexity of the -ingress for you. The downsides are a potentially more limited set of features compared to the rich range of in-cluster -ingress solutions, and the maximum number of services exposed by ingress being constrained by cloud provider specific -limits. - -![External ingress](/img/calico-enterprise/ingres-external.svg) - -Note that most application load balancers support a basic mode of operation of forwarding traffic to the chosen service -backing pods via the [node port](about-kubernetes-services.mdx#node-port-services) of the -corresponding service. - -In addition to this basic approach of load balancing to service node ports, some cloud providers support a second mode -of application layer load balancing, which load balances directly to the pods backing each service, without going via -node-ports or other kube-proxy service handling. This has the advantage of eliminating the potential second network hop -associated with node ports load balancing to a pod on a different node. 
The potential disadvantage is that if you are -operating at very high scales, for example with hundreds of pods backing a service, you may exceed the application layer -load balancers maximum limit of IPs it can load balance to in this mode. In this case switching to an in-cluster ingress -solution is likely the better fit for you. - -## Show me everything! - -All the above diagrams focus on connection level (L5-7) representation of ingress and services. You can learn more about -the network level (L3-4) interactions involved in handling the connections, including which scenarios client source IP -addresses are maintained, in the [About Kubernetes Services](about-kubernetes-services.mdx) guide. - -If you are already up to speed on how services work under the covers, here are some more complete diagrams that show details of how services are load balanced at the network layer (L3-4). - -:::note - -You can successfully use ingress without needing to understand this next level of detail! So feel free to skip -over these diagrams if you don't want to dig deeper into how services and ingress interact under the covers. - -::: - -**In-cluster ingress solution exposed as service type `LoadBalancer` with `externalTrafficPolicy:local`** - -![In-cluster ingress with NLB local](/img/calico-enterprise/ingress-in-cluster-nlb-local.svg) - -**External ingress solution via node ports** - -![External ingress via node port](/img/calico-enterprise/ingress-external-node-ports.svg) - -**External ingress solution direct to pods** - -![External ingress direct to pods](/img/calico-enterprise/ingress-external-direct-to-pods.svg) - -## Additional resources - -- [Video: Everything you need to know about Kubernetes Ingress networking ](https://www.projectcalico.org/everything-you-need-to-know-about-kubernetes-ingress-networking/) -- [Video: Everything you need to know about Kubernetes Services networking ](https://www.projectcalico.org/everything-you-need-to-know-about-kubernetes-services-networking/) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/get-started/about-kubernetes-services.mdx b/calico-enterprise_versioned_docs/version-3.19-2/network-policy/get-started/about-kubernetes-services.mdx deleted file mode 100644 index b09b201448..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/get-started/about-kubernetes-services.mdx +++ /dev/null @@ -1,141 +0,0 @@ ---- -description: Learn the three main service types and how to use them. ---- - -# Kubernetes services - -:::note - -This guide provides educational material that is not specific to $[prodname]. - -::: - -In this guide you will learn: - -- What are Kubernetes services? -- What are the differences between the three main service types and what do you use them for? -- How do services and network policy interact? -- Some options for optimizing how services are handled. - -## What are Kubernetes services? - -Kubernetes [Services](https://kubernetes.io/docs/concepts/services-networking/service/) provide a way of abstracting access to a group -of pods as a network service. The group of pods backing each service is usually defined using a [label selector](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels). 
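
For example, a minimal Service definition of this kind might look like the following sketch (the names and ports are placeholders for illustration):

```yaml
apiVersion: v1
kind: Service
metadata:
  name: my-svc
  namespace: my-namespace
spec:
  # Backing pods are selected by label
  selector:
    app: my-app
  ports:
    - protocol: TCP
      port: 80         # port clients connect to on the service
      targetPort: 8080 # port the backing pods listen on
```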
- -When a client connects to a Kubernetes service, the connection is load balanced to one of the pods backing the service, -as illustrated in this conceptual diagram: - -![Kubernetes Service conceptual diagram](/img/calico-enterprise/k8s-service-concept.svg) - -There are three main types of Kubernetes services: - -- Cluster IP - which is the usual way of accessing a service from inside the cluster -- Node port - which is the most basic way of accessing a service from outside the cluster -- Load balancer - which uses an external load balancer as a more sophisticated way to access a service from outside the - cluster. - -## Cluster IP services - -The default service type is `ClusterIP`. This allows a service to be accessed within the cluster via a virtual IP -address, known as the service Cluster IP. The Cluster IP for a service is discoverable through Kubernetes DNS. For -example, `my-svc.my-namespace.svc.cluster-domain.example`. The DNS name and Cluster IP address remain constant for the -life time of the service, even though the pods backing the service may be created or destroyed, and the number of pods -backing the service may change over time. - -In a typical Kubernetes deployment, kube-proxy runs on every node and is responsible for intercepting connections to -Cluster IP addresses and load balancing across the group of pods backing each service. As part of this process -[DNAT](../../networking/training/about-networking.mdx) is used to map the destination IP address from the Cluster IP to the -chosen backing pod. Response packets on the connection then have the NAT reverse on their way back to the pod that -initiated the connection. - -![kube-proxy cluster IP](/img/calico-enterprise/kube-proxy-cluster-ip.svg) - -Importantly, network policy is enforced based on the pods, not the service Cluster IP. (i.e. Egress network policy is -enforced for the client pod after the DNAT has changed the connection's destination IP to the chosen service backing -pod. And because only the destination IP for the connection is changed, ingress network policy for the backing pod sees the -original client pod as the source of the connection.) - -## Node port services - -The most basic way to access a service from outside the cluster is to use a service of type `NodePort`. A Node Port is a -port reserved on each node in the cluster through which the service can be accessed. In a typical Kubernetes deployment, -kube-proxy is responsible for intercepting connections to Node Ports and load balancing them across the pods backing -each service. - -As part of this process [NAT](../../networking/training/about-networking.mdx#nat) is used to map the destination IP address and -port from the node IP and Node Port, to the chosen backing pod and service port. In addition the source IP address is -mapped from the client IP to the node IP, so that response packets on the connection flow back via the original node, -where the NAT can be reversed. (It's the node which performed the NAT that has the connection tracking state needed to -reverse the NAT.) - -![kube-proxy node port](/img/calico-enterprise/kube-proxy-node-port.svg) - -Note that because the connection source IP address is SNATed to the node IP address, ingress network policy for the -service backing pod does not see the original client IP address. Typically this means that any such policy is limited to -restricting the destination protocol and port, and cannot restrict based on the client / source IP. 
This limitation can -be circumvented in some scenarios by using [externalTrafficPolicy](#externaltrafficpolicylocal) or by using -$[prodname]'s eBPF data plane [native service handling](#calico-ebpf-native-service-handling) (rather than kube-proxy) which preserves source IP address. - -## Load balancer services - -Services of type `LoadBalancer` expose the service via an external network load balancer (NLB). The exact type of -network load balancer depends on which public cloud provider or, if on-prem, which specific hardware load balancer integration is -integrated with your cluster. - -The service can be accessed from outside of the cluster via a specific IP address on the network load balancer, which by -default will load balance evenly across the nodes using the service node port. - -![kube-proxy load balancer](/img/calico-enterprise/kube-proxy-load-balancer.svg) - -Most network load balancers preserve the client source IP address, but because the service then goes via a node port, -the backing pods themselves do not see the client IP, with the same implications for network policy. As with node -ports, this limitation can be circumvented in some scenarios by using [externalTrafficPolicy](#externaltrafficpolicylocal) -or by using $[prodname]'s eBPF data plane [native service handling](#calico-ebpf-native-service-handling) (rather -than kube-proxy) which preserves source IP address. - -## Advertising service IPs - -One alternative to using node ports or network load balancers is to advertise service IP addresses over BGP. This -requires the cluster to be running on an underlying network that supports BGP, which typically means an on-prem -deployment with standard Top of Rack routers. - -$[prodname] supports advertising service Cluster IPs, or External IPs for services configured with one. If you are -not using Calico as your network plugin then [MetalLB](https://github.com/metallb/metallb) provides similar capabilities that work with a variety of different network -plugins. - -![kube-proxy service advertisement](/img/calico-enterprise/kube-proxy-service-advertisement.svg) - -## externalTrafficPolicy:local - -By default, whether using service type `NodePort` or `LoadBalancer` or advertising service IP addresses over BGP, -accessing a service from outside the cluster load balances evenly across all the pods backing the service, independent -of which node the pods are on. This behavior can be changed by configuring the service with -`externalTrafficPolicy:local` which specifies that connections should only be load balanced to pods backing the service -on the local node. - -When combined with services of type `LoadBalancer` or with $[prodname] service IP address advertising, traffic is -only directed to nodes that host at least one pod backing the service. This reduces the potential extra network hop -between nodes, and perhaps more importantly, to maintain the source IP address all the way to the pod, so network policy -can restrict specific external clients if desired. - -![kube-proxy service advertisement](/img/calico-enterprise/kube-proxy-service-local.svg) - -Note that in the case of services of type `LoadBalancer`, not all Load Balancers support this mode. And in the case of -service IP advertisement, the evenness of the load balancing becomes topology dependent. In this case, pod anti-affinity -rules can be used to ensure even distribution of backing pods across your topology, but this does add some complexity to -deploying the service. 
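
As a sketch of the setting itself, the change is a single field on the service (the API value is `Local`); the example below assumes a `LoadBalancer` service fronting pods labelled `app: frontend`:

```yaml
apiVersion: v1
kind: Service
metadata:
  name: frontend
spec:
  type: LoadBalancer
  # Only load balance to backing pods on the node that received the traffic,
  # preserving the client source IP address.
  externalTrafficPolicy: Local
  selector:
    app: frontend
  ports:
    - protocol: TCP
      port: 443
      targetPort: 8443
```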
- -## Calico eBPF native service handling - -As an alternative to using Kubernetes standard kube-proxy, $[prodname]'s [eBPF data plane](../../operations/ebpf/enabling-ebpf.mdx) - supports native service handling. This preserves source IP to -simplify network policy, offers DSR (Direct Server Return) to reduce the number of network hops for return traffic, and -provides even load balancing independent of topology, with reduced CPU and latency compared to kube-proxy. - -![kube-proxy service advertisement](/img/calico-enterprise/calico-native-service-handling.svg) - -# Additional resources - -- [Video: Everything you need to know about Kubernetes Services networking ](https://www.projectcalico.org/everything-you-need-to-know-about-kubernetes-services-networking/) -- [Blog: Introducing the Calico eBPF data plane](https://www.projectcalico.org/introducing-the-calico-ebpf-dataplane/) -- [Blog: Hands on with Calico eBPF native service handling](https://www.projectcalico.org/hands-on-with-calicos-ebpf-service-handling/) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/get-started/about-network-policy.mdx b/calico-enterprise_versioned_docs/version-3.19-2/network-policy/get-started/about-network-policy.mdx deleted file mode 100644 index e6221b2251..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/get-started/about-network-policy.mdx +++ /dev/null @@ -1,235 +0,0 @@ ---- -description: Learn the basics of Kubernetes and Calico Enterprise network policy ---- - -# What is network policy? - -:::note - -This guide provides educational material that is not specific to $[prodname]. - -::: - -Kubernetes and $[prodname] provide network policy APIs to help you secure your workloads. - -In this guide you will learn: - -- What network policy is and why it is important. -- The differences between Kubernetes and $[prodname] network policies and when you might want to use each. -- Some best practices for using network policy. - -## What is network policy? - -Network policy is the primary tool for securing a Kubernetes network. It allows you to easily restrict the network -traffic in your cluster so only the traffic that you want to flow is allowed. - -To understand the significance of network policy, let's briefly explore how network security was typically achieved -prior to network policy. Historically in enterprise networks, network security was provided by designing a physical -topology of network devices (switches, routers, firewalls) and their associated configuration. The physical topology -defined the security boundaries of the network. In the first phase of virtualization, the same network and network -device constructs were virtualized in the cloud, and the same techniques for creating specific network topologies of -(virtual) network devices were used to provide network security. Adding new applications or services often required -additional network design to update the network topology and network device configuration to provide the desired -security. - -In contrast, the [Kubernetes network model](../../networking/training/about-kubernetes-networking.mdx) defines a "flat" -network in which every pod can communicate with all other pods in the cluster using pod IP addresses. This approach -massively simplifies network design and allows new workloads to be scheduled dynamically anywhere in the cluster with no -dependencies on the network design. 
- -In this model, rather than network security being defined by network topology boundaries, it is defined using network -policies that are independent of the network topology. Network policies are further abstracted from the network by using -label selectors as their primary mechanism for defining which workloads can talk to which workloads, rather than IP -addresses or IP address ranges. - -## Why is network policy important? - -In an age where attackers are becoming more and more sophisticated, network security as a line of defense is more important -than ever. - -While you can (and should) use firewalls to restrict traffic at the perimeters of your network (commonly referred to as -north-south traffic), their ability to police Kubernetes traffic is often limited to a granularity of the cluster as a -whole, rather than to specific groups of pods, due to the dynamic nature of pod scheduling and pod IP addresses. In -addition, the goal of most attackers once they gain a small foothold inside the perimeter is to move laterally (commonly -referred to as east-west) to gain access to higher value targets, which perimeter based firewalls can't police against. - -Network policy on the other hand is designed for the dynamic nature of Kubernetes by following the standard Kubernetes -paradigm of using label selectors to define groups of pods, rather than IP addresses. And because network policy is -enforced within the cluster itself it can police both north-south and east-west traffic. - -Network policy represents an important evolution of network security, not just because it handles the dynamic nature of -modern microservices, but because it empowers dev and devops engineers to easily define network security themselves, -rather than needing to learn low-level networking details or raise tickets with a separate team responsible for managing -firewalls. Network policy makes it easy to define intent, such as "only this microservice gets to connect to the -database", write that intent as code (typically in YAML files), and integrate authoring of network policies into git -workflows and CI/CD processes. - -:::note - -Note: $[prodname] offers capabilities that can help perimeter firewalls integrate -more tightly with Kubernetes. However, this does not remove the need or value of network policies within the cluster itself.) - -::: - -## Kubernetes network policy - -Kubernetes network policies are defined using the Kubernetes [NetworkPolicy](https://kubernetes.io/docs/reference/kubernetes-api/policy-resources/network-policy-v1/) resource. - -The main features of Kubernetes network policies are: - -- Policies are namespace scoped (i.e. you create them within the context of a specific namespace just like, for example, pods) -- Policies are applied to pods using label selectors -- Policy rules can specify the traffic that is allowed to/from other pods, namespaces, or CIDRs -- Policy rules can specify protocols (TCP, UDP, SCTP), named ports or port numbers - -Kubernetes itself does not enforce network policies, and instead delegates their enforcement to network plugins. Most -network plugins implement the mainline elements of Kubernetes network policies, though not all implement every feature -of the specification. ($[prodname] does implement every feature, and was the original reference implementation of Kubernetes -network policies.) - -To learn more about Kubernetes network policies, read the [Get started with Kubernetes network policy](kubernetes-network-policy.mdx) - guide. 
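
As a brief illustration of the features listed above (the namespace, labels, and CIDR are invented for this example), a Kubernetes NetworkPolicy that admits traffic from another namespace and from an external address range might look like this:

```yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: api-allow-example
  namespace: payments
spec:
  podSelector:
    matchLabels:
      app: api
  policyTypes:
    - Ingress
  ingress:
    - from:
        # Pods in namespaces labelled team: storefront
        - namespaceSelector:
            matchLabels:
              team: storefront
        # Plus clients in one external CIDR
        - ipBlock:
            cidr: 192.0.2.0/24
      ports:
        - protocol: TCP
          port: 8080
```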
-
-## $[prodname] network policy
-
-In addition to enforcing Kubernetes network policy, $[prodname] supports its own
-namespaced [NetworkPolicy](../../reference/resources/networkpolicy.mdx) and non-namespaced
-[GlobalNetworkPolicy](../../reference/resources/globalnetworkpolicy.mdx) resources, which provide additional
-features beyond those supported by Kubernetes network policy. This includes support for:
-
-- policy ordering/priority
-- deny and log actions in rules
-- more flexible match criteria for applying policies and in policy rules, including matching on Kubernetes ServiceAccounts
-- ability to reference non-Kubernetes workloads in policies, including matching on
-  [NetworkSets](../../reference/resources/networkset.mdx) in policy rules
-
-While Kubernetes network policy applies only to pods, $[prodname] network policy can be applied to multiple types of
-endpoints including pods, VMs, and host interfaces.
-
-To learn more about $[prodname] network policies, read the [Get started with $[prodname] network policy](../beginners/calico-network-policy.mdx) guide.
-
-## Benefits of using $[prodname] for network policy
-
-### Full Kubernetes network policy support
-
-Unlike some other network policy implementations, $[prodname] implements the full set of Kubernetes network policy features.
-
-### Richer network policy
-
-$[prodname] network policies allow even richer traffic control than Kubernetes network policies if you need it. In addition,
-$[prodname] network policies allow you to create policy that applies across multiple namespaces using GlobalNetworkPolicy
-resources.
-
-### Mix Kubernetes and $[prodname] network policy
-
-Kubernetes and $[prodname] network policies can be mixed together seamlessly. One common use case for this is to split
-responsibilities between security / cluster ops teams and developer / service teams. For example, giving the security /
-cluster ops team RBAC permissions to define $[prodname] policies, and giving developer / service teams RBAC permissions to
-define Kubernetes network policies in their specific namespaces. As $[prodname] policy rules can be ordered to be enforced
-either before or after Kubernetes network policies, and can include actions such as deny and log, this allows the
-security / cluster ops team to define higher-level, more general-purpose rules, while empowering the developer /
-service teams to define their own fine-grained constraints on the apps and services they are responsible for.
-
-For more flexible control and delegation of responsibilities between two or more teams, $[prodname] extends this
-model to provide [hierarchical policy](#hierarchical-policy).
-
-![Example mix of network policy types](/img/calico-enterprise/example-k8s-calico-policy-mix.svg)
-
-### Ability to protect hosts and VMs
-
-As $[prodname] policies can be enforced on host interfaces, you can use them to protect your Kubernetes nodes (not
-just your pods), including, for example, limiting access to node ports from outside of the cluster. To learn more, check
-out the $[prodname] [policy for hosts](../hosts/index.mdx) guides.
-
-### Extendable with $[prodname]
-
-Calico Enterprise adds even richer network policy capabilities, with the ability
-to specify hierarchical policies, with each team having particular boundaries of trust, and FQDN / domain names in policy
-rules for restricting access to specific external services.
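-
-To give a feel for that extra expressiveness, here is a sketch of a $[prodname] NetworkPolicy that uses an explicit
-`order` and a final `Deny` rule, neither of which can be expressed with the Kubernetes NetworkPolicy resource. The
-namespace, labels, and port below are invented for this example.
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: NetworkPolicy
-metadata:
-  name: database-ingress
-  namespace: staging
-spec:
-  order: 100
-  selector: app == "database"
-  types:
-    - Ingress
-  ingress:
-    # Allow only the back-end pods to reach the database port.
-    - action: Allow
-      protocol: TCP
-      source:
-        selector: app == "back-end"
-      destination:
-        ports:
-          - 5432
-    # Explicitly deny (rather than simply not allow) everything else.
-    - action: Deny
-```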
- -## Best practices for network policies - -### Ingress and egress - -At a minimum we recommend that every pod is protected by network policy ingress rules that restrict what is allowed -to connect to the pod and on which ports. The best practice is also to define network policy egress rules that restrict -the outgoing connections that are allowed by pods themselves. Ingress rules protect your pod from attacks outside of the -pod. Egress rules help protect everything outside of the pod if the pod gets compromised, reducing the attack surface to -make moving laterally (east-west) or to prevent an attacker from exfiltrating compromised data from your cluster (north-south). - -### Policy schemas - -Due to the flexibility of network policy and labelling, there are often multiple different ways of labelling and writing -policies that can achieve the same particular goal. One of the most common approaches is to have a small number of -global policies that apply to all pods, and then a single pod specific policy that defines all the ingress and egress -rules that are particular to that pod. - -For example: - -```yaml -kind: NetworkPolicy -apiVersion: networking.k8s.io/v1 -metadata: - name: front-end - namespace: staging -spec: - podSelector: - matchLabels: - app: back-end - ingress: - - from: - - podSelector: - matchLabels: - app: front-end - ports: - - protocol: TCP - port: 443 - egress: - - to: - - podSelector: - matchLabels: - app: database - ports: - - protocol: TCP - port: 27017 - -``` - -### Default deny - -One approach to ensuring these best practices are being followed is to define [default deny](../beginners/kubernetes-default-deny.mdx) - network policies. These ensure that if no other policy is -defined that explicitly allows traffic to/from a pod, then the traffic will be denied. As a result, anytime a team -deploys a new pod, they are forced to also define network policy for the pod. It can be useful to use a $[prodname] -GlobalNetworkPolicy for this (rather than needing to define a policy every time a new namespace is created) and to -include some exceptions to the default deny (for example to allow pods to access DNS). - -For example, you might use the following policy to default-deny all (non-system) pod traffic except for DNS queries to kube-dns/core-dns. - -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalNetworkPolicy -metadata: - name: default-app-policy -spec: - namespaceSelector: has(projectcalico.org/name) && projectcalico.org/name not in {"kube-system", "calico-system", "calico-apiserver"} - types: - - Ingress - - Egress - egress: - - action: Allow - protocol: UDP - destination: - selector: k8s-app == "kube-dns" - ports: - - 53 -``` - -### Hierarchical policy - -[Calico Enterprise](../policy-tiers/tiered-policy.mdx) supports hierarchical network policy using policy tiers. RBAC -for each tier can be defined to restrict who can interact with each tier. This can be used to delegate trust across -multiple teams. - -![Example tiers](/img/calico-enterprise/example-tiers.svg) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/get-started/index.mdx b/calico-enterprise_versioned_docs/version-3.19-2/network-policy/get-started/index.mdx deleted file mode 100644 index 7626a83757..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/get-started/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: If you are new to Kubernetes, start with "Kubernetes policy" and learn the basics of enforcing policy for pod traffic. 
Otherwise, dive in and create more powerful policies with Calico policy. The good news is, Kubernetes and Calico policies are very similar and work alongside each other -- so managing both types is easy. -hide_table_of_contents: true ---- - -# Get started with policy - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/get-started/kubernetes-demo.mdx b/calico-enterprise_versioned_docs/version-3.19-2/network-policy/get-started/kubernetes-demo.mdx deleted file mode 100644 index 8bff7747b8..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/get-started/kubernetes-demo.mdx +++ /dev/null @@ -1,101 +0,0 @@ ---- -description: An interactive demo that visually shows how applying Kubernetes policy allows and denies connections. ---- - -# Kubernetes policy, demo - -The included demo sets up a frontend and backend service, as well as a client service, all -running on Kubernetes. It then configures network policy on each service. - -## Prerequisites - -To create a Kubernetes cluster which supports the Kubernetes network policy API, follow -one of our [getting started guides](../../getting-started/index.mdx). - -## Running the stars example - -### 1) Create the frontend, backend, client, and management-ui apps. - -```shell -kubectl create -f $[tutorialFilesURL]/00-namespace.yaml -kubectl create -f $[tutorialFilesURL]/01-management-ui.yaml -kubectl create -f $[tutorialFilesURL]/02-backend.yaml -kubectl create -f $[tutorialFilesURL]/03-frontend.yaml -kubectl create -f $[tutorialFilesURL]/04-client.yaml -``` - -Wait for all the pods to enter `Running` state. - -```bash -kubectl get pods --all-namespaces --watch -``` - -> Note that it may take several minutes to download the necessary Docker images for this demo. - -The management UI runs as a `NodePort` Service on Kubernetes, and shows the connectivity -of the Services in this example. - -You can view the UI by visiting `http://:30002` in a browser. - -Once all the pods are started, they should have full connectivity. You can see this by visiting the UI. Each service is -represented by a single node in the graph. - -- `backend` -> Node "B" -- `frontend` -> Node "F" -- `client` -> Node "C" - -### 2) Enable isolation - -Running following commands will prevent all access to the frontend, backend, and client Services. - -```shell -kubectl create -n stars -f $[tutorialFilesURL]/default-deny.yaml -kubectl create -n client -f $[tutorialFilesURL]/default-deny.yaml -``` - -#### Confirm isolation - -Refresh the management UI (it may take up to 10 seconds for changes to be reflected in the UI). -Now that we've enabled isolation, the UI can no longer access the pods, and so they will no longer show up in the UI. - -### 3) Allow the UI to access the services using network policy objects - -Apply the following YAML files to allow access from the management UI. - -```shell -kubectl create -f $[tutorialFilesURL]/allow-ui.yaml -kubectl create -f $[tutorialFilesURL]/allow-ui-client.yaml -``` - -After a few seconds, refresh the UI - it should now show the Services, but they should not be able to access each other anymore. - -### 4) Create the backend-policy.yaml file to allow traffic from the frontend to the backend - -```shell -kubectl create -f $[tutorialFilesURL]/backend-policy.yaml -``` - -Refresh the UI. 
You should see the following: - -- The frontend can now access the backend (on TCP port 6379 only). -- The backend cannot access the frontend at all. -- The client cannot access the frontend, nor can it access the backend. - -### 5) Expose the frontend service to the client namespace - -```shell -kubectl create -f $[tutorialFilesURL]/frontend-policy.yaml -``` - -The client can now access the frontend, but not the backend. Neither the frontend nor the backend -can initiate connections to the client. The frontend can still access the backend. - -To use $[prodname] to enforce egress policy on Kubernetes pods, see [the advanced policy demo](kubernetes-policy-advanced.mdx). - -### 6) (Optional) Clean up the demo environment - -You can clean up the demo by deleting the demo Namespaces: - -```bash -kubectl delete ns client stars management-ui -``` diff --git a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/get-started/kubernetes-network-policy.mdx b/calico-enterprise_versioned_docs/version-3.19-2/network-policy/get-started/kubernetes-network-policy.mdx deleted file mode 100644 index 00f1a04eee..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/get-started/kubernetes-network-policy.mdx +++ /dev/null @@ -1,179 +0,0 @@ ---- -description: Learn Kubernetes policy syntax, rules, and features for controlling network traffic. ---- - -# Get started with Kubernetes network policy - -## Big picture - -Kubernetes network policy lets administrators and developers enforce which network traffic is allowed using rules. - -## Value - -Kubernetes network policy lets developers secure access to and from their applications using the same simple language they use to deploy them. Developers can focus on their applications without understanding low-level networking concepts. Enabling developers to easily secure their applications using network policies supports a shift left DevOps environment. - -## Concepts - -The Kubernetes Network Policy API provides a standard way for users to define network policy for controlling network traffic. However, Kubernetes has no built-in capability to enforce the network policy. To enforce network policy, you must use a network plugin such as Calico. - -### Ingress and egress - -The bulk of securing network traffic typically revolves around defining egress and ingress rules. From the point of view of a Kubernetes pod, **ingress** is incoming traffic to the pod, and **egress** is outgoing traffic from the pod. In Kubernetes network policy, you create ingress and egress “allow” rules independently (egress, ingress, or both). - -### Default deny/allow behavior - -**Default allow** means all traffic is allowed by default, unless otherwise specified. -**Default deny** means all traffic is denied by default, unless explicitly allowed. - -## How to - -Before you create your first Kubernetes network policy, you need to understand the default network policy behaviors. If no Kubernetes network policies apply to a pod, then all traffic to/from the pod are allowed (default-allow). As a result, if you do not create any network policies, then all pods are allowed to communicate freely with all other pods. If one or more Kubernetes network policies apply to a pod, then only the traffic specifically defined in that network policy are allowed (default-deny). - -You are now ready to start fine-tuning traffic that should be allowed. 
- -- [Create ingress policies](#create-ingress-policies) -- [Allow ingress traffic from pods in the same namespace](#allow-ingress-traffic-from-pods-in-the-same-namespace) -- [Allow ingress traffic from pods in a different namespace](#allow-ingress-traffic-from-pods-in-a-different-namespace) -- [Create egress policies](#create-egress-policies) -- [Allow egress traffic from pods in the same namespace](#allow-egress-traffic-from-pods-in-the-same-namespace) -- [Allow egress traffic to IP addresses or CIDR range](#allow-egress-traffic-to-ip-addresses-or-cidr-range) -- [Best practice: create deny-all default network policy](#best-practice-create-deny-all-default-network-policy) -- [Create deny-all default ingress and egress network policy](#create-deny-all-default-ingress-and-egress-network-policy) - -### Create ingress policies - -Create ingress network policies to allow inbound traffic from other pods. - -Network policies apply to pods within a specific **namespace**. Policies can include one or more ingress rules. To specify which pods in the namespace the network policy applies to, use a **pod selector**. Within the ingress rule, use another pod selector to define which pods allow incoming traffic, and the **ports** field to define on which ports traffic is allowed. - -#### Allow ingress traffic from pods in the same namespace - -In the following example, incoming traffic to pods with label **color=blue** are allowed only if they come from a pod with **color=red**, on port **80**. - -```yaml -kind: NetworkPolicy -apiVersion: networking.k8s.io/v1 -metadata: - name: allow-same-namespace - namespace: default -spec: - podSelector: - matchLabels: - color: blue - ingress: - - from: - - podSelector: - matchLabels: - color: red - ports: - - port: 80 -``` - -#### Allow ingress traffic from pods in a different namespace - -To allow traffic from pods in a different namespace, use a namespace selector in the ingress policy rule. In the following policy, the namespace selector matches one or more Kubernetes namespaces and is combined with the pod selector that selects pods within those namespaces. - -:::note - -Namespace selectors can be used only in policy rules. The **spec.podSelector** applies to pods only in the same namespace as the policy. - -::: - -In the following example, incoming traffic is allowed only if they come from a pod with label **color=red**, in a namespace with label **shape=square**, on port **80**. - -```yaml -kind: NetworkPolicy -apiVersion: networking.k8s.io/v1 -metadata: - name: allow-different-namespace - namespace: default -spec: - podSelector: - matchLabels: - color: blue - ingress: - - from: - - podSelector: - matchLabels: - color: red - namespaceSelector: - matchLabels: - shape: square - ports: - - port: 80 -``` - -### Create egress policies - -Create egress network policies to allow outbound traffic from pods. - -#### Allow egress traffic from pods in the same namespace - -The following policy allows pod outbound traffic to other pods in the same namespace that match the pod selector. In the following example, outbound traffic is allowed only if they go to a pod with label **color=red**, on port **80**. 
- -```yaml -kind: NetworkPolicy -apiVersion: networking.k8s.io/v1 -metadata: - name: allow-egress-same-namespace - namespace: default -spec: - podSelector: - matchLabels: - color: blue - egress: - - to: - - podSelector: - matchLabels: - color: red - ports: - - port: 80 -``` - -#### Allow egress traffic to IP addresses or CIDR range - -Egress policies can also be used to allow traffic to specific IP addresses and CIDR ranges. Typically, IP addresses/ranges are used to handle traffic that is external to the cluster for static resources or subnets. - -The following policy allows egress traffic to pods in CIDR, **172.18.0.0/24**. - -```yaml -kind: NetworkPolicy -apiVersion: networking.k8s.io/v1 -metadata: - name: allow-egress-external - namespace: default -spec: - podSelector: - matchLabels: - color: red - egress: - - to: - - ipBlock: - cidr: 172.18.0.0/24 -``` - -### Best practice: create deny-all default network policy - -To ensure that all pods in the namespace are secure, a best practice is to create a default network policy. This avoids accidentally exposing an app or version that doesn’t have policy defined. - -#### Create deny-all default ingress and egress network policy - -The following network policy implements a default **deny-all** ingress and egress policy, which prevents all traffic to/from pods in the **policy-demo** namespace. Note that the policy applies to all pods in the policy-demo namespace, but does not explicitly allow any traffic. All pods are selected, but because the default changes when pods are selected by a network policy, the result is: **deny all ingress and egress traffic**. (Unless the traffic is allowed by another network policy). - -```yaml -kind: NetworkPolicy -apiVersion: networking.k8s.io/v1 -metadata: - name: default-deny - namespace: policy-demo -spec: - podSelector: - matchLabels: {} - policyTypes: - - Ingress - - Egress -``` - -## Additional resources - -- [Kubernetes Network Policy API documentation](https://v1-21.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#networkpolicyspec-v1-networking-k8s-io) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/get-started/kubernetes-policy-advanced.mdx b/calico-enterprise_versioned_docs/version-3.19-2/network-policy/get-started/kubernetes-policy-advanced.mdx deleted file mode 100644 index 101af301e9..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/get-started/kubernetes-policy-advanced.mdx +++ /dev/null @@ -1,342 +0,0 @@ ---- -description: Learn how to create more advanced Kubernetes network policies (namespace, allow and deny all ingress and egress). ---- - -# Kubernetes policy, advanced tutorial - -The Kubernetes `NetworkPolicy` API allows users to express ingress and egress policies (starting with Kubernetes 1.8.0) to Kubernetes pods -based on labels and ports. - -This guide walks through using Kubernetes `NetworkPolicy` to define more complex network policies. - -## Requirements - -- A working Kubernetes cluster and access to it using kubectl -- Your Kubernetes nodes have connectivity to the public internet -- You are familiar with [Kubernetes NetworkPolicy](kubernetes-policy-basic.mdx) - -## Tutorial flow - -1. Create the Namespace and Nginx Service -1. Deny all ingress traffic -1. Allow ingress traffic to Nginx -1. Deny all egress traffic -1. Allow egress traffic to kube-dns -1. Cleanup Namespace - -## 1. Create the namespace and nginx service - -We'll use a new namespace for this guide. 
Run the following commands to create it and a plain nginx service listening on port 80. - -```bash -kubectl create ns advanced-policy-demo -kubectl create deployment --namespace=advanced-policy-demo nginx --image=nginx -kubectl expose --namespace=advanced-policy-demo deployment nginx --port=80 -``` - -### Verify access - allowed all ingress and egress - -Open up a second shell session which has `kubectl` connectivity to the Kubernetes cluster and create a busybox pod to test policy access. This pod will be used throughout this tutorial to test policy access. - -```bash -kubectl run --namespace=advanced-policy-demo access --rm -ti --image busybox /bin/sh -``` - -This should open up a shell session inside the `access` pod, as shown below. - -``` -Waiting for pod advanced-policy-demo/access-472357175-y0m47 to be running, status is Pending, pod ready: false - -If you don't see a command prompt, try pressing enter. -/ # -``` - -Now from within the busybox "access" pod execute the following command to test access to the nginx service. - -```bash -wget -q --timeout=5 nginx -O - -``` - -It should return the HTML of the nginx welcome page. - -Still within the busybox "access" pod, issue the following command to test access to google.com. - -```bash -wget -q --timeout=5 google.com -O - -``` - -It should return the HTML of the google.com home page. - -## 2. Deny all ingress traffic - -Enable ingress isolation on the namespace by deploying a [default deny all ingress traffic policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/#default-deny-all-ingress-traffic). - -```bash -kubectl create -f - < - - -Welcome to nginx!... -``` - -After creating the policy, we can now access the nginx Service. - -## 4. Deny all egress traffic - -Enable egress isolation on the namespace by deploying a [default deny all egress traffic policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/#default-deny-all-egress-traffic). - -```bash -kubectl create -f - < - - -Welcome to nginx!... -``` - -Next, try to retrieve the home page of google.com. - -```bash -wget -q --timeout=5 google.com -O - -``` - -It should return: - -``` -wget: download timed out -``` - -Access to `google.com` times out because it can resolve DNS but has no egress access to anything other than pods with labels matching `app: nginx` in the `advanced-policy-demo` namespace. - -# 7. Clean up namespace - -You can clean up after this tutorial by deleting the advanced policy demo namespace. - -```bash -kubectl delete ns advanced-policy-demo -``` diff --git a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/get-started/kubernetes-policy-basic.mdx b/calico-enterprise_versioned_docs/version-3.19-2/network-policy/get-started/kubernetes-policy-basic.mdx deleted file mode 100644 index e9b9aee215..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/get-started/kubernetes-policy-basic.mdx +++ /dev/null @@ -1,207 +0,0 @@ ---- -description: Learn how to use basic Kubernetes network policy to securely restrict traffic to/from pods. ---- - -# Kubernetes policy, basic tutorial - -This guide provides a simple way to try out Kubernetes `NetworkPolicy` with $[prodname]. It requires a Kubernetes cluster configured with $[prodname] networking, and expects that you have `kubectl` configured to interact with the cluster. - -You can quickly and easily deploy such a cluster by following one of the [installation guides](../../getting-started/install-on-clusters/kubernetes/index.mdx). 
- -## Configure namespaces - -This guide will deploy pods in a Kubernetes namespace. Let's create the `Namespace` object for this guide. - -```bash -kubectl create ns policy-demo -``` - -## Create demo pods - -We'll use Kubernetes `Deployment` objects to easily create pods in the namespace. - -1. Create some nginx pods in the `policy-demo` namespace. - - ```bash - kubectl create deployment --namespace=policy-demo nginx --image=nginx - ``` - -1. Expose them through a service. - - ```bash - kubectl expose --namespace=policy-demo deployment nginx --port=80 - ``` - -1. Ensure the nginx service is accessible. - - ```bash - kubectl run --namespace=policy-demo access --rm -ti --image busybox /bin/sh - ``` - - This should open up a shell session inside the `access` pod, as shown below. - - ``` - Waiting for pod policy-demo/access-472357175-y0m47 to be running, status is Pending, pod ready: false - - If you don't see a command prompt, try pressing enter. - - / # - ``` - -1. From inside the `access` pod, attempt to reach the `nginx` service. - - ```bash - wget -q nginx -O - - ``` - - You should see a response from `nginx`. Great! Our service is accessible. You can exit the pod now. - -## Enable isolation - -Let's turn on isolation in our `policy-demo` namespace. $[prodname] will then prevent connections to pods in this namespace. - -Running the following command creates a NetworkPolicy which implements a default deny behavior for all pods in the `policy-demo` namespace. - -```bash -kubectl create -f - < diff --git a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/hosts/kubernetes-nodes.mdx b/calico-enterprise_versioned_docs/version-3.19-2/network-policy/hosts/kubernetes-nodes.mdx deleted file mode 100644 index c150726e5a..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/hosts/kubernetes-nodes.mdx +++ /dev/null @@ -1,216 +0,0 @@ ---- -description: Protect Kubernetes nodes with host endpoints managed by Calico Enterprise. ---- - -# Protect Kubernetes nodes - -## Big picture - -Secure Kubernetes nodes with host endpoints managed by $[prodname]. - -## Value - -$[prodname] can automatically create host endpoints for your Kubernetes nodes. This means $[prodname] can manage the lifecycle of host endpoints as your cluster evolves, ensuring nodes are always protected by policy. - -## Concepts - -## Host endpoints - -Each host has one or more network interfaces that it uses to communicate externally. You can represent these interfaces in $[prodname] using host endpoints and then use network policy to secure them. - -$[prodname] host endpoints can have labels, and they work the same as labels on workload endpoints. The network policy rules can apply to both workload and host endpoints using label selectors. - -Automatic host endpoints secure all of the host's interfaces (i.e. in Linux, all the interfaces in the host network namespace). They are created by setting `interfaceName: "*"`. - -## Automatic host endpoints - -$[prodname] creates a wildcard host endpoint for each node, with the host endpoint containing the same labels and IP addresses as its corresponding node. -$[prodname] will ensure these managed host endpoints maintain the same labels and IP addresses as its node by periodic syncs. -This means that policy targeting these automatic host endpoints will function correctly with the policy put in place to select those nodes, even if over time the node's IPs or labels change. 
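-
-As a rough sketch, an automatically created host endpoint for a node called `node1` might look something like the
-following (the node name, hostname label, and IP address are invented for illustration; the full set of labels is
-whatever is on the corresponding Node resource):
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: HostEndpoint
-metadata:
-  name: node1-auto-hep
-  labels:
-    kubernetes.io/hostname: node1
-    projectcalico.org/created-by: calico-kube-controllers
-spec:
-  node: node1
-  interfaceName: '*'
-  expectedIPs:
-    - 10.0.0.10
-```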
- -Automatic host endpoints are differentiated from other host endpoints by the label `projectcalico.org/created-by: calico-kube-controllers`. -Enable or disable automatic host endpoints by configuring the default KubeControllersConfiguration resource. - -## Before you begin - -**Unsupported** - -- GKE - -## How to - -- [Enable automatic host endpoints](#enable-automatic-host-endpoints) -- [Apply network policy to automatic host endpoints](#apply-network-policy-to-automatic-host-endpoints) - -### Enable automatic host endpoints - -To enable automatic host endpoints, edit the default KubeControllersConfiguration instance, and set `spec.controllers.node.hostEndpoint.autoCreate` to `true`: - -```bash -kubectl patch kubecontrollersconfiguration default --patch='{"spec": {"controllers": {"node": {"hostEndpoint": {"autoCreate": "Enabled"}}}}}' -``` - -If successful, host endpoints are created for each of your cluster's nodes: - -```bash -kubectl get heps -o wide -``` - -The output may look similar to this: - -``` -kubectl get heps -o wide -NAME CREATED AT -ip-172-16-101-147.us-west-2.compute.internal-auto-hep 2021-05-12T22:16:47Z -ip-172-16-101-54.us-west-2.compute.internal-auto-hep 2021-05-12T22:16:47Z -ip-172-16-101-79.us-west-2.compute.internal-auto-hep 2021-05-12T22:16:47Z -ip-172-16-101-9.us-west-2.compute.internal-auto-hep 2021-05-12T22:16:47Z -ip-172-16-102-63.us-west-2.compute.internal-auto-hep 2021-05-12T22:16:47Z -``` - -### Apply network policy to automatic host endpoints - -To apply policy that targets all Kubernetes nodes, first add a label to the nodes. -The label will be synced to their automatic host endpoints. - -For example, to add the label **kubernetes-host** to all nodes and their host endpoints: - -```bash -kubectl label nodes --all kubernetes-host= -``` - -And an example policy snippet: - -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalNetworkPolicy -metadata: - name: all-nodes-policy -spec: - selector: has(kubernetes-host) - # -``` - -To select a specific set of host endpoints (and their corresponding Kubernetes nodes), use a policy selector that selects a label unique to that set of host endpoints. -For example, if we want to add the label **environment=dev** to nodes named node1 and node2: - -```bash -kubectl label node node1 environment=dev -kubectl label node node2 environment=dev -``` - -With the labels in place and automatic host endpoints enabled, host endpoints for node1 and node2 will be updated with the **environment=dev** label. -We can write policy to select that set of nodes with a combination of selectors: - -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalNetworkPolicy -metadata: - name: some-nodes-policy -spec: - selector: has(kubernetes-host) && environment == 'dev' - # -``` - -## Tutorial - -This tutorial will lock down Kubernetes node ingress to only allow SSH and required ports for Kubernetes to function. -We will apply two policies: one for the control plane nodes. and one for the worker nodes. - -:::note - -Note: This tutorial was tested on a cluster created with kubeadm v1.18.2 on AWS, using a "stacked etcd" [topology](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/ha-topology/). Stacked etcd topology means the etcd pods are running on the masters. kubeadm uses stacked etcd by default. 
-If your Kubernetes cluster is on a different platform, is running a variant of Kubernetes, or is running a topology with an external etcd cluster, -please review the required ports for control plane and worker nodes in your cluster and adjust the policies in this tutorial as needed. - -::: - -First, let's restrict ingress traffic to the control plane nodes. The ingress policy below contains three rules. -The first rule allows access to the API server port from anywhere. The second rule allows all traffic to localhost, which -allows Kubernetes to access control plane processes. These control plane processes includes the etcd server client API, the scheduler, and the controller-manager. -This rule also allows localhost access to the kubelet API and calico/node health checks. -And the final rule allows the etcd pods to peer with each other and allows the masters to access each others kubelet API. - -If you have not modified the failsafe ports, you should still have SSH access to the nodes after applying this policy. -Now apply the ingress policy for the Kubernetes masters: - -``` -kubectl apply -f - << EOF -apiVersion: projectcalico.org/v3 -kind: GlobalNetworkPolicy -metadata: - name: ingress-k8s-masters -spec: - selector: has(node-role.kubernetes.io/control-plane) - # This rule allows ingress to the Kubernetes API server. - ingress: - - action: Allow - protocol: TCP - destination: - ports: - # kube API server - - 6443 - # This rule allows all traffic to localhost. - - action: Allow - destination: - nets: - - 127.0.0.0/8 - # This rule is required in multi-master clusters where etcd pods are colocated with the masters. - # Allow the etcd pods on the masters to communicate with each other. 2380 is the etcd peer port. - # This rule also allows the masters to access the kubelet API on other masters (including itself). - - action: Allow - protocol: TCP - source: - selector: has(node-role.kubernetes.io/control-plane) - destination: - ports: - - 2380 - - 10250 -EOF -``` - -Note that the above policy selects the standard **node-role.kubernetes.io/control-plane** label that kubeadm sets on control plane nodes. - -Next, we need to apply policy to restrict ingress to the Kubernetes workers. -Before adding the policy we will add a label to all of our worker nodes, which then gets added to its automatic host endpoint. -For this tutorial we will use **kubernetes-worker**. An example command to add the label to worker nodes: - -```bash -kubectl get node -l '!node-role.kubernetes.io/control-plane' -o custom-columns=NAME:.metadata.name | tail -n +2 | xargs -I{} kubectl label node {} kubernetes-worker= -``` - -The workers' ingress policy consists of two rules. The first rule allows all traffic to localhost. As with the masters, -the worker nodes need to access their localhost kubelet API and calico/node healthcheck. -The second rule allows the masters to access the workers kubelet API. Now apply the policy: - -``` -kubectl apply -f - << EOF -apiVersion: projectcalico.org/v3 -kind: GlobalNetworkPolicy -metadata: - name: ingress-k8s-workers -spec: - selector: has(kubernetes-worker) - # Allow all traffic to localhost. - ingress: - - action: Allow - destination: - nets: - - 127.0.0.0/8 - # Allow only the masters access to the nodes kubelet API. 
- - action: Allow - protocol: TCP - source: - selector: has(node-role.kubernetes.io/control-plane) - destination: - ports: - - 10250 -EOF -``` - -## Additional resources - -- [Protect hosts tutorial](protect-hosts-tutorial.mdx) -- [Apply policy to Kubernetes node ports](../beginners/services/kubernetes-node-ports.mdx) -- [Global network policy](../../reference/resources/globalnetworkpolicy.mdx) -- [Host endpoints](../../reference/resources/hostendpoint.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/hosts/protect-hosts-tutorial.mdx b/calico-enterprise_versioned_docs/version-3.19-2/network-policy/hosts/protect-hosts-tutorial.mdx deleted file mode 100644 index 2d7223d943..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/hosts/protect-hosts-tutorial.mdx +++ /dev/null @@ -1,192 +0,0 @@ ---- -description: Learn how to secure incoming traffic from outside the cluster using Calico host endpoints with network policy, including allowing controlled access to specific Kubernetes services. ---- - -# Protect hosts tutorial - -Imagine that the administrator of a Kubernetes cluster wants to secure it as much as -possible against incoming traffic from outside the cluster. But suppose that -the cluster provides various useful services that are exposed as Kubernetes -NodePorts, i.e., as well-known TCP port numbers that appear to be available on -any node in the cluster. The administrator does want to expose some -of those NodePorts to traffic from outside. - -In this example we will use pre-DNAT policy applied to the external interfaces -of each cluster node: - -- to disallow incoming traffic from outside, in general - -- but then to allow incoming traffic to particular NodePorts. - -We use pre-DNAT policy for these purposes, instead of normal host endpoint -policy, because: - -1. We want the protection against general external traffic to apply regardless - of where that traffic is destined for - for example, to a locally hosted - pod, or to a pod on another node, or to a local server process running on - the host itself. Pre-DNAT policy is enforced in all of those cases - as we - want - whereas normal host endpoint policy is not enforced for traffic going - to a local pod. - -2. We want to write this policy in terms of the advertised NodePorts, not in - terms of whatever internal port numbers those may be transformed to. - kube-proxy on the ingress node will use a DNAT to change a NodePort number - and IP address to those of one of the pods that backs the relevant Service. - Our policy therefore needs to take effect _before_ that DNAT - and that - means that it must be a pre-DNAT policy. - -:::note - -Note: This tutorial is intended to be used with named host endpoints, i.e. host endpoints with `interfaceName` set to a specific interface name. -This tutorial does not work, as-is, with host endpoints with `interfaceName: "*"`. - -::: - -Here is the pre-DNAT policy that we need to disallow incoming external traffic -in general: - -```bash -kubectl apply -f - < -``` - -and then using `host-endpoint==''` as the selector of the -`allow-nodeport` policy, instead of `has(host-endpoint)`. 
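-
-As a sketch of that approach (the label value `production`, node name, interface, and IP address below are purely
-illustrative and not taken from the original tutorial), the labelled host endpoint might look like this:
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: HostEndpoint
-metadata:
-  name: node1-eth0
-  labels:
-    host-endpoint: production
-spec:
-  node: node1
-  interfaceName: eth0
-  expectedIPs:
-    - 10.0.0.10
-```
-
-The `allow-nodeport` policy would then use `selector: host-endpoint == 'production'` so that it is enforced only on
-host endpoints carrying that label value, rather than on every host endpoint that merely has a `host-endpoint` label.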
diff --git a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/hosts/protect-hosts.mdx b/calico-enterprise_versioned_docs/version-3.19-2/network-policy/hosts/protect-hosts.mdx deleted file mode 100644 index 1de3a814f4..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/hosts/protect-hosts.mdx +++ /dev/null @@ -1,186 +0,0 @@ ---- -description: Create Calico Enterprise network policies to restrict traffic to/from hosts. ---- - -# Protect hosts and VMs - -## Big picture - -Use $[prodname] network policy to restrict traffic to/from hosts and VMs. - -## Value - -Restricting traffic between hosts and the outside world is not unique to $[prodname]; many solutions provide this capability. However, the advantage of using $[prodname] to protect the host is you can use the same $[prodname] policy configuration as workloads. You only need to learn one tool. Write a cluster-wide policy, and it is immediately applied to every host. - -## Concepts - -### Hosts and workloads - -In the context of $[prodname] configuration, a *workload* is a containerized compute instance running in Kubernetes. -A *host* is a computer or virtual machine (VM) that acts as a node in a Kubernetes cluster or that runs application workloads outside of Kubernetes. -$[prodname] is unique in that it can enforce network policy and provide visibility in a consistent way for both workloads and hosts, even if those hosts are on-premises servers or VMs running in the public cloud. - -### Host endpoints - -Each host has one or more network interfaces that it uses to communicate externally. You can use $[prodname] network policy to secure these interfaces (called host endpoints). $[prodname] host endpoints can have labels, and they work the same as labels on workload endpoints. The network policy rules can apply to both workload and host endpoints using label selectors. - -### Failsafe rules - -It is easy to inadvertently cut all host connectivity because of nonexistent or misconfigured network policy. To avoid this, $[prodname] provides failsafe rules with default/configurable ports that are open on all host endpoints. - -### Default behavior of workload to host traffic - -By default, $[prodname] blocks all connections from a workload to its local host. You can control whether connections from a workload endpoint to its local host are dropped, returned, or accepted using a simple parameter. - -$[prodname] allows all connections from processes running on the host to guest workloads on the host. This allows host processes to run health checks and debug guest workloads. - -### Default behavior of external traffic to/from host - -If a host endpoint is added and network policy is not in place, the $[prodname] default is to deny traffic to/from that endpoint (except for traffic allowed by failsafe rules). For host endpoints, $[prodname] blocks traffic only to/from interfaces that it’s been explicitly told about in network policy. Traffic to/from other interfaces is ignored. - -### Other host protection - -In terms of design consistency in $[prodname], you may wonder about the following use cases. - -**Does $[prodname] protect a local host from workloads?**
    -Yes. DefaultEndpointToHostAction controls whether or not workloads can access their local host.
    - -**Does $[prodname] protect a workload from the host it is running on?**
    -No. $[prodname] allows connections the host makes to the workloads running on that host. Some orchestrators like Kubernetes depend on this connectivity for health checking the workload. Moreover, processes running on the local host are often privileged enough to override local $[prodname] policy. Be very cautious with the processes that you allow to run in the host's root network namespace. - -## Before you begin... - -If you are already running $[prodname] for Kubernetes, you are good to go. If you want to install $[prodname] on a non-cluster machine for host protection only, see [Non-cluster hosts](../../getting-started/bare-metal/index.mdx). - -## How to - -- [Avoid accidentally cutting all host connectivity ](#avoid-accidentally-cutting-all-host-connectivity) -- [Use policy to restrict host traffic](#use-policy-to-restrict-host-traffic) -- [Control default behavior of workload endpoint to host traffic](#control-default-behavior-of-workload-endpoint-to-host-traffic) - -### Avoid accidentally cutting all host connectivity - -To avoid inadvertently cutting all host connectivity because of nonexistent or misconfigured network policy, $[prodname] uses failsafe rules that open specific ports and CIDRs on all host endpoints. - -Review the following table to determine if the defaults work for your implementation. If not, change the default ports using the parameters, **FailsafeInboundHostPorts** and **FailsafeOutboundHostPorts** in [Configuring Felix](../../reference/component-resources/node/felix/configuration.mdx#environment-variables). - -| Port | Protocol | CIDR | Direction | Purpose | -| ---- | -------- | --------- | ------------------ | ------------------------------------ | -| 22 | TCP | 0.0.0.0/0 | Inbound | SSH access | -| 53 | UDP | 0.0.0.0/0 | Outbound | DNS queries | -| 67 | UDP | 0.0.0.0/0 | Outbound | DHCP access | -| 68 | UDP | 0.0.0.0/0 | Inbound | DHCP access | -| 179 | TCP | 0.0.0.0/0 | Inbound & Outbound | BGP access ($[prodname] networking) | -| 2379 | TCP | 0.0.0.0/0 | Inbound & Outbound | etcd access | -| 2380 | TCP | 0.0.0.0/0 | Inbound & Outbound | etcd access | -| 6443 | TCP | 0.0.0.0/0 | Inbound & Outbound | Kubernetes API server access | -| 6666 | TCP | 0.0.0.0/0 | Inbound & Outbound | etcd self-hosted service access | -| 6667 | TCP | 0.0.0.0/0 | Inbound & Outbound | etcd self-hosted service access | - -### Use policy to restrict host traffic - -#### Step 1: Create policy to restrict host traffic - -Although failsafe rules provide protection from removing all connectivity to a host, you should create a GlobalNetworkPolicy policy that restricts host traffic. - -In the following example, we use a **GlobalNetworkPolicy** that applies to all worker nodes (defined by a label). Ingress SSH access is allowed from a defined "management" subnet. - -**Ingress traffic** is also allowed for ICMP, and on TCP port 10250 (default kubelet port). **Egress** traffic is allowed to etcd on a particular IP, and UDP on port 53 and 67 for DNS and DHCP. 
- -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalNetworkPolicy -metadata: - name: k8s-worker -spec: - selector: "role == 'k8s-worker'" - order: 0 - ingress: - - action: Allow - protocol: TCP - source: - nets: - - '' - destination: - ports: [22] - - action: Allow - protocol: ICMP - - action: Allow - protocol: TCP - destination: - ports: [10250] - egress: - - action: Allow - protocol: TCP - destination: - nets: - - '/32' - ports: [2379] - - action: Allow - protocol: UDP - destination: - ports: [53, 67] -``` - -#### Step 2: Create host endpoints - -For each host point that you want to secure with policy, you must create a **HostEndpoint** object. To do that, you need the name of the $[prodname] node on the host that owns the interface; in most cases, it is the same as the hostname of the host. - -In the following example, we create a HostEndpoint for the host named **my-host** with the interface named **eth0**, with **IP 10.0.0.1**. Note that the value for **node:** must match the hostname used on the $[prodname] node object. - -When the HostEndpoint is created, traffic to or from the interface is dropped unless policy is in place. - -```yaml -apiVersion: projectcalico.org/v3 -kind: HostEndpoint -metadata: - name: my-host-eth0 - labels: - role: k8s-worker - environment: production -spec: - interfaceName: eth0 - node: my-host - expectedIPs: ['10.0.0.1'] -``` - -### Control default behavior of workload endpoint to host traffic - -The default $[prodname] behavior blocks all connections from workloads to their local host (after traffic passes any egress policy applied to the workload). You can change this behavior using the **DefaultEndpointToHostAction** parameter in Felix configuration. - -This parameter works at the IP table level, where you can specify packet behavior to **Drop** (default), **Accept**, or **Return**. - -To change this parameter for all hosts, edit the **FelixConfiguration** object named “default.” - -1. Get a copy of the object to edit. - - ```bash - kubectl get felixconfiguration default -o yaml > default-felix-config.yaml - ``` - -1. Open the file in a text editor and add the parameter, **defaultEndpointToHostAction**. For example: - - ```yaml - apiVersion: projectcalico.org/v3 - kind: FelixConfiguration - metadata: - name: default - spec: - ipipEnabled: true - logSeverityScreen: Info - reportingInterval: 0s - defaultEndpointToHostAction: Accept - ``` - -1. Update the FelixConfiguration on the cluster. - ```bash - kubectl apply -f default-felix-config.yaml - ``` - -## Additional resources - -- [Apply policy to Kubernetes node ports](../beginners/services/kubernetes-node-ports.mdx) -- [Protect Kubernetes nodes with host endpoints managed by $[prodname]](kubernetes-nodes.mdx) -- [Defend against DoS attacks](../extreme-traffic/defend-dos-attack.mdx) -- [Global network policy](../../reference/resources/globalnetworkpolicy.mdx) -- [Host endpoint](../../reference/resources/hostendpoint.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/index.mdx b/calico-enterprise_versioned_docs/version-3.19-2/network-policy/index.mdx deleted file mode 100644 index 2831201964..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/index.mdx +++ /dev/null @@ -1,65 +0,0 @@ ---- -description: Calico Enterprise Network Policy and Calico Enterprise Global Network Policy are the fundamental resources to secure workloads and hosts, and to adopt a zero trust security model. 
---- - -import { DocCardLink, DocCardLinkLayout } from '/src/___new___/components'; - -# Network policy - -Writing network policies is how you restrict traffic to pods in your Kubernetes cluster. -$[prodname] extends the standard `NetworkPolicy` object to provide advanced network policy features, such as policies that apply to all namespaces. - -## Getting started - - - - - - - - - - - -## Policy rules - - - - - - - - - - -## Policy for hosts and VMs - - - - - - - - -## Policy tiers - - - - - - - - -## Policy for services - - - - - - -## Policy for extreme traffic - - - - - \ No newline at end of file diff --git a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/networksets.mdx b/calico-enterprise_versioned_docs/version-3.19-2/network-policy/networksets.mdx deleted file mode 100644 index 2951036f89..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/networksets.mdx +++ /dev/null @@ -1,267 +0,0 @@ ---- -description: Learn the power of network sets and why you should create them. ---- - -# Get started with network sets - -## Visualize traffic to/from your cluster - -Modern applications often integrate with third-party APIs and SaaS services that live outside Kubernetes clusters. To securely enable access to those integrations, you must be able to limit IP ranges for egress and ingress traffic to workloads. Limiting IP lists or ranges is also used to deny-list bad actors or embargoed countries. To limit IP ranges, you need to use the $[prodname] resource called **network sets**. - -## What are network sets? - -**Network sets** are a grouping mechanism that allows you to create an arbitrary set of IP subnetworks/CIDRs or domains that can be matched by standard label selectors in Kubernetes or $[prodname] network policy. Like IP pools for pods, they allow you to reuse/scale sets of IP addresses in policies. - -A **network set** is a namespaced resource that you can use with Kubernetes or $[prodname] network policies; a **global network set** is a cluster-wide resource that you can use with $[prodname] network policies. - -Like network policy, you manage user access to network sets using standard Kubernetes RBAC. - -## Why are network sets powerful? - -If you are familiar with Service Graph in the web console, you know the value of seeing pod-to-pod traffic within your cluster. But what about traffic external to your cluster? - -$[prodname] automatically detects IPs for pods and nodes that fall into the standard IETF “public network” and “private network” designations, and displays those as icons in Service Graph. So you get some visibility into external traffic without using any network sets. - -![public-private-networks](/img/calico-enterprise/public-private-networks.png) - -However, when you create network sets, you can get more granular visibility into what's leaving the cluster to public networks. Because you control the grouping, the naming, and labeling, you create visibility that is customized to your organization. This is why they are so powerful. - -Here are just a few examples of how network sets can be used: - -- **Egress access control** - - Network sets are a key resource for defining egress access controls; for example, securing ingress to microservices/apps or egress from workloads outside the cluster. - -- **Troubleshooting** - - Network sets appear as additional metadata in flow logs and Kibana, Flow Visualizer, and Service Graph. - -- **Efficiency and scaling** - - Network sets are critical when scaling your deployment. 
You may have only a few CIDRs when you start. But as you scale out, it is easier to update a handful of network sets than update each network policy individually. Also, in a Kubernetes deployment, putting lots of anything (CIDRs, ports, policy rules) directly into policies causes inefficiencies in traffic processing (iptables/eBPF). - -- **Microsegmentation and shift left** - - Network sets provide the same microsegmentation controls as network policy. For example, you can allow specific users to create policies (that reference network sets), but allow only certain users to manage network sets. - -- **Threat defense** - - Network sets are key to being able to manage threats by blocking bad IPs with policy in a timely way. Imagine having to update individual policies when you find a bad IP you need to quickly block. You can even give access to a controller that automatically updates CIDRs in a network set when a bad IP is found. - -## Create a network set and use it in policy - -In this section, we’ll walk through how to create a namespaced network set in the web console. You can follow along using your cluster or tigera-labs cluster. - -In this example, you will create a network set named, `google`. This network set contains a list of trusted google endpoints for a microservice called, `hipstershop`. As a service owner, you want to be able to see traffic leaving the microservices in Service Graph. Instead of matching endpoints on IP addresses, we will use domain names. - -1. From the left navbar, click **Network Sets**. -1. Click **Add Network Set**, and enter these values. - - For Name: `google` - - For Scope: Select **Namespace** and select, `hipstershop` -1. Under Labels, click **Add label**. - - In the Select key field, enter `destinations` and click the green bar to add this new entry. - - In the Value field, enter `google`, click the green bar to add the entry, and save. -1. For Domains, click **+Add Domain** and these URLs: `clouddebugger.googleapis.com`, `cloudtrace.googleapis.com`, `metadata.google.internal`, `monitoring.googleapis.com`. -1. Click **Create Network Set**. - -You’ve created your first network set. - -![add-networkset-google](/img/calico-enterprise/add-networkset-google.png) - -The YAML looks like this: - -```yaml -kind: NetworkSet -apiVersion: projectcalico.org/v3 -metadata: - name: google - labels: - destinations: google - namespace: hipstershop -spec: - nets: [] - allowedEgressDomains: - - clouddebugger.googleapis.com - - cloudtrace.googleapis.com - - metadata.google.internal - - monitoring.googleapis.com -``` - -Next, we write a DNS policy for hipstershop that allows egress traffic to the trusted google sites. The following network policy allows egress access for all destination selectors labeled, `google`. Note that putting domains in a network set referencing it in policy is the best practice. Also, note that using `selector: all()` should only be used if all pods in the namespace can access all of the domains in the network set; if not, you should create separate policies accordingly. 
- -```yaml -apiVersion: projectcalico.org/v3 -kind: NetworkPolicy -metadata: - name: application.allow-egress-domain - namespace: hipstershop -spec: - tier: application - order: 0 - selector: all() - serviceAccountSelector: '' - egress: - - action: Allow - source: {} - destination: - selector: destinations == "google" - types: - - Egress -``` - -## Network sets in Service Graph - -Continuing with our `hipstershop` example, if you go to Service Graph, you see hipstershop (highlighted in yellow). - -![hipstershop](/img/calico-enterprise/hipstershop.png) - -If we double-click `hipstershop` to drill down, we now see the `google` network set icon (highlighted in yellow). We now have visibility to traffic external from google sites to hipstershop. (If you are using the tigera-labs cluster, note that the network set will not be displayed as shown below.) - -![google-networkset](/img/calico-enterprise/google-networkset.png) - -Service Graph provides a view into how services are interconnected in a consumable view, along with easy access to flow logs. However, you can also see traffic associated with network sets in volumetric display with Flow Visualizer, and query flow log data associated with network sets in Kibana. - -## Tutorial - -In the following example, we create a global network set resource for a trusted load-balancer that can be used with microservices and applications. The label, `trusted-ep: load-balancer` is how this global network set can be referenced in policy. - -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalNetworkSet -metadata: - name: load-balancer - labels: - trusted-ep: "load-balancer" -spec: - nets: - # Modify the ip addresses to refer to the ip addresses of load-balancers in your environment - - 10.0.0.1/32 - - 10.0.0.2/32 -``` - -The following network policy uses the `selector: trusted-ep == "load balancer"` to reference the above GlobalNetworkSet. All applications in the `app2-ns` namespace, that match `app2` and `svc1` are allowed ingress traffic from the trusted load balance on port 1001. - -```yaml -apiVersion: projectcalico.org/v3 -kind: NetworkPolicy -metadata: - name: application.app2-svc1 - namespace: app2-ns -spec: - tier: application - order: 500 - selector: (app == "app2"&&svc == "svc1") - ingress: - - action: Allow - protocol: TCP - source: - selector: trusted-ep == "load-balancer" - destination: - ports: - - '10001' - types: - - Ingress -``` - -### Advanced policy rules with network sets - -When you combine $[prodname] policy rules with network sets, you have powerful ways to fine-tune. The following example combines network sets with specific rules in a global network policy to deny access more quickly. -We start by creating a $[prodname] GlobalNetworkSet that specifies a list of CIDR ranges we want to deny: 192.0.2.55/32 and 203.0.113.0/24. - -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalNetworkSet -metadata: - name: ip-protect - labels: - ip-deny-list: 'true' -spec: - nets: - - 192.0.2.55/32 - - 203.0.113.0/24 -``` - -Next, we create two $[prodname] GlobalNetworkPolicy resources. The first is a high "order" policy that allows traffic as a default for things that don’t match our second policy, which is low "order" and uses the GlobalNetworkSet label as a selector to deny ingress traffic (IP-deny-list in the previous step). In the label selector, we also include the term, `!has(projectcalico.org/namespace)`, which prevents this policy from matching pods or NetworkSets that also have this label. 
To more quickly enforce the denial of forwarded traffic to the host at the packet level, use the `doNotTrack` and `applyOnForward` options. - -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalNetworkPolicy -metadata: - name: forward-default-allow -spec: - selector: apply-ip-protect == 'true' - order: 1000 - doNotTrack: true - applyOnForward: true - types: - - Ingress - ingress: - - action: Allow ---- -apiVersion: projectcalico.org/v3 -kind: GlobalNetworkPolicy -metadata: - name: ip-protect -spec: - selector: apply-ip-protect == 'true' - order: 0 - doNotTrack: true - applyOnForward: true - types: - - Ingress - ingress: - - action: Deny - source: - selector: ip-deny-list == 'true' && !has(projectcalico.org/namespace) -``` - -## Best practices for using network sets - -- Create network sets as soon as possible after getting started - - This allows you to quickly realize the benefits of seeing custom metadata in flow logs and visualizing traffic in Service Graph and Flow Visualizer. - -- Create a network set label and name schema - - It is helpful to think: what names would be meaningful and easy to understand when you look in Service Graph? Flow Viz? Kibana? What labels will be easy to understand when used in network policies – especially if you are separating users who manage network sets from those who consume them in network policies. - -- Do not put large sets of CIDRs and domains directly in policy - - Network sets allow you to specify CIDRs and/or domains. Although you can add CIDRs and domains directly in policy, it doesn't scale. - -- Do not put thousands of rules into a policy, each with a different CIDR - - If your set of /32s can be easily aggregated into a few broader CIDRs without compromising security, it’s a good thing to do; whether you’re putting the CIDRs in the rule or using a network set. - -- If you want to match thousands of endpoints, write one or two rules and use selectors to match the endpoints. - - Having one rule per port, per host is inefficient because each rule ends up being rendered as an iptables/eBPF rule instead of making good use of IP sets. - -- Avoid overlapping IP addresses/subnets in networkset/globalnetworkset definitions - -## Efficient use of network sets - -If you have a large number of things to match, using a network set is more efficient both in the control plane (for example, Felix CPU), and for the packet path (latency/per packet CPU). If you use network sets and you add/remove an IP from the network set, this doesn't require changing iptables rules at all. It only requires updating the ipset, which is efficient. If you also change the policy rules, then iptables must be updated too. Using network sets is efficient for all of the following use cases: - -- The system applying the iptables rules to incoming connections (to decide whether to allow or deny the traffic) -- iptables rules updates whenever one of the policies and/or network sets change -- The Kubernetes APIserver handling changes to the policy and/or networkset CRDs - -Follow these guidelines for efficient use of network sets. - -| Policy | Network set | Results | -| ------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | -| source: selector: foo="bar" | With handful of broad CIDRs | **Efficient**
    - 1 iptables/eBPF rule
    - 1 IP set with handful of CIDRs | -| source: nets: [ ... handful ...] | Not used | **Efficient**
    - Handful of iptables/eBPF rules
    - 0 IP sets | -| source: selector: foo="bar" | One network set with 2000 x /32s | **`*`Most efficient**
    - 1 iptables/eBPF rule
    - 1 IP set with 2000 entries |
| | Two network sets with 1000 x /32s each | **Efficient**
    - 2 iptables/eBPF rules
    - 2 IP sets with 1000 entries |
| source:
    nets: [... 2000 /32s ...]
    - source:
    nets: [1 x /32]
    - source: nets: [1 x /32]
    - ... x 2000 | Not used | **Inefficient**
    Results in programming 2k iptables/eBPF rules
    - 2000+ iptables/eBPF rules
    - 0 IP sets | - -`*` Updating **ipsets** is fast and efficient. Adding/removing a single entry is an O(1) operation no matter the number of IPs in the set. Updating **iptables** is generally slow and gets slower the more rules you have in total (including rules created by kube-proxy, for example). Less than 10K rules are generally fine, but noticeable latency occurs when updating rules above that number (increasing as the number of rules grows). (The newer nftables may scale more efficiently but those results are not included here.) - -Similarly, hitting many iptables (or eBPF) rules adds latency to the first packet in a flow. For iptables, it measures around 250ns per rule. Although a single rule is negligible, hitting 10K rules add >1ms to the first packet in the flow. Packets only hit the rules for the particular interface that they arrive on; if you have 10K rules on one interface and 10 rules on another, the packet processed by the first interface will have more latency. - -## Additional resources - -- [Network set](../reference/resources/networkset.mdx) -- [Global network set](../reference/resources/globalnetworkset.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/policy-best-practices.mdx b/calico-enterprise_versioned_docs/version-3.19-2/network-policy/policy-best-practices.mdx deleted file mode 100644 index 5d00d00c74..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/policy-best-practices.mdx +++ /dev/null @@ -1,356 +0,0 @@ ---- -description: Learn policy best practices for security, scalability, and performance. ---- - -# Policy best practices - -## Big picture - -Policy best practices for run-time security starts with $[prodname]’s robust network security policy, but other $[prodname] resources play equally important roles in security, scalability, and performance. - -Learn $[prodname] policy best practices and resources that support a zero trust network model: - -- [Prepare for policy authoring](#prepare-for-policy-authoring) -- [Policy best practices for day-one zero trust](#policy-best-practices-for-day-one-zero-trust) -- [Policy design for efficiency and performance](#policy-design-for-efficiency-and-performance) -- [Policy life cycle tools](#policy-life-cycle-tools) - -## Prepare for policy authoring - -### Determine who can write policy - -Any team familiar with deploying microservices in Kubernetes can easily master writing network policies. The challenge in many organizations is deciding who will be given permission to write policy across teams. Although there are different approaches, $[prodname] policy tools have the flexibility and guardrails to accommodate different approaches. - -Let’s review two common approaches. - -- **Microservices teams write policy** - - In this model, network policy is treated as code, built into and tested during the development process, just like any other critical part of a microservice’s code. The team responsible for developing a microservice has a good understanding of other microservices they consume and depend on, and which microservices consume their microservice. With a defined, standardized approach to policy and label schemas, there is no reason that the teams cannot implement network policies for their microservice as part of the development of the microservice. With visibility in Service Graph, teams can even do basic troubleshooting. 
- -- **Dev/Ops writes policy, microservice team focuses on internals** - An equally valid approach is to have development teams focus purely on the internals of the microservices they are responsible for, and leave responsibility for operating the microservices with devops teams. A Dev/ops team needs the same understanding as the microservices team above. However, network security may come much later in the organization’s processes, or even as an afterthought on a system already in production. This can be more challenging because getting network policies wrong can have significant production impacts. But using $[prodname] tools, this approach is still achievable. - -When you get clarity on who can write policies, you can move to creating tiers. $[prodname] tiers, along with standard Kubernetes RBAC, provide the infrastructure to meet security concerns across teams. - -### Understand the depth of $[prodname] network policy - -Because $[prodname] policy goes well beyond the features in Kubernetes policy, we recommend that you have a basic understanding of [network policy and global network policy](beginners/calico-network-policy.mdx) and how they provide workload access controls. And even though you may not implement the following policies, it is helpful to know the depth of defense that is available in $[prodname]. - -- [Policy for services](beginners/services/index.mdx) -- [Policy integration for firewalls](policy-firewalls/index.mdx) -- [Policy for hosts](hosts/index.mdx) - -### Create policy tiers - -**Tiers** are a hierarchical construct used to group policies and enforce higher precedence policies that cannot be circumvented by other teams. As part of your microsegmentation strategy, tiers let you apply identity-based protection to workloads and hosts. - -Before creating policies, we recommend that you create your tier structure. This often requires internal debates and discussions. As noted previously, $[prodname] policy workflow has the guardrails you need to allow diverse teams to participate in policy writing. - -To understand how tiered policy works and best practices, see [Get started with tiered policies](policy-tiers/tiered-policy.mdx). - -### Create label standards - -Creating a label standard is often an overlooked step. But if you skip this step, it will cost you in troubleshooting down the road; especially given visibility/troubleshooting is already a challenge in a Kubernetes deployment. - -**Why are label standards important?** - -Network policies in Kubernetes depend on **labels and selectors** (not IP addresses and IP ranges) to determine which workloads can talk to each other. As pods dynamically scale up and down, network policy is enforced based on the labels and selectors that you define. So workloads and host endpoints need unique, identifiable labels. If you create duplicate label names, or labels are not intuitive, troubleshooting network policy issues and authoring network policies becomes more difficult. - -**Recommendations**: - -- Follow the [Kubernetes guidelines for labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/). If the Kubernetes guidelines do not cover your use cases, we recommend this blog from Tigera Support: [Label standard and best practices for Kubernetes security](https://www.helpnetsecurity.com/2021/05/26/kubernetes-security/). -- Develop a comprehensive set of labels that meets the deployment, reporting, and security requirements of different stakeholders in your organization. 
-- Standardize the way you label your pods and write your network policies using a consistent schema or design pattern. -- Labels should be defined to achieve a specific and explicit purpose -- Use an intuitive language in your label definition that enables a quick and simple identification of labeled Kubernetes objects. -- Use label key prefixes and suffixes to identify attributes required for asset classification. -- Ensure the right labels are applied to Kubernetes objects by implementing label governance checks in your CI/CD pipeline or at runtime. - -### Create network sets - -Network sets and global network sets are grouping mechanisms for arbitrary sets of IPs/subnets/CIDRs or domains. They are key resources for efficient policy design. The key use cases for network sets are: - -- **Use/reuse in policy to support scaling** - - You reference network sets in policies using selectors (rather than updating individual policies with CIDRs or domains). - -- **Visibility to traffic to/from a cluster** - - For apps that integrate with third-party APIs and SaaS services, you get enhanced visibility to this traffic in Service Graph. - -- **Global deny lists** - - Create a “deny-list” of CIDRs for bad actors or embargoed countries in policy. - -**Recommendation**: Create network sets **and labels** before writing policy. - -For network set tutorial and best practices, see [Get started with network sets](networksets.mdx). - -## Policy best practices for day-one zero trust - -### Create a global default deny policy - -A global default deny network policy provides an enhanced security posture – so pods without policy (or incorrect policy) are not allowed traffic until appropriate network policy is defined. We recommend creating a global default deny, regardless of whether you use Calico Enterprise and/or Kubernetes network policy. - -But, be sure to understand the [best practices for creating a default deny policy](default-deny.mdx) to avoid breaking your cluster. - -Here are sample [default deny policies](beginners/kubernetes-default-deny.mdx). - -### Define both ingress and egress network policy rules for every pod in the cluster - -Although defining network policy for traffic external to clusters (north-south) is certainly important, it is equally important to defend against attacks for east-west traffic. Simply put, **every connection from/to every pod in every cluster should be protected**. Although having both doesn’t guarantee protection against other attacks and vulnerabilities, one innocuous workload can lead to exposure of your most critical workloads. - -For examples, see [basic ingress and egress policies](beginners/calico-network-policy.mdx). - -## Policy design for efficiency and performance - -Teams can write policies that work, but ultimately you want policies that also scale, and do not negatively impact performance. - -If you follow a few simple guidelines, you’ll be well on your way to writing efficient policy. - -### Use global network policy only when all rules apply globally - -- **Do** - - Use global network policy for cluster-wide scope when all rules apply to multiple namespaces or host endpoints. For example, use a global network policy to create a deny-list of CIDRs for embargoed countries, or for global default deny everywhere, even for new namespaces. - - Why? Although at the level of packet processing there is no difference between network policy and global network, for CPU usage, one global network policy is faster than a large number of network policies. 
- -- **Avoid** - - Using a global network policy as a way to combine diverse, namespaced endpoints with different connectivity requirements. Although creating such a policy can work, appears efficient and is easier to view than several separate network policies, it is inefficient and should be avoided. - - Why? Putting a lot of anything in policy (rules, CIDRs, ports) that are manipulated by selectors is inefficient. iptables/eBPF rules depend on minimizing executions and updates. When a selector is encountered in a policy rule, it is converted into one iptables rule that matches on an IP set. Then, different code keeps the IP sets up to date; this is more efficient than updating iptables rules. Also, because iptables rules execute sequentially in order, having many rules results in longer network latencies for the first packet in a flow (approximately 0.25-0.5us per rule). Finally, having more rules slows down programming of the data plane, making policy updates take longer. - -**Example: Inefficient global network policy** - -The following policy is a global network policy for a microservice that limits all egress communication external to the cluster in the security tier. Does this policy work? Yes. And logically, it seems to cleanly implement application controls. - -```yaml noValidation -1 apiVersion: projectcalico.org/v3 -2 kind: GlobalNetworkPolicy -3 metadata: -4 name: security.allow-egress-from-pods -5 spec: -6 tier: security -7 order: 1 -8 selector: all() -9 egress: -10 - action: Deny -11 source: -12 namespaceSelector: projectcalico.org/namespace starts with "tigera" -13 destination: -14 selector: threatfeed == "feodo" -15 - action: Allow -16 protocol: TCP -17 source: -18 namespaceSelector: projectcalico.org/name == "sso" -19 ports: -20 - '443' -21 - '80' -22 destination: -23 domains: -24 - '*.googleapis.com' -25 - action: Allow -26 protocol: TCP -27 source: -28 selector: psql == "external" -29 destination: -30 ports: -31 - '5432' -32 domains: -33 - '*.postgres.database.azure.com' -34 - action: Allow -35 protocol: TCP -36 source: {} -37 destination: -38 ports: -39 - '443' -40 - '80' -41 domains: -42 - '*.logic.azure.com' -43 - action: Allow -44 protocol: TCP -45 source: {} -46 destination: -47 ports: -48 - '443' -49 - '80' -50 domains: -51 - '*.azurewebsites.windows.net' -52 - action: Allow -53 protocol: TCP -54 source: -55 selector: 'app in { "call-archives-api" }||app in { "finwise" }' -56 destination: -57 domains: -58 - '*.documents.azure.com' -59 - action: Allow -60 protocol: TCP -61 source: -62 namespaceSelector: projectcalico.org/name == "warehouse" -63 destination: -64 ports: -65 - '1433' -66 domains: -67 - '*.database.windows.net' -68 - action: Allow -69 protocol: TCP -70 source: {} -71 destination: -72 nets: -73 - 65.132.216.26/32 -74. - 10.10.10.1/32 -75 ports: -76 - '80' -77 - '443' -78 - action: Allow -79 protocol: TCP -80 source: -81 selector: app == "api-caller" -82 destination: -83 ports: -84 - '80' -85 - '443' -86 domains: -87 - api.example.com -88 - action: Allow -89 source: -90 selector: component == "tunnel" -91 - action: Allow -92 destination: -93 selector: all() -94 namespaceSelector: all() -95 - action: Deny -96 types: -97 - Egress -``` - -**Why this policy is inefficient** - -First, the policy does not follow guidance on use for global network policy: that all rules apply to the endpoints. So the main issue is inefficiency, although the policy works. 
- -The main selector `all()` (line 8) means the policy will be rendered on every endpoint (workload and host endpoints). The selectors in each rule (for example, lines 12 and 14) control traffic that are matched by that rule. So, even if the host doesn’t have any workloads that match `"selector: app == "api-caller"`, you’ll still get the iptables/eBPF rule rendered on every host to implement that rule. If this sample policy had 100 pods, that’s a 10 - 100x increase in the number of rules (depending on how many local endpoints match each rule). In short, it adds: - -- Memory and CPU to keep track of all the extra rules -- Complexity to handle changes to endpoint labels, and to re-render all the policies too. - -### Avoid policies that may select unwanted endpoints - -The following policy is for an application in a single namespace, `app1-ns` namespace. There are two microservices that are all labeled appropriately: - -- microservice 1 has `app: app1`, `svc: svc1` -- microservice 2 has `app: app1`, `svc: svc2` - -The following policy works correctly and does not incur a huge performance hit. But it could select additional endpoints that were not intended. - -```yaml -apiVersion: projectcalico.org/v3 -kind: NetworkPolicy -metadata: - name: application.app1 - namespace: app1-ns -spec: - tier: application - order: 10 - selector: app == "app1" - types: - - Ingress - ingress: - - action: Allow - source: - selector: trusted-ip == "load-balancer" - destination: - selector: svc == "svc1" - ports: - - 10001 - protocol: TCP - - action: Allow - source: - selector: svc == "svc1" - destination: - selector: svc == "svc2" - ports: - - 10002 - protocol: TCP -``` - -The policy incorrectly assumes that the main policy selector (`app == "app1"`) will be combined (ANDed) with the endpoint selector, and only for certain policy types. In this case, - -- **Ingress** - combines policy selector and _destination endpoint selector_ - or -- **Egress** - combines policy selector and _source endpoint selector_ - -But if the assumptions behind the labels are not understood by other policy authors and are not correctly assigned, the endpoint selector may select _additional endpoints that were not intended_. For ingress policy, this can open up the endpoint to more IP addresses than necessary. This unintended consequence would be exacerbated if the author used a global network policy. - -### Put multiple relevant policy rules together in the same policy - -As discussed previously, it is better to create separate policies for different endpoint connectivity rules, than a single global network policy. However, you may interpret this to mean that the best practice is to make unique policies that do not aggregate any rules. But that is not the case. Why? When $[prodname] calculates and enforces policy, it updates the iptables/eBPF and reads policy changes and pod/workload endpoints from the datastore. The more policies in memory, the more work it takes determine which policies match a particular endpoint. If you group more rules into one policy, there are fewer policies to match against. - -### Understand effective use of label selectors - -Label selectors abstract network policy from the network. Misuse of selectors can slow things down. As discussed previously, the more selectors you create, the harder $[prodname] works to find matches. - -The following policy shows an inefficient use of selectors. Using `selector: all()` renders the policy on all nodes for all workloads. 
If there are 10,000 workloads, but only 10 match label==foo, that is very inefficient at the data plane level. - -```yaml -selector: all() -ingress: - source: - selector: label == 'bar' - destination: - selector: label == 'foo' -``` - -The best practice policy below allows the same traffic, but is more efficient and scalable. Why? Because the policy will be rendered only on nodes with workloads that match the selector `label==foo`. - -```yaml -selector: label == 'foo' -ingress: - source: - selector: label == 'bar' -``` - -Another common mistake is using `selector: all()` when you don’t need to. `all()` means _all workloads_ so that will be a large IP set. Whenever there's a source/destination selector in a rule, it is rendered as an IP set in the data plane. - -```yaml -source: - selector: all() -``` - -### Put domains and CIDRs in network sets rather than policy - -Network sets allow you to specify CIDRs and/or domains. As noted in [Network set best practices](policy-best-practices.mdx), we do not recommend putting large CIDRs or domains directly in policy. Although nothing stops you from do this in policy, using network sets is more efficient and supports scaling. - -## Policy life cycle tools - -### Preview, stage, deploy - -A big obstacle to adopting Kubernetes is not having confidence that you can effectively prevent, detect, and mitigate across diverse teams. The following policy life cycle tools in the web console (**Policies** tab) can help. - -- **Policy recommendations** - - Get a policy recommendation for unprotected workloads. Speeds up learning, while supporting zero trust. - -- **Policy impact preview** - - Preview the impacts of policy changes before you apply them to avoid unintentionally exposing or blocking other network traffic. - -- **Policy staging and audit modes** - - Stage network policy so you can monitor traffic impact of both Kubernetes and $[prodname] policy as if it were actually enforced, but without changing traffic flow. This minimizes misconfiguration and potential network disruption. - -For details, see [Policy life cycle tools](staged-network-policies.mdx). - -### Do not trust anything - -Zero trust means that you do not trust anyone or anything. $[prodname] handles authentication on a per request basis. Every action is either authorized or restricted, and the default is everything is restricted. To apply zero trust to policy and reduce your attack surface and risk, we recommend the following: - -- Ensure that all expected and allowed network flows are explicitly allowed; any connection not explicitly allowed is denied - -- Create a quarantine policy that denies all traffic that you can quickly apply to workloads when you detect suspicious activity or threats - -## Additional resources - -- [Troubleshoot policies](policy-troubleshooting.mdx) -- [Security and policy best practices blog](https://www.tigera.io/blog/kubernetes-security-policy-10-critical-best-practices/) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/policy-firewalls/fortinet-integration/firewall-integration.mdx b/calico-enterprise_versioned_docs/version-3.19-2/network-policy/policy-firewalls/fortinet-integration/firewall-integration.mdx deleted file mode 100644 index 1327e556cb..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/policy-firewalls/fortinet-integration/firewall-integration.mdx +++ /dev/null @@ -1,254 +0,0 @@ ---- -description: Enable FortiGate firewalls to control traffic from Kubernetes workloads. 
---- - -# Extend Kubernetes to Fortinet firewall devices - -:::warning[deprecation and removal notice] - -The Fortinet integration is deprecated and will be removed in a future release. -If you want to use $[prodname] with Fortinet or any other firewall, we recommend instead using an [egress gateway](../../../networking/egress/egress-gateway-on-prem.mdx). - -::: - -## Big picture - -Use $[prodname] network policy to control traffic from Kubernetes clusters in your FortiGate firewalls. - -## Value - -As platform and security engineers, you want your apps to securely communicate with the external world. But you also want to secure the network traffic from the Kubernetes clusters using your Fortigate firewalls. Using the Fortinet/$[prodname] integration, security teams can retain firewall responsibility, secure traffic using $[prodname] network policy, which frees up time for ITOps. - -## Concepts - -## Integration at a glance - -This $[prodname]/Fortinet integration workflow lets you control egress traffic leaving the Kubernetes cluster. You create perimeter firewall policies in FortiManager and FortiGate that reference Kubernetes workloads. $[prodname] acts as a conduit, using the `tigera-firewall-controller` and global network policies to pass Kubernetes workload information to FortiManager and Fortigate devices where policies are applied and enforced. - -The basic workflow is: - -1. Determine the Kubernetes pods that are allowed access outside the perimeter firewall. -1. Create $[prodname] global network policies with selectors that match those pods. Each global network policy maps to an address group in the FortiGate firewall. -1. Deploy the `tigera firewall controller` in the Kubernetes cluster. -1. Create a ConfigMap with Fortinet firewall information. - The `tigera firewall controller` reads the ConfigMap, gets the FortiGate firewall IP address, API token, and source IP address selection with `node` or `pod`. In your Kubernetes cluster, the controller populates pod IPs or Kubernetes node IPs of selector matching pods in Fortigate address group objects. - -## Before you begin - -**Supported versions** - -- FortiGate v6.2 -- FortiManager v6.4 - -**Required** - -- Pull secret that you used during [$[prodname] installation](../../../getting-started/install-on-clusters/index.mdx) -- IPv4 CIDR’s or IP addresses of all Kubernetes nodes; this is required for FortiManager to treat Kubernetes nodes as trusted hosts. 
- -**Recommended** - -- Experience creating and administering FortiGate/FortiManager firewall policies -- Experience using [$[prodname] tiers](../../../reference/resources/tier.mdx) and [Global network policy](../../../reference/resources/globalnetworkpolicy.mdx) - -## How to - -- [Create tier and global network policy](#create-tier-and-global-network-policy) -- [Configure FortiGate firewall to communicate with firewall controller](#configure-fortigate-firewall-to-communicate-with-firewall-controller) -- [Configure FortiManager to communicate with firewall controller](#configure-fortimanager-to-communicate-with-firewall-controller) -- [Create a config map for address selection in firewall controller](#create-a-config-map-for-address-selection-in-firewall-controller) -- [Create a config map with FortiGate and FortiManager information](#create-a-config-map-with-fortigate-and-fortimanager-information) -- [Install FortiGate ApiKey and FortiManager password as secrets](#install-fortigate-apikey-and-fortimanager-password-as-secrets) -- [Deploy firewall controller in the Kubernetes cluster](#deploy-firewall-controller-in-the-kubernetes-cluster) - -### Create tier and global network policy - -1. Create a tier for organizing global network policies. - - Create a new [Tier](../../policy-tiers/tiered-policy.mdx) to organize all Fortigate firewall global network policies in a single location. - -1. Note the tier name to use in a later step for the FortiGate firewall information config map. - -1. Create a GlobalNetworkPolicy for address group mappings. - - For example, a GlobalNetworkPolicy can select a set of pods that require egress access to external workloads. In the following GlobalNetworkPolicy, the firewall controller creates an address group named, `default.production-microservice1` in the Fortigate firewall. The members of `default.production-microservice1` address group include IP addresses of nodes. Each node can host one or more pods whose label selector match with `env == 'prod' && role == 'microservice1'`. Each GlobalNetworkPolicy maps to an address group in FortiGate firewall. - - ```yaml - apiVersion: projectcalico.org/v3 - kind: GlobalNetworkPolicy - metadata: - name: default.production-microservice1 - spec: - selector: "env == 'prod' && role == 'microservice1'" - types: - - Egress - egress: - - action: Allow - ``` - -### Configure FortiGate firewall to communicate with firewall controller - -1. Determine and note the CIDR's or IP addresses of all Kubernetes nodes that can run the `tigera-firewall-controller`. - Required to explicitly allow the `tigera-firewall-controller` to access the FortiGate API. -1. Create an Admin profile with read-write access to Address and Address Group Objects. - For example: `tigera_api_user_profile` -1. Create a REST API Administrator, associate this user with the `tigera_api_user_profile` profile, and add the CIDR or IP address of your Kubernetes cluster nodes as trusted hosts. - For example: `calico_enterprise_api_user` -1. Note the API key. - -### Configure FortiManager to communicate with firewall controller - -1. Determine and note the CIDR's or IP addresses of all Kubernetes nodes that can run the `tigera-firewall-controller`. - Required to explicitly allow the tigera-firewall-controller to access the FortiManager API. -1. From system settings, create an Admin profile with Read-Write access for `Policy & Objects`. - For example: `tigera_api_user_profile` -1. 
Create a JSON API administrator, associate this user with the `tigera_api_user_profile` profile, and add the CIDR or IP address of your Kubernetes cluster nodes as `Trusted Hosts`. -1. Note the username and password. - -### Create a config map for address selection in firewall controller - -1. Create a namespace for tigera-firewall-controller. - - ```bash - kubectl create namespace tigera-firewall-controller - ``` - -1. Create a config map with FortiGate firewall information. - - For example: - - ```bash - kubectl -n tigera-firewall-controller create configmap tigera-firewall-controller \ - --from-literal=tigera.firewall.policy.selector="projectcalico.org/tier == 'default'" \ - --from-literal=tigera.firewall.addressSelection="node" - ``` - - **ConfigMap values** - - | Field | Enter values... | - | -------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | - | tigera.firewall.policy.selector | The tier name with the global network policies with the Fortigate address group mappings.
    For example, this selects the global network policies in the `default` tier:
    `tigera.firewall.policy.selector: "projectcalico.org/tier == 'default'"` |
| tigera.firewall.addressSelection | The address selection mode for outbound traffic leaving the cluster.
    For example, if outgoing NAT is enabled in the cluster and the compute node IP address is used, set "tigera.firewall.addressSelection == `node`", or
    If pod IP address used then "tigera.firewall.addressSelection == `pod`" | - -### Create a config map with FortiGate and FortiManager information - -1. In the [Fortigate ConfigMap manifest]($[filesUrl]/manifests/fortinet-device-configmap.yaml), add your FortiGate firewall information in the data section, `tigera.firewall.fortigate`. - - Where: - - | Field | Description | - | ------------------------ | --------------------------------------------------------------------------- | - | name | FortiGate device name | - | ip | FortiGate Management Ip address | - | apikey | Secret in tigera-firewall-controller namespace, to store FortiGate's APIKey | - | apikey.secretKeyRef.name | Name of the secret to store APIKey | - | apikey.secretKeyRef.key | Key name in the secret, which stores APIKey | - | vdom | FortiGate VDOM name | - - For example: - - ```yaml - - name: prod-eastcoast-1 - ip: 1.2.3.1 - apikey: - secretKeyRef: - name: fortigate-east1 - key: apikey-fortigate-east1 - vdom: fortigate-vdom1 - - name: prod-eastcoast-2 - ip: 1.2.3.2 - apikey: - secretKeyRef: - name: fortigate-east2 - key: apikey-fortigate-east2 - vdom: fortigate-vdom2 - ``` - -1. In the [FortiManager ConfigMap manifest]($[filesUrl]/manifests/fortinet-device-configmap.yaml), add your FortiManager information in the data section, `tigera.firewall.fortimgr`. - - Where: - - | Field | Description | - | -------------------------- | ------------------------------------------------------------------------------ | - | name | FortiManager device name | - | ip | FortiManager Management Ip address | - | adom | FortiManager ADOM name to manage kubernetes cluster. | - | username | JSON api access account name to Read/Write FortiManager address objects. | - | password | Secret in tigera-firewall-controller namespace, to store FortiManager password | - | password.secretKeyRef.name | Name of the secret to store password. | - | password.secretKeyRef.key | Key name in the secret, which stores password. | - - For example: - - ```yaml - - name: prod-east1 - ip: 1.2.4.1 - username: api_user - adom: root - password: - secretKeyRef: - name: fortimgr-east1 - key: pwd-fortimgr-east1 - ``` - -:::note - -If you are not using FortiManager in the integration, include only the following field in the ConfigMap data section. `tigera.firewall.fortimgr: |` - -::: - -1. Apply the manifest. - - ``` - kubectl apply -f $[filesUrl]/manifests/fortinet-device-configmap.yaml - ``` - -### Install FortiGate ApiKey and FortiManager password as secrets - -1. Store each FortiGate API key as a secret in the `tigera-firewall-controller` namespace. - For example, the FortiGate device, `prod-east1`, store its ApiKey as a secret name as `fortigate-east1`, with key as `apikey-fortigate-east1`. - - ``` - kubectl create secret generic fortigate-east1 \ - -n tigera-firewall-controller \ - --from-literal=apikey-fortigate-east1= - ``` - -1. Store each FortiManager password as secret in the `tigera-firewall-controller` namespace. - For example, for FortiMgr `prod-east1`, store its password as a secret name as `fortimgr-east1`, with key as `pwd-fortimgr-east1`. - - ``` - kubectl create secret generic fortimgr-east1 \ - -n tigera-firewall-controller \ - --from-literal=pwd-fortimgr-east1= - ``` - -### Deploy firewall controller in the Kubernetes cluster - -1. Install your pull secret. - - ``` - kubectl create secret generic tigera-pull-secret \ - --from-file=.dockerconfigjson= \ - --type=kubernetes.io/dockerconfigjson -n tigera-firewall-controller - ``` - -1. Apply the manifest. 
- - ``` - kubectl apply -f $[filesUrl]/manifests/fortinet.yaml - ``` - -## Verify the integration - -1. Log in to the FortiGate firewall user interface. -1. Under **Policy & Objects**, click **Addresses**. -1. Verify that your Kubernetes-related address objects and address group objects are created with the following comments "Managed by Tigera $[prodname]". - -Fof all FortiManagers that are configured to work with firewall-controller, log in to each FortiManager UI with the correct ADOM. - -1. Click **Policy & Objects**, **Object Configuration**, \*\*Addresses. -1. Verify that your Kubernetes-related address objects and address group objects are created with the following comments "Managed by Tigera $[prodname]". - -## Additional resources - -- [Extend FortiManager firewall policies to Kubernetes](fortimgr-integration.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/policy-firewalls/fortinet-integration/fortimgr-integration.mdx b/calico-enterprise_versioned_docs/version-3.19-2/network-policy/policy-firewalls/fortinet-integration/fortimgr-integration.mdx deleted file mode 100644 index 19b0ac937d..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/policy-firewalls/fortinet-integration/fortimgr-integration.mdx +++ /dev/null @@ -1,161 +0,0 @@ ---- -description: Extend FortiManager firewall policies to Kubernetes with Calico Enterprise ---- - -# Extend FortiManager firewall policies to Kubernetes - -:::warning[deprecation and removal notice] - -The Fortinet integration is deprecated and will be removed in a future release. -If you want to use $[prodname] with Fortinet or any other firewall, we recommend instead using an [egress gateway](../../../networking/egress/egress-gateway-on-prem.mdx). - -::: - -## Big picture - -Use FortiManager firewall policies to secure workloads in your Kubernetes cluster. - -## Value - -The $[prodname]/Fortinet integration lets you control Kubernetes clusters directly and apply policy -using the FortiManager UI as the primary interface. This allows firewall administrators to leverage existing -tools and workflows as they learn and adopt Kubernetes orchestration at their own pace. - -## Concepts - -### Integration at a glance - -This $[prodname]/Fortinet solution lets you directly control Kubernetes policies using FortiManager. - -The basic workflow is: - -1. Determine the Kubernetes pods that you want to securely communicate with each other. -1. Label these pods using a key-value pair where key is the `tigera.io/address-group`, and value is the pod matching a label name. -1. In the FortiManager, select the cluster’s ADOM, and create an address group using the key-value pair associated with the pods. -1. Create firewall policies using the address groups for IPv4 Source address and IPv4 Destination Address, and select services and actions as you normally would to allow or deny the traffic. Under the covers, the $[prodname] integration controller periodically reads the FortiManager firewall policies for your Kubernetes cluster, converts them to $[prodname] global network policies, and applies them to clusters. -1. Use the $[prodname] web console to verify the integration, and then FortiManager UI to make all updates to policy rules. - -:::note - -The default value for reading FortiManager firewall policies is three seconds. To change the value, modify environment variable FW_FORTIMGR_EW_POLL_INTERVAL in FortiManager integration manifest; units are in seconds. 
- -::: - -## Before you begin - -**Supported version** - -- FortiManager v6.4 - -**Required** - -- Pull secret that you used during [$[prodname] installation](../../../getting-started/install-on-clusters/index.mdx) -- IPv4 CIDR’s or IP addresses of all Kubernetes nodes; this is required for FortiManager to treat Kubernetes nodes as trusted hosts. -- Login access to [the $[prodname] web console](../../../operations/cnx/access-the-manager.mdx) - -**Recommended** - -- Experience with [ tiered policy](../../policy-tiers/tiered-policy.mdx) and [ global network policy](../../../reference/resources/globalnetworkpolicy.mdx) -- Experience creating and administering FortiGate/FortiManager firewall policies - -## How to - -- [Create a tier](#create-a-tier) -- [Configure FortiManager to communicate with firewall controller](#configure-fortimanager-to-communicate-with-firewall-controller) -- [Create a FortiManager config map](#create-a-fortimanager-config-map) -- [Install FortiManager password as secrets](#install-fortimanager-password-as-secrets) -- [Deploy the firewall controller in the Kubernetes cluster](#deploy-the-firewall-controller-in-the-kubernetes-cluster) -- [Verify the integration](#verify-the-integration) - -### Create a tier - -Create a [$[prodname] tier](../../policy-tiers/tiered-policy.mdx) in the $[prodname] web console for each Kubernetes cluster you want to secure. We recommend that you create a new tier (rather than reusing an existing tier) for all global network policies created by the $[prodname] integration controller. - -## Configure FortiManager to communicate with firewall controller - -1. Determine and note the CIDR’s or IP addresses of all Kubernetes nodes that can run the `tigera-firewall-controller`. - This is required to explicitly allow the `tigera-firewall-controller` to access the FortiManager API. -1. From system settings, create an Admin profile with Read-Write access for `Policy & Objects`. - For example: `tigera_api_user_profile` -1. Create a JSON API administrator and associate this user with the `tigera_api_user_profile` profile and add CIDR or IP address of your Kubernetes cluster nodes as `trusted hosts`. -1. Note the username and password. - -## Create a FortiManager config map - -1. Create a namespace for the tigera-firewall-controller. - - ```bash - kubectl create namespace tigera-firewall-controller - ``` - -1. In this [FortiManager ConfigMap manifest]($[filesUrl]/manifests/fortimanager-device-configmap.yaml), add your FortiManager device information in the data section: `tigera.firewall.fortimanager-policies`. For example: - - ```yaml noValidation - tigera.firewall.fortimanager-policies: | - - name: prod-east1 - ip: 3.2.1.4 - username: api_user - adom: root - tier: - packagename: sacramento - password: - secretKeyRef: - name: fortimgr-east1 - key: pwd-fortimgr-east1 - ``` - - Where: - - | Field | Description | - | -------------------------- | ---------------------------------------------------------------------------------------------------------------- | - | name | FortiManager device name. | - | ip | FortiManager Management IP address. | - | adom | FortiManager ADOM name that manages Kubernetes cluster. | - | packagename | FortiManager Firewall package. All firewall rules targeted for Kubernetes cluster are packed under this package. | - | username | JSON api access account name to Read/Write FortiManager address objects. 
| - | password | Secret in tigera-firewall-controller namespace, to store FortiManager password | - | tier | Tier name you created in the web console | - | password.secretKeyRef.name | Name of the secret to store password. | - | password.secretKeyRef.key | Key name in the secret, which stores password. | - -1. Apply the manifest. - - ```bash - kubectl apply -f $[filesUrl]/manifests/fortimanager-device-configmap.yaml - ``` - -## Install FortiManager password as secrets - -Store each FortiManager password as a secret in the `tigera-firewall-controller` namespace. - -For example, in the ConfigMap for FortiMgr `prod-east1`, store its password as a secret name as `fortimgr-east1`, with key as `pwd-fortimgr-east1`. - -```bash -kubectl create secret generic fortimgr-east1 \ --n tigera-firewall-controller \ ---from-literal=pwd-fortimgr-east1= -``` - -### Deploy the firewall controller in the Kubernetes cluster - -1. Install your pull secret. - - ```bash - kubectl create secret generic tigera-pull-secret \ - --from-file=.dockerconfigjson= \ - --type=kubernetes.io/dockerconfigjson -n tigera-firewall-controller - ``` - -1. Apply the manifest. - - ```bash - kubectl apply -f $[filesUrl]/manifests/fortimanager.yaml - ``` - -## Verify the integration - -1. Log in to FortiManager with the correct ADOM. -2. Select **Policy & Objects**, **Object Configuration**, and create new **Address Groups**. -3. Click **Policy packages** and select the Package assigned to your Kubernetes cluster. -4. Create a test firewall policy with the following fields: Name, IPv4 Source Address, IPv4 Destination Address, Service and Action. -5. Log in to the $[prodname] web console, and under the tier that you specified in the ConfigMap, verify that the GlobalNetworkPolicies are created. diff --git a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/policy-firewalls/fortinet-integration/index.mdx b/calico-enterprise_versioned_docs/version-3.19-2/network-policy/policy-firewalls/fortinet-integration/index.mdx deleted file mode 100644 index ffe1ad82e9..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/policy-firewalls/fortinet-integration/index.mdx +++ /dev/null @@ -1,18 +0,0 @@ ---- -description: Calico Enterprise Fortinet firewall integrations. -hide_table_of_contents: true ---- - -# Fortinet firewall integrations - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - -:::warning[deprecation and removal notice] - -The Fortinet integration is deprecated and will be removed in a future release. -If you want to use $[prodname] with Fortinet or any other firewall, we recommend instead using an [egress gateway](../../../networking/egress/egress-gateway-on-prem.mdx). - -::: - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/policy-firewalls/fortinet-integration/overview.mdx b/calico-enterprise_versioned_docs/version-3.19-2/network-policy/policy-firewalls/fortinet-integration/overview.mdx deleted file mode 100644 index 60987966ab..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/policy-firewalls/fortinet-integration/overview.mdx +++ /dev/null @@ -1,45 +0,0 @@ ---- -description: Learn how to integrate Kubernetes clusters with existing Fortinet firewall workflows using Calico Enterprise. 
---- - -# Determine the best Calico Enterprise/Fortinet solution - -:::warning[deprecation and removal notice] - -The Fortinet integration is deprecated and will be removed in a future release. -If you want to use $[prodname] with Fortinet or any other firewall, we recommend instead using an [egress gateway](../../../networking/egress/egress-gateway-on-prem.mdx). - -::: - -## Big picture - -Determine the best $[prodname]/Fortinet solution to integrate Kubernetes clusters with your existing Fortinet firewall workflows. - -## Value - -Many security teams must work within the confines of their existing IT security architecture, even though perimeter firewalls do not meet the needs of Kubernetes clusters. The $[prodname]/Fortinet integration allows firewall administrators to leverage existing Fortinet security tools and workflows, continue meeting compliance requirements, while adopting Kubernetes orchestration using $[prodname] at their own pace. - -### Concepts - -The $[prodname]/Fortinet integration provides the following solutions. You can you use them separately, or together without contention. - -### Solution 1: Extend Kubernetes to Fortinet firewall devices - -**Use case**: Control egress traffic for Kubernetes clusters. - -**Problem**: Perimeter firewalls do not have the necessary information to act on traffic that leaves the cluster for Kubernetes workloads. - -**Solution**: The $[prodname]/Fortinet integration leverages the power of $[prodname] policy selectors to provide Kubernetes workload information to FortiManager and FortiGate devices. You create perimeter firewall policies in FortiManager and FortiGate that reference Kubernetes workloads. Policies are applied and enforced by FortiGate devices. And Firewall administrators can write cluster egress policies that reference Kubernetes workloads directly in Fortinet devices. - -### Solution 2: Extend FortiManager firewall policies to Kubernetes - -**Use case**: Control Kubernetes clusters directly and apply policy. - -**Problem**: To avoid disruption, teams need to leverage existing FortiManager as the primary user interface. - -**Solution**: Use FortiManager to create firewall policies that are applied as $[prodname] network policies on Kubernetes workloads. Use the power of a $[prodname] “higher-order tier” so Kubernetes policy is evaluated early in the policy processing order, but update policy using FortiManager UI. Use the $[prodname] web console as a secondary interface to verify the integration and troubleshoot using logs. - -## Next steps - -- [Extend Kubernetes to Fortinet firewall devices](firewall-integration.mdx) -- [Extend FortiManager firewall policies to Kubernetes](fortimgr-integration.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/policy-firewalls/index.mdx b/calico-enterprise_versioned_docs/version-3.19-2/network-policy/policy-firewalls/index.mdx deleted file mode 100644 index dc60c393e7..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/policy-firewalls/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: Use Calico Enterprise policy with existing firewalls. 
-hide_table_of_contents: true ---- - -# Policy for firewalls - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/policy-impact-preview.mdx b/calico-enterprise_versioned_docs/version-3.19-2/network-policy/policy-impact-preview.mdx deleted file mode 100644 index edf6346864..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/policy-impact-preview.mdx +++ /dev/null @@ -1,34 +0,0 @@ ---- -description: View the impacts of policies before you apply them. ---- - -# Preview policy impacts - -## Big picture - -Preview the impacts of policy changes before you apply them. - -## Value - -Create, update, and delete policy with confidence knowing you will not unintentionally expose or block other network traffic. - -## Before you begin... - -You must have a running kubernetes cluster with $[prodname] installed. - -You must have permissions to make policy changes. Users can only preview a change if RBAC allows them to make the change. Note that the `tigera-ui-user` role does not have permission to modify policy, and therefore will not allow users to preview changes. - -## How to - -1. From the Edit Policy page on the web console, modify any attribute of the policy. -1. Before applying it, click the "preview" button at the top right. - This launches the flow log visualizer. -1. Click the "changes applied" toggle to see how flows would change if the changes are applied. -1. Click the "only changed flows" to hide all flows that remained the same before and after the change. -1. Click the left arrow at the top-right corner of the view to return to the edit policy page. - -:::note - -There may be some flows that $[prodname] will not be able to predict. Those flows will appear as "Uncertain" as per the legend at the bottom right. - -::: diff --git a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/policy-tiers/allow-tigera.mdx b/calico-enterprise_versioned_docs/version-3.19-2/network-policy/policy-tiers/allow-tigera.mdx deleted file mode 100644 index 15503c66f4..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/policy-tiers/allow-tigera.mdx +++ /dev/null @@ -1,75 +0,0 @@ ---- -description: Understand how to change the behavior of the allow-tigera tier. ---- - -# Change allow-tigera tier behavior - -:::warning -The `allow-tigera` tier contains policies that secure $[prodname] components and is critical to cluster integrity. It is controlled by the Tigera Operator, and policies in the tier should not be edited, and the tier should not be moved. Although you can change the behavior of allow-tigera using adjacent tiers, you can inadvertently break critical cluster traffic. We highly recommend that you work with Support to implement changes around `allow-tigera` to prevent service disruption. - -::: - -## Big picture - -Change traffic behavior of the tier that secures $[prodname] components. - -## Value - -Although the tier that secures $[prodname] components cannot be changed, you can create policies in adjacent tiers to change its behavior. - -## Concepts - -$[prodname] automatically creates the `allow-tigera` tier during installation with network policies that select traffic to and from Tigera components. These policies ensure that traffic required for $[prodname] operation is allowed, and that any unnecessary traffic involving Tigera components is denied. 
This tier prevents disruption of $[prodname] functionality in case of network policy misconfiguration impacting Tigera components, and denies unexpected traffic in case of defect or compromise. - -### Ownership and management of allow-tigera - -Tigera defines the `allow-tigera` tier and manages the policies within it. The Tigera Operator installs and monitors these policies, ensuring they always match the state defined by Tigera. Management by the Operator also ensures integrity for upgrades. - -:::note - -The `allow-tigera` tier and its policies should not be edited, and the tier should not be moved. However, if you inadvertently make changes they are automatically reverted by the Operator to ensure your cluster is always protected. - -::: - -## Tutorial - -### Change behavior of allow-tigera - -If you want to change the way traffic is enforced by the `allow-tigera` tier, you must create policy in an adjacent tier to meet your needs. For example, if a policy in the `allow-tigera` tier allows or denies traffic, and you want to change how that traffic is enforced, you can create a policy in a tier before `allow-tigera` that selects the same traffic to make your desired changes. Similarly, if a policy in the `allow-tigera` tier passes or does not select traffic that you want to enforce, you can create a policy in a tier after `allow-tigera` to select this traffic to meet the desired behavior. - -### Example: use preceding tier to tighten security - -Let's say an `allow-tigera` policy allows ingress traffic from a $[prodname] component that you do not use, and you want to tighten enforcement to not allow this traffic. - -Within a tier that comes before `allow-tigera`, you can create a policy that selects the same endpoint and contains ingress rules that deny traffic from that component and pass to `allow-tigera` for traffic from other components. - -```yaml -# allow-tigera.es-gateway-access allows ingress from deep packet inspection, a feature not utilized for the purpose of this example. -# This policy tightens the scope of allowed ingress to es-gateway without modifying the allow-tigera policy directly. - -apiVersion: projectcalico.org/v3 -kind: NetworkPolicy -metadata: - name: preceding-tier.es-gateway-access - namespace: tigera-elasticsearch -spec: - # Place in a tier prior to allow-tigera. - tier: preceding-tier - - # Select the same endpoint as the original policy. - selector: k8s-app == 'tigera-secure-es-gateway' - ingress: - # Select the same component ingress. - - source: - selector: k8s-app == 'tigera-dpi' - namespaceSelector: name == 'tigera-dpi' - # Enact different behavior (originally: Allow) - action: Deny - - # Defer to allow-tigera for other ingress/egress decisions for this endpoint. - - action: Pass -``` - -This example shows how you can change the impact of the `allow-tigera` tier on traffic without modifying the tier itself. This makes your changes more maintainable, and allows the allow-tigera tier to continue to receive updates as $[prodname] evolves without you needing to reconcile your changes each release. - -For help to manage or change the behavior of the `allow-tigera` tier, contact Tigera Support. 
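
### Example: use subsequent tier to enforce passed traffic

Conversely, if a policy in `allow-tigera` passes traffic (or does not select traffic) that you want to enforce yourself, a policy in a tier ordered after `allow-tigera` can pick it up. The following is a minimal, hypothetical sketch only: the tier name `subsequent-tier`, the source selectors, and the rule actions are illustrative placeholders, not policies shipped or managed by the Tigera Operator.

```yaml
# Hypothetical sketch: enforce traffic that allow-tigera passes or does not select.
# All names and selectors below are placeholders for illustration.
apiVersion: projectcalico.org/v3
kind: NetworkPolicy
metadata:
  # Policies in a non-default tier are prefixed with the tier name.
  name: subsequent-tier.es-gateway-passed-ingress
  namespace: tigera-elasticsearch
spec:
  # Place in a tier ordered after allow-tigera.
  tier: subsequent-tier
  # Select the endpoint whose passed or unselected traffic you want to enforce.
  selector: k8s-app == 'tigera-secure-es-gateway'
  types:
    - Ingress
  ingress:
    # Allow only the sources you expect; everything else falls through to the final Deny.
    - action: Allow
      protocol: TCP
      source:
        selector: k8s-app == 'tigera-manager'
        namespaceSelector: name == 'tigera-manager'
    - action: Deny
```

As with the preceding-tier example, this changes how the traffic is ultimately enforced without editing the `allow-tigera` tier itself.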
diff --git a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/policy-tiers/index.mdx b/calico-enterprise_versioned_docs/version-3.19-2/network-policy/policy-tiers/index.mdx deleted file mode 100644 index 9e9dc39a52..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/policy-tiers/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: Learn how policy tiers allow diverse teams to securely manage Kubernetes policy. -hide_table_of_contents: true ---- - -# Policy tiers - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/policy-tiers/policy-tutorial-ui.mdx b/calico-enterprise_versioned_docs/version-3.19-2/network-policy/policy-tiers/policy-tutorial-ui.mdx deleted file mode 100644 index 3e1d6768e3..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/policy-tiers/policy-tutorial-ui.mdx +++ /dev/null @@ -1,244 +0,0 @@ ---- -description: Covers the basics of Calico Cloud network policy. ---- - -# Network policy tutorial - -## What you will learn: - -- How to create a policy in the web console -- How labels and selectors work -- Basics of policy ordering and tiers - -## Scenario - -Let's start with a sample Kubernetes cluster. - -![policy-tutorial-overview](/img/calico-enterprise/policy-tutorial-overview.png) - -| Item | Description | -| ------------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| Kubernetes cluster | A Kubernetes cluster with four namespaces and three nodes that run the pods in the cluster. | -| Namespace | Four namespaces named blue, red, green, and purple represent different applications running in the cluster. | -| Pod | Pods with meaningful labels for our applications:
    - FE (frontend pods)
    - BE (backend pods) | -| NetworkSet | An arbitrary set of IP subnetworks/CIDRs or domains that can be matched by standard label selectors in Kubernetes or $[prodname] network policy. Network sets are a $[prodname] namespaced resource. | -| GlobalNetwork Set | An arbitrary set of IP subnetworks/CIDRs or domains that can be matched by standard label selectors in Kubernetes or $[prodname] network policy. Global network sets are a $[prodname] global resource. | -| ServiceAccount | Provides an identity for processes that run in a pod. Service accounts are a Kubernetes namespaced resource. | -| HostEndpoint (HEP) | Physical or virtual interfaces attached to a host that runs $[prodname]. HEPs enforce $[prodname] policy on the traffic that enters or leaves the host’s default network namespace through the interfaces. HEPs are a $[prodname] global resource. | -| External component | A machine (physical or virtual) that runs outside of the Kubernetes cluster. | - -## Create a network policy - -To follow along in the web console, click **Policies**. - -There are three main parts to every $[prodname] policy: - -- **Scope** - namespace or global -- **Applies to** - objects within the above scope to which policy rules will be applied using labels and selectors -- **Type** - whether this policy affects ingress, egress, or both - - Ingress - policy rules to apply to connections inbound to the selected objects - - Egress - policy rules to apply to connections outbound from the selected objects - -![policy-parts](/img/calico-enterprise/policy-parts.png) - -Let's look at each part. - -## Scope - -Scope defines the reach of your policy. Use this dropdown to determine whether your policy applies globally or to a specific namespace. Think of scope as the "top-level scope" that can be further specified using the "Applies to" selection that follows. - -- **Global** - - If you select global, but do not add entries in the **Applies to** field to further limit scope, _every pod and host endpoint (HEP) in our cluster would be in scope_. The following example uses the global option to limit the scope to all pods and HEPs (noted by check marks). - - ![policy-tutorial-global-scope](/img/calico-enterprise/policy-tutorial-global-scope.png) - -- **Namespace** - - If you select namespace, but do not add entries in the **Applies to** field to further limit scope, _every pod in this policy's namespace would be in scope_. The following example uses the namespace option to limit the scope to pods in the RED namespace. - - ![policy-tutorial-namespace-scope](/img/calico-enterprise/policy-tutorial-namespace-scope.png) - -### Applies to - -As discussed above, selecting **Applies to** lets you further limit pods in a policy. You can think of it as the "top-level endpoint selector". 
You define labels on your endpoints, namespaces, and service accounts, then use label selectors to limit connections by matching the following object types: - -- **Endpoints** - - Specify one or more label selectors to match specific endpoints, or select all endpoints - -- **Namespaces** (available only when the Scope is global) - - Specify one or more label selectors to match specific namespaces, or select all namespaces - -- **Service Accounts** - - Specify one or more label selectors to match specific service accounts, or select all service accounts - -For example, if we select the BLUE namespace and apply it to only pods with the label, `app/tier == FE`, - -![blue-namespace](/img/calico-enterprise/blue-namespace.png) - -the resulting scope in our diagram would be only the pods labeled, `FE`: - -![blue-namespace-pods](/img/calico-enterprise/blue-namespace-pods.png) - -### Type - -In the Type section, you specify whether the policy impacts ingress, egress, or both. - -Note that ingress and egress are defined from the point of view of the _scoped endpoints_ (pods or host endpoints). In the previous diagram, the scoped endpoints are the three pods labeled, `app/tier:FE`. - -- Ingress rules filter traffic _coming to_ the scoped endpoints -- Egress rules filter traffic _leaving_ the scoped endpoints - -Select the **Ingress** rule, and click **+ Add ingress rule** to access the **Create New Policy rules** page. - -### Endpoint selector - -The endpoint selector lets you select the endpoint traffic that is matched within the scope you've defined in the policy. - -In our example, the policy is scoped to endpoints that have the `app/tier == FE` label in the BLUE namespace. In the context of an egress rule, when we add the `app/tier == BE` endpoint selector, all TCP traffic from endpoints that have the `app/tier == BE` label will be allowed to the `app/tier == FE` endpoints. - -![policy-tutorial-endpoint-selector](/img/calico-enterprise/policy-tutorial-endpoint-selector.png) - -Note that endpoints that have the `app/tier == BE` label in other namespaces are not matched because the policy is namespace scoped. - -### Namespace selector - -This is where things can get interesting. In the previous example, we did not select anything in the namespace selector. Let's change the namespace selector to have both the BLUE and GREEN namespaces. - -![endpoint-selector-blue-green](/img/calico-enterprise/endpoint-selector-blue-green.png) - -Although the overall policy is scoped to the BLUE namespace, we can match endpoints in other namespaces on a per-rule basis. Note that the top-level scope that you select remains unchanged, meaning that the policy is still applied only to endpoints in the BLUE namespace. - -![namespace-selector](/img/calico-enterprise/namespace-selector.png) - -### Network selector - -Using the Nets selector, we can add CIDR addresses to be matched by the policy rule. - -![network-selector](/img/calico-enterprise/network-selector.png) - -### Service account selector - -Network policies can also be applied to the endpoint’s service account. - -Using the service account selector, we can apply rules to traffic from any endpoint whose service account matches the name or label selector. 
- -![service-account-selector](/img/calico-enterprise/service-account-selector.png) - -### Use Match All for wider matches in policy rules - -The **Match All** policy rule (`all()` in YAML) matches traffic for: - -- All endpoints in a namespace (if the policy scope is namespace) -- All endpoints (if the policy scope is global) - -Let's look at an example of using **Match All** traffic in a namespaced policy: - -- Scope is namespaced (BLUE) -- Applies to `app/tier == FE` - -Suppose we want to match traffic to the pod labeled `BE`, and the $[prodname] `networkset-1`. - -![match-all-namespace](/img/calico-enterprise/match-all-namespace.png) - -To do this, we can use the policy rule endpoint selector, **Match All**. - -![match-all-endpoints](/img/calico-enterprise/match-all-endpoints.png) - -Not only is the pod labeled `BE` included, but also the $[prodname] `networkset-1`. - -![match-all-endpoints-example](/img/calico-enterprise/match-all-endpoints-example.png) - -Note that we could have created individual selectors to match the pods labeled `BE` and the `networkset-1` network set. - -**Match All traffic with namespace selectors** - -In the following example, if we select **Match All** endpoints, but in the **Namespace selector**, we select both the BLUE and GREEN namespaces, the results for matching are: all pods and network sets in the BLUE and GREEN namespaces. - -![namespace-match-all](/img/calico-enterprise/namespace-match-all.png) - -**Global selector** - -Let's see what happens when we select the **Global** selector. - -![namespace-selector-global](/img/calico-enterprise/namespace-selector-global.png) - -In our example, the Global selector selects HEPs and global network sets. You might think that Global (`global()` in YAML) would select all endpoints, but it doesn't. Global means "do not select any namespaced resources" (which includes namespaced network set resources). Another way to express it is, do not select any workload endpoints. - -![heps-networksets](/img/calico-enterprise/heps-networksets.png) - -**Endpoint selector, unspecified** - -Next, let's see what happens when the policy rule does not specify any selection criteria. In this example, the rule selects all workloads, network sets, endpoints, and host endpoints within scope of the policy, including external components (the VM database). - -![unspecified](/img/calico-enterprise/unspecified.png) - -Now that you know the basic elements of a network policy, let's move on to policy ordering and tiers. - -## Policy ordering - -$[prodname] policies can have order values that control the order of precedence. For both network policies and global network policies, $[prodname] applies the policy with the lowest value first. - -![policy-ordering](/img/calico-enterprise/policy-ordering.png) - -### Mixing Kubernetes and $[prodname] policies - -Kubernetes and $[prodname] policies work side by side without a problem. However, Kubernetes network policies cannot be assigned an order value, so $[prodname] will set an implicit order value of 1000 for any Kubernetes network policies. - -:::note -Policies are immediately applied to any new connections. However, for existing connections that are already open, the policy changes will only take effect after the connection has been reestablished. This means that any ongoing sessions may not immediately reflect policy changes until they are initiated again. -::: - -### $[prodname] policies with no order value - -$[prodname] policies with order values take precedence. 
Policies without order values take lower precedence and are processed alphabetically. - -## Tiers - -Tiers are a hierarchical construct used to group policies and enforce higher precedence policies that cannot be circumvented by other teams. Access to tiers is controlled using user role permissions. For example, a security team can implement high-level policy (for example, blocking access to/from IP ranges in particular countries), while developers in a later tier can control specific rules for the microservices of an app running in the cluster. - -### Policy processing overview - -When a new connection is processed by $[prodname], each tier that contains a policy that selects the endpoint processes the connection. Tiers are sorted by their order - the smallest number first. Policies in each tier are then processed in order from lowest to highest. For example, a policy of order 800 is processed before a policy of order 1000. - -- If a network policy or global network policy in the tier allows or denies the connection, then evaluation is done: the connection is handled accordingly. - -- If a network policy or global network policy in the tier passes the connection, the next tier containing a policy that selects the endpoint processes the connection. - -After a Pass action, if no subsequent tier contains any policies that apply to the pod, the connection is allowed. - -If the tier contains policies that apply to the endpoint, but the policies take no action on the connection, the connection is dropped by an implicit deny. - -If no tiers contain policies that apply to the endpoint, the connection is allowed by an implicit allow. - -### Policies with no order value - -You can create policies without an order value. When a policy with no order value is placed in a tier with other policies that do have an order value, the policies are processed as follows: - -- Policies are evaluated from smallest to largest order value within the tier -- Policies with no order value are processed last in the tier, but before the implicit deny -- When multiple policies without an order value are present in a tier, they are processed in alphabetical order. However, we do not recommend relying on alphabetical ordering because it is hard to operationalize. - -### How policy action rules affect traffic processing - -It is also important to understand that $[prodname] policy action rules affect how traffic and connections are processed. Let's go back to the drop-down menu on the Create New Policy Rule page. - -Action defines what should happen when a connection matches this rule. - -![policy-tutorial-action](/img/calico-enterprise/policy-tutorial-action.png) - -- **Allow or Deny** - traffic is allowed or denied and the connection is handled accordingly. No further rules are processed. -- **Pass** - skips to the next tier that contains a policy that applies to the endpoint, and processes the connection. If the tier applies to the endpoint but no action is taken on the connection, the connection is dropped. 
- -**Log** - creates a log, and evaluation continues processing to the next rule - -## Additional resources - -The following topics go into further detail about concepts described in this tutorial: - -- [Get started with network policy](../beginners/calico-network-policy.mdx) -- [Service account selectors](../beginners/policy-rules/service-accounts.mdx) -- [Get started with tiered network policy](tiered-policy.mdx) -- [Get started with network sets](../networksets.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/policy-tiers/rbac-tiered-policies.mdx b/calico-enterprise_versioned_docs/version-3.19-2/network-policy/policy-tiers/rbac-tiered-policies.mdx deleted file mode 100644 index 03e9cc7d3b..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/policy-tiers/rbac-tiered-policies.mdx +++ /dev/null @@ -1,518 +0,0 @@ ---- -description: Configure RBAC to control access to policies and tiers. ---- - -# Configure RBAC for tiered policies - -## Big picture - -Configure fine-grained user access controls for tiered policies. - -## Value - -Self-service is an important part of CI/CD processes for containerization and microservices. $[prodname] provides fine-grained access control (RBAC) for: - -- $[prodname] policy and tiers -- Kubernetes network policy - -## Concepts - -### Standard Kubernetes RBAC - -$[prodname] implements the standard **Kubernetes RBAC Authorization APIs** with `Role` and `ClusterRole` types. The $[prodname] API server integrates with Kubernetes RBAC Authorization APIs as an extension API server. - -### RBAC for policies and tiers - -In $[prodname], global network policy and network policy resources are associated with a specific tier. Admins can configure access control for these $[prodname] policies using standard Kubernetes `Role` and `ClusterRole` resource types. This makes it easy to manage RBAC for both Kubernetes network policies and $[prodname] tiered network policies. RBAC permissions include managing resources using the $[prodname] web console and `kubectl`. - -### Fine-grained RBAC for policies and tiers - -RBAC permissions can be split by resources ($[prodname] and Kubernetes), and by actions (CRUD). Tiers should be created by administrators. Full CRUD operations on tiers are synonymous with full management of network policy. Full management of network policy and global network policy also requires `GET` permissions on 1) any tier a user can view/manage, and 2) the required access to the tiered policy resources. - -Here are a few examples of how you can fine-tune RBAC for tiers and policies. - -| **User** | **Permissions** | -| --------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| Admin | The default **tigera-network-admin** role lets you create, update, delete, get, watch, and list all $[prodname] resources (full control). Examples of limiting Admin access:
    • List tiers only
    • List only specific tiers
    | -| Non-Admin | The default **tigera-ui-user** role allows users to only list $[prodname] policy and tier resources. Examples of limiting user access:
    • Read-only access to all policy resources across all tiers, but only write access for NetworkPolicies with a specific tier and namespace.
    • Perform any operations on NetworkPolicies and GlobalNetworkPolicies.
    • List tiers only.
    • List or modify any policies in any tier. Fully manage only Kubernetes network policies in the **default** tier, in the **default** namespace, with read-only access for all other tiers.
    | - -### RBAC definitions for Calico Enterprise network policy - -To specify per-tier RBAC for the $[prodname] network policy and $[prodname] global network policy, use pseudo resource kinds and names in the `Role` and `ClusterRole` definitions. For example, - -```yaml -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: tier-default-reader -rules: - - apiGroups: ['projectcalico.org'] - resources: ['tiers'] - resourceNames: ['default'] - verbs: ['get'] - - apiGroups: ['projectcalico.org'] - resources: ['tier.networkpolicies'] - resourceNames: ['default.*'] - verbs: ['get', 'list'] -``` - -Where: - -- **resources**: `tier.globalnetworkpolicies` and `tier.networkpolicies` -- **resourceNames**: - - Blank - any policy of the specified kind across all tiers. - - `.*` - any policy of the specified kind within the named tier. - - `` - the specific policy of the specified kind. Because the policy name is prefixed with the tier name, this also specifies the tier. - -## Before you begin... - -**Required** - -A **cluster-admin** role with full permissions to create and modify resources. - -**Recommended** - -A rough idea of your tiered policy workflow, and who should access what. See [Configure tiered policies](tiered-policy.mdx). - -## How to - -- [Create Admin users, full permissions](#create-admin-users-full-permissions) -- [Create minimum permissions for all non-Admin users](#create-minimum-permissions-for-all-non-admin-users) - -:::note - -` kubectl auth can-i` cannot be used to check RBAC for tiered policy. - -::: - -### Create Admin users, full permissions - -Create an Admin user with full access to the $[prodname] web console (as well as everything else in the cluster) using the following command. See the Kubernetes documentation to identify users based on your chosen [authentication method](https://kubernetes.io/docs/reference/access-authn-authz/authentication/), and how to use the [RBAC resources](https://kubernetes.io/docs/reference/access-authn-authz/rbac/). - -```bash -kubectl create clusterrolebinding permissive-binding \ - --clusterrole=cluster-admin \ - --user= -``` - -### Create minimum permissions for all non-Admin users - -All users using the $[prodname] web console should be able to create authorizationreviews and authorizationrequests as well as access -license information through the services/proxy https:tigera-api:8080. - -1. Download the [min-ui-user-rbac.yaml manifest]($[tutorialFilesURL]/min-ui-user-rbac.yaml). - The roles and bindings in this file provide a minimum starting point for setting up RBAC for your users according to your specific security requirements. - This manifest provides basic RBAC to view some statistical data in the UI but does not provide permissions to - view or modify any network policy related configuration. - -1. Run the following command to replace `` with the name or email of the user you are providing permissions to: - - ```bash - sed -i -e 's///g' min-ui-user-rbac.yaml - ``` - -1. Use the following command to install the bindings: - - ```bash - kubectl apply -f min-ui-user-rbac.yaml - ``` - -## Tutorial - -This tutorial shows how to use RBAC to control access to resources and CRUD actions for a non-Admin user, John, with the username **john**. 
- -The RBAC examples shown will include: - -- [User cannot read policies in any tier](#user-cannot-read-policies-in-any-tier) -- [User can view all policies, and modify policies in the default namespace and tier](#user-can-view-all-policies-and-modify-policies-in-the-default-namespace-and-tier) -- [User can read policies only in both the default tier and namespace](#user-can-read-policies-only-in-both-the-default-tier-and-namespace) -- [User can read policies only in both a specific tier and in the default namespace](#user-can-read-policies-only-in-both-a-specific-tier-and-in-the-default-namespace) -- [User can only view a specific tier](#user-can-only-view-a-specific-tier) -- [User can read all policies across all tiers and namespaces](#user-can-read-all-policies-across-all-tiers-and-namespaces) -- [User has full control over policies only in both a specific tier and in the default namespace](#user-has-full-control-over-policies-only-in-both-a-specific-tier-and-in-the-default-namespace) - -### User cannot read policies in any tier - -User 'john' is forbidden from reading policies in any tier (**default** tier, and **net-sec** tier). - -When John issues the following command: - -```bash -kubectl get networkpolicies.p -``` - -It returns: - -``` -Error from server (Forbidden): networkpolicies.projectcalico.org is forbidden: User "john" cannot list networkpolicies.projectcalico.org in tier "default" and namespace "default" (user cannot get tier) -``` - -Similarly, when John issues this command: - -```bash -kubectl get networkpolicies.p -l projectcalico.org/tier==net-sec -``` - -It returns: - -``` -Error from server (Forbidden): networkpolicies.projectcalico.org is forbidden: User "john" cannot list networkpolicies.projectcalico.org in tier "net-sec" and namespace "default" (user cannot get tier) -``` - -:::note - -The .p' extension (`networkpolicies.p`) is short -for "networkpolicies.projectcalico.org" and used to -differentiate it from the Kubernetes NetworkPolicy resource and -the underlying CRDs (if using the Kubernetes Datastore Driver). - -::: - -:::note - -The label for selecting a tier is `projectcalico.org/tier`. -When a label selector is not specified, the server defaults the selection to the -`default` tier. Alternatively, a field selector (`spec.tier`) may be used to select -a tier. - -```bash -kubectl get networkpolicies.p --field-selector spec.tier=net-sec -``` - -::: - -### User can view all policies, and modify policies in the default namespace and tier - -1. Download the [`read-all-crud-default-rbac.yaml` manifest]($[tutorialFilesURL]/read-all-crud-default-rbac.yaml). - -1. Run the following command to replace `` with the `name or email` of - the user you are providing permissions to: - - ```bash - sed -i -e 's///g' read-all-crud-default-rbac.yaml - ``` - -1. Use the following command to install the bindings: - - ```bash - kubectl apply -f read-all-crud-default-rbac.yaml - ``` - -The roles and bindings in this file provide the permissions to read all policies across all tiers and to fully manage -policies in the **default** tier and **default** namespace. This file includes the minimum required `ClusterRole` and `ClusterRoleBinding` definitions for all UI users (see `min-ui-user-rbac.yaml` above). - -### User can read policies only in both the default tier and namespace - -In this example, we give user 'john' permission to read policies only in both the **default** tier and namespace. 
- -```yaml -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: tigera-example-get-default-tier -rules: - # To access Calico policy in a tier, the user requires "get" access to that tier. -- apiGroups: ["projectcalico.org"] - resources: ["tiers"] - resourceNames: ["default"] - verbs: ["get"] - ---- - -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: tigera-example-read-policies-in-default-tier -rules: - # This allows "get" and "list" of the Calico NetworkPolicy resources in the default tier. -- apiGroups: ["projectcalico.org"] - resources: ["tier.networkpolicies"] - resourceNames: ["default.*"] - verbs: ["get", "list"] - ---- - -# tigera-example-get-default-tier is applied globally -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: john-can-get-default-tier -subjects: -- kind: User - name: john - apiGroup: rbac.authorization.k8s.io -roleRef: - kind: ClusterRole - name: tigera-example-get-default-tier - apiGroup: rbac.authorization.k8s.io - ---- - -# tigera-example-read-policies-in-default-tier is applied per-namespace -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: john-can-read-policies-in-default-tier-and-namespace -subjects: -- kind: User - name: john - apiGroup: rbac.authorization.k8s.io -roleRef: - kind: ClusterRole - name: tigera-example-read-policies-in-default-tier - apiGroup: rbac.authorization.k8s.io -``` - -With the above, user john is able to list all NetworkPolicy resources in the **default** tier: - -```bash -kubectl get networkpolicies.p --all-namespaces -``` - -With some example policies on the cluster, returns: - -``` -NAMESPACE NAME CREATED AT -blue default.calico-np-blue-ns-default-tier 2021-07-26T09:05:11Z -default default.calico-np-default-ns-default-tier 2021-07-26T09:05:11Z -green default.calico-np-green-ns-default-tier 2021-07-26T09:05:13Z -red default.calico-np-red-ns-default-tier 2021-07-26T09:05:12Z -yellow default.calico-np-yellow-ns-default-tier 2021-07-26T09:05:13Z -``` - -As intended, user john can only examine those in the **default** namespace: - -```bash -kubectl get networkpolicies.p default.calico-np-green-ns-default-tier -o yaml -n=green -``` - -Correctly returns: - -``` -Error from server (Forbidden): networkpolicies.projectcalico.org "default.calico-np-green-ns-default-tier" is forbidden: User "john" cannot get networkpolicies.projectcalico.org in tier "default" and namespace "green" -``` - -John also still cannot access tier **net-sec**, as intended: - -```bash -kubectl get networkpolicies.p -l projectcalico.org/tier==net-sec -``` - -This returns: - -``` -Error from server (Forbidden): networkpolicies.projectcalico.org is forbidden: User "john" cannot list networkpolicies.projectcalico.org in tier "net-sec" and namespace "default" (user cannot get tier) -``` - -### User can read policies only in both a specific tier and in the default namespace - -Let's assume that the kubernetes-admin gives user 'john' the permission to list the policies in tier **net-sec**, but only examine the detail of the policies that are also in the **default** namespace. -To provide these permissions to user 'john', use the following `ClusterRoles`,`ClusterRoleBinding` and `RoleBinding`. - -```yaml -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: tigera-example-get-net-sec-tier -rules: - # To access Calico policy in a tier, the user requires "get" access to that tier. 
-- apiGroups: ["projectcalico.org"] - resources: ["tiers"] - resourceNames: ["net-sec"] - verbs: ["get"] - ---- - -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: tigera-example-read-policies-in-net-sec-tier -rules: - # This allows "get" and "list" of the Calico NetworkPolicy resources in the net-sec tier. -- apiGroups: ["projectcalico.org"] - resources: ["tier.networkpolicies"] - resourceNames: ["net-sec.*"] - verbs: ["get", "list"] - ---- - -# tigera-example-get-net-sec-tier is applied globally -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: john-can-get-net-sec-tier -subjects: -- kind: User - name: john - apiGroup: rbac.authorization.k8s.io -roleRef: - kind: ClusterRole - name: tigera-example-get-net-sec-tier - apiGroup: rbac.authorization.k8s.io - ---- - -# tigera-example-read-policies-in-net-sec-tier is applied per-namespace -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: john-can-read-policies-in-net-sec-tier-and-namespace -subjects: -- kind: User - name: john - apiGroup: rbac.authorization.k8s.io -roleRef: - kind: ClusterRole - name: tigera-example-read-policies-in-net-sec-tier - apiGroup: rbac.authorization.k8s.io -``` - -### User can only view a specific tier - -In this example, the following `ClusterRole` and `ClusterRoleBinding` can be used to provide 'get' access to the **net-sec** -tier. This has the effect of making the **net-sec** tier visible in the $[prodname] web console (including listing the names of the policies it contains). - -However, to modify or view the details of policies within the **net-sec** tier, additional RBAC permissions would be required. - -```yaml -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: tigera-example-make-net-sec-tier-visible -rules: - # To access Calico policy in a tier, the user requires "get" access to that tier. -- apiGroups: ["projectcalico.org"] - resources: ["tiers"] - resourceNames: ["net-sec"] - verbs: ["get"] - ---- - -# tigera-example-make-net-sec-tier-visible is applied globally -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: john-can-view-the-net-sec-tier -subjects: -- kind: User - name: john - apiGroup: rbac.authorization.k8s.io -roleRef: - kind: ClusterRole - name: tigera-example-make-net-sec-tier-visible - apiGroup: rbac.authorization.k8s.io -``` - -### User can read all policies across all tiers and namespaces - -In this example, a single `ClusterRole` is used to provide read access to all policy resource types across all tiers. In this case, there is no need to use both `ClusterRoleBindings` and `RoleBindings` to map these abilities to the target user, because the intention is to for the policy to apply to all current and future namespaces on the cluster, so a `ClusterRoleBinding` provides the desired granularity. - -```yaml -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: tigera-example-all-tiers-and-namespaces-policy-reader -rules: - # To access Calico policy in a tier, the user requires "get" access to that tier. - # Not specifying any specific "resourceNames" provides access to all tiers. -- apiGroups: ["projectcalico.org"] - resources: ["tiers"] - verbs: ["get"] - # This allows read access to the Kubernetes NetworkPolicy resources (these are always in the default tier). 
-- apiGroups: ["networking.k8s.io", "extensions"] - resources: ["networkpolicies"] - verbs: ["get","watch","list"] - # This allows read access to the Calico NetworkPolicy and GlobalNetworkPolicies. - # Not specifying any specific "resourceNames" provides access to them in all tiers. -- apiGroups: ["projectcalico.org"] - resources: ["tier.networkpolicies","tier.globalnetworkpolicies"] - verbs: ["get","watch","list"] - ---- - -# tigera-example-all-tiers-and-namespaces-policy-reader is applied globally, with a single ClusterRoleBinding, -# since all the rules it contains apply to all current and future namespaces on the cluster. -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: read-all-tier -subjects: -- kind: User - name: john - apiGroup: rbac.authorization.k8s.io -roleRef: - kind: ClusterRole - name: tigera-example-all-tiers-and-namespaces-policy-reader - apiGroup: rbac.authorization.k8s.io -``` - -### User has full control over policies only in both a specific tier and in the default namespace - -In this example, two `ClusterRole` objects are used to provide full access control of Calico NetworkPolicy -resource types in the **net-sec** tier: - -- The `tiers` resource is bound to a user using a `ClusterRoleBinding`, because it is a global resource. - This results in the user having the ability to read the contents of the tier across all namespaces. -- The `networkpolicies` resources are bound to a user using a `RoleBinding`, because the aim in this - case was to make them CRUD-able only in the default namespace. - You only need this one `ClusterRole` to be defined, but it can be applied to different namespaces - using additional `RoleBinding` objects. If the intention was to apply it to all current and future namespaces, - a `ClusterRoleBinding` could be used. - -```yaml -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: tigera-example-get-net-sec-tier -rules: - # To access Calico policy in a tier, the user requires "get" access to that tier. -- apiGroups: ["projectcalico.org"] - resources: ["tiers"] - resourceNames: ["net-sec"] - verbs: ["get"] - ---- - -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: tigera-example-crud-policies-in-net-sec-tier -rules: - # This allows full CRUD access to the Calico NetworkPolicy resources in the net-sec tier. 
-- apiGroups: ["projectcalico.org"] - resources: ["tier.networkpolicies"] - resourceNames: ["net-sec.*"] - verbs: ["*"] - ---- - -# tigera-example-get-net-sec-tier is applied globally -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: john-can-get-net-sec-tier -subjects: -- kind: User - name: john - apiGroup: rbac.authorization.k8s.io -roleRef: - kind: ClusterRole - name: tigera-example-get-net-sec-tier - apiGroup: rbac.authorization.k8s.io - ---- - -# tigera-example-crud-policies-in-net-sec-tier is applied per-namespace -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: john-can-crud-policies-in-net-sec-tier-and-namespace -subjects: -- kind: User - name: john - apiGroup: rbac.authorization.k8s.io -roleRef: - kind: ClusterRole - name: tigera-example-crud-policies-in-net-sec-tier - apiGroup: rbac.authorization.k8s.io -``` diff --git a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/policy-tiers/tiered-policy.mdx b/calico-enterprise_versioned_docs/version-3.19-2/network-policy/policy-tiers/tiered-policy.mdx deleted file mode 100644 index 4698632229..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/policy-tiers/tiered-policy.mdx +++ /dev/null @@ -1,207 +0,0 @@ ---- -description: Understand how tiered policy works and supports microsegmentation. ---- - -# Get started with policy tiers - -## Seamless network policy integration - -**Network policy** is the primary tool for securing a Kubernetes network. It lets you restrict network traffic in your cluster so only the traffic that you want to flow is allowed. $[prodname] provides more robust policy than Kubernetes, but you can use them together -- seamlessly. $[prodname] supports: - -- $[prodname] network policy, (namespaced) -- $[prodname] global network policy (non-namespaced, global) -- Kubernetes network policy - -## Tiers: what and why? - -**Tiers** are a hierarchical construct used to group policies and enforce higher precedence policies that cannot be circumvented by other teams. As you will learn in this tutorial, tiers have built-in features that support workload microsegmentation. - -All $[prodname] and Kubernetes network policies reside in tiers. You can start "thinking in tiers" by grouping your teams and types of policies within each group. For example, we recommend these three tiers (platform, security, and application). - -![policy-types](/img/calico-enterprise/policy-types.png) - -Next, you can determine the priority of policies in tiers (from top to bottom). In the following example, the platform and security tiers use $[prodname] global network policies that apply to all pods, while developer teams can safely manage pods within namespaces using Kubernetes network policy for their applications and microservices. - -![policy-tiers](/img/calico-enterprise/policy-tiers.png) - -## Create a tier and policy - -To create a tier and policy in the web console: - -1. In the left navbar, click **Policies**. -1. On the **Policies Board**, click **Add Tier**. -1. Name the tier, select **Order, Add after** `tigera-security`, and save. -1. To create a policy in the tier, click **+ Add policy**. - -You can export all policies or a single policy to a YAML file. - -Here is a sample YAML that creates a security tier and uses `kubectl` to apply it. 
- -```yaml -apiVersion: projectcalico.org/v3 -kind: Tier -metadata: - name: security -spec: - order: 300 -``` - -```bash -kubectl apply -f security.yaml -``` - -## The default tier: always last - -The default tier is created during installation and is always the last tier. - -![default-tier](/img/calico-enterprise/default-tier.png) - -The default tier is where: - -- You manage all Kubernetes network policies -- Network and global network policies are placed when you upgrade from Project Calico to $[prodname] -- Recommended policies are placed when you use the **Recommend a policy** feature - -## System tiers - -System tiers are added during installation and are hidden by default. - -- The **allow-tigera** tier contains policies that secure $[prodname] components and are controlled by the Tigera Operator. These policies should not be edited, and the tier should not be moved. Inadvertent changes are automatically reverted by the Operator to ensure your cluster is always protected. - -:::warning - -Although it is possible to change the behavior of the `allow-tigera` tier using adjacent tiers, it is not a trivial task. You can break critical cluster traffic and impact the operation of $[prodname]. To prevent loss of cluster services, see [Change allow-tigera tier behavior](allow-tigera.mdx), and contact Support for help. - -::: - -## Moving tiers - -You can move tiers by dragging them in the graphical sequence, but all tiers must be visible before you can reorder them. - -To show all tiers, click **View** and select all of the tiers in the Show tiers list. - -![hidden-tiers](/img/calico-enterprise/hidden-tiers.png) - -Now you can reorder tiers by dragging them. - -## Tier order - -Tiers are ordered from left to right, starting with the highest priority (also called highest precedence) tiers. - -![tier-order](/img/calico-enterprise/tier-order.png) - -In the example above, tier priorities are as follows: - -- **security tier** - is higher priority than platform tier -- **platform tier** - is higher priority than default tier -- **default tier** - is always the last tier and cannot be reordered - -The tier you put as the highest priority (after system tiers) depends on your environment. In compliance-driven environments, the security tier may be the highest priority (as shown above). There is no one-size-fits-all order. - -## Policy processing - -Policies are processed in sequential order from top to bottom. - -![policy-processing](/img/calico-enterprise/policy-processing.png) - -Two mechanisms drive how traffic is processed across tiered policies: - -- Labels and selectors -- Policy action rules - -It is important to understand the roles they play. - -### Labels and selectors - -Instead of IP addresses and IP ranges, network policies in Kubernetes depend on labels and selectors to determine which workloads can talk to each other. Workload identity is the same for Kubernetes and $[prodname] network policies: as pods dynamically come and go, network policy is enforced based on the labels and selectors that you define. 
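To make the label-and-selector model concrete, here is a minimal sketch of a tiered policy written entirely in terms of labels; the `application` tier, the `storefront` namespace, and the `app == 'frontend'`/`app == 'backend'` labels are hypothetical values used only for illustration.

```yaml
apiVersion: projectcalico.org/v3
kind: NetworkPolicy
metadata:
  # Policies in a non-default tier are named with the tier name as a prefix.
  name: application.backend-access
  namespace: storefront
spec:
  tier: application
  # Applies to pods in the storefront namespace labeled app == 'backend'.
  selector: app == 'backend'
  types:
    - Ingress
  ingress:
    # Allow TCP 443 from pods labeled app == 'frontend'; no IP addresses are referenced.
    - action: Allow
      protocol: TCP
      source:
        selector: app == 'frontend'
      destination:
        ports: [443]
```

Because enforcement follows the labels rather than addresses, the policy keeps working as matching pods are created and deleted.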
- -The following diagrams show the relationship between all of the elements that affect traffic flow: - -- **Tiers** group and order policies -- **Policy action rules** define how to process traffic in and across tiers, and policy labels and selectors specify how groups of pods are allowed to communicate with each other and other network endpoints -- The **CNI**, **$[prodname] components**, and underlying **dataplane** (iptables/eBPF) all make use of labels and selectors as part of routing traffic. - -![tier-funnel](/img/calico-enterprise/tier-funnel.png) - -### Policy action rules - -$[prodname] network policy uses action rules to specify how to process traffic/packets: - -- **Allow or Deny** - traffic is allowed or denied and the packet is handled accordingly. No further rules are processed. -- **Pass** - skips to the next tier that contains a policy that applies to the endpoint, and processes the packet. If the tier applies to the endpoint but no action is taken on the packet, the packet is dropped. -- **Log** - creates a log, and evaluation continues processing to the next rule - -### Implicit default deny - -As shown in the following diagram, at the end of each tier is an implicit default deny. This is a safeguard that helps mitigate against unsecured policy. Because of this safeguard, you must explicitly apply the **Pass** action rule when you want traffic evaluation to continue. In the following example, the Pass action in a policy ensures that traffic evaluation continues, and overrides the implicit default deny. - -![implicit-deny](/img/calico-enterprise/implicit-deny.svg) - -Let’s look at a Dev/Ops global network policy in a high precedence tier (Platform). The policy denies ingress and egress traffic to workloads that match selector, `env == "stage"`. To ensure that policies continue to evaluate traffic after this policy, the policy adds a Pass action for both ingress and egress. - -**Pass action rule example** - -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalNetworkPolicy -metadata: - name: devops.stage-env -spec: - tier: devops - order: 255 - selector: env == "stage" - ingress: - - action: Deny - source: - selector: env != "stage" - - action: Pass - egress: - - action: Deny - destination: - selector: env != "stage" - - action: Pass - types: - - Ingress - - Egress -``` - -### Policy endpoint matching across tiers - -Whoever is responsible for tier creation, also needs to understand how policy selects matching endpoints across tiers. For normal policy processing (without apply-on-forward, pre-DNAT, and do-not-track), if no policies within a tier apply to endpoints, the tier is skipped, and the tier's implicit deny behavior is not executed. - -In the following example, **policy D** in the Security tier includes a Pass action rule because we want traffic evaluation to continue to the next tier in sequence. In the Platform tier, there are no selectors in policies that match endpoints so the tier is skipped, including the end of tier deny. Evaluation continues to the Application tier. **Policy J** is the first policy with a matching endpoint. - -![endpoint-match](/img/calico-enterprise/endpoint-match.svg) - -### Default endpoint behavior - -Also, tier managers need to understand the default behavior for endpoints based on whether the endpoint is known or unknown, and the endpoint type. 
As shown in the following table: - -- **Known endpoints** - $[prodname] resources that are managed by Felix -- **Unknown endpoints** - interfaces/resources not recognizable as part of our data model - -| Endpoint type | Default behavior for known endpoints | Default behavior for unknown endpoints (outside of our data model) | -| ---------------------- | ----------------------------------------------------------------------- | ------------------------------------------------------------------ | -| Workload, $[prodname] | Deny | Deny | -| Workload, Kubernetes | Allow ingress from same Kubernetes namespace; allow all egress | Deny | -| Host | Deny. With exception of auto host endpoints, which get `default-allow`. | Fall through and use iptables rules | - -## Best practices for tiered policy - -To control and authorize access to $[prodname] tiers, policies, and Kubernetes network policies, you use Kubernetes RBAC. Security teams can prevent unauthorized viewing or modification of higher precedence (lower order) tiers, while still allowing developers or service owners to manage the detailed policies related to their workloads. - -We recommend: - -- Limit tier creation permissions to Admin users only; creating and reordering tiers affects your policy processing workflow - -- Limit full CRUD operations on tiers and policy management to select Admin users - -- Review your policy processing whenever you add/reorder tiers - - For example, you may need to update Pass action rules to policies before or after the new tier. Intervening tiers may require changes to policies before and after, depending on the endpoints. - -- Use the **policy preview** feature to see effects of policy in action before enforcing it, and use the **staged network policy** feature to test the entire tier workflow before pushing it to production - -## Additional resources - -- For details on using RBAC for fine-grained access to tiers and policies, see [Configure RBAC for tiered policies](rbac-tiered-policies.mdx). diff --git a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/policy-troubleshooting.mdx b/calico-enterprise_versioned_docs/version-3.19-2/network-policy/policy-troubleshooting.mdx deleted file mode 100644 index c6a31b829f..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/policy-troubleshooting.mdx +++ /dev/null @@ -1,42 +0,0 @@ ---- -description: Common policy implementation problems. ---- - -# Troubleshoot policies - -### Problem - -I created ​​my first egress policy with default deny behavior, but now I’ve blocked other traffic. - -### Solution - -In Kubernetes, when there are no egress policies that apply to an endpoint, all egress traffic is allowed. However, as soon as you add the first egress policy to an endpoint, $[prodname] switches to default deny and blocks everything else; this is part of our zero trust network policy model. For new users of $[prodname], this is unexpected behavior (but it’s required by both Kubernetes and $[prodname] policy specs.) - -For egress policy in particular, you may not be used to worrying about “system-level” egress traffic that is now suddenly blocked. For example, most workloads rely on DNS, but you may not have thought of this when writing your policy. So you end up with this problem loop: you allow HTTP traffic, but then your DNS traffic gets blocked, but then HTTP traffic stops working because it relies on DNS to function. - -A natural response to this issue is to add an egress rule to allow DNS(!). 
For example, you add an egress rule “allow UDP to port 53 to namespace kube-system”. In some systems (OpenShift), the DNS pod actually listens on port 5353, not port 53. However, the DNS Service DNATs the traffic from port 53 to port 5353, hiding that detail from the DNS client. $[prodname] then blocks the traffic because it sees the traffic after the DNAT. So $[prodname] sees port 5353, not the expected port 53. - -The solution is to define policy for workload services, not for ports used by workloads. For help, see [Policy for services](beginners/services/index.mdx). - -### Problem - -Traffic is blocked, even though I allow it in a policy. - -### Solution - -The policy that is blocking traffic can reside in your tier or in a different tier. - -1. **Check policies in your tier** - - Go to your policy and see if there is a higher precedence policy in the tier that is blocking processing. - - - If that is not the problem, go to step 2. - - If that is the problem, and if it makes sense for the traffic, you can reorder the policies in the tier. If you cannot, you must change the policy that is dropping traffic to allow your traffic to flow using a Pass action rule. - -2. **Check policies in other tiers** - - Go to the next applicable higher precedence tier for your workload to see if a policy in that tier is blocking traffic. The policy at the end of the tier could be blocking traffic because the default behavior at the end of a tier is to drop traffic as part of zero trust. To unblock traffic, add a **Pass action rule** to the policy, or create a **Pass policy**. - -For help with visibility, use Service Graph to see how traffic is passed. Click on your flow, and view details in the right panel. - -For help with Pass action rules, see [Get started with tiered policy](policy-tiers/tiered-policy.mdx). diff --git a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/recommendations/index.mdx b/calico-enterprise_versioned_docs/version-3.19-2/network-policy/recommendations/index.mdx deleted file mode 100644 index 25f3aec861..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/recommendations/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: Enable policy recommendations for namespaces to improve your security posture. -hide_table_of_contents: true ---- - -# Policy recommendations - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/recommendations/learn-about-policy-recommendations.mdx b/calico-enterprise_versioned_docs/version-3.19-2/network-policy/recommendations/learn-about-policy-recommendations.mdx deleted file mode 100644 index b84e51aae0..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/recommendations/learn-about-policy-recommendations.mdx +++ /dev/null @@ -1,136 +0,0 @@ ---- -description: Policy recommendations tutorial. ---- - -# Policy recommendations tutorial - -## Big picture - -In this tutorial, we show you how recommendations are generated using flow logs in your cluster for traffic to/from namespaces, network sets, private network IPs and public domains. - -### Create resources for the tutorial - -Because the policy recommendation feature requires traffic between endpoints, this step provides these resources for this hands-on tutorial. 
If your cluster is already generating traffic for policy recommendations, you can skip this step and follow along using your own cluster. - -1. Configure Felix for fast flow log collection - - ```bash - kubectl patch felixconfiguration.p default -p '{"spec":{"flowLogsFlushInterval":"10s"}}' - ``` - -1. Download the [policy recommendation tutorial deployment]($[tutorialFilesURL]/policy-recommendation-deployments.yaml) YAML. - -1. Use the following command to create the necessary resources: - - ```bash - kubectl apply -f policy-recommendation-deployments.yaml - ``` - -### Enable policy recommendation - -1. In the web console left navbar, click the **Policies** icon. -1. Select **Recommendations**. -1. Click on **Enable Policy Recommendations**. - -Wait for the recommendations to be generated. Unless otherwise configured, recommendations will take at least 2m30s to be generated, which is the default time for the [Processing Interval](../../reference/resources/policyrecommendations.mdx#spec) setting. - -Once ready, the recommendations will be listed on the main page, under the **Recommendations** tab. - -### Understand the policy recommendation - -You should find a recommendation named `curl-ns` (appended with a five-character suffix, like `-vfzgh`) with policy selector: -``` -Policy Label selector: [[projectcalico.org/namespace == 'curl-ns']] -``` -meaning that this policy pertains to the traffic originating from or destined for the `curl-ns` namespace. - -The policy will display a list of ingress rules: -``` -Allow:Protocol is TCP -From: Namespaces [[projectcalico.org/name == 'service-ns']] -To:Ports [Port is 80 ] -``` -allows ingress traffic, for protocol TCP, on port 80, from the `service-ns` namespace. - -A list of egress rules: -``` -Allow:Protocol is TCP -To:Ports [Port is 8080 ] Domains [www.tigera.io] -``` -allows egress traffic, for protocol TCP, on port 8080, to domain `www.tigera.io`. - -``` -Allow:Protocol is TCP -To:Ports [Port is 80 ] Namespaces [[projectcalico.org/name == 'service-ns']] -``` -allows egress traffic, for protocol TCP, on port 80, to the `service-ns` namespace. - -``` -Allow:Protocol is UDP -To:Ports [Port is 53 ] Namespaces [[projectcalico.org/name == 'kube-system']] -``` -allows egress traffic, for protocol UDP, on port 53, to the `kube-system` namespace. - -``` -Allow:Protocol is TCP -To:Ports [Port is 80 ] Endpoints [[projectcalico.org/name == 'public-ips' and projectcalico.org/kind == 'NetworkSet']] Namespaces global() -``` -allows egress traffic, for protocol TCP, on port 80, to IPs defined in the global network set named `public-ips`. - -``` -Allow:Protocol is TCP -To:Ports [Port is 8080 ] Nets [Is 10.0.0.0/8 OR Is 172.16.0.0/12 OR Is 192.168.0.0/16 ] -``` -allows egress traffic, for protocol TCP, on port 8080, to private range IPs. - -``` -Allow:Protocol is TCP -To:Ports [Port is 80 ] -``` -allows egress traffic, for protocol TCP, on port 80, to public range IPs. - -### Investigate the flows that are used to generate the policy rules - -To view flow logs in Service Graph: - -1. In the web console left navbar, click **Service Graph**. -1. Select **Default** under the VIEWS option. -1. In the bottom pane you will see flow logs in the Flows tab. - -To generate rules, the recommendation engine queries for flow logs that are not addressed by any other policy in the cluster. Subsequently, it builds the missing policies necessary for allowing that traffic. 
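To relate the rules described above to what is created in the cluster, the following is a rough, abbreviated sketch of how such a recommendation could look when rendered as a staged policy in the `namespace-isolation` tier. It is illustrative only; the generated name suffix and the exact set of rules will differ in your cluster.

```yaml
apiVersion: projectcalico.org/v3
kind: StagedNetworkPolicy
metadata:
  # The five-character suffix is generated; yours will differ.
  name: namespace-isolation.curl-ns-vfzgh
  namespace: curl-ns
spec:
  tier: namespace-isolation
  selector: projectcalico.org/namespace == 'curl-ns'
  types:
    - Ingress
    - Egress
  ingress:
    # Corresponds to the ingress rule above: TCP 80 from the service-ns namespace.
    - action: Allow
      protocol: TCP
      source:
        namespaceSelector: projectcalico.org/name == 'service-ns'
      destination:
        ports: [80]
  egress:
    # Corresponds to the first egress rule above: TCP 8080 to the www.tigera.io domain.
    - action: Allow
      protocol: TCP
      destination:
        ports: [8080]
        domains: ['www.tigera.io']
```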
- -### Understand the flow logs used in policy recommendations - -To get a better understanding of which flows contributed to generating the rules in your policy, select **Filter Flows** - -* To find the flows that were used to generate the egress to global network set rule, add: -``` -source_namespace = "curl-ns" AND dest_name_aggr = "public-ips" -``` - -* To find the flows that generated the egress rule to namespace `kube-system`, define query: -``` -source_namespace = "curl-ns" AND dest_namespace = "kube-system" -``` - -You'll notice that each of the flow logs contains a field named, `policies` with a entry like: -``` -1|__PROFILE__|__PROFILE__.kns.curl-ns|allow|0 -``` -meaning that the particular flow was not addressed by any other policy within your cluster. - -You will also find input like: -``` -0|namespace-isolation|curl-ns/namespace-isolation.staged:curl-ns-vfzgh|allow|3 -``` -indicating that the 3rd rule defined in policy **curl-ns-vfzgh**, will allow traffic defined by this flow, once the policy is enforced. - -### Examine policy traffic - -Examine the **Allowed Bytes** field in the **Recommendations** tab for the `curl-ns-recommendation` policy to get a sense of the total bytes allowed by the policy. - -Examine the **Allowed/sec** of each rule in the policy to get a sense of the quantity of traffic allowed per second by the rule in question. - -### When policy recommendations are not generated - -You may wonder why you are not getting policy recommendations, even though there is traffic between endpoints. This is because policy recommendations are generated only for flows that are not captured by any other policy in your cluster. To see if policy is already enforcing the traffic in question, search for the flow log in question, examine the `policies` field, and verify that no other enforced policy allows or denies traffic for that flow. diff --git a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/recommendations/policy-recommendations.mdx b/calico-enterprise_versioned_docs/version-3.19-2/network-policy/recommendations/policy-recommendations.mdx deleted file mode 100644 index 6af1b8948f..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/recommendations/policy-recommendations.mdx +++ /dev/null @@ -1,195 +0,0 @@ ---- -description: Enable continuous policy recommendations to secure unprotected namespaces or workloads. ---- - -# Enable policy recommendations - -## Big picture - -Use policy recommendations to automatically isolate namespaces with network policy. - -## Value - -One of the best practices for improving the security posture of your Kubernetes cluster is to implement namespace isolation with network policy. Namespace isolation helps to implement a zero-trust and least-privileged security model, where only required communication between namespaces is authorized and everything else is blocked. This helps mitigate the risk of lateral movement within a cluster in the event of an attack. - -$[prodname] makes it easy for platform operators to implement namespace isolation without experience in authoring network policy or detailed knowledge of how application workloads are communicating. $[prodname] analyzes the flow logs that are generated from workloads, and automatically recommends and stages policies for each namespace that can be used for isolation. 
- -## Before you begin - -**Unsupported** -- Windows nodes - -**Required RBAC** - -To enable/disable and use policy recommendations, you must have the **tigera-network-admin** role or permissions to **update**, **patch**, **get**, **list**, **watch** `projectcalico.org` resources: -* tiers -* policyrecommendationscopes -* stagednetworkpolicies -* tier.stagednetworkpolicies -* networkpolicies -* tier.networkpolicies -* globalnetworksets - -Specifically, you will need access to the `namespace-isolation` tier and to staged and network policies in the `namespace-isolation` tier. - -**Recommended** - -Basic knowledge of policies in the web console and tiers: -- [Get started with tiered network policy](../../network-policy/policy-tiers/tiered-policy) -- [Network policy tutorial](../../network-policy/policy-tiers/policy-tutorial-ui) - -**Limitations** - -Creating and managing policy recommendations is available only in the web console. - -## How to - -- [Enable policy recommendations](#enable-policy-recommendations) -- [Activate and review policy recommendations](#activate-and-review-policy-recommendations) -- [Review global settings for workloads](#review-global-settings-for-workloads) -- [Update policy recommendations](#update-policy-recommendations) -- [Private network recommendations](#private-network-recommendations) -- [Troubleshoot policy recommendations](#troubleshoot-policy-recommendations) -- [Disable the policy recommendations feature](#disable-the-policy-recommendations-feature) - -### Enable policy recommendations **using the web console** - -1. In the left navbar in the web console, click **Policies**, **Recommendations**. -1. On the opt-in page, click **Enable Policy Recommendations**. - -The **Policy Recommendations** board is automatically displayed. - -![Policy-recommendations-board](/img/calico-enterprise/policy-recommendations-board.png) - -**Notes**: - -- A policy recommendation is generated for every namespace in your cluster (unless namespaces are filtered out by an Admin using the [selector](../../reference/resources/policyrecommendations.mdx#namespaceSpec#selector) in the PolicyRecommendationScope resource). -- Flow logs are continuously monitored for policy recommendations. -- Recommended policies are continuously updated until you **Add to policy board** or **Dismiss policy** using the Actions menu. -- Policy recommendations are created as **staged network policies** so you can safely observe the traffic before enforcing them. -- Traffic originating from the recommended policy's namespace is used to generate egress rules, and traffic destined for the namespace is used to define ingress rules. -- To stop policy recommendations from being processed and updated for a namespace, click the **Action** menu, **Dismiss policy**. - -### Enable policy recommendations **using kubectl**. - -To enable the policy recommendations feature, set the **RecStatus** parameter to `Enabled`, in the [Policy recommendations resource](../../reference/resources/policyrecommendations.mdx). - -```bash -kubectl patch PolicyRecommendationScope default --type='json' -p='[{"op": "replace", "path": "/spec/namespaceSpec/recStatus", "value": "Enabled"}]' -``` - -### Activate and review policy recommendations - -Policy recommendations are not enabled until you activate them and move them to the **Active** board. - -From the Policy Recommendation board, select a policy recommendation (or bulk select) and select, **Add to policy board**. Click on the **Active tab**. 
- -You can now view the activated policies in the **Policies Board**. In the left navbar, click **Policies**. - -Policy recommendations are added to the **namespace-isolation** tier. Note the following: - -- Staged network policy recommendations work like any other staged network policy. -- You cannot move recommended staged policies in the `namespace-isolation` tier. -- The name of the `namespace-isolation` tier is fixed and cannot be changed - -You are now ready to observe traffic flows in Policies board to verify that the policy is authorizing traffic as expected. When a policy works as expected, you can safely enforce it. See [Stage, preview impacts, and enforce policy](network-policy/staged-network-policies.mdx) for help. - -### Review global settings for workloads - -The default global settings for capturing flows for policy recommendations are based on application workloads with *frequent communication with other namespaces in your cluster*. - -Global settings are found on the Policy Recommendations board, **Action** menu. - -![Global-settings-dialog](/img/calico-enterprise/global-settings.png) - -- **Stabilization Period** is the learning time to capture flow logs so that a recommendation accurately reflects the cluster's traffic patterns. - -- **Processing Interval** is the frequency to process new flow logs and refine recommendations. - -:::tip -For application workloads with less frequent communication, the stabilization period setting may not be long enough to get accurate traffic flows, so you’ll want to increase the time. We recommend that you review your workloads immediately after you enable policy recommendations and adjust the settings accordingly. -::: - -Changes to all other policy recommendations parameters require Admin permissions and can be changed using the [Policy recommendations resource](../../reference/resources/policyrecommendations.mdx). - -### Update policy recommendations - -This section describes common changes you may want to make to policy recommendations. - -#### Relearn activated recommendations - -As new namespace and components are added to a cluster, your activated policy recommendation may need to be updated to reflect those changes. If a policy recommendation has not been enforced, you’ll need to update it to allow traffic. - -1. On the **Policies Recommendations** board, click the **Active tab**, which lists the active staged network policies. -1. Select the Actions menu associated with the policy in question, and click **Dismiss policy**. -1. Click the **Dismissed tab**, select the Actions menu, and **Reactivate** the policy. - -#### Rerun policy recommendations for an enforced policy - -To generate a new recommendation for an enforced policy, delete the network policy on the **Policy** board. - -#### Stop policy recommendation updates for a namespace - -1. On the Policy Recommendations board, click the **Recommendations** tab, which lists the recommendations. -1. Select the recommendation, click the **Actions** menu, and click **Dismiss policy**. - -To reactivate a policy recommendation for a namespace, select the dismissed staged policy, and from the Actions menu, select **Reactivate**. - -### Private network recommendations - -If any flow to a private network in your cluster is found, a private rule is automatically created that contains RFC 1918 subnets, which will allow traffic to/from those endpoints. 
If you need to apply a more restrictive approach, create a [GlobalNetworkSet](../../reference/resources/globalnetworkset.mdx) and update it with the desired CIDR blocks. The recommendation engine will identify flows to your private IPs and generate the appropriate NetworkSet Rule. - -**Notes**: -Exclude any CIDR ranges used by the cluster for nodes and pods. - -### Troubleshoot policy recommendations - -To view policy-recommendation logs: - -```batch -kubectl logs -n tigera-policy-recommendation -l k8s-app=tigera-policy-recommendation -``` - -**Notes**: -This option is not available on managed clusters if you have implemented the multi-cluster-management feature. - -**Problem**: I’m not seeing policy recommendations on the Policy Recommendations board. - -**Solution/workaround**: Policy recommendations are based on historical flow logs that match a request, and are generated only for flows that have not been addressed by any other policy. As such, there are times when policy recommendations will not be generated: - -- Not enough traffic history - - If you recently installed $[prodname], you may not have enough traffic history. Workloads must run for some time (around 5 days) to get “typical network traffic” for applications. - -- Traffic is covered by existing policy - - Even if your cluster has been running for a long time with traffic, the flows may already be covered by existing policies. - -To verify why there may not be any recommendations, follow these steps: - -1. Go to **Service Graph**, **Default**. -1. Filter flow logs for your namespace. -1. Investigate the content within the `policies` field for the flow logs in question. -1. Validate that no other enforced policy already addresses the flow. - -**Problem**: Why are egress-to-domain rules being generated for a Kubernetes service? - -**Solution/workaround**: The policy recommendation controller can only read the cluster domain of the cluster it runs in. If you have managed clusters with a non-default domain (`cluster.local`), the controller will treat egress traffic as though it is to a domain. - -### Disable policy recommendations - -To disable the policy recommendations feature, set the **RecStatus** parameter to `Disabled`, in the [Policy recommendations resource](../../reference/resources/policyrecommendations.mdx). - -```bash -kubectl patch PolicyRecommendationScope default --type='json' -p='[{"op": "replace", "path": "/spec/namespaceSpec/recStatus", "value": "Disabled"}]' -``` - -:::note - -When disabled, non-active staged policies in the **Policies Recommendations** board are no longer updated. Existing activated and enforced staged network policies are not affected by disabling policy recommendations. - -::: - -## Additional resources - -- [Policy best practices](../../network-policy/policy-best-practices.mdx) \ No newline at end of file diff --git a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/staged-network-policies.mdx b/calico-enterprise_versioned_docs/version-3.19-2/network-policy/staged-network-policies.mdx deleted file mode 100644 index 766dc4d534..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/network-policy/staged-network-policies.mdx +++ /dev/null @@ -1,181 +0,0 @@ ---- -description: Stage and preview policies to observe traffic implications before enforcing them. ---- - -# Stage, preview impacts, and enforce policy - -## Big picture - -Stage and preview impacts on traffic before enforcing policy. 
- -## Value - -$[prodname] staged network policy resources lets you test the traffic impact of the policy as if it were enforced, but without changing traffic flow. You can also preview the impacts of a staged policy on existing traffic. By verifying that correct flows are allowed and denied before enforcement, you can minimize misconfiguration and potential network disruption. - -## Concepts - -### About staged policies - -The following staged policy resources have the same structure (i.e. the resource spec has the same fields) as their “enforced” counterpart. - -- Staged global network policy -- Staged network policy -- Staged Kubernetes network policy - -### Review permissions - -The default `tigera-network-admin` cluster role has the required permissions to manage the different enforced -and staged network policies. Adjust permissions for your environment. As with $[prodname] network policy and global network policies, the RBAC for $[prodname] staged network policy and staged global network policy is tier-dependent. - -## How to - -- [Create a policy recommendation](#create-a-policy-recommendation) -- [Stage a policy](#stage-a-policy) -- [Preview policy impact](#preview-policy-impact) -- [Enforce a staged policy](#enforce-a-staged-policy) -- [Stage updates to an enforced policy](#stage-updates-to-an-enforced-policy) - -### Create a policy recommendation - -One of the first things developers need to do is secure unprotected workloads with network policy. (For example, by default, Kubernetes pods accept traffic from any source.) The **Recommend policy** feature allows developers with minimal experience writing policy to secure workloads. - -Because **Recommend policy** looks at historical flow log entries that match your request, you should run your workloads for a reasonable amount of time to get "typical network traffic" for your application. - -1. In the left navbar, click **Policies**. -1. Click **Recommend a policy**. -1. Enter time range, Namespace, Name, and click **Recommend**. -1. If relevant flow logs are found within the time range for the workload endpoint, click **Preview** to assess the impact of the recommended policy, or **Stage**. - -![recommend-policy](/img/calico-enterprise/recommend-policy.png) - -### Stage a policy - -Stage a policy to test it in a near replica of a production environment. A best practice is to stage a policy before enforcing it to avoid unintentionally exposing or blocking other network traffic. - -1. In the left navbar, click **Policies**. -1. In a tier, click **Add Policy**. -1. Create your policy and click **Stage** to save and stage it. - -![stage-new-policy](/img/calico-enterprise/stage-new-policy.png) - -### Enforce a staged policy - -1. From **Policies Board**, click a staged policy. -1. Click **Edit policy**, make changes and click **Enforce**. The staged policy is deleted and the enforced policy is created/updated (depending on whether it already exists). - -### Preview policy impact - -The policy preview impact feature assesses traffic impact only on _enforced staged policies_. - -1. From the **Policies Board**, select a staged policy and click **Edit policy**. -1. Make some edits and click **Preview**. - -![policy-preview](/img/calico-enterprise/policy-preview.png) - -### Stage updates to an enforced policy - -1. From the **Policies Board**, open an enforced policy. -1. In **View Policy**, click **Edit policy**. -1. Make your changes, and click **Preview**. Depending on the results, you can click **Stage** or **Enforce**. 
- -You can also use custom resources to stage Kubernetes and $[prodname] policies, and apply them using `kubectl`. Here are sample YAML files. - -**Example: StagedGlobalNetworkPolicy** - -```yaml -apiVersion: projectcalico.org/v3 -kind: StagedGlobalNetworkPolicy -metadata: - name: default.allow-tcp-6379 -spec: - tier: default - selector: role == 'database' - types: - - Ingress - - Egress - ingress: - - action: Allow - protocol: TCP - source: - selector: role == 'frontend' - destination: - ports: - - 6379 - egress: - - action: Allow -``` - -**Example: StagedNetworkPolicy** - -```yaml -apiVersion: projectcalico.org/v3 -kind: StagedNetworkPolicy -metadata: - name: default.allow-tcp-6379 - namespace: default -spec: - tier: default - selector: role == 'database' - types: - - Ingress - - Egress - ingress: - - action: Allow - protocol: TCP - source: - selector: role == 'frontend' - destination: - ports: - - 6379 - egress: - - action: Allow -``` - -**Example: StagedKubernetesNetworkPolicy** - -```yaml -apiVersion: projectcalico.org/v3 -kind: StagedKubernetesNetworkPolicy -metadata: - name: test-network-policy - namespace: default -spec: - podSelector: - matchLabels: - role: db - policyTypes: - - Ingress - - Egress - ingress: - - from: - - ipBlock: - cidr: 172.17.0.0/16 - except: - - 172.17.1.0/24 - - namespaceSelector: - matchLabels: - project: myproject - - podSelector: - matchLabels: - role: frontend - ports: - - protocol: TCP - port: 6379 - egress: - - to: - - ipBlock: - cidr: 10.0.0.0/24 - ports: - - protocol: TCP - port: 5978 -``` - -## Additional resources - -- [Staged global network policy](../reference/resources/stagedglobalnetworkpolicy.mdx) -- [Staged network policy](../reference/resources/stagednetworkpolicy.mdx) -- [Staged Kubernetes network policy](../reference/resources/stagedkubernetesnetworkpolicy.mdx) -- For details on how to configure RBAC for staged policy resources, see [Configuring RBAC for tiered policy](policy-tiers/rbac-tiered-policies.mdx) -- For details on staged policy metrics, see - - [Flow logs](../observability/elastic/flow/datatypes.mdx) - - [Prometheus metrics](../operations/monitor/metrics/index.mdx#content-main) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/networking/configuring/advertise-service-ips.mdx b/calico-enterprise_versioned_docs/version-3.19-2/networking/configuring/advertise-service-ips.mdx deleted file mode 100644 index 618746530e..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/networking/configuring/advertise-service-ips.mdx +++ /dev/null @@ -1,273 +0,0 @@ ---- -description: Configure Calico to advertise Kubernetes service cluster IPs and external IPs outside the cluster using BGP. ---- - -# Advertise Kubernetes service IP addresses - -## Big picture - -Enable $[prodname] to advertise Kubernetes service IPs outside a cluster. $[prodname] supports advertising a service’s cluster IPs and external IPs. - -## Value - -Typically, Kubernetes service cluster IPs are accessible only within the cluster, so external access to the service requires a dedicated load balancer or ingress controller. In cases where a service’s cluster IP is not routable, the service can be accessed using its external IP. - -Just as $[prodname] supports advertising **pod IPs** over BGP, it also supports advertising Kubernetes **service IPs** outside a cluster over BGP. This avoids the need for a dedicated load balancer. 
This feature also supports equal cost multi-path (ECMP) load balancing across nodes in the cluster, as well as source IP address preservation for local services when you need more control. - -## Concepts - -### BGP makes it easy - -In Kubernetes, all requests for a service are redirected to an appropriate endpoint (pod) backing that service. Because $[prodname] uses BGP, external traffic can be routed directly to Kubernetes services by advertising Kubernetes service IPs into the BGP network. - -If your deployment is configured to peer with BGP routers outside the cluster, those routers (plus any other upstream places the routers propagate to) can send traffic to a Kubernetes service IP for routing to one of the available endpoints for that service. - -### Advertising service IPs: quick glance - -$[prodname] implements the Kubernetes **externalTrafficPolicy** using kube-proxy to direct incoming traffic to a correct pod. Advertisement is handled differently based on the service type that you configure for your service. - -| **Service mode** | **Cluster IP advertisement** | **Traffic is...** | Source IP address is... | -| ----------------- | ------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ----------------------- | -| Cluster (default) | All nodes in the cluster statically advertise a route to the service CIDR. | Load balanced across nodes in the cluster using ECMP, then forwarded to appropriate pod in the service using SNAT. May incur second hop to another node, but good overall load balancing. | Obscured by SNAT | -| Local | The nodes with a pod backing the service advertise a specific route (/32 or /128) to the service's IP. | Load balanced across nodes with endpoints for the service. Avoids second hop for LoadBalancer and NodePort type services, traffic may be unevenly load balanced. (Other traffic is load balanced across nodes in the cluster.) | Preserved | - -If your $[prodname] deployment is configured to peer with BGP routers outside the cluster, those routers - plus any further upstream places that those routers propagate to - will be able to send traffic to a Kubernetes service cluster IP, and that traffic is routed to one of the available endpoints for that service. - -### Tips for success - -- Generally, we recommend using “Local” for the following reasons: - - If any of your network policy uses rules to match by specific source IP addresses, using Local is the obvious choice because the source IP address is not altered, and the policy will still work. - - Return traffic is routed directly to the source IP because “Local” services do not require undoing the source NAT (unlike “Cluster” services). -- Cluster IP advertisement works best with a ToR that supports ECMP. Otherwise, all traffic for a given route is directed to a single node. - -## Before you begin... - -**Required** - -- Calico CNI -- [Configure BGP peering](bgp.mdx) between $[prodname] and your network infrastructure -- For ECMP load balancing to services, the upstream routers must be configured to use BGP multipath. -- You need at least one external node outside the cluster that acts as a router, route reflector, or ToR that is peered with calico nodes inside the cluster. 
-- Services must be configured with the correct service mode (“Cluster” or “Local”) for your implementation. For `externalTrafficPolicy: Local`, the service must be type `LoadBalancer` or `NodePort`. - -**Limitations** - -- Supported in EKS and AWS, but only if you are using Calico CNI -- OpenShift, versions 4.5 and 4.6 - There is a [bug](https://github.com/kubernetes/kubernetes/issues/91374) where the source IP is not preserved by NodePort services or traffic via a Service ExternalIP with externalTrafficPolicy:Local. - - OpenShift users on v4.5 or v4.6 can use this [workaround to avoid SNAT with ExternalIP](https://docs.openshift.com/container-platform/4.7/nodes/clusters/nodes-cluster-enabling-features.html): - - ``` - oc edit featuregates.config.openshift.io cluster - spec: - customNoUpgrade: - enabled: - - ExternalPolicyForExternalIP - ``` - - Kubernetes users on version v1.18 or v1.19 can enable source IP preservation for NodePort services using the ExternalPolicyForExternalIP feature gate. - - Source IP preservation for NodePort and services and ExternalIPs is enabled by default in OpenShift v4.7+, and Kubernetes v1.20+. - -## How to - -- [Advertise service cluster IP addresses](#advertise-service-cluster-ip-addresses) -- [Advertise service external IP addresses](#advertise-service-external-ip-addresses) -- [Advertise service load balancer IP addresses](#advertise-service-load-balancer-ip-addresses) -- [Exclude certain nodes from advertisement](#exclude-certain-nodes-from-advertisement) - -### Advertise service cluster IP addresses - -1. Determine the service cluster IP range. (Or ranges, if your cluster is [dual stack](../ipam/ipv6.mdx).) - - The range(s) for your cluster can be inferred from the `--service-cluster-ip-range` option passed to the Kubernetes API server. For help, see the [Kubernetes API server reference guide](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/). - -1. Check to see if you have a default BGPConfiguration. - - ```bash - kubectl get bgpconfiguration.projectcalico.org default - ``` - -1. Based on above results, update or create a BGPConfiguration. - - **Update default BGPConfiguration**. - - Patch the BGPConfiguration using the following command, using your own service cluster IP CIDR in place of "10.0.0.0/24": - - ```bash - kubectl patch bgpconfiguration.projectcalico.org default -p '{"spec":{"serviceClusterIPs": [{"cidr": "10.0.0.0/24"}]}}' - ``` - - **Create default BGPConfiguration**. - - Use the following sample command to create a default BGPConfiguration. Add your CIDR blocks, covering the cluster IPs to be advertised, in the `serviceClusterIPs` field, for example: - - ```bash - kubectl create -f - < 100). - -For a deeper look at common on-premises deployment models, see [Calico over IP Fabrics](../../reference/architecture/design/l2-interconnect-fabric.mdx). - -## Before you begin... - -**Required** - -- Calico CNI -- [calicoctl](../../operations/clis/calicoctl/install.mdx) must be installed and configured - -## How to - -:::note - -Significantly changing $[prodname]'s BGP topology, such as changing from full-mesh to peering with ToRs, may result in temporary loss of pod network connectivity during the reconfiguration process. It is recommended to only make such changes during a maintenance window. 
- -::: - -- [Configure a global BGP peer](#configure-a-global-bgp-peer) -- [Configure a per-node BGP peer](#configure-a-per-node-bgp-peer) -- [Configure a node to act as a route reflector](#configure-a-node-to-act-as-a-route-reflector) -- [Disable the default BGP node-to-node mesh](#disable-the-default-bgp-node-to-node-mesh) -- [Change from node-to-node mesh to route reflectors without any traffic disruption](#change-from-node-to-node-mesh-to-route-reflectors-without-any-traffic-disruption) -- [View BGP peering status for a node](#view-bgp-peering-status-for-a-node) -- [View BGP info on all peers for a node](#view-bgp-info-on-all-peers-for-a-node) -- [Change the default global AS number](#change-the-default-global-as-number) -- [Change AS number for a particular node](#change-as-number-for-a-particular-node) -- [Configure a BGP filter](#configure-a-bgp-filter) -- [Configure a BGP peer with a BGP filter](#configure-a-bgp-peer-with-a-bgp-filter) - -### Configure a global BGP peer - -Global BGP peers apply to all nodes in your cluster. This is useful if your network topology includes BGP speakers that will be peered with every $[prodname] node in your deployment. - -The following example creates a global BGP peer that configures every $[prodname] node to peer with **192.20.30.40** in AS **64567**. - -```yaml -apiVersion: projectcalico.org/v3 -kind: BGPPeer -metadata: - name: my-global-peer -spec: - peerIP: 192.20.30.40 - asNumber: 64567 -``` - -### Configure a per-node BGP peer - -Per-node BGP peers apply to one or more nodes in the cluster. You can choose which nodes by specifying the node’s name exactly, or using a label selector. - -The following example creates a BGPPeer that configures every $[prodname] node with the label, **rack: rack-1** to peer with **192.20.30.40** in AS **64567**. - -```yaml -apiVersion: projectcalico.org/v3 -kind: BGPPeer -metadata: - name: rack1-tor -spec: - peerIP: 192.20.30.40 - asNumber: 64567 - nodeSelector: rack == 'rack-1' -``` - -### Configure a node to act as a route reflector - -$[prodname] nodes can be configured to act as route reflectors. To do this, each node that you want to act as a route reflector must have a cluster ID - typically an unused IPv4 address. - -To configure a node to be a route reflector with cluster ID 244.0.0.1, run the following command. - -```bash -kubectl annotate node my-node projectcalico.org/RouteReflectorClusterID=244.0.0.1 -``` - -Typically, you will want to label this node to indicate that it is a route reflector, allowing it to be easily selected by a BGPPeer resource. You can do this with kubectl. For example: - -```bash -kubectl label node my-node route-reflector=true -``` - -Now it is easy to configure route reflector nodes to peer with each other and other non-route-reflector nodes using label selectors. For example: - -```yaml -kind: BGPPeer -apiVersion: projectcalico.org/v3 -metadata: - name: peer-with-route-reflectors -spec: - nodeSelector: all() - peerSelector: route-reflector == 'true' -``` - -:::note - -Adding `routeReflectorClusterID` to a node spec will remove it from the node-to-node mesh immediately, tearing down the -existing BGP sessions. Adding the BGP peering will bring up new BGP sessions. This will cause a short (about 2 seconds) -disruption to data plane traffic of workloads running in the nodes where this happens. 
To avoid this, make sure no -workloads are running on the nodes, by provisioning new nodes or by running `kubectl drain` on the node (which may -itself cause a disruption as workloads are drained). - -::: - -### Disable the default BGP node-to-node mesh - -The default **node-to-node BGP mesh** may be turned off to enable other BGP topologies. To do this, modify the default **BGP configuration** resource. - -Run the following command to disable the BGP full-mesh: - -```bash -calicoctl patch bgpconfiguration default -p '{"spec": {"nodeToNodeMeshEnabled": false}}' -``` - -:::note - -If the default BGP configuration resource does not exist, you need to create it first. See [BGP configuration](../../reference/resources/bgpconfig.mdx) for more information. - -::: - -:::note - -Disabling the node-to-node mesh will break pod networking until/unless you configure replacement BGP peerings using BGPPeer resources. -You may configure the BGPPeer resources before disabling the node-to-node mesh to avoid pod networking breakage. - -::: - -### Change from node-to-node mesh to route reflectors without any traffic disruption - -Switching from node-to-node BGP mesh to BGP route reflectors involves tearing down BGP sessions and bringing up new ones. This causes a short -data plane network disruption (of about 2 seconds) for workloads running on the nodes in the cluster. To avoid this, you may provision -route reflector nodes and bring their BGP sessions up before tearing down the node-to-node mesh sessions. - -Follow these steps to do so: - -1. [Provision new nodes to be route reflectors.](#configure-a-node-to-act-as-a-route-reflector) The nodes [should not be schedulable](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) - and they should have `routeReflectorClusterID` in their spec. These won't be part of the existing - node-to-node BGP mesh, and will be the route reflectors when the mesh is disabled. These nodes should also have a label like - `route-reflector` to select them for the BGP peerings. [Alternatively](https://kubernetes.io/docs/tasks/administer-cluster/safely-drain-node/), - you can drain workloads from existing nodes in your cluster by running `kubectl drain ` to configure them to be route reflectors, - but this will cause a disruption on the workloads on those nodes as they are drained. - -2. Also set up a [BGPPeer](#configure-a-node-to-act-as-a-route-reflector) spec to configure route reflector nodes to peer with each other and other non-route-reflector nodes - using label selectors. - -3. Wait for these peerings to be established. This can be [verified](#view-bgp-peering-status-for-a-node) by running `sudo calicoctl node status` on the nodes. Alternatively, you can create a [`CalicoNodeStatus` resource](../../reference/resources/caliconodestatus.mdx) to get BGP session status for the node. - -4. [Disable the BGP node-to-node mesh for the cluster.](#disable-the-default-bgp-node-to-node-mesh) - -5. If you did drain workloads from the nodes or created them as unschedulable, mark the nodes as schedulable again (e.g. by running `kubectl uncordon `). - -### View BGP peering status for a node - -Create a [CalicoNodeStatus resource](../../reference/resources/caliconodestatus.mdx) to monitor BGP session status for the node. - -Alternatively, you can run the `calicoctl node status` command on a given node to learn more about its BGP status. 
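For the CalicoNodeStatus approach, a minimal resource might look like the following sketch (the node name `my-node`, the resource name, and the 10-second update period are placeholders; see the CalicoNodeStatus reference for the full list of status classes):

```yaml
apiVersion: projectcalico.org/v3
kind: CalicoNodeStatus
metadata:
  name: my-node-status
spec:
  # Node whose status should be collected
  node: my-node
  # Which status classes to report
  classes:
    - Agent
    - BGP
    - Routes
  updatePeriodSeconds: 10
```

Once the resource exists, reading it back shows the collected BGP session and route information. The `calicoctl node status` alternative reports similar information directly on the node.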
- -:::note - -This command communicates with the local $[prodname] agent, so you must execute it on the node whose status you are attempting to view. - -::: - -### View BGP info on all peers for a node - -You can use `calicoctl` to view the BGP information for all peers of a particular node, including connection status, routing statistics, and BGP state. This is useful for confirming that your configuration is behaving as desired, and for more detailed troubleshooting. - -Run the following command from anywhere you have access to `kubectl`: - -```bash -calicoctl bgp peers -``` - -Where `` is the resource name for one of the Calico node pods within your cluster. - -:::note - -The above command can be run from anywhere you have access to kubectl. We recommend running it as a kubectl plugin. [Follow these instructions](../../operations/clis/calicoctl/install.mdx#install-calicoctl-as-a-kubectl-plugin-on-a-single-host) for how to install `calicoctl` as a kubectl plugin. - -::: -If you install the binary as a kubectl plugin using the above instructions, you can then run the command as follows: - -```bash -kubectl calico bgp peers -``` - -### Change the default global AS number - -By default, all Calico nodes use the 64512 autonomous system, unless a per-node AS has been specified for the node. You can change the global default for all nodes by modifying the default **BGPConfiguration** resource. The following example command sets the global default AS number to **64513**. - -```bash -kubectl patch bgpconfiguration default -p '{"spec": {"asNumber": "64513"}}' -``` - -:::note - -If the default BGP configuration resource does not exist, you need to create it first. See [BGP configuration](../../reference/resources/bgpconfig.mdx) for more information. - -::: - -### Change AS number for a particular node - -You can configure an AS for a particular node by modifying the node object using `calicoctl`. For example, the following command changes the node named **node-1** to belong to **AS 64514**. - -```bash -calicoctl patch node node-1 -p '{"spec": {"bgp": {"asNumber": "64514"}}}' -``` - -### Configure a BGP filter - -BGP filters control which routes are imported and exported between BGP peers. - -The BGP filter rules (importVX, exportVX) are applied sequentially, taking the -`action` of the first matching rule. When no rules are matched, the default -`action` is `Accept`. - -In order for a BGPFilter to be used in a BGP peering, its `name` -must be added to `filters` of the corresponding BGPPeer resource. - -The following example creates a BGPFilter - -```yaml -apiVersion: projectcalico.org/v3 -kind: BGPFilter -metadata: - name: my-filter -spec: - exportV4: - - action: Accept - matchOperator: In - cidr: 77.0.0.0/16 - - action: Reject - source: RemotePeers - - action: Reject - interface: '*.calico' - importV4: - - action: Reject - matchOperator: NotIn - cidr: 44.0.0.0/16 - exportV6: - - action: Reject - source: RemotePeers - - action: Reject - interface: '*.calico' - importV6: - - action: Accept - matchOperator: Equal - cidr: 5000::0/64 - - action: Reject -``` - -### Configure a BGP peer with a BGP filter - -BGP peers can use BGP filters to control which routes are imported or exported between them. 
- -The following example creates a BGPFilter and associates it with a BGPPeer -:::note - -BGPFilters are applied in the order listed on a BGPPeer - -::: - -```yaml -kind: BGPFilter -apiVersion: projectcalico.org/v3 -metadata: - name: first-bgp-filter -spec: - exportV4: - - action: Accept - matchOperator: In - cidr: 77.0.0.0/16 - source: RemotePeers - importV4: - - action: Reject - matchOperator: NotIn - cidr: 44.0.0.0/16 - exportV6: - - action: Reject - interface: '*.calico' - importV6: - - action: Accept - matchOperator: Equal - cidr: 5000::0/64 ---- -kind: BGPFilter -apiVersion: projectcalico.org/v3 -metadata: - name: second-bgp-filter -spec: - exportV4: - - action: Accept - matchOperator: In - cidr: 77.0.0.0/16 - interface: '*.calico' - importV4: - - action: Reject - matchOperator: NotIn - cidr: 44.0.0.0/16 - exportV6: - - action: Reject - source: RemotePeers - importV6: - - action: Reject ---- -kind: BGPPeer -apiVersion: projectcalico.org/v3 -metadata: - name: peer-with-filter -spec: - peerSelector: has(filter-bgp) - filters: - - first-bgp-filter - - second-bgp-filter -``` - -## Additional resources - -- [Node resource](../../reference/resources/node.mdx) -- [BGP configuration resource](../../reference/resources/bgpconfig.mdx) -- [BGP peer resource](../../reference/resources/bgppeer.mdx) -- [BGP filter resource](../../reference/resources/bgpfilter.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/networking/configuring/custom-bgp-config.mdx b/calico-enterprise_versioned_docs/version-3.19-2/networking/configuring/custom-bgp-config.mdx deleted file mode 100644 index bacfa6f30a..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/networking/configuring/custom-bgp-config.mdx +++ /dev/null @@ -1,52 +0,0 @@ ---- -description: Customize your BGP configuration. ---- - -# Custom BGP configuration - -## Big picture - -Customize BGP configurations for special use-cases. - -:::caution -Customizing BGP configuration templates is not supported by Tigera for production because they are not guaranteed to work when upgrading. However, if you want to use custom templates for POCs and other temporary uses cases, contact your Tigera Support representative for help. - -::: - -## Concepts - -In $[prodname], BGP is handled by [BIRD](https://github.com/projectcalico/bird). -The BIRD configurations are templated out through [confd](https://github.com/projectcalico/confd). -You can modify the BIRD configuration to use BIRD features which are not typically exposed using the -default configuration provided with $[prodname]. - -## Before you begin - -**Required** - -- Calico CNI - -## How to - -- [Update BGP configuration](#update-bgp-configuration) -- [Apply BGP customizations](#apply-bgp-customizations) based on how you've deployed $[prodname]: - -### Update BGP configuration - -Using the directions provided with the templates, set the correct values -for the BGP configuration using these resources: - -- [BGP Configuration](../../reference/resources/bgpconfig.mdx) -- [BGP Peer](../../reference/resources/bgppeer.mdx) -- [calicoctl](../../reference/clis/calicoctl/index.mdx) - -### Apply BGP Customizations - -1. Create your confd templates. -1. Create a ConfigMap from the templates. - -``` -kubectl create configmap bird-templates -n tigera-operator --from-file= -``` - -The created config map will be used to populate the $[prodname] BIRD configuration file templates. 
If a template with the same name already exists within the node container, it will be overwritten with the contents from the config map. diff --git a/calico-enterprise_versioned_docs/version-3.19-2/networking/configuring/dual-tor.mdx b/calico-enterprise_versioned_docs/version-3.19-2/networking/configuring/dual-tor.mdx deleted file mode 100644 index 023854822b..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/networking/configuring/dual-tor.mdx +++ /dev/null @@ -1,674 +0,0 @@ ---- -description: Configure a dual plane cluster for redundant connectivity between workloads. ---- - -# Deploy a dual ToR cluster - -## Big picture - -Deploy a dual plane cluster to provide redundant connectivity between your workloads for on-premises deployments. - -:::note - -Dual ToR is not supported if you are using BGP with encapsulation (VXLAN or IP-in-IP). - -::: - -## Value - -A dual plane cluster provides two independent planes of connectivity between all cluster -nodes. If a link or software component breaks somewhere in one of those planes, cluster -nodes can still communicate over the other plane, and the cluster as a whole continues to -operate normally. - -## Concepts - -### Dual plane connectivity, aka "dual ToR" - -Large on-prem Kubernetes clusters, split across multiple server racks, can use two or more -independent planes of connectivity between all the racks. The advantages are: - -- The cluster can still function, even if there is a single break in connectivity - somewhere. - -- The cluster can load balance across the bandwidth of _both_ planes, when both planes - are available. - -The redundant approach can be applied within each rack as well, such that each node has -two or more independent connections to those connectivity planes. Typically, each rack -has two top-of-rack routers ("ToRs") and each node has two fabric-facing interfaces, each -of which connects over a separate link or Ethernet to one of the ToRs for the rack. - -Here's an example of how a dual plane setup might look, with just two racks and two nodes -in each rack. For simplicity, we've shown the connections _between_ racks as single -links; in reality that would be more complex, but still following the overall dual plane -paradigm. - -![dual-tor](/img/calico-enterprise/dual-tor.png) - -Because of the two ToRs per rack, the whole setup is often referred to as "dual ToR". - -### Network design for a dual ToR cluster - -For a dual ToR cluster to operate seamlessly when there is a break on one of the planes, -several things are needed. - -- Each node should have a stable IP address that is independent of its per-interface - addresses and remains valid if the connectivity through _one_ of those interfaces goes - down. - -- Each node must somehow know or learn the stable IP address of every other node. - -- Wherever a connection (other than BGP) is to or from a _node_ (as opposed to a - non-host-networked pod), that connection should use the node's stable address as its - destination or source IP (respectively), so that the connection can continue working if - one of the planes has an outage. - -- Importantly, this includes connections that Kubernetes uses as part of its own control - plane, such as between the Kubernetes API server and kubelet on each node. Ideally, - therefore, the stable IP address setup on each node should happen before Kubernetes - starts running. 
- -- BGP is an exception to the previous points - in fact, the _only_ exception - because we - want each node's BGP peerings to be interface-specific and to reflect what is actually - reachable, moment by moment, over that interface. The Linux routing table then - automatically adjusts so that the route to each remote destination is either ECMP - - when both planes are up - or non-ECMP when it can only be reached over one of the - planes. - -- BGP peerings should be configured to detect any outages, and to propagate their - consequences, as quickly as possible, so that the routing can quickly respond on each - node. Note that this is quite different from the reasoning for a single connectivity - plane, where it's better to delay any network churn, on the assumption that an outage - will be quickly fixed. - -Finally, to spread load evenly and maximise use of both planes, when both available, the -routers and Linux kernel need to be configured for efficient ECMP. - -### Calico's early networking architecture - -$[prodname]'s $[nodecontainer] image can be run in an "early networking" mode, -on each node, to perform all of the above points that are needed before Kubernetes starts -running. That means that it: - -- Provisions the stable IP address. - -- Makes the changes needed to ensure that the stable address will be used as the source - IP for any outgoing connections from the node. - -- Starts running BGP, peering with the node's ToRs, to advertise the node's - stable address to other nodes. - -- Configures efficient ECMP in the Linux kernel (with `fib_multipath_hash_policy=1` and - `fib_multipath_use_neigh=1`). - -More detail is given below on how to run this early networking image. A key point is that -it must run as soon as possible after each node boot, and before Kubernetes starts on the -node, so it is typically run as a Docker or podman container. - -After its start-of-day provisioning, the early networking container keeps running so that -it can tag-team the BGP role with Calico's regular BGP service running inside the -$[nodecontainer] _pod_: - -- Initially the $[nodecontainer] pod does not yet exist, so the early networking - BGP runs to advertise out the node's stable address. - -- After Kubernetes has started on the node, and Calico has been installed in Kubernetes, - the $[nodecontainer] pod runs and starts its own BGP service. The early - networking container spots that the regular BGP service is now running and so shuts - down its own BGP. Now the regular BGP service handles the advertisement of the stable - address, as well as pod IPs and so on. - -- Later, the $[nodecontainer] pod might be shut down, e.g. for restart or upgrade. - If the downtime continues for longer than the graceful restart period, the early - networking container spots this and restarts its own BGP, to ensure that the node's - stable IP address continues to be advertised to other nodes. The cycle can now repeat - from the "Initially" state above. - - :::note - - The default graceful restart period is 120s for traditional BGP GR and - 3600s for LLGR. - - ::: - -### BGP configuration for rapid outage detection - -A dual ToR cluster needs Calico BGPPeer resources to specify how each node should peer -with its ToRs. The remaining parts of the dual ToR network design are implemented as -properties of those BGP peerings, and as corresponding properties on the BGP configuration -between and within the ToRs and core infrastructure. 
- -Specifically, on Calico's BGPPeer resource, - -- the `failureDetectionMode` field is used to enable BFD - -- the `restartMode` field can be used to enable long-lived graceful restart (LLGR). - -See below for more on the benefits of these settings. When they are used, consistent -settings are needed on the ToRs and core infrastructure. - -### ECMP routing - -An "Equal Cost Multiple Path" (ECMP) route is one that has multiple possible ways to reach -a given destination or prefix, all of which are considered to be equally good. A dual ToR -setup naturally generates ECMP routes, with the different paths going over the different -connectivity planes. - -When using an ECMP route, Linux decides how to balance traffic across the available paths, -including whether this is informed by TCP and UDP port numbers as well as source and -destination IP addresses, whether the decision is made per-packet, per-connection, or in -some other way, and so on; and the details here have varied with Linux kernel version. -For a clear account of the exact options and behaviors for different kernel versions, -please see [this blog](https://web.archive.org/web/20210204031636/https://cumulusnetworks.com/blog/celebrating-ecmp-part-two/). - -### BFD - -Bidirectional Forwarding Detection (BFD) is [a protocol](https://tools.ietf.org/html/rfc5880) - that detects very quickly when forwarding -along a particular path stops working - whether that's because a link has broken -somewhere, or some software component along the path. - -In a dual ToR setup, rapid failure detection is important so that traffic flows within the -cluster can quickly adjust to using the other available connectivity plane. - -### Long lived graceful restart - -Long Lived Graceful Restart (LLGR) is [an extension for BGP](https://tools.ietf.org/html/draft-uttaro-idr-bgp-persistence-05) - that handles link -failure by lowering the preference of routes over that link. This is a compromise between -the base BGP behaviour - which is immediately to remove those routes - and traditional BGP -Graceful Restart behaviour - which is not to change those routes at all, until some -configured time has passed. - -For a dual ToR setup, LLGR is helpful, as explained in more detail by [this blog](https://vincent.bernat.ch/en/blog/2018-bgp-llgr) -, because: - -- If a link fails somewhere, the immediate preference lowering allows traffic to adjust - immediately to use the other connectivity plane. - -- If a node is restarted, we still get the traditional Graceful Restart behaviour whereby - routes to that node persist in the rest of the network. - -### Default routing and "nearly default" routes - -Calico's early networking architecture - and more generally, the considerations for dual -ToR that are presented on this page - is compatible with many possible [L3 fabric designs](../../reference/architecture/design/l3-interconnect-fabric.mdx) -. One of -the options in such designs is "downward default", which means that each ToR only -advertises the default route to its directly connected nodes, even when it has much more -detailed routing information. "Downward default" works because the ToR should indeed be -the node's next hop for all destinations, except for directly connected nodes in the same -rack. 
- -In a dual ToR cluster, each node has two ToRs, and "downward default" should result in the -node having an ECMP default route like this: - -``` -default proto bird - nexthop via 172.31.11.100 dev eth0 - nexthop via 172.31.12.100 dev eth0 -``` - -If one of the planes is broken, BGP detects and propagates the outage and that route -automatically changes to a non-ECMP route via the working plane: - -``` -default via 172.31.12.100 dev eth0 proto bird -``` - -That is exactly the behaviour that is wanted in a dual ToR cluster. The snag with it is -that there can be other procedures in the node's operating system that also update the -default route - in particular, DHCP - and that can interfere with this desired behaviour. -For example, if a DHCP lease renewal occurs for one of the node's interfaces, the node may -then replace the default route as non-ECMP via that interface. - -A simple way to avoid such interference is to export the "nearly default" routes 0.0.0.0/1 -and 128.0.0.0/1 from the ToRs, instead of the true default route 0.0.0.0/0. 0.0.0.0/1 and -128.0.0.0/1 together cover the entire IPv4 address space and so provide correct dual ToR -routing for any destination outside the local rack. They also mask the true default route -0.0.0.0/0, by virtue of having longer prefixes (1 bit instead of 0 bits), and so it no -longer matters if there is any other programming of the true default route on the node. - -## Before you begin - -**Unsupported** - -- AKS -- EKS -- GKE - -**Required** - -- Calico CNI - -## How to - -- [Prepare YAML resources describing the layout of your cluster](#prepare-yaml-resources-describing-the-layout-of-your-cluster) -- [Arrange for dual-homed nodes to run $[nodecontainer] on each boot](#arrange-for-dual-homed-nodes-to-run-cnx-node-on-each-boot) -- [Configure your ToR routers and infrastructure](#configure-your-tor-routers-and-infrastructure) -- [Install Kubernetes and $[prodname]](#install-kubernetes-and-calico-enterprise) -- [Verify the deployment](#verify-the-deployment) - -### Prepare YAML resources describing the layout of your cluster - -1. Prepare BGPPeer resources to specify how each node in your cluster should peer with - the ToR routers in its rack. For example, if your rack 'A' has ToRs with IPs - 172.31.11.100 and 172.31.12.100 and the rack AS number is 65001: - - ```yaml - apiVersion: projectcalico.org/v3 - kind: BGPPeer - metadata: - name: ra1 - spec: - nodeSelector: "rack == 'ra' || rack == 'ra_single'" - peerIP: 172.31.11.100 - asNumber: 65001 - sourceAddress: None - --- - apiVersion: projectcalico.org/v3 - kind: BGPPeer - metadata: - name: ra2 - spec: - nodeSelector: "rack == 'ra'" - peerIP: 172.31.12.100 - asNumber: 65001 - sourceAddress: None - ``` - - :::note - - The effect of the `nodeSelector` fields here is that any node with label - `rack: ra` will peer with both these ToRs, while any node with label `rack: ra_single` will peer with only the first ToR. For optimal dual ToR function and - resilience, nodes in rack 'A' should be labelled `rack: ra`, but `rack: ra_single` - can be used instead on any nodes which cannot be dual-homed. - - ::: - - Repeat for as many racks as there are in your cluster. Each rack needs a new pair of - BGPPeer resources with its own ToR addresses and AS number, and `nodeSelector` fields - matching the nodes that should peer with its ToR routers. 
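   For example, a second rack 'B' whose ToRs are at 172.31.21.100 and 172.31.22.100, with rack AS number 65002, might use a pair like this (mirroring the rack 'A' resources above; the addresses and AS number are illustrative):

   ```yaml
   apiVersion: projectcalico.org/v3
   kind: BGPPeer
   metadata:
     name: rb1
   spec:
     nodeSelector: "rack == 'rb' || rack == 'rb_single'"
     peerIP: 172.31.21.100
     asNumber: 65002
     sourceAddress: None
   ---
   apiVersion: projectcalico.org/v3
   kind: BGPPeer
   metadata:
     name: rb2
   spec:
     nodeSelector: "rack == 'rb'"
     peerIP: 172.31.22.100
     asNumber: 65002
     sourceAddress: None
   ```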
- - Depending on what your ToR supports, consider also setting these fields in each - BGPPeer: - - - `failureDetectionMode: BFDIfDirectlyConnected` to enable BFD, when possible, for - fast failure detection. - - :::note - - $[prodname] only supports BFD on directly connected peerings, but - in practice nodes are normally directly connected to their ToRs. - - ::: - - - `restartMode: LongLivedGracefulRestart` to enable LLGR handling when the node needs - to be restarted, if your ToR routers support LLGR. If not, we recommend instead - `maxRestartTime: 10s`. - - - `birdGatewayMode: DirectIfDirectlyConnected` to enable the "direct" next hop - algorithm, if that is helpful for optimal interworking with your ToR routers. - - :::note - - For directly connected BGP peerings, BIRD provides two gateway - computation modes, "direct" and "recursive". - - "recursive" is the default, but "direct" can give better results when the ToR - also acts as the route reflector (RR) for the rack. - Specifically, a combined ToR/RR should ideally keep the BGP next hop intact (aka - "next hop keep") when reflecting routes from other nodes in the same rack, but - add itself as the BGP next hop (aka "next hop self") when forwarding routes from - outside the rack. If your ToRs can be configured to do that, fine. - If they cannot, an effective workaround is to configure the ToRs to do "next hop - keep" for all routes, with "gateway direct" on the $[prodname] nodes. In - effect the “gateway direct” applies a “next hop self” when needed, but otherwise - not. - - ::: - -1. Prepare this BGPConfiguration resource to [disable the full node-to-node mesh](bgp.mdx#disable-the-default-bgp-node-to-node-mesh): - - ```yaml - apiVersion: projectcalico.org/v3 - kind: BGPConfiguration - metadata: - name: default - spec: - nodeToNodeMeshEnabled: false - ``` - -1. Prepare disabled IPPool resources for the CIDRs from which you will allocate stable - addresses for dual-homed nodes. For example, if the nodes in rack 'A' will have - stable addresses from 172.31.10.0/24: - - ```yaml - apiVersion: projectcalico.org/v3 - kind: IPPool - metadata: - name: ra-stable - spec: - cidr: 172.31.10.0/24 - disabled: true - nodeSelector: all() - ``` - - If the next rack uses a different CIDR, define a similar IPPool for that rack, and so - on. - - :::note - - These IPPool definitions tell $[prodname]'s BGP component to export - routes within the given CIDRs, which is essential for the core BGP infrastructure to - learn how to route to each stable address. `disabled: true` tells $[prodname] - _not_ to use these CIDRs for pod IPs. - - ::: - -1. Prepare an enabled IPPool resource for your default CIDR for pod IPs. For example: - - ```yaml - apiVersion: projectcalico.org/v3 - kind: IPPool - metadata: - name: default-ipv4 - spec: - cidr: 10.244.0.0/16 - nodeSelector: all() - ``` - - :::note - - The CIDR must match what you specify elsewhere in the Kubernetes - installation. For example, `networking.clusterNetwork.cidr` in OpenShift's install - config, or `--pod-network-cidr` with kubeadm. You should not specify `ipipMode` or - `vxlanMode`, as these are incompatible with dual ToR operation. `natOutgoing` can - be omitted, as here, if your core infrastructure will perform an SNAT for traffic - from pods to the Internet. - - ::: - -1. Prepare an EarlyNetworkConfiguration resource to specify the additional information - that is needed for each node in a multi-rack dual ToR cluster: - - - The stable address for the node. - - Its BGP AS number. 
- - The IPs that the node should peer with, when $[nodecontainer] runs - as a container for early networking setup after each node boot. - - Any labels that the node should have, so as to match the right BGPPeer definitions - for its rack, when $[nodecontainer] runs as a Kubernetes pod. - -
    - With OpenShift, also add a toplevel `platform: openshift` setting. - - :::note - - `platform: openshift` triggers additional per-node setup that is needed - during OpenShift's bootstrapping phase. - - ::: - - For example, with IP addresses and AS numbers similar as for other resources above: - - ```yaml noValidation - apiVersion: projectcalico.org/v3 - kind: EarlyNetworkConfiguration - spec: - platform: openshift - nodes: - # worker1 - - interfaceAddresses: - - 172.31.11.3 - - 172.31.12.3 - stableAddress: - address: 172.31.10.3 - asNumber: 65001 - peerings: - - peerIP: 172.31.11.100 - - peerIP: 172.31.12.100 - labels: - rack: ra - # worker2 - - interfaceAddresses: - - 172.31.21.4 - - 172.31.22.4 - stableAddress: - address: 172.31.20.4 - asNumber: 65002 - peerings: - - peerIP: 172.31.21.100 - - peerIP: 172.31.22.100 - labels: - rack: rb - ... - ``` - -1. Prepare a ConfigMap resource named "bgp-layout", in namespace "tigera-operator", that - wraps the EarlyNetworkConfiguration like this: - - ```yaml - apiVersion: v1 - kind: ConfigMap - metadata: - name: bgp-layout - namespace: tigera-operator - data: - earlyNetworkConfiguration: | - apiVersion: projectcalico.org/v3 - kind: EarlyNetworkConfiguration - spec: - nodes: - # worker1 - - interfaceAddresses: - ... - ``` - -:::note - -EarlyNetworkConfiguration supplies labels and AS numbers to apply to each -Calico node, as well as peering and other network configuration to use during node -startup prior to receiving BGPPeer and BGPConfiguration resources from the datastore. -EarlyNetworkConfiguration will be superseded by any BGPPeer or BGPConfiguration -resources after successful startup. - -::: - -### Arrange for dual-homed nodes to run $[nodecontainer] on each boot - -$[prodname]'s $[nodecontainer] image normally runs as a Kubernetes pod, but -for dual ToR setup it must also run as a container after each boot of a dual-homed node. -For example: - -``` -podman run --privileged --net=host \ - -v /calico-early:/calico-early -e CALICO_EARLY_NETWORKING=/calico-early/cfg.yaml \ - $[registry]$[imageNames.node]:latest -``` - -The environment variable `CALICO_EARLY_NETWORKING` must point to the -EarlyNetworkConfiguration prepared above, so that EarlyNetworkConfiguration YAML must be -copied into a file on the node (here, `/calico-early/cfg.yaml`) and mapped into the -$[nodecontainer] container. - -We recommend defining systemd services to ensure that early networking runs on each boot, -and before kubelet starts on the node. Following is an example that may need tweaking for -your particular platform, but that illustrates the important points. 
- -Firstly, a "calico-early" service that runs the Calico early networking on each boot: - -``` -[Unit] -Wants=network-online.target -After=network-online.target -After=nodeip-configuration.service -[Service] -ExecStartPre=/bin/sh -c "rm -f /etc/systemd/system/kubelet.service.d/20-nodenet.conf /etc/systemd/system/crio.service.d/20-nodenet.conf; systemctl daemon-reload" -ExecStartPre=-/bin/podman rm -f calico-early -ExecStartPre=/bin/mkdir -p /etc/calico-early -ExecStartPre=/bin/sh -c "test -f /etc/calico-early/details.yaml || /bin/curl -o /etc/calico-early/details.yaml http://172.31.1.1:8080/calico-early/details.yaml" -ExecStart=/bin/podman run --rm --privileged --net=host --name=calico-early -v /etc/calico-early:/etc/calico-early -e CALICO_EARLY_NETWORKING=/etc/calico-early/details.yaml $[registry]$[imageNames.node]:latest -[Install] -WantedBy=multi-user.target -``` - -:::note - -- You must also install your Tigera-issued pull secret at `/root/.docker/config.json`, - on each node, to enable pulling from $[registry]. -- Some OpenShift versions have a `nodeip-configuration` service that configures - kubelet's `--node-ip` option **wrongly** for a dual ToR setup. The - `After=nodeip-configuration.service` setting and the deletion of `20-nodenet.conf` - undo that service's work so that kubelet can choose its own IP correctly (using a - reverse DNS lookup). -- The `/bin/curl ...` line shows how you can download the EarlyNetworkConfiguration - YAML from a central hosting point within your cluster. - -::: - -Secondly, a "calico-early-wait" service that delays kubelet until after the Calico early -networking setup is in place: - -``` -[Unit] -After=calico-early.service -Before=kubelet.service -[Service] -Type=oneshot -ExecStart=/bin/sh -c "while sleep 5; do grep -q 00000000:1FF3 /proc/net/tcp && break; done; sleep 15" -[Install] -WantedBy=multi-user.target -``` - -:::note - -- The `ExecStart` line here arranges that kubelet will not start running until the - calico-early service has started listening on port 8179 (hex `1FF3`). 8179 is the - port that the calico-early service uses for pre-Kubernetes BGP. -- We have sometimes observed issues if kubelet starts immediately after Calico's early - networking setup, because of NetworkManager toggling the hostname. The final `sleep 15` allows for such changes to settle down before kubelet starts. - -::: - -On OpenShift you should wrap the above service definitions in `MachineConfig` resources -for the control and worker nodes. - -On other platforms either define and enable the above services directly, or use -whatever API the platform provides to define and enable services on new nodes. - -### Configure your ToR routers and infrastructure - -You should configure your ToR routers to accept all the BGP peerings from -$[prodname] nodes, to reflect routes between the nodes in that rack, and to -propagate routes between the ToR routers in different racks. In addition we recommend -consideration of the following points. - -BFD should be enabled if possible on all BGP sessions - both to the $[prodname] -nodes, and between racks in your core infrastructure - so that a break in connectivity -anywhere can be rapidly detected. The handling should be to initiate LLGR procedures if -possible, or else terminate the BGP session non-gracefully. - -LLGR should be enabled if possible on all BGP sessions - again, both to the -$[prodname] nodes, and between racks in your core infrastructure. 
Traditional BGP -graceful restart should not be used, because this will delay the cluster's response to a -break in one of the connectivity planes. - -### Install Kubernetes and $[prodname] - -Details here vary, depending on **when** your Kubernetes installer gives an opportunity -for you to define custom resources, but fundamentally what is needed here is to perform -the installation as usual, except that all of the Calico resources prepared above, except -the EarlyNetworkConfiguration, must be added into the datastore **before** the -$[nodecontainer] pods start running on any node. We can illustrate this by looking -at two examples: with OpenShift, and when adding Calico to an existing Kubernetes cluster. - -**OpenShift** - -With OpenShift, follow [our documentation](../../getting-started/install-on-clusters/openshift/index.mdx) as -far as the option to [provide additional configuration](../../getting-started/install-on-clusters/openshift/installation.mdx). -Then use `kubectl create configmap ...`, as that documentation says, to combine the -prepared BGPPeer, BGPConfiguration and IPPool resources into a `calico-resources` -ConfigMap. Place the generated file in the manifests directory for the OpenShift install. - -Also place the "bgp-layout" ConfigMap file in the manifests directory. - -Now continue with the OpenShift install process, and it will take care of adding those -resources into the datastore as early as possible. - -**Adding to an existing Kubernetes cluster** - -Follow [our documentation](../../getting-started/install-on-clusters/kubernetes/generic-install.mdx) as -far as the option for installing any custom Calico resources. Then use `calicoctl`, as -that documentation says, to install the prepared BGPPeer, BGPConfiguration and IPPool -resources. - -Also use `kubectl` to install the "bgp-layout" ConfigMap. - -Now continue with the $[prodname] install process, and you should observe each node -establishing BGP sessions with its ToRs. - -### Verify the deployment - -If you examine traffic and connections within the cluster - for example, using `ss` or -`tcpdump` - you should see that all connections use loopback IP addresses or pod CIDR IPs -as their source and destination. For example: - -- The kubelet on each node connecting to the API server. - -- The API server's connection to its backing etcd database, and peer connections between - the etcd cluster members. - -- Pod connections that involve an SNAT or MASQUERADE in the data path, as can be the case - when connecting to a Service through a cluster IP or NodePort. At the point of the - SNAT or MASQUERADE, a loopback IP address should be used. - -- Direct connections between pod IPs on different nodes. - -The only connections using interface-specific addresses should be BGP. - -:::note - -If you plan to use [Egress Gateways](../egress/egress-gateway-on-prem.mdx) in your cluster, you must also adjust the $[nodecontainer] IP -auto-detection method to pick up the stable IP, for example using the `interface: lo` setting -(The default first-found setting skips over the lo interface). This can be configured via the -$[prodname] [Installation resource](../../reference/installation/api.mdx#nodeaddressautodetection). 
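-
-For example, here is a minimal sketch of the relevant Installation fields (assuming the `nodeAddressAutodetectionV4` option described in that reference):
-
-```yaml
-kind: Installation
-apiVersion: operator.tigera.io/v1
-metadata:
-  name: default
-spec:
-  calicoNetwork:
-    nodeAddressAutodetectionV4:
-      interface: lo
-```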
- -::: - -If you look at the Linux routing table on any cluster node, you should see ECMP routes -like this to the loopback address of every other node in other racks: - -``` -172.31.20.4/32 - nexthop via 172.31.11.250 dev eth0 - nexthop via 172.31.12.250 dev eth1 -``` - -and like this to the loopback address of every other node in the same rack: - -``` -172.31.10.4/32 - nexthop dev eth0 - nexthop dev eth1 -``` - -If you launch some pods in the cluster, you should see ECMP routes for the /26 IP blocks -for the nodes where those pods were scheduled, like this: - -``` -10.244.192.128/26 - nexthop via 172.31.11.250 dev eth0 - nexthop via 172.31.12.250 dev eth1 -``` - -If you do something to break the connectivity between racks of one of the planes, you -should see, within only a few seconds, that the affected routes change to have a single -path only, via the plane that is still unbroken; for example: - -``` -172.31.20.4/32 via 172.31.12.250 dev eth1` -10.244.192.128/26 via 172.31.12.250 dev eth1 -``` - -When the connectivity break is repaired, those routes should change to become ECMP again. diff --git a/calico-enterprise_versioned_docs/version-3.19-2/networking/configuring/index.mdx b/calico-enterprise_versioned_docs/version-3.19-2/networking/configuring/index.mdx deleted file mode 100644 index 5619e84926..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/networking/configuring/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: Configure Calico networking options. -hide_table_of_contents: true ---- - -# Configure Calico Enterprise networking - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/networking/configuring/mtu.mdx b/calico-enterprise_versioned_docs/version-3.19-2/networking/configuring/mtu.mdx deleted file mode 100644 index a46090b002..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/networking/configuring/mtu.mdx +++ /dev/null @@ -1,119 +0,0 @@ ---- -description: Optimize network performance for workloads by configuring the MTU in Calico to best suit your underlying network. ---- - -# Configure MTU to maximize network performance - -## Big picture - -Configure the maximum transmission unit (MTU) for your $[prodname] environment. - -## Value - -Optimize network performance for workloads by configuring the MTU in $[prodname] to best suit your underlying network. - -Increasing the MTU can improve performance, and decreasing the MTU can resolve packet loss and fragmentation problems when it is too high. - -## Concepts - -### MTU and $[prodname] defaults - -The maximum transmission unit (MTU) setting determines the largest packet size that can be transmitted through your network. MTU is configured on the veth attached to each workload, and tunnel devices (if you enable IP in IP, VXLAN, or WireGuard). - -In general, maximum performance is achieved by using the highest MTU value that does not cause fragmentation or dropped packets on the path. Maximum bandwidth increases and CPU consumption may drop for a given traffic rate. The improvement is often more significant when pod to pod traffic is being encapsulated (IP in IP, VXLAN, or WireGuard), and splitting and combining such traffic cannot be offloaded to your NICs. - -By default, $[prodname] will auto-detect the correct MTU for your cluster based on node configuration and enabled networking modes. 
This guide explains how you can override auto-detection -of MTU by providing an explicit value if needed. - -To ensure auto-detection of MTU works correctly, make sure that the correct encapsulation modes are set in your [felix configuration](../../reference/resources/felixconfig.mdx). Disable any unused encapsulations (`vxlanEnabled`, `ipipEnabled`, `wireguardEnabled` and `wireguardEnabledV6`) in your felix configuration to ensure that auto-detection can pick the optimal MTU for your cluster. - -## Before you begin... - -**Required** - -- Calico CNI - -For help on using IP in IP and/or VXLAN overlays, see [Configure overlay networking](vxlan-ipip.mdx). - -For help on using WireGuard encryption, see [Configure WireGuard encryption](../../compliance/encrypt-cluster-pod-traffic.mdx). - -## How to - -- [Determine MTU size](#determine-mtu-size) -- [Configure MTU](#configure-mtu) -- [View current tunnel MTU values](#view-current-tunnel-mtu-values) - -### Determine MTU size - -The following table lists common MTU sizes for $[prodname] environments. Because MTU is a global property of the network path between endpoints, you should set the MTU to the minimum MTU of any path that packets may take. - -**Common MTU sizes** - -| Network MTU | $[prodname] MTU | $[prodname] MTU with IP-in-IP (IPv4) | $[prodname] MTU with VXLAN (IPv4) | $[prodname] MTU with VXLAN (IPv6) | $[prodname] MTU with WireGuard (IPv4) | $[prodname] MTU with WireGuard (IPv6) | -| ---------------------- | ---------------- | ------------------------------------- | ---------------------------------- | ---------------------------------- | -------------------------------------- | -------------------------------------- | -| 1500 | 1500 | 1480 | 1450 | 1430 | 1440 | 1420 | -| 9000 | 9000 | 8980 | 8950 | 8930 | 8940 | 8920 | -| 1500 (AKS) | 1500 | 1480 | 1450 | 1430 | 1340 | 1320 | -| 1460 (GCE) | 1460 | 1440 | 1410 | 1390 | 1400 | 1380 | -| 9001 (AWS Jumbo) | 9001 | 8981 | 8951 | 8931 | 8941 | 8921 | -| 1450 (OpenStack VXLAN) | 1450 | 1430 | 1400 | 1380 | 1390 | 1370 | - -**Recommended MTU for overlay networking** - -The extra overlay header used in IP in IP, VXLAN and WireGuard protocols, reduces the minimum MTU by the size of the header. (IP in IP uses a 20-byte header, IPv4 VXLAN uses a 50-byte header, IPv6 VXLAN uses a 70-byte header, IPv4 WireGuard uses a [60-byte header](https://lists.zx2c4.com/pipermail/wireguard/2017-December/002201.html) and IPv6 WireGuard uses an 80-byte header). - -When using AKS, the underlying network has an [MTU of 1400](https://docs.microsoft.com/en-us/azure/virtual-network/virtual-network-tcpip-performance-tuning#azure-and-vm-mtu), even though the network interface will have an MTU of 1500. -WireGuard sets the Don't Fragment (DF) bit on its packets, and so the MTU for WireGuard on AKS needs to be set to 60 bytes below (or 80 bytes for IPv6) the 1400 MTU of the underlying network to avoid dropped packets. - -If you have a mix of WireGuard and either IP in IP or VXLAN in your cluster, you should configure the MTU to be the smallest of the values of each encap type. The reason for this is that only WireGuard encapsulation will be used between any nodes where both have WireGuard enabled, and IP in IP or VXLAN will then be used between any nodes where both do not have WireGuard enabled. This could be the case if, for example, you are in the process of installing WireGuard on your nodes. 
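-
-For example, on an underlying network with a 1500-byte MTU, a cluster that mixes IPv4 WireGuard with IPv4 VXLAN would use the smaller of the two corresponding values from the table above: min(1450, 1440) = 1440, that is, the physical MTU minus 60.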
- -Therefore, we recommend the following: - -- If you use IPv4 WireGuard encryption anywhere in your pod network, configure MTU size as “physical network MTU size minus 60”. -- If you use IPv6 WireGuard encryption anywhere in your pod network, configure MTU size as “physical network MTU size minus 80”. -- If you don't use WireGuard, but use IPv4 VXLAN anywhere in your pod network, configure MTU size as “physical network MTU size minus 50”. -- If you don't use WireGuard, but use IPv6 VXLAN anywhere in your pod network, configure MTU size as “physical network MTU size minus 70”. -- If you don't use WireGuard, but use only IP in IP, configure MTU size as “physical network MTU size minus 20” -- Set the workload endpoint MTU and the tunnel MTUs to the same value (so all paths have the same MTU) - -**eBPF mode** - -Implementation of NodePorts uses VXLAN tunnel to hand off packets from one node to another, therefore VXLAN MTU setting -is used to set the MTUs of workloads (veths) and should be “physical network MTU size minus 50” (see above). - -**MTU for flannel networking** - -When using flannel for networking, the MTU for network interfaces should match the MTU of the flannel interface. - -- If using flannel with VXLAN, use the “$[prodname] MTU with VXLAN” column in the table above for common sizes. - -### Configure MTU - -:::note - -The updated MTU used by $[prodname] only applies to new workloads. - -::: - -For Operator installations, edit the $[prodname] operator `Installation` resource to set the `mtu` -field in the `calicoNetwork` section of the `spec`. For example: - -```bash -kubectl patch installation.operator.tigera.io default --type merge -p '{"spec":{"calicoNetwork":{"mtu":1440}}}' -``` - -Similarly, for OpenShift: - -```bash -oc patch installation.operator.tigera.io default --type merge -p '{"spec":{"calicoNetwork":{"mtu":1440}}}' -``` - -### View current tunnel MTU values - -To view the current tunnel size, use the following command: - -`ip link show` - -The IP in IP tunnel appears as tunlx (for example, tunl0), along with the MTU size. For example: - -![Tunnel MTU](/img/calico-enterprise/tunnel.png) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/networking/configuring/multiple-networks.mdx b/calico-enterprise_versioned_docs/version-3.19-2/networking/configuring/multiple-networks.mdx deleted file mode 100644 index 3576d62821..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/networking/configuring/multiple-networks.mdx +++ /dev/null @@ -1,249 +0,0 @@ ---- -description: Configure a cluster with multiple Calico Enterprise networks on each pod, and enforce security using Calico Enterprise tiered network policy. ---- - -# Configure multiple Calico Enterprise networks on a pod - -## Big picture - -Configure a Kubernetes cluster with multiple $[prodname] networks on each pod, and enforce security using $[prodname] tiered network policy. - -## Value - -By default, you can configure only one CNI (network and pod interface) in a cluster. But many deployments require multiple networks (for example, one that is faster or more secure) for sending different types of data. $[prodname] supports configuring additional $[prodname] networks and interfaces in your pods using the Multus-CNI plugin. You can then use $[prodname] tiered policy and other features to enforce security on all of your workload traffic. 
- -## Concepts - -### About the Multus-CNI plugin - -$[prodname] uses the [Multus-CNI plugin](https://github.com/intel/multus-cni/) to create multiple $[prodname] networks and multiple pod interfaces to access these networks. This extends the default network and pod interface that comes with the Calico CNI. - -You install Multus on a cluster, then simply enable Multus in the $[prodname] Installation resource. Using the Multus **NetworkAttachmentDefinition**, you define the new networks and reference them as an annotation in the pod resource. - -### Labels, workload endpoints, and policy - -When you set the `MultiInterfaceMode` field to `Multus` in the Installation resource, the following network and network interface labels are automatically added to new workload endpoints. - -- `projectcalico.org/network` -- `projectcalico.org/network-namespace` -- `projectcalico.org/network-interface` - -You can then create $[prodname] policies using these label selectors to target specific networks or network interfaces. - -### Limitations - -**Maximum additional networks per pod** - -You can define a maximum of nine additional $[prodname] networks on a pod. If you add a network that exceeds the limit for the pod, networking is not configured and the pod fails to start with an associated error. - -**$[prodname] features** - -Although the following $[prodname] features are supported for your default $[prodname] network, they are not supported at this time for additional networks/network interfaces using Multus: - -- Floating IPs -- Specific IPs -- Specifying IP pools on a per-namespace or per-pod basis -- Egress gateways - -## Before you begin... - -**Required** - -- Calico CNI - - :::note - - Verify that you are using the Calico Enterprise CNI. The CNI plugin used by Kubernetes for AKS, EKS, and GKE may be different, which means this feature will not work. - - ::: - -- [Install Multus 3.0+ on your Kubernetes cluster](https://github.com/intel/multus-cni/) - :::note - - Multus is installed on OpenShift 4.0+ clusters. - - ::: - -- [Install and configure calicoctl](../../operations/clis/calicoctl/index.mdx) or configure access to [the web console](../../operations/cnx/access-the-manager.mdx) - -## How to - -1. [Configure cluster for multiple networks](#configure-cluster-for-multiple-networks) -1. [Create a new network](#create-a-new-network) -1. [Create a pod interface for the new network](#create-a-pod-interface-for-the-new-network) -1. [Configure the IP pool for the network](#configure-the-ip-pool-for-the-network) -1. [Enforce policy on the new network and pod interface](#enforce-policy-on-the-new-network-and-pod-interface) -1. [View workload endpoints](#view-workload-endpoints) - -### Configure cluster for multiple networks - -In the [Installation custom resource](../../reference/installation/api.mdx#caliconetworkspec), set the `MultiInterfaceMode` to **Multus**. - -### Create a new network - -Create a new network using the Multus **NetworkAttachmentDefinition**, and set the following required field to `"type":"calico"`. 
-
-```yaml
-apiVersion: 'k8s.cni.cncf.io/v1'
-kind: NetworkAttachmentDefinition
-metadata:
-  name: additional-calico-network
-spec:
-  config: '{
-    "cniVersion": "0.3.1",
-    "type": "calico",
-    "log_level": "info",
-    "datastore_type": "kubernetes",
-    "mtu": 1410,
-    "nodename_file_optional": false,
-    "ipam": {
-        "type": "calico-ipam",
-        "assign_ipv4" : "true",
-        "assign_ipv6" : "false"
-    },
-    "policy": {
-        "type": "k8s"
-    },
-    "kubernetes": {
-        "kubeconfig": "/etc/cni/net.d/calico-kubeconfig"
-    }
-  }'
-```
-
-### Create a pod interface for the new network
-
-Create a pod interface that specifies the new network using an annotation.
-
-In the following example, we create a pod with an additional pod interface named `cali1`. The pod interface is attached to the network named `additional-calico-network`, using the `k8s.v1.cni.cncf.io/networks` annotation.
-Note that all networks in `k8s.v1.cni.cncf.io/networks` are assumed to be $[prodname] networks.
-
-```yaml
-apiVersion: v1
-kind: Pod
-metadata:
-  name: multus-test-pod-1
-  namespace: default
-  annotations:
-    k8s.v1.cni.cncf.io/networks: additional-calico-network@cali1
-spec:
-  nodeSelector:
-    kubernetes.io/os: linux
-  containers:
-    - name: multus-test
-      command: ['/bin/sh', '-c', 'trap : TERM INT; sleep infinity & wait']
-      image: alpine
-```
-
-### Configure the IP pool for the network
-
-Although not required, you may want to assign IPs from specific pools to specific network interfaces. If you are using the [Calico Enterprise IPAM plugin](../../reference/component-resources/configuration.mdx#specifying-ip-pools-on-a-per-namespace-or-per-pod-basis), specify the IP pools in the **NetworkAttachmentDefinition** custom resource. For example:
-
-```
- "ipam": {
-      "type": "calico-ipam",
-      "assign_ipv4" : "true",
-      "assign_ipv6" : "false",
-      "ipv4_pools": ["10.0.0.0/24", "20.0.0.0/16", "default-ipv4-ippool"]
-},
-```
-
-### Enforce policy on the new network and pod interface
-
-When `MultiInterfaceMode` is set to `Multus`, WorkloadEndpoints are created with these labels:
-
-- `projectcalico.org/network`
-- `projectcalico.org/network-namespace`
-- `projectcalico.org/network-interface`
-
-You can use these labels to enforce policies on specific interfaces and networks using policy label selectors.
-
-:::note
-
-Prior to $[prodname] 3.0, if you were using Kubernetes datastore (kdd mode), the workload endpoint field and name suffix were always **eth0**. In 3.0, the value for workload labels may not be what you expect. Before creating policies targeting WorkloadEndpoints using the new labels, you should verify label values using the commands in [View workload endpoints](#view-workload-endpoints).
-
-:::
-
-In this policy example, we use the selector field to target all WorkloadEndpoints with the network interface `cali1`.
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: NetworkPolicy
-metadata:
-  name: internal-access.allow-tcp-6379
-  namespace: production
-spec:
-  tier: internal-access
-  selector: projectcalico.org/network-interface == 'cali1'
-  types:
-    - Ingress
-    - Egress
-  ingress:
-    - action: Allow
-      metadata:
-        annotations:
-          from: frontend
-          to: database
-      protocol: TCP
-      source:
-        selector: role == 'frontend'
-      destination:
-        ports:
-          - 6379
-  egress:
-    - action: Allow
-```
-
-### View workload endpoints
-
-**In the $[prodname] web console**, go to the **WorkloadEndpoint** page to see all of the WorkloadEndpoints, including the network labels used for targeting WorkloadEndpoints with policy.
- -**Using the CLI...** - -To view all WorkloadEndpoints for pods (default and new), use the following command. - -``` -MULTI_INTERFACE_MODE=multus calicoctl get workloadendpoints -o wide -``` - -``` -NAME WORKLOAD NODE NETWORKS INTERFACE PROFILES NATS -test--bo--72vg--kadm--infra--0-k8s-multus--test--pod--1-eth0 multus-test-pod-1 bryan-bo-72vg-kadm-infra-0 192.168.53.129/32 calif887e436e8b kns.default,ksa.default.default -test--bo--72vg--kadm--infra--0-k8s-multus--test--pod--1-net1 multus-test-pod-1 bryan-bo-72vg-kadm-infra-0 192.168.53.140/32 calim17CD6INXIX kns.default,ksa.default.default -test--bo--72vg--kadm--infra--0-k8s-multus--test--pod--1-testiface multus-test-pod-1 bryan-bo-72vg-kadm-infra-0 192.168.53.142/32 calim27CD6INXIX kns.default,ksa.default.default -test--bo--72vg--kadm--infra--0-k8s-multus--test--pod--1-net3 multus-test-pod-1 bryan-bo-72vg-kadm-infra-0 192.168.52.143/32 calim37CD6INXIX kns.default,ksa.default.default -``` - -To view specific WorkloadEndpoints, use the following command. - -``` -MULTI_INTERFACE_MODE=multus calicoctl get workloadendpoint test--bz--72vg--kadm--infra--0-k8s-multus--test--pod--1-net1 -o yaml -``` - -```yaml -apiVersion: projectcalico.org/v3 -kind: WorkloadEndpoint -metadata: - creationTimestamp: '2020-05-04T22:23:05T' - labels: - projectcalico.org/namespace: default - projectcalico.org/network: calico - projectcalico.org/network-interface: net1 - projectcalico.org/network-namespace: default - projectcalico.org/orchestrator: k8s - projectcalico.org/serviceaccount: default - name: test--bz--72vg--kadm--infra--0-k8s-multus--test--pod--1-net1 - namespace: default - resourceVersion: '73572' - uid: b9bb7482-cdb8-48d4-9ae5-58322d48391a -spec: - endpoint: net1 - interfaceName: calim16CD6INXIX - ipNetworks: - - 192.168.52.141/32 - node: bryan-bo-72vg-kadm-infra-0 - orchestrator: k8s - pod: multus-test-pod-1 - profiles: - - kns.default - - ksa.default.default -``` diff --git a/calico-enterprise_versioned_docs/version-3.19-2/networking/configuring/node-local-dns-cache.mdx b/calico-enterprise_versioned_docs/version-3.19-2/networking/configuring/node-local-dns-cache.mdx deleted file mode 100644 index 31f2ea4889..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/networking/configuring/node-local-dns-cache.mdx +++ /dev/null @@ -1,69 +0,0 @@ ---- -description: Install NodeLocal DNSCache ---- - -# Use NodeLocal DNSCache in your cluster - -## Big picture - -Set up NodeLocal DNSCache to improve DNS lookup latency. - -## Before you begin - -### Required - -Follow these [steps](https://kubernetes.io/docs/tasks/administer-cluster/nodelocaldns/) to enable NodeLocal DNSCache connectivity. - -### Unsupported - -- OpenShift - -NodeLocal DNSCache does not support OpenShift. Openshift has its own DNS caching mechanism which deploys CoreDNS pods as DaemonSets. -For more information, see [Openshift DNS setup](https://docs.openshift.com/container-platform/4.10/networking/dns-operator.html). - -## Create a policy to allow traffic from NodeLocal DNSCache - -The following is a sample network policy that allows all incoming TCP traffic (including incoming traffic from -`node-local-dns` pods) on port 53 on `kube-dns`. 
- -```yaml -apiVersion: projectcalico.org/v3 -kind: NetworkPolicy -metadata: - name: default.local-dns-to-core-dns - namespace: kube-system -spec: - tier: default - selector: k8s-app == "kube-dns" - ingress: - - action: Allow - protocol: TCP - destination: - selector: k8s-app == "kube-dns" - ports: - - '53' - types: - - Ingress -``` - -To refine the sources permitted by this policy, take into account that NodeLocal DNSCache pods are host networked, -and make sure to allow traffic from the addresses of your hosts. -If you're using encapsulation, you will need to allow connectivity from the tunnel IPs. - -The Tigera Operator creates policy to allow Tigera components to connect to NodeLocal DNSCache when detected. -Felix accounts for the NodeLocal DNSCache in creating DNS Logs and enforcing DNS Policy. - -## Enable NodeLocal DNSCache on $[prodname] using the eBPF data plane - -If your $[prodname] installation uses the eBPF data plane, then you need to annotate the `kube-dns` service. -Without this, the `kube-proxy` replacement will improperly resolve traffic going to a specific service IP. - -Annotate the `kube-dns` service by running the following command: - -```bash -kubectl annotate service kube-dns -n kube-system projectcalico.org/natExcludeService=true -``` - -## Additional resources - -- [Blog: Optimizing Kubernetes DNS with NodeLocal DNSCache and Calico eBPF: A Practitioner’s Guide](https://www.tigera.io/blog/optimizing-kubernetes-dns-with-nodelocal-dnscache-and-calico-ebpf-a-practitioners-guide/) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/networking/configuring/pod-mac-address.mdx b/calico-enterprise_versioned_docs/version-3.19-2/networking/configuring/pod-mac-address.mdx deleted file mode 100644 index 0ec4c2519d..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/networking/configuring/pod-mac-address.mdx +++ /dev/null @@ -1,35 +0,0 @@ ---- -description: Specify the MAC address for a pod instead of allowing the operating system to assign one ---- - -# Use a specific MAC address for a pod - -## Big picture - -Choose the MAC address for a pod instead of allowing the operating system to assign one. - -## Value - -Some applications bind software licenses to networking interface MAC addresses. - -## Concepts - -### Container MAC address - -The MAC address configured by the annotation described here will be visible from within the container on the eth0 interface. Since it is isolated to the container it will not collide with any other MAC addresses assigned to other pods on the same node. - -## Before you begin... - -Your cluster must be using Calico CNI to use this feature. - -[Configuring the Calico CNI Plugins](../../reference/component-resources/configuration.mdx) - -## How to - -Annotate the pod with cni.projectcalico.org/hwAddr set to the desired MAC address. For example: - -``` - "cni.projectcalico.org/hwAddr": "1c:0c:0a:c0:ff:ee" -``` - -The annotation must be present when the pod is created; adding it later has no effect. diff --git a/calico-enterprise_versioned_docs/version-3.19-2/networking/configuring/vxlan-ipip.mdx b/calico-enterprise_versioned_docs/version-3.19-2/networking/configuring/vxlan-ipip.mdx deleted file mode 100644 index ec0c6a99ec..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/networking/configuring/vxlan-ipip.mdx +++ /dev/null @@ -1,148 +0,0 @@ ---- -description: Configure Calico to use IP in IP or VXLAN overlay networking so the underlying network doesn’t need to understand pod addresses. 
---- - -# Overlay networking - -## Big picture - -Enable inter workload communication across networks that are not aware of workload IPs. - -## Value - -In general, we recommend running Calico without network overlay/encapsulation. This gives you the highest performance and simplest network; the packet that leaves your workload is the packet that goes on the wire. - -However, selectively using overlays/encapsulation can be useful when running on top of an underlying network that cannot easily be made aware of workload IPs. A common example is if you are using Calico networking in AWS across multiple VPCs/subnets. In this case, Calico can selectively encapsulate only the traffic that is routed between the VPCs/subnets, and run without encapsulation within each VPC/subnet. You might also decide to run your entire Calico network with encapsulation as an overlay network -- as a quick way to get started without setting up BGP peering or other routing information in your underlying network. - -## Concepts - -### Routing workload IP addresses - -Networks become aware of workload IP addresses through layer 3 routing techniques like static routes or BGP route distribution, or layer 2 address learning. As such, they can route unencapsulated traffic to the right host for the endpoint that is the ultimate destination. However, not all networks are able to route workload IP addresses. For example, public cloud environments where you don’t own the hardware, AWS across VPC subnet boundaries, and other scenarios where you cannot peer Calico over BGP to the underlay, or easily configure static routes. This is why Calico supports encapsulation, so you can send traffic between workloads without requiring the underlying network to be aware of workload IP addresses. - -### Encapsulation types - -Calico supports two types of encapsulation: VXLAN and IP in IP. VXLAN is supported in some environments where IP in IP is not (for example, Azure). VXLAN has a slightly higher per-packet overhead because the header is larger, but unless you are running very network intensive workloads the difference is not something you would typically notice. The other small difference between the two types of encapsulation is that Calico's VXLAN implementation does not use BGP, whereas Calico's IP in IP implementation uses BGP between Calico nodes. - -### Cross-subnet - -Encapsulation of workload traffic is typically required only when traffic crosses a router that is unable to route workload IP addresses on its own. Calico can perform encapsulation on: all traffic, no traffic, or only on traffic that crosses a subnet boundary. - -## Before you begin - -**Required** - -- Calico CNI - -**Not supported** - -- Calico for OpenStack (i.e. 
when Calico is used as the Neutron plugin) - -**Limitations** - -- IP in IP supports only IPv4 addresses -- VXLAN in IPv6 is only supported for kernel versions ≥ 4.19.1 or redhat kernel version ≥ 4.18.0 - -## How to - -- [Configure default IP pools at install time](#configure-default-ip-pools-at-install-time) -- [Configure IP in IP encapsulation for only cross-subnet traffic](#configure-ip-in-ip-encapsulation-for-only-cross-subnet-traffic) -- [Configure IP in IP encapsulation for all inter workload traffic](#configure-ip-in-ip-encapsulation-for-all-inter-workload-traffic) -- [Configure VXLAN encapsulation for only cross-subnet traffic](#configure-vxlan-encapsulation-for-only-cross-subnet-traffic) -- [Configure VXLAN encapsulation for all inter workload traffic](#configure-vxlan-encapsulation-for-all-inter-workload-traffic) - -### IPv4/6 address support - -IP in IP supports only IPv4 addresses. - -### Best practice - -Calico has an option to selectively encapsulate only traffic that crosses subnet boundaries. We recommend using the **cross-subnet** option with IP in IP or VXLAN to minimize encapsulation overhead. Cross-subnet mode provides better performance in AWS multi-AZ deployments, Azure VNETs, and on networks where routers are used to connect pools of nodes with L2 connectivity. - -Be aware that switching encapsulation modes can cause disruption to in-progress connections. Plan accordingly. - -### Configure default IP pools at install time - -Default IP pools are configured at install-time automatically by Calico. - -For operator managed clusters, you can configure encapsulation in the IP pools section of the default Installation. For example, the following installation snippet will enable VXLAN across subnets. - -```yaml -kind: Installation -apiVersion: operator.tigera.io/v1 -metadata: - name: default -spec: - calicoNetwork: - ipPools: - - cidr: 192.168.0.0/16 - encapsulation: VXLANCrossSubnet -``` - -### Configure IP in IP encapsulation for only cross-subnet traffic - -IP in IP encapsulation can be performed selectively, and only for traffic crossing subnet boundaries. - -To enable this feature, set `ipipMode` to `CrossSubnet`. - -```yaml -apiVersion: projectcalico.org/v3 -kind: IPPool -metadata: - name: ippool-ipip-cross-subnet-1 -spec: - cidr: 192.168.0.0/16 - ipipMode: CrossSubnet - natOutgoing: true -``` - -### Configure IP in IP encapsulation for all inter workload traffic - -With `ipipMode` set to `Always`, Calico routes traffic using IP in IP for all traffic originating from a Calico enabled-host, to all Calico networked containers and VMs within the IP pool. - -```yaml -apiVersion: projectcalico.org/v3 -kind: IPPool -metadata: - name: ippool-ipip-1 -spec: - cidr: 192.168.0.0/16 - ipipMode: Always - natOutgoing: true -``` - -### Configure VXLAN encapsulation for only cross subnet traffic - -VXLAN encapsulation can be performed selectively, and only for traffic crossing subnet boundaries. - -To enable this feature, set `vxlanMode` to `CrossSubnet`. - -```yaml -apiVersion: projectcalico.org/v3 -kind: IPPool -metadata: - name: ippool-vxlan-cross-subnet-1 -spec: - cidr: 192.168.0.0/16 - vxlanMode: CrossSubnet - natOutgoing: true -``` - -### Configure VXLAN encapsulation for all inter workload traffic - -With `vxlanMode` set to `Always`, Calico routes traffic using VXLAN for all traffic originating from a Calico enabled host, to all Calico networked containers and VMs within the IP pool. 
- -```yaml -apiVersion: projectcalico.org/v3 -kind: IPPool -metadata: - name: ippool-vxlan-1 -spec: - cidr: 192.168.0.0/16 - vxlanMode: Always - natOutgoing: true -``` - -## Additional resources - -For details on IP pool resource options, see [IP pool](../../reference/resources/ippool.mdx). diff --git a/calico-enterprise_versioned_docs/version-3.19-2/networking/configuring/workloads-outside-cluster.mdx b/calico-enterprise_versioned_docs/version-3.19-2/networking/configuring/workloads-outside-cluster.mdx deleted file mode 100644 index c3f932f73a..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/networking/configuring/workloads-outside-cluster.mdx +++ /dev/null @@ -1,70 +0,0 @@ ---- -description: Configure networking to perform outbound NAT for connections from pods to outside of the cluster. ---- - -# Configure outgoing NAT - -## Big picture - -Configure $[prodname] networking to perform outbound NAT for connections from pods to outside of the cluster. $[prodname] optionally source NATs the pod IP to the node IP. - -## Value - -The $[prodname] NAT outbound connection option is flexible; it can be enabled, disabled, and applied to $[prodname] IP pools with public IPs, private IPs, or a specific range of IP addresses. This article describes some use cases for enabling and disabling outgoing NAT. - -## Concepts - -### $[prodname] IP pools and NAT - -When a pod with an IP address in the pool initiates a network connection to an IP address to outside of $[prodname]’s IP pools, the outgoing packets will have their source IP address changed from the pod IP address to the node IP address using SNAT (Source Network Address Translation). Any return packets on the connection automatically get this change reversed before being passed back to the pod. - -### Enable NAT: for pods with IP addresses that are not routable beyond the cluster - -A common use case for enabling NAT outgoing, is to allow pods in an overlay network to connect to IP addresses outside of the overlay, or pods with private IP addresses to connect to public IP addresses outside the cluster/the internet (subject to network policy allowing the connection, of course). When NAT is enabled, traffic is NATed from pods in that pool to any destination outside of all other $[prodname] IP pools. - -### Disable NAT: For on-premises deployments using physical infrastructure - -If you choose to implement $[prodname] networking with [BGP peered with your physical network infrastructure](bgp.mdx), you can use your own infrastructure to NAT traffic from pods to the internet. In this case, you should disable the $[prodname] `natOutgoing` option. For example, if you want your pods to have public internet IPs, you should: - -- Configure $[prodname] to peer with your physical network infrastructure -- Create an IP pool with public IP addresses for those pods that are routed to your network with NAT disabled (`nat-outgoing: false`) -- Verify that other network equipment does not NAT the pod traffic - -## Before you begin - -**Required** - -- Calico CNI - -## How to - -- [Create an IP pool with NAT outgoing enabled](#create-an-ip-pool-with-nat-outgoing-enabled) -- [Use additional IP pools to specify addresses that can be reached without NAT](#use-additional-ip-pools-to-specify-addresses-that-can-be-reached-without-nat) - -### Create an IP pool with NAT outgoing enabled - -In the following example, we create a $[prodname] IPPool with natOutgoing enabled. Outbound NAT is performed locally on the node where each workload in the pool is hosted. 
- -```yaml -apiVersion: projectcalico.org/v3 -kind: IPPool -metadata: - name: default-ipv4-ippool -spec: - cidr: 192.168.0.0/16 - natOutgoing: true -``` - -### Use additional IP pools to specify addresses that can be reached without NAT - -Because $[prodname] performs outgoing NAT only when connecting to an IP address that is not in a $[prodname] IPPool, you can create additional IPPools that are not used for pod IP addresses, but prevent NAT to certain CIDR blocks. This is useful if you want nodes to NAT traffic to the internet, but not to IPs in certain internal ranges. For example, if you did not want to NAT traffic from pods to 10.0.0.0/8, you could create the following pool. You must ensure that the network between the cluster and 10.0.0.0/8 can route pod IPs. - -```yaml -apiVersion: projectcalico.org/v3 -kind: IPPool -metadata: - name: no-nat-10.0.0.0-8 -spec: - cidr: 10.0.0.0/8 - disabled: true -``` diff --git a/calico-enterprise_versioned_docs/version-3.19-2/networking/determine-best-networking.mdx b/calico-enterprise_versioned_docs/version-3.19-2/networking/determine-best-networking.mdx deleted file mode 100644 index 9f7c50fe2c..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/networking/determine-best-networking.mdx +++ /dev/null @@ -1,263 +0,0 @@ ---- -description: Learn about the different networking options Calico Enterprise supports so you can choose the best option for your needs. ---- - -# Determine best networking option - -## Big picture - -Learn about the different networking options $[prodname] supports so you can choose the best option for your needs. - -## Value - -$[prodname]’s flexible modular architecture supports a wide range of deployment options, so you can select the best networking approach for your specific environment and needs. This includes the ability to run with a variety of CNI and IPAM plugins, and underlying network types, in non-overlay or overlay modes, with or without BGP. - -## Concepts - -If you want to fully understand the network choices available to you, we recommend you make sure you are familiar with and understand the following concepts. If you would prefer to skip the learning and get straight to the choices and recommendations, you can jump ahead to [Networking Options](#networking-options). - -### Kubernetes networking basics - -The Kubernetes network model defines a “flat” network in which: - -- Every pod get its own IP address. -- Pods on any node can communicate with all pods on all other nodes without NAT. - -This creates a clean, backwards-compatible model where pods can be treated much like VMs or physical hosts from the perspectives of port allocation, naming, service discovery, load balancing, application configuration, and migration. Network segmentation can be defined using network policies to restrict traffic within these base networking capabilities. - -Within this model there’s quite a lot of flexibility for supporting different networking approaches and environments. The details of exactly how the network is implemented depend on the combination of CNI, network, and cloud provider plugins being used. - -### CNI plugins - -CNI (Container Network Interface) is a standard API which allows different network implementations to plug into Kubernetes. Kubernetes calls the API any time a pod is being created or destroyed. There are two types of CNI plugins: - -- CNI network plugins: responsible for adding or deleting pods to/from the Kubernetes pod network. 
This includes creating/deleting each pod’s network interface and connecting/disconnecting it to the rest of the network implementation. -- CNI IPAM plugins: responsible for allocating and releasing IP addresses for pods as they are created or deleted. Depending on the plugin, this may include allocating one or more ranges of IP addresses (CIDRs) to each node, or obtaining IP addresses from an underlying public cloud’s network to allocate to pods. - -### Cloud provider integrations - -Kubernetes cloud provider integrations are cloud-specific controllers that can configure the underlying cloud network to help provide Kubernetes networking. Depending on the cloud provider, this could include automatically programming routes into the underlying cloud network so it knows natively how to route pod traffic. - -### Kubenet - -Kubenet is an extremely basic network plugin built into Kubernetes. It does not implement cross-node networking or network policy. It is typically used together with a cloud provider integration that sets up routes in the cloud provider network for communication between nodes, or in single node environments. Kubenet is not compatible with $[prodname]. - -### Overlay networks - -An overlay network is a network that is layered on top of another network. In the context of Kubernetes, an overlay network can be used to handle pod-to-pod traffic between nodes on top of an underlying network that is not aware of pod IP addresses or which pods are running on which nodes. Overlay networks work by encapsulating network packets that an underlying network doesn’t know how to handle (for example using pod IP addresses) within an outer packet which the underlying network does know how to handle (for example node IP addresses). Two common network protocols used for encapsulation are VXLAN and IP-in-IP. - -The main advantage of using an overlay network is that it reduces dependencies on the underlying network. For example, you can run a VXLAN overlay on top of almost any underlying network, without needing to integrate with or make any changes to the underlying network. - -The main disadvantages of using an overlay network are: - -- A slight performance impact. The process of encapsulating packets takes a small amount of CPU, and the extra bytes required in the packet to encode the encapsulation (VXLAN or IP-in-IP headers) reduces the maximum size of inner packet that can be sent, which in turn can mean needing to send more packets for the same amount of total data. -- The pod IP addresses are not routable outside of the cluster. More on this below! - -### Cross-subnet overlays - -In addition to standard VXLAN or IP-in-IP overlays, $[prodname] also supports “cross-subnet” modes for VXLAN and IP-in-IP. In this mode, within each subnet, the underlying network acts as an L2 network. Packets sent within a single subnet are not encapsulated, so you get the performance of a non-overlay network. Packets sent across subnets are encapsulated, like a normal overlay network, reducing dependencies on the underlying network (without the need to integrate with or make any changes to the underlying network). - -Just like with a standard overlay network, the underlying network is not aware of pod IP addresses and the pod IP addresses are not routable outside of the cluster. - -### Pod IP routability outside of the cluster - -An important distinguishing feature of different Kubernetes network implementations is whether or not pod IP addresses are routable outside of the cluster across the broader network. 
- -**Not routable** - -If the pod IP addresses are not routable outside of the cluster then when a pod tries to establish a network connection to an IP address that is outside of the cluster, Kubernetes uses a technique called SNAT (Source Network Address Translation) to change the source IP address from the IP address of the pod, to the IP address of the node hosting the pod. Any return packets on the connection get automatically mapped back to the pod IP address. So the pod is unaware the SNAT is happening, the destination for the connection sees the node as the source of the connection, and the underlying broader network never sees pod IP addresses. - -For connections in the opposite direction, where something outside of the cluster needs to connect to a pod, this can only be done via Kubernetes services or Kubernetes ingress. Nothing outside of the cluster can directly connect to a pod IP address, because the broader network doesn’t know how to route packets to pod IP addresses. - -**Routable** - -If the pod IP addresses are routable outside of the cluster then pods can connect to the outside world without SNAT, and the outside world can connect directly to pods without going via a Kubernetes service or Kubernetes ingress. - -The advantage of pod IP addresses that are routable outside the cluster are: - -- Avoiding SNAT for outbound connections may be essential for integrating with existing broader security requirements. It can also simplify debugging and understandability of operation logs. -- If you have specialized workloads that mean some pods need to be directly accessible without going via Kubernetes services or Kubernetes ingress, then routable pod IPs can be operationally simpler than the alternative of using host networked pods. - -The main disadvantage of pod IP addresses that are routable outside the cluster is that the pod IPs must be unique across the broader network. So for example, if running multiple clusters you will need to use a different IP address range (CIDR) for pods in each cluster. This in turn can lead to IP address range exhaustion challenges when running at scale, or if there are other significant existing enterprise demands on IP address space. - -**What determines routability?** - -If you are using an overlay network for your cluster, then pod IPs are not normally routable outside of the cluster. - -If you aren’t using an overlay network, then whether pod IPs are routable outside of the cluster depends on what combination of CNI plugins, cloud provider integrations, or (for on-prem) BGP peering with the physical network, is being used. - -### BGP - -BGP (Border Gateway Protocol) is a standards based networking protocol for sharing routes across a network. It’s one of the fundamental building blocks of the internet, with exceptional scaling characteristics. - -$[prodname] has built in support for BGP. In an on-prem deployment, this allows $[prodname] to peer with the physical network (typically to Top of Rack routers) to exchange routes, making a non-overlay network where pod IP addresses routable across the broader network, just like any other workload attached to the network. - -## About $[prodname] Networking - -$[prodname]’s flexible modular architecture for networking includes the following. - -**$[prodname] CNI network plugin** - -The $[prodname] CNI network plugin connects pods to the host network namespace’s L3 routing using a pair of virtual Ethernet devices (veth pair). 
This L3 architecture avoids the unnecessary complexity and performance overheads of additional L2 bridges that feature in many other Kubernetes networking solutions. - -**$[prodname] CNI IPAM plugin** - -The $[prodname] CNI IPAM plugin allocates IP addresses for pods out of one or more configurable IP address ranges, dynamically allocating small blocks of IPs per node as required. The result is a more efficient IP address space usage compared to many other CNI IPAM plugins, including the host local IPAM plugin which is used in many networking solutions. - -**Overlay network modes** - -$[prodname] can provide both VXLAN or IP-in-IP overlay networks, including cross-subnet only modes. - -**Non-overlay network modes** - -$[prodname] can provide non-overlay networks running on top of any underlying L2 network, or an L3 network that is either a public cloud network with appropriate cloud provider integration, or a BGP capable network (typically an on-prem network with standard Top-of-Rack routers). - -**Network policy enforcement** - -$[prodname]’s network policy enforcement engine implements the full range of Kubernetes Network Policy features, plus the extended features of $[prodname] Network Policy. This works in conjunction with $[prodname]’s built in networking modes, or any other $[prodname] compatible network plugins and cloud provider integrations. - -## $[prodname] compatible CNI plugins and cloud provider integrations - -In addition to the $[prodname] CNI plugins and built in networking modes, $[prodname] is also compatible with a number of third party CNI plugins and cloud provider integrations. - -**Amazon VPC CNI** - -The Amazon VPC CNI plugin allocates pod IPs from the underlying AWS VPC and uses AWS elastic network interfaces to provide VPC native pod networking (pod IPs that are routable outside of the cluster). It is the default networking used in [Amazon EKS](https://aws.amazon.com/eks/), with Calico for network policy enforcement. - -**Azure CNI** - -The Azure CNI plugin allocates pod IPs from the underlying Azure VNET configures the Azure virtual network to provide VNET native pod networking (pod IPs that are routable outside of the cluster). It is the default networking used in [Microsoft AKS](https://azure.microsoft.com/en-us/products/kubernetes-service/), with Calico for network policy enforcement. - -**Azure cloud provider** - -The Azure cloud provider integration can be used as an alternative to the Azure CNI plugin. It uses the host-local IPAM CNI plugin to allocate pod IPs, and programs the underlying Azure VNET subnet with corresponding routes. Pod IPs are only routable within the VNET subnet (which often equates to meaning they are not routable outside of the cluster). - -**Google cloud provider** - -The Google cloud provider integration uses host-local IPAM CNI plugin to allocate pod IPs, and programs the Google cloud network Alias IP ranges to provide VPC native pod networking on Google cloud (pod IPs that are routable outside of the cluster). It is the default for Google Kubernetes Engine (GKE), with Calico for network policy enforcement. - -**Host local IPAM** - -The host local CNI IPAM plugin is a commonly used IP address management CNI plugin, which allocates a fixed size IP address range (CIDR) to each node, and then allocates pod IP addresses from within that range. The default address range size is 256 IP addresses (a /24), though two of those IP addresses are reserved for special purposes and not assigned to pods. 
The simplicity of host local CNI IPAM plugin makes it easy to understand, but results in less efficient IP address space usage compared to $[prodname] CNI IPAM plugin. - -## Networking Options - -### On-prem - -The most common network setup for $[prodname] on-prem is non-overlay mode using [BGP to peer](configuring/bgp.mdx) with the physical network (typically top of rack routers) to make pod IPs routable outside of the cluster. (You can of course configure the rest of your on-prem network to limit the scope of pod IP routing outside of the cluster if desired.) This setup provides a rich range of advanced $[prodname] features, including the ability to advertise Kubernetes service IPs (cluster IPs or external IPs), and the ability to control IP address management at the pod, namespace, or node level, to support a wide range of possibilities for integrating with existing enterprise network and security requirements. - - - -If peering BGP to the physical network is not an option, you can also run non-overlay mode if the cluster is within a single L2 network, with Calico just peering BGP between the nodes in the cluster. Even though this is not strictly an overlay network, the pod IPs are not routable outside of the cluster, because the broader network does not have routes for the pod IPs. - - - -Alternatively you can run $[prodname] in either VXLAN or IP-in-IP overlay mode, with cross-subnet overlay mode to optimize performance within each L2 subnet. - -_Recommended:_ - - - -_Alternative:_ - - - -### AWS - -If you would like pod IP addresses to be routable outside of the cluster then you must use the Amazon VPC CNI plugin. This is the default networking mode for [EKS](https://aws.amazon.com/eks/), with Calico for network policy. Pod IP addresses are allocated from the underlying VPC and the maximum number of pods per node is dependent on the [instance type](https://github.com/aws/amazon-vpc-cni-k8s#eni-allocation). - - - -If you prefer to avoid dependencies on a specific cloud provider, or allocating pod IPs from the underlying VPC is problematic due to IP address range exhaustion challenges, or if the maximum number of pods supported per node by the Amazon VPC CNI plugin is not sufficient for your needs, we recommend using $[prodname] networking in cross-subnet overlay mode. Pod IPs will not be routable outside of the cluster, but you can scale the cluster up to the limits of Kubernetes with no dependencies on the underlying cloud network. - - - -You can learn more about Kubernetes Networking on AWS, including how each of the above options works under the covers, in this short video: [Everything you need to know about Kubernetes networking on AWS](https://www.projectcalico.org/everything-you-need-to-know-about-kubernetes-pod-networking-on-aws/). - -### Azure - -If you would like pod IP addresses to be routable outside of the cluster then you must use the Azure CNI plugin. This is supported by [AKS](https://azure.microsoft.com/en-us/products/kubernetes-service/), with Calico for network policy. Pod IP addresses are allocated from the underlying VNET. - - - -Alternatively you can run $[prodname] in Azure CNI overlay mode. - - - -If you are using AKS and prefer to avoid dependencies on a specific cloud provider or allocating pod IPs from the underlying VNET is problematic due to IP address range exhaustion challenges, we recommend using $[prodname] networking in cross-subnet overlay mode. 
Pod IPs will not be routable outside of the cluster, but you can scale the cluster up to the limits of Kubernetes with no dependencies on the underlying cloud network. - - - -You can learn more about Kubernetes Networking on Azure, including how each of the above options works under the covers, in this short video: [Everything you need to know about Kubernetes networking on Azure](https://www.projectcalico.org/everything-you-need-to-know-about-kubernetes-networking-on-azure/). - -### Google Cloud - -If you would like pod IP addresses to be routable outside of the cluster then you must use the Google cloud provider integration in conjunction with host-local IPAM CNI plugin. This is supported by [GKE](https://cloud.google.com/kubernetes-engine), with Calico for network policy. Pod IP addresses are allocated from the underlying VPC, and corresponding Alias IP addresses are automatically assigned to nodes. - - - -If you prefer to avoid dependencies on a specific cloud provider, or allocating pod IPs from the underlying VPC is problematic due to IP address range exhaustion challenges, we recommend using $[prodname] networking in overlay mode. As Google cloud network is a pure L3 network, cross-subnet mode is not supported. Pod IPs will not be routable outside of the cluster, but you can scale the cluster up to the limits of Kubernetes with no dependencies on the underlying cloud network. - -_Recommended:_ - - - -_Alternative:_ - - - -You can learn more about Kubernetes Networking on Google cloud, including how each of the above options works under the covers, in this short video: [Everything you need to know about Kubernetes networking on Google cloud](https://www.projectcalico.org/everything-you-need-to-know-about-kubernetes-networking-on-google-cloud/). - -### Anywhere - -The above list of environments is obviously not exhaustive. Understanding the concepts and explanations in this guide has hopefully helped you figure out what is right for your environment. If you still aren't sure then you can ask for advice through the Calico Users's Slack or Discourse forum. And remember you can run Calico in VXLAN overlay mode in almost any environment if you want to get started without worrying too deeply about the different options. - - - -## Additional resources - -- [Video playlist: Everything you need to know about Kubernetes networking](https://www.youtube.com/playlist?list=PLoWxE_5hnZUZMWrEON3wxMBoIZvweGeiq) -- [Configure BGP peering](configuring/bgp.mdx) -- [Configure overlay networking](configuring/vxlan-ipip.mdx) -- [Advertise Kubernetes service IP addresses](configuring/advertise-service-ips.mdx) -- [Customize IP address management](ipam/index.mdx) -- [Restrict a pod to use an IP address in a specific range](ipam/legacy-firewalls.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/networking/egress/egress-gateway-aws.mdx b/calico-enterprise_versioned_docs/version-3.19-2/networking/egress/egress-gateway-aws.mdx deleted file mode 100644 index 4803df211d..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/networking/egress/egress-gateway-aws.mdx +++ /dev/null @@ -1,1174 +0,0 @@ ---- -description: Configure specific application traffic to exit the cluster through an egress gateway with a native AWS IP address. 
---- - -# Configure egress gateways, AWS - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - -## Big picture - -Control the source IP address seen by external services/appliances by routing the traffic from certain pods -through egress gateways. Use native VPC subnet IP addresses for the egress gateways so that the IPs are valid in the AWS fabric. - -## Value - -Controlling the source IP seen when traffic leaves the cluster allows groups of pods to be identified -by external firewalls, appliances and services (even as the groups are scaled up/down or pods restarted). -$[prodname] controls the source IP by directing traffic through one or more "egress gateway" pods, which -change the source IP of the traffic to their own IP. The egress gateways used can be chosen at the pod or namespace -scope allowing for flexibility in how the cluster is seen from outside. - -In AWS, egress gateway source IP addresses are chosen from an IP pool backed by a [VPC subnet](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html) -using $[prodname] IPAM. $[prodname] IPAM allows the IP addresses to be precisely controlled, this allows -for static configuration of external appliances. Using an IP pool backed by a VPC subnet allows $[prodname] to -configure the AWS fabric to route traffic to and from the egress gateway using its own IP address. - -## Concepts - -### CIDR notation - -This article assumes that you are familiar with network masks and CIDR notation. - -- CIDR notation is defined in [RFC4632](https://datatracker.ietf.org/doc/html/rfc4632). -- The [Wikipedia article on CIDR notation](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing#CIDR_notation) - provides a good reference. - -### AWS-backed IP pools - -$[prodname] supports IP pools that are backed by the AWS fabric. Workloads that use an IP address from an -AWS-backed pool can communicate on the AWS network using their own IP address and AWS will route their traffic -to/from their host without changing the IP address. - -Pods that use an IP address from an AWS-backed pool may also be [assigned an AWS Elastic IP via a pod annotation](#add-aws-elastic-ips-to-the-egress-gateway-deployment) -. Elastic IPs used in this -way have the normal AWS semantics: when accessing resources inside the AWS network, the workload's private IP -(from the IP pool) is used. When accessing resources outside the AWS network, AWS translates the workload's IP to -the Elastic IP. Elastic IPs also allow for incoming requests from outside the AWS fabric, direct to the workload. - -In overview, the AWS-backed IP Pools feature works as follows: - -- An IP pool is created with its `awsSubnetID` field set to the ID of a VPC subnet. This "AWS-backed" IP pool's - CIDR must be contained within the VPC subnet's CIDR. - - :::caution - - You must ensure that the CIDR(s) used for AWS-backed IP pool(s) are reserved in the AWS fabric. - For example, by creating a dedicated VPC subnet for $[prodname]. If the CIDR is not reserved; both - $[prodname] and AWS may try to assign the same IP address, resulting in a conflict. - - ::: - -- Since they are a limited resource, $[prodname] IPAM does not use AWS-backed pools by default. To request an - AWS-backed IP address, a pod must have a resource request: - - ```yaml noValidation - spec: - containers: - - ... 
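    # Request (and limit) exactly one AWS-backed IPv4 address for this pod;
    # without this request the pod receives an ordinary (non-AWS-backed) IP.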
- resources: - requests: - projectcalico.org/aws-secondary-ipv4: 1 - limits: - projectcalico.org/aws-secondary-ipv4: 1 - ``` - - $[prodname] manages the `projectcalico.org/aws-secondary-ipv4` capacity on the Kubernetes Node resource, - ensuring that Kubernetes will not try to schedule too many AWS-backed workloads to the same node. Only AWS-backed - pods are limited in this way; there is no limit on the number of non-AWS-backed pods. - -- When the CNI plugin spots such a resource request, it will choose an IP address from an AWS-backed pool. Only - pools with VPC subnets in the availability zone of the host are considered. - -- When Felix, $[prodname]'s per-host agent spots a local workload with an AWS-backed address it tries to ensure - that the IP address of the workload is assigned to the host in the AWS fabric. If need be, it will create a - new [secondary ENI](#secondary-elastic-network-interfaces-enis) device and attach it to the host to house the IP address. - Felix supports two modes for assigning secondary ENIs: **ENI-per-workload** mode (added in v3.13) and - **Secondary-IP-per-workload** mode. These modes are described [below](#secondary-elastic-network-interfaces-enis). - -- If the pod has one or more AWS Elastic IPs listed in the `cni.projectcalico.org/awsElasticIPs` pod annotation, - Felix will try to ensure that _one_ of the Elastic IPs is assigned to the pod's private IP address in the AWS fabric. - (Specifying multiple Elastic IPs is useful for multi-pod deployments; ensuring that each pod in the deployment - gets one of the IPs.) - -### Egress gateway - -An egress gateway acts as a transit pod for the outbound application traffic that is configured to -use it. As traffic leaving the cluster passes through the egress gateway, its source IP is changed -to that of the egress gateway pod, and the traffic is then forwarded on. - -### Source IP - -When an outbound application flow leaves the cluster, its IP packets will have a source IP. -This begins as the pod IP of the pod that originated the flow, then: - -- _If no egress gateway is configured_ and the pod IP came from an [IP pool](../../reference/resources/ippool.mdx) - with `natOutgoing: true`, the node hosting the pod will change the source IP to its own as the - traffic leaves the host. This allows the pod to communicate with external service even though the - external network is unaware of the pod's IP. - -- _If the pod is configured with an egress gateway_, the traffic is first forwarded to the egress gateway, which - changes the source IP to its own and then sends the traffic on. To function correctly, egress gateways - should have IPs from an IP pool with `natOutgoing: false`, meaning their host forwards the packet onto - the network without changing the source IP again. Since the egress gateway's IP is visible to - the underlying network fabric, the fabric must be configured to know about the egress gateway's - IP and to send response traffic back to the same host. - -### AWS VPCs and subnets - -An AWS VPC is a virtual network that is, by default, logically isolated from other VPCs. Each VPC has one or more -(often large) CIDR blocks associated with it (for example `10.0.0.0/16`). In general, VPC CIDRs may overlap, but only -if the VPCs remain isolated. AWS allows VPCs to be peered with each other through VPC Peerings. VPCs can only be -peered if _none_ of their associated CIDRs overlap. 
- -Each VPC has one or more VPC subnets associated with it, each subnet owns a non-overlapping part of one of the -VPC's CIDR blocks. Each subnet is associated with a particular availability zone. Instances in one availability -zone can only use IP addresses from subnets in that zone. Unfortunately, this adds some complexity to managing -egress gateways IP addresses: much of the configuration must be repeated per-AZ. - -### AWS VPC and DirectConnect peerings - -AWS [VPC Peerings](https://docs.aws.amazon.com/vpc/latest/peering/vpc-peering-basics.html) allow multiple VPCs to be -connected together. Similarly, [DirectConnect](https://docs.aws.amazon.com/directconnect/latest/UserGuide/Welcome.html) -allows external datacenters to be connected to an AWS VPC. Peered VPCs and datacenters communicate using private IPs -as if they were all on one large private network. - -By using AWS-backed IP pools, egress gateways can be assigned private IPs allowing them to communicate without NAT -within the same VPC, with peered VPCs, and, with peered datacenters. - -### Secondary Elastic Network Interfaces (ENIs) - -Elastic network interfaces are network interfaces that can be added and removed from an instance dynamically. Each -ENI has a primary IP address from the VPC subnet that it belongs to, and it may also have one or more secondary IP -addresses, chosen for the same subnet. While the primary IP address is fixed and cannot be changed, the secondary -IP addresses can be added and removed at runtime. - -To arrange for AWS to route traffic to and from egress gateways, $[prodname] adds _secondary_ Elastic -Network Interfaces (ENIs) to the host. $[prodname] supports two modes for provisioning the -secondary ENIs. The table below describes the trade-offs between **ENI-per-workload** and **Secondary-IP-per-workload** -modes: - -| **ENI-per-workload** (since v3.13) | **Secondary-IP-per-workload** | -| ------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | -| One secondary ENI is attached for each AWS-backed workload. | Secondary ENIs are shared, multiple workloads per ENI. | -| Supports one AWS-backed workload per secondary ENI. | Supports 2-49 AWS-backed workloads per secondary ENI (depending on instance type). | -| ENI Primary IP is set to Workload's IP. | ENI Primary IP chosen from dedicated "host secondary" IP pools. | -| Makes best use of AWS IP space, no need to reserve IPs for hosts. | Requires "host secondary" IPs to be reserved. These cannot be used for workloads. | -| ENI deleted when workload deleted. | ENI retained (ready for next workload to be scheduled). | -| Slower to handle churn/workload mobility. (Creating ENI is slower than assigning IP.) | Faster at handling churn/workload mobility. | - -The number of ENIs that an instance can support and the number of secondary IPs that each ENI can support depends on -the instance type according to [this table](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html#AvailableIpPerENI). -Note: the table lists the total number of network interfaces and IP addresses but the first interface on the host (the -primary interface) and, in Secondary-IP-per-workload mode, the first IP of each interface (its primary IP) cannot be -used for egress gateways. 
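To check these limits for a given instance type before sizing your node pools, you can query the EC2 API. For example, using the AWS CLI (the `t3.large` instance type below is illustrative):

```bash
# Shows the maximum number of ENIs and the number of IPv4 addresses per ENI.
# Remember that the primary ENI (and, in Secondary-IP-per-workload mode, each
# ENI's primary IP) cannot be used for egress gateways.
aws ec2 describe-instance-types \
  --instance-types t3.large \
  --query 'InstanceTypes[0].NetworkInfo.{MaxENIs:MaximumNetworkInterfaces,IPv4PerENI:Ipv4AddressesPerInterface}' \
  --output table
```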
- -The primary interface cannot be used for egress gateways because it belongs to the VPC subnet that is -in use for Kubernetes hosts; this means that a planned egress gateway IP could get used by AWS as the primary IP of -an instance (for example when scaling up the cluster). - -## Before you begin - -**Required** - -- Calico CNI -- Open port UDP 4790 on the host - -**Not Supported** - -- Amazon VPC CNI - - $[prodname] CNI and IPAM is required. The ability to control the egress gateway’s IP is a feature of $[prodname] CNI and IPAM. AWS VPC CNI does not support that feature, so it is incompatible with egress gateways. - -## How to - -- [Configure IP autodetection](#configure-ip-autodetection) -- [Ensure Kubernetes VPC has free CIDR range](#ensure-kubernetes-vpc-has-free-cidr-range) -- [Create dedicated VPC subnets](#create-dedicated-vpc-subnets) -- [Configure AWS IAM roles](#configure-aws-iam-roles) -- [Configure IP reservations for each VPC subnet](#configure-ip-reservations-for-each-vpc-subnet) -- [Enable egress gateway support](#enable-egress-gateway-support) -- [Enable AWS-backed IP pools](#enable-aws-backed-ip-pools) -- [Configure IP pools backed by VPC subnets](#configure-ip-pools-backed-by-vpc-subnets) -- [Deploy a group of egress gateways](#deploy-a-group-of-egress-gateways) -- [Configure iptables backend for egress gateways](#configure-iptables-backend-for-egress-gateways) -- [Configure namespaces and pods to use egress gateways](#configure-namespaces-and-pods-to-use-egress-gateways) -- [Optionally enable ECMP load balancing](#optionally-enable-ecmp-load-balancing) -- [Verify the feature operation](#verify-the-feature-operation) -- [Control the use of egress gateways](#control-the-use-of-egress-gateways) -- [Policy enforcement for flows via an egress gateway](#policy-enforcement-for-flows-via-an-egress-gateway) -- [Upgrade egress gateways](#upgrade-egress-gateways) - -### Configure IP autodetection - -Since this feature adds additional network interfaces to nodes, it is important to configure $[prodname] to -autodetect the correct primary interface to use for normal pod-to-pod traffic. Otherwise, $[prodname] may -autodetect a newly-added secondary ENI as the main interface, causing an outage. - -For EKS clusters, the default IP autodetection method is `can-reach=8.8.8.8`, which will choose the interface -with a route to `8.8.8.8`; this is typically the interface with a default route, which will be the correct (primary) ENI. -($[prodname] ensures that the secondary ENIs do not have default routes in the main routing table.) - -For other AWS clusters, $[prodname] may default to `firstFound`, which is **not** suitable. - -To examine the autodetection method, check the operator's installation resource: - -```bash -$ kubectl get installations.operator.tigera.io -o yaml default -``` -```yaml noValidation -apiVersion: operator.tigera.io/v1 -kind: Installation -metadata: - ... - name: default - ... -spec: - calicoNetwork: - ... - nodeAddressAutodetectionV4: - firstFound: true -... -``` - -If `nodeAddressAutodetectionV4` is set to `firstFound: true` or is not specified, then you must change it to another method by editing the -resource. The NodeAddressAutodetection options, `canReach` and `cidrs` are suitable. See [Installation reference](../../reference/installation/api.mdx). If using the `cidrs` option, set the CIDRs list to include only the -CIDRs from which your primary ENI IPs are chosen (do not include the dedicated VPC subnets chosen below). 
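For example, to switch to the `cidrs` autodetection method, patch the Installation resource (the CIDR below is a placeholder; use the subnet(s) that contain your nodes' primary ENI addresses):

```bash
# Placeholder CIDR: list only the subnets used for the nodes' primary ENIs,
# not the dedicated egress gateway subnets created below.
kubectl patch installations.operator.tigera.io default --type='merge' -p \
  '{"spec":{"calicoNetwork":{"nodeAddressAutodetectionV4":{"cidrs":["10.0.0.0/16"]}}}}'
```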
- -### Ensure Kubernetes VPC has free CIDR range - -For egress gateways to be useful in AWS, we want to assign them IP addresses from a VPC subnet that is in the same AZ -as their host. - -To avoid clashes between AWS IP allocations and $[prodname] IP allocations, it is important that the range of -IP addresses assigned to $[prodname] IP pools is not used by AWS for automatic allocations. In this guide we -assume that you have created a dedicated VPC subnet per Availability Zone (AZ) that is reserved for $[prodname] -and configured not to be used as the default subnet for the AZ. - -If you are creating your cluster and VPC from scratch, plan to subdivide the VPC CIDR into (at least) two VPC subnets -per AZ. One VPC subnet for the Kubernetes (and any other) hosts and one VPC subnet for egress gateways. (The next -section explains the sizing requirements for the egress gateway subnets.) - -If you are adding this feature to an existing cluster, you may find that the existing VPC subnets already cover the -entire VPC CIDR, making it impossible to create a new subnet. If that is the case, you can make more room by -adding a second CIDR to the VPC that is large enough for the new subnets. For information on adding a secondary -CIDR range to a VPC, see [this guide](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html#vpc-resize). - -### Create dedicated VPC subnets - -$[prodname] requires a dedicated VPC subnet in each AWS availability zone that you wish to deploy egress -gateways. The subnet must be dedicated to $[prodname] so that AWS will not -use IP addresses from the subnet for other purposes (as this could clash with an egress gateway's IP). When creating the -subnet you should configure it not to be used for instances. - -Some IP addresses from the dedicated subnet are reserved for AWS and $[prodname] internal use: - -- The first four IP addresses in the subnet cannot be used. These are [reserved by AWS for internal use](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html#vpc-sizing-ipv4). -- Similarly, the last IP in the subnet (the broadcast address) cannot be used. -- _In **Secondary-IP-per-workload** mode_, $[prodname] requires one IP address from the subnet per secondary ENI - that it provisions (for use as the primary IP address of the ENI). In **ENI-per-workload** mode, this is not required. - - - - -Example for **ENI-per-workload** mode: - -- You anticipate having up to 30 instances running in each availability zone (AZ). -- You intend to use `t3.large` instances, [these are limited to](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html#AvailableIpPerENI) 3 ENIs per host. -- So, each host can accept 2 secondary ENIs, each of which can handle one egress gateway. -- With 2 ENIs per node and 30 nodes, the part of the cluster in this AZ could handle up to `30 * 2 = 60` egress - gateways. -- AWS reserves 5 IPs from the AWS subnet for internal use, no "host secondary IPs" need to be reserved in this mode. -- Since VPC subnets are allocated by CIDR, a `/25` subnet containing 128 IP addresses would comfortably fit the 5 - reserved IPs as well as the 60 possible gateways (with headroom for more nodes to be added later). - - - - -Example for **Secondary-IP-per-workload** mode: - -- You anticipate having up to 30 instances running in each availability zone (AZ). 
-- You intend to use `t3.large` instances, [these are limited to](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html#AvailableIpPerENI) - 3 ENIs per host (one of which is the primary) and each ENI can handle 12 IP addresses, (one of which is the primary). -- So, each host can accept 2 secondary ENIs and each secondary ENI could handle 11 egress gateway pods. -- Each in-use secondary ENI requires one IP from the VPC subnet (up to 60 in this case) and AWS requires 5 IPs to be - reserved so that's up to 65 IPs reserved in total. -- With 2 ENIs and 11 IPs per ENI, the part of the cluster in this AZ could handle up to `30 * 2 * 11 = 660` egress - gateways. -- Since VPC subnets are allocated by CIDR, a `/22` subnet containing 1024 IP addresses would comfortably fit the 65 - reserved IPs as well as the 660 possible gateways. - -$[prodname] allocates ENIs on-demand so each instance will only claim one of those reserved IP addresses when the -first egress gateway is assigned to that node. It will only claim its second IP when that ENI becomes full and then an -extra egress gateway is provisioned. - - - - -### Configure AWS IAM roles - -To provision the required AWS resources, each $[noderunning] pod in your cluster requires the -following IAM permissions to be granted. The permissions can be granted to the node IAM Role itself, or by using -the AWS [IAM roles for service accounts](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html) feature to grant the permissions to the -`calico-node` service account. - -- DescribeInstances -- DescribeInstanceTypes -- DescribeNetworkInterfaces -- DescribeSubnets -- DescribeTags -- CreateTags -- AssignPrivateIpAddresses -- UnassignPrivateIpAddresses -- AttachNetworkInterface -- CreateNetworkInterface -- DeleteNetworkInterface -- DetachNetworkInterface -- ModifyNetworkInterfaceAttribute - -The above permissions are similar to those used by the AWS VPC CNI (since both CNIs need to provision the same kinds -of resources). In addition, to support elastic IPs, each $[noderunning] also requires the following permissions: - -- DescribeAddresses -- AssociateAddress -- DisassociateAddress - -### Configure AWS Security Group rules - -To allow egress gateway traffic into the egress gateway pod's host from the client, the ingress rules of the security -group need to be updated. A rule to allow all packets from within the security group must be added to the inbound rules. - -### Configure IP reservations for each VPC subnet - -Since the first four IP addresses and the last IP address in a VPC subnet cannot be used, it is important to -prevent $[prodname] from _trying_ to use them. For each VPC subnet that you plan to use, -ensure that you have an entry in an [IP reservation](../../reference/resources/ipreservation.mdx) for its first -four IP addresses and its final IP address. 
- -For example, if your chosen VPC subnets are `100.64.0.0/22` and `100.64.4.0/22`, you could create the following -`IPReservation` resource, which covers both VPC subnets (if you're not familiar with CIDR notation, replacing the -`/22` of the original subnet with `/30` is a shorthand for "the first four IP addresses"): - -```yaml -apiVersion: projectcalico.org/v3 -kind: IPReservation -metadata: - name: aws-ip-reservations -spec: - reservedCIDRs: - - 100.64.0.0/30 - - 100.64.3.255 - - 100.64.4.0/30 - - 100.64.7.255 -``` - -### Enable egress gateway support - -In the default **FelixConfiguration**, set the `egressIPSupport` field to `EnabledPerNamespace` or -`EnabledPerNamespaceOrPerPod`, according to the level of support that you need in your cluster. For -support on a per-namespace basis only: - -```bash -kubectl patch felixconfiguration default --type='merge' -p \ - '{"spec":{"egressIPSupport":"EnabledPerNamespace"}}' -``` - -Or for support both per-namespace and per-pod: - -```bash -kubectl patch felixconfiguration default --type='merge' -p \ - '{"spec":{"egressIPSupport":"EnabledPerNamespaceOrPerPod"}}' -``` - -:::note - -- `egressIPSupport` must be the same on all cluster nodes, so you should set them only in the - `default` FelixConfiguration resource. -- The operator automatically enables the required policy sync API in the FelixConfiguration. - -::: - -### Enable AWS-backed IP pools - - - - -To enable **ENI-per-workload** mode, in the default **FelixConfiguration**, set the `awsSecondaryIPSupport` field to -`EnabledENIPerWorkload`: - -```bash -kubectl patch felixconfiguration default --type='merge' -p \ - '{"spec":{"awsSecondaryIPSupport":"EnabledENIPerWorkload"}}' -``` - - - - -To enable **Secondary-IP-per-workload** mode, set the field to `Enabled` (the name `Enabled` predates -the addition of the **ENI-per-workload** mode): - -```bash -kubectl patch felixconfiguration default --type='merge' -p \ - '{"spec":{"awsSecondaryIPSupport":"Enabled"}}' -``` - - - - -You can verify that the setting took effect by examining the Kubernetes Node resources: - -```bash -kubectl describe node -``` - -Should show the new `projectcalico.org/aws-secondary-ipv4` capacity (in the Allocated Resources section). - -#### Changing modes - -You can change between the two modes by: - -- Ensuring that the number of egress gateways on every node is within the limits of the particular mode. i.e. - when switching to **ENI-per-workload** mode, the number of egress gateways must be less than or equal to the number - of secondary ENIs that your instances can handle. -- Editing the setting (using the patch commands above, for example). - -Changing the mode will cause disruption as ENIs must be removed and re-added. - -### Configure IP pools backed by VPC subnets - - - - -In **ENI-per-workload** mode, IP pools are (only) used to subdivide the VPC subnets into small pools used for -particular groups of egress gateways. These IP Pools must have: - -- `awsSubnetID` set to the ID of the relevant VPC subnet. This activates the AWS-backed IP feature for these pools. -- `allowedUse` set to `["Workload"]` to tell $[prodname] IPAM to use those pools for the egress gateway workloads. -- `vxlanMode` and `ipipMode` set to `Never` to disable encapsulation for the egress gateway pods. (`Never` is the default if these fields are not specified.) -- `blockSize` set to 32. This aligns $[prodname] IPAM with the behaviour of the AWS fabric. -- `disableBGPExport` set to `true`. 
This prevents routing conflicts if your cluster is using IPIP or BGP networking. - -It's also recommended to: - -- Set `nodeSelector` to `"!all()"`. This prevents $[prodname] IPAM from using the pool automatically. It will - only be used for workloads that explicitly name it in the `cni.projectcalico.org/ipv4pools` annotation. - -Continuing the example above, with VPC subnets - -- `100.64.0.0/22` in, say, availability zone west-1 and id `subnet-000000000000000001` -- `100.64.4.0/22` in, say, availability zone west-2 and id `subnet-000000000000000002` - -And, assuming that there are two clusters of egress gateways "red" and "blue" (which in turn serve namespaces "red" -and "blue"), one way to structure the IP pools is to have one IP pool for each group of egress gateways in each -subnet. Then, if a particular egress gateway from the egress gateway cluster is scheduled to one AZ or the other, -it will take an IP from the appropriate pool. - -For the "west-1" availability zone: - -- IP pool "egress-red-west-1", CIDR `100.64.0.4/30` (the first non-reserved /30 CIDR in the VPC subnet). These - addresses will be used for "red" egress gateways in the "west-1" AZ. - -- IP pool "egress-blue-west-1", CIDR `100.64.0.8/30` (the next 4 IPs from the "west-1" subnet). These addresses - will be used for "blue" egress gateways in the "west-1" AZ. - -For the "west-2" availability zone: - -- IP pool "egress-red-west-2", CIDR `100.64.4.4/30` (the first non-reserved /30 CIDR in the VPC subnet). These - addresses will be used for "red" egress gateways in the "west-2" AZ. - -- IP pool "egress-blue-west-2", CIDR `100.64.4.8/30` (the next 4 IPs from the "west-2" subnet). These addresses - will be used for "blue" egress gateways in the "west-2" AZ. - -Converting this to `IPPool` resources: - -```yaml -apiVersion: projectcalico.org/v3 -kind: IPPool -metadata: - name: egress-red-west-1 -spec: - cidr: 100.64.0.4/30 - allowedUses: ['Workload'] - awsSubnetID: subnet-000000000000000001 - blockSize: 32 - nodeSelector: '!all()' - disableBGPExport: true ---- -apiVersion: projectcalico.org/v3 -kind: IPPool -metadata: - name: egress-blue-west-1 -spec: - cidr: 100.64.0.8/30 - allowedUses: ['Workload'] - awsSubnetID: subnet-000000000000000001 - blockSize: 32 - nodeSelector: '!all()' - disableBGPExport: true ---- -apiVersion: projectcalico.org/v3 -kind: IPPool -metadata: - name: egress-red-west-2 -spec: - cidr: 100.64.4.4/30 - allowedUses: ['Workload'] - awsSubnetID: subnet-000000000000000002 - blockSize: 32 - nodeSelector: '!all()' - disableBGPExport: true ---- -apiVersion: projectcalico.org/v3 -kind: IPPool -metadata: - name: egress-blue-west-2 -spec: - cidr: 100.64.4.8/30 - allowedUses: ['Workload'] - awsSubnetID: subnet-000000000000000002 - blockSize: 32 - nodeSelector: '!all()' - disableBGPExport: true -``` - - - - -In **Secondary-IP-per-workload** mode, IP pools are used to subdivide the VPC subnets as follows: - -- One medium-sized IP pool per-Subnet reserved for $[prodname] to use for the _primary_ IP addresses of its _secondary_ ENIs. - These pools must have: - - - `awsSubnetID` set to the ID of the relevant VPC subnet. This activates the AWS-backed IP feature for these pools. - - `allowedUse` set to `["HostSecondaryInterface"]` to reserve them for this purpose. - - `blockSize` set to 32. This aligns $[prodname] IPAM with the behaviour of the AWS fabric. - - `vxlanMode` and `ipipMode` set to `Never`. (`Never` is the default if these fields are not specified.) - - `disableBGPExport` set to `true`. 
This prevents routing conflicts if your cluster is using IPIP or BGP networking. - -- Small pools used for particular groups of egress gateways. These must have: - - - `awsSubnetID` set to the ID of the relevant VPC subnet. This activates the AWS-backed IP feature for these pools. - - `allowedUse` set to `["Workload"]` to tell $[prodname] IPAM to use those pools for the egress gateway workloads. - - `vxlanMode` and `ipipMode` set to `Never` to disable encapsulation for the egress gateway pods. (`Never` is the default if these fields are not specified.) - - `blockSize` set to 32. This aligns $[prodname] IPAM with the behaviour of the AWS fabric. - - `disableBGPExport` set to `true`. This prevents routing conflicts if your cluster is using IPIP or BGP networking. - - It's also recommended to: - - - Set `nodeSelector` to `"!all()"`. This prevents $[prodname] IPAM from using the pool automatically. It will - only be used for workloads that explicitly name it in the `cni.projectcalico.org/ipv4pools` annotation. - -Continuing the example above, with VPC subnets - -- `100.64.0.0/22` in, say, availability zone west-1 and id `subnet-000000000000000001` -- `100.64.4.0/22` in, say, availability zone west-2 and id `subnet-000000000000000002` - -And, assuming that there are two clusters of egress gateways "red" and "blue" (which in turn serve namespaces "red" -and "blue"), one way to structure the IP pools is to have a "hosts" IP pool in each VPC subnet and one IP pool for each -group of egress gateways in each subnet. Then, if a particular egress gateway from the egress gateway cluster is -scheduled to one AZ or the other, it will take an IP from the appropriate pool. - -For the "west-1" availability zone: - -- IP pool "hosts-west-1", CIDR `100.64.0.0/25` (the first 128 addresses in the "west-1" VPC subnet). - - - We'll reserve these addresses for hosts to use. - - `100.64.0.0/25` covers the addresses from `100.64.0.0` to `100.64.0.127` (but addresses `100.64.0.0` to `100.64.0.3` - were reserved above). - -- IP pool "egress-red-west-1", CIDR `100.64.0.128/30` (the next 4 IPs from the "west-1" subnet). - - - These addresses will be used for "red" egress gateways in the "west-1" AZ. - -- IP pool "egress-blue-west-1", CIDR `100.64.0.132/30` (the next 4 IPs from the "west-1" subnet). - - - These addresses will be used for "blue" egress gateways in the "west-1" AZ. - -For the "west-2" availability zone: - -- IP pool "hosts-west-2", CIDR `100.64.4.0/25` (the first 128 addresses in the "west-2" VPC subnet). - - - `100.64.4.0/25` covers the addresses from `100.64.4.0` to `100.64.4.127` (but addresses `100.64.4.0` to `100.64.4.3` - were reserved above). - -- IP pool "egress-red-west-2", CIDR `100.64.4.128/30` (the next 4 IPs from the "west-2" subnet). - - - These addresses will be used for "red" egress gateways in the "west-2" AZ. - -- IP pool "egress-blue-west-2", CIDR `100.64.4.132/30` (the next 4 IPs from the "west-2" subnet). - - - These addresses will be used for "blue" egress gateways in the "west-2" AZ. 
- -Converting this to `IPPool` resources: - -```yaml -apiVersion: projectcalico.org/v3 -kind: IPPool -metadata: - name: hosts-west-1 -spec: - cidr: 100.64.0.0/25 - allowedUses: ['HostSecondaryInterface'] - awsSubnetID: subnet-000000000000000001 - blockSize: 32 - disableBGPExport: true ---- -apiVersion: projectcalico.org/v3 -kind: IPPool -metadata: - name: egress-red-west-1 -spec: - cidr: 100.64.0.128/30 - allowedUses: ['Workload'] - awsSubnetID: subnet-000000000000000001 - blockSize: 32 - nodeSelector: '!all()' - disableBGPExport: true ---- -apiVersion: projectcalico.org/v3 -kind: IPPool -metadata: - name: egress-blue-west-1 -spec: - cidr: 100.64.0.132/30 - allowedUses: ['Workload'] - awsSubnetID: subnet-000000000000000001 - blockSize: 32 - nodeSelector: '!all()' - disableBGPExport: true ---- -apiVersion: projectcalico.org/v3 -kind: IPPool -metadata: - name: hosts-west-2 -spec: - cidr: 100.64.4.0/25 - allowedUses: ['HostSecondaryInterface'] - awsSubnetID: subnet-000000000000000002 - blockSize: 32 - disableBGPExport: true ---- -apiVersion: projectcalico.org/v3 -kind: IPPool -metadata: - name: egress-red-west-2 -spec: - cidr: 100.64.4.128/30 - allowedUses: ['Workload'] - awsSubnetID: subnet-000000000000000002 - blockSize: 32 - nodeSelector: '!all()' - disableBGPExport: true ---- -apiVersion: projectcalico.org/v3 -kind: IPPool -metadata: - name: egress-blue-west-2 -spec: - cidr: 100.64.4.132/30 - allowedUses: ['Workload'] - awsSubnetID: subnet-000000000000000002 - blockSize: 32 - nodeSelector: '!all()' - disableBGPExport: true -``` - - - - -### Deploy a group of egress gateways - -Use an egress gateway custom resource to deploy a group of egress gateways. - -Using the example of the "red" egress gateway cluster, we use several features of Kubernetes and $[prodname] -in tandem to get a cluster of egress gateways that spans both availability zones and uses AWS-backed IP addresses: - -```bash -kubectl apply -f - < -# - -# timeoutSeconds: 15 -# intervalSeconds: 5 -# httpProbe: -# urls: -# - -# - -# timeoutSeconds: 30 -# intervalSeconds: 10 - aws: - nativeIP: Enabled - template: - metadata: - labels: - egress-code: red - spec: - nodeSelector: - kubernetes.io/os: linux - terminationGracePeriodSeconds: 0 - topologySpreadConstraints: - - maxSkew: 1 - topologyKey: "topology.kubernetes.io/zone" - whenUnsatisfiable: "DoNotSchedule" - labelSelector: - matchLabels: - egress-code: red -EOF -``` - -- `replicas: 2` tells Kubernetes to schedule two egress gateways in the "red" cluster. -- ipPools tells $[prodname] IPAM to use one of the "red" IP pools: - - ```yaml - ipPools: - - name: "egress-red-west-1" - - name: "egress-red-west-2" - ``` - Depending on which AZ the pod is scheduled in, $[prodname] IPAM will automatically ignore IP pools that - are backed by AWS subnets that are not in the local AZ. - - External services and appliances can recognise "red" traffic because it will all come from the CIDRs of the "red" - IP pools. - -- When nativeIP is enabled, IPPools must be AWS-backed. 
It also tells Kubernetes to only schedule the gateway to a node - with available AWS IP capacity: - - ```yaml - aws: - nativeIP: Enabled - ``` - -- The following [topology spread constraint](https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/) - ensures that Kubernetes spreads the Egress gateways evenly between AZs (assuming that your nodes are labeled with - the expected [well-known label](https://kubernetes.io/docs/reference/labels-annotations-taints/#topologykubernetesiozone) - `topology.kubernetes.io/zone`): - - ```yaml - topologySpreadConstraints: - - maxSkew: 1 - topologyKey: topology.kubernetes.io/zone - whenUnsatisfiable: DoNotSchedule - labelSelector: - matchLabels: - egress-code: red - ``` - -- The labels are arbitrary. You can choose whatever names and values are convenient for your cluster's Namespaces and Pods to refer to in their egress selectors. - If labels are not specified, a default label `projectcalico.org/egw`:`name` will be added by the Tigera Operator. - -- icmpProbe may be used to specify the Probe IPs, ICMP interval and timeout in seconds. `ips` if set, the - egress gateway pod will probe each IP periodically using an ICMP ping. If all pings fail then the egress - gateway will report non-ready via its health port. `intervalSeconds` controls the interval between probes. - `timeoutSeconds` controls the timeout before reporting non-ready if no probes succeed. - - ```yaml - icmpProbe: - ips: - - - - - timeoutSeconds: 20 - intervalSeconds: 10 - ``` - -- httpProbe may be used to specify the Probe URLs, HTTP interval and timeout in seconds. `urls` if set, the - egress gateway pod will probe each external service periodically. If all probes fail then the egress - gateway will report non-ready via its health port. `intervalSeconds` controls the interval between probes. - `timeoutSeconds` controls the timeout before reporting non-ready if all probes are failing. - - ```yaml - httpProbe: - urls: - - - - - timeoutSeconds: 30 - intervalSeconds: 10 - ``` -- Please refer to the [operator reference docs](../../reference/installation/api.mdx) for details about the egress gateway resource type. - -:::note - -- It is advisable to have more than one egress gateway per group, so that the egress IP function - continues if one of the gateways crashes or needs to be restarted. When there are multiple - gateways in a group, outbound traffic from the applications using that group is load-balanced - across the available gateways. The number of `replicas` specified must be less than or equal - to the number of free IP addresses in the IP Pool. -- IPPool can be specified either by its name (e.g. `-name: egress-ippool-1`) or by its CIDR (e.g. `-cidr: 10.10.10.0/31`). -- The labels are arbitrary. You can choose whatever names and values are convenient for - your cluster's Namespaces and Pods to refer to in their egress selectors. - The health port `8080` is used by: -- The Kubernetes `readinessProbe` to expose the status of the egress gateway pod (and any ICMP/HTTP - probes). -- Remote pods to check if the egress gateway is "ready". Only "ready" egress - gateways will be used for remote client traffic. This traffic is automatically allowed by $[prodname] and - no policy is required to allow it. $[prodname] only sends probes to egress gateway pods that have a named - "health" port. This ensures that during an upgrade, health probes are only sent to upgraded egress gateways. 
- -::: - -### Configure iptables backend for egress gateways - -The Tigera Operator configures egress gateways to use the same iptables backend as `calico-node`. -To modify the iptables backend for egress gateways, you must change the `iptablesBackend` field in the [Felix configuration](../../reference/resources/felixconfig.mdx). - -### Configure namespaces and pods to use egress gateways - -You can configure namespaces and pods to use an egress gateway by: -* annotating the namespace or pod -* applying an egress gateway policy to the namespace or pod. - -Using an egress gateway policy is more complicated, but it allows advanced use cases. - -#### Configure a namespace or pod to use an egress gateway (annotation method) - -In a $[prodname] deployment, the Kubernetes namespace and pod resources honor annotations that -tell that namespace or pod to use particular egress gateways. These annotations are selectors, and -their meaning is "the set of pods, anywhere in the cluster, that match those selectors". - -So, to configure all the pods in a namespace to use the egress gateways that are -labelled with `egress-code: red`, you would annotate that namespace like this: - -```bash -kubectl annotate ns egress.projectcalico.org/selector="egress-code == 'red'" -``` - -By default, that selector can only match egress gateways in the same namespace. To select gateways -in a different namespace, specify a `namespaceSelector` annotation as well, like this: - -```bash -kubectl annotate ns egress.projectcalico.org/namespaceSelector="projectcalico.org/name == 'default'" -``` - -Egress gateway annotations have the same [syntax and range of expressions](../../reference/resources/networkpolicy.mdx#selector) as the selector fields in -$[prodname] [network policy](../../reference/resources/networkpolicy.mdx#entityrule). - -To configure a specific Kubernetes Pod to use egress gateways, specify the same annotations when -creating the pod. For example: - -```bash -kubectl apply -f - < egress.projectcalico.org/egressGatewayPolicy="egw-policy1" -``` - -To configure a specific Kubernetes pod to use the same policy, specify the same annotations when -creating the pod. -For example: - -```bash -kubectl apply -f - < -n -- nc 8089 ` should be the IP address of the netcat server. - -Then, if you check the logs or output of the netcat server, you should see: - -``` -Connection from received -``` - -with `` being one of the IPs of the egress IP pool that you provisioned. - -### Control the use of egress gateways - -If a cluster ascribes special meaning to traffic flowing through egress gateways, it will be -important to control when cluster users can configure their pods and namespaces to use them, so that -non-special pods cannot impersonate the special meaning. - -If namespaces in a cluster can only be provisioned by cluster admins, one option is to enable egress -gateway function only on a per-namespace basis. Then only cluster admins will be able to configure -any egress gateway usage. - -Otherwise -- if namespace provisioning is open to users in general, or if it's desirable for egress -gateway function to be enabled both per-namespace and per-pod -- a [Kubernetes admission controller](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/) - will be -needed. This is a task for each deployment to implement for itself, but possible approaches include -the following. - -1. 
Decide whether a given Namespace or Pod is permitted to use egress annotations at all, based on - other details of the Namespace or Pod definition. - -1. Evaluate egress annotation selectors to determine the egress gateways that they map to, and - decide whether that usage is acceptable. - -1. Impose the cluster's own bespoke scheme for a Namespace or Pod to identify the egress gateways - that it wants to use, less general than $[prodname]'s egress annotations. Then the - admission controller would police those bespoke annotations (that that cluster's users could - place on Namespace or Pod resources) and either reject the operation in hand, or allow it - through after adding the corresponding $[prodname] egress annotations. - -### Policy enforcement for flows via an egress gateway - -For an outbound connection from a client pod, via an egress gateway, to a destination outside the -cluster, there is more than one possible enforcement point for policy: - -The path of the traffic through policy is as follows: - -1. Packet leaves the client pod and passes through its egress policy. -2. The packet is encapsulated by the client pod's host and sent to the egress gateway -3. The encapsulated packet is sent from the host to the egress gateway pod. -4. The egress gateway pod de-encapsulates the packet and sends the packet out again with its own address. -5. The packet leaves the egress gateway pod through its egress policy. - -To ensure correct operation, (as of v3.15) the encapsulated traffic between host and egress gateway is auto-allowed by -$[prodname] and other ingress traffic is blocked. That means that there are effectively two places where -policy can be applied: - -1. on egress from the client pod -2. on egress from the egress gateway pod (see limitations below). - -The policy applied at (1) is the most powerful since it implicitly sees the original source of the traffic (by -virtue of being attached to that original source). It also sees the external destination of the traffic. - -Since an egress gateway will never originate its own traffic, one option is to rely on policy applied at (1) and -to allow all traffic to at (2) (either by applying no policy or by applying an "allow all"). - -Alternatively, for maximum "defense in depth" applying policy at both (1) and (2) provides extra protection should -the policy at (1) be disabled or bypassed by an attacker. Policy at (2) has the following limitations: - -- [Domain-based policy](../../network-policy/domain-based-policy.mdx) is not supported at egress from egress - gateways. It will either fail to match the expected traffic, or it will work intermittently if the egress gateway - happens to be scheduled to the same node as its clients. This is because any DNS lookup happens at the client pod. - By the time the policy reaches (2) the DNS information is lost and only the IP addresses of the traffic are available. - -- The traffic source will appear to be the egress gateway pod, the source information is lost in the address - translation that occurs inside the egress gateway pod. - -That means that policies at (2) will usually take the form of rules that match only on destination port and IP address, -either directly in the rule (via a CIDR match) or via a (non-domain based) NetworkSet. Matching on source has little -utility since the IP will always be the egress gateway and the port of translated traffic is not always preserved. 
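For illustration, here is a minimal sketch of policy applied at (2) for the "red" gateways, allowing only traffic to a known external range (the namespace, CIDR, and port are placeholders):

```yaml
apiVersion: projectcalico.org/v3
kind: NetworkPolicy
metadata:
  name: egress-red-allowed-destinations
  # Placeholder: the namespace where the "red" egress gateways are deployed.
  namespace: calico-egress
spec:
  selector: egress-code == 'red'
  types:
    - Egress
  egress:
    # Match on destination only; the source is always the egress gateway itself.
    - action: Allow
      protocol: TCP
      destination:
        nets:
          - 203.0.113.0/24 # placeholder for the external appliance CIDR
        ports:
          - 443
```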
- -:::note - -Since v3.15.0, $[prodname] also sends health probes to the egress gateway pods from the nodes where -their clients are located. In iptables mode, this traffic is auto-allowed at egress from the host and ingress -to the egress gateway. In eBPF mode, the probe traffic can be blocked by policy, so you must ensure that this traffic is allowed; this should be fixed in an upcoming -patch release. - -::: - -## Upgrade egress gateways - -From v3.16, egress gateway deployments are managed by the Tigera Operator. - -- When upgrading from a pre-v3.16 release, no automatic upgrade will occur. To upgrade a pre-v3.16 egress gateway deployment, - create an equivalent EgressGateway resource with the same namespace and the same name as mentioned [above](#deploy-a-group-of-egress-gateways); - the operator will then take over management of the old Deployment resource, replacing it with the upgraded version. - -- Use `kubectl apply` to create the egress gateway resource. Tigera Operator will read the newly created resource and wait - for the other $[prodname] components to be upgraded. Once the other $[prodname] components are upgraded, Tigera Operator - will upgrade the existing egress gateway deployment with the new image. - -By default, upgrading egress gateways will sever any connections that are flowing through them. To minimise impact, -the egress gateway feature supports some advanced options that give feedback to affected pods. For more details see -the [egress gateway maintenance guide](egress-gateway-maintenance.mdx). - -## Additional resources - -Please see also: - -- The `egressIP...` and `aws...` fields of the [FelixConfiguration resource](../../reference/resources/felixconfig.mdx#spec). -- [Troubleshooting egress gateways](troubleshoot.mdx). -- [Additional configuration for egress gateway maintenance](egress-gateway-maintenance.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/networking/egress/egress-gateway-azure.mdx b/calico-enterprise_versioned_docs/version-3.19-2/networking/egress/egress-gateway-azure.mdx deleted file mode 100644 index 233aced630..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/networking/egress/egress-gateway-azure.mdx +++ /dev/null @@ -1,759 +0,0 @@ ---- -description: Configure specific application traffic to exit the cluster through an egress gateway with a native Azure IP address. ---- - -# Configure egress gateways, Azure - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - -## Big picture - -Control the source IP address seen by external services/appliances by routing the traffic from certain pods -through egress gateways. Use native VNet subnet IP addresses for the egress gateways so that the IPs are valid in the Azure fabric. - -## Value - -Controlling the source IP seen when traffic leaves the cluster allows groups of pods to be identified -by external firewalls, appliances and services (even as the groups are scaled up/down or pods restarted). -$[prodname] controls the source IP by directing traffic through one or more "egress gateway" pods, which -change the source IP of the traffic to their own IP. The egress gateways used can be chosen at the pod or namespace -scope allowing for flexibility in how the cluster is seen from outside. - -In Azure, egress gateway source IP addresses are chosen from an arbitrary user-defined IP pool -using $[prodname] IPAM. egress gateway pods use dedicated IPPools to use as source IP addresses, which enables static configuration -of external appliances. 
- -## Concepts - -### Egress gateway - -An egress gateway acts as a transit pod for the outbound application traffic that is configured to -use it. As traffic leaving the cluster passes through the egress gateway, its source IP is changed -to that of the egress gateway pod, and the traffic is then forwarded on. - -### Source IP - -When an outbound application flow leaves the cluster, its IP packets will have a source IP. -This begins as the pod IP of the pod that originated the flow, then: - -- _If no egress gateway is configured_ and the pod IP came from an [IP pool](../../reference/resources/ippool.mdx) -with `natOutgoing: true`, the node hosting the pod will change the source IP to its own as the -traffic leaves the host. This allows the pod to communicate with external service even though the -external network is unaware of the pod's IP. - -- _If the pod is configured with an egress gateway_, the traffic is first forwarded to the egress gateway, which -changes the source IP to its own and then sends the traffic on. To function correctly, egress gateways -should have IPs from an IP pool with `natOutgoing: false`, meaning their host forwards the packet onto -the network without changing the source IP again. Since the egress gateway's IP is visible to -the underlying network fabric, the fabric must be configured to know about the egress gateway's -IP and to send response traffic back to the same host. - -### Azure VNets and subnets - -An Azure VNet is a virtual network that is, by default, logically isolated from other VNets. Each VNet has one or more -(often large) CIDR blocks associated with it (for example `10.0.0.0/16`). In general, VNet CIDRs may overlap, but only -if the VNets remain isolated. Azure allows VNets to be peered with each other through VNet Peerings. VNets can only be -peered if *none* of their associated CIDRs overlap. - -Each VNet has one or more VNet subnets associated with it, each subnet owns a non-overlapping part of one of the -VNet's CIDR blocks. VNet subnets span all availability zones in a region, so it is possible to distribute egress gateway -resources across availability zones in a region without the need to repeat the configuration per-AZ. - -### Azure VNet and ExpressRoute peerings - -Azure [VNet Peerings](https://learn.microsoft.com/en-us/azure/virtual-network/virtual-network-peering-overview) allow multiple VNets to be -connected together. Similarly, [ExpressRoute](https://learn.microsoft.com/en-us/azure/expressroute/expressroute-introduction) -allows external datacenters to be connected to an Azure VNet. Peered VPCs and datacenters communicate using private IPs -as if they were all on one large private network. - -By advertising routes to Azure fabric using Azure Route Servers, egress gateways can be assigned private IPs allowing them to -communicate without NAT within the same VPC, with peered VPCs, and, with peered datacenters. - -### Azure Route Server - -Azure [Route Server](https://learn.microsoft.com/en-us/azure/route-server/) is a managed networking service that allows -a network virtual appliance, like $[prodname], to dynamically configure Azure fabric by exchanging routing information -using Border Gateway Protocol (BGP). $[prodname] can establish BGP sessions with an Azure Route Server in a VNet -to advertise the IPs of egress gateways to that VNet. The learned routes are then propagated to the rest of VNets through -VNet peering, or to external datacenters through ExpressRoute, allowing communication with egress gateway. 
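For reference, the Route Server and its peerings with the cluster's route reflector nodes can be created with the Azure CLI. This is a sketch only; the resource group, names, subnet ID, and peer IPs are placeholders, and the peer ASN is assumed to be the cluster's BGP AS number (64512 by default):

```bash
# Azure Route Server must live in a subnet named RouteServerSubnet and always
# peers using AS number 65515; the peers added below are the route reflector nodes.
az network routeserver create \
  --resource-group my-rg \
  --name my-route-server \
  --hosted-subnet "/subscriptions/<subscription-id>/resourceGroups/my-rg/providers/Microsoft.Network/virtualNetworks/my-vnet/subnets/RouteServerSubnet" \
  --public-ip-address my-route-server-pip

# Repeat for each route reflector node.
az network routeserver peering create \
  --resource-group my-rg \
  --routeserver my-route-server \
  --name rr-node-1 \
  --peer-ip 10.224.0.11 \
  --peer-asn 64512
```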
- -## Before you begin - -**Required** - -- Calico CNI -- Open port UDP 4790 on the host - -**Not Supported** - -- Azure VNet CNI - -$[prodname] CNI and IPAM are required. The ability to control the egress gateway’s IP is a feature of $[prodname] CNI and IPAM. Azure VNet CNI does not support that feature, so it is incompatible with egress gateways. - -## How to - -- [Choose route reflectors](#choose-route-reflectors) -- [Create Azure Route Server](#create-azure-route-server) -- [Disable the default BGP node-to-node mesh](#disable-the-default-bgp-node-to-node-mesh) -- [Enable BGP](#enable-bgp) -- [Provision an egress IP pool](#provision-an-egress-ip-pool) -- [(Optional) Limit number of route advertisement](#limit-number-of-route-advertisement) -- [Configure route reflector](#configure-route-reflector) -- [Enable egress gateway support](#enable-egress-gateway-support) -- [Deploy a group of egress gateways](#deploy-a-group-of-egress-gateways) -- [Configure iptables backend for egress gateways](#configure-iptables-backend-for-egress-gateways) -- [Configure namespaces and pods to use egress gateways](#configure-namespaces-and-pods-to-use-egress-gateways) -- [(Optional) Enable ECMP load balancing](#optionally-enable-ecmp-load-balancing) -- [Verify the feature operation](#verify-the-feature-operation) -- [Control the use of egress gateways](#control-the-use-of-egress-gateways) -- [Policy enforcement for flows via an egress gateway](#policy-enforcement-for-flows-via-an-egress-gateway) - -### Choose route reflectors - -It is possible to establish BGP connections between all $[prodname] nodes and Azure Route Servers, but -to avoid hitting Azure Route Server peers [limit](https://learn.microsoft.com/en-us/azure/route-server/overview#route-server-limits), -it is better to select some nodes as route reflectors and set up BGP connections between those and Azure Route Server. -The number of route reflectors depends on the cluster size, but it is recommended to have at least 3 at all times, -and Azure Route Servers supports up to 8 peers in a VNet. - -### Create Azure Route Server - -Deploy Azure [Route Server](https://learn.microsoft.com/en-us/azure/route-server/) in the VNet (hub or spoke VNet) that routes -egress addresses. Then, add the selected route reflectors as peers to the Azure Route Server. - -:::note - -- The BGP connections between Calico route reflectors and Azure Route Servers are critical for the functionality -of egress gateways. It is important to maintain route reflectors with care, and to make sure there are always -enough healthy route reflectors. -- If possible, assign a static address to the route reflectors so after reboots the same address is kept. -- In AKS, it is [recommended](https://learn.microsoft.com/en-us/azure/aks/use-system-pools?tabs=azure-cli#system-and-user-node-pools) -to run applications in user node pools and leave system node pools for running critical system pods. -The nodes in system node pools are perfect Route Reflector candidates. - -::: - -### Disable the default BGP node-to-node mesh - -The default $[prodname] **node-to-node BGP mesh** may be turned off to enable other BGP topologies. -To do this, modify the default **BGP configuration** resource. - -Run the following command to disable the BGP full-mesh: - -```bash -kubectl apply -f - < projectcalico.org/RouteReflectorClusterID=244.0.0.1 -``` - -Typically, you will want to label this node to indicate that it is a route reflector, allowing it to be easily selected by a BGPPeer resource. 
You can do this with kubectl. For example: - -```bash -kubectl label node route-reflector=true -``` - -Now it is easy to configure route reflector nodes to peer with each other and other non-route-reflector nodes using label selectors. For example: - -```yaml -kind: BGPPeer -apiVersion: projectcalico.org/v3 -metadata: - name: peer-with-route-reflectors -spec: - nodeSelector: all() - peerSelector: route-reflector == 'true' -``` - -Finally, Add the IP addresses of Azure Route Servers, usually there are two, as peers to route reflectors. For example, if the $[prodname] -cluster is in a subnet with 10.224.0.0 as network address, the 10.224.0.1 is, [by definition](https://learn.microsoft.com/en-us/azure/virtual-network/virtual-networks-faq#are-there-any-restrictions-on-using-ip-addresses-within-these-subnets), the gateway and Azure Route Servers are deployed with -ASN 65515 and IP addresses of 10.225.0.4 and 10.225.0.5, create two **BGPPeer** resources: - -```yaml -apiVersion: projectcalico.org/v3 -kind: BGPPeer -metadata: - name: azure-route-server-a -spec: - peerIP: 10.225.0.4 - reachableBy: 10.224.0.1 - asNumber: 65515 - keepOriginalNextHop: true - nodeSelector: route-reflector == 'true' - filters: - - export-egress-ips ---- -apiVersion: projectcalico.org/v3 -kind: BGPPeer -metadata: - name: azure-route-server-b -spec: - peerIP: 10.225.0.5 - reachableBy: 10.224.0.1 - asNumber: 65515 - keepOriginalNextHop: true - nodeSelector: route-reflector == 'true' - filters: - - export-egress-ips -``` - -:::note - -- Adding `routeReflectorClusterID` to a node spec will remove it from the node-to-node mesh immediately, tearing down the -existing BGP sessions. Adding the BGP peering will bring up new BGP sessions. This will cause a short (about 2 seconds) -disruption to data plane traffic of workloads running in the nodes where this happens. To avoid this, make sure no -workloads are running on the nodes, by provisioning new nodes or by running `kubectl drain` on the node (which may -itself cause a disruption as workloads are drained). -- It is important to set `keepOriginalNextHop: true` since route reflectors advertise routes on behalf of other nodes. -Advertised routes to Azure Route Servers should have the original next hop otherwise the return packets will be sent to -route reflectors, and get dropped. -- It is mandatory to set `reachableBy` field set to the gateway of the subnet $[prodname] cluster is running in for -peering with Azure Route Servers to prevent BGP connection flapping. See [BGP peer](../../reference/resources/bgppeer.mdx) for more information. -- Including `filters` to apply the BGP filter configured in the previous section, is optional. -::: - -### Enable egress gateway support - -In the default **FelixConfiguration**, set the `egressIPSupport` field to `EnabledPerNamespace` or -`EnabledPerNamespaceOrPerPod`, according to the level of support that you need in your cluster. For -support on a per-namespace basis only: - -```bash -kubectl patch felixconfiguration default --type='merge' -p \ - '{"spec":{"egressIPSupport":"EnabledPerNamespace"}}' -``` - -Or for support both per-namespace and per-pod: - -```bash -kubectl patch felixconfiguration default --type='merge' -p \ - '{"spec":{"egressIPSupport":"EnabledPerNamespaceOrPerPod"}}' -``` - -:::note - -- `egressIPSupport` must be the same on all cluster nodes, so you should set them only in the `default` FelixConfiguration resource. -- The operator automatically enables the required policy sync API in the FelixConfiguration. 
- -::: - -### Deploy a group of egress gateways - -Use a Kubernetes Deployment to deploy a group of egress gateways. - -Using the example of the "red" egress gateway cluster, we use several features of Kubernetes and $[prodname] -in tandem to get a cluster of egress gateways that uses the user defined IP addresses: - -```bash -kubectl apply -f - < -# - -# timeoutSeconds: 15 -# intervalSeconds: 5 -# httpProbe: -# urls: -# - -# - -# timeoutSeconds: 30 -# intervalSeconds: 10 - template: - metadata: - labels: - egress-code: red - spec: - terminationGracePeriodSeconds: 0 - nodeSelector: - kubernetes.io/os: linux - topologySpreadConstraints: - - maxSkew: 1 - topologyKey: topology.kubernetes.io/zone - whenUnsatisfiable: DoNotSchedule - labelSelector: - matchLabels: - egress-code: red -EOF -``` - -* `replicas: 2` tells Kubernetes to schedule two egress gateways in the "red" cluster. - -* This annotation tells $[prodname] IPAM to use one of the "egress-ip-red-pool" IP pool. - -External services and appliances can recognise "red" traffic because it will all come from the CIDRs of the "red" -IP pool. - -* The following [topology spread constraint](https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/) -ensures that Kubernetes spreads the Egress gateways evenly between AZs (assuming that your nodes are labeled with -the expected [well-known label](https://kubernetes.io/docs/reference/labels-annotations-taints/#topologykubernetesiozone) -`topology.kubernetes.io/zone`): - -```yaml -topologySpreadConstraints: -- maxSkew: 1 - topologyKey: topology.kubernetes.io/zone - whenUnsatisfiable: DoNotSchedule - labelSelector: - matchLabels: - egress-code: red - ``` - -* The labels are arbitrary. You can choose whatever names and values are convenient for your cluster's Namespaces and Pods to refer to in their egress selectors. -If labels are not specified, a default label `projectcalico.org/egw`:`name` will be added by the Tigera Operator. - -* icmpProbe may be used to specify the Probe IPs, ICMP interval and timeout in seconds. `ips` if set, the -egress gateway pod will probe each IP periodically using an ICMP ping. If all pings fail then the egress -gateway will report non-ready via its health port. `intervalSeconds` controls the interval between probes. -`timeoutSeconds` controls the timeout before reporting non-ready if no probes succeed. - -```yaml -icmpProbe: - ips: - - - - - timeoutSeconds: 20 - intervalSeconds: 10 - ``` - -* httpProbe may be used to specify the Probe URLs, HTTP interval and timeout in seconds. `urls` if set, the -egress gateway pod will probe each external service periodically. If all probes fail then the egress -gateway will report non-ready via its health port. `intervalSeconds` controls the interval between probes. -`timeoutSeconds` controls the timeout before reporting non-ready if all probes are failing. - -```yaml -httpProbe: - urls: - - - - - timeoutSeconds: 30 - intervalSeconds: 10 - ``` -- Please refer to the [operator reference docs](../../reference/installation/api.mdx) for details about the egress gateway resource type. - -:::note - -* It is advisable to have more than one egress gateway per group, so that the egress IP function -continues if one of the gateways crashes or needs to be restarted. When there are multiple -gateways in a group, outbound traffic from the applications using that group is load-balanced -across the available gateways. 
The number of `replicas` specified must be less than or equal -to the number of free IP addresses in the IP Pool. - -* IPPool can be specified either by its name (e.g. `-name: egress-ip-red-pool`) or by its CIDR (e.g. `-cidr: 10.10.10.0/30`). - -* The labels are arbitrary. You can choose whatever names and values are convenient for -your cluster's Namespaces and Pods to refer to in their egress selectors. - -The health port `8080` is used by: - -* The Kubernetes `readinessProbe` to expose the status of the egress gateway pod (and any ICMP/HTTP probes). - -* Remote pods to check if the egress gateway is "ready". Only "ready" egress -gateways will be used for remote client traffic. This traffic is automatically allowed by $[prodname] and -no policy is required to allow it. $[prodname] only sends probes to egress gateway pods that have a named -"health" port. This ensures that during an upgrade, health probes are only sent to upgraded egress gateways. -::: - -### Configure iptables backend for egress gateways - -The Tigera Operator configures egress gateways to use the same iptables backend as `calico-node`. -To modify the iptables backend for egress gateways, you must change the `iptablesBackend` field in the [Felix configuration](../../reference/resources/felixconfig.mdx). - -### Configure namespaces and pods to use egress gateways - -You can configure namespaces and pods to use an egress gateway by: -* annotating the namespace or pod -* applying an egress gateway policy to the namespace or pod. - -Using an egress gateway policy is more complicated, but it allows advanced use cases. - -#### Configure a namespace or pod to use an egress gateway (annotation method) - -In a $[prodname] deployment, the Kubernetes namespace and pod resources honor annotations that -tell that namespace or pod to use particular egress gateways. These annotations are selectors, and -their meaning is "the set of pods, anywhere in the cluster, that match those selectors". - -So, to configure all the pods in a namespace to use the egress gateways that are -labelled with `egress-code: red`, you would annotate that namespace like this: - -```bash -kubectl annotate ns egress.projectcalico.org/selector="egress-code == 'red'" -``` - -By default, that selector can only match egress gateways in the same namespace. To select gateways -in a different namespace, specify a `namespaceSelector` annotation as well, like this: - -```bash -kubectl annotate ns egress.projectcalico.org/namespaceSelector="projectcalico.org/name == 'default'" -``` - -Egress gateway annotations have the same [syntax and range of expressions](../../reference/resources/networkpolicy.mdx#selector) as the selector fields in -$[prodname] [network policy](../../reference/resources/networkpolicy.mdx#entityrule). - -To configure a specific Kubernetes Pod to use egress gateways, specify the same annotations when -creating the pod. For example: - -```bash -kubectl apply -f - < egress.projectcalico.org/egressGatewayPolicy="egw-policy1" -``` - -To configure a specific Kubernetes pod to use the same policy, specify the same annotations when -creating the pod. -For example: - -```bash -kubectl apply -f - < -spec: - containers: - - name: alpine - image: alpine - command: ["/bin/sleep"] - args: ["infinity"] -EOF -``` - -Now you can use `kubectl exec` to initiate an outbound connection from that pod: - -```bash -kubectl exec -n -- nc 8089 ` should be the IP address of the netcat server. 
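
For reference, the command takes the following general form, where `<namespace>` and `<pod name>` identify the test pod and `<server IP>` is the address of the netcat server (all three are placeholders):

```bash
kubectl exec -n <namespace> <pod name> -- nc <server IP> 8089
```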
- -Then, if you check the logs or output of the netcat server, you should see: - -``` -Connection from received -``` - -with `` being one of the IPs of the egress IP pool that you provisioned. - -### Control the use of egress gateways - -If a cluster ascribes special meaning to traffic flowing through egress gateways, it will be -important to control when cluster users can configure their pods and namespaces to use them, so that -non-special pods cannot impersonate the special meaning. - -If namespaces in a cluster can only be provisioned by cluster admins, one option is to enable egress -gateway function only on a per-namespace basis. Then only cluster admins will be able to configure -any egress gateway usage. - -Otherwise -- if namespace provisioning is open to users in general, or if it's desirable for egress -gateway function to be enabled both per-namespace and per-pod -- a [Kubernetes admission controller](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/) - will be -needed. This is a task for each deployment to implement for itself, but possible approaches include -the following. - -1. Decide whether a given Namespace or Pod is permitted to use egress annotations at all, based on -other details of the Namespace or Pod definition. - -1. Evaluate egress annotation selectors to determine the egress gateways that they map to, and -decide whether that usage is acceptable. - -1. Impose the cluster's own bespoke scheme for a Namespace or Pod to identify the egress gateways -that it wants to use, less general than $[prodname]'s egress annotations. Then the -admission controller would police those bespoke annotations (that that cluster's users could -place on Namespace or Pod resources) and either reject the operation in hand, or allow it -through after adding the corresponding $[prodname] egress annotations. - -#### Policy enforcement for flows via an egress gateway - -For an outbound connection from a client pod, via an egress gateway, to a destination outside the -cluster, there is more than one possible enforcement point for policy: - -The path of the traffic through policy is as follows: - -1. Packet leaves the client pod and passes through its egress policy. -2. The packet is encapsulated by the client pod's host and sent to the egress gateway -3. The encapsulated packet is sent from the host to the egress gateway pod. -4. The egress gateway pod de-encapsulates the packet and sends the packet out again with its own address. -5. The packet leaves the egress gateway pod through its egress policy. - -To ensure correct operation, (as of v3.15) the encapsulated traffic between host and egress gateway is auto-allowed by -$[prodname] and other ingress traffic is blocked. That means that there are effectively two places where -policy can be applied: - -1. on egress from the client pod -2. on egress from the egress gateway pod (see limitations below). - -The policy applied at (1) is the most powerful since it implicitly sees the original source of the traffic (by -virtue of being attached to that original source). It also sees the external destination of the traffic. - -Since an egress gateway will never originate its own traffic, one option is to rely on policy applied at (1) and -to allow all traffic to at (2) (either by applying no policy or by applying an "allow all"). - -Alternatively, for maximum "defense in depth" applying policy at both (1) and (2) provides extra protection should -the policy at (1) be disabled or bypassed by an attacker. 
Policy at (2) has the following limitations: - -- [Domain-based policy](../../network-policy/domain-based-policy.mdx) is not supported at egress from egress -gateways. It will either fail to match the expected traffic, or it will work intermittently if the egress gateway -happens to be scheduled to the same node as its clients. This is because any DNS lookup happens at the client pod. -By the time the policy reaches (2) the DNS information is lost and only the IP addresses of the traffic are available. - -- The traffic source will appear to be the egress gateway pod, the source information is lost in the address -translation that occurs inside the egress gateway pod. - -That means that policies at (2) will usually take the form of rules that match only on destination port and IP address, -either directly in the rule (via a CIDR match) or via a (non-domain based) NetworkSet. Matching on source has little -utility since the IP will always be the egress gateway and the port of translated traffic is not always preserved. - -:::note - -Since v3.15.0, $[prodname] also sends health probes to the egress gateway pods from the nodes where their clients are located. -In iptables mode, this traffic is auto-allowed at egress from the host and ingress to the egress gateway. -In eBPF mode, the probe traffic can be blocked by policy, so you must ensure that this traffic is allowed. - -::: - -## Additional resources - -Please see also: - -- [Configure egress gateways, on-premises](egress-gateway-on-prem.mdx) -- [Troubleshooting egress gateways](troubleshoot.mdx). -- [Additional configuration for egress gateway maintenance](egress-gateway-maintenance.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/networking/egress/egress-gateway-maintenance.mdx b/calico-enterprise_versioned_docs/version-3.19-2/networking/egress/egress-gateway-maintenance.mdx deleted file mode 100644 index f91b9d9890..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/networking/egress/egress-gateway-maintenance.mdx +++ /dev/null @@ -1,372 +0,0 @@ ---- -description: React to egress gateway maintenance windows and minimize the impact of egress gateway downtime on sensitive workloads ---- - -# Optimize egress networking for workloads with long-lived TCP connections - -## Big Picture - -React to egress gateway maintenance windows and minimize the impact of egress gateway downtime on sensitive workloads - -## Value - -While most workloads benefit from the reduced downtime associated with increased replicas, there are some specific cases where increasing your number of egress gateways will not have as significant an effect on availability. - -One area where this applies is when your workloads must maintain long-lived TCP connections that are coupled to higher-level abstractions, such as database sessions, transactions, or similar. In these environments, if an egress gateway becomes unavailable, these long-lived sessions may close. Data may need to be retransmitted. And in some cases, data may be lost. - -## Before you begin - -These features require you to have configured a functioning egress gateway deployment in a cluster. 
For more information on deploying egress gateways, [see our other egress gateway guides](../egress/index.mdx).

## How to

- [Observe gateway maintenance impact](#observe-gateway-maintenance-impact)
- [Expose gateway maintenance annotations to your application](#expose-gateway-maintenance-annotations-to-your-application)
- [Reduce the impact of gateway downtime](#reduce-the-impact-of-gateway-downtime)

### Observe gateway maintenance impact

A number of egress-related annotations are automatically added to your workloads when an egress gateway they use is in the "terminating" phase. These annotations outline _which_ gateway is about to terminate, _when it began terminating_, and _when it will fully terminate_. This information can prove useful for conducting non-disruptive maintenance on your cluster, because any planned termination of egress pods is communicated to dependent workloads.

Before we can observe these annotations, we must first configure a _termination grace period_ for our egress gateways. The termination grace period prolongs an egress gateway's termination phase, giving us a window to react to the termination. Without configuring the grace period, we would have a zero-second window to react to gateway termination.

#### Add a termination grace period to egress gateway replicas

To widen our maintenance window, we can adjust the `terminationGracePeriodSeconds` field on our egress gateway pods. The amount of time we set for the termination grace period dictates how much time a dependent workload has to prepare for a gateway going down.

Let's add a termination grace period of 60 seconds to all pods in our egress gateway deployment, so that our egress-dependent workloads have a wider window to react. The Egress Gateway resource must be patched with the new `terminationGracePeriodSeconds` as shown below.

Find the name of the egress gateway resource:

```sh
kubectl get egressgateway
```

Response:

```
NAME             AGE
egress-gateway   8m43s
```

Patch the resource with a 60-second termination grace period:

```sh
kubectl patch egressgateway egress-gateway --type=merge -p '{"spec": {"template": {"spec": {"terminationGracePeriodSeconds": 60}}}}'
```

Wait for the change to roll out:

```sh
kubectl rollout status deploy/egress-gateway
```

:::note

- This assumes that you have upgraded your egress gateways to use the new managed resource available in v3.16 or later.
- Making the above alterations to an egress gateway resource will trigger a new rollout of the deployment - you can monitor the rollout status with `kubectl rollout status deploy/<egress gateway deployment name>`.
- If your rollout seems to have stalled and egress gateway pods are stuck in the "ContainerCreating" phase, it's possible the deployment's IPPool has been exhausted. You can check whether this is the case by inspecting a stuck pod with `kubectl describe pod <pod name>`.

:::

#### Inspect workload annotations

Once the updated egress gateway deployment rolls out, we're ready to observe the gateway maintenance annotations $[prodname] adds to your dependent workloads. Let's simulate cluster maintenance by deleting an egress gateway pod. It should take 60 seconds to terminate - the amount of time defined by `terminationGracePeriodSeconds`.

Show pods in the default namespace - one application pod using two egress gateway pods.
-```sh - kubectl get pods -o wide -``` -Response: -``` -NAME READY STATUS RESTARTS AGE IP -application-with-long-lived-connections 1/1 Running 0 20m 192.168.192.210 -egress-gateway-6576ccdf66-fxdvh 1/1 Running 0 3m 10.10.10.1 -egress-gateway-6644fbb56b-5xbh2 1/1 Running 0 3m 10.10.10.0 -``` -Delete one of the egress gateways being used by the application pod; do not block waiting for termination to finish. -```sh -kubectl delete pod egress-gateway-6576ccdf66-fxdvh --wait=false -``` -Response: -``` -pod "egress-gateway-6576ccdf66-fxdvh" deleted -``` - -The gateway we just deleted should now wait in the "terminating" phase until its termination grace period expires, at-which point it will be deleted. If our application pod depends on the terminating egress gateway, we'll see gateway maintenance annotations added to the dependent application pod automatically, outlining what gateway is going down, when it began terminating, and when it will be deleted: - -Observe the annotations added to the dependent application pod. -```sh - kubectl get pod application-with-long-lived-connections -o yaml -``` -Response: -``` -apiVersion: v1 -kind: Pod -metadata: - annotations: - ... - egress.projectcalico.org/gatewayMaintenanceFinishedTimestamp: "2022-04-19T16:00:18Z" - egress.projectcalico.org/gatewayMaintenanceGatewayIP: 10.10.10.1 - egress.projectcalico.org/gatewayMaintenanceStartedTimestamp: "2022-04-19T15:59:18Z" -... -``` - -Success! Our workload's annotations mark a 60-second maintenance window for the gateway we terminated, indicating when the egress gateway began terminating, and when it will fully terminate. - -:::note - -- Adjusting egress deployments, say, by modifying the `terminationGracePeriodSeconds` field, will trigger a new rollout. -- IPPool specified should be large enough to include terminating pods. -- Egress pods terminating due to a new rollout will behave the same as if they were deleted for maintenance - dependent workloads will receive gateway maintenance annotations, and gateway pods will terminate after their termination grace period has elapsed. -- Deleting an egress gateway in a way that overrides the termination grace period, say, by using `kubectl delete pod my-pod --grace-period=0`, will result in the gateway going down immediately, and dependent workloads will not have any time to react to the termination. - -::: - -
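
If you just want to confirm the annotations without reading the whole pod manifest, a quick filter over the pod YAML works as well. This is only a convenience sketch; the pod name is the one from the example above:

```bash
# Show only the egress gateway maintenance annotations on the dependent pod.
kubectl get pod application-with-long-lived-connections -o yaml | grep gatewayMaintenance
```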
    - -### Expose gateway maintenance annotations to your application - -While the presence of gateway maintenance annotations may be useful to a cluster administrator inspecting pods, it's not quite enough if our workload wishes to react to terminating egress gateways, say, by restarting its session gracefully before loss of connectivity. - -The [Kubernetes downward API](https://kubernetes.io/docs/tasks/inject-data-application/downward-api-volume-expose-pod-information/#the-downward-api) provides a means of exposing pod information [as files](https://kubernetes.io/docs/tasks/inject-data-application/downward-api-volume-expose-pod-information/#store-pod-fields) or as [environment variables](https://kubernetes.io/docs/tasks/inject-data-application/environment-variable-expose-pod-information/#use-pod-fields-as-values-for-environment-variables) within the pod. This value can then be polled by your workload, to react to changes as you see fit. - -Let's write a simple pod manifest that uses the downward API to expose maintenance annotations to the program running within: - -```yaml -apiVersion: v1 -kind: Pod -metadata: - annotations: - egress.projectcalico.org/selector: egress-code == 'red' - egress.projectcalico.org/namespaceSelector: projectcalico.org/name == 'default' - name: poll-my-own-annotations - namespace: default -spec: - containers: - - name: application-container - image: k8s.gcr.io/busybox:1.24 - command: ['sh', '-c'] - args: - - while true; do - echo 'polling egress maintenance timestamp...'; - if [[ -e /var/run/egress/gatewayMaintenanceStartedTimestamp ]]; then - echo -n 'gatewayMaintenanceStartedTimestamp has value "'; cat /var/run/egress/gatewayMaintenanceStartedTimestamp; echo -en '"\n'; - fi; - sleep 3; - done; - volumeMounts: - - name: egress-maintenance-started - mountPath: /var/run/egress - volumes: - - name: egress-maintenance-started - downwardAPI: - items: - - path: 'gatewayMaintenanceStartedTimestamp' - fieldRef: - fieldPath: metadata.annotations['egress.projectcalico.org/gatewayMaintenanceStartedTimestamp'] -``` - -This sample manifest will create a pod whose `gatewayMaintenanceStartedTimestamp` annotation is mounted to the file `/var/run/egress/gatewayMaintenanceStartedTimestamp`. The pod's main process is a script which polls the value of this file. - -After deleting an egress gateway this workload relies on, let's check its logs: - -```sh -kubectl logs poll-my-own-annotations -``` -Response: -``` -polling egress maintenance timestamp... -gatewayMaintenanceStartedTimestamp has value "" -polling egress maintenance timestamp... -gatewayMaintenanceStartedTimestamp has value "" -polling egress maintenance timestamp... -gatewayMaintenanceStartedTimestamp has value "" -polling egress maintenance timestamp... -gatewayMaintenanceStartedTimestamp has value "2022-04-19T17:24:46Z" -polling egress maintenance timestamp... -gatewayMaintenanceStartedTimestamp has value "2022-04-19T17:24:46Z" -``` - -We can see above that our script saw the value of the mounted volume change at the same time what we terminated our egress gateway pod. This work can be further developed to propagate notifications to our production workloads, without any need for polling kubernetes itself. - -**Note**: It's not recommended to couple your production applications to a Kubernetes client for the purposes of polling pod information, as it could give an attacker greater privileges if successful in compromising a workload. Instead, use a method such as the downward API that fully decouples the program. - -
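
As a sketch of how a workload might act on the mounted file, the following shell loop (illustrative only, and assuming the `gatewayMaintenanceFinishedTimestamp` annotation is also exposed via a second downward API item under `/var/run/egress`) waits for a maintenance window to open and then runs an application-specific drain step:

```sh
#!/bin/sh
# Illustrative sketch: react once a gateway maintenance window opens.
STARTED=/var/run/egress/gatewayMaintenanceStartedTimestamp
FINISHED=/var/run/egress/gatewayMaintenanceFinishedTimestamp

while true; do
  started="$(cat "$STARTED" 2>/dev/null)"
  if [ -n "$started" ]; then
    finished="$(cat "$FINISHED" 2>/dev/null)"
    echo "maintenance window open: started=$started, ends=$finished"
    # Application-specific reaction goes here, for example committing in-flight
    # transactions and re-establishing long-lived sessions before the gateway is removed.
    break
  fi
  sleep 3
done
```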
    - -### Reduce the impact of gateway downtime - -So far we have observed egress gateway maintenance windows, added a termination grace period to gateway pods, and propagated maintenance information directly to our workloads. Finally, we are going to look at the `maxNextHops` annotation, which is designed to limit the impact of a terminating egress gateway. - -Below is a sample kubernetes deployment which was adapted from the earlier annotation-aware pod manifest. The deployment has 3 replicas, and is configured to use egress gateways: - -```yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: annotation-aware-workloads -spec: - replicas: 3 - selector: - matchLabels: - app: annotation-aware-workload - strategy: - rollingUpdate: - maxSurge: 25% - maxUnavailable: 25% - type: RollingUpdate - template: - metadata: - labels: - app: annotation-aware-workload - annotations: - egress.projectcalico.org/selector: egress-code == 'red' - egress.projectcalico.org/namespaceSelector: projectcalico.org/name == 'default' - spec: - containers: - - name: application-container - image: k8s.gcr.io/busybox:1.24 - command: ['sh', '-c'] - args: - - while true; do - echo "[${MY_POD_NAME}] polling egress maintenance timestamp..."; - if [[ -e /var/run/egress/gatewayMaintenanceStartedTimestamp ]]; then - echo -n "[${MY_POD_NAME}] gatewayMaintenanceStartedTimestamp has value '"; cat /var/run/egress/gatewayMaintenanceStartedTimestamp; echo -en "'\n"; - fi; - sleep 3; - done; - volumeMounts: - - name: egress-maintenance-started - mountPath: /var/run/egress - env: - - name: MY_POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - volumes: - - name: egress-maintenance-started - downwardAPI: - items: - - path: 'gatewayMaintenanceStartedTimestamp' - fieldRef: - fieldPath: metadata.annotations['egress.projectcalico.org/gatewayMaintenanceStartedTimestamp'] -``` - -#### Observing the impact of egress gateway maintenance - -We now want to conduct maintenance on a particular node in our cluster, resulting in one of our egress gateways terminating. In this scenario, since our dependent applications' connections are load-balanced evenly across all available egress gateways, all of our application pods will receive a maintenance window annotation. If we have configured our applications to react to such a window, we will see them all react at once in a thundering herd: - -Begin the termination of an egress gateway pod. - -```sh - kubectl delete pod egress-gateway-6576ccdf66-mtqzl --wait=false -``` -Response: -``` -pod "egress-gateway-6576ccdf66-mtqzl" deleted -``` - -Let's collect logs from all pods in our maintenance-aware application deployment. -```sh -kubectl logs --selector=app=annotation-aware-workload -``` -Response: -``` -[annotation-aware-workloads-7987f55c9f-f7mkq] polling egress maintenance timestamp... -[annotation-aware-workloads-7987f55c9f-f7mkq] gatewayMaintenanceStartedTimestamp has value '' -[annotation-aware-workloads-7987f55c9f-qtcs2] polling egress maintenance timestamp... -[annotation-aware-workloads-7987f55c9f-qtcs2] gatewayMaintenanceStartedTimestamp has value '' -[annotation-aware-workloads-7987f55c9f-z5x25] polling egress maintenance timestamp... -[annotation-aware-workloads-7987f55c9f-z5x25] gatewayMaintenanceStartedTimestamp has value '' -[annotation-aware-workloads-7987f55c9f-qtcs2] polling egress maintenance timestamp... 
-[annotation-aware-workloads-7987f55c9f-qtcs2] gatewayMaintenanceStartedTimestamp has value '2022-04-20T12:24:34Z' -[annotation-aware-workloads-7987f55c9f-f7mkq] polling egress maintenance timestamp... -[annotation-aware-workloads-7987f55c9f-f7mkq] gatewayMaintenanceStartedTimestamp has value '2022-04-20T12:24:34Z' -[annotation-aware-workloads-7987f55c9f-z5x25] polling egress maintenance timestamp... -[annotation-aware-workloads-7987f55c9f-z5x25] gatewayMaintenanceStartedTimestamp has value '2022-04-20T12:24:34Z' -``` - -We can see in the above logs that all of our applications have been affected by the downtime of just a single egress gateway. In the worst case, this could lead to a window of downtime for the application, as all replicas scramble to restart their connections at once. To avoid this, lets use the `egress.projectcalico.org/maxNextHops` annotation to restrict the total number of gateways each application can depend on. - -#### Reducing the impact of egress gateway maintenance - -To place a limit on the number of egress gateways an application can depend on, annotate the application's pod with the `egress.projectcalico.org/maxNextHops` annotation. Alternatively, to limit all pods in a certain namespace, annotate that namespace. Let's annotate all pods in our sample deployment from earlier: - -```sh -kubectl patch deploy annotation-aware-workloads --type=merge -p \ -'{"spec": - {"template": - {"metadata": - {"annotations": - {"egress.projectcalico.org/maxNextHops": "1"} - } - } - } -}' -``` - -:::note - -- Either a _pod or a namespace_ can be annotated with `egress.projectcalico.org/maxNextHops`, however, the `egress.projectcalico.org/selector` annotation must also be present on the selected resource. -- If annotating pods, the `egressIPSupport` Felixconfiguration option must be set to `EnabledPerNamespaceOrPerPod`. -- If a pod's desired `maxNextHops` exceeds the total number of available egress gateways, scaling up the egress gateway deployment will result in the pod's egress networking updating until the desired number of gateways are being used. -- In all other cases, the `maxNextHops` annotation only takes effect at the time a pod is created. To ensure a pod's egress networking remains functional for its entire lifecycle, modifications to `maxNextHops` after a pod's creation will have no effect. For this reason, it's recommended that any egress gateway deployments have been scaled prior to deploying dependent workloads. - -::: - -After our patched sample deployment has been fully rolled out, each application pod should now depend on at most one egress gateway replica. Let's bring down another egress gateway pod and monitor our application logs: - -Begin the termination of an egress gateway pod. - -```sh -kubectl delete pod egress-gateway-6576ccdf66-c42v7 --wait=false -``` -Response: -``` -pod "egress-gateway-6576ccdf66-c42v7" deleted -``` -Collect logs from each application pod. -```sh -kubectl logs --selector=app=annotation-aware-workload -``` -Response: -``` -[annotation-aware-workloads-565b6855b9-tjvqr] polling egress maintenance timestamp... -[annotation-aware-workloads-565b6855b9-tjvqr] gatewayMaintenanceStartedTimestamp has value '' -[annotation-aware-workloads-565b6855b9-s44pt] polling egress maintenance timestamp... -[annotation-aware-workloads-565b6855b9-s44pt] gatewayMaintenanceStartedTimestamp has value '' -[annotation-aware-workloads-565b6855b9-46cw5] polling egress maintenance timestamp... 
-[annotation-aware-workloads-565b6855b9-46cw5] gatewayMaintenanceStartedTimestamp has value '' -[annotation-aware-workloads-565b6855b9-tjvqr] polling egress maintenance timestamp... -[annotation-aware-workloads-565b6855b9-tjvqr] gatewayMaintenanceStartedTimestamp has value '2022-04-20T12:53:32Z' -[annotation-aware-workloads-565b6855b9-s44pt] polling egress maintenance timestamp... -[annotation-aware-workloads-565b6855b9-s44pt] gatewayMaintenanceStartedTimestamp has value '' -[annotation-aware-workloads-565b6855b9-46cw5] polling egress maintenance timestamp... -[annotation-aware-workloads-565b6855b9-46cw5] gatewayMaintenanceStartedTimestamp has value '' -``` - -We can see from the above logs that only a single application pod has now been affected by the terminating egress gateway. The other pods have not received an annotation for a terminating gateway because they have chosen different gateways to depend on, and thus won't be affected. - -:::note - -The subset of egress gateway replicas that each pod will depend on when using the `maxNextHops` annotation can't be manually selected. -$[prodname] selects a subset of replicas in such a way as to evenly distribute load across the whole replica set. - -::: - -
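
To apply the same limit to every pod in a namespace instead of patching individual deployments, the annotation can be set on the namespace itself. A minimal sketch, where `app-namespace` is an illustrative namespace name that must also carry the `egress.projectcalico.org/selector` annotation:

```bash
kubectl annotate ns app-namespace egress.projectcalico.org/maxNextHops="1"
```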
    - -## Reference - -**The following are annotations that $[prodname] sets automatically on any egress-dependent pods:** - -| Annotation | Description | Datatype | Default value | Expected values | -| -------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------ | ---------- | ------------- | ---------------------------------------------------- | -| `egress.projectcalico.org/gatewayMaintenanceGatewayIP` | Indicates the IP of a terminating egress gateway your pod is using. | IP Address | "" | Any IP within the egress gateway deployment's IPSet. | -| `egress.projectcalico.org/gatewayMaintenanceStartedTimestamp` | Indicates when the egress gateway identified by `gatewayMaintenanceGatewayIP` began terminating. | String | "" | An RFC3339 date string | -| `egress.projectcalico.org/gatewayMaintenanceFinishedTimestamp` | Indicates when the egress gateway identified by `gatewayMaintenanceGatewayIP` will finish terminating. | String | "" | An RFC3339 date string | - -
    - -**The following annotations are used to configure your egress-dependent workloads. These annotations can be set either on a namespace, or on a pod. If setting the annotations on pods, the `egressIPSupport` FelixConfiguration option must be set to `EnabledPerNamespaceOrPerPod`.** - -| Annotation | Description | Datatype | Default value | Possible values | -| -------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -------- | ------------- | ----------------------- | -| `egress.projectcalico.org/maxNextHops` | Specifies the maximum number of egress gateway replicas from the selected deployment that a pod should depend on. Replicas will be chosen in a manner that attempts to balance load across the whole egress gateway replicaset. If unset, or set to "0", egress traffic will behave in the default manner (load balanced over all available gateways). | String | "" | "0", "1", "2", "3", ... | - -## Additional resources - -- [Troubleshooting egress gateways](troubleshoot.mdx). diff --git a/calico-enterprise_versioned_docs/version-3.19-2/networking/egress/egress-gateway-on-prem.mdx b/calico-enterprise_versioned_docs/version-3.19-2/networking/egress/egress-gateway-on-prem.mdx deleted file mode 100644 index ad1567e096..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/networking/egress/egress-gateway-on-prem.mdx +++ /dev/null @@ -1,587 +0,0 @@ ---- -description: Configure specific application traffic to exit the cluster through an egress gateway. -redirect_from: - - /compliance/egress-gateways ---- - -# Configure egress gateways, on-premises - -## Big picture - -Configure specific application traffic to exit the cluster through an egress gateway. - -## Value - -When traffic from particular applications leaves the cluster to access an external destination, it -can be useful to control the source IP of that traffic. For example, there may be an additional -firewall around the cluster, whose purpose includes policing external accesses from the cluster, and -specifically that particular external destinations can only be accessed from authorised workloads -within the cluster. - -$[prodname]'s own policy (including [DNS policy](../../network-policy/domain-based-policy.mdx)) and -per-node firewalls can ensure this, but deployments may like to deepen their defense by adding an -external firewall as well. If the external firewall is configured to allow outbound connections -only from particular source IPs, and the intended cluster workloads can be configured so that their -outbound traffic will have one of those source IPs, then the defense in depth objective is achieved. - -$[prodname] allows specifying an [IP pool](../../reference/resources/ippool.mdx) for each pod or namespace, and -even a [specific IP](../ipam/use-specific-ip.mdx) for a new pod, but this requires predicting how many pods -there will be representing a particular application, so that the IP pool can be correctly sized. -When IPs are a precious resource, over-sizing the pool is wasteful; but under-sizing is also -problematic, as then IPs will not be available within the desired range as the application is -scaled. - -Egress gateways provide an alternative approach. 
Application pods and namespaces are provisioned -with IPs from the default (and presumably plentiful) pool, but also configured so that their -outbound traffic is directed through an egress gateway. (Or, for resilience, through one of a small -number of egress gateways.) The egress gateways are set up to use a [specific IP pool](../ipam/legacy-firewalls.mdx) - and to perform an SNAT on the traffic passing through them. Hence, any -number of application pods can have their outbound connections multiplexed through a fixed small -number of egress gateways, and all of those outbound connections acquire a source IP from the egress -gateway IP pool. - -:::note - -The source port of an outbound flow through an egress gateway can generally _not_ be -preserved. Changing the source port is how Linux maps flows from many upstream IPs onto a single -downstream IP. - -::: - -Egress gateways are also useful if there is a reason for wanting all outbound traffic from a -particular application to leave the cluster through a particular node or nodes. For this case, the -gateways just need to be scheduled to the desired nodes, and the application pods/namespaces -configured to use those gateways. - -## Concepts - -### Egress gateway - -An egress gateway acts as a transit pod for the outbound application traffic that is configured to -use it. As traffic leaving the cluster passes through the egress gateway, its source IP is changed -to that of the egress gateway pod, and the traffic is then forwarded on. - -### Source IP - -When an outbound application flow leaves the cluster, its IP packets will have a source IP. -Normally this is the pod IP of the pod that originated the flow, or the node IP of the node hosting -that pod. It will be the **node IP** if the pod IP came from an [IP pool](../../reference/resources/ippool.mdx) with `natOutgoing: true`, and the **pod IP** if -not. (Assuming no other CNI plugin has been configured to NAT outgoing traffic.) - -With an egress gateway involved that is all still true, except that now it's the egress gateway that -counts, instead of the original application pod. So the flow will have the egress gateway's **node -IP**, if the egress gateway's pod IP came from an [IP pool](../../reference/resources/ippool.mdx) - with `natOutgoing: true`, and the egress -gateway's **pod IP** otherwise. - -### Control the use of egress gateways - -If a cluster ascribes special meaning to traffic flowing through egress gateways, it will be -important to control when cluster users can configure their pods and namespaces to use them, so that -non-special pods cannot impersonate the special meaning. - -If namespaces in a cluster can only be provisioned by cluster admins, one option is to enable egress -gateway function only on a per-namespace basis. Then only cluster admins will be able to configure -any egress gateway usage. - -Otherwise -- if namespace provisioning is open to users in general, or if it's desirable for egress -gateway function to be enabled both per-namespace and per-pod -- a [Kubernetes admission controller](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/) - will be -needed. This is a task for each deployment to implement for itself, but possible approaches include -the following. - -1. Decide whether a given Namespace or Pod is permitted to use egress annotations at all, based on - other details of the Namespace or Pod definition. - -1. 
Evaluate egress annotation selectors to determine the egress gateways that they map to, and - decide whether that usage is acceptable. - -1. Impose the cluster's own bespoke scheme for a Namespace or Pod to identify the egress gateways - that it wants to use, less general than $[prodname]'s egress annotations. Then the - admission controller would police those bespoke annotations (that that cluster's users could - place on Namespace or Pod resources) and either reject the operation in hand, or allow it - through after adding the corresponding $[prodname] egress annotations. - -### Policy enforcement for flows via an egress gateway - -For an outbound connection from a client pod, via an egress gateway, to a destination outside the -cluster, there is more than one possible enforcement point for policy: - -The path of the traffic through policy is as follows: - -1. Packet leaves the client pod and passes through its egress policy. -2. The packet is encapsulated by the client pod's host and sent to the egress gateway -3. The encapsulated packet is sent from the host to the egress gateway pod. -4. The egress gateway pod de-encapsulates the packet and sends the packet out again with its own address. -5. The packet leaves the egress gateway pod through its egress policy. - -To ensure correct operation, (as of v3.15) the encapsulated traffic between host and egress gateway is auto-allowed by -$[prodname] and other ingress traffic is blocked. That means that there are effectively two places where -policy can be applied: - -1. on egress from the client pod -2. on egress from the egress gateway pod (see limitations below). - -The policy applied at (1) is the most powerful since it implicitly sees the original source of the traffic (by -virtue of being attached to that original source). It also sees the external destination of the traffic. - -Since an egress gateway will never originate its own traffic, one option is to rely on policy applied at (1) and -to allow all traffic to at (2) (either by applying no policy or by applying an "allow all"). - -Alternatively, for maximum "defense in depth" applying policy at both (1) and (2) provides extra protection should -the policy at (1) be disabled or bypassed by an attacker. Policy at (2) has the following limitations: - -- [Domain-based policy](../../network-policy/domain-based-policy.mdx) is not supported at egress from egress - gateways. It will either fail to match the expected traffic, or it will work intermittently if the egress gateway - happens to be scheduled to the same node as its clients. This is because any DNS lookup happens at the client pod. - By the time the policy reaches (2) the DNS information is lost and only the IP addresses of the traffic are available. - -- The traffic source will appear to be the egress gateway pod, the source information is lost in the address - translation that occurs inside the egress gateway pod. - -That means that policies at (2) will usually take the form of rules that match only on destination port and IP address, -either directly in the rule (via a CIDR match) or via a (non-domain based) NetworkSet. Matching on source has little -utility since the IP will always be the egress gateway and the port of translated traffic is not always preserved. - -:::note - -Since v3.15.0, $[prodname] also sends health probes to the egress gateway pods from the nodes where -their clients are located. In iptables mode, this traffic is auto-allowed at egress from the host and ingress -to the egress gateway. 
In eBPF mode, the probe traffic can be blocked by policy, so you must ensure that this traffic allowed; this should be fixed in an upcoming -patch release. - -::: - -## Before you begin - -**Unsupported** - -- GKE - -**Required** - -- Calico CNI -- Open port UDP 4790 on the host - -## How to - -- [Enable egress gateway support](#enable-egress-gateway-support) -- [Provision an egress IP pool](#provision-an-egress-ip-pool) -- [Deploy a group of egress gateways](#deploy-a-group-of-egress-gateways) -- [Configure iptables backend for egress gateways](#configure-iptables-backend-for-egress-gateways) -- [Configure namespaces and pods to use egress gateways](#configure-namespaces-and-pods-to-use-egress-gateways) -- [Optionally enable ECMP load balancing](#optionally-enable-ecmp-load-balancing) -- [Verify the feature operation](#verify-the-feature-operation) -- [Control the use of egress gateways](#control-the-use-of-egress-gateways) -- [Upgrade egress gateways](#upgrade-egress-gateways) - -### Enable egress gateway support - -In the default **FelixConfiguration**, set the `egressIPSupport` field to `EnabledPerNamespace` or -`EnabledPerNamespaceOrPerPod`, according to the level of support that you need in your cluster. For -support on a per-namespace basis only: - -```bash -kubectl patch felixconfiguration default --type='merge' -p \ - '{"spec":{"egressIPSupport":"EnabledPerNamespace"}}' -``` - -Or for support both per-namespace and per-pod: - -```bash -kubectl patch felixconfiguration default --type='merge' -p \ - '{"spec":{"egressIPSupport":"EnabledPerNamespaceOrPerPod"}}' -``` - -:::note - -- `egressIPSupport` must be the same on all cluster nodes, so you should set them only in the - `default` FelixConfiguration resource. -- The operator automatically enables the required policy sync API in the FelixConfiguration. - -::: - -### Provision an egress IP pool - -Provision a small IP Pool with the range of source IPs that you want to use for a particular -application when it connects to an external service. For example: - -```bash -kubectl apply -f - < egress.projectcalico.org/selector="egress-code == 'red'" -``` - -By default, that selector can only match egress gateways in the same namespace. To select gateways -in a different namespace, specify a `namespaceSelector` annotation as well, like this: - -```bash -kubectl annotate ns egress.projectcalico.org/namespaceSelector="projectcalico.org/name == 'default'" -``` - -Egress gateway annotations have the same [syntax and range of expressions](../../reference/resources/networkpolicy.mdx#selector) as the selector fields in -$[prodname] [network policy](../../reference/resources/networkpolicy.mdx#entityrule). - -To configure a specific Kubernetes Pod to use egress gateways, specify the same annotations when -creating the pod. For example: - -```bash -kubectl apply -f - < egress.projectcalico.org/egressGatewayPolicy="egw-policy1" -``` - -To configure a specific Kubernetes pod to use the same policy, specify the same annotations when -creating the pod. -For example: - -```bash -kubectl apply -f - < -n -- nc 8089 ` should be the IP address of the netcat server. - -Then, if you check the logs or output of the netcat server, you should see: - -``` -Connection from received -``` - -with `` being one of the IPs of the egress IP pool that you provisioned. - -## Upgrade egress gateways - -From v3.16, egress gateway deployments are managed by the Tigera Operator. - -- When upgrading from a pre-v3.16 release, no automatic upgrade will occur. 
To upgrade a pre-v3.16 egress gateway deployment,
  create an equivalent EgressGateway resource with the same namespace and the same name as mentioned [above](#deploy-a-group-of-egress-gateways);
  the operator will then take over management of the old Deployment resource, replacing it with the upgraded version.

- Use `kubectl apply` to create the egress gateway resource. Tigera Operator will read the newly created resource and wait
  for the other $[prodname] components to be upgraded. Once the other $[prodname] components are upgraded, Tigera Operator
  will upgrade the existing egress gateway deployment with the new image.

By default, upgrading egress gateways will sever any connections that are flowing through them. To minimise impact,
the egress gateway feature supports some advanced options that give feedback to affected pods. For more details see
the [egress gateway maintenance guide](egress-gateway-maintenance.mdx).

## Additional resources

Please see also:

- The `egressIP...` fields of the [FelixConfiguration resource](../../reference/resources/felixconfig.mdx#spec).
- [Additional configuration for egress gateway maintenance](egress-gateway-maintenance.mdx)
diff --git a/calico-enterprise_versioned_docs/version-3.19-2/networking/egress/external-network.mdx b/calico-enterprise_versioned_docs/version-3.19-2/networking/egress/external-network.mdx
deleted file mode 100644
index f899ba02c4..0000000000
--- a/calico-enterprise_versioned_docs/version-3.19-2/networking/egress/external-network.mdx
+++ /dev/null
@@ -1,182 +0,0 @@
---
description: Allows workloads from different namespaces of a Kubernetes cluster to egress onto different external networks that (may) have overlapping IPs with each other.
---

# Configure egress traffic to multiple external networks

## Big Picture

Allows workloads from different namespaces of a Kubernetes cluster to egress onto different external networks that (may) have overlapping IPs with each other.

## Value

When traffic from particular applications leaves the cluster to access an external destination, an egress gateway enables users to control the source IP of that traffic. However, in this case, the egress gateway sees all external destinations as a flat L3 network. If a user has a network topology that involves multiple external networks, and the service endpoints in those networks may have overlapping IPs, then $[prodname] must be able to direct the egress traffic to the service endpoint in the correct network.

For example, suppose a user has a network setup as below:
There are two external networks (Red and Yellow) outside the cluster. A web server on network Red is exposing its service at `105.8.10.1`. A database server on network Yellow is exposing its service at `105.8.10.1` too.

Then the cluster could have requirements as follows:
Pods in the Red namespace should send traffic via egress gateways to `105.8.10.1` on external network Red.
Pods in the Yellow namespace should send traffic via egress gateways to `105.8.10.1` on external network Yellow.

External network support is introduced into $[prodname] to meet those requirements. It allows users to associate an egress gateway with external networks, so that egress traffic originating from the client and passing through that egress gateway is routed to the service endpoints on the associated external networks. In a nutshell, the external network feature adds support for L3 segmentation of outgoing egress traffic.
- -## Prerequisites - -- IPs on multiple external networks may overlap with each other, however, those IPs must not overlap with the IPs of pods within the cluster or their hosts. -- IPs of the BGP peers peering with the cluster should not overlap. - -## Limitations - -- This feature should be used in combination with egress gateways. Sending traffic from a client pod to external networks without going through an egress gateway is not supported. -- This feature only supports IPv4. - -## Before you begin - -This feature requires you to understand how to configure a functioning egress gateway deployment in a cluster. For more information on deploying egress gateways, [see our other egress gateway guides](./). - -## Concepts - -### Egress node -A node in the cluster which is peering with external networks and populating routes to direct traffic from the egress gateway to the destinations on external networks. Any egress gateway which is associated to an external network should be scheduled to be running on a egress node. - -### BGP bootstrap routes -BGP bootstrapping routes are routes on the node for traffic from that node to reach other nodes in the cluster, or other destinations outside the cluster, via whatever routers and external networks the user may need. This is considered outside of the scope of $[prodname], as both $[prodname] and external BGP routers on external networks may require bootstrapping routes to be set up before peering with each other. - -## How to - -### Enable egress gateway support - -In the default **FelixConfiguration**, set the `egressIPSupport` field to `EnabledPerNamespace` or -`EnabledPerNamespaceOrPerPod`, according to the level of granularity that you need in your cluster. For support on a per-namespace basis only: - -```bash -kubectl patch felixconfiguration default --type='merge' -p \ - '{"spec":{"egressIPSupport":"EnabledPerNamespace"}}' -``` - -Or to support both per-namespace and per-pod: - -```bash -kubectl patch felixconfiguration default --type='merge' -p \ - '{"spec":{"egressIPSupport":"EnabledPerNamespaceOrPerPod"}}' -``` - -### Enable external network support - -In the default **FelixConfiguration**, set the `externalNetworkSupport` field to `Enabled`. - -```bash -kubectl patch felixconfiguration default --type='merge' -p \ - '{"spec":{"externalNetworkSupport":"Enabled"}}' -``` - -### Create external network resources - -Add an `ExternalNetwork` resource to the cluster for each external networks. For example, to create an `ExternalNetwork` for `red` network. - -```bash -kubectl create -f - < -# - -# timeoutSeconds: 15 -# intervalSeconds: 5 -# httpProbe: -# urls: -# - -# - -# timeoutSeconds: 30 -# intervalSeconds: 10 - template: - metadata: - labels: - egress-code: red - spec: - nodeSelector: - kubernetes.io/os: linux - terminationGracePeriodSeconds: 0 -EOF -``` - -Where: -- Where externalNetworks associates this egress gateway deployments with external network `red`. All traffic leaving the egress gateway to an external destination will be routed to the destination on external network `red`. -- Node selector `egress: true` forces egress gateway pods of the deployment to be scheduled on egress nodes. - -:::warning - -If multiple networks in the `externalNetworkNames` list (of the egress gateway) advertise the same prefix to $[prodname] then traffic to those prefixes will flow to a non-deterministic network, which may result in hard-to-debug connectivity issues. It is the user's responsibility to make sure it will never happen. 
- -::: - -### Use egress gateways - -Add annotations to namespaces or pods to configure them to use the relevant egress gateways created by the step above. Egress traffic from the application pods will be routed to destinations within the designated external networks. - -## Additional resources - -- [Troubleshooting egress gateways](troubleshoot.mdx). diff --git a/calico-enterprise_versioned_docs/version-3.19-2/networking/egress/index.mdx b/calico-enterprise_versioned_docs/version-3.19-2/networking/egress/index.mdx deleted file mode 100644 index c4df1be0fa..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/networking/egress/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: Configure specific application traffic to exit the cluster through an egress gateway for additional security. -hide_table_of_contents: true ---- - -# Egress gateways - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/networking/egress/troubleshoot.mdx b/calico-enterprise_versioned_docs/version-3.19-2/networking/egress/troubleshoot.mdx deleted file mode 100644 index 76aca6e4a6..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/networking/egress/troubleshoot.mdx +++ /dev/null @@ -1,276 +0,0 @@ ---- -description: Use checklist to troubleshoot common problems. ---- - -# Troubleshoot egress gateways - -- [Checklist of common problems](#checklist-of-common-problems) -- [Connection to an egress gateway cannot be established](#connection-to-an-egress-gateway-cannot-be-established) -- [Connection to an egress gateway is established, but destination is not getting correct IP](#connection-to-an-egress-gateway-is-established-but-destination-is-not-getting-correct-ip) - -## Checklist of common problems - -Use the following checklist to troubleshoot, or to collect details before opening a Support ticket. - -### Is the egress gateway feature enabled? - -Egress gateway is disabled by default.  Have you enabled it in [Felix configuration](../../reference/resources/felixconfig.mdx) by setting `egressIPSupport` to `EnabledPerNamespace` or `EnabledPerNamespaceOrPerPod`? - -### Does your egress gateway routing go through a router? (On-prem only.) - -As shown in the following diagram, from the gateway to the destination, the source IP is the egress IP.  On the return path, from the destination back to the gateway, the destination IP is the egress IP. If there are any routers between the gateway and the destination, they must all know how to route the egress IP back to the gateway. If they don’t, the attempted connection cannot be established. - -![egress-basic-routing](/img/calico-enterprise/egress-basic-routing.svg) - -Options to make routers aware of egress IP: - -- Program routes statically on routers -- Peer routers with the cluster, directly or indirectly using BGP or some other protocol, or other method so routers learn about the egress IP - -### Does your egress gateway have required metadata? - -Review important egress gateway metadata (for example, namespace and labels); they are required for a client to identify the gateway(s) that it should use. - -### Is natOutgoing on your IPPool set up correctly? - -For most egress gateway scenarios you should have: `natOutgoing: false` on the egress IPPool. If you have `natOutgoing: true`, the egress gateway will SNAT to its own IP, which is the intended egress gateway IP. 
But then the egress gateway’s node will also SNAT to its own IP (i.e. the node IP), which immediately overrides the egress gateway IP. - -### Do clients and nodes have required selectors? - -Review the following annotations that are required for the client to identify its egress gateways: - -- egress.projectcalico.org/egressGatewayPolicy -- egress.projectcalico.org/selector -- egress.projectcalico.org/namespaceSelector - -on - -- Client pod -- Client pod’s namespace - -### Does Calico have enough route tables? - -On Linux, Calico claims a number of routing tables for various purposes. The exact number of tables allocated is determined by the [FelixConfiguration](../../reference/resources/felixconfig.mdx) option `routeTableRanges`. Failing to allocate enough route tables will result in Calico failing to program the networking needed for Egress Gateways to function. - -Under these conditions, `calico-node` pods will log error messages and report [unhealthy](#check-calico-node-health). - -[See more about routeTableRanges](../../reference/resources/felixconfig.mdx#routetableranges) - -### Check egress gateway health - -As of v3.15.0, the egress gateway `Deployment` includes a Kubernetes `readinessProbe`. The egress gateway will -only report ready if it is able to connect to the local `calico-node` pod and if any configured HTTP/ICMP probes -are succeeding. - -If the egress gateway reports not-Ready then more information can be found in its logs. The egress gateway logs to -`stdout` so its logs are available via `kubectl logs -n `. - -### Check health of calico-node to egress gateway probes - -As of v3.15.0, each `calico-node` pod probes the health of the remote egress gateways that its local pods are using. -If probes fail, the failures are logged in `calico-node`'s log (search for `egress_gw_tracker`) and reported via Prometheus metrics: - -``` -felix_egress_gateway_remote_polls{status="probe-failed"} 0 -felix_egress_gateway_remote_polls{status="total"} 2 -felix_egress_gateway_remote_polls{status="up"} 2 -``` - -Where, the `total` metric reports the total number of remote egress gateways that are being polled and the `up` and `probe-failed` -metrics report the number of egress gateways in each of those states. - -### Check calico-node health - -Check that your calico-node pods are consistently running and ready, especially on the nodes hosting the client and -gateway pods. Confirming healthy pods will rule out possible issues. If you find that `calico-node` is not ready, -describing the pod should show which health check is failing: - -```bash -kubectl describe pod ... -``` - -In AWS, issues such as permissions problems will report a problem with the `"aws-eni-addresses-in-sync"` health check. -For more information on the problem, review the `calico-node` log. For example, a permissions issue will result in a -log such as the following: - -``` -2021-11-16 13:11:59.292 [ERROR][26606] felix/aws_iface_provisioner.go 343: Failed to resync with AWS. Will retry after backoff. error=failed to create ENI: operation error EC2: CreateNetworkInterface, https response error StatusCode: 403, RequestID: 13dead98-7da0-4695-9be8-80cab4d5528e, api error UnauthorizedOperation: You are not authorized to perform this operation. Encoded authorization failure message: j4x3cFwZdJ......ShGkw -``` - -If you see such a log, check the AWS IAM permissions assigned to the nodes in your cluster to ensure that the nodes -have the [required permissions](egress-gateway-aws.mdx#configure-aws-iam-roles). 
It is also possible -to decode the "encoded authorization failure message" in the log by following [this guide](https://aws.amazon.com/premiumsupport/knowledge-center/aws-backup-encoded-authorization-failure/); this gives more detail on the error. - -### Check IP rule and routing setup on the client node - -**Run `ip rule`** - -On the client node, run: - -``` -ip rule -``` - -**Sample output** - -You will see a line for each pod on the node that is configured to use an egress gateway. - -``` -from 192.168.24.35 fwmark 0x80000/0x80000 lookup 250 -``` - -Where: - -- `192.168.24.35` is the relevant client's pod IP -- `250` is the routing table number -- `fwmark 0x80000/0x80000` is the bit/mask - -If you don’t see this, it means one of the following: - -- egressIPSupport is not enabled -- egressIPSupport is enabled, but you have not configured egress annotations on the client pod or on its namespace -- egressIPSupport is EnabledPerNamespace and you have configured egress annotations on the client pod, but not on its namespace - -**Run `ip route show table`** - -On the client node, run the following command using the routing table number from the `ip rule` command. For example: `250`. - -``` -ip route show table -``` - -**Sample output: clients using a single egress gateway** - -``` -default via 11.11.11.1 dev egress.calico onlink -``` - -**Sample: clients using multiple gateways** - -``` -default onlink - nexthop via 11.11.11.1 dev egress.calico weight 1 onlink - nexthop via 11.11.11.2 dev egress.calico weight 1 onlink -``` - -**Sample: clients using the following egress gateway policy** - -```yaml noValidation -apiVersion: projectcalico.org/v3 -kind: EgressGatewayPolicy -metadata: - name: "egw-policy1" -spec: - rules: - - destination: - cidr: 10.0.0.0/8 - description: "Local: no gateway" - - destination: - cidr: 11.0.0.0/8 - description: "Gateway to on prem" - gateway: - namespaceSelector: "projectcalico.org/name == 'default'" - selector: "egress-code == 'blue'" - maxNextHops: 2 - - description: "Gateway to internet" - gateway: - namespaceSelector: "projectcalico.org/name == 'default'" - selector: "egress-code == 'red'" -``` - -``` -default onlink - nexthop via 11.11.11.1 dev egress.calico weight 1 onlink - nexthop via 11.11.11.2 dev egress.calico weight 1 onlink - nexthop via 11.11.11.3 dev egress.calico weight 1 onlink -throw 10.0.0.0/8 -11.0.0.0/8 onlink - nexthop via 11.11.12.1 dev egress.calico weight 1 onlink - nexthop via 11.11.12.2 dev egress.calico weight 1 onlink -``` - -If you see nothing at all, or the following: - -``` -unreachable default scope link -``` - -- Verify that you have provisioned the gateways -- Review the selectors, and gateway namespace and labels to determine why they aren’t matching each other - -### Do you have egress IPs in BGPConfiguration svcExternalIPs? - -You should not have any egress IPs or pod IP ranges in BGPConfiguration `serviceExternalIPs` or `serviceClusterIPs` fields; it causes problems if you do. - -By default, $[prodname] BGP exports all pod IPs, which includes egress gateway IPs because they are pod IPs. But you can also use [BGPConfiguration resource parameters](../../reference/resources/bgpconfig.mdx) like `BGPConfiguration`, `serviceClusterIPs`, `serviceExternalIPs` and `serviceLoadBalancerIPs` to export additional IP ranges, in particular Kubernetes Service IPs. Because $[prodname] exports additional IP ranges in a different way from pod IPs, things can go wrong if you include pod IPs in the additional ranges. 
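-
-As a quick cross-check, you can list the service IP ranges currently being advertised and compare them against your IP pool CIDRs; none of the pool CIDRs (including the egress gateway pool) should appear in the BGPConfiguration. A minimal sketch, assuming the default `BGPConfiguration` resource is named `default`:
-
-```bash
-# Service IP ranges advertised over BGP (should not contain pod or egress IPs)
-kubectl get bgpconfiguration default -o yaml | grep -A 5 -E 'serviceClusterIPs|serviceExternalIPs|serviceLoadBalancerIPs'
-
-# Pod and egress IP pool CIDRs, for comparison
-kubectl get ippools -o custom-columns=NAME:.metadata.name,CIDR:.spec.cidr
-```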
- -## Connection to an egress gateway cannot be established - -If the outbound connection cannot be established, the policy may be denying the flow. As shown in the following diagram, policy is enforced at more points in an egress gateway flow. - -![egress-basic-routing](/img/calico-enterprise/egress-basic-routing.svg) - -**Policy enforcement**: - -- From the client pod, egress -- To the gateway pod, ingress -- From the gateway pod, egress -- Any relevant HostEndpoints that are configured in your cluster - -In [the web console](../../observability/get-started-cem.mdx), check for dropped packets because of policy on the outbound connection path. If you are using the iptables data plane, you can also run the following command on the client and gateway nodes to look at a lower level. - -``` -watch iptables-save -c | grep DROP | grep -v 0:0 -``` - -## Connection to an egress gateway is established, but destination is not getting correct IP - -If you see that the outbound connection established, but the source IP is incorrect at the destination, this can indicate that other SNAT or MASQUERADE is taking effect after the packet leaves the egress gateway pod and is overriding the egress gateway IP. If you intentionally have a MASQUERADE/SNAT for another general purpose, you must filter it so it does not apply to traffic whose source IP comes from the egress gateway pool. - -To check the egress gateway’s node, use iptables: - -``` -iptables-save -c | grep -i MASQUERADE -iptables-save -c | grep -i SNAT -``` - -## Finding leaked AWS ENIs - -In normal usage of the AWS-backed IP pools feature, the $[noderunning] Pod on each node will manage the -secondary ENIs used for networking AWS-backed IP pools. It also marks its secondary ENIs for deletion on -instance termination to avoid leaking any ENIs when an instance is terminated. - -However, in certain highly unusual situations, such as the following sequence of events: - -- $[noderunning] adds an ENI. -- The AWS API call to mark the ENI for "delete on termination" fails. -- The entire instance is deleted before the automatic retry of the above operation succeeds. - -Then, it would be possible for an ENI to be leaked. $[prodname] marks all the ENIs that it creates with tags -to identify them as $[prodname] secondary ENIs and the ID of the instance they _should_ belong to. -To find potentially leaked ENIs, you can use the AWS command line tool as follows: - -```bash -aws ec2 describe-network-interfaces --filters Name=status,Values=available Name=tag-key,Values=calico:use -``` - -Then, examine the "Tag set" of the returned network interface values to see if any of them belong to nodes that have -been deleted: - -``` -"TagSet": [ - { - "Key": "calico:use", - "Value": "secondary" - }, - { - "Key": "calico:instance", - "Value": "i-00122bf604c6ab776" - } -], -``` - -If the instance ID recorded in the "calico:instance" tag is for an instance that no longer exists then the ENI -has been leaked; it is safe to delete the ENI. If the instance ID belongs to an active instance then there -is no need to delete the ENI, it should be cleaned up (or put into use) by the $[noderunning] Pod running -on that instance. 
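-
-To confirm and clean up a suspected leak, you can check whether the instance recorded in the `calico:instance` tag still exists and, only if it does not, delete the ENI. A minimal sketch; the instance ID is taken from the example above, and the ENI ID (`eni-0123456789abcdef0`) is a placeholder for the `NetworkInterfaceId` returned by `describe-network-interfaces`:
-
-```bash
-# Check whether the instance recorded in the calico:instance tag still exists
-aws ec2 describe-instances --instance-ids i-00122bf604c6ab776
-
-# If the call fails with InvalidInstanceID.NotFound, the ENI is leaked and is
-# safe to delete using its NetworkInterfaceId (placeholder shown here)
-aws ec2 delete-network-interface --network-interface-id eni-0123456789abcdef0
-```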
diff --git a/calico-enterprise_versioned_docs/version-3.19-2/networking/index.mdx b/calico-enterprise_versioned_docs/version-3.19-2/networking/index.mdx deleted file mode 100644 index d32f714c98..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/networking/index.mdx +++ /dev/null @@ -1,59 +0,0 @@ ---- -description: Calico's flexible networking options reduce the barriers to adopting a CaaS platform solution. Determine the best networking option for your implementation. ---- - -import { DocCardLink, DocCardLinkLayout } from '/src/___new___/components'; - -# Networking - -The $[prodname] network plugins provide a range of networking options to fit your implementation and maximize performance. - -## Getting started - - - - - - - -## Configuring networking - - - - - - - - - - - - - - -## IP address management - - - - - - - - - - - - - - - -## Egress gateways - - - - - - - - - \ No newline at end of file diff --git a/calico-enterprise_versioned_docs/version-3.19-2/networking/ipam/assign-ip-addresses-topology.mdx b/calico-enterprise_versioned_docs/version-3.19-2/networking/ipam/assign-ip-addresses-topology.mdx deleted file mode 100644 index 754bcf657b..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/networking/ipam/assign-ip-addresses-topology.mdx +++ /dev/null @@ -1,181 +0,0 @@ ---- -description: Configure Calico Enterprise to use specific IP pools for different topologies including zone, rack, or region. ---- - -# Assign IP addresses based on topology - -## Big picture - -Assign blocks of IP addresses from an IP pool for different topological areas. - -## Value - -If you have workloads in different regions, zones, or rack, you may want them to get IP addresses from the same IP pool. This strategy is useful for reducing the number of routes that are required in the network, or to meet requirements imposed by an external firewall device or policy. $[prodname] makes it easy to do this using an IP pool resource with node labels and node selectors. - -## Concepts - -### IP address assignment - -Topology-based IP address assignment requires addresses to be per-host (node). -As such, Kubernetes annotations cannot be used because annotations are only per-namespace and per-pod. And although you can configure IP addresses for nodes in the CNI configuration, you are making changes within the host’s file system. The best option is to use node-selection IP address assignment using IP pools. - -### Node-selection IP address management - -Node selection-based IP address assignment is exactly what it sounds like: node labels are set, and Calico uses node selectors to decide which IP pools to use when assigning IP addresses to the node. - -### Best practice - -Nodes only assign workload addresses from IP pools which select them. To avoid having a workload not get an IP and fail to start, it is important to ensure that all nodes are selected by at least one IP pool. - -## How to - -### Create an IP pool, specific nodes - -In the following example, we create an IP pool that only allocates IP addresses for nodes with the label, **zone=west**. - -```yaml -apiVersion: projectcalico.org/v3 -kind: IPPool -metadata: - name: zone-west-ippool -spec: - cidr: 192.168.0.0/24 - ipipMode: Always - natOutgoing: true - nodeSelector: zone == "west" -``` - -Then, we label a node with zone=west. For example: - -```bash -kubectl label nodes kube-node-0 zone=west -``` - -## Tutorial - -In this tutorial, we create a cluster with four nodes across two racks (two nodes/rack). 
- -``` - ------------------- - | router | - ------------------- - | | ---------------- --------------- -| rack-0 | | rack-1 | ---------------- --------------- -| kube-node-0 | | kube-node-2 | -- - - - - - - - - - - - - - - - -| kube-node-1 | | kube-node-3 | -- - - - - - - - - - - - - - - - -``` - -Using the pod IP range `192.168.0.0/16`, we target the following setup: reserve -the `192.168.0.0/24` and `192.168.1.0/24` pools for `rack-0`, `rack-1`. Let's -get started. - -By installing $[prodname] without setting the default IP pool to match, -running `calicoctl get ippool -o wide` shows that $[prodname] created its -default IP pool of `192.168.0.0/16`: - -``` -NAME CIDR NAT IPIPMODE DISABLED SELECTOR -default-ipv4-ippool 192.168.0.0/16 true Always false all() -``` - -1. Delete the default IP pool. - - Since the `default-ipv4-ippool` IP pool resource already exists and accounts - for the entire `/16` block, we will have to delete this first: - - ```bash - kubectl delete ippools default-ipv4-ippool - ``` - -2. Label the nodes. - - To assign IP pools to specific nodes, these nodes must be labelled - using [kubectl label](https://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes/#add-a-label-to-a-node). - - ```bash - kubectl label nodes kube-node-0 rack=0 - kubectl label nodes kube-node-1 rack=0 - kubectl label nodes kube-node-2 rack=1 - kubectl label nodes kube-node-3 rack=1 - ``` - -3. Create an IP pool for each rack. - - ```bash - kubectl create -f -< - nginx-5c7588df-s7qw6 1/1 Running 0 6m7s 192.168.0.129 kube-node-1 - nginx-5c7588df-w7r7g 1/1 Running 0 6m3s 192.168.1.65 kube-node-2 - nginx-5c7588df-62lnf 1/1 Running 0 6m3s 192.168.1.1 kube-node-3 - nginx-5c7588df-pnsvv 1/1 Running 0 6m3s 192.168.1.64 kube-node-2 - ``` - - The grouping of IP addresses assigned to the workloads differ based on what - node that they were scheduled to. Additionally, the assigned address for - each workload falls within the respective IP pool that selects the rack that - they run on. - -:::note - -$[prodname] IPAM will not reassign IP addresses to workloads -that are already running. To update running workloads with IP addresses from -a newly configured IP pool, they must be recreated. We recommend doing this -before going into production or during a maintenance window. - -::: - -## Additional resources - -[Calico IPAM](../../reference/component-resources/configuration.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/networking/ipam/change-block-size.mdx b/calico-enterprise_versioned_docs/version-3.19-2/networking/ipam/change-block-size.mdx deleted file mode 100644 index fce1c92575..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/networking/ipam/change-block-size.mdx +++ /dev/null @@ -1,257 +0,0 @@ ---- -description: Expand or shrink the IP pool block size to efficiently manage IP pool addresses. ---- - -# Change IP pool block size - -import DetermineIpam from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_determine-ipam.mdx'; - -## Big picture - -Change the IP pool block size to efficiently manage IP pool addresses. - -## Value - -Changing IP pool block size after installation requires ordered steps to minimize pod connectivity disruption. - -## Concepts - -### About IP pools - -By default, $[prodname] uses an IPAM block size of 64 addresses – /26 for IPv4, and /122 for IPv6. However, the block size can be changed depending on the IP pool address family. 
- -- IPv4: 20-32, inclusive -- IPv6: 116-128, inclusive - -You can have **only one default IP pool for per protocol** in your installation manifest. In this example, there is one IP pool for IPv4 (/26), and one IP pool for IPv6 (/122). - -```yaml -apiVersion: operator.tigera.io/v1 -kind: Installation -metadata: - name: default -spec: - # Configures Calico networking. - calicoNetwork: - ipPools: - - blockSize: 26 - cidr: 10.48.0.0/21 - encapsulation: IPIP - natOutgoing: Enabled - nodeSelector: all() - - blockSize: 122 - cidr: 2001::00/64 - encapsulation: None - natOutgoing: Enabled - nodeSelector: all() -``` - -However, the following is invalid because it has two IP pools for IPv4. - -```yaml -apiVersion: operator.tigera.io/v1 -kind: Installation -metadata: - name: default -spec: - # Configures Calico networking. - calicoNetwork: - ipPools: - - blockSize: 26 - cidr: 10.48.0.0/21 - encapsulation: IPIP - natOutgoing: Enabled - nodeSelector: all() - - blockSize: 31 - cidr: 10.48.8.0/21 - encapsulation: IPIP - natOutgoing: Enabled - nodeSelector: all() -``` - -### Expand or shrink IP pool block sizes - -By default, the $[prodname] IPAM block size for an IP pool is /26. To expand from the default size /26, lower the `blockSize` (for example, /24). To shrink the `blockSize` from the default /26, raise the number (for example, /28). - -### Best practice: change IP pool block size before installation - -Because the `blockSize` field cannot be edited directly after $[prodname] installation, it is best to change the IP pool block size before installation to minimize disruptions to pod connectivity. - -## Before you begin... - -**Required** - -Verify that you are using $[prodname] IPAM. - - - -## How to - -:::note - -Follow the steps to minimize pod connectivity disruption. Pods may lose connectivity when they are redeployed, and may lose external connectivity while in the temporary pool. Also, when pods are deleted, applications may be temporarily unavailable (depending on the type of application). Plan your changes accordingly. -::: - -The high-level steps to follow are: - -1. [Create a temporary IP pool](#create-a-temporary-ip-pool) - **Note**: The temporary IP pool must not overlap with the existing one. -1. [Disable the existing IP pool](#disable-the-existing-ip-pool) - **Note**: When you disable an IP pool, only new IP address allocations are prevented; networking of existing pods are not affected. -1. [Delete pods from the existing IP pool](#delete-pods-from-the-existing-ip-pool) - This includes any new pods that may have been created with the existing IP pool prior to disabling the pool. Verify that new pods get an address from the temporary IP pool. -1. [Delete the existing IP pool](#delete-the-existing-ip-pool) -1. [Create a new IP pool with the desired block size](#create-a-new-ip-pool-with-the-desired-block-size) -1. [Disable the temporary IP pool](#disable-the-temporary-ip-pool) -1. [Delete pods from the temporary IP pool](#delete-pods-from-the-temporary-ip-pool) -1. [Delete the temporary IP pool](#delete-the-temporary-ip-pool) - -## Tutorial - -In the following steps, our Kubernetes cluster has a default CIDR block size of /26. We want to shrink the block size to /28 to use the pool more efficiently. - -### Create a temporary IP pool - -We add a new IPPool with the CIDR range, 10.0.0.0/16. - -Create a temporary-pool.yaml. 
- -```yaml -apiVersion: projectcalico.org/v3 -kind: IPPool -metadata: - name: temporary-pool -spec: - cidr: 10.0.0.0/16 - ipipMode: Always - natOutgoing: true -``` - -Apply the changes. - -```bash -kubectl apply -f temporary-pool.yaml -``` - -Let’s verify the temporary IP pool. - -```bash -calicoctl get ippool -o wide -``` - -``` -NAME CIDR NAT IPIPMODE DISABLED -default-ipv4-ippool 192.168.0.0/16 true Always false -temporary-pool 10.0.0.0/16 true Always false -``` - -### Disable the existing IP pool - -Disable allocations in the default pool. - -```bash -kubectl patch ippool default-ipv4-ippool -p '{"spec": {"disabled": "true"}}' -``` - -Verify the changes. - -```bash -calicoctl get ippool -o wide -``` - -``` -NAME CIDR NAT IPIPMODE DISABLED -default-ipv4-ippool 192.168.0.0/16 true Always true -temporary-pool 10.0.0.0/16 true Always false -``` - -### Delete pods from the existing IP pool - -In our example, **coredns** is our only pod; for multiple pods you would trigger a deletion for all pods in the cluster. - -```bash -kubectl delete pod -n kube-system coredns-6f4fd4bdf-8q7zp -``` - -Restart all pods with just one command. - -:::caution - -The following command is disruptive and may take several minutes depending on the number of pods deployed. - -::: - -```bash -kubectl delete pod -A --all -``` - -### Delete the existing IP pool - -Now that you’ve verified that pods are getting IPs from the new range, you can safely delete the existing pool. - -```bash -kubectl delete ippool default-ipv4-ippool -``` - -### Create a new IP pool with the desired block size - -In this step, we update the IPPool with the new block size of (/28). - -```yaml -apiVersion: projectcalico.org/v3 -kind: IPPool -metadata: - name: default-ipv4-ippool -spec: - blockSize: 28 - cidr: 192.0.0.0/16 - ipipMode: Always - natOutgoing: true -``` - -Apply the changes. - -```bash -kubectl apply -f pool.yaml -``` - -### Disable the temporary IP pool - -```bash -kubectl patch ippool temporary-pool -p '{"spec": {"disabled": "true"}}' -``` - -### Delete pods from the temporary IP pool - -In our example, **coredns** is our only pod; for multiple pods you would trigger a deletion for all pods in the cluster. - -```bash -kubectl delete pod -n kube-system coredns-6f4fd4bdf-8q7zp -``` - -Restart all pods with just one command. - -:::caution - -The following command is disruptive and may take several minutes depending on the number of pods deployed. - -::: - -```bash -kubectl delete pod -A --all -``` - -Validate your pods and block size are correct by running the following commands: - -```bash -kubectl get pods --all-namespaces -o wide -calicoctl ipam show --show-blocks -``` - -### Delete the temporary IP pool - -Clean up the IP pools by deleting the temporary IP pool. - -```bash -kubectl delete pool temporary-pool -``` diff --git a/calico-enterprise_versioned_docs/version-3.19-2/networking/ipam/get-started-ip-addresses.mdx b/calico-enterprise_versioned_docs/version-3.19-2/networking/ipam/get-started-ip-addresses.mdx deleted file mode 100644 index eb5fafed11..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/networking/ipam/get-started-ip-addresses.mdx +++ /dev/null @@ -1,84 +0,0 @@ ---- -description: Configure Calico Enterprise to use Calico Enterprise IPAM or host-local IPAM, and when to use one or the other. ---- - -# Get started with IP address management - -## Big picture - -Understand how IP address management (IPAM) functions in a Kubernetes cluster using $[prodname]. 
- -## Value - -Different IPAM techniques provide different feature sets. $[prodname]’s IPAM provides additional IP allocation efficiency and flexibility compared to other address management approaches. - -## Concepts - -### IPAM in Kubernetes - -Kubernetes uses IPAM plugins to allocate and manage IP addresses assigned to pods. Different IPAM plugins provide different feature sets. $[prodname] provides its own IPAM plugin called **calico-ipam** which is designed to work well with $[prodname] and includes a number of features. - -### $[prodname] IPAM - -The **calico-ipam** plugin uses $[prodname]’s IP pool resource to control how IP addresses are allocated to pods within the cluster. This is the default plugin used by most $[prodname] installations. - -By default, $[prodname] uses a single IP pool for the entire Kubernetes pod CIDR, but you can divide the pod CIDR into several pools. You can assign separate IP pools to particular selections of **nodes**, or to teams, users, or applications within a cluster using **namespaces**. - -You can control which pools $[prodname] uses for each pod using - -- node selectors -- an annotation on the pod’s namespace, or -- an annotation on the pod - -$[prodname] also supports the **host-local** IPAM plugin. However, when using the host-local IPAM plugin some $[prodname] features are not available. - -### $[prodname] IPAM blocks - -In $[prodname] IPAM, IP pools are subdivided into blocks -- smaller chunks that are associated with a particular node in the cluster. Each node in the cluster can have one or more blocks associated with it. $[prodname] will automatically create and destroy blocks as needed as the number of nodes and pods in the cluster grows or shrinks. - -Blocks allow $[prodname] to efficiently aggregate addresses assigned to pods on the same node, reducing the size of the routing table. By default $[prodname] will try to allocate IP addresses from within an associated block, creating a new block if necessary. $[prodname] can also assign addresses to pods on a node that are not within a block associated with that node. This allows for IP allocations independent of the node on which a pod is launched. - -By default, $[prodname] creates blocks with room for 64 addresses (a /26), but you can control block sizes for each IP pool. - -### Host-local IPAM - -The host-local plugin is a simple IP address management plugin. It uses predetermined CIDRs statically allocated to each node to choose addresses for pods. Once set, the CIDR for a node cannot be modified. Pods can be assigned addresses only from within the CIDR allocated to the node. - -$[prodname] can use the host-local IPAM plugin, using the **Node.Spec.PodCIDR** field in the Kubernetes API to determine the CIDR to use for each node. However, per-node, per-pod, and per-namespace IP allocation features are not available using the host-local plugin. - -The host-local IPAM plugin is primarily used by other methods of routing pod traffic from one host to another. For example, it is used when installing $[prodname] for policy enforcement with flannel networking, as well as when using $[prodname] in Google Kubernetes Engine (GKE). - -## Before you begin - -**Limitations** - -- Works for platforms that use the Calico CNI - -## How to - -### Install $[prodname] with calico-ipam - -Follow one of the [getting started guides](../../getting-started/index.mdx) to install $[prodname]. 
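-
-Once the cluster is up, a quick way to confirm that $[prodname] IPAM is managing addresses, and to see the per-node address blocks described above, is to inspect IPAM state. A minimal sketch, assuming `calicoctl` is installed and configured for your cluster:
-
-```bash
-# Summarize IP address usage across all IP pools
-calicoctl ipam show
-
-# Break the summary down into the blocks allocated to each node
-calicoctl ipam show --show-blocks
-```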
- -### Install $[prodname] with host-local IPAM - -Follow one of the [getting started guides](../../getting-started/index.mdx) to install $[prodname] with flannel networking, or on GKE. - -Or, see the [reference documentation on host-local IPAM](../../reference/component-resources/configuration.mdx#using-host-local-ipam). - -## Tutorial - -For a blog/tutorial on IP pools, see [Calico Enterprise IPAM: Explained and Enhanced](https://www.tigera.io/blog/calico-ipam-explained-and-enhanced/). - -## Additional resources - -- [IP Pool](../../reference/resources/ippool.mdx) - -There are several other ways to leverage $[prodname] IPAM including: - -- [Assign addresses based on topology](assign-ip-addresses-topology.mdx) -- [Use a specific address for a pod](use-specific-ip.mdx) -- [Migrate from one IP pool to another](migrate-pools.mdx) -- [Restrict a pod to use an IP address in a specific range](legacy-firewalls.mdx) -- [View IP address utilization](../../reference/clis/calicoctl/ipam/show.mdx) -- [Change IP address block size](../../reference/resources/ippool.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/networking/ipam/index.mdx b/calico-enterprise_versioned_docs/version-3.19-2/networking/ipam/index.mdx deleted file mode 100644 index 834d48a7fd..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/networking/ipam/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: Calico IPAM is flexible and efficient. Learn how to interoperate with legacy firewalls using IP address ranges, advertise Kubernetes service IPs, and more. -hide_table_of_contents: true ---- - -# IP address management - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/networking/ipam/initial-ippool.mdx b/calico-enterprise_versioned_docs/version-3.19-2/networking/ipam/initial-ippool.mdx deleted file mode 100644 index 0fc5eccfb6..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/networking/ipam/initial-ippool.mdx +++ /dev/null @@ -1,68 +0,0 @@ ---- -description: Configure the default IP address ranges for operator installation. ---- - -# Configure default IP pools - -## Big picture - -Configure the default IP pool values to use during Tigera Operator installation. - -## Value - -During Tigera Operator installation, you must configure the CIDR range to use for pods that reflects your environment. - -## Concepts - -### Kubernetes pod CIDR - -The **Kubernetes pod CIDR** is the expected IP address range for pod IPs. It is defined for the entire cluster, and is used by various Kubernetes components to determine if an IP belongs to a pod. For example, kube-proxy treats traffic differently if an IP is from a pod than if it is not. All pod IPs must be in the CIDR range for Kubernetes to function correctly. - -### Tigera Operator and IP pools - -[Calico IP pools](../../reference/resources/ippool.mdx) are ranges of IP addresses that Calico uses to assign to pods; the ranges must within the Kubernetes pod CIDR. - -The Tigera Operator reads the [Installation](../../reference/installation/api.mdx#installation) -resource and configures the default Calico IP pool. Note the following: - -- Default fields for any that are omitted: - - CIDR: 192.168.0.0/16 - - Encapsulation: IPIP - - NodeSelector: all() - - NATOutgoing: Enabled -- IP pools are only used when Calico is used for pod networking, IP pools are not utilized when using other pod networking solutions. 
-- To make changes to the IP pools after Tigera Operator install, you may use [calicoctl](../../reference/clis/calicoctl/index.mdx) or kubectl. If you make the changes to the IP Pool in the Installation resource (Operator IPPool) after installation, the changes are not applied. - -## Before you begin... - -- Verify that your IP pool is within the Kubernetes pod CIDR -- If you are using encapsulation (IP in IP or VXLAN), ensure that the traffic is allowed on your network -- You are making these changes for a cluster that has not yet had $[prodname] deployed. - -## How to - -1. Download the custom-resource.yaml file. -1. Edit the [Installation resource](../../reference/installation/api.mdx#installation). - **Required values**: `cidr:` - **Empty values**: Defaulted - - ```bash - apiVersion: operator.tigera.io/v1 - kind: Installation - metadata: - name: default - spec: - calicoNetwork: - ipPools: - - cidr: "192.168.0.0/16" - encapsulation: "IPIP" - nodeSelector: "label == 'value'" - natOutgoing: "Enabled" - ``` - -1. Apply the manifest and continue with your installation as normal. - -## Additional resources - -- [IP pool resource](../../reference/resources/ippool.mdx) -- Use [calicoctl](../../reference/clis/calicoctl/index.mdx) or `kubectl` to edit the IPPool resource. diff --git a/calico-enterprise_versioned_docs/version-3.19-2/networking/ipam/ip-autodetection.mdx b/calico-enterprise_versioned_docs/version-3.19-2/networking/ipam/ip-autodetection.mdx deleted file mode 100644 index de48a0b7af..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/networking/ipam/ip-autodetection.mdx +++ /dev/null @@ -1,240 +0,0 @@ ---- -description: Calico IP autodetection ensures the correct IP address is used for routing. Learn how to customize it. ---- - -# Configure IP autodetection - -## Big picture - -Configure IP autodetection for $[prodname] nodes to ensure the correct IP address is used for routing. - -## Value - -When you install $[prodname] on a node, an IP address and subnet is automatically detected. $[prodname] provides several ways to configure IP/subnet autodetection, and supports configuring specific IPs for: - -- Hosts with multiple external interfaces -- Host interfaces with multiple IP addresses -- [Changes to cross subnet packet encapsulation](../configuring/vxlan-ipip.mdx) -- Changes to host IP address - -## Concepts - -### Autodetecting node IP address and subnet - -For internode routing, each $[prodname] node must be configured with an IPv4 address and/or an IPv6 address. When you install $[prodname] on a node, a node resource is automatically created using routing information that is detected from the host. For some deployments, you may want to update autodetection to ensure nodes get the correct IP address. - -**Sample default node resource after installation** - -```yaml -apiVersion: projectcalico.org/v3 -kind: Node -metadata: - name: node-hostname -spec: - bgp: - asNumber: 64512 - ipv4Address: 10.244.0.1/24 - ipv6Address: 2000:db8:85a3::8a2e:370:7335/120 - ipv4IPIPTunnelAddr: 192.168.0.1 -``` - -### Autodetection methods - -By default, $[prodname] uses the **firstFound** method; the first valid IP address on the first interface (excluding local interfaces such as the docker bridge). 
However, you can change the default method to any of the following: - -- Address used by the node to reach a particular IP or domain (**canReach**) -- Address assigned to Kubernetes node (**kubernetes: InternalIP**) -- Regex to include matching interfaces (**interface**) -- Regex to exclude matching interfaces (**skipInterface**) -- A list of IP ranges in CIDR format to determine valid IP addresses on the node to choose from (**cidrs**) - -For help on autodetection methods, see -[NodeAddressAutodetection](../../reference/installation/api.mdx#nodeaddressautodetection) in the operator Installation reference -and for more details see the [node configuration](../../reference/component-resources/node/configuration.mdx#ip-autodetection-methods) reference. - -### Manually configure IP address and subnet - -To manually configure an IP address and subnet, disable autodetection and update the node resources with the IP address. - -## How to - -- [Change the autodetection method](#change-the-autodetection-method) -- [Manually configure IP address and subnet for a node](#manually-configure-ip-address-and-subnet-for-a-node) - -### Change the autodetection method - -As noted previously, the default autodetection method is **first valid interface found** (firstFound). To use a different autodetection method, -configure the NodeAddressAutodetection field(s) in the Installation resource. You can update the Installation resource before applying it -during installation or edit it later with `kubectl edit installation default`. - -:::note - -To configure the default autodetection method for IPv6 for any of the below methods, use the field `nodeAddressAutodetectionV6`. - -::: - -- **Kubernetes Node IP** - - $[prodname] will select the first internal IP address listed in the Kubernetes node's `Status.Addresses` field. - - ```yaml - kind: Installation - apiVersion: operator.tigera.io/v1 - metadata: - name: default - spec: - calicoNetwork: - nodeAddressAutodetectionV4: - kubernetes: NodeInternalIP - ``` - -- **Source address used to reach an IP or domain name** - - $[prodname] will choose the IP address that is used to reach the given "can reach" IP address or domain. For example: - - ```yaml - kind: Installation - apiVersion: operator.tigera.io/v1 - metadata: - name: default - spec: - calicoNetwork: - nodeAddressAutodetectionV4: - canReach: 8.8.8.8 - ``` - -- **Including matching interfaces** - - $[prodname] will choose an address on each node from an interface that matches the given [regex](https://pkg.go.dev/regexp). - For example: - - ```yaml - kind: Installation - apiVersion: operator.tigera.io/v1 - metadata: - name: default - spec: - calicoNetwork: - nodeAddressAutodetectionV4: - interface: eth.* - ``` - -- **Excluding matching interfaces** - - $[prodname] will choose an address on each node from an interface that does not match the given [regex](https://pkg.go.dev/regexp). - For example: - - ```yaml - kind: Installation - apiVersion: operator.tigera.io/v1 - metadata: - name: default - spec: - calicoNetwork: - nodeAddressAutodetectionV4: - skipInterface: eth.* - ``` - -- **Including CIDRs** - - $[prodname] will select any IP address from the node that falls within the given CIDRs. 
For example: - - ```yaml - kind: Installation - apiVersion: operator.tigera.io/v1 - metadata: - name: default - spec: - calicoNetwork: - nodeAddressAutodetectionV4: - cidrs: - - '192.168.200.0/24' - ``` - -- **IPv4** - - ```yaml noValidation - apiVersion: operator.tigera.io/v1 - kind: Installation - metadata: - name: default - spec: - variant: TigeraSecureEnterprise - ... - calicoNetwork: - nodeAddressAutodetectionV4: - : - ``` - -- **IPv6** - - ```yaml noValidation - apiVersion: operator.tigera.io/v1 - kind: Installation - metadata: - name: default - spec: - variant: TigeraSecureEnterprise - ... - calicoNetwork: - nodeAddressAutodetectionV6: - : - ``` - -:::note - -You can use both `nodeAddressAutodetectionV4` and `nodeAddressAutodetectionV6` to specify IPv4 and IPv6 methods. - -::: - -### Manually configure IP address and subnet for a node - -In the following scenarios, you may want to configure a specific IP and subnet: - -- Hosts with multiple external interfaces -- Host interfaces with multiple IP addresses -- Changes to cross subnet packet encapsulation -- Changes to host IP address - -You can configure specific IP address and subnet for a node by disabling IP autodetection and then updating the [Node resource](../../reference/resources/node.mdx). - -#### Disable autodetection - -To disable autodetection method, update the proper `NodeAddressAutodetection` field in the Installation resource: - -```yaml noValidation -apiVersion: operator.tigera.io/v1 -kind: Installation -metadata: - name: default -spec: - variant: TigeraSecureEnterprise - ... - calicoNetwork: - nodeAddressAutodetectionV4: {} - nodeAddressAutodetectionV6: {} -``` - -#### Configure IP and subnet using node resource - -You can also configure the IP address and subnet on a Node resource. - -:::note - -When configuring the IP address on a Node resource, you may want to disable IP address options or environment variables on the node. IP options on the container take precedence, and will overwrite the values you configure on the node resource. - -::: - -Use `calicoctl patch` to update the current node configuration. For example: - -``` -calicoctl patch node kind-control-plane \ - --patch='{"spec":{"bgp": {"ipv4Address": "10.0.2.10/24", "ipv6Address": "fd80:24e2:f998:72d6::/120"}}}' -``` - -## Additional resources - -- For details on autodetection methods, see the [node configuration](../../reference/component-resources/node/configuration.mdx) reference. -- For calicoctl environment variables, see [Configuring $[nodecontainer]](../../reference/component-resources/node/configuration.mdx) -- [Node resource](../../reference/resources/node.mdx) -- [Reference documentation for calicoctl patch](../../reference/clis/calicoctl/patch.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/networking/ipam/ippools.mdx b/calico-enterprise_versioned_docs/version-3.19-2/networking/ipam/ippools.mdx deleted file mode 100644 index 1426dda3bf..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/networking/ipam/ippools.mdx +++ /dev/null @@ -1,117 +0,0 @@ ---- -description: Create multiple IP pools ---- - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - -# Create multiple IP pools - -## Understanding multiple IP pools - -By default when you install $[prodname], a single IPv4 pool is created. This IP pool is used for allocating IP addresses to pods and, if needed, -tunnels within your cluster. - -Sometimes you may want to configure additional IP pools. 
For example: - -- If the IP address space available for pods in your cluster is disjointed. -- You want to [assign IP addresses based on cluster topology](assign-ip-addresses-topology.mdx). - -## Create multiple IP pools when installing Calico - - - - -You can edit the Installation resource within `custom-resources.yaml` to include multiple unique IP pools. The following -example creates two IP pools assigned to different sets of nodes. - -```yaml -apiVersion: operator.tigera.io/v1 -kind: Installation -metadata: - name: default -spec: - calicoNetwork: - ipPools: - - name: pool-zone-1 - cidr: 192.168.0.0/24 - encapsulation: VXLAN - nodeSelector: "zone == 'zone-1'" - - name: pool-zone-2 - cidr: 192.168.1.0/24 - encapsulation: VXLAN - nodeSelector: "zone == 'zone-2'" -``` - -After installing $[prodname], you can confirm the IP pools were created by using the following command: - -```bash -kubectl get ippools -``` - -## Prevent the operator from managing IP pools - -In some cases, you may want to disable IP pool management within the operator and instead use calicoctl or kubectl to -create and delete IP pools. To do this, you can edit the **Installation** resource with `custom-resources.yaml` to specify -an empty list of IP pools. - -```yaml -apiVersion: operator.tigera.io/v1 -kind: Installation -metadata: - name: default -spec: - calicoNetwork: - ipPools: [] -``` - -With this configuration, the operator will wait for you to create IP pools before installing $[prodname] components. - - - - -When using manifests to install $[prodname], you can use calicoctl to manage multiple IP pools. For complete control, you can disable -creation of the default IP pool before doing so. - -1. Disable the default IP pool by adding the following environment variable to the calico-node DaemonSet in `calico.yaml`. - - ```yaml - env: - - name: NO_DEFAULT_POOLS - value: "true" - ``` - -1. Then, install `calico.yaml`. - -1. Create the desired IP pools. For example, the following commands create two IP pools assigned to different sets of nodes. - - ```bash - calicoctl create -f -< - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/networking/ipam/ipv6.mdx b/calico-enterprise_versioned_docs/version-3.19-2/networking/ipam/ipv6.mdx deleted file mode 100644 index 80ea34a2f7..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/networking/ipam/ipv6.mdx +++ /dev/null @@ -1,94 +0,0 @@ ---- -description: Configure dual stack for workloads. ---- - -# Configure dual stack - -## Big picture - -Configure $[prodname] IP address allocation to use dual stack for workload communications. - -## Value - -In addition to IPv4, IPv6 is increasingly desirable for Workload communication. $[prodname] supports: - -- **IPv4 only** (default) - - Each workload gets an IPv4 address, and can communicate over IPv4. - -- **Dual stack** - - Each workload gets an IPv4 and an IPv6 address, and can communicate over IPv4 or IPv6. 
- -## Before you begin - -**Unsupported** - -- IPv6 only -- AKS -- AWS/kOps -- EKS -- GKE -- RKE -- RKE2 - -**$[prodname] requirements** - -- $[prodname] IPAM -- OpenShift - - Requires 4.8 for IPv6/IPv4 dual-stack and IPv6 single stack support - - Requires 3.11 and later using $[prodname] 3.4 and later for IPv6 support - -**Kubernetes version requirements** - -- For dual stack, 1.20 and later -- For one IP stack at a time (IPv4 or IPv6), any Kubernetes version - -**Kubernetes IPv6 host requirements** - -- An IPv6 address that is reachable from the other hosts -- The sysctl setting, `net.ipv6.conf.all.forwarding`, is set to `1`. - This ensures both Kubernetes service traffic and $[prodname] traffic is forwarded appropriately. -- A default IPv6 route - -**Kubernetes IPv4 host requirements** - -- An IPv4 address that is reachable from the other hosts -- The sysctl setting, `net.ipv4.conf.all.forwarding`, is set to `1`. - This ensures both Kubernetes service traffic and $[prodname] traffic is forwarded appropriately. -- A default IPv4 route - -## How to - -:::note - -The following task is only for new clusters. - -::: - -### Enable dual stack - -1. Set up a new cluster following the Kubernetes [prerequisites](https://kubernetes.io/docs/concepts/services-networking/dual-stack/#prerequisites) and [enablement steps](https://kubernetes.io/docs/concepts/services-networking/dual-stack/#enable-ipv4-ipv6-dual-stack). - -To configure dual-stack cluster using the operator, edit your default Installation at install time to include both an IPv4 and IPv6 pool. For example: - -```yaml -apiVersion: operator.tigera.io/v1 -kind: Installation -metadata: - name: default -spec: - # Configures Calico networking. - calicoNetwork: - ipPools: - - blockSize: 26 - cidr: 10.48.0.0/21 - encapsulation: IPIP - natOutgoing: Enabled - nodeSelector: all() - - blockSize: 122 - cidr: 2001::00/64 - encapsulation: None - natOutgoing: Enabled - nodeSelector: all() -``` diff --git a/calico-enterprise_versioned_docs/version-3.19-2/networking/ipam/legacy-firewalls.mdx b/calico-enterprise_versioned_docs/version-3.19-2/networking/ipam/legacy-firewalls.mdx deleted file mode 100644 index 3495b5d817..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/networking/ipam/legacy-firewalls.mdx +++ /dev/null @@ -1,66 +0,0 @@ ---- -description: Restrict the IP address chosen for a pod to a specific range of IP addresses. ---- - -# Restrict a pod to use an IP address in a specific range - -import DetermineIpam from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_determine-ipam.mdx'; - -## Big picture - -Restrict the IP address chosen for a pod to a specific range of IP addresses. - -## Value - -When Kubernetes pods interact with external systems that make decisions based on IP ranges (for example legacy firewalls), it can be useful to define several IP ranges and explicitly assign pods to those ranges. Using $[prodname] IP Address Management (IPAM), you can restrict a pod to use an address from within a specific range. - -## Concepts - -### Kubernetes pod CIDR - -The **Kubernetes pod CIDR** is the range of IPs Kubernetes expects pod IPs to be assigned from. It is defined for the entire cluster and is used by various Kubernetes components to determine whether an IP belongs to a pod. For example, kube-proxy treats traffic differently if that traffic is from a pod than if it is not. All pod IPs must be in the CIDR range for Kubernetes to function correctly. 
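-
-If you are not sure what your cluster's pod CIDR is, one common way to check (assuming a cluster where kube-controller-manager is started with the `--cluster-cidr` flag, for example a kubeadm cluster) is:
-
-```bash
-# Print the pod network CIDR configured for the cluster
-kubectl cluster-info dump | grep -m 1 cluster-cidr
-```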
- -### IP Pool - -**IP pools** are ranges of IP addresses from which $[prodname] assigns pod IPs. By default, $[prodname] creates an IP pool for the entire Kubernetes pod CIDR, but you can change this to break the pod CIDR up into several pools. You can control which pool $[prodname] uses for each pod using node selectors, or annotations on the pod or the pod’s namespace. - -## Before you begin... - -The features in this How to guide require: - -- $[prodname] IPAM - - - -Additionally, cluster administrators must have [configured IP pools](../../reference/resources/ippool.mdx) to define the valid IP ranges to use for allocating pod IP addresses. - -## How to - -### Restrict a pod to use an IP address range - -Annotate the pod with key `cni.projectcalico.org/ipv4pools` and/or `cni.projectcalico.org/ipv6pools` and value set to a list of IP pool names, enclosed in brackets. For example: - -``` -cni.projectcalico.org/ipv4pools: '["pool-1", "pool-2"]' -``` - -Note the use of the escaped \" for the inner double quotes around the pool names. - -### Restrict all pods within a namespace to use an IP address range - -Annotate the namespace with key `cni.projectcalico.org/ipv4pools` and/or `cni.projectcalico.org/ipv6pools` and value set to a list of IP pool names, enclosed in brackets. For example: - -``` -cni.projectcalico.org/ipv4pools: '["pool-1", "pool-2"]' - -``` - -Note the use of the escaped `\"` for the inner double quotes around the pool names. - -If both the pod and the pod’s namespace have the annotation, the pod annotation takes precedence. - -The annotation must be present at the time the pod is created. Adding it to an existing pod has no effect. - -## Additional resources - -For help configuring $[prodname] IPAM, see [Configuring the $[prodname] CNI Plugins](../../reference/component-resources/configuration.mdx). diff --git a/calico-enterprise_versioned_docs/version-3.19-2/networking/ipam/migrate-pools.mdx b/calico-enterprise_versioned_docs/version-3.19-2/networking/ipam/migrate-pools.mdx deleted file mode 100644 index 5e53622541..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/networking/ipam/migrate-pools.mdx +++ /dev/null @@ -1,337 +0,0 @@ ---- -description: Migrate pods from one IP pool to another on a running cluster without network disruption. ---- - -# Migrate from one IP pool to another - -import DetermineIpam from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_determine-ipam.mdx'; -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - -## Big picture - -Migrate pods from one IP pool to another on a running cluster without network disruption. - -## Value - -Pods are assigned IP addresses from IP pools that you configure in $[prodname]. As the number of pods increase, you may need to increase the number of addresses available for pods to use. Or, you may need to move pods from a CIDR that was used by mistake. $[prodname] lets you migrate from one IP pool to another one on a running cluster without network disruption. - -## Concepts - -### IP pools and cluster CIDRs - -$[prodname] supports using multiple disjoint IP pool CIDRs within the cluster. However, Kubernetes expects that all pods have addresses within the same cluster CIDR. This means that although it is technically feasible to create an IP pool outside of the cluster CIDR, we do not recommend it. Pods allocated addresses outside of the Kubernetes cluster CIDR will lose network connectivity. - -## Before you begin... 
- -**Verify that you are using $[prodname] IPAM**. - - - -**Verify orchestrator support for changing the pod network CIDR**. - -Although Kubernetes supports changing the pod network CIDR, not all orchestrators do. For example, OpenShift does not support this feature. - -## How to - -### Migrate from one IP pool to another - -Follow these steps to migrate pods from one IP pool to another pool. - -:::note - -If you follow these steps, existing pod connectivity will not be affected. (If you delete the old IP pool before you create and verify the new pool, existing pods will be affected.) When pods are deleted, applications may be temporarily unavailable (depending on the type of application); plan accordingly. - -::: - -1. Add a new IP pool. - - :::note - - It is highly recommended that your Calico IP pools are within the Kubernetes cluster CIDR. If pods IPs are allocated from outside of the Kubernetes cluster CIDR, some traffic flows may have NAT applied unnecessarily causing unexpected behavior. - - ::: - -1. Disable the old IP pool. - - :::note - - Disabling an IP pool only prevents new IP address allocations; it does not affect the networking of existing pods. - - ::: - -1. Delete pods from the old IP pool. This includes any new pods that may have been created with the old IP pool prior to disabling the pool. - -1. Verify that new pods get an address from the new IP pool. - -1. Delete the old IP pool. - -## Tutorial - -In the following example, we created a Kubernetes cluster using **kubeadm**. But the IP pool CIDR we configured (192.168.0.0/16) doesn't match the -Kubernetes cluster CIDR. Let's change the CIDR to **10.0.0.0/16**, which for the purposes of this example falls within the cluster CIDR. - - - - -Let’s run `kubectl get ippools` to see the IP pool, `default-ipv4-ippool`. - -``` -NAME CREATED AT -default-ipv4-ippool 2024-03-28T16:14:28Z -``` - -### Step 1: Add a new IP pool - -We add a new `IPPool` with the CIDR range, **10.0.0.0/16**. - -Add the following to your `default` installation, below the existing IP pool. - -```bash -kubectl edit installation default -``` - -```yaml -- name: new-ipv4-pool - cidr: 10.0.0.0/16 - encapsulation: IPIP -``` - -Let’s verify the new IP pool. - -```bash -kubectl get ippools -``` - -``` -NAME CREATED AT -default-ipv4-ippool 2024-03-28T16:14:28Z -test-pool 2024-03-28T18:30:15Z -``` - -### Step 2: Disable the old IP pool - -Edit the `default` installation, and modify the `default-ipv4-ippool` so it no longer selects -any nodes. This prevents IP allocation from the pool. - -```bash -kubectl edit installation default -``` - -```yaml -- name: 192.168.0.0-16 - allowedUses: - - Workload - - Tunnel - blockSize: 26 - cidr: 192.168.0.0/16 - disableBGPExport: false - encapsulation: VXLANCrossSubnet - natOutgoing: Enabled -- nodeSelector: all() -+ nodeSelector: "!all()" -``` - -Apply the changes. - -Remember, disabling a pool only affects new IP allocations; networking for existing pods is not affected. - -### Step 3: Delete pods from the old IP pool - -Next, we delete all of the existing pods from the old IP pool. (In our example, **coredns** is our only pod; for multiple pods you would trigger a deletion for all pods in the cluster.) - -```bash -kubectl delete pod -n kube-system coredns-6f4fd4bdf-8q7zp -``` - -### Step 4: Verify that new pods get an address from the new IP pool - -1. Create a test namespace and nginx pod. - - ```bash - kubectl create ns ippool-test - ``` - -1. Create an nginx pod. 
- - ```bash - kubectl -n ippool-test create deployment nginx --image nginx - ``` - -1. Verify that the new pod gets an IP address from the new range. - - ```bash - kubectl -n ippool-test get pods -l app=nginx -o wide - ``` - -1. Clean up the `ippool-test` namespace. - - ```bash - kubectl delete ns ippool-test - ``` - -### Step 5: Delete the old IP pool - -Now that you've verified that pods are getting IPs from the new range, you can safely delete the old pool. To do this, -remove it from the default installation, leaving only the newly create IP pool. - - - - -Let’s run `calicoctl get ippool -o wide` to see the IP pool, `default-ipv4-ippool`. - -``` -NAME CIDR NAT IPIPMODE VXLANMODE DISABLED -default-ipv4-ippool 192.168.0.0/16 true Always Never false -``` - -When we run `calicoctl get wep --all-namespaces`, we see that a pod is created using the default range (192.168.52.130/32). - -``` -NAMESPACE WORKLOAD NODE NETWORKS INTERFACE -kube-system coredns-6f4fd4bdf-8q7zp vagrant 192.168.52.130/32 cali800a63073ed -``` - -Let’s get started changing this pod to the new IP pool (10.0.0.0/16). - -### Step 1: Add a new IP pool - -We add a new `IPPool` with the CIDR range, **10.0.0.0/16**. - -```yaml -apiVersion: projectcalico.org/v3 -kind: IPPool -metadata: - name: new-pool -spec: - cidr: 10.0.0.0/16 - ipipMode: Always - natOutgoing: true -``` - -Let’s verify the new IP pool. - -```bash -calicoctl get ippool -o wide -``` - -``` -NAME CIDR NAT IPIPMODE DISABLED -default-ipv4-ippool 192.168.0.0/16 true Always false -new-pool 10.0.0.0/16 true Always false -``` - -### Step 2: Disable the old IP pool - -List the existing IP pool definition. - -```bash -calicoctl get ippool -o yaml > pools.yaml - -``` - -```yaml -apiVersion: projectcalico.org/v3 -items: - - apiVersion: projectcalico.org/v3 - kind: IPPool - metadata: - name: default-ipv4-ippool - spec: - cidr: 192.0.0.0/16 - ipipMode: Always - natOutgoing: true - - apiVersion: projectcalico.org/v3 - kind: IPPool - metadata: - name: new-pool - spec: - cidr: 10.0.0.0/16 - ipipMode: Always - natOutgoing: true -``` - -Edit pools.yaml. - -Disable this IP pool by setting: `disabled: true` - -```yaml -apiVersion: projectcalico.org/v3 -kind: IPPool -metadata: - name: default-ipv4-ippool -spec: - cidr: 192.0.0.0/16 - ipipMode: Always - natOutgoing: true - disabled: true -``` - -Apply the changes. - -Remember, disabling a pool only affects new IP allocations; networking for existing pods is not affected. - -```bash -kubectl apply -f pools.yaml -``` - -Verify the changes. - -```bash -calicoctl get ippool -o wide -``` - -``` -NAME CIDR NAT IPIPMODE DISABLED -default-ipv4-ippool 192.168.0.0/16 true Always true -new-pool 10.0.0.0/16 true Always false -``` - -### Step 3: Delete pods from the old IP pool - -Next, we delete all of the existing pods from the old IP pool. (In our example, **coredns** is our only pod; for multiple pods you would trigger a deletion for all pods in the cluster.) - -```bash -kubectl delete pod -n kube-system coredns-6f4fd4bdf-8q7zp -``` - -### Step 4: Verify that new pods get an address from the new IP pool - -1. Create a test namespace and nginx pod. - - ```bash - kubectl create ns ippool-test - ``` - -1. Create an nginx pod. - - ```bash - kubectl -n ippool-test create deployment nginx --image nginx - ``` - -1. Verify that the new pod gets an IP address from the new range. - - ```bash - kubectl -n ippool-test get pods -l app=nginx -o wide - ``` - -1. Clean up the `ippool-test` namespace. 
- - ```bash - kubectl delete ns ippool-test - ``` - -### Step 5: Delete the old IP pool - -Now that you've verified that pods are getting IPs from the new range, you can safely delete the old pool. - -```bash -kubectl delete ippool default-ipv4-ippool -``` - - - - -## Additional resources - -- [IP pools reference](../../reference/resources/ippool.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/networking/ipam/use-specific-ip.mdx b/calico-enterprise_versioned_docs/version-3.19-2/networking/ipam/use-specific-ip.mdx deleted file mode 100644 index a456ef9a28..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/networking/ipam/use-specific-ip.mdx +++ /dev/null @@ -1,65 +0,0 @@ ---- -description: Specify the IP address for a pod instead of allowing Calico Enterprise to automatically choose one. ---- - -# Use a specific IP address with a pod - -import DetermineIpam from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_determine-ipam.mdx'; - -## Big picture - -Choose the IP address for a pod instead of allowing $[prodname] to choose automatically. - -## Value - -Some applications require the use of stable IP addresses. Also, you may want to create entries in external DNS servers that point directly to pods, and this requires static IPs. - -## Concepts - -### Kubernetes pod CIDR - -The **Kubernetes pod CIDR** is the range of IPs Kubernetes expects pod IPs to be assigned from. It is defined for the entire cluster and is used by various Kubernetes components to determine whether an IP belongs to a pod. For example, kube-proxy treats traffic differently if an IP is from a pod than if it is not. All pod IPs must be in the CIDR range for Kubernetes to function correctly. - -**IP Pools** - -IP pools are ranges of IP addresses from which $[prodname] assigns pod IPs. Static IPs must be in an IP pool. - -## Before you begin... - -Your cluster must be using Calico IPAM to use this feature. - - - -## How to - -Annotate the pod with cni.projectcalico.org/ipAddrs set to a list of IP addresses to assign, enclosed in brackets. For example: - -``` - "cni.projectcalico.org/ipAddrs": "[\"192.168.0.1\", \"2001:db8::1\"]" -``` - -Note the use of the escaped `\"` for the inner double quotes around the addresses. - -The address must be within a configured $[prodname] IP pool and not currently in use. The annotation must be present when the pod is created; adding it later has no effect. - -Note that currently only a single IP address per IP family is supported per-pod using this annotation. - -### Reserving IPs for manual assignments - -The `cni.projectcalico.org/ipAddrs` annotation requires the IP address to be within an IP pool. This means that, -by default, $[prodname] may decide to use the IP address that you select for another workload or for an internal -tunnel address. To prevent this, there are several options: - -- To reserve a whole IPPool for manual allocations, you can set its node selector in the [IP pool](../../reference/resources/ippool.mdx) to `"!all()"`. Since the `!all()` - cannot match any nodes, the IPPool will not be used for any automatic assignments. - -- To reserve part of a pool, you can create an [IP reservation](../../reference/resources/ipreservation.mdx). This allows for certain IPs to be reserved so - that Calico IPAM will not use them automatically. However, manual assignments (using the annotation) can still use - IPs that are "reserved". 
- -- To prevent $[prodname] from using IPs from a certain pool for internal IPIP and/or VXLAN tunnel addresses, you - can set the `allowedUses` field on the [IP Pool](../../reference/resources/ippool.mdx) to `["Workload"]`. - -## Additional resources - -For help configuring $[prodname] CNI and $[prodname] IPAM, see [Configuring the $[prodname] CNI Plugins](../../reference/component-resources/configuration.mdx). diff --git a/calico-enterprise_versioned_docs/version-3.19-2/networking/training/about-kubernetes-networking.mdx b/calico-enterprise_versioned_docs/version-3.19-2/networking/training/about-kubernetes-networking.mdx deleted file mode 100644 index e2571b6179..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/networking/training/about-kubernetes-networking.mdx +++ /dev/null @@ -1,134 +0,0 @@ ---- -description: Learn network behaviors of the Kubernetes network model. ---- - -# Kubernetes network model - -{/* Do not change the canonical_url above *or the title*. Content is shared with Marketing and is used for SEO purposes. If you change the title, it will screw up Marketing metrics. */} - -:::note - -This guide provides educational material that is not specific to $[prodname]. - -::: - -Kubernetes defines a network model that helps provide simplicity and consistency across a range of networking -environments and network implementations. The Kubernetes network model provides the foundation for understanding how -containers, pods, and services within Kubernetes communicate with each other. This guide explains the key concepts and -how they fit together. - -In this guide you will learn: - -- The fundamental network behaviors the Kubernetes network model defines. -- How Kubernetes works with a variety of different network implementations. -- What Kubernetes Services are. -- How DNS works within Kubernetes. -- What "NAT outgoing" is and when you would want to use it. -- What "dual stack" is. - -## The Kubernetes network model - -The Kubernetes network model specifies: - -- Every pod gets its own IP address -- Containers within a pod share the pod IP address and can communicate freely with each other -- Pods can communicate with all other pods in the cluster using pod IP addresses (without - [NAT](about-networking.mdx)) -- Isolation (restricting what each pod can communicate with) is defined using network policies - -As a result, pods can be treated much like VMs or hosts (they all have unique IP addresses), and the containers within -pods very much like processes running within a VM or host (they run in the same network namespace and share an IP -address). This model makes it easier for applications to be migrated from VMs and hosts to pods managed by Kubernetes. -In addition, because isolation is defined using network policies rather than the structure of the network, the network -remains simple to understand. This style of network is sometimes referred to as a "flat network". - -Note that, although very rarely needed, Kubernetes does also support the ability to map host ports through to pods, or -to run pods directly within the host network namespace sharing the host's IP address. - -## Kubernetes network implementations - -Kubernetes built in network support, kubenet, can provide some basic network connectivity. However, it is more common to -use third party network implementations which plug into Kubernetes using the CNI (Container Network Interface) API. 
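-
-If you are curious which CNI plugins a particular cluster is using, one common place to look is the CNI configuration
-directory on a node. The paths and file name below are illustrative assumptions (a conventional default directory and a
-typical Calico file name), not something mandated by Kubernetes:
-
-```bash
-# Illustrative sketch: inspect the CNI configuration the kubelet has been pointed at.
-# /etc/cni/net.d is the conventional default directory; 10-calico.conflist assumes a Calico-based install.
-ls /etc/cni/net.d/
-cat /etc/cni/net.d/10-calico.conflist
-```
-
-In a Calico-based installation, that file typically declares a network plugin of type `calico` alongside an IPAM plugin
-of type `calico-ipam`, corresponding to the two plugin roles described next.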
-
-There are lots of different kinds of CNI plugins, but the two main ones are:
-
-- network plugins, which are responsible for connecting pods to the network
-- IPAM (IP Address Management) plugins, which are responsible for allocating pod IP addresses.
-
-$[prodname] provides both network and IPAM plugins, but can also integrate and work seamlessly with some other CNI
-plugins, including AWS, Azure, and Google network CNI plugins, and the host local IPAM plugin. This flexibility allows
-you to choose the best networking options for your specific needs and deployment environment. You can read more about
-this in the $[prodname] [determine best networking option](../determine-best-networking.mdx)
-guide.
-
-## Kubernetes Services
-
-Kubernetes [Services](https://kubernetes.io/docs/concepts/services-networking/service/) provide a way of abstracting access to a group
-of pods as a network service. The group of pods is usually defined using a [label selector](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels).
-Within the cluster, the network service is usually represented as a virtual IP address, and kube-proxy load balances connections to the virtual IP
-across the group of pods backing the service. The virtual IP is discoverable through Kubernetes DNS. The DNS name and
-virtual IP address remain constant for the lifetime of the service, even though the pods backing the service may be
-created or destroyed, and the number of pods backing the service may change over time.
-
-Kubernetes Services can also define how a service is accessed from outside of the cluster, for example using:
-
-- a node port, where the service can be accessed via a specific port on every node
-- or a load balancer, where a network load balancer provides a virtual IP address through which the service can be
-  accessed from outside the cluster.
-
-Note that when using $[prodname] in on-prem deployments you can also [advertise service IP addresses](../configuring/advertise-service-ips.mdx),
-allowing services to be conveniently accessed without going via a node port or load balancer.
-
-## Kubernetes DNS
-
-Each Kubernetes cluster provides a DNS service. Every pod and every service is discoverable through the Kubernetes DNS
-service.
-
-For example:
-
-- Service: `my-svc.my-namespace.svc.cluster-domain.example`
-- Pod: `pod-ip-address.my-namespace.pod.cluster-domain.example`
-- Pod created by a deployment exposed as a service:
-  `pod-ip-address.deployment-name.my-namespace.svc.cluster-domain.example`.
-
-The DNS service is implemented as a Kubernetes Service that maps to one or more DNS server pods (usually CoreDNS), which
-are scheduled just like any other pod. Pods in the cluster are configured to use the DNS service, with a DNS search list
-that includes the pod's own namespace and the cluster's default domain.
-
-This means that if there is a service named `foo` in Kubernetes namespace `bar`, then pods in the same namespace can
-access the service as `foo`, and pods in other namespaces can access the service as `foo.bar`.
-
-Kubernetes supports a rich set of options for controlling DNS in different scenarios. You can read more about these in
-the Kubernetes guide [DNS for Services and Pods](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/).
-
-## NAT outgoing
-
-The Kubernetes network model specifies that pods must be able to communicate with each other directly using pod IP
-addresses.
But it does not mandate that pod IP addresses are routable beyond the boundaries of the cluster. Many -Kubernetes network implementations use [overlay networks](about-networking.mdx#overlay-networks). -Typically for these deployments, when a pod initiates a connection to an IP address outside of the cluster, the node -hosting the pod will SNAT (Source Network Address Translation) map the source address of the packet from the pod IP to -the node IP. This enables the connection to be routed across the rest of the network to the destination (because the -node IP is routable). Return packets on the connection are automatically mapped back by the node replacing the node IP -with the pod IP before forwarding the packet to the pod. - -When using $[prodname], depending on your environment, you can generally choose whether you prefer to run an -overlay network, or prefer to have fully routable pod IPs. You can read more about this in the $[prodname] -[determine best networking option](../determine-best-networking.mdx) guide. $[prodname] also -allows you to [configure outgoing NAT](../configuring/workloads-outside-cluster.mdx) for specific IP address -ranges if more granularity is desired. - -## Dual stack - -If you want to use a mix of IPv4 and IPv6 then you can enable Kubernetes [dual-stack](https://kubernetes.io/docs/concepts/services-networking/dual-stack/) mode. When enabled, all -pods will be assigned both an IPv4 and IPv6 address, and Kubernetes Services can specify whether they should be exposed -as IPv4 or IPv6 addresses. - -## Additional resources - -- [The Kubernetes Network Model](https://kubernetes.io/docs/concepts/cluster-administration/networking/#the-kubernetes-network-model) -- [Video: Everything you need to know about Kubernetes networking on AWS](https://www.projectcalico.org/everything-you-need-to-know-about-kubernetes-pod-networking-on-aws/) -- [Video: Everything you need to know about Kubernetes networking on Azure](https://www.projectcalico.org/everything-you-need-to-know-about-kubernetes-networking-on-azure/) -- [Video: Everything you need to know about Kubernetes networking on Google Cloud](https://www.projectcalico.org/everything-you-need-to-know-about-kubernetes-networking-on-google-cloud/) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/networking/training/about-networking.mdx b/calico-enterprise_versioned_docs/version-3.19-2/networking/training/about-networking.mdx deleted file mode 100644 index 376804a111..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/networking/training/about-networking.mdx +++ /dev/null @@ -1,166 +0,0 @@ ---- -description: Learn about networking layers, packets, IP addressing, and routing. ---- - -# Networking overview - -:::note - -This guide provides educational material that is not specific to $[prodname]. - -::: - -You can get up and running with $[prodname] by following any of the $[prodname] [install guides](../../getting-started/index.mdx) - without needing to be a networking expert. $[prodname] hides the complexities for -you. However, if you would like to learn more about networking so you can better understand what is happening under the -covers, this guide provides a short introduction to some of the key fundamental networking concepts for anyone who is -not already familiar with them. - -In this guide you will learn: - -- The terms used to described different layers of the network. -- The anatomy of a network packet. -- What MTU is and why it makes a difference. 
-- How IP addressing, subnets, and IP routing works. -- What an overlay network is. -- What DNS and NAT are. - -## Network layers - -The process of sending and receiving data over a network is commonly categorized into 7 layers (referred to as the [OSI model](https://en.wikipedia.org/wiki/OSI_model)). The layers are -typically abbreviated as L1 - L7. You can think of data as passing through each of these layers in turn as it is sent or -received from an application, with each layer being responsible for a particular part of the processing required to -send or receive the data over the network. - -![OSI network layers diagram](/img/calico-enterprise/osi-network-layers.svg) - -In a modern enterprise or public cloud network, the layers commonly map as follows: - -- L5-7: all the protocols most application developers are familiar with. e.g. HTTP, FTP, SSH, SSL, DNS. -- L4: TCP or UDP, including source and destination ports. -- L3: IP packets and IP routing. -- L2: Ethernet packets and Ethernet switching. - -## Anatomy of a network packet - -When sending data over the network, each layer in the network stack adds its own header containing the control/metadata -the layer needs to process the packet as it traverses the network, passing the resulting packet on to the next -layer of the stack. In this way the complete packet is produced, which includes all the control/metadata required by -every layer of the stack, without any layer understanding the data or needing to process the control/metadata of -adjacent network layers. - -![Anatomy of a network packet](/img/calico-enterprise/anatomy-of-a-packet.svg) - -## IP addressing, subnets and IP routing - -The L3 network layer introduces IP addresses and typically marks the boundary between the part of networking that -application developers care about, and the part of networking that network engineers care about. In particular -application developers typically regard IP addresses as the source and destination of the network traffic, but have much -less of a need to understand L3 routing or anything lower in the network stack, which is more the domain of network -engineers. - -There are two variants of IP addresses: IPv4 and IPv6. - -- IPv4 addresses are 32 bits long and the most commonly used. They are typically represented as 4 bytes in decimal (each - 0-255) separated by dots. e.g. `192.168.27.64`. There are several ranges of IP addresses that are reserved as - "private", that can only be used within local private networks, are not routable across the internet. These can be - reused by enterprises as often as they want to. In contrast "public" IP addresses are globally unique across the whole - of the internet. As the number of network devices and networks connected to the internet has grown, public IPv4 - addresses are now in short supply. -- IPv6 addresses are 128 bits long and designed to overcome the shortage of IPv4 address space. They are typically - represented by 8 groups of 4 digit hexadecimal numbers. e.g. `1203:8fe0:fe80:b897:8990:8a7c:99bf:323d`. Due to the 128 - bit length, there's no shortage of IPv6 addresses. However, many enterprises have been slow to adopt IPv6, so for now - at least, IPv4 remains the default for many enterprise and data center networks. - -Groups of IP addresses are typically represented using [CIDR notation](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) that consists of an IP address and number of -significant bits on the IP address separated by a `/`. 
For example, `192.168.27.0/24` represents the group of 256 IP -addresses from `192.168.27.0` to `192.168.27.255`. - -A group of IP addresses within a single L2 network is referred to as a subnet. Within a subnet, packets can be sent -between any pair of devices as a single network hop, based solely on the L2 header (and footer). - -To send packets beyond a single subnet requires L3 routing, with each L3 network device (router) being responsible for -making decisions on the path to send the packet based on L3 routing rules. Each network device acting as a router has -routes that determine where a packet for a particular CIDR should be sent next. So for example, in a Linux system, a -route of `10.48.0.128/26 via 10.0.0.12 dev eth0` indicates that packets with destination IP address in `10.48.0.128/26` -should be routed to a next network hop of `10.0.0.12` over the `eth0` interface. - -Routes can be configured statically by an administrator, or programmed dynamically using routing protocols. When using -routing protocols each network device typically needs to be configured to tell it which other network devices it should -be exchanging routes with. The routing protocol then handles programming the right routes across the whole of the -network as devices are added or removed, or network links come in or out of service. - -One common routing protocol used in large enterprise and data center networks is [BGP](https://en.wikipedia.org/wiki/Border_Gateway_Protocol). BGP is one of the main protocols that powers -the internet, so scales incredibly well, and is very widely supported by modern routers. - -## Overlay networks - -An overlay network allows network devices to communicate across an underlying network (referred to as the underlay) -without the underlay network having any knowledge of the devices connected to the overlay network. From the point of -view of the devices connected to the overlay network, it looks just like a normal network. There are many different -kinds of overlay networks that use different protocols to make this happen, but in general they share the same common -characteristic of taking a network packet, referred to as the inner packet, and encapsulating it inside an outer network -packet. In this way the underlay sees the outer packets without needing to understand how to handle the inner packets. - -How the overlay knows where to send packets varies by overlay type and the protocols they use. Similarly exactly how the -packet is wrapped varies between different overlay types. In the case of VXLAN for example, the inner packet is wrapped -and sent as UDP in the outer packet. - -![Anatomy of an overlay network packet](/img/calico-enterprise/anatomy-of-an-overlay-packet.svg) - -Overlay networks have the advantage of having minimal dependencies on the underlying network infrastructure, but have -the downsides of: - -- having a small performance impact compared to non-overlay networking, which you might want to avoid if running - network intensive workloads -- workloads on the overlay are not easily addressable from the rest of the network. so NAT gateways or load balancers - are required to bridge between the overlay and the underlay network for any ingress to, or egress from, the overlay. - -$[prodname] networking options are exceptionally flexible, so in general you can choose whether you prefer -$[prodname] to provide an overlay network, or non-overlay network. 
You can read more about this in the $[prodname]
-[determine best networking option](../determine-best-networking.mdx) guide.
-
-## DNS
-
-While the underlying network packet flow across the network is determined using IP addresses, users and applications
-typically want to use well-known names to identify network destinations that remain consistent over time, even if the
-underlying IP addresses change. For example, to map `google.com` to `216.58.210.46`. This translation from name to IP
-address is handled by [DNS](https://en.wikipedia.org/wiki/Domain_Name_System). DNS runs on top of the base networking described so far. Each device connected to a network is typically configured
-with the IP addresses of one or more DNS servers. When an application wants to connect to a domain name, a DNS message is
-sent to the DNS server, which then responds with information about which IP address(es) the domain name maps to. The
-application can then initiate its connection to the chosen IP address.
-
-## NAT
-
-Network Address Translation ([NAT](https://en.wikipedia.org/wiki/Network_address_translation)) is the process of mapping an IP address in a packet
-to a different IP address as the packet passes through the device performing the NAT. Depending on the use case, NAT can
-apply to the source or destination IP address, or to both addresses.
-
-One common use case for NAT is to allow devices with private IP addresses to talk to devices with public IP addresses across
-the internet. For example, if a device with a private IP address attempts to connect to a public IP address, then the
-router at the border of the private network will typically use SNAT (Source Network Address Translation) to map the
-private source IP address of the packet to the router's own public IP address before forwarding it on to the internet.
-The router then maps response packets coming in the opposite direction back to the original private IP address, so
-packets flow end-to-end in both directions, with neither source nor destination being aware the mapping is happening. The
-same technique is commonly used to allow devices connected to an overlay network to connect with devices outside of the
-overlay network.
-
-Another common use case for NAT is load balancing. In this case the load balancer performs DNAT (Destination Network
-Address Translation) to change the destination IP address of the incoming connection to the IP address of the chosen
-device it is load balancing to. The load balancer then reverses this NAT on response packets so neither the source nor the
-destination device is aware the mapping is happening.
-
-## MTU
-
-The Maximum Transmission Unit ([MTU](https://en.wikipedia.org/wiki/Maximum_transmission_unit)) of a network link is the maximum packet size that
-can be sent across that network link. It is common for all links in a network to be configured with the same MTU to
-reduce the need to fragment packets as they traverse the network, which can significantly lower the performance of the
-network. In addition, TCP tries to learn path MTUs, and adjust packet sizes for each network path based on the smallest
-MTU of any of the links in the network path. When an application tries to send more data than can fit in a single
-packet, TCP will fragment the data into multiple TCP segments, so the MTU is not exceeded.
-
-Most networks have links with an MTU of 1,500 bytes, but some networks support MTUs of 9,000 bytes.
In a Linux system, -larger MTU sizes can result in lower CPU being used by the Linux networking stack when sending large amounts of data, -because it has to process fewer packets for the same amount of data. Depending on the network interface hardware being -used, some of this overhead may be offloaded to the network interface hardware, so the impact of small vs large MTU -sizes varies from device to device. diff --git a/calico-enterprise_versioned_docs/version-3.19-2/networking/training/index.mdx b/calico-enterprise_versioned_docs/version-3.19-2/networking/training/index.mdx deleted file mode 100644 index 2b9b0ca407..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/networking/training/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: Learn the basics of Kubernetes networking and Calico Enterprise networking. -hide_table_of_contents: true ---- - -# Networking basics - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/observability/alerts.mdx b/calico-enterprise_versioned_docs/version-3.19-2/observability/alerts.mdx deleted file mode 100644 index 4a48af4eb6..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/observability/alerts.mdx +++ /dev/null @@ -1,230 +0,0 @@ ---- -description: Manage alerts and events for Calico Enterprise features. ---- - -# Manage alerts - -## Big picture - -Manage alerts and alert events for $[prodname] features. - -## Value - -You can configure alerts for many $[prodname] features. Alerts are critical to teams for different reasons, for example: - -- **Visibility and troubleshooting** - alerts may indicate infrastructure problems, application bugs, or performance degradation -- **Security** - alerts on suspicious traffic or workload behavior may indicate a compromise or malicious actor - -You can manage alerts and alert events in the web console, or using the CLI. $[prodname] also provides alert templates -for common tasks that you can rename and edit to suit your own needs. - -## Before you begin - -**Recommended** - -We recommend turning down the aggregation level for flow logs to ensure that you see pod-specific results. $[prodname] aggregates flow logs over the external IPs for allowed traffic, and alert events will not provide pod-specific results (unless the traffic is denied by policy). - -To turn down aggregation on flow logs, go to [FelixConfiguration](../reference/resources/felixconfig.mdx) and set the field, **flowLogsFileAggregationKindForAllowed** to **1**. - -## How To - -- [Manage alerts in the web console](#manage-alerts-in-manager-ui) -- [Manage alerts using CLI](#manage-alerts-using-cli) - -### Manage alerts in the web console - -You can view alert events in the web console in several places: the **Alerts** page, **Service Graph**, and the **Kibana** dashboard. - -Click **Activity**, **Alerts** to follow along. - -**Alerts page** - -The Alerts page lists **alert events** that are generated by alerts that you’ve configured. (A list of Alerts can be found by clicking the **Alert configuration** icon). - -![alerts-list](/img/calico-enterprise/alerts-list.png) - -You can create alerts for many $[prodname] features. Although the following list of features is not exhaustive and will grow, you get a sense of the range of alerts that can be displayed on this page. 
- -- $[prodname] logs from Elasticsearch (flow, dns, audit, bgp, L7) -- Honeypods -- Deep packet inspection (DPI) -- Threat defense (suspicious IPs, suspicious domains) -- Web Application Firewall (WAF) - -Note the following: - -- The alert event list will be empty until you configure alerts for a feature -- You can dismiss alert events from view using the checkboxes or bulk action -- The list may contain alert events that are identical or nearly identical. For nearly identical events, you can see differences in the `record` field when you expand the event. -- Because alert events share the same interface, fields that do not apply to the alert are noted by “N/A” -- You can filter alert events by Type. - - ![filter-alerts](/img/calico-enterprise/filter-alerts.png) - - Note these types: - - - **Custom** - filters legacy global alert events that were created before v3.12 - - **Global Alert** - includes alerts for $[prodname] Elasticsearch logs (audit, dns, flow, L7, WAF) - -**Add/edit/delete alerts** - -To manage alerts, click the **Alerts Configuration** icon. - -The following alert is an example of a global alert in the list view. This sample alert generates alert events when there are 100 flows in the cluster in the last 5 mins. (The YAML version of this alert is shown in the section on using the [CLI](#examples).) - -![alert-list-view](/img/calico-enterprise/alert-list-view.png) - -To create a new alert, click the **New** drop-down menu, and select **Blank**. - -Global alerts use a domain-specific query language to select records from a data set to use in the alert. You can also select/omit specific namespaces. - -![alert-example-ui](/img/calico-enterprise/alert-example-ui.png) - -For help with fields on this page, see [GlobalAlert](../reference/resources/globalalert.mdx). - -**Alert templates** - -From the **New** drop-down menu, select **Template**. - -![alert-template](/img/calico-enterprise/alert-template.png) - -The template list contains alerts for common tasks created by $[prodname]. With templates you can: - -- Update and rename an existing template -- Create a new template from scratch -- Create a new alert and save it as a template - -### Manage alerts using CLI - -This section provides examples of how to create and delete global alerts using `kubectl` and YAML files. - -**Create a global alert** - -1. Create a YAML file with one or more alerts. -1. Apply the alert to your cluster. - - ```bash - kubectl apply -f - ``` - -1. Wait until the alert runs and check the status. - - ```bash - kubectl get globalalert -o yaml - ``` - -1. In the web console, go to the **Alerts** page to view alert events. - -### Examples - -The following alert generates alert events when there are 100 flows in the cluster in the last 5 mins. - -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalAlert -metadata: - name: example-flows -spec: - description: '100 flows Example' - summary: 'Flows example ${count} > 100' - severity: 100 - dataSet: flows - metric: count - condition: gt - threshold: 100 -``` - -The following alert generates alert events when there is ssh traffic in the default namespace. 
- -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalAlert -metadata: - name: network.ssh -spec: - description: 'ssh flows to default namespace' - summary: '[flows] ssh flow in default namespace detected from ${source_namespace}/${source_name_aggr}' - severity: 100 - period: 10m - lookback: 10m - dataSet: flows - query: proto='tcp' AND action='allow' AND dest_port='22' AND (source_namespace='default' OR dest_namespace='default') AND reporter=src - aggregateBy: [source_namespace, source_name_aggr] - field: num_flows - metric: sum - condition: gt - threshold: 0 -``` - -The following alert generates alert events when $[prodname] globalnetworksets are modified. - -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalAlert -metadata: - name: policy.globalnetworkset -spec: - description: 'Changed globalnetworkset' - summary: '[audit] [privileged access] change detected for ${objectRef.resource} ${objectRef.name}' - severity: 100 - period: 10m - lookback: 10m - dataSet: audit - query: (verb=create OR verb=update OR verb=delete OR verb=patch) AND "objectRef.resource"=globalnetworksets - aggregateBy: [objectRef.resource, objectRef.name] - metric: count - condition: gt - threshold: 0 -``` - -The following alert generates alert events for all flow from processes in the data set. - -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalAlert -metadata: - name: example-process-set-embedded -spec: - description: Generate alerts for all flows from processes in the set - summary: Generate alerts for all flows from processes in the set - severity: 100 - dataSet: flows - query: process_name IN {"python?", "*checkoutservice"} -``` - -The following example generates alert events for DNS lookups that are not in the allowed domain set. Because this set can be potentially large, a variable is used in the query string and is referenced in the substitutions list. - -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalAlert -metadata: - name: example-domain-set-variable -spec: - description: Generate alerts for all DNS lookups not in the domain set - summary: Generate alerts for all DNS lookups not in the domain set with variable - severity: 100 - dataSet: dns - query: qname NOTIN ${domains} - substitutions: - - name: domains - values: - - '*cluster.local' - - '?.mydomain.com' -``` - -**Delete a global alert** - -To delete a global alert and stop all alert event generation, use the following command. - -```bash -kubectl delete globalalert -``` - -## Additional resources - -- [GlobalAlert and templates](../reference/resources/globalalert.mdx) -- Alerts for [honeypods](../threat/honeypods.mdx) -- Alerts for [Deep packet inspection](../threat/deeppacketinspection.mdx) -- Alerts for [suspicious IPs](../threat/suspicious-ips.mdx) -- Alerts for [suspicious domains](../threat/suspicious-domains.mdx) -- Alerts for [Web Application Firewall](../threat/web-application-firewall.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/observability/elastic/archive-storage.mdx b/calico-enterprise_versioned_docs/version-3.19-2/observability/elastic/archive-storage.mdx deleted file mode 100644 index 89a0aecfe3..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/observability/elastic/archive-storage.mdx +++ /dev/null @@ -1,208 +0,0 @@ ---- -description: Archive logs to Syslog, Splunk, or Amazon S3 for maintaining compliance data. 
---- - -# Archive logs - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - -## Big picture - -Archive $[prodname] logs to SIEMs like Syslog, Splunk, or Amazon S3 to meet compliance storage requirements. - -## Value - -Archiving your $[prodname] Elasticsearch logs to storage services like Amazon S3, Syslog, or Splunk are reliable -options for maintaining and consolidating your compliance data long term. - -## Before you begin - -**Supported logs for export** - -- Syslog - flow, dns, idsevents, audit -- Splunk - flow, audit, dns -- Amazon S3 - l7, flow, dns, audit - -## How to - -:::note -Because $[prodname] and Kubernetes logs are integral to $[prodname] diagnostics, there is no mechanism to tune down the verbosity. To manage log verbosity, filter logs using your SIEM. -::: - - - - -1. Create an AWS bucket to store your logs. - You will need the bucket name, region, key, secret key, and the path in the following steps. - -2. Create a Secret in the `tigera-operator` namespace named `log-collector-s3-credentials` with the fields `key-id` and `key-secret`. - Example: - - ``` - kubectl create secret generic log-collector-s3-credentials \ - --from-literal=key-id= \ - --from-literal=key-secret= \ - -n tigera-operator - ``` - -3. Update the [LogCollector](../../reference/installation/api.mdx#logcollector) - resource named, `tigera-secure` to include an [S3 section](../../reference/installation/api.mdx#s3storespec) - with your information noted from above. - Example: - - ```yaml - apiVersion: operator.tigera.io/v1 - kind: LogCollector - metadata: - name: tigera-secure - spec: - additionalStores: - s3: - bucketName: - bucketPath: - region: - ``` - - This can be done during installation by editing the custom-resources.yaml - by applying it, or after installation by editing the resource with the command: - - ```bash - kubectl edit logcollector tigera-secure - ``` - - - - -1. Update the [LogCollector](../../reference/installation/api.mdx#logcollector) - resource named `tigera-secure` to include a [Syslog section](../../reference/installation/api.mdx#syslogstorespec) - with your syslog information. - Example: - ```yaml - apiVersion: operator.tigera.io/v1 - kind: LogCollector - metadata: - name: tigera-secure - spec: - additionalStores: - syslog: - # (Required) Syslog endpoint, in the format protocol://host:port - endpoint: tcp://1.2.3.4:514 - # (Optional) If messages are being truncated set this field - packetSize: 1024 - # (Required) Types of logs to forward to Syslog (must specify at least one option) - logTypes: - - Audit - - DNS - - Flows - - IDSEvents - ``` - This can be done during installation by editing the custom-resources.yaml by applying it or after installation by editing the resource with the command: - ```bash - kubectl edit logcollector tigera-secure - ``` -2. You can control which types of $[prodname] log data you would like to send to syslog. - The [Syslog section](../../reference/installation/api.mdx#syslogstorespec) - contains a field called `logTypes` which allows you to list which log types you would like to include. - The allowable log types are: - - - Audit - - DNS - - Flows - - IDSEvents - - Refer to the [Syslog section](../../reference/installation/api.mdx#syslogstorespec) for more details on what data each log type represents. - - :::note - - The log type `IDSEvents` is only supported for a cluster that has [LogStorage](../../reference/installation/api.mdx#logstorage) configured. 
It is because intrusion detection event data is pulled from the corresponding LogStorage datastore directly. - - ::: - - The `logTypes` field is a required, which means you must specify at least one type of log to export to syslog. - -**TLS configuration** - -3. You can enable TLS option for syslog forwarding by including the "encryption" option in the [Syslog section](../../reference/installation/api.mdx#syslogstorespec). - - ```yaml - apiVersion: operator.tigera.io/v1 - kind: LogCollector - metadata: - name: tigera-secure - spec: - additionalStores: - syslog: - # (Required) Syslog endpoint, in the format protocol://host:port - endpoint: tcp://1.2.3.4:514 - # (Optional) If messages are being truncated set this field - packetSize: 1024 - # (Optional) To Configure TLS mode - encryption: TLS - # (Required) Types of logs to forward to Syslog (must specify at least one option) - logTypes: - - Audit - - DNS - - Flows - - IDSEvents - ``` - -4. Using the self-signed CA with the field name tls.crt, create a configmap in the tigera-operator namespace named, syslog-ca. Example: - - :::note - - Skip this step if publicCA bundle is good enough to verify the server certificates. - - ::: - - ```bash - kubectl create configmap syslog-ca --from-file=tls.crt -n tigera-operator - ``` - - - - -**Support** In this release, only [Splunk Enterprise](https://www.splunk.com/en_us/products/splunk-enterprise.html) is supported. - -$[prodname] uses Splunk's **HTTP Event Collector** to send data to Splunk server. To copy the flow, audit, and dns logs to Splunk, follow these steps: - -1. Create a HTTP Event Collector token by following the steps listed in Splunk's documentation for your specific Splunk version. Here is the link to do this for [Splunk version 8.0.0](https://docs.splunk.com/Documentation/Splunk/8.0.0/Data/UsetheHTTPEventCollector). - -2. Create a Secret in the `tigera-operator` namespace named `logcollector-splunk-credentials` with the field `token`. - Example: - - ``` - kubectl create secret generic logcollector-splunk-credentials \ - --from-literal=token= \ - -n tigera-operator - ``` - -3. Update the - [LogCollector](../../reference/installation/api.mdx#logcollector) - resource named `tigera-secure` to include - a [Splunk section](../../reference/installation/api.mdx#splunkstorespec) - with your Splunk information. - Example: - - ```yaml - apiVersion: operator.tigera.io/v1 - kind: LogCollector - metadata: - name: tigera-secure - spec: - additionalStores: - splunk: - # Splunk HTTP Event Collector endpoint, in the format protocol://host:port - endpoint: https://1.2.3.4:8088 - ``` - - This can be done during installation by editing the custom-resources.yaml - by applying it or after installation by editing the resource with the command: - - ``` - kubectl edit logcollector tigera-secure - ``` - - - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/observability/elastic/audit-overview.mdx b/calico-enterprise_versioned_docs/version-3.19-2/observability/elastic/audit-overview.mdx deleted file mode 100644 index 519f559ada..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/observability/elastic/audit-overview.mdx +++ /dev/null @@ -1,45 +0,0 @@ ---- -description: Calico Enterprise audit logs provide data on changes to resources. ---- - -# Audit logs - -## Big picture - -$[prodname] audit logs provide security teams and auditors historical data of all changes to resources over time. 
- -## Concepts - -### Resources used in audit logs - -$[prodname] audit logs are enabled by default for the following resources: - -- Global networkpolicies -- Network policies -- Staged global networkpolicies -- Staged networkpolicies -- Staged Kubernetes network policies -- Global network sets -- Network sets -- Tiers -- Host endpoints - -### Audit logs in the web console - -$[prodname] audit logs are displayed in the Timeline dashboard in the web console. You can filter logs, and export data in .json or .yaml formats. - -![audit-logs](/img/calico-enterprise/audit-logs.png) - -Audit logs are also visible in the Kibana dashboard (indexed by, `tigera_secure_ee_audit_ee`), and are useful for looking at policy differences. - -![kibana-auditlogs](/img/calico-enterprise/kibana-auditlogs.png) - -Finally, audit logs provide the core data for compliance reports. - -![compliance-reports](/img/calico-enterprise/configuration-compliance.png) - -## Required next step - -**Kubernetes resources** are also used in compliance reports and other audit-related features, but they are not enabled by default. You must enable Kubernetes resources through the Kubernetes API server. If you miss this step, some compliance reports will not work, and audit trails will not provide a complete view to your security team. - -- [Enable Kubernetes audit logs](../kube-audit.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/observability/elastic/bgp.mdx b/calico-enterprise_versioned_docs/version-3.19-2/observability/elastic/bgp.mdx deleted file mode 100644 index 0d44aff407..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/observability/elastic/bgp.mdx +++ /dev/null @@ -1,22 +0,0 @@ ---- -description: Key/value pairs of BGP activity logs and how to construct queries. ---- - -# BGP logs - -$[prodname] pushes BGP activity logs to Elasticsearch. To view them, go to the Discovery view, and from the dropdown menu, select `tigera_secure_ee_bgp.*` to view the collected BIRD and BIRD6 logs. - -The following table details key/value pairs for constructing queries, including their Elasticsearch datatype. - -| Name | Datatype | Description | -| ------------ | -------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `logtime` | date | When the log was collected in UTC timestamp format. | -| `host` | keyword | The name of the node where log was collected. | -| `ip_version` | keyword | Contains one of the following values:
    ● IPv4: Log from BIRD process
    ● IPv6: Log from BIRD6 process | -| `message` | text | The message contained in the log. | - -Once a set of BGP logs has accumulated in Elasticsearch, you can perform many interesting queries. Depending on the field that you want to query, different techniques are required. For example: - -- To view BGP logs only for IPv4 or IPv6, query on the `ip_version` field and sort by `logtime` -- To see all logs from a specific node, query on the `host` field -- To view events in the cluster, query on the `message` field diff --git a/calico-enterprise_versioned_docs/version-3.19-2/observability/elastic/dns/dns-logs.mdx b/calico-enterprise_versioned_docs/version-3.19-2/observability/elastic/dns/dns-logs.mdx deleted file mode 100644 index 2d8956ab4f..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/observability/elastic/dns/dns-logs.mdx +++ /dev/null @@ -1,64 +0,0 @@ ---- -description: Key/value pairs of DNS activity logs and how to construct queries. ---- - -# Configure DNS logs - -$[prodname] pushes DNS activity logs to Elasticsearch, for DNS information that is obtained from [trusted DNS servers](../../../network-policy/domain-based-policy.mdx#trusted-dns-servers). The following table -details the key/value pairs in the JSON blob, including their -[Elasticsearch datatype](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-types.html). -This information should assist you in constructing queries. - -| Name | Datatype | Description | -| ------------------ | ----------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `start_time` | date | When the collection of the log began in UNIX timestamp format. | -| `end_time` | date | When the collection of the log concluded in UNIX timestamp format. | -| `type` | keyword | This field contains one of the following values:
    ● LOG: Indicates that this is a normal DNS activity log.
    ● UNLOGGED: Indicates that this log is reporting DNS activity that could not be logged in detail because of [DNSLogsFilePerNodeLimit](../../../reference/resources/felixconfig.mdx#spec). | -| `count` | long | When `type` is:
    ● LOG: How many DNS lookups there were, during the log collection interval, with details matching this log.
    ● UNLOGGED: The number of DNS responses that could not be logged in detail because of [DNSLogsFilePerNodeLimit](../../../reference/resources/felixconfig.mdx#spec). In this case none of the following fields are provided. | -| `client_ip` | ip | The IP address of the client pod. A null value indicates aggregation. | -| `client_name` | keyword |

    This field contains one of the following values:
    ● The name of the client pod.
    ● -: the name of the pod was aggregated. Check client_name_aggr for the pod name prefix.

    | -| `client_name_aggr` | keyword | The aggregated name of the client pod. | -| `client_namespace` | keyword | Namespace of the client pod. | -| `client_labels` | array of keywords | Labels applied to the client pod. With aggregation, the label name/value pairs that are common to all aggregated clients. | -| `qname` | keyword | The domain name that was looked up. | -| `qtype` | keyword | The type of the DNS query (e.g. A, AAAA). | -| `qclass` | keyword | The class of the DNS query (e.g. IN). | -| `rcode` | keyword | The result code of the DNS query response (e.g. NoError, NXDomain). | -| `rrsets` | nested | Detailed DNS query response data - see below. | -| `servers` | nested | Details of the DNS servers that provided this response. | -| `latency_count` | long | The number of lookups for which latency was measured. (The same as `count` above, unless some DNS requests were missed, or latency reporting is disabled - see `dnsLogsLatency` in the [FelixConfiguration resource](../../../reference/resources/felixconfig.mdx).) | -| `latency_mean` | long | Mean latency, in nanoseconds. | -| `latency_max` | long | Max latency, in nanoseconds. | - -Each nested `rrsets` object contains response data for a particular name and a particular type and -class of response information. Its key/value pairs are as follows. - -| Name | Datatype | Description | -| ------- | ----------------- | ----------------------------------------------------------------------------------------------------------------------- | -| `name` | keyword | The domain name that this information is for. | -| `type` | keyword | The type of the information (e.g. A, AAAA). | -| `class` | keyword | The class of the information (e.g. IN). | -| `rdata` | array of keywords | Array of data, for the name, of that type and class. For example, when `type` is A, this is an array of IPs for `name`. | - -Each nested `servers` object provides details of a DNS server that provided the information in the -containing log. Its key/value pairs are as follows. - -| Name | Datatype | Description | -| ----------- | ----------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `ip` | ip | The IP address of the DNS server. | -| `name` | keyword |

    This field contains one of the following values:
    ● The name of the DNS server pod.
    ● -: the DNS server is not a pod.

    | -| `name_aggr` | keyword |

    This field contains one of the following values:
    ● The aggregated name of the DNS server pod.
    ● pvt: the DNS server is not a pod. Its IP address belongs to a private subnet.
    ● pub: the DNS server is not a pod. Its IP address does not belong to a private subnet. It is probably on the public internet.

    | -| `namespace` | keyword | Namespace of the DNS server pod, or `-` if the DNS server is not a pod. | -| `labels` | array of keywords | Labels applied to the DNS server pod or host endpoint; empty if there are no labels or the DNS server is not a pod or host endpoint. | - -The `latency_*` fields provide information about the latency of the DNS lookups that contributed to -this log. For each successful DNS lookup $[prodname] measures the time between when the DNS -request was sent and when the corresponding DNS response was received. - -## Query DNS log fields - -After a set of DNS logs has accumulated in Elasticsearch, you can perform many interesting queries. For example, if you query on: - -- `qname`, you can find all of the DNS response information that was provided to clients trying to resolve a particular domain name - -- `rrsets.rdata`, you can find all of the DNS lookups that included a particular IP address in their response data. diff --git a/calico-enterprise_versioned_docs/version-3.19-2/observability/elastic/dns/filtering-dns.mdx b/calico-enterprise_versioned_docs/version-3.19-2/observability/elastic/dns/filtering-dns.mdx deleted file mode 100644 index c40c2fef01..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/observability/elastic/dns/filtering-dns.mdx +++ /dev/null @@ -1,63 +0,0 @@ ---- -description: Suppress DNS logs of low significance using filters. ---- - -# Filter DNS logs - -$[prodname] supports filtering out DNS logs based on user provided -configuration. Use filtering to suppress logs of low significance. - -## Configure DNS filtering - -DNS log filtering is configured through a ConfigMap in the `tigera-operator` -namespace. - -To enable DNS log filtering, follow these steps: - -1. Create a `filters` directory with a file named `dns` with the contents of - your desired filter using [Filter configuration files](#filter-configuration-files). - If you are also adding [flow filters](../flow/filtering.mdx) also add the `flow` file - to the directory. -1. Create the `fluentd-filters` ConfigMap in the `tigera-operator` namespace - with the following command. - ```bash - kubectl create configmap fluentd-filters -n tigera-operator --from-file=filters - ``` - -## Filter configuration files - -The filters defined by the ConfigMap are inserted into the fluentd configuration file. -The [upstream fluentd documentation](https://docs.fluentd.org/filter/grep) -describes how to write fluentd filters. The [DNS log schema](dns-logs.mdx) can be referred to -for the specification of the various fields you can filter based on. Remember to ensure -that the config file is properly indented in the ConfigMap. - -## Example 1: filter out cluster-internal lookups - -This example filters out lookups for domain names ending with ".cluster.local". More -logs could be filtered by adjusting the regular expression "pattern", or by adding -additional `exclude` blocks. - -``` - - @type grep - - key qname - pattern /\.cluster\.local$/ - - -``` - -## Example 2: keep logs only for particular domain names - -This example will filter out all logs _except_ those for domain names ending `.co.uk`. 
- -``` - - @type grep - - key qname - pattern /\.co\.uk$/ - - -``` diff --git a/calico-enterprise_versioned_docs/version-3.19-2/observability/elastic/dns/index.mdx b/calico-enterprise_versioned_docs/version-3.19-2/observability/elastic/dns/index.mdx deleted file mode 100644 index 179b4b03db..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/observability/elastic/dns/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: Configure and filter DNS logs. -hide_table_of_contents: true ---- - -# Manage DNS logs for Calico Enterprise - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/observability/elastic/flow/aggregation.mdx b/calico-enterprise_versioned_docs/version-3.19-2/observability/elastic/flow/aggregation.mdx deleted file mode 100644 index 0a4010e0e5..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/observability/elastic/flow/aggregation.mdx +++ /dev/null @@ -1,125 +0,0 @@ ---- -description: Configure flow log aggregation to reduce log volume and costs. ---- - -# Configure flow log aggregation - -## Big picture - -Configure flow log aggregation level to reduce log volume and costs. - -## Value - -Beyond using filtering to suppress flow logs, $[prodname] provides controls to aggregate flow logs. Although aggressive aggregation levels reduce -flow volume and costs, it can also reduce visibility into specific metadata of allowed and denied traffic. Review this article to see which level of -aggregation is suitable for your implementation. - -## Concepts - -### Volume and cost versus visibility - -$[prodname] enables flow log aggregation by default using default aggregation levels that balance log volume with comprehensive visibility. The -defaults assume that most users do not need to see pod IP information (due to the ephemeral nature of pod IP address allocation) for allowed traffic, -but provides additional details on denied traffic that is more likely to need investigation. However, it all depends on your deployment; -we recommend reviewing aggregation levels to understand what information gets grouped (and thus suppressed from view). - -### Aggregation levels - -For allowed flows, the default aggregation level is 2, and for denied flows the default aggregation level is 1. - -#### Level 0: no aggregation - -Create separate flow logs for each distinct 5-tuple (protocol, source and destination IP and port) observed. This level is not recommended -due to high log volumes. - -#### Level 1: aggregate source ports - -Aggregate flow data relating to multiple connections that only differ in source port. This reduces log volume by discarding source port information, -which is usually ephemeral and of little value. - -#### Level 2: aggregate IPs and source ports - -In addition to the above, aggregate flows that have related sources and destinations into a single log depending on the source and -destination type. -- Pods created by the same pod controller (Deployments, ReplicaSets, etc.) are combined and identified by their common pod prefix, -- IP addresses in the same NetworkSet are aggregated under that shared NetworkSet, -- When no more precise identity is known, arbitrary IP addresses are aggregated to either public (`pub`) and private (`pvt`) as defined in RFC1918. - -#### Level 3: aggregate IPs, source and dest ports - -In addition to the above, aggregate flows with different destination ports that otherwise share together. 
-This is intended to reduce log volumes in situations where a lot of network probing is expected (for example by `nmap`) but is not typically needed -unless log analysis indicates it will be beneficial. - -### Understanding aggregation level differences - -Here are examples of pod-to-pod flows, highlighting the differences between flow logs at various aggregation levels. - -By suppressing the source port, aggregation level 1 minimizes the flow logs generated for applications that make many connections to the same destination. -The two flows originating from `client-a` without aggregation are combined into one. - -In Kubernetes, pod controllers (Deployments, ReplicaSets, etc.) automatically name the pods they create with a common prefix. -For example, the pods `nginx-1` and `nginx-2` are created by the ReplicaSet `nginx`. At aggregation level 2 that prefix is used to aggregate flow log entries and -is indicated with an asterisk (`*`) at the end of the name. The flows originating from `client-a` and `client-b` are combined into a single flow log. - -Finally, level 3 combines flows between matching pod prefixes that target distinct destination ports, as seen in the last example row below. - -Aggregation is currently performed on a per-node basis, but this behavior may change. Certain other fields are always kept separate -and not aggregated together such as `action` (i.e. denied traffic and allowed traffic will always be logged separately). - -| | | **Src Traffic** | | | **Dst Traffic** | | | **Packet counts** | | -|--------------------------|-----------|----------|---------|----------|----------|---------|----------|------------|-------------| -| **Aggr lvl** | **Flows** | **Name** | **IP** | **Port** | **Name** | **IP** | **Port** | **Pkt in** | **Pkt out** | -| 0 (no aggregation) | 4 | client-a | 1.1.1.1 | 45556 | nginx-1 | 2.2.2.2 | 80 | 1 | 2 | -| | | client-b | 1.1.2.2 | 45666 | nginx-2 | 2.2.3.3 | 80 | 2 | 2 | -| | | client-a | 1.1.1.1 | 65533 | nginx-1 | 2.2.2.2 | 80 | 1 | 3 | -| | | client-c | 1.1.3.3 | 65534 | nginx-2 | 2.2.3.3 | 8080 | 3 | 4 | -| 1 (src port) | 3 | client-a | 1.1.1.1 | - | nginx-1 | 2.2.2.2 | 80 | 2 | 5 | -| | | client-b | 1.1.2.2 | - | nginx-1 | 2.2.2.2 | 80 | 2 | 2 | -| | | client-c | 1.1.3.3 | - | nginx-2 | 2.2.3.3 | 8080 | 3 | 4 | -| 2 (+src/dest pod-prefix) | 2 | client-* | - | - | nginx-* | - | 80 | 4 | 7 | -| | | client-* | - | - | nginx-* | - | 8080 | 3 | 4 | -| 3 (+dest port) | 1 | client-* | - | - | nginx-* | - | - | 7 | 11 | - -## How to - -- [Verify existing aggregation level](#verify-existing-aggregation-level) -- [Change default aggregation level](#change-default-aggregation-level) -- [Troubleshoot logs with aggregation levels](#troubleshoot-logs-with-aggregation-levels) - -### Verify existing aggregation level - -Use the following command: - -```bash -kubectl get felixconfiguration -o yaml -``` - -### Change default aggregation level - -Before [changing the default aggregation level](../../../reference/resources/felixconfig.mdx#aggregationkind), note the following: - -- Although any change in aggregation level affects flow log volume, lowering the aggregation number (especially to `0` for no aggregation) will cause significant impacts to log storage. If you allow more flow logs, ensure that you provision more log storage. -- Verify that the parameters that you want to see in your aggregation level are not already [filtered](filtering.mdx). 
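-
-As a minimal sketch, changing the aggregation level for allowed flows could look like the following. It assumes the
-cluster's default `FelixConfiguration` resource (named `default`) and uses the `flowLogsFileAggregationKindForAllowed`
-field described in the FelixConfiguration reference linked above; the denied-flow level is controlled by a separate
-field in the same resource:
-
-```bash
-# Sketch: lower aggregation for allowed flows to level 1 (aggregate source ports only).
-# Expect higher flow log volume (and storage use) after lowering the level.
-kubectl patch felixconfiguration default --type merge -p '{"spec":{"flowLogsFileAggregationKindForAllowed":1}}'
-```
-
-Re-run `kubectl get felixconfiguration -o yaml` afterwards to confirm that the new value is in place.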
- -### Troubleshoot logs with aggregation levels - -When you use flow log aggregation, sometimes you may see two Alerts, - -![two-alerts](/img/calico-enterprise/two-alerts.png) - -along with two flow log entries. Note that the entries are identical except for the slight timestamp difference. - -![two-logs](/img/calico-enterprise/two-logs.png) - -The reason you may see two entries is because of the interaction between the aggregation interval, and the time interval to export logs (`flowLogsFlushInterval`). - -In each aggregation interval, connections/connection attempts can be started or completed. However, flow logs do not start/stop when a connection starts/stops. Let’s assume the default export logs “flush” time of 10 seconds. If a connection is started in one flush interval, but terminates in the next, it is recorded across two entries. To get visibility into flow logs to differentiate the entries, go to Service Graph, flow logs tab, and look at these fields: `num_flows`, `num_flows_started`, and `num_flows_completed`. - -The underlying reason for this overlap is a dependency on Linux conntrack, which provides the lifetime of stats that $[prodname] tracks across different protocols (TCP, ICMP, UDP). For example, for UDP and ICMP, $[prodname] waits for a conntrack entry to timeout before it considers a “connection” closed, and this is usually greater than 10 seconds. - -## Additional resources - -- [Archive logs to storage](../archive-storage.mdx) -- [Configure RBAC for Elasticsearch logs](../rbac-elasticsearch.mdx) -- [Configure data retention](../retention.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/observability/elastic/flow/datatypes.mdx b/calico-enterprise_versioned_docs/version-3.19-2/observability/elastic/flow/datatypes.mdx deleted file mode 100644 index 2e16988d6e..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/observability/elastic/flow/datatypes.mdx +++ /dev/null @@ -1,166 +0,0 @@ ---- -description: Data that Calico Enterprise sends to Elasticsearch. ---- - -# Flow log data types - -## Big picture - -$[prodname] sends the following data to Elasticsearch. - -The following table details the key/value pairs in the JSON blob, including their [Elasticsearch datatype](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-types.html). - -| Name | Datatype | Description | -| --------------------------------- | ----------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `host` | keyword | Name of the node that collected the flow log entry. | -| `start_time` | date | Start time of log collection in UNIX timestamp format. | -| `end_time` | date | End time of log collection in UNIX timestamp format. | -| `action` | keyword | - `allow`: $[prodname] accepted the flow.
    - `deny`: $[prodname] denied the flow. | -| `bytes_in` | long | Number of incoming bytes since the last export. | -| `bytes_out` | long | Number of outgoing bytes since the last export. | -| `dest_ip` | ip | IP address of the destination pod. A null value indicates aggregation. | -| `dest_name` | keyword | Contains one of the following values:
    - Name of the destination pod.
    - Name of the pod that was aggregated or the endpoint is not a pod. Check dest_name_aggr for more information, such as the name of the pod if it was aggregated. | -| `dest_name_aggr` | keyword | Contains one of the following values:
    - Aggregated name of the destination pod.
    - `pvt`: endpoint is not a pod. Its IP address belongs to a private subnet.
    - `pub`: endpoint is not a pod. Its IP address does not belong to a private subnet. It is probably an endpoint on the public internet. | -| `dest_namespace` | keyword | Namespace of the destination endpoint. A `-` means the endpoint is not namespaced. | -| `dest_port` | long | Destination port. Not applicable for ICMP packets. | -| `dest_service_name` | keyword | Name of the destination service. A `-` means the original destination did not correspond to a known Kubernetes service (e.g. a services ClusterIP). | -| `dest_service_namespace` | keyword | Namespace of the destination service. A `-` means the original destination did not correspond to a known Kubernetes service (e.g. a services ClusterIP). | -| `dest_service_port` | keyword | Port name of the destination service.
    A `-` means :
    - the original destination did not correspond to a known Kubernetes service (e.g. a services ClusterIP), or
    - the destination port is aggregated.
    A `*` means there are multiple service port names matching the destination port number. | -| `dest_type` | keyword | Destination endpoint type. Possible values:
    - `wep`: A workload endpoint, a pod in Kubernetes.
    - `ns`: A Networkset. If multiple Networksets match, then the one with the longest prefix match is chosen.
    - `net`: A Network. The IP address did not fall into a known endpoint type. | -| `dest_labels` | array of keywords | Labels applied to the destination pod. A hyphen indicates aggregation. | -| `dest_domains` | array of keywords | Find all the destination domain names for use in a DNS policy by examining `dest_domains`. The field displays information on the top-level domains linked to the destination IP. Applies to flows reported from the source to destinations outside the cluster. If `flowLogsDestDomainsByClient` is disabled, having `dest_domains`: ["A"] doesn't guarantee that the flow corresponds to a connection with domain name A. The destination IP may also be linked to other domain names not yet captured by Calico. | -| `reporter` | keyword | - `src`: flow came from the pod that initiated the connection.
    - `dst`: flow came from the pod that received the initial connection. | -| `num_flows` | long | Number of flows aggregated into this entry during this export interval. | -| `num_flows_completed` | long | Number of flows that were completed during the export interval. | -| `num_flows_started` | long | Number of flows that were started during the export interval. | -| `num_process_names` | long | Number of unique process names aggregated into this entry during this export interval. | -| `num_process_ids` | long | Number of unique process ids aggregated into this entry during this export interval. | -| `num_process_args` | long | Number of unique process args aggregated into this entry during this export interval. | -| `nat_outgoing_ports` | array of ints | List of [NAT](https://en.wikipedia.org/wiki/Network_address_translation) outgoing ports for the packets that were Source NAT'd in the flow | -| `packets_in` | long | Number of incoming packets since the last export. | -| `packets_out` | long | Number of outgoing packets since the last export. | -| `proto` | keyword | Protocol. | -| `policies` | array of keywords | List of policies that interacted with this flow. See [Format of the policies field](#format-of-the-policies-field). | -| `process_name` | keyword | The name of the process that initiated or received the connection or connection request. This field will have the executable path if flowLogsCollectProcessPath is enabled. A "-" indicates that the process name is not logged. A "\*" indicates that the per flow process limit has exceeded and the process names are now aggregated. | -| `process_id` | keyword | The process ID of the corresponding process (indicated by the `process_name` field) that initiated or received the connection or connection request. A "-" indicates that the process ID is not logged. A "\*" indicates that there are more than one unique process IDs for the corresponding process name. | -| `process_args` | array of strings | The arguments with which the executable was invoked. The size of the list depends on the per flow process args limit. | -| `source_ip` | ip | IP address of the source pod. A null value indicates aggregation. | -| `source_name` | keyword | Contains one of the following values:
    - Name of the source pod.
    - Name of the pod that was aggregated or the endpoint is not a pod. Check source_name_aggr for more information, such as the name of the pod if it was aggregated. | -| `source_name_aggr` | keyword | Contains one of the following values:
    - Aggregated name of the source pod.
    - `pvt`: Endpoint is not a pod. Its IP address belongs to a private subnet.
    - `pub`: the endpoint is not a pod. Its IP address does not belong to a private subnet. It is probably an endpoint on the public internet. | -| `source_namespace` | keyword | Namespace of the source endpoint. A `-` means the endpoint is not namespaced. | -| `source_port` | long | Source port. A null value indicates aggregation. | -| `source_type` | keyword | The type of source endpoint. Possible values:
    - `wep`: A workload endpoint, a pod in Kubernetes.
    - `ns`: A Networkset. If multiple Networksets match, then the one with the longest prefix match is chosen.
    - `net`: A Network. The IP address did not fall into a known endpoint type. | -| `source_labels` | array of keywords | Labels applied to the source pod. A hyphen indicates aggregation. | -| `original_source_ips` | array of ips | List of external IP addresses collected from requests made to the cluster through an ingress resource. This field is only available if capturing external IP addresses is configured. | -| `num_original_source_ips` | long | Number of unique external IP addresses collected from requests made to the cluster through an ingress resource. This count includes the IP addresses included in the `original_source_ips` field. This field is only available if capturing external IP addresses is configured. | -| `tcp_mean_send_congestion_window` | long | Mean tcp send congestion window size. This field is only available if flowLogsEnableTcpStats is enabled | -| `tcp_min_send_congestion_window` | long | Minimum tcp send congestion window size. This field is only available if flowLogsEnableTcpStats is enabled | -| `tcp_mean_smooth_rtt` | long | Mean smooth RTT in micro seconds. This field is only available if flowLogsEnableTcpStats is enabled | -| `tcp_max_smooth_rtt` | long | Maximum smooth RTT in micro seconds. This field is only available if flowLogsEnableTcpStats is enabled | -| `tcp_mean_min_rtt` | long | Mean MinRTT in micro seconds. This field is only available if flowLogsEnableTcpStats is enabled | -| `tcp_max_min_rtt` | long | Maximum MinRTT in micro seconds. This field is only available if flowLogsEnableTcpStats is enabled | -| `tcp_mean_mss` | long | Mean TCP MSS. This field is only available if flowLogsEnableTcpStats is enabled | -| `tcp_min_mss` | long | Minimum TCP MSS. This field is only available if flowLogsEnableTcpStats is enabled | -| `tcp_total_retransmissions` | long | Total retransmitted packets. This field is only available if flowLogsEnableTcpStats is enabled | -| `tcp_lost_packets` | long | Total lost packets. This field is only available if flowLogsEnableTcpStats is enabled | -| `tcp_unrecovered_to` | long | Total unrecovered timeouts. This field is only available if flowLogsEnableTcpStats is enabled | - -### Format of the policies field - -The `policies` field contains a comma-delimited list of policy rules that matched the flow. Each entry in the -list has the following format: - -``` -|||| -``` - -Where, - -* `` numbers the order in which the rules were hit, starting with `0`. - :::tip - Sort the entries of the list by the `` to see the order that rules were hit. The entries are displayed in - random order due to the way they are stored in the datastore. - ::: - -* `` is the name of the policy tier containing the policy, or `__PROFILE__` for a rule derived from a - `Profile` resource (this is the internal datatype used to represent a Kubernetes namespace and its associated - "default allow" rule). -* `` is the name of the policy/profile; its format depends on the type of policy: - - * `.` for $[prodname] `GlobalNetworkPolicy`. - * `/knp.default.` for Kubernetes `NetworkPolicy`. - * `/.` for $[prodname] `NetworkPolicy`. - - Staged policy names are prefixed with "staged:". - -* `` is the action performed by the rule; one of `allow`, `deny`, `pass`. -* `` if non-negative, is the index of the rule that was matched within the policy, starting with 0. - Otherwise, a special value: - - * `-1` means the reporting endpoint was selected by the policy but no rule matched. The traffic hit the default - action for the tier. 
In this case, the `` is selected arbitrarily from the set of policies within - the tier that apply to the endpoint. - * `-2` means "unknown". The rule index was not recorded. - -### Flow log example, with `no aggregation` - -A flow log with aggregation level 0, `no aggregation`, might look like: - -``` - { - "start_time": 1597166083, - "end_time": 1597166383, - "source_ip": "192.168.47.9", - "source_name": "access-6b687c8dcb-zn5s2", - "source_name_aggr": "access-6b687c8dcb-*", - "source_namespace": "policy-demo", - "source_port": 42106, - "source_type": "wep", - "source_labels": { - "labels": [ - "pod-template-hash=6b687c8dcb", - "app=access" - ] - }, - "dest_ip": "192.168.138.79", - "dest_name": "nginx-86c57db685-h6792", - "dest_name_aggr": "nginx-86c57db685-*", - "dest_namespace": "policy-demo", - "dest_port": 80, - "dest_type": "wep", - "dest_labels": { - "labels": [ - "pod-template-hash=86c57db685", - "app=nginx" - ] - }, - "proto": "tcp", - "action": "allow", - "reporter": "dst", - "policies": { - "all_policies": [ - "0|default|policy-demo/default.access-nginx|allow" - ] - }, - "bytes_in": 388, - "bytes_out": 1113, - "num_flows": 1, - "num_flows_started": 1, - "num_flows_completed": 1, - "packets_in": 6, - "packets_out": 5, - "http_requests_allowed_in": 0, - "http_requests_denied_in": 0, - "original_source_ips": null, - "num_original_source_ips": 0, - "host": "bz-n8kf-kadm-node-1", - "@timestamp": 1597166383000 - } -``` - -The log shows an incoming connection reported by the destination node, allowed by a policy on port 80. The **`start_time`** and **`end_time`** -describe the aggregation period (5 min.) During this interval, one flow (**`"num_flow": 1`**) was recorded. At higher aggregation levels, flows from -endpoints performing the same operation and originating from the same Deployment/ReplicaSet are grouped into a single log. In this example, the -common source endpoints that are prefixed with **`access-6b687c8dcb-`**. Parameters like **`source_ip`** may be dropped and set to **`null`**, depending on -the aggregation level. As aggregation levels increase, more flows will be grouped together based on your data. For more details on aggregation -levels, see [configure flow log aggregation](./aggregation.mdx). \ No newline at end of file diff --git a/calico-enterprise_versioned_docs/version-3.19-2/observability/elastic/flow/filtering.mdx b/calico-enterprise_versioned_docs/version-3.19-2/observability/elastic/flow/filtering.mdx deleted file mode 100644 index 01d094a8bd..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/observability/elastic/flow/filtering.mdx +++ /dev/null @@ -1,110 +0,0 @@ ---- -description: Filter Calico Enterprise flow logs. ---- - -# Filter flow logs - -## Big picture - -Filter $[prodname] flow logs. - -## Value - -Filter $[prodname] flow logs to suppress logs of low significance, and troubleshoot threats. - -## Concepts - -### Container monitoring tools versus flow logs - -Container monitoring tools are good for monitoring Kubernetes and orchestrated workloads for CPU usage, network usage, and log aggregation. For example, a data monitoring tool can tell if a pod has turned into a bitcoin miner based on it using more than normal CPU. - -$[prodname] flow logs provide continuous records of every single packet sent/received by all pods in your Kubernetes cluster. Note that flow logs do not contain all packet data; only the number of packets/bytes that were sent between specific IP/ports, and when. 
In the previous monitoring tool example, $[prodname] flow logs could see the packets running to/from the bitcoin mining network. - -$[prodname] flow logs tell you when a pod is compromised, specifically: - -- Where a pod is sending data to -- If the pod is talking to a known command-and-control server -- Other pods that the compromised pod has been talking to (so you can see if they're compromised too) - -### Flow log format - -A flow log contains these space-delimited fields (unless filtered out). - -``` -startTime endTime srcType srcNamespace srcName srcLabels dstType dstNamespace dstName -dstLabels srcIP dstIP proto srcPort dstPort numFlows numFlowsStarted numFlowsCompleted -reporter packetsIn packetsOut bytesIn bytesOut action -``` - -**Example** - -``` -1528842551 1528842851 wep dev rails-81531* - wep dev memcached-38456* - - - 6 - 3000 7 3 4 out 154 61 70111 49404 allow -``` - -- Fields that are not enabled or are aggregated, are noted by `-` -- Aggregated names (such as “pod prefix”), are noted by `*` at the end of the name -- If `srcName` or `dstName` fields contain only a `*`, aggregation was performed using other means (such as specific labels), and no unique prefix was present. - -## How to - -- [Create flow log filters](#create-flow-log-filters) -- [Add filters to ConfigMap file](#add-filters-to-configmap-file) - -### Create flow log filters - -Create your [fluentd filters](https://docs.fluentd.org/filter/grep). - -**Example: filter out a specific namespace** - -This example filters out all flow logs whose source or destination namespace is "dev". Additional namespaces could be filtered by adjusting the regular expression "pattern"s, or by adding additional `exclude` blocks. - -``` - - @type grep - - key source_namespace - pattern dev - - - key dest_namespace - pattern dev - - -``` - -**Example: filter out internet traffic to a specific deployment** - -This example filters inbound internet traffic to the deployment with pods named, `nginx-internet-*`. Note the use of the `and` directive to filter out traffic that is both to the deployment, and from the internet (source `pub`). - -``` - - @type grep - - - key dest_name_aggr - pattern ^nginx-internet - - - key source_name_aggr - pattern pub - - - -``` - -### Add filters to ConfigMap file - -1. Create a `filters` directory with a file called `flow` with your desired filters. If you are also adding [dns filters](../dns/filtering-dns.mdx), add the `dns` file to the directory. - -1. Create the `fluentd-filters` ConfigMap in the `tigera-operator` namespace with the following command. - - ```bash - kubectl create configmap fluentd-filters -n tigera-operator --from-file=filters - ``` - -## Additional resources - -- [Flow log aggregation](aggregation.mdx) -- [Archive logs to storage](../archive-storage.mdx) -- [Configure RBAC for Elasticsearch logs](../rbac-elasticsearch.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/observability/elastic/flow/hep.mdx b/calico-enterprise_versioned_docs/version-3.19-2/observability/elastic/flow/hep.mdx deleted file mode 100644 index 07c39916f6..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/observability/elastic/flow/hep.mdx +++ /dev/null @@ -1,35 +0,0 @@ ---- -description: Enable hostendpoint reporting in flow logs. ---- - -# Enable HostEndpoint reporting in flow logs - -## Big picture - -Enable $[prodname] flow logs to report HostEndpoint information. - -## Value - -Get visibility into the network activity at the HostEndpoint level using $[prodname] flow logs. 
- -## Before you begin - -**Limitations** - -- HostEndpoint reporting is only supported on Kubernetes nodes. -- Flow logs on ApplyOnForward policies are currently not supported. As a result, a policy blocking traffic at the host level -from forwarding to a workload endpoint would not result in a flow log from the host endpoint. - -## How to - -### Enable HostEndpoint reporting - -To enable reporting HostEndpoint metadata in flow logs, use the following command: - -``` - kubectl patch felixconfiguration default -p '{"spec":{"flowLogsEnableHostEndpoint":true}}' -``` - -## Additional resources - -- [Protect Kubernetes nodes](../../../network-policy/hosts/kubernetes-nodes.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/observability/elastic/flow/index.mdx b/calico-enterprise_versioned_docs/version-3.19-2/observability/elastic/flow/index.mdx deleted file mode 100644 index 87603be8db..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/observability/elastic/flow/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: Configure, filter, and aggregate flow logs. -hide_table_of_contents: true ---- - -# Configure flow logs - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/observability/elastic/flow/processpath.mdx b/calico-enterprise_versioned_docs/version-3.19-2/observability/elastic/flow/processpath.mdx deleted file mode 100644 index 87c4bef1aa..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/observability/elastic/flow/processpath.mdx +++ /dev/null @@ -1,53 +0,0 @@ ---- -description: Get visibility into process-level network activity in flow logs. ---- - -# Enable process-level information in flow logs - -## Big picture - -Configure $[prodname] to collect process executable path and arguments and add them to flow logs. - -## Value - -Get visibility into the network activity at the process level using $[prodname] flow logs. - -## Concepts - -### eBPF kprobe programs - -eBPF is a Linux kernel technology that allows safe mini-programs to be attached to various hooks inside the kernel. To collect the path and arguments of short-lived processes, this feature uses an eBPF kprobe program. - -### Host's PID namespace - -For long-lived processes, path and arguments are read from `/proc/pid/cmdline`. This requires access to the host's PID namespace. If the access is not available then the process path and arguments will only be captured (by the eBPF kprobes) for newly-created processes. - -## Before you begin - -Ensure that your kernel contains support for eBPF kprobes that $[prodname] uses. The minimum supported -kernel for this is feature is: `v4.4.0`. - -## Privileges - -For full functionality, this feature requires the `$[noderunning]` `DaemonSet` to have access to the host's PID namespace. The Tigera Operator will automatically grant this extra privilege to the daemonset if the feature is enabled in the operator's LogCollector resource, as described below. - -# How to - -### Enable process path and argument collection - -$[prodname] can be configured to enable process path and argument collection on supported Linux kernels -using the command: - -``` - kubectl patch logcollector.operator.tigera.io tigera-secure --type merge -p '{"spec":{"collectProcessPath":"Enabled"}}' -``` - -Enabling/Disabling collectProcessPath causes a rolling update of the `$[noderunning] DaemonSet`. 
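-
-To confirm that the rolling update has finished before looking for process information in flow logs, you can watch the daemonset status. The following is a sketch that assumes an operator-managed installation where the $[noderunning] daemonset runs in the `calico-system` namespace:
-
-```bash
-# Wait until every $[noderunning] pod has been recreated with the new setting.
-kubectl rollout status daemonset/$[noderunning] -n calico-system
-```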
- -### View process path and arguments in flow logs using Kibana - -Navigate to the Kibana Flow logs dashboard to view process path and arguments associated with a flow log entry. - -The executable path will appear in the `process_name` field and `process_args` will have the executable arguments. Executable path -and arguments cannot be collected under certain circumstances, in that `process_name` will have the task name and `process_args` -will be empty. Information about these fields are described in the [Flow log datatype document](datatypes.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/observability/elastic/flow/tcpstats.mdx b/calico-enterprise_versioned_docs/version-3.19-2/observability/elastic/flow/tcpstats.mdx deleted file mode 100644 index 48faa1936b..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/observability/elastic/flow/tcpstats.mdx +++ /dev/null @@ -1,43 +0,0 @@ ---- -description: Enabling TCP socket stats information in flow logs ---- - -# Enabling TCP socket stats in flow logs - -## Big picture - -Configure $[prodname] to collect additional TCP socket statistics. While this feature is available in both iptables and eBPF data plane modes, it uses eBPF to collect the statistics. Therefore it requires a recent Linux kernel (at least v5.3.0/v4.18.0-193 for RHEL). - -## Value - -Get visibility into the network activity at the socket level using $[prodname] flow logs. - -## Concepts - -### eBPF TC programs - -eBPF is a Linux kernel technology that allows safe mini-programs to be attached to various hooks inside the kernel. This feature leverages eBPF to look up the TCP socket associated with packets flowing through an interface and sends them to userspace for addition to flow logs. - -## Before you begin - -Ensure that your kernel contains support for eBPF that $[prodname] uses. The minimum supported -kernel for tcp socket stats is: `v5.3.0`. For distros based on RHEL, the minimum kernel version is `v4.18.0-193`. - -# How to - -### Enable tcp stats collection - -$[prodname] can be configured to enable tcp socket stats collection on supported Linux kernels -using the command: - -``` - kubectl patch felixconfiguration default -p '{"spec":{"flowLogsCollectTcpStats":true}}' -``` - -### View tcp stats in flow logs using Kibana. - -Navigate to the Kibana Flow logs dashboard to view tcp stats associated with a flow log entry. - -The additional fields collected are `tcp_mean_send_congestion_window`, `tcp_min_send_congestion_window`, `tcp_mean_smooth_rtt`, `tcp_max_smooth_rtt`, -`tcp_mean_min_rtt`, `tcp_max_min_rtt`, `tcp_mean_mss`, `tcp_min_mss`, `tcp_total_retransmissions`, `tcp_lost_packets`, `tcp_unrecovered_to`. -Information about these fields are described in the [Flow log datatype document](datatypes.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/observability/elastic/index.mdx b/calico-enterprise_versioned_docs/version-3.19-2/observability/elastic/index.mdx deleted file mode 100644 index 94520c9b99..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/observability/elastic/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: Configure logs for visibility in the web console. 
-hide_table_of_contents: true ---- - -# Manage Calico Enterprise logs - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/observability/elastic/l7/configure.mdx b/calico-enterprise_versioned_docs/version-3.19-2/observability/elastic/l7/configure.mdx deleted file mode 100644 index e3116dbbeb..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/observability/elastic/l7/configure.mdx +++ /dev/null @@ -1,139 +0,0 @@ ---- -description: Configure and aggregate L7 logs. ---- - -# Configure L7 logs - -## Big picture - -Deploy Envoy and use $[prodname] L7 logs to monitor application activity. - -## Value - -Just like L3/4 $[prodname] logs, platform operators and -development teams want visibility into L7 logs to see how applications are interacting with each -other. $[prodname] flow logs only display which workloads are communicating -with each other, not the specific request details. $[prodname] provides visibility into L7 traffic without the need for a service mesh. - -L7 logs are also key for detecting anomalous behaviors like attempts to -access applications, restricted URLs, and scans for particular URLs. - -## Concepts - -### About L7 logs - -L7 logs capture application interactions from HTTP header data in requests. Data shows what is actually sent in communications between specific pods, providing more specificity than flow logs. (Flow logs capture data only from connections for workload interactions). - -$[prodname] collects L7 logs by sending the selected traffic through an Envoy proxy. - -L7 logs are visible in the web console, service graph, in the HTTP tab. - -## Before you begin - -**Not supported** -- GKE - -**Limitations** - -- L7 log collection is not supported for host-networked client pods. -- When selecting and deselecting traffic for L7 log collection, active connections may be disrupted. - -**Log storage requirements** - -:::note - -L7 logs require a minimum of 1 additional GB of log storage per node, per one-day retention period. Adjust your [Log Storage](../../../operations/logstorage/adjust-log-storage-size.mdx) before you start tasks in the next section. - -::: - -## How to -- [Configure Felix for log data collection](#configure-felix-for-log-data-collection) -- [Configure L7 logs](#configure-l7-logs) -- [View L7 logs in the web console](#view-l7-logs-in-manager-ui) - -### Configure Felix for log data collection - -1. Configure L7 log aggregation, retention, and reporting. - - For help, see [Felix Configuration documentation](../../../reference/component-resources/node/felix/configuration.mdx#calico-enterprise-specific-configuration). - -### Configure L7 logs - -In this step, you will configure L7 logs, select logs for collection, and test the configuration. - -**Configure the ApplicationLayer resource for L7 logs** - -1. Create or update the [ApplicationLayer](../../../reference/installation/api.mdx#applicationlayer) resource named, `tigera-secure`. - - Example: - - ```yaml - apiVersion: operator.tigera.io/v1 - kind: ApplicationLayer - metadata: - name: tigera-secure - spec: - logCollection: - collectLogs: Enabled - logIntervalSeconds: 5 - logRequestsPerInterval: -1 - ``` - - Read more about the log collection specification [here](../../../reference/installation/api.mdx#logcollector). - - Applying this resource creates an `l7-log-collector` daemonset in `calico-system` namespace. - -1. 
Ensure that the daemonset progresses and `l7-collector` and `envoy-proxy` containers inside the daemonset are in a `Running` state. - -**Select traffic for L7 log collection** - -1. Annotate the services you wish to collect L7 logs as shown. - - ```bash - kubectl annotate svc -n projectcalico.org/l7-logging=true - ``` - -2. To disable the L7 log collection, remove the annotation. - - ```bash - kubectl annotate svc -n projectcalico.org/l7-logging- - ``` - -After annotating a service for L7 log collection, only newly-established connections through that service are proxied by Envoy. Connections established before the service is annotated are not proxied or interrupted, and no logs are generated. - -Conversely, when a service is deselected, any previous connections established through the annotated service continue to be proxied by Envoy until they are terminated, and logs are generated. - -**Test your configuration** - -1. Identify the path to access your cluster. Where `` can be: - - - Public address of your cluster/service - or - - Cluster IP of your application's service (if testing within the cluster) - -1. `curl` your service with a command similar to the following. You will see `Server` header as `envoy`. - - ```bash - curl --head :/ - ``` - -### View L7 logs in the web console - -**Service Graph** - -To view L7 logs in Service Graph: - -1. In the web console left navbar, click **Service Graph**. -1. In the bottom pane you will see L7 logs in the HTTP tab. - - ![l7-logs](/img/calico-enterprise/l7-logs.png) - -**Kibana** - -To view L7 logs by index pattern in Kibana: - -1. In the web console left navbar, click **Kibana**. - -1. In the new Kibana browser, click the hamburger icon in the top left corner, and select **Analytics**, **Discover**. - -1. Select the index pattern, `tigera_secure_ee_l7`. diff --git a/calico-enterprise_versioned_docs/version-3.19-2/observability/elastic/l7/datatypes.mdx b/calico-enterprise_versioned_docs/version-3.19-2/observability/elastic/l7/datatypes.mdx deleted file mode 100644 index 041085f94f..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/observability/elastic/l7/datatypes.mdx +++ /dev/null @@ -1,36 +0,0 @@ ---- -description: L7 data that Calico Enterprise sends to Elasticsearch. ---- - -# L7 log data types - -## Big picture - -$[prodname] sends the following data to Elasticsearch. - -The following table details the key/value pairs in the JSON blob, including their [Elasticsearch datatype](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-types.html). - -| Name | Datatype | Description | -| ------------------------ | -------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `host` | keyword | Name of the node that collected the L7 log entry. | -| `start_time` | date | Start time of log collection in UNIX timestamp format. | -| `end_time` | date | End time of log collection in UNIX timestamp format. | -| `bytes_in` | long | Number of incoming bytes since the last export. | -| `bytes_out` | long | Number of outgoing bytes since the last export. | -| `duration_mean` | long | Mean duration time of all the requests that match this combination of L7 data in nanoseconds. 
| -| `duration_max` | long | Max duration time of all the requests that match this combination of L7 data in nanoseconds. | -| `count` | long | Number of requests that match this combination of L7 data. | -| `src_name_aggr` | keyword | Contains one of the following values:
    - Aggregated name of the source pod.
    - `pvt`: endpoint is not a pod. Its IP address belongs to a private subnet.
    - `pub`: endpoint is not a pod. Its IP address does not belong to a private subnet. It is probably an endpoint on the public internet. | -| `src_namespace` | keyword | Namespace of the source endpoint. | -| `src_type` | keyword | Source endpoint type. Possible values:
    - `wep`: A workload endpoint, a pod in Kubernetes.
    - `ns`: A Networkset. If multiple Networksets match, then the one with the longest prefix match is chosen.
    - `net`: A Network. The IP address did not fall into a known endpoint type. | -| `dest_name_aggr` | keyword | Contains one of the following values:
    - Aggregated name of the destination pod.
    - `pvt`: endpoint is not a pod. Its IP address belongs to a private subnet.
    - `pub`: endpoint is not a pod. Its IP address does not belong to a private subnet. It is probably an endpoint on the public internet. | -| `dest_namespace` | keyword | Namespace of the destination endpoint. | -| `dest_type` | keyword | Destination endpoint type. Possible values:
    - `wep`: A workload endpoint, a pod in Kubernetes.
    - `ns`: A Networkset. If multiple Networksets match, then the one with the longest prefix match is chosen.
    - `net`: A Network. The IP address did not fall into a known endpoint type. | -| `dest_service_name` | keyword | Name of the destination service. This may be empty if the request was not made against a service. | -| `dest_service_namespace` | keyword | Namespace of the destination service. This may be empty if the request was not made against a service. | -| `dest_service_port` | long | Destination service port. | -| `url` | keyword | URL that the request was made against. | -| `response_code` | keyword | Response code returned by the request. | -| `method` | keyword | HTTP method for the request. | -| `user_agent` | keyword | User agent of the request. | -| `type` | keyword | Type of request made. Possible values include `tcp`, `tls`, and `html/`. | diff --git a/calico-enterprise_versioned_docs/version-3.19-2/observability/elastic/l7/index.mdx b/calico-enterprise_versioned_docs/version-3.19-2/observability/elastic/l7/index.mdx deleted file mode 100644 index 19164b2d05..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/observability/elastic/l7/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: Configure Elasticsearch L7 logs. -hide_table_of_contents: true ---- - -# L7 logs - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/observability/elastic/overview.mdx b/calico-enterprise_versioned_docs/version-3.19-2/observability/elastic/overview.mdx deleted file mode 100644 index 6014a00935..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/observability/elastic/overview.mdx +++ /dev/null @@ -1,105 +0,0 @@ ---- -description: Summary of the out-of-box features for Calico Enterprise logs. ---- - -# Overview - -## Big picture - -Use $[prodname] log data for visibility and troubleshooting Kubernetes clusters. - -## Value - -Workloads and policies are highly dynamic. To troubleshoot Kubernetes clusters, you need logs with workload identity and context. $[prodname] deploys an Elasticsearch cluster and Kibana instance during installation with these features: - -- Logs with workload context -- Centralized log collection for multiple clusters for $[prodname] multi-cluster-management -- View Elasticsearch logs in the $[prodname] web console (Kibana dashboard and Flow Visualizer), and the [Elasticsearch API](https://www.elastic.co/guide/en/elasticsearch/reference/current/search.html) -- Standard Kubernetes RBAC for granular access control to logs -- Collect/archive logs or subset of logs -- Log aggregation for high-volume logs -- Configure data retention settings to manage cluster disk space -- Integration with third-party tools like Amazon S3, Syslog, Splunk - -## Concepts - -### Logs types - -Elasticsearch logs provide the visibility and troubleshooting backend for $[prodname]. 
- -| Log type | Description | Log source | RBAC | Index | -| -------- | ---------------------------------------------------------------------------------------------- | ------------------------------------------- | ------------ | ------------------------------ | -| flow | Network flows for workloads: source and destination namespaces, pods, labels, and policies | $[prodname] cnx-node (Felix) | `flows` | `tigera_secure_ee_flows.*` | -| audit | Audit logs for $[prodname] resources | $[prodname] apiserver | `audit_ee` | `tigera_secure_ee_audit_ee.*` | -| | Audit logs for Kubernetes resources | Kubernetes apiserver | `audit_kube` | `tigera_secure_ee_audit_kube.*`| -| | | Both audit logs above | `audit*` | `tigera_secure_ee_audit*` | -| bgp | $[prodname] networking BGP peering and route propagation | $[prodname] cnx-node (BIRD) | `ee_bgp` | `tigera_secure_ee_bgp.*` | -| dns | DNS lookups and responses from $[prodname] domain-based policy | $[prodname] cnx-node (Felix) | `ee_dns` | `tigera_secure_ee_dns.*` | -| ids | $[prodname] intrusion detection events: suspicious IPs, suspicious domains, and global alerts | $[prodname] intrusion-detection-controller | `ee_events` | `tigera_secure_ee_events.*` | - -:::note - -Because of their high-volume, flow and dns logs support aggregation. - -::: - -### Default log configuration and security - -$[prodname] automatically installs fluentd on all nodes and collects flow, audit, and DNS logs. You can configure additional destinations like Amazon S3, Syslog, Splunk. - -$[prodname] enables user authentication in Elasticsearch, and secures access to Elasticsearch and Kibana instances using network policy. - -### RBAC and log access - -You control user access to logs using the standard Kubernetes RBAC cluster role and cluster role binding. For example: - -```yaml -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: bob-es-access -subjects: - - kind: User - name: bob - apiGroup: rbac.authorization.k8s.io -roleRef: - kind: ClusterRole - name: audit-ee-only - apiGroup: rbac.authorization.k8s.io -``` - -You configure Elasticsearch log access per cluster using RBAC and the Kubernetes API group, `lma.tigera.io`. For example: - -```yaml -apiGroups: ['lma.tigera.io'] -resources: ['app-cluster'] -resourceNames: ['flows', 'dns'] -verbs: ['get'] -``` - -### Logs for compliance reporting - -$[prodname] compliance reports are based on archived **flow logs** and **audit logs** for these resources: - -- Pods -- Host endpoints -- Service accounts -- Namespaces -- Kubernetes service endpoints -- Global network sets -- $[prodname] and Kubernetes network policies -- Global network policies -- Network sets - -$[prodname] also supports archiving [Cloudwatch for EKS audit logs](../../reference/installation/api.mdx#logcollectorspec). 
- -## Additional resources - -- [Log storage recommendations](../../operations/logstorage/log-storage-recommendations.mdx) -- [Configure RBAC for Elasticsearch logs](rbac-elasticsearch.mdx) -- [Configure flow log aggregation](flow/aggregation.mdx) -- [Audit logs](audit-overview.mdx) -- [BGP logs](bgp.mdx) -- [DNS logs](dns/dns-logs.mdx) -- [Archive logs](archive-storage.mdx) -- [Log collection options](../../reference/installation/api.mdx#logcollectorspec) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/observability/elastic/rbac-elasticsearch.mdx b/calico-enterprise_versioned_docs/version-3.19-2/observability/elastic/rbac-elasticsearch.mdx deleted file mode 100644 index e637d66af5..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/observability/elastic/rbac-elasticsearch.mdx +++ /dev/null @@ -1,214 +0,0 @@ ---- -description: Configure RBAC to control access to Elasticsearch logs and events. ---- - -# Configure RBAC for Elasticsearch logs and events - -## Big picture - -Configure fine-grained user access controls for flow logs, audit logs, DNS logs, and intrusion detection events. - -## Value - -Security teams and auditors require Elasticsearch logs and associated reports. Teams responsible for threat defense (suspicious IPs and domains), may have different roles. When sharing a user interface, it is critical to provide fine-grained RBAC. $[prodname] lets you manage user access at the cluster, feature, and feature subset levels. For example, users without permissions to specific Elasticsearch resources (for example, DNS logs), will not see data displayed on pages that use the Elasticsearch resource. - -## Concepts - -### RBAC for logs and events - -Elasticsearch resources are associated with the **Kubernetes API group**, `lma.tigera.io`. You can grant access to resources per cluster. The default cluster name for $[prodname] is, `cluster`. As shown in the following table, each Elasticsearch resource is mapped to a specific RBAC resource name within the `lma.tigera.io` API group. In the $[prodname] web console, Elasticsearch resources are called, **indexes or indices**. - -| Elasticsearch access | Kubernetes RBAC resource name | Description | -| --------------------------- | ----------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| tigera_secure_ee_flows | flows | Access to indices with data for Flow logs. | -| tigera_secure_ee_audit\* | audit\* | Access to indices with data for both $[prodname] and Kubernetes audit logs. The UI currently uses this query for searching both Kubernetes and $[prodname] audit logs. | -| tigera_secure_ee_audit_ee | audit_ee | Access to indices with data for $[prodname] audit logs. | -| tigera_secure_ee_audit_kube | audit_kube | Access to indices with data for Kubernetes audit logs | -| tigera_secure_ee_events | events | Access to indices with data for $[prodname] intrusion detection events. | -| kibana_login | kibana_login | Allows an OIDC user to log in to Kibana and have read permissions for discover, visualize and dashboard. | -| superuser | elasticsearch_superuser | Grants superuser access for all Elastic related actions, which include Kibana user and license management. 
| -| tigera_secure_ee_l7 | l7 | Access to indices with data for L7 logs | - -:::note - -Because the `lma.tigera.io` API group is used only for RBAC, and is not backed by an actual API, it does not provide access to any other Kubernetes resources. - -::: - -## Before you begin - -**Required** - -- A `tigera-network-admin` role with full permissions to create and modify resources. For help, see [Log in to the $[prodname] web console](../../operations/cnx/authentication-quickstart.mdx). - -- To view Elasticsearch resources in the $[prodname] web console, users must have [minimum permissions](../../network-policy/policy-tiers/rbac-tiered-policies.mdx). - -## How to - -- [Create access to a specific Elasticsearch resource](#create-access-to-a-specific-elasticsearch-resource) -- [Allow user access to a specific Elasticsearch resource](#allow-user-access-to-a-specific-elasticsearch-resource) -- [Verify user access to a specific Elasticsearch resource](#verify-user-access-to-a-specific-elasticsearch-resource) -- [Create access to all Elasticsearch resources](#create-access-to-all-elasticsearch-resources) -- [Allow user access to all Elasticsearch resources](#allow-user-access-to-all-elasticsearch-resources) -- [Verify user access to all Elasticsearch resources](#verify-user-access-to-all-elasticsearch-resources) - -### Create access to a specific Elasticsearch resource - -Create a `ClusterRole` with permissions to the resource using the table in the **Concepts** section. In this example, the ClusterRole named, `audit-ee-only` provides access to $[prodname] audit logs using the `resourceNames: ["audit_ee"]`. The `apiGroups: ["lma.tigera.io"]` is required. - -```yaml -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: audit-ee-only -rules: - - apiGroups: ['lma.tigera.io'] - resources: ['cluster'] - resourceNames: ['audit_ee'] - verbs: ['get'] -``` - -### Allow user access to a specific Elasticsearch resource - -To allow a user access to a specific Elasticsearch resource, create a `ClusterRoleBinding`. In the following example, the `ClusterRoleBinding` allows user **bob** access only to the resource, $[prodname] audit logs (`audit-ee-only`). - -```yaml -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: bob-es-access -subjects: - - kind: User - name: bob - apiGroup: rbac.authorization.k8s.io -roleRef: - kind: ClusterRole - name: audit-ee-only - apiGroup: rbac.authorization.k8s.io -``` - -### Verify user access to a specific Elasticsearch resource - -Create a `SubjectAccessReview` spec to verify user access to a specific Elasticsearch resource. In the SubjectAccessReview spec, set the following: - -- group: `lma.tigera.io` -- resource: `cluster` -- verb: `get` -- resource: a Kubernetes RBAC resource name -- user: username you are verifying - -:::note - -When verifying the **`audit*` RBAC resource name** (which accesses both $[prodname] and Kubernetes audit logs), create a `SubjectAccessReview` **only on `audit*`**; this provides the correct verification results. Do not create a `SubjectAccessReview` to query the individual `audit_ee` or `audit_kube` resources; results ("allowed: false") do not accurately reflect user access. - -::: - -``` -kubectl create -o yaml -f - <**Note**: See you platform documentation for specific command if above doesn't work. | - -1. Check if there are multiple replicas or statefulsets of Kibana or Elasticsearch. - `kubectl get all -n tigera-kibana` and/or `kubectl get all -n tigera-elasticsearch` -1. 
Check if any of the pods in the `tigera-elasticsearch` namespace are pending. - `kubectl get pod -n tigera-elasticsearch` -1. Check the TigeraStatus for problems. - `kubectl get tigerastatus -o yaml` - -## How to handle expired license - -Starting from $[prodname] v3.7, all $[prodname] features work with Elasticsearch basic license. - -If Elasticsearch platinum or enterprise license expires, ECK operator will switch it to basic license, if this doesn't happen automatically and if you notice license expiration error, switch to basic license by calling [the Elasticsearch API.](https://www.elastic.co/guide/en/elasticsearch/reference/current/start-basic.html) - -## How to create a new cluster - -:::note - -Be aware that removing LogStorage temporarily removes Elasticsearch from your cluster. Features that depend on LogStorage are temporarily unavailable, including the dashboards in the web console. Data ingestion is also temporarily paused, but will resume when the LogStorage is up and running again. -Follow these steps to create a new Elasticsearch cluster. - -::: - -1. Optional: To delete all current data follow this step. For each PersistentVolume in StorageClass `tigera-elasticsearch` that is currently mounted, set the ReclaimPolicy to `Recycle` or `Delete`. -1. Export your current LogStorage resource to a file. - - ```bash - kubectl get logstorage tigera-secure -o yaml > log-storage.yaml - ``` - -1. Delete logstorage. - - ```bash - kubectl delete -f log-storage.yaml - ``` - -1. Delete the trial license. You can skip this step if the secret is not present in your cluster. - - ```bash - kubectl delete secret -n tigera-eck-operator trial-status - ``` - -1. Optional: If you made changes to the ReclaimPolicy in step 1, revert them so that it matches the value in StorageClass `tigera-elasticsearch` again. - -1. Apply the LogStorage again. - - ```bash - kubectl apply -f log-storage.yaml - ``` - -1. Wait until your cluster is back up and running. - ```bash - watch kubectl get tigerastatus - ``` - -## Common problems - -### Elasticsearch is pending - -**Solution/workaround**: Most often, the reason is due to the absence of a PersistentVolume that matches the PersistentVolumeClaim. Check that there is a Kubernetes node with enough CPU and memory. If the field `dataNodeSelector` in the LogStorage resource is used, make sure there are pods that match all the requirements. - -### Pod cannot reach Elasticsearch - -**Solution/workaround**: Are there any policy changes that may affect the installation? In many cases, removing and reapplying log storage solves the problem. - -### kube-apiserver logs showing many certificate errors - -**Solution/workaround**: Sometimes a cluster ends up with multiple replicasets or statefulsets of Kibana or Elasticsearch if you modify the LogStorage resource. To see if this is the problem, run `kubectl get all -n tigera-(elasticsearch/kibana)`. If it is, you can ignore it; the issues will resolve over time. - -If you are using a version prior to v2.8, the issue may be caused by the ValidatingWebhookConfiguration. Although we do not support modifying this admission webhook, consider deleting it as follows: - -```bash -kubectl delete validatingwebhookconfigurations validating-webhook-configuration -kubectl delete service -n tigera-eck-operator elastic-webhook-service -``` - -As a last resort, create a new [Elasticsearch cluster](#how-to-create-a-new-cluster). 
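-
-To check explicitly for duplicate or leftover ReplicaSets and StatefulSets before resorting to that, you can list them in both namespaces, for example:
-
-```bash
-kubectl get replicasets,statefulsets -n tigera-elasticsearch
-kubectl get replicasets,statefulsets -n tigera-kibana
-```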
- -### Elasticsearch is slow - -**Solution/workaround**: Start with diagnostics using the Kibana monitoring dashboard. Then, check the QoS of your LogStorage custom resource to see if it is causing throttling (or via the Kubernetes node itself). If the shard count is high, close old shards. Also, another option is to increase the Elasticsearch [CPU and memory](../../reference/installation/api.mdx#logstoragespec). - -### Elasticsearch crashes during booting - -**Solution/workaround**: Disk provisioners can have issues where the disk does not allow write requests by the Elasticsearch user. Check the logs of the init containers. - -### Kibana dashboard is missing - -**Solution/workaround**: Verify that the intrusion detection job is running, or try removing and reapplying: - -``` -kubectl get intrusiondetections -o yaml > intrusiondetection.yaml - -kubectl delete -f intrusiondetection.yaml -intrusiondetection.operator.tigera.io "tigera-secure" deleted - -kubectl apply -f intrusiondetection.yaml -intrusiondetection.operator.tigera.io/tigera-secure created -``` - -### Elastic Operator OOM killed - -**Solution/workaround**: Increase the memory requests/limits for the Elastic Operator in the LogStorage Custom Resource. - -``` -kubectl edit logstorage tigera-secure -``` - -Find the `ECKOperator` Component Resource in the `spec` section. Increase the limits and requests memory amounts as needed. Verify that the pod has restarted with the new settings: - -``` -kubectl describe pod elastic-operator -n tigera-eck-operator -``` - -Check the `Container.Limits` and `Container.Requests` fields to confirm the values have propagated correctly. diff --git a/calico-enterprise_versioned_docs/version-3.19-2/observability/get-started-cem.mdx b/calico-enterprise_versioned_docs/version-3.19-2/observability/get-started-cem.mdx deleted file mode 100644 index 524e513a52..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/observability/get-started-cem.mdx +++ /dev/null @@ -1,178 +0,0 @@ ---- -description: Tour the main features of the web console. ---- - -# Web console tutorial - -## What you will learn - -- Web console features and controls -- How to gain visibility into clusters - -Let's go through each item in the web console left navbar from top to bottom. You can follow along using any cluster. - -## Dashboard - -> From the left navbar, click Dashboards. - -The Dashboard provides a birds-eye view of cluster activity. Note the following: - -- The filter panel at the top lets you change dashboard views and the time range. -- The **Layout Settings** shows the default metrics. To get WireGuard metrics for pod-to-pod and host-to-host encryption, you must [enable WireGuard](../compliance/encrypt-cluster-pod-traffic.mdx). -- For application-related dashboard cards to show data, like HTTP Response Codes or Url Requests, you need to [configure L7 logs](elastic/l7/configure.mdx). - -![dashboards](/img/calico-enterprise/dashboards.png) - -## Service Graph - -> From the left navbar, select **Service Graph**, **Default** - -Service Graph provides a point-to-point, topographical representation of network traffic within your cluster. It is the primary tool for visibility and troubleshooting. - -![service-graph](/img/calico-enterprise/service-graph.png) - -To learn more about Service Graph, see [Network visualization](visualize-traffic.mdx). - -## Policies - -> From the left navbar, click **Policies**. - -Network policy is the primary tool for securing a Kubernetes network. 
Policy is used to restrict network traffic (egress and ingress) in your cluster so only the traffic that you want to flow is allowed. $[prodname] supports these policies: - -- $[prodname] network policy -- $[prodname] global network policy -- Kubernetes policy - -$[prodname] uses **tiers** (also called, hierarchical tiers) to provide guardrails for managing network policy across teams. Policy tiers allow users with more authority (for example, Dev/ops users) to enforce network policies that take precedence over teams (for example, service owners and developers). - -**Policies Board** is the default view for managing tiered policies. - -![policy-board](/img/calico-enterprise/policy-board.png) - -Users typically use a mix of Policy Board and YAML files. Note that you can export one or all policies in a tier to YAML. - -The **Policy Board filter** lets you filter by policy types and label selectors. - -![policy-filters](/img/calico-enterprise/policy-filters.png) - -The following features provide more security and guardrails for teams. - -**Recommended a policy** - -> In Policies Board, click **Recommend a policy**. - -One of the first things you'll want to do after installation is to secure unprotected pods/workloads with network policy. (For example, Kubernetes pods allow traffic from any source by default.) The Recommend a policy feature generates policies that protect specific endpoints in the cluster. Users with minimal experience with network policy can easily get started. - -![recommend-policy](/img/calico-enterprise/recommend-policy.png) - -**Policy stage** - -When you create a policy, it is a best practice to stage it to evaluate the effects before enforcing it. After you verify that a staged network policy is allowing traffic as expected, you can enforce it. - -![stage-policy](/img/calico-enterprise/stage-policy.png) - -**Preview** - -When you edit a policy, you can select **Preview** to see how changes may affect existing traffic. - -![policy-preview](/img/calico-enterprise/policy-preview.png) - -## Endpoints - -> From the left navbar, click **Endpoints**. - -**Endpoint Details** - -This page is a list of all pods in the cluster (also known as workload endpoints). - -![endpoints](/img/calico-enterprise/endpoints.png) - -**Node List** - -This page lists all nodes associated with your cluster. - -![node-list](/img/calico-enterprise/node-list.png) - -## Network Sets - -Network sets and global network sets are $[prodname] resources for defining IP subnetworks/CIDRs, which can be matched by standard label selectors in policy (Kubernetes or $[prodname]). They are a powerful feature for use/reuse and scaling policy. - -A simple use case is to limit traffic to/from external networks. For example, you can create a global network set with "deny-list CIDR ranges 192.0.2.55/32 and 203.0.113.0/24", and then reference the network set in a global network policy. This also allows you to see this traffic in Service Graph. - -![networksets](/img/calico-enterprise/networksets.png) - -## Managed clusters - -If you have configured $[prodname] for multi-cluster management, you will see the **Managed clusters** option in the left navbar. - -> From the left navbar, click **Managed clusters**. - -This page is where you switch views between clusters in the web console. When you connect to a different cluster, the entire web console view changes to reflect the selected cluster. 
- -![managed-clusters](/img/calico-enterprise/managed-clusters.png) - -## Compliance Reports - -> From the left navbar, click **Compliance**. - -Compliance tools that rely on periodic snapshots, do not provide accurate assessments of Kubernetes workloads against your compliance standards. $[prodname] compliance dashboard and reports provide a complete inventory of regulated workloads, along with evidence of enforcement of network controls for these workloads. Additionally, audit reports are available to see changes to any network security controls. - -**Compliance reports** are based on archived flow logs and audit logs for all $[prodname] resources, and audit logs for Kubernetes resources in the Kubernetes API server. - -![cis-benchmark](/img/calico-enterprise/cis-benchmark.png) - -Using the filter, you can select report types. - -![compliance-filter](/img/calico-enterprise/compliance-filter.png) - -## Activity - -> From the left navbar, select **Activity**, **Timeline**. - -**Timeline** - -What changed, who did it, and when? This information is critical for security. Native Kubernetes doesn’t provide an easy way to capture audit logs for pods, namespaces, service accounts, network policies, and endpoints. The $[prodname] timeline provides audit logs for all changes to network policy and other resources associated with your $[prodname] deployment. - -![timeline](/img/calico-enterprise/timeline.png) - -> From the left navbar, selection **Activity**, **Alerts**. - -**Alerts** - -How do you know if you have an infected workload? A possible threat? $[prodname] detects and alerts on unexpected network behavior that may indicate a security breach. You can create alerts for: - -- Known attacks and exploits (for example, exploits found at Shopify, Tesla, Atlassian) -- DOS attempts -- Attempted connections to botnets and command and control servers -- Abnormal flow volumes or flow patterns based on machine learning - -![alerts](/img/calico-enterprise/alerts.png) - -As shown, there are many types of alerts you can enable. None are enabled by default. - -## Logs - -$[prodname] includes a fully-integrated deployment of Elastic to collect flow log data that drives key features like the Flow Visualizer, metrics in the Dashboard and Policy Board, policy automation, and testing features and security. $[prodname] also embeds Kibana so you can view raw log data for the traffic within your cluster. - -> From the left navbar, click **Logs**. - -**Dashboards** - -$[prodname] comes with built-in dashboards. - -![kibana-dashboards](/img/calico-enterprise/kibana-dashboards.png) - -**Log data** - -Kibana provides its own set of filtering capabilities to drill down into log data. For example, use filters to drill into flow log data for specific namespaces and pods. Or view details and metadata for a single flow log entry. - -![kibana](/img/calico-enterprise/kibana.png) - -## Threat feeds - -You can add threat intelligence feeds to $[prodname] to trace network flows of suspicious IP addresses and domains. Then, you can use network policy to block pods from contacting IPs or domains. 
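-
-For example, a feed of suspicious IP addresses can be added with a `GlobalThreatFeed` resource. The following is a minimal sketch only; the feed name, URL, and label are placeholders, and the full set of fields is covered in the threat feed how-to guides:
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: GlobalThreatFeed
-metadata:
-  name: sample-ip-deny-list
-spec:
-  content: IPSet
-  pull:
-    period: 24h
-    http:
-      url: https://example.com/ip-deny-list.txt
-  globalNetworkSet:
-    labels:
-      threat-feed: sample-ip-deny-list
-```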
- -Now that you understand the basics, we recommend the following: - -- [Get started with tiered network policy](../network-policy/policy-tiers/tiered-policy.mdx) -- [Get started with network sets](../network-policy/networksets.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/observability/index.mdx b/calico-enterprise_versioned_docs/version-3.19-2/observability/index.mdx deleted file mode 100644 index bc6d57fc27..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/observability/index.mdx +++ /dev/null @@ -1,58 +0,0 @@ ---- -description: Use Elasticsearch logs for visibility into all network traffic with Kubernetes context. -hide_table_of_contents: true ---- - -import { DocCardLink, DocCardLinkLayout } from '/src/___new___/components'; - -# Observability and troubleshooting - -See what's going on in your cluster with network observability tools and detailed logging. - -## Getting started - - - - - - - - - -## Getting started with logs - - - - - - - - - - - -## Flow logs - - - - - - - - - - - -## DNS logs - - - - - - -## L7 logs - - - - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/observability/iptables.mdx b/calico-enterprise_versioned_docs/version-3.19-2/observability/iptables.mdx deleted file mode 100644 index 0ffdb96aca..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/observability/iptables.mdx +++ /dev/null @@ -1,61 +0,0 @@ ---- -description: Learn how policy audit mode rules can affect the number of iptables logs. ---- - -# iptables logs - -## About iptables logs - -iptables logs are produced by [policy audit mode](#policy-audit-mode) or by using the `Log` action in either -[Network Policy](../reference/resources/networkpolicy.mdx) or [Global Network Policy](../reference/resources/globalnetworkpolicy.mdx). -These logs are written to syslog (specifically the `/dev/log` socket) on the nodes where the events are generated. -Collection, rotation and other management of these logs is provided by your syslog agent, for example, journald or rsyslogd. - -## Policy audit mode - -$[prodname] adds a Felix option `DropActionOverride` that configures how the -`deny` `action` in a [Rule](../reference/resources/networkpolicy.mdx#rule) is interpreted. -It can add logs for denied packets, or even allow the traffic through. - -See the -[Felix configuration reference](../reference/component-resources/node/felix/configuration.mdx#calico-enterprise-specific-configuration) for -information on how to configure this option. - -`DropActionOverride` controls what happens to each packet that is denied by -the current $[prodname] policy, i.e., by the ordered combination of all the -configured policies and profiles that apply to that packet. It may be -set to one of the following values: - -- `Drop` -- `Accept` -- `LogAndDrop` -- `LogAndAccept` - -Normally the `Drop` or `LogAndDrop` value should be used, as dropping a -packet is the obvious implication of that packet being denied. However when -experimenting, or debugging a scenario that is not behaving as you expect, the -"Accept" and "LogAndAccept" values can be useful: then the packet will be -still be allowed through. 
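For example, a minimal sketch of a debugging session might temporarily log-and-accept denied packets cluster-wide; this assumes the `default` FelixConfiguration is the one you want to change (the per-node form of the same patch appears later in this section), and you should revert the setting when you are done:

```bash
# Temporarily log denied packets instead of dropping them while debugging.
# Revert to Drop (or LogAndDrop) when finished.
kubectl patch felixconfiguration.p default --type='merge' -p \
  '{"spec":{"dropActionOverride":"LogAndAccept"}}'
```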
- -When one of the `LogAnd*` values is set, each denied packet is logged in -syslog, with an entry like this: - -``` -May 18 18:42:44 ubuntu kernel: [ 1156.246182] calico-drop: IN=tunl0 OUT=cali76be879f658 MAC= SRC=192.168.128.30 DST=192.168.157.26 LEN=60 TOS=0x00 PREC=0x00 TTL=62 ID=56743 DF PROTO=TCP SPT=56248 DPT=80 WINDOW=29200 RES=0x00 SYN URGP=0 MARK=0xa000000 -``` - -Note that [Denied Packet Metrics](../operations/monitor/metrics/index.mdx) are independent of the `DropActionOverride` -setting. Specifically, if packets that would normally be denied are being -allowed through by a setting of `Accept` or `LogAndAccept`, those packets -still contribute to the denied packet metrics as normal. - -For example, to set a `DropActionOverride` for `myhost` to log then drop denied packets: - -Edit the FelixConfiguration object for the `myhost` Node. - -```bash -kubectl patch felixconfiguration.p node.myhost --type='merge' -p \ - '{"spec":{"dropActionOverride":"LogAndDrop"}}' -``` - -For a global setting, modify the `default` FelixConfiguration resource. diff --git a/calico-enterprise_versioned_docs/version-3.19-2/observability/kibana.mdx b/calico-enterprise_versioned_docs/version-3.19-2/observability/kibana.mdx deleted file mode 100644 index 04eabf3e92..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/observability/kibana.mdx +++ /dev/null @@ -1,125 +0,0 @@ ---- -description: Learn the basics of using Elasticsearch logs and Kibana to gain visibility and troubleshoot. ---- - -# Kibana dashboards and logs - -## Kibana - -Kibana is the frontend for $[prodname] Elasticsearch, which is the logging infrastructure that centrally stores logs from all managed clusters. Kibana provides an interface to explore Elasticsearch logs and gain insights into workload communication traffic volume, performance, and other key aspects of cluster operations. Log data is also summarized in custom dashboards. - -The following logs are generated by $[prodname]. All logs are enabled by default except **l7 logs**, which must be explicitly enabled. - -| Log type | **Description** | Index in Kibana | -| -------- | ----------------------------------------------------------------------------------------------------------------- | ------------------------ | -| flow | Layer 3/4 network flows for workloads: source and destination namespaces, pods, labels, and policies | tigera_secure_ee_flows\* | -| l7 | Layer 7 network flows for workloads | tigera_secure_ee_l7\* | -| audit | Audit logs for $[prodname] resources | tigera_secure_ee_audit\* | -| bgp | $[prodname] networking BGP peering and route propagation. | tigera_secure_ee_bgp.\* | -| dns | DNS lookups and responses from $[prodname] domain-based policy. | tigera_secure_ee_dns\* | -| events | $[prodname] intrusion detection events: suspicious IPs, suspicious domains, and global alerts | tigera_secure_ee_events\*| - -## Start Kibana and access dashboards - -In the web console, from the left navbar select, **Kibana**. A new browser tab opens into Kibana. - -In Kibana, click the hamburger icon in the top left corner, and select **Analytics**, **Dashboard**. - -![kibana-dashboard](/img/calico-enterprise/kibana-dashboard.png) - -A list of curated dashboards is displayed. Note that some log types do not have a default dashboard (`bgp` and `events`). 
- -### DNS dashboard - -![dns-dashboard](/img/calico-enterprise/dns-dashboard.png) - -The DNS dashboard summarizes DNS data and logs into metrics, providing high-level information on the types of DNS lookups made, responses, and overall DNS performance. By default, DNS activity logs are captured only for requests/responses from Kubernetes built-in DNS services (CoreDNS). DNS activity to an external DNS server can be captured by configuring the parameter, `dnsTrustedServers` in [Felix](../reference/resources/felixconfig.mdx). DNS activity to Node local server is not supported. - -The dashboard provides the following metrics/data, which can be edited as required. - -| Metric/data | Description | -| ------------------------------- | ------------------------------------------------------------------- | -| DNS total requests | Cumulative DNS requests over the reporting period. Default: 24hrs. | -| DNS requests | Type of DNS request. | -| DNS responses | DNS response codes which may indicate issues with specific lookups. | -| DNS Top 10 external domains | Count of top domains in lookups. | -| DNS internal query | Lookups within the Kubernetes cluster. | -| DNS external query | Lookups to non-cluster domains. | -| DNS Latency | Measured latency which can indicate DNS issues. | -| DNS internal queries by service | Top types of requests within the cluster per service. | -| DNS external queries by service | Top types of requests external to the cluster per service. | -| DNS response code by service | Top DNS response codes per client. | -| DNS query count by server | Volume of DNS traffic per DNS server. | -| DNS transfer by service | Volume of DNS traffic per service. | -| DNS logs | Raw DNS logs. | - -### L7 HTTP dashboard - -![l7-dashboard](/img/calico-enterprise/l7-dashboard.png) - -The L7 HTTP dashboard provides application performance metrics for inscope Kubernetes services. The data can assist service owners and platform personnel in assessing the health of cluster workloads without the need for a full service mesh. [L7 logs](elastic/l7/configure.mdx) are not enabled by default, and must be configured. - -The default metrics are: - -- L7 HTTP requests -- L7 all services -- L7 HTTP duration -- L7 HTTP methods -- L7 HTTP response codes -- L7 HTTP request duration -- L7 HTTP requests over time -- L7 HTTP method by service -- L7 HTTP response by service -- L7 HTTP bytes transferred -- L7 Top URLs -- L7-search (raw HTTP logs) - -### Tigera Secure EE audit logs dashboard - -![audit-logs-dashboard](/img/calico-enterprise/audit-logs-dashboard.png) - -The Tigera Secure EE audit logs dashboard provides historical events of changes made to your deployment. These events can be used to understand updates to resources, privileged access and actions, and can also help demonstrate compliance for different regulatory concerns. - -Audit logs listed in the section, `audit-search` can be expanded by clicking on the triangular expand icon, which presents the log in Table format by default. Clicking on JSON in the Expanded document displays the same log in JSON format. The logs can be filtered in the Audit Filtering Controls. - -### Tigera Secure EE flow logs dashboard - -![flow-logs-dashboard](/img/calico-enterprise/flow-logs-dashboard.png) - -The Tigera Secure EE flow Logs dashboard lets you analyze flow logs using the filter options in the Flow Filtering window. The flow logs matching the applied filter are displayed below in the flow logs window. 
To review a specific flow log in detail, click the triangular expand icon to the left of the flow. - -The full flow log is now displayed in Tabular format by default. To view the log in JSON format click the JSON header. - -![flow-logs-dashboard](/img/calico-enterprise/flow-logs-json.png) - -### Tigera Secure EE Tor-VPN logs - -![tor-vpn-dashboard](/img/calico-enterprise/tor-vpn-dashboard.png) - -Tor and VPN-based traffic indicate the use of anonymization techniques in an attempt to mask the origins and destination of network traffic. $[prodname] has built-in capabilities to assist with detecting such traffic and requires minimal [configuration](../threat/tor-vpn-feed-and-dashboard.mdx) to activate. - -Once enabled, the Tigera Secure EE Tor-VPN logs dashboard can provide a view into any traffic to/from Tor and VPN gateways. The information quickly provides InfoSec teams and operators a focused view on anonymization-based traffic patterns. The reported flows can be filtered in the Tor-VPN controls window and the flow logs for inscope traffic can be reviewed in the Tor-VPN-search window. - -### Honeypod dashboard - -![honeypods-dashboard](/img/calico-enterprise/honeypods-dashboard.png) - -The Honeypod dashboard returns information for any workloads that connect to the Honeypod resources. Honeypods are decoys explicitly deployed to detect malicious actors who are attempting lateral movement within a cluster as a way of discovering valuable assets; this is a very credible indicator of compromise (IoC). [Honeypod resources](../threat/honeypods.mdx) must be configured to capture Honeypod information. - -The Honeypod dashboard returns cluster-level information on workloads that have connected to Honeypod resources. These events also generate [GlobalAlerts](../reference/resources/globalalert.mdx), which populate the Alerts table in the web console. - -## Create custom filters and queries - -Each dashboard has advanced filtering options if pre-built dashboards are insufficient. For example: - -- To build a query from all fields available in the logs, click **Add Filter** - -- To create a manual query, click **Search** (next to the disk icon on the left). The following example shows a query `process_name :*curl*` for the `process_name field` matching glob pattern, _curl_. Only logs where `field process_name` contains the string `curl` are filtered. - -![custom-search](/img/calico-enterprise/custom-search.png) - -## View logs by indices - -To view logs by indices, click the hamburger menu, select **Analytics**, and click **Discover**. - -![all-flow-logs](/img/calico-enterprise/all-flow-logs.png) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/observability/kube-audit.mdx b/calico-enterprise_versioned_docs/version-3.19-2/observability/kube-audit.mdx deleted file mode 100644 index 60ec22165a..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/observability/kube-audit.mdx +++ /dev/null @@ -1,281 +0,0 @@ ---- -description: Enable Kubernetes audit logs on changes to Kubernetes resources. ---- - -# Kubernetes audit logs - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - -## Big picture - -Enable Kubernetes audit logs so security teams and auditors can see all the changes to Kubernetes resources over time. - -## Concepts - -### About Kubernetes audit logs - -**Kubernetes resources** are used in $[prodname] compliance reports and other audit-related features, but they are not enabled by default. 
You must enable Kubernetes resources through the Kubernetes API server. If you miss this step, some compliance reports will not work, and audit trails will not provide a complete view to your security team. - -You must enable the following Kubernetes resources for each cluster: - -- Pod -- Namespace -- Service account -- Network policy -- Endpoints - -### Audit logs in the web console - -Like $[prodname] audit logs, Kubernetes audit logs are displayed in the web console in the Timeline dashboard, Kibana dashboard (indexed by, `tigera_secure_ee_audit_kube`), and provide the core data for compliance reports. - -## Before you begin - -**Unsupported** - -- AKS -- GKE -- OpenShift -- TKG - -## How to - -Enable Kubernetes audit logs in the Kubernetes API server: - - - - -### Enable audit logs for Kubernetes resources - -At a minimum, enable audit logs for these resources that are involved in network policy: - -- Pod -- Namespace -- ServiceAccount -- NetworkPolicy (Kubernetes/OpenShift) -- Endpoints - -**Sample policy** - -The following sample policy audits changes to Kubernetes Pod, Namespace, ServiceAccount, Endpoints and NetworkPolicy resources. To add other audit logs for resources beyond network policy, see the [Kubernetes docs](https://kubernetes.io/docs/tasks/debug/debug-cluster/audit/), or review this function for inspiration (which generates the GKE audit policy). - -```yaml -apiVersion: audit.k8s.io/v1beta1 -kind: Policy -omitStages: - - RequestReceived -rules: - - level: RequestResponse - verbs: - - create - - patch - - update - - delete - resources: - - group: networking.k8s.io - resources: ['networkpolicies'] - - group: extensions - resources: ['networkpolicies'] - - group: '' - resources: ['pods', 'namespaces', 'serviceaccounts', 'endpoints'] -``` - -### Enable Kubernetes audit logs for $[prodname] - -The following updates require a restart to the Kubernetes API Server. - -To enable Kubernetes resource audit logs to be read by $[prodname] in fluentd, follow these steps. - -On the Kubernetes API Server, update these flags. - -- `--audit-log-path=/var/log/calico/audit/kube-audit.log` -- `--audit-policy-file=` - For help with flags, see kube-apiserver flags. For help with audit logging, see Kubernetes audit logging documentation. - -Distribute the audit policy file to all control plane nodes, ensuring that it is available to the Kubernetes API server (e.g. by volume mounting it into the pods). - -Restart the Kubernetes API server. The restart command depends on how you installed Kubernetes. - - - - -### Enable audit logs for Kubernetes resources - -At a minimum, enable audit logs for these resources that are involved in network policy: - -- Pod -- Namespace -- ServiceAccount -- NetworkPolicy (Kubernetes/OpenShift) -- Endpoints - -**Sample policy** - -The following sample policy audits changes to Kubernetes Pod, Namespace, ServiceAccount, Endpoints and NetworkPolicy resources. To add other audit logs for resources beyond network policy, see the [Kubernetes docs](https://kubernetes.io/docs/tasks/debug/debug-cluster/audit/), or review this function for inspiration (which generates the GKE audit policy). 
- -```yaml -apiVersion: audit.k8s.io/v1beta1 -kind: Policy -omitStages: - - RequestReceived -rules: - - level: RequestResponse - verbs: - - create - - patch - - update - - delete - resources: - - group: networking.k8s.io - resources: ['networkpolicies'] - - group: extensions - resources: ['networkpolicies'] - - group: '' - resources: ['pods', 'namespaces', 'serviceaccounts', 'endpoints'] -``` - -### Enable audit logs in EKS - -Amazon EKS writes Kubernetes audit logs to [Amazon Cloudwatch logs](https://aws.amazon.com/cloudwatch/). - -1. In the EKS management console, access your EKS cluster. -1. Under **Logging**, click **Update**. -1. Enable the **Audit** option, and click **Update**. - Audit Log -1. Wait for the update to complete. - The blue progress bar at the top of the page displays the message, “Cluster config update in progress.” -1. Under **Logging**, **Cloudwatch**, make a note of the URL value for a later step, then click the link. - Cloudwatch Logging -1. In the **Log Streams** list, make note of the common prefix (for example, kube-apiserver-audit) for a later step. - Log Streams -1. Make note of the region where the cluster is hosted (for example, `us-west-2`) for a later step. - -### Create a restricted AWS user for compliance reporting - -1. Go to the **AWS IAM console** and add a user. -1. On the **Add user** page, make these changes: - - a. Select **Access type**, **Programmatic access**. - - Programmatic access - - b. In the Set permissions section, select the policy, `CloudWatchLogsReadOnlyAccess` to set read only permissions. - - Cloudwatch URL - -1. Optional: In the **Add tags** section, add a tag for the user based on your cluster information. -1. Click **Submit** to create a restricted user. - -### Update $[prodname] log collector with EKS values - -1. Update the `tigera-secure` LogCollector resource with values from the EKS configuration. - - where: - - - `additionalSources`: Section where EKS Cloudwatch logs are specified. - - `eksCloudwatchLog`: Configuration section containing EKS Cloudwatch logs. - - `fetchInterval`: Interval in seconds for $[prodname] to get logs from Cloudwatch. Default: 60 seconds, this fetches 1MB every 60 seconds, adjust it based number on CRUD operations performed on cluster resource. - - `groupName`: Name of the `Log Group` (value from "Enable audit logs in EKS") - - `region`: AWS region where EKS cluster is hosted (value from "Enable audit logs in EKS") - - `streamPrefix`: Prefix of `Log Stream` (value from "Enable audit logs in EKS") - - **Example** - - ```yaml - apiVersion: operator.tigera.io/v1 - kind: LogCollector - metadata: - name: tigera-secure - spec: - additionalSources: - eksCloudwatchLog: - fetchInterval: 60 - groupName: /aws/eks/mitch-eks-kube-audit-log-forwarder/cluster - region: us-west-2 - streamPrefix: kube-apiserver-audit- - status: - state: Ready - ``` - -### Configure authentication between $[prodname] and Cloudwatch logs - -In this step, you add AWS authentication information to enable $[prodname] to get logs from the EKS Cloudwatch instance. - -Add a Secret with the name, `tigera-eks-log-forwarder-secret` in the namespace, `tigera-operator`, and the AWS [Security Credentials](https://docs.aws.amazon.com/general/latest/gr/aws-sec-cred-types.html) in the data section. 
- -```yaml -apiVersion: v1 -kind: Secret -metadata: - name: tigera-eks-log-forwarder-secret - namespace: tigera-operator -type: Opaque -data: - aws-id: $(echo -n | base64 -w0) - aws-key: $(echo -n | base64 -w0) -``` - - - - -### Enable audit logs for Kubernetes resources - -At a minimum, enable audit logs for these resources that are involved in network policy: - -- Pod -- Namespace -- ServiceAccount -- NetworkPolicy (Kubernetes/OpenShift) -- Endpoints - -**Sample policy** - -The following sample policy audits changes to Kubernetes Pod, Namespace, ServiceAccount, Endpoints and NetworkPolicy resources. To add other audit logs for resources beyond network policy, see the [Kubernetes docs](https://kubernetes.io/docs/tasks/debug/debug-cluster/audit/), or review this function for inspiration (which generates the GKE audit policy). - -```yaml -apiVersion: audit.k8s.io/v1beta1 -kind: Policy -omitStages: - - RequestReceived -rules: - - level: RequestResponse - verbs: - - create - - patch - - update - - delete - resources: - - group: networking.k8s.io - resources: ['networkpolicies'] - - group: extensions - resources: ['networkpolicies'] - - group: '' - resources: ['pods', 'namespaces', 'serviceaccounts', 'endpoints'] -``` - -Follow these instructions to enable audit logs for [AWS using kOps](https://kops.sigs.k8s.io/cluster_spec/#audit-logging). - -Note that `auditLogPath` should be `/var/log/calico/audit/kube-audit.log`. - - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/observability/packetcapture.mdx b/calico-enterprise_versioned_docs/version-3.19-2/observability/packetcapture.mdx deleted file mode 100644 index 52885f381d..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/observability/packetcapture.mdx +++ /dev/null @@ -1,322 +0,0 @@ ---- -description: Capture live traffic for debugging microservices and application interaction. ---- - -# Packet capture - -## Big picture - -Capture live traffic inside a Kubernetes cluster, and export to visualization tools like Wireshark for troubleshooting and debugging applications. - -## Value - -$[prodname] packet capture is implemented in a Kubernetes-native way so you can troubleshoot service/application connectivity issues and performance issues. You can start a packet capture in the web console Service Graph, or using the CLI. - -Packet capture integration with **Service Graph** makes it very easy to capture traffic for a specific namespace, service, replica set, daemonset, statefulset, or pod. Just right-click on an endpoint to start or schedule a capture, and then download capture files to your favorite visualization tool like WireShark. - -With $[prodname] packet capture you can: - -- Run packet capture whenever you want (available 24/7) -- Preschedule packet captures to start and stop when needed -- Customize packet captures by port and protocol -- Share packet capture jobs - -**Demos and blogs** - -- [Video: packet capture demo](https://www.tigera.io/features/packet-capture/) -- [Troubleshooting microservices with Dynamic Packet Capture](https://thenewstack.io/faster-troubleshooting-with-dynamic-packet-capture/) - -## Concepts - -## About packet capture - -Typically, when you troubleshoot microservices and applications for connectivity issues or slow performance, you run a traditional packet capture tool like **tcpdump** against a container in a pod. But live troubleshooting in an ephemeral Kubernetes environment is tricky; problems do not last a long time, and happen randomly. 
So you need to be very fast to capture meaningful information to determine root causes. $[prodname] makes it easy with these basic steps: - -1. Determine the workload(s) you want to capture. -1. Start/schedule a packet capture job in Service Graph (Manager UI) or the CLI. -1. After the capture is finished, download the packet capture files (known as `pcap` files), and import them into your analysis tool (for example, WireShark). - -For a simple use case workflow, see [Faster troubleshooting of microservices, containers, and Kubernetes with Dynamic Packet Capture](https://www.tigera.io/blog/faster-troubleshooting-of-microservices-containers-and-kubernetes-with-dynamic-packet-capture/). - -## Before you begin - -**Not supported** - -- Capturing traffic from host networked pods or host endpoints -- Capturing traffic from pods with multiple interfaces -- Capturing traffic for pods running on Windows hosts - -## How To - -- [Packet capture in Service Graph](#packet-capture-in-service-graph) -- [Packet capture using the command line](#packet-capture-using-the-command-line) -- [Store and rotate capture files](#store-and-rotate-capture-files) -- [Enforce RBAC for capture tasks for CLI users](#enforce-rbac-for-capture-tasks-for-cli-users) - -### Packet capture in Service Graph - -1. Select an endpoint from the service graph (for example, namespace, service, replica set, daemonset, statefulset, or pod), right-click, and select **Initiate packet capture**. - - ![start-capture](/img/calico-enterprise/start-capture.png) - -1. Schedule the capture to run now or at a later time, and click **Run**. - - ![schedule-pc](/img/calico-enterprise/schedule-pc.png) - -1. From the **Capture Jobs** tab in the bottom panel, the Status field will show that status, "Capturing". Scroll to the right, and click the drop-down menu for options to stop and manage captures. - - ![capture-menu](/img/calico-enterprise/capture-menu.png) - -### Packet capture using the command line - -This section provides examples of using the CLI to manage packet capture jobs and pcap files. - -**Create a PacketCapture resource** - -**Example: All pods in a namespace** - -This example captures traffic for all pods in the `sample` namespace. - -```yaml -apiVersion: projectcalico.org/v3 -kind: PacketCapture -metadata: - name: sample-capture-all - namespace: sample -spec: - selector: all() -``` - -**Example: All pods in a namespace matching a label and selector** - -This example captures traffic on all pods in the namespace `sample`, with the label `k8s-app`, equal to `nginx`. - -```yaml -apiVersion: projectcalico.org/v3 -kind: PacketCapture -metadata: - name: sample-capture-nginx - namespace: sample -spec: - selector: k8s-app == "nginx" -``` - -**Example: All pods in a namespace, TCP traffic only** - -This example captures traffic on all pods in the `sample` namespace, but only for TCP traffic. - -```yaml -apiVersion: projectcalico.org/v3 -kind: PacketCapture -metadata: - name: sample-capture-all - namespace: sample -spec: - selector: all() - filters: - - protocol: TCP -``` - -**Start a packet capture job, now** - -To start a packet capture job immediately, use the following command: - -```bash -kubectl apply -f -``` - -**Schedule a packet capture job** - -You can schedule a packet capture job to start and/or stop at a specific time using RFC3339 format. In the following example, a traffic capture job is scheduled for 10 minutes, between 00:30 UTC and 00:40 UTC for all pods in the sample namespace. 
- -```yaml -apiVersion: projectcalico.org/v3 -kind: PacketCapture -metadata: - name: sample-capture-all - namespace: sample -spec: - selector: all() - startTime: '2021-09-08T00:30:00Z' - endTime: '2021-09-08T00:40:00Z' -``` - -**Monitor status of packet capture job** - -After you start a capture job, it cycles through these states: Scheduled (if applicable), WaitingForTraffic, Capturing, and Finished. To monitor the status of a PacketCapture, use the following command: - -```bash -kubectl get packetcaptures -A -``` - -**Stop a packet capture job** - -To stop a capture job immediately, update the PacketCapture resource by setting the `endTime` to the current time (or earlier). - -**Stop a packet capture job, and delete the capture file from the cluster** - -```bash -kubectl delete -f -``` - -**Delete a packet capture job** - -```bash -kubectl delete -f -``` - -**Find packet capture files** - -To find generated capture files, query the status of the PacketCapture: - -```bash -kubectl get packetcaptures -n -o yaml -``` - -```bash -export NS= -export NAME= -``` - -**Sample output** - -```yaml -apiVersion: projectcalico.org/v3 -kind: PacketCapture -metadata: - name: sample-capture-all - namespace: sample -spec: - selector: all() -status: - files: - - directory: /var/log/calico/pcap - fileNames: - - pod_cali.pcap - node: node-0 - state: Capturing -``` - -**Get packet capture files from pods** - -Get the pod on the node with the packet capture that you want. - -```bash -kubectl get pods -n tigera-fluentd --no-headers --field-selector spec.nodeName="" -``` - -Copy the packet capture using the pod information. - -```bash -kubectl cp tigera-fluentd/:var/log/calico/pcap/sample/sample-capture/ . -``` - -**Delete packet capture files** - -```bash -kubectl exec -it tigera-fluentd/ -- sh -c "rm -r /var/log/calico/pcap/sample/sample-capture/" -``` - -### Store and rotate capture files - -Packet capture files are stored on the host-mounted volume used for calico nodes. The FelixConfiguration resource contains several parameters for storing and rotating capture files. - -**Note**: - -- Capture files are stored using the following directory structure: - `{namespace}/{packet capture resource name}` -- The active packet capture file is identified using the following schema: - `{workload endpoint name}_{host network interface}.pcap` -- Rotated capture file names contain an index matching the rotation timestamp. -- Packet capture files are deleted after the packet capture resource is deleted. - -**Rotate capture files** - -The Felix parameter `captureRotationSeconds` lets you schedule how often saved pcap files are rotated. In the following example, the rotation time is one day. - -```bash -kubectl patch felixconfiguration default -p '{"spec":{"captureRotationSeconds":"86400"}}' -``` - -### Enforce RBAC for capture tasks for CLI users - -Packet capture permissions are enforced using standard Kubernetes RBAC for CLI users, based on Role and RoleBinding resources within a namespace. - -**Example** - -The following Role and RoleBinding show how to allow the user jane to create/delete/get/list/update/watch packet captures for a specific namespace.
- -```yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - namespace: sample - name: tigera-packet-capture-role -rules: - - apiGroups: ['projectcalico.org'] - resources: ['packetcaptures'] - verbs: ['get', 'list', 'watch', 'create', 'update', 'patch', 'delete'] ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: tigera-packet-capture-role-jane - namespace: sample -subjects: - - kind: ServiceAccount - name: jane -roleRef: - kind: Role - name: tigera-packet-capture-role - apiGroup: rbac.authorization.k8s.io -``` - -To allow user jane to access (get and delete) the capture files generated for a specific namespace, a role/role binding similar to the one below can be used: - -```yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: tigera-authentication-clusterrole-jane -rules: - - apiGroups: ['projectcalico.org'] - resources: ['authenticationreviews'] - verbs: ['create'] ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: tigera-authentication-clusterrolebinding-jane -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: tigera-authentication-clusterrole-jane -subjects: - - kind: ServiceAccount - name: jane - namespace: default ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - namespace: sample - name: tigera-capture-files-role -rules: - - apiGroups: ['projectcalico.org'] - resources: ['packetcaptures/files'] - verbs: ['get', 'delete'] ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: tigera-capture-files-role-jane - namespace: sample -subjects: - - kind: ServiceAccount - name: jane - namespace: default -roleRef: - kind: Role - name: tigera-capture-files-role - apiGroup: rbac.authorization.k8s.io -``` diff --git a/calico-enterprise_versioned_docs/version-3.19-2/observability/visualize-traffic.mdx b/calico-enterprise_versioned_docs/version-3.19-2/observability/visualize-traffic.mdx deleted file mode 100644 index 3789381608..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/observability/visualize-traffic.mdx +++ /dev/null @@ -1,95 +0,0 @@ ---- -description: Learn the power of network sets. ---- - -# Network visualization - -> From the left navbar, select **Service Graph**, **Default** - -Service Graph offers tools to visualize and investigate network communications in the cluster. To better understand how -Service Graph works, here is the list of core features and components. - -## Service Graph features and components - -**Namespaces** - -Namespaces are the default view in Service Graph. - -When you click on a namespace to expand the top right panel `<<`, you see detailed information about the traffic to / from -namespaces as well as list of services that belong to the selected namespace. To see service-to-service communications within -the namespace, double-click on the namespace node. - -![service-graph-namespace](/img/calico-enterprise/service-graph-namespace.png) - -**Nodes and edges** - -Lines going to/from nodes are called edges. When you click on a node or edge, the right panel shows details, and the associated flow logs are automatically filtered in the bottom panel. - -![edges](/img/calico-enterprise/edges.png) - -**Layers** - -Layers allow you to create meaningful groupings of resources so you can easily hide and show them on the graph. For example, you can group resources for different platform infrastructure types in your cluster like networking, storage, and logging. 
> Click the panel on the left (`>>`) by the Namespaces breadcrumb, and then expand the Tigera components layer. - -![service-graph-layers](/img/calico-enterprise/service-graph-layers.png) - -The **Tigera components** layer contains namespaces for $[prodname] networking components, and is a view of interest to DevOps teams. - -> Click the vertical ellipsis and select **Hide layer**. Notice that only the business application namespaces remain visible in the graph. - -> To make this layer less prominent instead of hidden, select **Restore layer** and then click **De-emphasize layer**. - -**Service groups** - -A service group is an abstraction mechanism for grouping services into a single node in the namespace view. Services that pass traffic to the same destination are grouped within a service group. - -If services within the service group belong to different namespaces, the name of the node in the namespace view is "\*"; -otherwise the node displays the service group name, which is a string that combines all the unique service names. - -If you double-click on the service group, you can see details of the services, endpoints, and the network communication -links (edges) between them. Be aware that services and the backing endpoints could belong to different namespaces. - -Here is an example showing how services and endpoints are grouped under service groups. Note that the example was created with synthetic traffic data to showcase the service group feature and has no other significance. - -![service-groups](/img/calico-enterprise/service-graph/servicegroups.gif) - - - The image shows Service Graph in namespace view containing a node, "*", which represents not a namespace but an aggregation of service groups from different namespaces. - - -**Logs, alerts, and capture jobs** - -The panel below the graph provides tools for troubleshooting connectivity and performance issues. **Logs** (Flows, DNS, and HTTP) are the foundation of security and observability in $[prodname]. When you select a node or edge in the graph, logs are filtered for the node or service. For example, here is a flow log with details including how the policies were processed in tiers. - -![service-graph-flows](/img/calico-enterprise/service-graph-flows.png) - -**Alerts** - -For convenience, the Alerts tab duplicates the alerts you have enabled in the **Alerts tab** in the left navbar. By default, alerts are not enabled. - -**Capture jobs** - -Service Graph integrates a packet capture feature for capturing traffic for a specific namespace, service, replica set, daemonset, statefulset, or pod. You can then download capture files to your favorite visualization tool like WireShark. - -> Right-click on any endpoint to start or schedule a capture. - -![packet-capture-service](/img/calico-enterprise/packet-capture-service.png) - -**Flow Visualizations** - -> From the left navbar, select **Service Graph**, **Flow Visualizations**. - -Flow Visualizer (also called "FlowViz") is a $[prodname] tool for drilling down into network traffic within the cluster to troubleshoot issues. The most common use of Flow Visualizer is to drill down and pinpoint which policies are allowing and denying traffic between services. - -![flow-viz](/img/calico-enterprise/flow-viz.png) - - -## Visualize traffic to and from a cluster - -With Service Graph, you know the value of seeing pod-to-pod traffic within your cluster. But what about traffic external to your cluster?
To learn how to visualize traffic to and from a cluster, see [Get started with network sets](../network-policy/networksets.mdx). - -![google-networkset](/img/calico-enterprise/google-networkset.png) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/operations/clis/calicoctl/configure/datastore.mdx b/calico-enterprise_versioned_docs/version-3.19-2/operations/clis/calicoctl/configure/datastore.mdx deleted file mode 100644 index 1e53e2c3bc..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/operations/clis/calicoctl/configure/datastore.mdx +++ /dev/null @@ -1,9 +0,0 @@ ---- -description: Sample configuration files for the Kubernetes API datastore. ---- - -# Configure calicoctl to connect to the datastore - -import CliConfigDatastore from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_cli-config-datastore.mdx'; - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/operations/clis/calicoctl/configure/index.mdx b/calico-enterprise_versioned_docs/version-3.19-2/operations/clis/calicoctl/configure/index.mdx deleted file mode 100644 index 6aabb3c936..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/operations/clis/calicoctl/configure/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: Configure the calicoctl to access your datastore. -hide_table_of_contents: true ---- - -# Configure calicoctl - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/operations/clis/calicoctl/configure/overview.mdx b/calico-enterprise_versioned_docs/version-3.19-2/operations/clis/calicoctl/configure/overview.mdx deleted file mode 100644 index 560012fa30..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/operations/clis/calicoctl/configure/overview.mdx +++ /dev/null @@ -1,53 +0,0 @@ ---- -description: Configure calicoctl for datastore access. ---- - -# Configure calicoctl - -## Big picture - -Learn how to configure the calicoctl CLI tool for your cluster. - -## Value - -The `calicoctl` CLI tool provides helpful administrative commands for interacting with a $[prodname] cluster. - -## Concepts - -### Default calicoctl behavior - -Most `calicoctl` commands require access to the $[prodname] datastore. By default, calicoctl -will attempt to read from the Kubernetes API based on the default kubeconfig. - -## How to - -### Configure access using a Configuration file - -By default, `calicoctl` will look for a configuration file at `/etc/calico/calicoctl.cfg`. You can override this using the `--config` option with commands that require datastore access. -The file can be in either YAML or JSON format. It must be valid and readable by `calicoctl`. For example: - -```yaml noValidation -apiVersion: projectcalico.org/v3 -kind: CalicoAPIConfig -metadata: -spec: - datastoreType: "etcdv3" - etcdEndpoints: "http://etcd1:2379,http://etcd2:2379" - ... -``` - -### Configure access using environment variables - -If `calicoctl` cannot locate, read, or access a configuration file, it will check a specific set of environment variables. - -For a full set of options and examples, see [Configure calicoctl to connect to the datastore](datastore.mdx) - -:::note - -When running `calicoctl` inside a container, any environment variables and -configuration files must be passed to the container so they are available to -the process inside. 
It can be useful to keep a running container (one that just sleeps) configured for your datastore; you can then `exec` into the container and work in an already configured environment. - -::: diff --git a/calico-enterprise_versioned_docs/version-3.19-2/operations/clis/calicoctl/index.mdx b/calico-enterprise_versioned_docs/version-3.19-2/operations/clis/calicoctl/index.mdx deleted file mode 100644 index f1373fe5d1..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/operations/clis/calicoctl/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: Install and configure the required CLI for managing Calico Enterprise resources. -hide_table_of_contents: true ---- - -# calicoctl - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/operations/clis/calicoctl/install.mdx b/calico-enterprise_versioned_docs/version-3.19-2/operations/clis/calicoctl/install.mdx deleted file mode 100644 index e951aaa564..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/operations/clis/calicoctl/install.mdx +++ /dev/null @@ -1,277 +0,0 @@ ---- -description: Install the CLI for Calico. ---- - -# Install calicoctl - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; -import CodeBlock from '@theme/CodeBlock'; - -## Big picture - -`calicoctl` allows you to create, read, update, and delete $[prodname] objects -from the command line. These objects represent the networking and policy -of your cluster. - -You should limit access to `calicoctl` and your $[prodname] datastore to -trusted administrators. We discuss methods of limiting access to the -$[prodname] datastore in the [configuration section](configure/index.mdx). - -You can run `calicoctl` on any host with network access to the -$[prodname] datastore as either a binary or a container. -As a binary on a single host, you can also run it as a kubectl plugin. - -{/*- Change download URL to latest release if user browsing master branch. -*/} - -export const version = '$[version]' === 'master' ? 'master' : '$[releaseTitle]'; - -## How to - -:::note - -Make sure you always install the version of `calicoctl` that matches the version of $[prodname] running on your cluster. - -::: - -- [Install calicoctl as a binary on a single host](#install-calicoctl-as-a-binary-on-a-single-host) -- [Install calicoctl as a kubectl plugin on a single host](#install-calicoctl-as-a-kubectl-plugin-on-a-single-host) -- [Install calicoctl as a container on a single host](#install-calicoctl-as-a-container-on-a-single-host) - -### Install calicoctl as a binary on a single host - - - - -Log into the host, open a terminal prompt, and navigate to the location where you want to install the binary. - -:::note - -Consider navigating to a location that's in your `PATH`. For example, `/usr/local/bin/`. - -::: - -Use the following command to download the `calicoctl` binary. - -```bash -curl -o calicoctl -L $[downloadsurl]/ee/binaries/$[releaseTitle]/calicoctl -``` - -Set the file to be executable. - -```bash -chmod +x calicoctl -``` - -:::note - -If the location of `calicoctl` is not already in your `PATH`, move the file to one that is or add its location to your `PATH`. This will allow you to invoke it without having to prepend its location.
- -::: - - - - -Log into the host, open a terminal prompt, and navigate to the location where you want to install the binary. - -:::note - -Consider navigating to a location that's in your `PATH`. For example, `/usr/local/bin/`. - -::: - -Use the following command to download the `calicoctl` binary. - -```bash -curl -o calicoctl -L $[downloadsurl]/ee/binaries/$[releaseTitle]/calicoctl-darwin-amd64 -``` - -Set the file to be executable. - -```bash -chmod +x calicoctl -``` - -:::note - -If you get the error, `cannot be opened because the developer cannot be verified` when using `calicoctl` for the first time, go to `Applications > System Preferences > Security & Privacy` in the `General` tab at the bottom of the window click `Allow anyway`. - -::: - -:::note - -If the location of `calicoctl` is not already in your `PATH`, move the file to one that is or add its location to your `PATH`. This will allow you to invoke it without having to prepend its location. - -::: - - - - -Use the following PowerShell command to download the `calicoctl` binary. - -:::tip - -Consider running PowerShell as administrator and navigating -to a location that's in your `PATH`. For example, `C:\Windows`. - -::: - -```bash -Invoke-WebRequest -Uri "$[downloadsurl]/ee/binaries/$[releaseTitle]/calicoctl-windows-amd64.exe" -OutFile "calicoctl.exe" -``` - - - - -### Install calicoctl as a kubectl plugin on a single host - - - - -Log into the host, open a terminal prompt, and navigate to the location where you want to install the binary. - -:::note - -Consider navigating to a location that's in your `PATH`. For example, `/usr/local/bin/`. - -::: - -Use the following command to download the `calicoctl` binary. - -```bash -curl -o kubectl-calico -L $[downloadsurl]/ee/binaries/$[releaseTitle]/calicoctl -``` - -Set the file to be executable. - -```bash -chmod +x kubectl-calico -``` - -:::note - -If the location of `kubectl-calico` is not already in your `PATH`, move the file to one that is or add its location to your `PATH`. This is required for kubectl to detect the plugin and allow you to use it. - -::: - - - - -Log into the host, open a terminal prompt, and navigate to the location where you want to install the binary. - -:::note - -Consider navigating to a location that's in your `PATH`. For example, `/usr/local/bin/`. - -::: - -Use the following command to download the `calicoctl` binary. - -```bash -curl -o kubectl-calico -L $[downloadsurl]/ee/binaries/$[releaseTitle]/calicoctl-darwin-amd64 -``` - -Set the file to be executable. - -```bash -chmod +x kubectl-calico -``` - -:::note - -If you get the error, "cannot be opened because the developer cannot be verified" when using `calicoctl` for the first time, go to Applications > System Preferences > Security & Privacy in the **General** tab at the bottom of the window click `Allow anyway`. - -::: - -:::note - -If the location of `kubectl-calico` is not already in your `PATH`, move the file to one that is or add its location to your `PATH`. This is required for `kubectl` to detect the plugin. - -::: - - - - -Use the following PowerShell command to download the `calicoctl` binary. - -:::tip - -Consider running PowerShell as administrator and navigating -to a location that's in your `PATH`. For example, `C:\Windows`. - -::: - -```bash -Invoke-WebRequest -Uri "$[downloadsurl]/ee/binaries/$[releaseTitle]/calicoctl-windows-amd64.exe" -OutFile "kubectl-calico.exe" -``` - - - - -Verify the plugin works. 
- -```bash -kubectl calico -h -``` - -You can now run any `calicoctl` subcommands through `kubectl calico`. - -:::note - -If you run these commands from your local machine (instead of a host node), some of the node related subcommands will not work (like node status). - -::: - -### Install calicoctl as a container on a single host - -1. Ensure that you have the [`config.json` file with the private Tigera registry credentials](../../../getting-started/install-on-clusters/calico-enterprise.mdx#get-private-registry-credentials-and-license-key). - -1. From a terminal prompt, use the following command to either create or open the `~/.docker/config.json` file. - - ```bash - vi ~/.docker/config.json - ``` - -1. Depending on the existing contents of the file, edit it in one of the following ways. - - - **New file**: Paste in the entire contents of the `config.json` file from Tigera. - - - **Existing file without quay.io object**: Add the following lines from the `config.json` inside the `"auth"` object. - - ```json - "quay.io": { - "auth": "", - "email": "" - } - ``` - - - **Existing file with quay.io object**: Add the following lines from the `config.json` inside the `"quay.io"` object. - - ```json - "auth": "", - "email": "" - ``` - -1. Save and close the file. - -1. Use the following commands to pull the `calicoctl` image from the Tigera - registry. - - ```bash - docker pull $[registry]$[componentImage.calicoctl] - ``` - -1. Confirm that the image has loaded by typing `docker images`. - - ```bash - REPOSITORY TAG IMAGE ID CREATED SIZE - $[releases.0.components.calicoctl.image] $[releases.0.components.calicoctl.version] e07d59b0eb8a 2 minutes ago 42MB - ``` - -**Next step**: - -[Configure calicoctl to connect to your datastore](configure/index.mdx). \ No newline at end of file diff --git a/calico-enterprise_versioned_docs/version-3.19-2/operations/clis/calicoq/configure/datastore.mdx b/calico-enterprise_versioned_docs/version-3.19-2/operations/clis/calicoq/configure/datastore.mdx deleted file mode 100644 index 501a39a5da..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/operations/clis/calicoq/configure/datastore.mdx +++ /dev/null @@ -1,9 +0,0 @@ ---- -description: Configure CLI to connect to the Kubernetes API datastore. ---- - -# Configure calicoq to connect to the datastore - -import CliConfigDatastore from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_cli-config-datastore.mdx'; - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/operations/clis/calicoq/configure/index.mdx b/calico-enterprise_versioned_docs/version-3.19-2/operations/clis/calicoq/configure/index.mdx deleted file mode 100644 index 1b66095e34..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/operations/clis/calicoq/configure/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: Configure calicoq for Kubernetes API datastore. 
-hide_table_of_contents: true ---- - -# Configure calicoq - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/operations/clis/calicoq/configure/overview.mdx b/calico-enterprise_versioned_docs/version-3.19-2/operations/clis/calicoq/configure/overview.mdx deleted file mode 100644 index 217d2ec6fc..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/operations/clis/calicoq/configure/overview.mdx +++ /dev/null @@ -1,9 +0,0 @@ ---- -description: Configure the CLI to connect to the Kubernetes API datastore. ---- - -import CliConfigIntro from "@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/CliConfigIntro"; - -# Configure calicoq - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/operations/clis/calicoq/index.mdx b/calico-enterprise_versioned_docs/version-3.19-2/operations/clis/calicoq/index.mdx deleted file mode 100644 index da4878fd4c..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/operations/clis/calicoq/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: Install and configure the required CLI for managing Calico Enterprise resources. -hide_table_of_contents: true ---- - -# calicoq - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/operations/clis/calicoq/installing.mdx b/calico-enterprise_versioned_docs/version-3.19-2/operations/clis/calicoq/installing.mdx deleted file mode 100644 index 1ddc94d1fa..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/operations/clis/calicoq/installing.mdx +++ /dev/null @@ -1,110 +0,0 @@ ---- -description: Install the CLI for Calico Enterprise. ---- - -import MaintenanceClisCalicoqInstalling from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/MaintenanceClisCalicoqInstalling'; - -# Install calicoq - -## About installing calicoq - -You can run `calicoq` on any host with network access to the -$[prodname] datastore as either a binary or a container. - -:::note - -Ensure that you install the binary on a Linux machine. The binary does not work on other machines. - -::: - -For step-by-step instructions, refer to the section that -corresponds to your desired deployment. - -- [As a binary on a single host](#install-calicoq-as-a-binary-on-a-single-host) -- [As a container on a single host](#install-calicoq-as-a-container-on-a-single-host) - -{/*- Change download URL to latest release if user browsing master branch. -*/} - -## Install calicoq as a binary on a single host - -1. Log into the host, open a terminal prompt, and navigate to the location where - you want to install the binary. - - :::tip - - Consider navigating to a location that's in your `PATH`. For example, - `/usr/local/bin/`. - - ::: - -1. Use the following command to download the `calicoq` binary. - - - -1. Set the file to be executable. - - ``` - chmod +x calicoq - ``` - - :::note - - If the location of `calicoq` is not already in your `PATH`, move the file - to one that is or add its location to your `PATH`. This will allow you to invoke it - without having to prepend its location. - - ::: - -**Next step**: - -[Configure `calicoq` to connect to your datastore](configure/index.mdx). - -## Install calicoq as a container on a single host - -1. 
Ensure that you have the [`config.json` file with the private Tigera registry credentials](../../../getting-started/install-on-clusters/calico-enterprise.mdx#get-private-registry-credentials-and-license-key). - -1. From a terminal prompt, use the following command to either create or open the `~/.docker/config.json` file. - - ```bash - vi ~/.docker/config.json - ``` - -1. Depending on the existing contents of the file, edit it in one of the following ways. - - - **New file**: Paste in the entire contents of the `config.json` file from Tigera. - - - **Existing file without quay.io object**: Add the following lines from the `config.json` inside the `"auth"` object. - - ```json - "quay.io": { - "auth": "", - "email": "" - } - ``` - - - **Existing file with quay.io object**: Add the following lines from the `config.json` inside the `"quay.io"` object. - - ```json - "auth": "", - "email": "" - ``` - -1. Save and close the file. - -1. Use the following commands to pull the `calicoq` image from the Tigera - registry. - - ```bash - docker pull $[registry]$[componentImage.calicoq] - ``` - -1. Confirm that the image has loaded by typing `docker images`. - - ```bash - REPOSITORY TAG IMAGE ID CREATED SIZE - $[releases.0.components.calicoq.image] $[releases.0.components.calicoq.version] e07d59b0eb8a 2 minutes ago 42MB - ``` - -**Next step**: - -[Configure `calicoq` to connect to your datastore](configure/index.mdx). diff --git a/calico-enterprise_versioned_docs/version-3.19-2/operations/clis/index.mdx b/calico-enterprise_versioned_docs/version-3.19-2/operations/clis/index.mdx deleted file mode 100644 index aca42d1121..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/operations/clis/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: Install and configure CLIs for manipulating and querying resources. -hide_table_of_contents: true ---- - -# CLIs - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/operations/cnx/access-the-manager.mdx b/calico-enterprise_versioned_docs/version-3.19-2/operations/cnx/access-the-manager.mdx deleted file mode 100644 index cf24ceb433..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/operations/cnx/access-the-manager.mdx +++ /dev/null @@ -1,172 +0,0 @@ ---- -description: Configure access to the web console. ---- - -# Configure access to the web console - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - -## Big picture - -Configure access to the $[prodname] web console user interface. - -## Value - -For security, the $[prodname] web console is not exposed outside of the cluster by default. You can configure access to the $[prodname] web console using ingress, a load balancer service, or port forwarding. 
- -## Before you begin - -**Required** - -- [Install $[prodname]](../../getting-started/index.mdx) -- Choose one of the following access options and complete the required configuration: - -| Option | Description | Requirement | -| ------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| Kubernetes ingress | Configure your cluster with an ingress controller to implement the `Ingress` resource using [Kubernetes ingress](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer). | Ensure the $[prodname] web console receives a HTTPS (TLS) connection (not unencrypted HTTP). If you require TLS termination at your ingress, you must use a proxy that supports transparent HTTP/2 proxying, (for example, Envoy), or re-originate a TLS connection from your proxy to the $[prodname] web console. If you do not require TLS termination, configure your proxy to “pass thru” the TLS to the $[prodname] web console. | -| Load balancer | Configure your cluster with a service load balancer controller to implement the external load balancer. See [Kubernetes loadbalancer](https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/) | Ensure the $[prodname] web console receives a HTTPS (TLS) connection (not unencrypted HTTP). If you require TLS termination at your load balancer, you must use a load balancer that supports transparent HTTP/2 proxying, or re-originate a TLS connection from your load balancer to the $[prodname] web console. If you do not require TLS termination, configure your proxy to “pass thru” the TLS to the $[prodname] web console. | -| Port forwarding | Forward traffic from a local port to the Kubernetes API server, where it is proxied to the web console. This approach is **not recommended for production**, but is useful if you do not have a load balancer or ingress infrastructure configured, or you need to get started quickly. | n/a | -| OpenShift routes | Use OpenShift routes to expose a service by giving it an externally-reachable hostname (for example, `www.example.com`) . | n/a | - -## How to - -### Configure access to the $[prodname] web console - - - - -**Basic ingress controller, no modification** - -The following example uses `tigera-manager` as the backend service without modification. Use the `tigera-manager` service only when edits to the service are not required. (Note if you try to make changes to `tigera-manager`, changes may appear to take effect, but the service always resets to the default and is not overwritten.) 
- -```yaml -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: tigera-manager - namespace: tigera-manager -spec: - rules: - - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - service: - name: tigera-manager - port: - number: 9443 -``` - -**Advanced ingress controllers, with modifications** - -If you need to annotate or modify the service, you must create your own service (`serviceName: `) in the `tigera-manager` namespace, and use it in the ingress resource. For example: - -```yaml -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: tigera-manager - namespace: tigera-manager -spec: - rules: - - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - service: - name: annotated-service - port: - number: 9443 -``` - -### Log in to the $[prodname] web console - -Access the $[prodname] web console in your browser using the URL for your ingress controller. For example: `https://`. - - - - -To expose the manager using a load balancer, create the following service. - -```yaml -kind: Service -apiVersion: v1 -metadata: - name: tigera-manager-external - namespace: tigera-manager -spec: - type: LoadBalancer - selector: - k8s-app: tigera-manager - externalTrafficPolicy: Local - ports: - - port: 9443 - targetPort: 9443 - protocol: TCP -``` - -After creating the service, it may take a few minutes for the load balancer to be created. Once complete, the load balancer IP address appears as an `ExternalIP` in `kubectl get services -n tigera-manager tigera-manager-external`. - -### Log in to the $[prodname] web console - -Access the $[prodname] web console in your browser using the load balancer's external IP address. For example: `https://:9443`. - - - - -To forward traffic locally, use the following command: - -```bash -kubectl port-forward -n tigera-manager service/tigera-manager 9443:9443 -``` - -### Log in to the $[prodname] web console - -Access the $[prodname] web console in your browser at: `https://localhost:9443` - - - - -To expose the web console using OpenShift routes, create the following route with these required parameters: - -- host: `.` -- name: `tigera-manager` -- targetPort: `9443` - -**Example** - -```yaml -kind: Route -apiVersion: route.openshift.io/v1 -metadata: - name: tigera-manager - namespace: tigera-manager -spec: - host: manager.apps.demo-ocp.tigera-solutions.io - to: - kind: Service - name: tigera-manager - weight: 100 - port: - targetPort: 9443 - tls: - termination: passthrough - insecureEdgeTerminationPolicy: Redirect - wildcardPolicy: None -``` - -### Log in to the $[prodname] web console - -Access the $[prodname] web console in your browser using the URL with clustername. For example: `https://manager.apps.demo-ocp.tigera-solutions.io:9443` - - - - - -## Additional resources - -- [Authentication quickstart](authentication-quickstart.mdx) -- [Configure an external identity provider](configure-identity-provider.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/operations/cnx/authentication-quickstart.mdx b/calico-enterprise_versioned_docs/version-3.19-2/operations/cnx/authentication-quickstart.mdx deleted file mode 100644 index 6205d7ab01..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/operations/cnx/authentication-quickstart.mdx +++ /dev/null @@ -1,98 +0,0 @@ ---- -description: Use default token authentication to log in to the web console and Kibana. 
---- - -# Authentication quickstart - -## Big picture - -Get started quickly with our default token authentication to log in to the $[prodname] web console and Kibana. - -## Concepts - -### Authentication defaults - -Token authentication is the default authentication option for the $[prodname] web console. When a service account is created, an -associated secret is created that contains a signed bearer token for that service account. Just copy the token for the service -account in to the web console and log in. - -Use basic login for the default Kibana root user. - -The default login methods are always available at: - -- **the $[prodname] web console:** `https://:/login/token`. -- **Kibana:** `https://:/tigera-kibana/login`. - -## Before you begin - -Make sure you have installed $[prodname] using one of the [installation guides](../../getting-started/index.mdx) and have set up [access to the web console](access-the-manager.mdx). - -## How to - -:::note - -For OpenShift, replace `kubectl` with `oc` in the following commands. - -::: - -**Log in to the $[prodname] web console** - -First, create a service account in the desired namespace: - -```bash -kubectl create sa -n -``` - -Give the service account permissions to access the $[prodname] web console, and a $[prodname] cluster role: - -```bash -kubectl create clusterrolebinding --clusterrole --serviceaccount : -``` - -where: - -- **binding_name** is a descriptive name for the rolebinding. -- **role_name** is one of the default cluster roles (or a custom cluster role) specifying $[prodname] UI permissions. -- **namespace** is the service account's namespace. -- **service_account** is the service account that the permissions are being associated with. - -For example, the following command gives the service account, `jane` in the default namespace network admin permissions: - -```bash -kubectl create clusterrolebinding jane-access --clusterrole tigera-network-admin --serviceaccount default:jane -``` - -Next, create a login token for the service account. - -Using the running example of a service account named, `jane` in the default namespace: - -```bash -kubectl create token jane --duration=24h -``` - -:::note - -The token created above will expire after 24 hours. - -::: - -Now that you have the token, log in to the $[prodname] UI and submit the token. - -`https://:/login/token` - -**Log in to Kibana** - -Connect to Kibana with the `elastic` username. Use the following command to decode the password: - -``` -kubectl -n tigera-elasticsearch get secret tigera-secure-es-elastic-user -o go-template='{{.data.elastic | base64decode}}' && echo -``` - -Once logged in, you can configure users and their privileges from the settings page. 
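Before logging in, it can be worth a quick sanity check that the binding grants what you expect by impersonating the service account. A minimal sketch, reusing the `jane` example above (the resource names are only illustrative):

```bash
# Check whether the service account can list tiers and tiered network policies.
# A "yes" answer suggests the cluster role binding above took effect.
kubectl auth can-i list tiers.projectcalico.org --as=system:serviceaccount:default:jane
kubectl auth can-i list networkpolicies.projectcalico.org --as=system:serviceaccount:default:jane
```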
- -## Additional resources - -- [Configure user roles and permissions](roles-and-permissions.mdx) -- [Configure an external identity provider](configure-identity-provider.mdx) -- [Configure RBAC for tiered policies](../../network-policy/policy-tiers/rbac-tiered-policies.mdx) -- [Configure RBAC for Elasticsearch](../../observability/elastic/rbac-elasticsearch.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/operations/cnx/configure-identity-provider.mdx b/calico-enterprise_versioned_docs/version-3.19-2/operations/cnx/configure-identity-provider.mdx deleted file mode 100644 index 776daa26c4..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/operations/cnx/configure-identity-provider.mdx +++ /dev/null @@ -1,335 +0,0 @@ ---- -description: Configure an external identity provider for user access to Calico Enterprise Manager and Kibana. ---- - -# Configure an external identity provider - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - -## Big picture - -Configure an external identity provider (IdP), create a user, and log in to the $[prodname] web console. - -(If you are just starting out, the quickest way to allow user access is to configure the default, [token authentication](authentication-quickstart.mdx).) - -## Concepts - -The $[prodname] authentication method is configured through the [Authentication API resource](../../reference/installation/api.mdx#authentication) named, `tigera-secure`. - -When configuring your cluster, you may be asked for the following inputs: - -- **Client Id**: Id for exchanging data that are shared between the IdP and an application. -- **Client Secret**: Secret associated with the `client id` used by server applications for exchanging tokens. -- **Issuer URL**: URL where the IdP can be reached, based on the conventions of OAuth and OIDC. -- **Claims**: Every time your IdP issues a token for a valid user, these claims add metadata about the user. $[prodname] uses this to determine the username. - -## Before you begin - -**Supported identity providers** - -- **OIDC authentication**: User identity is managed outside of the cluster by an OIDC authorization server. -- **Google OIDC authentication**: User identity is managed by Google OIDC. Choose this option if you want to use GSuite groups. -- **OpenShift authentication**: User identity is provided by the OpenShift OAuth server. -- **LDAP authentication**: User identify is provided using the LDAP server. - -**Required** - -- [Install $[prodname]](../../getting-started/index.mdx) -- [Configure access to the web console](access-the-manager.mdx) - -## How to - -### Configure an external identity provider for user authentication - - - - -1. Apply the Authentication CR to your cluster to let the operator configure your login. - The following example uses the email claim. The email field from the JWT (created by your IdP), is used as the username for binding privileges. - - ```yaml - apiVersion: operator.tigera.io/v1 - kind: Authentication - metadata: - name: tigera-secure - spec: - # This indicates where the web console can be accessed from the browser. Include the port only if the web console is not running on port 443. - managerDomain: https:// - oidc: - issuerURL: - usernameClaim: email - ``` - -1. Apply the secret to your cluster with your OIDC credentials. To get the values, consult the documentation of your provider. 
- - ```yaml - apiVersion: v1 - kind: Secret - metadata: - name: tigera-oidc-credentials - namespace: tigera-operator - data: - clientID: - clientSecret: - ``` - - - - -1. Apply the Authentication CR to your cluster to let the operator configure your login. - The following example uses the email claim. The email field from the JWT (created by your IdP), is used as the username for binding privileges. - - ```yaml - apiVersion: operator.tigera.io/v1 - kind: Authentication - metadata: - name: tigera-secure - spec: - managerDomain: https:// - oidc: - issuerURL: - usernameClaim: email - ``` - -1. Optional: Google OIDC does not support the groups claim. However, $[prodname] leverages [Dex IdP](https://dexidp.io/docs/connectors/google/) to add groups if you configure a service account. - This account needs Domain-Wide Delegation and permission to access the `https://www.googleapis.com/auth/admin.directory.group.readonly` API scope. - To get group fetching set up: - - - Follow the [instructions](https://developers.google.com/admin-sdk/directory/v1/guides/delegation) to set up a service account with Domain-Wide Delegation - - During service account creation, a JSON key file will be created that contains authentication information for the service account. - - When delegating the API scopes to the service account, delegate the `https://www.googleapis.com/auth/admin.directory.group.readonly` scope and **only this scope**. - - Enable the [Admin SDK](https://console.developers.google.com/apis/library/admin.googleapis.com/) - - Use the `serviceAccountSecret` and `adminEmail` configuration options in the next step. - - The contents of the JSON key file should be used as the `serviceAccountSecret` - - For `adminEmail` choose a G Suite super user. The service account will impersonate this user when making calls to the admin API. - -1. Apply the secret to your cluster with your OIDC credentials. - - ```yaml - apiVersion: v1 - kind: Secret - metadata: - name: tigera-oidc-credentials - namespace: tigera-operator - data: - clientID: - clientSecret: - # If you created a service account in the previous step, include the following two fields. - serviceAccountSecret: - adminEmail: - ``` - - - - -1. Install the [Openshift CLI (oc)](https://docs.okd.io/4.9/cli_reference/openshift_cli/getting-started-cli.html). - -1. Create values for some required variables. `MANAGER_URL` is the URL where the $[prodname] web console will be accessed, - `CLUSTER_DOMAIN` is the domain (excl. port) where your OpenShift cluster is accessed (run `oc status` to get it) and `CLIENT_SECRET` is a value of your choosing. - - ```bash - # This indicates where the web console can be accessed from the browser. Include the port only if the web console is not running on port 443. - MANAGER_URL=: - CLUSTER_DOMAIN= - CLIENT_SECRET= - ``` - -1. Add an OAuthClient to your OpenShift cluster. - - ```bash - oc apply -f - < dex.pem - echo | openssl s_client -servername api.$CLUSTER_DOMAIN -connect api.$CLUSTER_DOMAIN:6443 | sed -ne '/-BEGIN CERTIFICATE-/,/-END CERTIFICATE-/p' >> dex.pem - ``` - - Alternatively, you can use the root CA of your cluster and store it in `dex.pem`. - -1. Apply a secret to your cluster with your OpenShift credentials. 
- - ```bash - oc create secret generic tigera-openshift-credentials -n tigera-operator --from-file=rootCA=dex.pem --from-literal=clientID=tigera-dex --from-literal=clientSecret=$CLIENT_SECRET - ``` - - - - - For detailed steps, see the Support KB article [How to Configure LDAP Authentication with Active Directory](https://tigeraio.my.site.com/help/s/article/How-to-configure-LDAP-Authentication-with-Active-Directory). - -1. Apply the Authentication CR to your cluster to let the operator configure your login. - - ```yaml noValidation - apiVersion: operator.tigera.io/v1 - kind: Authentication - metadata: - name: tigera-secure - spec: - # This indicates where the web console can be accessed from the browser. Include the port only if the web console is not running on port 443. - managerDomain: https://: - ldap: - # The host and port of the LDAP server. Example: ad.example.com:636. - host: : - # (optional) StartTLS whether to enable the startTLS feature for establishing TLS on an existing LDAP session. - # If true, the ldap protocol is used and then issues a StartTLS command, otherwise, connections will use - # the ldaps: protocol. - startTLS: true - - # User entry search configuration. - userSearch: - # To start the user search from. Example: "cn=users,dc=example,dc=com". - baseDN: - - # Optional filter to apply when searching the directory. Example: "(objectClass=posixAccount)" - filter: - - # A mapping of attributes to the username. This value can be used for applying RBAC to a user. - Default: uid - nameAttribute: - - # (Optional) Group search configuration. This value can be used to apply RBAC to a user group. - groupSearch: - - # BaseDN to start the search from. Example: "cn=groups,dc=example,dc=com". - baseDN: - - # Optional filter to apply when searching the directory. Example: "(objectClass=posixGroup)" - filter: - - # A mapping of attributes to the group name. This value can be used for applying RBAC to a user group. Example: "cn". - nameAttribute: - - # Following list contains field pairs that are used to match a user to a group. It adds an additional - # requirement to the filter that an attribute in the group must match the user's attribute value. - userMatchers: - - userAttribute: - groupAttribute: - ``` - -1. Apply the secret to your cluster with your LDAP credentials. To obtain the values, consult the documentation of your provider. - - ```yaml - apiVersion: v1 - kind: Secret - metadata: - name: tigera-ldap-credentials - namespace: tigera-operator - data: - bindDN: - bindPW: - rootCA: - ``` - ### Troubleshooting LDAP - - **Problem**: Error: (502 bad gateway nginx) when logging in to the web console - - **Solution**: nginx ingress cannot handle the length of the token when a groupSearch matches too many groups. - To resolve this, apply a filter as shown in the following example. 
- - ```yaml - apiVersion: operator.tigera.io/v1 - kind: Authentication - name: tigera-secure - spec: - ldap: - groupSearch: - baseDN: OU=GroupID,OU=Groups,DC=mycompany,DC=com - filter: (&(objectClass=group)(|(cn=example-dn1)(cn=example-dn2))) - nameAttribute: cn - userMatchers: - - groupAttribute: member - userAttribute: distinguishedName - host: ldap.mycompany.com:636 - startTLS: false - userSearch: - baseDN: OU=Accounts,DC=mycompany.com,DC=com - nameAttribute: userPrimaryName - managerDomain: https://tigera-manager.mycompany.com - ``` - - - - - -### Grant user login privileges - -:::note - -For OpenShift users, replace `kubectl` with `oc` in the following commands, or apply cluster roles from the OpenShift console: **User Management**, **users**. - -::: - -For admin users, apply this cluster role. - -```bash -kubectl create clusterrolebinding -tigera-network-admin --user= --clusterrole=tigera-network-admin -``` - -For basic users with view-only permissions, apply this role. - -```bash -kubectl create clusterrolebinding -tigera-ui-user --user= --clusterrole=tigera-ui-user -``` - -Or use the groups flag to assign cluster role to a group of users. - -```bash -kubectl create clusterrolebinding all-developers-tigera-ui-user --group= --clusterrole=tigera-ui-user -``` - -### (Optional) Allow $[prodname] URIs in your IdP - -Most IdPs require redirect URIs to be allowed to redirect users at the end of the OAuth flow to the $[prodname] web console or to Kibana. Consult your IdP documentation for authorizing your domain for the respective origins and destinations. - -**Authorized redirect URIs** - -- `https:///dex/callback` - -## Troubleshooting - -- ManagerDomain `localhost` and `127.0.0.1` are not the same. If you configure `localhost:9443` as your managerDomain, while navigating to `https://127.0.0.1:9443`, the OIDC security checks will deny you access. -- Omit the port from `managerURL` if it is listening on the standard port (`:443`) for HTTPS. -- When your `usernameClaim` is not `email` and `usernamePrefix` is omitted, we have implemented a default prefix identical to how Kubernetes has for their kube-apiserver, see the [oidc-username-claim documentation](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver). - For example, if the `managerDomain` in your `Authentication` spec is `https://example.com`, and the username claim maps to `jane`, the (cluster) role binding should be bound to: `https://example.com/dex#jane` -- When you encounter problems while configuring your IdP, we encourage you to use the network tab of the browser dev tools to inspect requests with error codes and to decode authorization headers of the HTTP requests. -- If you would like to bring a self-signed certificate for your IdP and are using OIDC, you can do so by adding the field `rootCA` to secret `tigera-oidc-credentials`. The value for this field should contain the certificate in PEM format. -- If logging into Kibana fails with a `cookie not present` error, update the browser settings to allow third-party cookies, as the $[prodname] web console uses Kibana's cookies during login. -- If you need to configure LDAP with a DN that has spaces, use double quotes to escape them. For example: `cn="example common name",ou="example org unit",DC=example,DC=org`. 
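For the self-signed IdP certificate case mentioned in the troubleshooting notes above, one way to add the `rootCA` field is to merge-patch the existing `tigera-oidc-credentials` secret. This is only a sketch: it assumes the PEM-encoded CA certificate is in a local file named `ca.pem` and that GNU `base64` is available (secret data must be base64-encoded).

```bash
# Add (or replace) the rootCA key on the existing OIDC credentials secret.
kubectl patch secret tigera-oidc-credentials -n tigera-operator \
  --type merge \
  -p "{\"data\":{\"rootCA\":\"$(base64 -w0 < ca.pem)\"}}"
```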
- -## Additional resources - -- [Configure user roles and permissions](roles-and-permissions.mdx) -- [Configure RBAC for tiered policies](../../network-policy/policy-tiers/rbac-tiered-policies.mdx) -- [Configure RBAC for Elasticsearch](../../observability/elastic/rbac-elasticsearch.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/operations/cnx/index.mdx b/calico-enterprise_versioned_docs/version-3.19-2/operations/cnx/index.mdx deleted file mode 100644 index 15d1a9efb0..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/operations/cnx/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: Get started using the web console. -hide_table_of_contents: true ---- - -# The Calico Enterprise web console - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/operations/cnx/roles-and-permissions.mdx b/calico-enterprise_versioned_docs/version-3.19-2/operations/cnx/roles-and-permissions.mdx deleted file mode 100644 index ac2d522394..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/operations/cnx/roles-and-permissions.mdx +++ /dev/null @@ -1,51 +0,0 @@ ---- -description: Configure roles and permissions for Calico Enterprise features and functions. ---- - -# Configure user roles and permissions - -## Big picture - -Configure roles using Kubernetes RBAC and lock down user permissions to $[prodname] features and functions. - -## Value - -Self-service is an important part of your Kubernetes platform networking and network security. When you allow developers to define policies with guardrails, you create more self-service in the CI/CD process. But network security architects require assurances that developers can access only the resources they are entitled to. - -## Concepts - -### Kubernetes RBAC authorization - -The [Calico Enterprise API server](../../reference/installation/api.mdx#apiserver) is an extension to the standard [kubernetes rbac authorization apis](https://kubernetes.io/docs/reference/access-authn-authz/rbac/). You configure fine-grained user permissions using `Role`, `ClusterRole`, `RoleBinding`and `ClusterRoleBinding` with the standard RBAC controls: get, list, watch, create, update, patch, delete. - -| Features | RBAC controls for... | -| ------------------------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| Network policy | - Tiered policy, including AWS security groups and federated services.
    - Kubernetes network policy (in default tier)
    - $[prodname] network policies including namespaces
    - Staged policy, policy recommendation, policy preview | -| Compliance | Report management, generation, export, and status. | -| Visibility and troubleshooting | Elasticsearch logs: flow, audit, dns, intrusion detection, bgp | -| Multi-cluster management | Management and managed clusters in single management plane. | - -### Predefined roles and permissions - -$[prodname] provides the following predefined roles and permissions: - -`tigera-ui-user` - -- Basic user with access to the $[prodname] web console and Kibana: - - List/view Calico Enterprise policy and tier resources in the `projectcalico.org` and `networking.k8s.io` API groups - - List/view logs in Kibana - -`tigera-network-admin` - -- Superuser access for Kibana (including Elastic user and license management), and all Calico resources in `projectcalico.org` and `networking.k8s.io` API groups (get, list, watch, create, update, patch, delete) - -## Additional resources - -For RBAC details on any given feature, see the feature. For example: - -- [Tiered policy RBAC](../../network-policy/policy-tiers/rbac-tiered-policies.mdx) -- [Policy preview RBAC](../../network-policy/policy-impact-preview.mdx) -- [Staged policy RBAC](../../network-policy/staged-network-policies.mdx) -- [Elasticsearch logs RBAC](../../observability/elastic/rbac-elasticsearch.mdx) -- [Compliance reports RBAC](../../compliance/overview.mdx) -- [Multi-cluster management RBAC](../../multicluster/set-up-multi-cluster-management/standard-install/create-a-management-cluster.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/operations/comms/apiserver-tls.mdx b/calico-enterprise_versioned_docs/version-3.19-2/operations/comms/apiserver-tls.mdx deleted file mode 100644 index 3a2fd874cc..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/operations/comms/apiserver-tls.mdx +++ /dev/null @@ -1,52 +0,0 @@ ---- -description: Add TLS certificates to secure access to the Calico Enterprise API server. ---- - -# Provide TLS certificates for the API server - -## Big picture - -Provide TLS certificates to secure access to the $[prodname] API server. - -## Value - -Providing TLS certificates for $[prodname] components is recommended as part of a zero trust network model for security. - -## Concepts - -### $[prodname] API server - -The $[prodname] API server handles requests for $[prodname] API resources. The main Kubernetes API server has an aggregation layer and will proxy requests for the $[prodname] API resources to the $[prodname] API server. - -## Before you begin... - -By default, the $[prodname] API server uses self-signed certificates on connections. To provide TLS certificates, -get the certificate and key pair for the $[prodname] API Server using any X.509-compatible tool or from your organization's Certificate Authority. The certificate must have Common Name or a Subject Alternate Name of `tigera-api.tigera-system.svc`. - -This feature is available for Kubernetes and OpenShift. - -## How to - -### Add TLS certificates - -To provide certificates for use during deployment you must create a secret before applying the 'custom-resource.yaml' or before creating the Installation resource. To specify certificates for use in the $[prodname] web console, create a secret using the following command: - -```bash -kubectl create secret generic tigera-apiserver-certs -n tigera-operator --from-file=apiserver.crt=
    --from-file=apiserver.key=
    -``` - -To update existing certificates, run the following command: - -```bash -kubectl create secret generic tigera-apiserver-certs -n tigera-operator --from-file=apiserver.crt=
    --from-file=apiserver.key=
    --dry-run -o yaml --save-config | kubectl replace -f - -``` - -:::note - -If the $[prodname] API server is already running, updating the secret restarts the API server. While the server restarts, the $[prodname] API server may be unavailable for a short period of time. - -::: - -## Additional resources - -Additional documentation is available for securing [the $[prodname] web console connections](crypto-auth.mdx#connections-from-calico-enterprise-components-to-kube-apiserver-kubernetes-and-openshift). diff --git a/calico-enterprise_versioned_docs/version-3.19-2/operations/comms/certificate-management.mdx b/calico-enterprise_versioned_docs/version-3.19-2/operations/comms/certificate-management.mdx deleted file mode 100644 index e5d4868ead..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/operations/comms/certificate-management.mdx +++ /dev/null @@ -1,146 +0,0 @@ ---- -description: Control the issuer of certificates used by Calico Enterprise. ---- - -# Manage TLS certificates used by Calico Enterprise - -## Big picture - -Enable custom workflows for issuing and signing certificates used to secure communication between $[prodname] components. - -## Value - -Some deployments have security requirements that strictly minimize or eliminate the access to private keys and/or -requirements to control the trusted certificates throughout clusters. Using the Kubernetes Certificates API that automates -certificate issuance, $[prodname] provides a simple configuration option that you add to your installation. - -## Before you begin - -**Limitations** - -If your cluster is already running $[prodname] and you would like to enable certificate management, you need to -temporarily remove [the logstorage resource](../../reference/installation/api.mdx#logstorage) -before following the steps to enable certificate management and then re-apply afterwards. For detailed steps on -re-creating logstorage, read more on [how to create a new Elasticsearch cluster](../../observability/elastic/troubleshoot.mdx#how-to-create-a-new-cluster). - -Currently, this feature is not supported in combination with [Multi-cluster management](../../multicluster/set-up-multi-cluster-management/standard-install/create-a-management-cluster.mdx). - -**Supported algorithms** - -- Private Key Pair: RSA (size: 2048, 4096, 8192), ECDSA (curve: 256, 384, 521) -- Certificate Signature: RSA (sha: 256, 384, 512), ECDSA (sha: 256, 384, 512) - -## How to - -- [Enable certificate management](#enable-certificate-management) -- [Verify and monitor](#verify-and-monitor) -- [Implement your own signing/approval process](#implement-your-own-signing-and-approval-process) - -### Enable certificate management - -1. Modify your [the installation resource](../../reference/installation/api.mdx#installation) - resource and add the `certificateManagement` section. Apply the following change to your cluster. - - ```yaml - apiVersion: operator.tigera.io/v1 - kind: Installation - metadata: - name: default - spec: - certificateManagement: - caCert: - signerName: / - signatureAlgorithm: SHA512WithRSA - keyAlgorithm: RSAWithSize4096 - ``` - - Done! If you have an automatic signer and approver, there is nothing left to do. - The next section explains in more detail how to verify and monitor the status. - -### Verify and monitor - -1. 
Monitor your pods as they come up: - - ``` - kubectl get pod -n calico-system -w - NAMESPACE NAME READY STATUS RESTARTS AGE - calico-system calico-node-5ckvq 0/1 Pending 0 0s - calico-system calico-typha-688c9957f5-h9c5w 0/1 Pending 0 0s - calico-system calico-node-5ckvq 0/1 Init:0/3 0 1s - calico-system calico-typha-688c9957f5-h9c5w 0/1 Init:0/1 0 1s - calico-system calico-node-5ckvq 0/1 PodInitializing 0 2s - calico-system calico-typha-688c9957f5-h9c5w 0/1 PodInitializing 0 2s - calico-system calico-node-5ckvq 1/1 Running 0 3s - calico-system calico-typha-688c9957f5-h9c5w 1/1 Running 0 3s - ``` - - During the `Init` phase a certificate signing request (CSR) is created by an init container of the pod. - It will be stuck in the `Init` phase. - Once the CSR has been approved and signed by the certificate authority, the pod continues with `PodInitializing` and eventually `Running`. - -2. Monitor certificate signing requests: - - ``` - kubectl get csr -w - NAME AGE REQUESTOR CONDITION - calico-system:calico-node-5ckvq:9a3a10 0s system:serviceaccount:calico-system:calico-node Pending - calico-system:calico-node-5ckvq:9a3a10 0s system:serviceaccount:calico-system:calico-node Pending,Issued - calico-system:calico-node-5ckvq:9a3a10 0s system:serviceaccount:calico-system:calico-node Approved,Issued - calico-system:typha-688c9957f5-h9c5w:2b0d82 0s system:serviceaccount:calico-system:calico-typha Pending - calico-system:typha-688c9957f5-h9c5w:2b0d82 0s system:serviceaccount:calico-system:calico-typha Pending,Issued - calico-system:typha-688c9957f5-h9c5w:2b0d82 0s system:serviceaccount:calico-system:calico-typha Approved,Issued - ``` - - A CSR will be `Pending` until it has been `Issued` and `Approved`. - The name of a CSR is based on the namespace, the pod name and the first 6 characters of the pod's UID. - The pod will be `Pending` until the CSR has been `Approved`. - -3. Monitor the status of this feature using the `TigeraStatus`: - - ``` - kubectl get tigerastatus - NAME AVAILABLE PROGRESSING DEGRADED SINCE - calico True False False 2m40s - ``` - -### Implement your own signing and approval process - -**Required steps** - -This feature uses api version `certificates.k8s.io/v1beta1` for [certificate signing requests](https://kubernetes.io/docs/reference/access-authn-authz/certificate-signing-requests/). -To automate the signing and approval process, run a server that performs the following actions: - -1. Watch `CertificateSigningRequests` resources with status `Pending` and `spec.signerName=`. - - :::note - - You can skip this step if you are using a version before Kubernetes v1.18; (the signerName field was not available). - - ::: - -1. For each `Pending` CSR perform (security) checks (see next heading) -1. Issue a certificate and update `.spec.status.certificate` -1. Approve the CSR and update `.spec.status.conditions` - -**Security requirements** - -Based on your requirements you may want to implement custom checks to make sure that no certificates are issued for a malicious user. -When a CSR is created, the kube-apiserver adds immutable fields to the spec to help you perform checks: - -- `.spec.username`: username of the requester -- `.spec.groups`: user groups of the requester -- `.spec.request`: certificate request in pem format - -Verify that the user and/or group match with the requested certificate subject (alt) names. 
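Before wiring these checks into an automated signer, it can be useful to inspect a pending CSR by hand and confirm that the requested subject matches the identity of the requester. A rough sketch with `kubectl` and `openssl`, using the CSR name from the example output above:

```bash
# Decode the PEM-encoded request and print its subject and requested extensions for review.
kubectl get csr calico-system:calico-node-5ckvq:9a3a10 -o jsonpath='{.spec.request}' \
  | base64 -d \
  | openssl req -noout -subject -text

# Print the requester recorded by the kube-apiserver, to compare against the subject above.
kubectl get csr calico-system:calico-node-5ckvq:9a3a10 -o jsonpath='{.spec.username}{"\n"}'
```

Note that this only inspects the request; issuing the certificate and approving the CSR remain the responsibility of your signer, as outlined below.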
- -**Implement your signer and approver using golang** - -- Use [client-go](https://github.com/kubernetes/client-go) to create a clientset -- To watch CSRs, use `clientset.CertificatesV1().CertificateSigningRequests().Watch(..)` -- To issue the certificate use `clientset.CertificatesV1().CertificateSigningRequests().UpdateStatus(...)` -- To approve the CSR use `clientset.CertificatesV1().CertificateSigningRequests().UpdateApproval(...)` - -### Additional resources - -- Read [kubernetes certificate signing requests](https://kubernetes.io/docs/reference/access-authn-authz/certificate-signing-requests/) for more information on CSRs -- Use [client-go](https://github.com/kubernetes/client-go) to implement a controller to sign and approve a CSR diff --git a/calico-enterprise_versioned_docs/version-3.19-2/operations/comms/compliance-tls.mdx b/calico-enterprise_versioned_docs/version-3.19-2/operations/comms/compliance-tls.mdx deleted file mode 100644 index 50d096c9b9..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/operations/comms/compliance-tls.mdx +++ /dev/null @@ -1,35 +0,0 @@ ---- -description: Add TLS certificate to secure access to compliance. ---- - -# Provide TLS certificates for compliance - -## Big picture - -Provide TLS certificates to secure access to $[prodname] to the compliance components. - -## Value - -Providing TLS certificates for $[prodname] compliance components is recommended as part of a zero trust network model for security. - -## Before you begin... - -By default, $[prodname] uses self-signed certificates for its compliance reporting components. To provide TLS certificates, -get the certificate and key pair for the $[prodname] compliance using any X.509-compatible tool or from your organization's -Certificate Authority. The certificate must have Common Name or a Subject Alternate Name of `compliance.tigera-compliance.svc`. - -## How to - -### Add TLS certificates for compliance - -To provide TLS certificates for use by $[prodname] compliance components during deployment, you must create a secret before applying the 'custom-resource.yaml' or before creating the Compliance resource. Use the following command to create a secret: - -```bash -kubectl create secret generic tigera-compliance-server-tls -n tigera-operator --from-file=tls.crt=
    --from-file=tls.key=
    -``` - -To update existing certificates, run the following command: - -```bash -kubectl create secret generic tigera-compliance-server-tls -n tigera-operator --from-file=tls.crt=
    --from-file=tls.key=
    --dry-run -o yaml --save-config | kubectl replace -f - -``` diff --git a/calico-enterprise_versioned_docs/version-3.19-2/operations/comms/crypto-auth.mdx b/calico-enterprise_versioned_docs/version-3.19-2/operations/comms/crypto-auth.mdx deleted file mode 100644 index 06cee6aac3..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/operations/comms/crypto-auth.mdx +++ /dev/null @@ -1,112 +0,0 @@ ---- -description: Enable TLS authentication and encryption for various Calico Enterprise components. ---- - -# Configure encryption and authentication to secure Calico Enterprise components - -## Connections from $[prodname] components to kube-apiserver (Kubernetes and OpenShift) - -We recommend enabling TLS on kube-apiserver, as well as the client certificate and JSON web token (JWT) -authentication modules. This ensures that all of its communications with $[prodname] components occur -over TLS. The $[prodname] components present either an X.509 certificate or a JWT to kube-apiserver -so that kube-apiserver can verify their identities. - -## Connections from Node to Typha (Kubernetes) - -Operator based installations automatically configure mutual TLS authentication on connections from -Felix to Typha. You may also configure this TLS by providing your own secrets. - -### Configure Node to Typha TLS based on your deployment - -For clusters installed using operator, see how to [provide TLS certificates for Typha and Node](typha-node-tls.mdx). - -For detailed reference information on TLS configuration parameters, refer to: - -- **Typha**: [Node-Typha TLS configuration](../../reference/component-resources/typha/configuration.mdx#felix-typha-tls-configuration) - -- **Node**: [Node-Typha TLS configuration](../../reference/component-resources/node/felix/configuration.mdx#felix-typha-tls-configuration) - -# Calico Enterprise Manager connections - -Tigera the $[prodname] web console's web interface, run from your browser, uses HTTPS to securely communicate -with the $[prodname] web console, which in turn, communicates with the Kubernetes and $[prodname] API -servers also using HTTPS. Through the installation steps, secure communication between -$[prodname] components should already be configured, but secure communication through your web -browser of choice may not. To verify if this is properly configured, the web browser -you are using should display `Secure` in the address bar. - -Before we set up TLS certificates, it is important to understand the traffic -that we are securing. By default, your web browser of choice communicates with -the $[prodname] web console through a -[`NodePort` service](https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typenodeport) -over port `30003`. The NodePort service passes through packets without modification. -TLS traffic is [terminated](https://en.wikipedia.org/wiki/TLS_termination_proxy) -at the $[prodname] web console. This means that the TLS certificates used to secure traffic -between your web browser and the $[prodname] web console do not need to be shared or related -to any other TLS certificates that may be used elsewhere in your cluster or when -configuring $[prodname]. 
The flow of traffic should look like the following: - -![the $[prodname] web console traffic diagram](/img/calico-enterprise/cnx-tls-mgr-comms.svg) - -:::note - -the `NodePort` service in the above diagram can be replaced with other -[Kubernetes services](https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services---service-types). -Configuration will vary if another service, such as a load balancer, is placed between the web -browser and the $[prodname] web console. - -::: - -To properly configure TLS in the $[prodname] web console, you will need -certificates and keys signed by an appropriate Certificate Authority (CA). -For more high level information on certificates, keys, and CAs, see -[this blog post](http://www.steves-internet-guide.com/ssl-certificates-explained/). - -:::note - -It is important when generating your certificates to make sure -that the Common Name or Subject Alternative Name specified in your certificates -matches the host name/DNS entry/IP address that is used to access the $[prodname] web console -(i.e. what it says in the browser address bar). - -::: - -## Issues with certificates - -If your web browser still does not display `Secure` in the address bar, the most -common reasons and their fixes are listed below. - -- **Untrusted Certificate Authority**: Your browser may not display `Secure` because - it does not know (and therefore trust) the certificate authority (CA) that issued - the certificates that the $[prodname] web console is using. This is generally caused by using - self-signed certificates (either generated by Kubernetes or manually). If you have - certificates signed by a recognized CA, we recommend that you use them with the $[prodname] - Manager since the browser will automatically recognize them. - - If you opt to use self-signed certificates you can still configure your browser to - trust the CA on a per-browser basis by importing the CA certificates into the browser. - In Google Chrome, this can be achieved by selecting Settings, Advanced, Privacy and security, - Manage certificates, Authorities, Import. This is not recommended since it requires the CA - to be imported into every browser you access the $[prodname] web console from. - -- **Mismatched Common Name or Subject Alternative Name**: If you are still having issues - securely accessing the $[prodname] web console with TLS, you may want to make sure that the Common Name - or Subject Alternative Name specified in your certificates matches the host name/DNS - entry/IP address that is used to access the $[prodname] web console (i.e. what it says in the browser - address bar). In Google Chrome you can check the $[prodname] web console certificate with Developer Tools - (Ctrl+Shift+I), Security. If you are issued certificates which do not match, - you will need to reissue the certificates with the correct Common Name or - Subject Alternative Name and reconfigure the $[prodname] web console following the steps above. - -## Ingress proxies and load balancers - -You may wish to configure proxy elements, including hardware or software load balancers, Kubernetes Ingress -proxies etc., between user web browsers and the $[prodname] web console. If you do so, configure your proxy -such that the $[prodname] web console receives a HTTPS (TLS) connection, not unencrypted HTTP. 
- -If you require TLS termination at any of these proxy elements, you will need to - -- use a proxy that supports transparent HTTP/2 proxying, for example, [Envoy](https://www.envoyproxy.io/) -- re-originate a TLS connection from your proxy to the $[prodname] web console, as it expects TLS - -If you do not require TLS termination, configure your proxy to "pass thru" the TLS to the $[prodname] web console. diff --git a/calico-enterprise_versioned_docs/version-3.19-2/operations/comms/index.mdx b/calico-enterprise_versioned_docs/version-3.19-2/operations/comms/index.mdx deleted file mode 100644 index 24e13343f5..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/operations/comms/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: Secure communications for Calico components. -hide_table_of_contents: true ---- - -# Secure Calico component communications - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/operations/comms/linseed-tls.mdx b/calico-enterprise_versioned_docs/version-3.19-2/operations/comms/linseed-tls.mdx deleted file mode 100644 index 0e2233f8d5..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/operations/comms/linseed-tls.mdx +++ /dev/null @@ -1,35 +0,0 @@ ---- -description: Add TLS certificate to secure access to Linseed APIs. ---- - -# Provide TLS certificates for Linseed APIs - -## Big picture - -Provide TLS certificates to secure access to $[prodname] to the Linseed components. - -## Value - -Providing TLS certificates for $[prodname] Linseed components is recommended as part of a zero trust network model for security. - -## Before you begin... - -By default, $[prodname] uses self-signed certificates for its Linseed components. To provide TLS certificates, -get the certificate and key pair for the $[prodname] Linseed using any X.509-compatible tool or from your organization's -Certificate Authority. The certificate must have Common Name or a Subject Alternate Name of `tigera-linseed.tigera-elasticsearch.svc`. - -## How to - -### Add TLS certificates for PacketCapture - -To provide TLS certificates for use by $[prodname] Linseed components during deployment, you must create a secret before applying the 'custom-resource.yaml' or before creating the LogStorage resource. Use the following command to create a secret: - -```bash -kubectl create secret generic tigera-secure-linseed-cert -n tigera-operator --from-file=tls.crt=
    --from-file=tls.key= -``` - -To update existing certificates, run the following command: - -```bash -kubectl create secret generic tigera-secure-linseed-cert -n tigera-operator --from-file=tls.crt= --from-file=tls.key= --dry-run -o yaml --save-config | kubectl replace -f - -``` diff --git a/calico-enterprise_versioned_docs/version-3.19-2/operations/comms/log-storage-tls.mdx b/calico-enterprise_versioned_docs/version-3.19-2/operations/comms/log-storage-tls.mdx deleted file mode 100644 index 906dd93af1..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/operations/comms/log-storage-tls.mdx +++ /dev/null @@ -1,45 +0,0 @@ ---- -description: Add TLS certificate to secure access to log storage. ---- - -# Provide TLS certificates for log storage - -## Big picture - -Provide TLS certificates to secure access to $[prodname] to the log storage. - -## Value - -Providing TLS certificates for $[prodname] components is recommended as part of a zero trust network model for security. - -## Before you begin... - -By default, the $[prodname] log storage uses self-signed certificates on connections. To provide TLS certificates, -get the certificate and key pair for the $[prodname] log storage using any X.509-compatible tool or from your organization's -Certificate Authority. The certificate must include the following Subject Alternate Names or DNS names `tigera-secure-es-http.tigera-elasticsearch.svc` and `tigera-secure-es-gateway-http.tigera-elasticsearch.svc`. - -If your cluster has Windows nodes, the certificate must additionally include `tigera-secure-es-http.tigera-elasticsearch.svc.` where `` is the local domain specified for in-cluster DNS. - -## How to - -### Add TLS certificates for log storage - -To provide TLS certificates for use by $[prodname] components during deployment, you must create a secret before applying the 'custom-resource.yaml' or before creating the LogStorage resource. Use the following command to create a secret: - -```bash -kubectl create secret generic tigera-secure-elasticsearch-cert -n tigera-operator --from-file=tls.crt= --from-file=tls.key= -``` - -To update existing certificates, run the following command: - -```bash -kubectl create secret generic tigera-secure-elasticsearch-cert -n tigera-operator --from-file=tls.crt= --from-file=tls.key= --dry-run -o yaml --save-config | kubectl replace -f - -``` - -:::note - -If the $[prodname] log storage already exists, you must manually delete the log storage pods one by one -after updating the secret. These pods will be in the `tigera-elasticsearch` namespace with the prefix, `tigera-secure-es`. -Other $[prodname] components will not be unable to communicate with log storage until the pods are restarted. - -::: diff --git a/calico-enterprise_versioned_docs/version-3.19-2/operations/comms/manager-tls.mdx b/calico-enterprise_versioned_docs/version-3.19-2/operations/comms/manager-tls.mdx deleted file mode 100644 index 9c92370c4d..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/operations/comms/manager-tls.mdx +++ /dev/null @@ -1,38 +0,0 @@ ---- -description: Add TLS certificates to secure access to Calico Enterprise Manager user interface. ---- - -# Provide TLS certificates for Calico Enterprise Manager - -## Big picture - -Provide TLS certificates that secure access to the $[prodname] web console user interface. - -## Value - -By default, the $[prodname] web console uses self-signed TLS certificates on connections. This article describes how to provide TLS certificates that users' browsers will trust. 
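As a point of reference, one way to produce a certificate and key pair for the web console is to generate a key and a certificate signing request with `openssl` and submit the request to your organization's Certificate Authority. This is only a sketch: `manager.example.com` is a placeholder and must be replaced with the DNS name or IP address you will use to reach the web console, as described below (`-addext` requires OpenSSL 1.1.1 or newer).

```bash
# Generate a 4096-bit RSA key and a CSR for the web console's DNS name.
openssl req -new -newkey rsa:4096 -nodes \
  -keyout manager-tls.key -out manager-tls.csr \
  -subj "/CN=manager.example.com" \
  -addext "subjectAltName=DNS:manager.example.com"
```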
- -## Before you begin... - -- **Get the certificate and key pair for the $[prodname] web console** - Generate the certificate using any X.509-compatible tool or from your organization's Certificate Authority. The certificate must have Common Name or Subject Alternate Names that match the IPs or DNS names that will be used to [access the web console](../cnx/access-the-manager.mdx). - -## How to - -To provide certificates for use during deployment you must create a secret before applying the 'custom-resource.yaml' or before creating the Installation resource. To specify certificates for use in the manager, create a secret using the following command: - -```bash -kubectl create secret generic manager-tls -n tigera-operator --from-file=tls.crt= --from-file=tls.key= -``` - -To update existing certificates, run the following command: - -```bash -kubectl create secret generic manager-tls -n tigera-operator --from-file=tls.crt= --from-file=tls.key= --dry-run -o yaml --save-config | kubectl replace -f - -``` - -If the $[prodname] web console is already running then updating the secret should cause it to restart and pickup the new certificate and key. This will result in a short period of unavailability of the $[prodname] web console. - -## Additional resources - -Additional documentation is available for securing [the $[prodname] web console connections](crypto-auth.mdx#calico-enterprise-manager-connections). diff --git a/calico-enterprise_versioned_docs/version-3.19-2/operations/comms/packetcapture-tls.mdx b/calico-enterprise_versioned_docs/version-3.19-2/operations/comms/packetcapture-tls.mdx deleted file mode 100644 index c75f45f90c..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/operations/comms/packetcapture-tls.mdx +++ /dev/null @@ -1,35 +0,0 @@ ---- -description: Add TLS certificate to secure access to PacketCapture APIs. ---- - -# Provide TLS certificates for PacketCapture APIs - -## Big picture - -Provide TLS certificates to secure access to $[prodname] to the PacketCapture components. - -## Value - -Providing TLS certificates for $[prodname] PacketCapture components is recommended as part of a zero trust network model for security. - -## Before you begin... - -By default, $[prodname] uses self-signed certificates for its PacketCapture APIs components. To provide TLS certificates, -get the certificate and key pair for the $[prodname] PacketCapture using any X.509-compatible tool or from your organization's -Certificate Authority. The certificate must have Common Name or a Subject Alternate Name of `tigera-packetcapture.tigera-packetcapture.svc`. - -## How to - -### Add TLS certificates for PacketCapture - -To provide TLS certificates for use by $[prodname] PacketCapture components during deployment, you must create a secret before applying the 'custom-resource.yaml' or before creating the APIServer resource. 
Use the following command to create a secret: - -```bash -kubectl create secret generic tigera-packetcapture-server-tls -n tigera-operator --from-file=tls.crt= --from-file=tls.key= -``` - -To update existing certificates, run the following command: - -```bash -kubectl create secret generic tigera-packetcapture-server-tls -n tigera-operator --from-file=tls.crt= --from-file=tls.key= --dry-run -o yaml --save-config | kubectl replace -f - -``` diff --git a/calico-enterprise_versioned_docs/version-3.19-2/operations/comms/secure-bgp.mdx b/calico-enterprise_versioned_docs/version-3.19-2/operations/comms/secure-bgp.mdx deleted file mode 100644 index 358177aec0..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/operations/comms/secure-bgp.mdx +++ /dev/null @@ -1,185 +0,0 @@ ---- -description: Configure BGP passwords to prevent attackers from injecting false routing information. ---- - -# Secure BGP sessions - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - -## Big picture - -Use BGP passwords to prevent attackers from injecting false routing information. - -## Value - -Setting a password on a BGP peering between BGP speakers means that a peering will only -work when both ends of the peering have the same password. This provides a layer of defense -against an attacker impersonating an external BGP peer or a workload in the cluster, for -example to inject malicious routing information into the cluster. - -## Concepts - -### Password protection on BGP sessions - -Password protection is a [standardized](https://tools.ietf.org/html/rfc5925) optional -feature of BGP sessions. The effect is that the two peers at either end of a BGP session -can only communicate, and exchange routing information, if they are both configured with -the same password. - -Please note that password use does not cause the data exchange to be _encrypted_. It -remains relatively easy to _eavesdrop_ on the data exchange, but not to _inject_ false -information. - -### Using Kubernetes secrets to store passwords - -In Kubernetes, the Secret resource is designed for holding sensitive information, -including passwords. Therefore, for this $[prodname] feature, we use Secrets to -store BGP passwords. - -## How to - -To use a password on a BGP peering: - -1. Create (or update) a Kubernetes secret in the namespace where $[noderunning] is - running, so that it has a key whose value is the desired password. Note the secret - name and the key name. - - :::note - - BGP passwords must be 80 characters or fewer. If a - password longer than that is configured, the BGP sessions with - that password will fail to be established. - - ::: - -1. Ensure that $[noderunning] has RBAC permissions to access that secret. - -1. Specify the secret and key name on the relevant BGPPeer resource. 
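Putting the first step together, a minimal sketch of creating the password secret looks like the following. The secret name (`bgp-secrets`) and key (`rr-password`) match the examples in the sections below; the namespace is shown here as `calico-system`, but create it in whichever namespace $[noderunning] runs in on your cluster, and remember that the password value (a placeholder below) must be 80 characters or fewer.

```bash
# Create or update the secret holding the BGP password.
kubectl create secret generic bgp-secrets -n calico-system \
  --from-literal=rr-password='my-bgp-password' \
  --dry-run=client -o yaml | kubectl apply -f -
```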
- -### Create or update Kubernetes secret - -For example: - -``` -kubectl create -f - < - - -When [configuring a BGP peer](../../networking/configuring/bgp.mdx), -include the secret and key name in the specification of the BGPPeer resource, like this: - -```yaml -apiVersion: projectcalico.org/v3 -kind: BGPPeer -metadata: - name: bgppeer-global-3040 -spec: - peerIP: 192.20.30.40 - asNumber: 64567 - password: - secretKeyRef: - name: bgp-secrets - key: rr-password -``` - - - - -Include the secret in the default [BGP configuration](../../reference/resources/bgpconfig.mdx) -similar to the following: - -```yaml -kind: BGPConfiguration -apiVersion: projectcalico.org/v3 -metadata: - name: default -spec: - logSeverityScreen: Info - nodeToNodeMeshEnabled: true - nodeMeshPassword: - secretKeyRef: - name: bgp-secrets - key: rr-password -``` - -:::note - -Node to node mesh must be enabled to set node to node mesh -BGP password. - -::: - - - - - -## Additional resources - -For more detail about the BGPPeer resource, see -[BGPPeer](../../reference/resources/bgppeer.mdx). - -For more on configuring BGP peers, see [configuring BGP peers](../../networking/configuring/bgp.mdx) -. diff --git a/calico-enterprise_versioned_docs/version-3.19-2/operations/comms/secure-metrics.mdx b/calico-enterprise_versioned_docs/version-3.19-2/operations/comms/secure-metrics.mdx deleted file mode 100644 index 3790f95c77..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/operations/comms/secure-metrics.mdx +++ /dev/null @@ -1,512 +0,0 @@ ---- -description: Limit access to Calico Enterprise metric endpoints using network policy. ---- - -# Secure Calico Enterprise Prometheus endpoints - -## About securing access to $[prodname]'s metrics endpoints - -When using $[prodname] with Prometheus metrics enabled, we recommend using network policy -to limit access to $[prodname]'s metrics endpoints. - -## Prerequisites - -- $[prodname] is installed with Prometheus metrics reporting enabled. -- `calicoctl` is [installed in your PATH and configured to access the data store](../clis/calicoctl/install.mdx). - -## Choosing an approach - -This guide provides two example workflows for creating network policies to limit access -to $[prodname]'s Prometheus metrics. Choosing an approach depends on your requirements. - -- [Using a deny-list approach](#using-a-deny-list-approach) - - This approach allows all traffic to your hosts by default, but lets you limit access to specific ports using - $[prodname] policy. This approach allows you to restrict access to specific ports, while leaving other - host traffic unaffected. - -- [Using an allow-list approach](#using-an-allow-list-approach) - - This approach denies traffic to and from your hosts by default, and requires that all - desired communication be explicitly allowed by a network policy. This approach is more secure because - only explicitly-allowed traffic will get through, but it requires you to know all the ports that should be open on the host. - -## Using a deny-list approach - -### Overview - -The basic process is as follows: - -1. Create a default network policy that allows traffic to and from your hosts. -1. Create host endpoints for each node that you'd like to secure. -1. Create a network policy that denies unwanted traffic to the $[prodname] metrics endpoints. -1. Apply labels to allow access to the Prometheus metrics. - -### Example for $[nodecontainer] - -This example shows how to limit access to the $[nodecontainer] Prometheus metrics endpoints. - -1. 
Create a default network policy to allow host traffic - - First, create a default-allow policy. Do this first to avoid a drop in connectivity when adding the host endpoints - later, since host endpoints with no policy default to deny. - - To do this, create a file named `default-host-policy.yaml` with the following contents. - - ```yaml - apiVersion: projectcalico.org/v3 - kind: GlobalNetworkPolicy - metadata: - name: default-host - spec: - # Select all $[prodname] nodes. - selector: running-calico == "true" - order: 5000 - ingress: - - action: Allow - egress: - - action: Allow - ``` - - Then, use `kubectl` to apply this policy. - - ```bash - kubectl apply -f default-host-policy.yaml - ``` - -1. List the nodes on which $[prodname] is running with the following command. - - ```bash - calicoctl get nodes - ``` - - In this case, we have two nodes in the cluster. - - ``` - NAME - kubeadm-master - kubeadm-node-0 - ``` - -1. Create host endpoints for each $[prodname] node. - - Create a file named `host-endpoints.yaml` containing a host endpoint for each node listed - above. In this example, the contents would look like this. - - ```yaml - apiVersion: projectcalico.org/v3 - kind: HostEndpoint - metadata: - name: kubeadm-master.eth0 - labels: - running-calico: 'true' - spec: - node: kubeadm-master - interfaceName: eth0 - expectedIPs: - - 10.100.0.15 - --- - apiVersion: projectcalico.org/v3 - kind: HostEndpoint - metadata: - name: kubeadm-node-0.eth0 - labels: - running-calico: 'true' - spec: - node: kubeadm-node-0 - interfaceName: eth0 - expectedIPs: - - 10.100.0.16 - ``` - - In this file, replace `eth0` with the desired interface name on each node, and populate the - `expectedIPs` section with the IP addresses on that interface. - - Note the use of a label to indicate that this host endpoint is running $[prodname]. The - label matches the selector of the network policy created in step 1. - - Then, use `kubectl` to apply the host endpoints with the following command. - - ```bash - kubectl apply -f host-endpoints.yaml - ``` - -1. Create a network policy that restricts access to the $[nodecontainer] Prometheus metrics port. - - Now let's create a network policy that limits access to the Prometheus metrics port such that - only endpoints with the label `calico-prometheus-access: true` can access the metrics. - - To do this, create a file named `calico-prometheus-policy.yaml` with the following contents. - - ```yaml - # Allow traffic to Prometheus only from sources that are - # labeled as such, but don't impact any other traffic. - apiVersion: projectcalico.org/v3 - kind: GlobalNetworkPolicy - metadata: - name: restrict-calico-node-prometheus - spec: - # Select all $[prodname] nodes. - selector: running-calico == "true" - order: 500 - types: - - Ingress - ingress: - # Deny anything that tries to access the Prometheus port - # but that doesn't match the necessary selector. - - action: Deny - protocol: TCP - source: - notSelector: calico-prometheus-access == "true" - destination: - ports: - - 9091 - ``` - - This policy selects all endpoints that have the label `running-calico: true`, and enforces a single ingress deny rule. - The ingress rule denies traffic to port 9091 unless the source of traffic has the label `calico-prometheus-access: true`, meaning - all $[prodname] workload endpoints, host endpoints, and global network sets that do not have the label, as well as any - other network endpoints unknown to $[prodname]. - - Then, use `kubectl` to apply this policy. 

   ```bash
   kubectl apply -f calico-prometheus-policy.yaml
   ```

1. Apply labels to any endpoints that should have access to the metrics.

   At this point, only endpoints that have the label `calico-prometheus-access: true` can reach
   $[prodname]'s Prometheus metrics endpoints on each node. To grant access, simply add this label to the
   desired endpoints.

   For example, to allow access to a Kubernetes pod you can run the following command.

   ```bash
   kubectl label pod my-prometheus-pod calico-prometheus-access=true
   ```

   If you would like to grant access to a specific IP address in your network, you
   can create a [global network set](../../reference/resources/globalnetworkset.mdx) using `kubectl`.

   For example, creating the following network set would grant access to a host with IP 172.15.0.101.

   ```yaml
   apiVersion: projectcalico.org/v3
   kind: GlobalNetworkSet
   metadata:
     name: calico-prometheus-set
     labels:
       calico-prometheus-access: 'true'
   spec:
     nets:
       - 172.15.0.101/32
   ```

### Additional steps for Typha deployments

If your $[prodname] installation uses the Kubernetes API datastore and has more than 50 nodes, it is likely
that you have installed Typha. This section shows how to use an additional network policy to secure the Typha
Prometheus endpoints.

After following the steps above, create a file named `typha-prometheus-policy.yaml` with the following contents.

```yaml
apiVersion: projectcalico.org/v3
kind: GlobalNetworkPolicy
metadata:
  name: restrict-typha-prometheus
spec:
  # Select all $[prodname] nodes.
  selector: running-calico == "true"
  order: 500
  types:
    - Ingress
  ingress:
    - action: Allow
      protocol: TCP
      source:
        selector: calico-prometheus-access == "true"
      destination:
        ports:
          - 9093
```

This policy selects all endpoints that have the label `running-calico: true`, and enforces a single ingress allow rule.
The ingress rule allows traffic to port 9093 from any source with the label `calico-prometheus-access: true`, meaning
all $[prodname] workload endpoints, host endpoints, and global network sets that have the label will be allowed access.

Then, use `kubectl` to apply this policy.

```bash
kubectl apply -f typha-prometheus-policy.yaml
```

### Example for kube-controllers

If your $[prodname] installation exposes metrics from kube-controllers, you can limit access to those metrics
with the following network policy.

Create a file named `kube-controllers-prometheus-policy.yaml` with the following contents.

```yaml
apiVersion: projectcalico.org/v3
kind: NetworkPolicy
metadata:
  name: restrict-kube-controllers-prometheus
  namespace: calico-system
spec:
  selector: k8s-app == "calico-kube-controllers"
  order: 500
  types:
    - Ingress
  ingress:
    - action: Allow
      protocol: TCP
      source:
        selector: calico-prometheus-access == "true"
      destination:
        ports:
          - 9094
```

Then, use `kubectl` to apply this policy.

```bash
kubectl apply -f kube-controllers-prometheus-policy.yaml
```
diff --git a/calico-enterprise_versioned_docs/version-3.19-2/operations/comms/typha-node-tls.mdx b/calico-enterprise_versioned_docs/version-3.19-2/operations/comms/typha-node-tls.mdx
deleted file mode 100644
index e18e854fd9..0000000000
--- a/calico-enterprise_versioned_docs/version-3.19-2/operations/comms/typha-node-tls.mdx
+++ /dev/null
@@ -1,85 +0,0 @@
---
description: Add TLS certificates to secure communications between Typha and Node if you are using Typha to scale your deployment.
---- - -# Provide TLS certificates for Typha and Node - -## Big picture - -Provide TLS certificates that allow mutual TLS authentication between Node and Typha. - -## Value - -By default, $[prodname] Typha and Node components are configured with self-signed Certificate Authority (CA) and certificates for mutual TLS authentication. This article describes how to provide a CA and TLS certificates. - -## Concepts - -**Mutual TLS authentication** means each side of a connection authenticates the other side. As such, the CA and certificates that are used must all be in sync. If one side of the connection is updated with a certificate that is not compatible with the other side, communication stops. So if certificate updates are mismatched on Typha, Node, or CA certificate, new pod networking and policy application will be interrupted until you restore compatibility. To make it easy to keep updates in sync, this article describes how to use one command to apply updates for all resources. - -## Before you begin... - -**Get the Certificate Authority certificate and signed certificate and key pairs for $[prodname] Typha and Node** - -- Generate the certificates using any X.509-compatible tool or from your organization's CA. -- Ensure the generated certificates meet the requirements for [TLS connections between Node and Typha](crypto-auth.mdx#connections-from-node-to-typha-kubernetes). - -## How to - -### Create resource file - -1. Create the CA ConfigMap with the following commands: - - ```bash - kubectl create configmap typha-ca -n tigera-operator --from-file=caBundle= --dry-run -o yaml --save-config > typha-node-tls.yaml - echo '---' >> typha-node-tls.yaml - ``` - - :::tip - - The contents of the caBundle field should contain the CA or the certificates for both Typha and Node. - It is possible to add multiple PEM blocks. - - ::: - -1. Create the Typha Secret with the following command: - - ```bash - kubectl create secret generic typha-certs -n tigera-operator \ - --from-file=tls.crt= --from-file=tls.key= \ - --from-literal=common-name= --dry-run -o yaml --save-config >> typha-node-tls.yaml - echo '---' >> typha-node-tls.yaml - ``` - - :::tip - - If using SPIFFE identifiers replace `--from-literal=common-name=` with `--from-literal=uri-san=`. - - ::: - -1. Create the Node Secret with the following command: - - ```bash - kubectl create secret generic node-certs -n tigera-operator \ - --from-file=tls.crt= --from-file=tls.key= \ - --from-literal=common-name= --dry-run -o yaml --save-config >> typha-node-tls.yaml - ``` - - :::tip - - If using SPIFFE identifiers replace `--from-literal=common-name=` with `--from-literal=uri-san=`. - - ::: - -### Apply or update resources - -1. Apply the `typha-node-tls.yaml` file. - - To create these resource for use during deployment, you must apply this file before applying `custom-resource.yaml` or before creating the Installation resource. To apply this file, use the following command: - ```bash - kubectl apply -f typha-node-tls.yaml - ``` - - To update existing resources, use the following command: - ```bash - kubectl replace -f typha-node-tls.yaml - ``` - -If $[prodname] Node and Typha are already running, the update causes a rolling restart of both. If the new CA and certificates are not compatible with the previous set, there may be a period where the Node pods produce errors until the old set CA and certificates are replaced with the new ones. 
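
To watch the rolling restart complete after applying updated certificates, you can check the rollout status of both components. This is a quick sanity check; it assumes the default operator-managed names (the `calico-typha` Deployment and `calico-node` DaemonSet in the `calico-system` namespace), which may differ in customized installations.

```bash
# Wait for Typha, then Node, to finish rolling out with the new certificates.
kubectl rollout status deployment/calico-typha -n calico-system
kubectl rollout status daemonset/calico-node -n calico-system
```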
diff --git a/calico-enterprise_versioned_docs/version-3.19-2/operations/decommissioning-a-node.mdx b/calico-enterprise_versioned_docs/version-3.19-2/operations/decommissioning-a-node.mdx deleted file mode 100644 index e5c5224804..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/operations/decommissioning-a-node.mdx +++ /dev/null @@ -1,93 +0,0 @@ ---- -description: Manually remove a node from a cluster that is installed with Calico. ---- - -# Decommission a node - -## About decommissioning nodes - -If you are running the [node controller](../reference/component-resources/kube-controllers/configuration.mdx) -or using the Kubernetes API datastore in policy-only mode, you do not need to manually decommission nodes. - -In other configurations, you may need to manually decommission a node for one -of the following reasons. - -- You are decommissioning a host running `$[nodecontainer]` or removing it from your - cluster. -- You are renaming a node. -- You are receiving an error about an IP address already in use. -- Readiness checks are failing due to unreachable peers that are no longer in the - cluster. -- Hosts are regularly added and removed from your cluster. - -## Purpose of this page - -Provide guidance on how to remove a host that is part of a $[prodname] cluster -and clean up the associated [Node resource](../reference/resources/node) -information. - -## Prerequisites - -- Prior to removing any Node resource from the datastore the `$[nodecontainer]` - container should be stopped on the corresponding host and it should be - ensured that it will not be restarted. -- You must have [calicoctl configured](../operations/clis/calicoctl/install) and operational to run - the commands listed here. - -## Removing a node resource - -Removing a Node resource will also remove the Workload Endpoint, Host -Endpoint, and IP Address resources and any other sub configuration items -associated with that Node. - -:::note - -- Deleting a Node resource may be service impacting if the host is still in - service. Ensure that the host is no longer in service before deleting the - Node resource. -- Any configuration specific to the node will be removed. This would be - configuration like node BGP peerings or custom Felix configs. - -::: - -## Removing a single node resource - -See the example below for how to remove a node with the calicoctl command. - -See [Removing a Node resource](#removing-a-node-resource) above. - -```bash -calicoctl delete node -``` - -## Removing multiple node resources - -To remove several Nodes, a file can be created with several Node resources and -then be passed to the `calicoctl delete` command with the `-f` flag. -Below is an example of how to create a file of Nodes and delete them. - -1. Create a file with the [Node resources](../reference/resources/node) that need - to be removed. For example: - - ```yaml - - apiVersion: projectcalico.org/v3 - kind: Node - metadata: - name: node-02 - - apiVersion: projectcalico.org/v3 - kind: Node - metadata: - name: node-03 - ``` - -2. To delete the nodes listed in the file pass it like below. - - :::caution - - See [Removing a Node resource](#removing-a-node-resource) above. 
- - ::: - - ```bash - calicoctl delete -f nodes_to_delete.yaml - ``` diff --git a/calico-enterprise_versioned_docs/version-3.19-2/operations/ebpf/enabling-ebpf.mdx b/calico-enterprise_versioned_docs/version-3.19-2/operations/ebpf/enabling-ebpf.mdx deleted file mode 100644 index cafacf3ad4..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/operations/ebpf/enabling-ebpf.mdx +++ /dev/null @@ -1,254 +0,0 @@ ---- -description: Steps to enable the eBPF data plane on an existing cluster. ---- - -# Enable eBPF on an existing cluster - -import EbpfValue from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_ebpf-value.mdx'; - -## Big picture - -Enable the eBPF data plane on an existing cluster. - -## Value - - - -## Concepts - -### eBPF - -eBPF (or "extended Berkeley Packet Filter"), is a technology that allows safe mini programs to be attached to various low-level hooks in the Linux kernel. eBPF has a wide variety of uses, including networking, security, and tracing. You’ll see a lot of non-networking projects leveraging eBPF, but for $[prodname] our focus is on networking, and in particular, pushing the networking capabilities of the latest Linux kernels to the limit. - -## Before you begin - -**Required** - -- Review [Requirements, limitations, and performance](install.mdx#before-you-begin) - -## How to - -- [Verify that your cluster is ready for eBPF mode](#verify-that-your-cluster-is-ready-for-ebpf-mode) -- [Configure $[prodname] to talk directly to the API server](#configure-calico-enterprise-to-talk-directly-to-the-api-server) -- [Configure kube-proxy](#configure-kube-proxy) -- [Enable eBPF mode](#enable-ebpf-mode) -- [Try out DSR mode](#try-out-direct-server-return-mode) -- [Reversing the process](#reversing-the-process) - -### Verify that your cluster is ready for eBPF mode - -This section explains how to make sure your cluster is suitable for eBPF mode. - -To check that the kernel on a node is suitable, you can run - -```bash -uname -rv -``` - -The output should look like this: - -``` -5.4.0-42-generic #46-Ubuntu SMP Fri Jul 10 00:24:02 UTC 2020 -``` - -In this case the kernel version is v5.4, which is suitable. - -On Red Hat-derived distributions, you may see something like this: - -``` -4.18.0-193.el8.x86_64 (mockbuild@x86-vm-08.build.eng.bos.redhat.com) -``` - -Since the Red Hat kernel is v4.18 with at least build number 193, this kernel is suitable. - -### Configure $[prodname] to talk directly to the API server - -In eBPF mode, $[prodname] implements Kubernetes service networking directly (rather than relying on `kube-proxy`). -Of course, this makes it highly desirable to disable `kube-proxy` when running in eBPF mode to save resources -and avoid confusion over which component is handling services. - -To be able to disable `kube-proxy`, $[prodname] needs to communicate to the API server _directly_ rather than -going through `kube-proxy`. To make _that_ possible, we need to find a persistent, static way to reach the API server. -The best way to do that varies by Kubernetes distribution: - -- If you created a cluster manually (for example by using `kubeadm`) then the right address to use depends on whether you - opted for a high-availability cluster with multiple API servers or a simple one-node API server. - - - If you opted to set up a high availability cluster then you should use the address of the load balancer that you - used in front of your API servers. 
As noted in the Kubernetes documentation, a load balancer is required for a - HA set-up but the precise type of load balancer is not specified. - - If you opted for a single control plane node then you can use the address of the control plane node itself. However, - it's important that you use a _stable_ address for that node such as a dedicated DNS record, or a static IP address. - If you use a dynamic IP address (such as an EC2 private IP) then the address may change when the node is restarted - causing $[prodname] to lose connectivity to the API server. - -- `kops` typically sets up a load balancer of some sort in front of the API server. You should use - the FQDN and port of the API load balancer, for example `api.internal.` as the `KUBERNETES_SERVICE_HOST` - below and 443 as the `KUBERNETES_SERVICE_PORT`. -- OpenShift requires various DNS records to be created for the cluster; one of these is exactly what we need: - `api-int..` should point to the API server or to the load balancer in front of the - API server. Use that (filling in the `` and `` as appropriate for your cluster) for the - `KUBERNETES_SERVICE_HOST` below. Openshift uses 6443 for the `KUBERNETES_SERVICE_PORT`. -- MKE runs a reverse proxy in each node that can be used to reach the API server. You should use `proxy.local` - as the `KUBERNETES_SERVICE_HOST` and `6444` as the `KUBERNETES_SERVICE_PORT`. -- For AKS and EKS clusters you should use the FQDN of the API server's load balancer. This can be found with - ``` - kubectl cluster-info - ``` - which gives output like the following: - ``` - Kubernetes master is running at https://60F939227672BC3D5A1B3EC9744B2B21.gr7.us-west-2.eks.amazonaws.com - ... - ``` - In this example, you would use `60F939227672BC3D5A1B3EC9744B2B21.gr7.us-west-2.eks.amazonaws.com` for - `KUBERNETES_SERVICE_HOST` and `443` for `KUBERNETES_SERVICE_PORT` when creating the config map. - -Once you've found the correct address for your API server, create the following config map in the `tigera-operator` -namespace using the host and port that you found above: - -```yaml -kind: ConfigMap -apiVersion: v1 -metadata: - name: kubernetes-services-endpoint - namespace: tigera-operator -data: - KUBERNETES_SERVICE_HOST: '' - KUBERNETES_SERVICE_PORT: '' -``` - -The operator will pick up the change to the config map automatically and do a rolling update of $[prodname] to pass on the change. Confirm that pods restart and then reach the `Running` state with the following command: - -```bash -watch kubectl get pods -n calico-system -``` - -If you do not see the pods restart then it's possible that the `ConfigMap` wasn't picked up (sometimes Kubernetes is slow to propagate `ConfigMap`s (see Kubernetes [issue #30189](https://github.com/kubernetes/kubernetes/issues/30189))). You can try restarting the operator. - -### Configure kube-proxy - -In eBPF mode $[prodname] replaces `kube-proxy` so it wastes resources (and reduces performance) to run both. -This section explains how to disable `kube-proxy` in some common environments. 
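
If you are not sure how `kube-proxy` is deployed in your cluster, a quick check for a `kube-proxy` DaemonSet can help you pick the right approach below. This check assumes the conventional name and `kube-system` namespace used by kubeadm-style clusters; other distributions may manage `kube-proxy` differently.

```bash
# Look for a kube-proxy DaemonSet; if none is found, kube-proxy may be managed by your distribution.
kubectl get daemonset kube-proxy -n kube-system -o wide 2>/dev/null \
  || echo "No kube-proxy DaemonSet found in kube-system"
```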
- -#### Clusters that run `kube-proxy` with a `DaemonSet` (such as `kubeadm`) - -For a cluster that runs `kube-proxy` in a `DaemonSet` (such as a `kubeadm`-created cluster), you can disable `kube-proxy` reversibly by adding a node selector to `kube-proxy`'s `DaemonSet` that matches no nodes, for example: - -```bash -kubectl patch ds -n kube-system kube-proxy -p '{"spec":{"template":{"spec":{"nodeSelector":{"non-calico": "true"}}}}}' -``` - -Then, should you want to start `kube-proxy` again, you can simply remove the node selector. - -:::note - -This approach is not suitable for AKS with Azure CNI since that platform makes use of the Kubernetes add-on manager. -the change will be reverted by the system. For AKS, you should follow [Avoiding conflicts with kube-proxy](#avoiding-conflicts-with-kube-proxy) -below. - -::: - -#### OpenShift - -If you are running OpenShift, you can disable `kube-proxy` as follows: - -```bash -kubectl patch networks.operator.openshift.io cluster --type merge -p '{"spec":{"deployKubeProxy": false}}' -``` - -To re-enable it: - -```bash -kubectl patch networks.operator.openshift.io cluster --type merge -p '{"spec":{"deployKubeProxy": true}}' -``` - -:::note - -If you are running kube-proxy in IPVS mode, switch to iptables mode before disabling. - -::: - -#### MKE - -If you are running MKE, you can disable `kube-proxy` as follows: - -Follow the step procedure in [Modify an existing MKE configuration](https://docs.mirantis.com/mke/current/ops/administer-cluster/configure-an-mke-cluster/use-an-mke-configuration-file.html#modify-an-existing-mke-configuration) to download, edit, and upload your MKE configuration. During the editing step, add the following configuration: -`kube_proxy_mode=disabled` and `kube_default_drop_masq_bits=true`. - -### Avoiding conflicts with kube-proxy - -If you cannot disable `kube-proxy` (for example, because it is managed by your Kubernetes distribution), then you _must_ change Felix configuration parameter `BPFKubeProxyIptablesCleanupEnabled` to `false`. This can be done with `kubectl` as follows: - -```bash -kubectl patch felixconfiguration default --patch='{"spec": {"bpfKubeProxyIptablesCleanupEnabled": false}}' -``` - -If both `kube-proxy` and `BPFKubeProxyIptablesCleanupEnabled` is enabled then `kube-proxy` will write its iptables rules and Felix will try to clean them up resulting in iptables flapping between the two. - -### Verify node interface naming pattern - -When Calico data plane is configured in BPF mode, Calico configures `ebpf` programs for the host interfaces that match the regex pattern defined by the `bpfDataIfacePattern` setting in [FelixConfiguration](../../reference/resources/felixconfig.mdx). The default regex value tries to match commonly used interface names, but interface names can vary depending on a virtualization solution, a flavor of the operating system, company-specific configuration standards, such as VLAN device naming pattern, etc. The regex command should at least match interfaces that participate in intra-cluster and external (e.g. NodePorts) communications. In scenarios when a node has additional interfaces, you may want to leverage Calico policies to secure some of them or even all of them or speed up forwarding to/from pods that use them. In such cases, the regex command should match all interfaces that you want to be managed by Calico. 
- -:::note - -A common example is when a cluster is configured in an on-prem environment and control-plane nodes are virtualized with only one network interface, but the worker nodes are bare-metal nodes with additional interfaces that could be VLAN devices with sub-interfaces and specific naming patterns. In such cases, the `bpfDataIfacePattern` setting may need to be adjusted to include the interface from the control-plane nodes as well as necessary interface from the worker nodes. - -::: - -### Enable eBPF mode - -To enable eBPF mode, change the `spec.calicoNetwork.linuxDataplane` parameter in the operator's `Installation` -resource to `"BPF"`. - -```bash -kubectl patch installation.operator.tigera.io default --type merge -p '{"spec":{"calicoNetwork":{"linuxDataplane":"BPF"}}}' -``` - -When enabling eBPF mode, preexisting connections continue to use the non-BPF datapath; such connections should -not be disrupted, but they do not benefit from eBPF mode’s advantages. - -:::note - -The operator rolls out the change with a rolling update (non-disruptive) and then swiftly transitions all nodes to eBPF mode. However, it's inevitable that some nodes will enter eBPF mode before others. This can disrupt the flow of traffic through node ports. - -::: - -### Try out direct server return mode - -Direct server return (DSR) mode skips a hop through the network for traffic to services (such as node ports) from outside the cluster. -This reduces latency and CPU overhead but it requires the underlying network to allow nodes to send traffic with each other's IPs. -In AWS, this requires all your nodes to be in the same subnet and for the source/dest check to be disabled. - -DSR mode is disabled by default; to enable it, set the `BPFExternalServiceMode` Felix configuration parameter to `"DSR"`. This can be done with `kubectl`: - -```bash -kubectl patch felixconfiguration default --patch='{"spec": {"bpfExternalServiceMode": "DSR"}}' -``` - -To switch back to tunneled mode, set the configuration parameter to `"Tunnel"`: - -```bash -kubectl patch felixconfiguration default --patch='{"spec": {"bpfExternalServiceMode": "Tunnel"}}' -``` - -Switching external traffic mode can disrupt in-progress connections. - -### Reversing the process - -To revert to standard Linux networking: - -1. Reverse the changes to the operator's `Installation`: - - ```bash - kubectl patch installation.operator.tigera.io default --type merge -p '{"spec":{"calicoNetwork":{"linuxDataplane":"Iptables"}}}' - ``` - -1. If you disabled `kube-proxy`, re-enable it (for example, by removing the node selector added above). - - ```bash - kubectl patch ds -n kube-system kube-proxy --type merge -p '{"spec":{"template":{"spec":{"nodeSelector":{"non-calico": null}}}}}' - ``` -2. If you are running MKE, follow the step procedure in [Modify an existing MKE configuration](https://docs.mirantis.com/mke/current/ops/administer-cluster/configure-an-mke-cluster/use-an-mke-configuration-file.html#modify-an-existing-mke-configuration) to download, edit, and upload your MKE configuration. During the editing step, add the following configuration: -`kube_proxy_mode` to `iptables`. - -1. Since disabling eBPF mode is disruptive to existing connections, monitor existing workloads to make sure they re-establish any connections that were disrupted by the switch. 
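
Whichever direction you switch, it can help to confirm which data plane the cluster is currently configured for. The commands below are a quick check; they assume the operator-managed `calico-node` DaemonSet in the `calico-system` namespace, and the exact log wording can vary between releases.

```bash
# Show the data plane configured on the Installation resource (BPF or Iptables).
kubectl get installation.operator.tigera.io default \
  -o jsonpath='{.spec.calicoNetwork.linuxDataplane}{"\n"}'

# Look for BPF-related startup messages from calico-node.
kubectl logs -n calico-system -l k8s-app=calico-node --tail=100 | grep -i bpf
```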
diff --git a/calico-enterprise_versioned_docs/version-3.19-2/operations/ebpf/index.mdx b/calico-enterprise_versioned_docs/version-3.19-2/operations/ebpf/index.mdx deleted file mode 100644 index 357f65bedd..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/operations/ebpf/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: Documentation for eBPF data plane mode, including how to enable eBPF data plane mode. -hide_table_of_contents: true ---- - -# eBPF data plane mode - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/operations/ebpf/install.mdx b/calico-enterprise_versioned_docs/version-3.19-2/operations/ebpf/install.mdx deleted file mode 100644 index 14f2207df9..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/operations/ebpf/install.mdx +++ /dev/null @@ -1,432 +0,0 @@ ---- -description: Install Calico Enterprise in eBPF mode. ---- - -# Install in eBPF mode - -import EbpfValue from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_ebpf-value.mdx'; - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - -## Big picture - -Install the eBPF data plane during the initial installation of $[prodname]. - -## Value - - - -## Concepts - -### eBPF - -eBPF (or "extended Berkeley Packet Filter"), is a technology that allows safe mini programs to be attached to various -low-level hooks in the Linux kernel. eBPF has a wide variety of uses, including networking, security, and tracing. -You’ll see a lot of non-networking projects leveraging eBPF, but for $[prodname] our focus is on networking, -and in particular, pushing the networking capabilities of the latest Linux kernels to the limit. - -## Before you begin - -**Supported architecture and versions** - -- x86-64 -- Linux distribution/kernel: - - - Ubuntu 20.04 and 22.04. - - Red Hat v8.2 with Linux kernel v4.18.0-193 or above (Red Hat have backported the required features to that build). - - Another [supported distribution](../../getting-started/install-on-clusters/requirements.mdx) with Linux kernel v5.3 or above. - -- An underlying network fabric that allows VXLAN traffic between hosts. In eBPF mode, VXLAN is used to forward Kubernetes NodePort traffic. - -**Unsupported platforms** - -- GKE -- MKE -- TKG -- RKE - -:::note - -eBPF supports AKS with Calico CNI and $[prodname] network policy. However, with [AKS with Azure CNI and $[prodname] network policy](../../getting-started/install-on-clusters/aks.mdx#install-with-azure-cni-networking), kube-proxy cannot be disabled so the performance benefits of eBPF are lost. However, there are other reasons to use eBPF other than performance gains, as described in [eBPF use cases](use-cases-ebpf.mdx). - -::: - -**Unsupported features** -- Clusters with some eBPF nodes and some standard data plane and/or Windows nodes -- Floating IPs -- SCTP (either for policy or services) -- `Log` action in policy rules -- Tagged VLAN devices -- L7 logs -- Application layer policies -- Web application firewall (WAF) - -**Recommendations for performance** - -For best pod-to-pod performance, we recommend using an underlying network that doesn't require $[prodname] to use an overlay. 
For example: - -- A cluster within a single AWS subnet -- A cluster using a compatible cloud provider's CNI (such as the AWS VPC CNI plugin) -- An on-prem cluster with BGP peering configured - -If you must use an overlay, we recommend that you use VXLAN, not IPIP. VXLAN has better performance than IPIP in eBPF mode due to various kernel optimizations. - -## How to - -Installing $[prodname] normally consists of the following steps, which are covered by the main installation -guide: - -- Create a cluster suitable to run $[prodname] -- Install the Tigera Operator, and the associated Custom Resource Definitions -- Apply a set of Custom Resources to tell the operator what to install -- Wait for the operator to provision all the associated resources and report back via its status resource - -To install directly in eBPF is very similar; this guide explains the differences: - -- [Create a cluster](#create-a-suitable-cluster) suitable to run $[prodname] **with the added requirement that the nodes must use a recent - enough kernel**. -- [**Create a config map with the "real" address of the API server.**](#create-kubernetes-service-endpoint-config-map) This allows the operator to install $[prodname] - with a direct connection to the API server so that it can take over from `kube-proxy`. -- [Install the Tigera Operator](#install-the-tigera-operator) (possibly via a Helm chart), and the associated Custom Resource Definitions. -- **[Download and tweak the installation Custom Resource](#tweak-and-apply-installation-custom-resources) to tell the operator to use eBPF mode.** -- [Apply a set of Custom Resources](#tweak-and-apply-installation-custom-resources) to tell the operator what to install. -- [Wait for the operator to provision all the associated resources and report back via its status resource](#monitor-the-progress-of-the-installation). -- [Disable kube-proxy or avoid conflicts.](#disable-kube-proxy-or-avoid-conflicts) - -These steps are explained in more detail below. - -### Create a suitable cluster - -The basic requirement for eBPF mode is to have a recent-enough kernel (see [supported architectures and versions](#before-you-begin)). - -Select the appropriate tab below for distribution-specific instructions: - - - - -`kubeadm` supports a number of base OSes; as long as the base OS chosen (such as Ubuntu 20.04) meets the kernel -requirements, `kubeadm`-provisioned clusters are supported. - -Since `kube-proxy` is not required in eBPF mode, you must disable `kube-proxy` at install time. With `kubeadm` -you can do that by passing the ` --skip-phases=addon/kube-proxy` flag to `kubeadm init`: - -``` -kubeadm init --skip-phases=addon/kube-proxy -``` - - - - -`kops` supports a number of base OSes; as long as the base OS chosen (such as Ubuntu 20.04 or RHEL 8.2) meets the kernel -requirements, `kops`-provisioned clusters are supported. - -Since `kube-proxy` is not required in eBPF mode, you must disable `kube-proxy` at install time. With `kops` you -can do that by setting the following in your `kops` configuration: - -```yaml -kubeProxy: - enabled: false -``` - - - - -OpenShift supports a number of base OSes; as long as the base OS chosen has a recent enough kernel, OpenShift clusters are -fully supported. Since Red Hat have backported the eBPF features required by $[prodname] the Red Hat kernel -version required is lower than the mainline: v4.18.0-193 or above. - - - - -Azure Kubernetes Service (AKS) supports a number of base OSes. 
The most recent Ubuntu 18.04 image has a recent enough -kernel to use with eBPF mode. - -AKS does not support disabling `kube-proxy` so it's necessary to tell $[prodname] not to try to clean up -`kube-proxy`'s iptables rules at a later stage. - - - - -Amazon's Elastic Kubernetes Service (EKS) supports a number of base OSes for nodes. At the time of writing, the -default kernel used by Amazon Linux is recent enough to run eBPF mode, as is the Bottlerocket kernel. The Ubuntu -18.04 image did not have a recent-enough kernel (but that may have changed by the time you read this). - - - - -The eBPF data plane is supported on MKE with any Linux operating system that meets the minimum kernel requirements. - -Since `kube-proxy` is not required in eBPF mode, you must disable `kube-proxy` at install time. With `MKE` you -can do that by setting `--kube-proxy-mode=disabled` and `--kube-default-drop-masq-bits` when installing the cluster. - -More details can be found in [the MKE documentation](https://docs.mirantis.com/mke/current/install/predeployment/configure-networking/cluster-service-networking-options.html) - - - - - -### Create kubernetes-service-endpoint config map - -In eBPF mode, $[prodname] takes over from `kube-proxy`. This means that, like `kube-proxy`, it needs to be able -to reach the API server _directly_ rather than by using the API server's `ClusterIP`. To tell $[prodname] how -to reach the API server we create a `ConfigMap` with the API server's "real" address. In this guide we do that before -installing the Tigera Operator. That means that the operator itself can also use the direct connection and hence -it doesn't require `kube-proxy` to be running. - -The tabs below explain how to find the "real" address of the API server for a range of distributions. -**Note:** In all cases it's important that the address used is stable even if your API server is restarted or -scaled up/down. If you have multiple API servers, with DNS or other load balancing in front it's important to use -the address of the load balancer. This prevents $[prodname] from being disconnected if the API servers IP changes. - - - - -If you created a cluster manually (for example by using `kubeadm`) then the right address to use depends on whether you -opted for a high-availability cluster with multiple API servers or a simple one-node API server. - -- If you opted to set up a high availability cluster then you should use the address of the load balancer that you - used in front of your API servers. As noted in the Kubernetes documentation, a load balancer is required for a - HA set-up but the precise type of load balancer is not specified. -- If you opted for a single control plane node then you can use the address of the control plane node itself. However, - it's important that you use a _stable_ address for that node such as a dedicated DNS record, or a static IP address. - If you use a dynamic IP address (such as an EC2 private IP) then the address may change when the node is restarted - causing $[prodname] to lose connectivity to the API server. - - - - -When using `kops`, `kops` typically sets up a load balancer of some sort in front of the API server. You should use -the FQDN and port of the API load balancer: `api.internal.`. - - - - -OpenShift requires various DNS records to be created for the cluster; one of these is exactly what we need: -`api..` should point to the API server or to the load balancer in front of the -API server. 
Use that (filling in the `` and `` as appropriate for your cluster) for the -`KUBERNETES_SERVICE_HOST` below. Openshift uses 6443 for the `KUBERNETES_SERVICE_PORT`. - - - - -For AKS clusters, you should use the FQDN of your API server. This can be found by running the following command: - -```bash -kubectl cluster-info -``` - -which should give output similar to the following: - -``` -Kubernetes master is running at https://mycalicocl-calicodemorg-03a087-36558dbb.hcp.canadaeast.azmk8s.io:443 -``` - -In this example, you would use `mycalicocl-calicodemorg-03a087-36558dbb.hcp.canadaeast.azmk8s.io` for -`KUBERNETES_SERVICE_HOST` and `443` for `KUBERNETES_SERVICE_PORT` when creating the config map. - - - - -For an EKS cluster, it's important to use the domain name of the EKS-provided load balancer that is in front of the API -server. This can be found by running the following command: - -```bash -kubectl cluster-info -``` - -which should give output similar to the following: - -``` -Kubernetes master is running at https://60F939227672BC3D5A1B3EC9744B2B21.gr7.us-west-2.eks.amazonaws.com -... -``` - -In this example, you would use `60F939227672BC3D5A1B3EC9744B2B21.gr7.us-west-2.eks.amazonaws.com` for -`KUBERNETES_SERVICE_HOST` and `443` for `KUBERNETES_SERVICE_PORT` when creating the config map. - - - - -MKE runs a reverse proxy in each node which can be used to reach the api-server. `KUBERNETES_SERVICE_HOST` must be set to -`proxy.local` and `KUBERNETES_SERVICE_PORT` must be set to `6444`. - - - - -### Install the Tigera Operator - -Follow the steps in the main install for your platform that installs the Tigera Operator, without applying the custom-resources.yaml (you will update this file in a later step in this doc). - -For clusters in AWS, such as kOps and EKS, you must also patch the tigera-operator deployment with DNS config so the operator can resolve the apiserver DNS. -AWS DNS server's address is 169.254.169.253. - -```bash -kubectl patch deployment -n tigera-operator tigera-operator -p '{"spec":{"template":{"spec":{"dnsConfig":{"nameservers":["169.254.169.253"]}}}}}' -``` - -### Create the Config Map - -Create the following config map in the `tigera-operator` namespace using the host and port determined above: - -```bash -kubectl apply -f - <" - KUBERNETES_SERVICE_PORT: "" -EOF -``` - -### Tweak and apply installation Custom Resources - -When the main install guide tells you to apply the `custom-resources.yaml`, typically by running `kubectl create` with -the URL of the file directly, you should instead download the file, so that you can edit it: - -```bash - curl -o custom-resources.yaml $[filesUrl]/manifests/custom-resources.yaml -``` - -Edit the file in your editor of choice and find the `Installation` resource, which should be at the top of the file. -To enable eBPF mode, we need to add a new `calicoNetwork` section inside the `spec` of the Installation resource, -including the `linuxDataplane` field. For EKS Bottlerocket OS only, you should also add the `flexVolumePath` setting -as shown below. - -For example: - -```yaml -# This section includes base Calico Enterprise installation configuration. - -kind: Installation -metadata: - name: default -spec: - # Added calicoNetwork section with linuxDataplane field - calicoNetwork: - linuxDataplane: BPF - - # EKS with Bottlerocket as node image only: - # flexVolumePath: /var/lib/kubelet/plugins - - # ... remainder of the Installation resource varies by platform ... 
- - # Install Calico Enterprise - variant: TigeraSecureEnterprise - ``` - -Then apply the edited file: - -```bash -kubectl create -f custom-resources.yaml -``` - -:::tip - -If you already created the custom resources, you can switch your cluster over to eBPF mode by updating the -installation resource. The operator will automatically roll out the change. - -```bash -kubectl patch installation.operator.tigera.io default --type merge -p '{"spec":{"calicoNetwork":{"linuxDataplane":"BPF", "hostPorts":null}}}' -``` - -::: - -### Monitor the progress of the installation - -You can monitor progress of the installation with the following command: - -```bash -watch kubectl get tigerastatus -``` - -### Disable `kube-proxy` (or avoid conflicts) - -In eBPF mode, to avoid conflicts with `kube-proxy` it's necessary to either disable `kube-proxy` or to configure -$[prodname] not to clean up `kube-proxy`'s iptables rules. If you didn't disable `kube-proxy` when starting -your cluster then follow the steps below to avoid conflicts: - - - - -For a cluster that runs `kube-proxy` in a `DaemonSet` (such as a `kubeadm`-created cluster), you can disable -`kube-proxy`, reversibly, by adding a node selector to `kube-proxy`'s `DaemonSet` that matches no nodes, for example: - -```bash -kubectl patch ds -n kube-system kube-proxy -p '{"spec":{"template":{"spec":{"nodeSelector":{"non-calico": "true"}}}}}' -``` - -Then, should you want to start `kube-proxy` again, you can simply remove the node selector. - - - - -`kops` allows `kube-proxy` to be disabled by setting - -```yaml -kubeProxy: - enabled: false -``` - -in its configuration. You will need to do `kops update cluster` to roll out the change. - - - - -In OpenShift, you can disable `kube-proxy` as follows: - -```bash -kubectl patch networks.operator.openshift.io cluster --type merge -p '{"spec":{"deployKubeProxy": false}}' -``` - -If you need to re-enable it later: - -```bash -kubectl patch networks.operator.openshift.io cluster --type merge -p '{"spec":{"deployKubeProxy": true}}' -``` - - - - -AKS with Azure CNI does not allow `kube-proxy` to be disabled, `kube-proxy` is deployed by the add-on manager, which will reconcile -away any manual changes made to its configuration. To ensure `kube-proxy` and $[prodname] don't fight, set -the Felix configuration parameter `bpfKubeProxyIptablesCleanupEnabled` to false. This can be done with -`kubectl` as follows: - -```bash -kubectl patch felixconfiguration default --type merge --patch='{"spec": {"bpfKubeProxyIptablesCleanupEnabled": false}}' -``` - - - - -In EKS, you can disable `kube-proxy`, reversibly, by adding a node selector that doesn't match and nodes to -`kube-proxy`'s `DaemonSet`, for example: - -```bash -kubectl patch ds -n kube-system kube-proxy -p '{"spec":{"template":{"spec":{"nodeSelector":{"non-calico": "true"}}}}}' -``` - -Then, should you want to start `kube-proxy` again, you can simply remove the node selector. - - - - -:::note - -If you are running kube-proxy in IPVS mode, switch to iptables mode before disabling. 
- -::: - -## Next steps - -**Recommended** - -- [Learn more about eBPF](use-cases-ebpf.mdx) - -**Recommended - Security** - -- [Get started with $[prodname] tiered network policy](../../network-policy/policy-tiers/tiered-policy.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/operations/ebpf/troubleshoot-ebpf.mdx b/calico-enterprise_versioned_docs/version-3.19-2/operations/ebpf/troubleshoot-ebpf.mdx deleted file mode 100644 index 14e7998227..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/operations/ebpf/troubleshoot-ebpf.mdx +++ /dev/null @@ -1,249 +0,0 @@ ---- -description: How to troubleshoot when running in eBPF mode. ---- - -# Troubleshoot eBPF mode - -This document gives some general troubleshooting guidance for the eBPF data plane. - -## Troubleshoot ebpf video - -To understand basic concepts, we recommend the following video by Tigera Engineers: [Opening the Black Box: Understanding and troubleshooting Calico's eBPF Data Plane](https://www.youtube.com/watch?v=Mh43sNBu208). - -## Troubleshoot access to services - -If pods or hosts within your cluster have trouble accessing services, check the following: - -- Either $[prodname]'s eBPF mode or `kube-proxy` must be active on a host for services to function. If you - disabled `kube-proxy` when enabling eBPF mode, verify that eBPF mode is actually functioning. If $[prodname] - detects that the kernel is not supported, it will fall back to standard data plane mode (which does not support - services). - - To verify that eBPF mode is correctly enabled, examine the log for a `$[noderunning]` container; if - eBPF mode is not supported it will log an `ERROR` log that says - - ```bash - BPF data plane mode enabled but not supported by the kernel. Disabling BPF mode. - ``` - - If BPF mode is correctly enabled, you should see an `INFO` log that says - - ```bash - BPF enabled, starting BPF endpoint manager and map manager. - ``` - -- In eBPF mode, external client access to services (typically NodePorts) is implemented using VXLAN encapsulation. - If NodePorts time out when the backing pod is on another node, check your underlying network fabric allows - VXLAN traffic between the nodes. VXLAN is a UDP protocol; by default it uses port 4789. -- In DSR mode, $[prodname] requires that the underlying network fabric allows one node to respond on behalf of - another. - - - In AWS, to allow this, the Source/Dest check must be disabled on the node's NIC. However, note that DSR only - works within AWS; it is not compatible with external traffic through a load balancer. This is because the load - balancer is expecting the traffic to return from the same host. - - - In GCP, the "Allow forwarding" option must be enabled. As with AWS, traffic through a load balancer does not - work correctly with DSR because the load balancer is not consulted on the return path from the backing node. - -# The `calico-bpf` tool - -Since BPF maps contain binary data, the $[prodname] team wrote a tool to examine $[prodname]'s BPF maps. -The tool is embedded in the $[nodecontainer] container image. To run the tool: - -- Find the name of the $[nodecontainer] Pod on the host of interest using - - ```bash - kubectl get pod -o wide -n calico-system - ``` - - for example, `calico-node-abcdef` - -- Run the tool as follows: - - ```bash - kubectl exec -n calico-system calico-node-abcdef -- calico-node -bpf ... 
- ``` - - For example, to show the tool's help: - - ```bash - kubectl exec -n calico-system calico-node-abcdef -- calico-node -bpf help - - Usage: - calico-bpf [command] - - Available Commands: - arp Manipulates arp - connect-time Manipulates connect-time load balancing programs - conntrack Manipulates connection tracking - counters Show and reset counters - help Help about any command - ipsets Manipulates ipsets - nat Manipulates network address translation (nat) - routes Manipulates routes - version Prints the version and exits - - Flags: - --config string config file (default is $HOME/.calico-bpf.yaml) - -h, --help help for calico-bpf - --log-level string Set log level (default "warn") - -t, --toggle Help message for toggle - ``` - - (Since the tool is embedded in the main `calico-node` binary the `--help` option is not available, but running - `calico-node -bpf help` does work.) - - To dump the BPF conntrack table: - - ``` - kubectl exec -n calico-system calico-node-abcdef -- calico-node -bpf conntrack dump - ... - ``` - - Also, it is possible to fetch various counters, like packets dropped by a policy or different errors, from BPF data plane using the same tool. - For example, to dump the BPF counters of `eth0` interface: - - ``` - kubectl exec -n calico-system calico-node-abcdef -- calico-node -bpf counters dump --iface=eth0 - +----------+--------------------------------+---------+--------+ - | CATEGORY | TYPE | INGRESS | EGRESS | - +----------+--------------------------------+---------+--------+ - | Accepted | by another program | 0 | 0 | - | | by failsafe | 0 | 4 | - | | by policy | 21 | 0 | - | Dropped | by policy | 4 | 0 | - | | failed decapsulation | 0 | 0 | - | | failed encapsulation | 0 | 0 | - | | incorrect checksum | 0 | 0 | - | | malformed IP packets | 0 | 0 | - | | packets with unknown route | 0 | 0 | - | | packets with unknown source | 0 | 0 | - | | packets with unsupported IP | 0 | 0 | - | | options | | | - | | too short packets | 0 | 0 | - | Total | packets | 1593 | 1973 | - +----------+--------------------------------+---------+--------+ - dumped eth0 counters. - ``` - -## Check if a program is dropping packets - -To check if an eBPF program is dropping packets, you can use either the `calico-bpf` or `tc` command-line tool. For example, if you -are worried that the eBPF program attached to `eth0` is dropping packets, you can use `calico-bpf` to fetch BPF counters as described -in the previous section and look for one of the `Dropped` counters or you can run the following command: - -``` -tc -s qdisc show dev eth0 -``` - -The output should look like the following; find the `clsact` qdisc, which is the attachment point for eBPF programs. -The `-s` option to `tc` causes `tc` to display the count of dropped packets, which amounts to the count of packets -dropped by the eBPF programs. - -``` -... -qdisc clsact 0: dev eth0 root refcnt 2 - sent 1340 bytes 10 pkt (dropped 10, overlimits 0 requeues 0) - backlog 0b 0p requeues 0 -... -``` - -## Debug high CPU usage - -If you notice `$[noderunning]` using high CPU: - -- Check if `kube-proxy` is still running. If `kube-proxy` is still running, you must either disable `kube-proxy` or - ensure that the Felix configuration setting `bpfKubeProxyIptablesCleanupEnabled` is set to `false`. If the setting - is set to `true` (its default), then Felix will attempt to remove `kube-proxy`'s iptables rules. If `kube-proxy` is - still running, it will fight with `Felix`. 
-- If your cluster is very large, or your workload involves significant service churn, you can increase the interval - at which Felix updates the services data plane by increasing the `bpfKubeProxyMinSyncPeriod` setting. The default is - 1 second. Increasing the value has the trade-off that service updates will happen more slowly. -- $[prodname] supports endpoint slices, similarly to `kube-proxy`. If your Kubernetes cluster supports endpoint - slices and they are enabled, then you can enable endpoint slice support in $[prodname] with the - `bpfKubeProxyEndpointSlicesEnabled` configuration flag. - -## eBPF program debug logs - -$[prodname]'s eBPF programs contain optional detailed debug logging. Although the logs can be very verbose (because -the programs will log every packet), they can be invaluable to diagnose eBPF program issues. To enable the log, set the -`bpfLogLevel` Felix configuration setting to `Debug`. - -:::caution - -Enabling logs in this way has a significant impact on eBPF program performance. - -::: - -> The logs are emitted to the kernel trace buffer, and they can be examined using the following command: - -``` -tc exec bpf debug -``` - -Logs have the following format: - -``` - <...>-84582 [000] .Ns1 6851.690474: 0: ens192---E: Final result=ALLOW (-1). Program execution time: 7366ns -``` - -The parts of the log are explained below: - -- `<...>-84582` gives an indication about what program (or kernel process) was handling the - packet. For packets that are being sent, this is usually the name and PID of the program that is actually sending - the packet. For packets that are received, it is typically a kernel process, or an unrelated program that happens to - trigger the processing. -- `6851.690474` is the log timestamp. - -- `ens192---E` is the $[prodname] log tag. For programs attached to interfaces, the first part contains the - first few characters of the interface name. The suffix is either `-I` or `-E` indicating "Ingress" or "Egress". - "Ingress" and "Egress" have the same meaning as for policy: - - - A workload ingress program is executed on the path from the host network namespace to the workload. - - A workload egress program is executed on the workload to host path. - - A host endpoint ingress program is executed on the path from external node to the host. - - A host endpoint egress program is executed on the path from host to external host. - -- `Final result=ALLOW (-1). Program execution time: 7366ns` is the message. In this case, logging the final result of - the program. Note that the timestamp is massively distorted by the time spent logging. - -## Poor performance - -A number of problems can reduce the performance of the eBPF data plane. - -- Verify that you are using the best networking mode for your cluster. If possible, avoid using an overlay network; - a routed network with no overlay is considerably faster. If you must use one of $[prodname]'s overlay modes, - use VXLAN, not IPIP. IPIP performs poorly in eBPF mode due to kernel limitations. -- If you are not using an overlay, verify that the [Felix configuration parameters](../../reference/component-resources/node/felix/configuration.mdx) - `ipInIpEnabled` and `vxlanEnabled` are set to `false`. Those parameters control whether Felix configured itself to - allow IPIP or VXLAN, even if you have no IP pools that use an overlay. The parameters also disable certain eBPF - mode optimisations for compatibility with IPIP and VXLAN. 
- - To examine the configuration: - - ```bash - kubectl get felixconfiguration -o yaml - ``` - - ```yaml noValidation - apiVersion: projectcalico.org/v3 - items: - - apiVersion: projectcalico.org/v3 - kind: FelixConfiguration - metadata: - creationTimestamp: "2020-10-05T13:41:20Z" - name: default - resourceVersion: "767873" - uid: 8df8d751-7449-4b19-a4f9-e33a3d6ccbc0 - spec: - ... - ipipEnabled: false - ... - vxlanEnabled: false - kind: FelixConfigurationList - metadata: - resourceVersion: "803999" - ``` - -- If you are running your cluster in a cloud such as AWS, then your cloud provider may limit the bandwidth between - nodes in your cluster. For example, most AWS nodes are limited to 5GBit per connection. diff --git a/calico-enterprise_versioned_docs/version-3.19-2/operations/ebpf/use-cases-ebpf.mdx b/calico-enterprise_versioned_docs/version-3.19-2/operations/ebpf/use-cases-ebpf.mdx deleted file mode 100644 index 35d5f33d66..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/operations/ebpf/use-cases-ebpf.mdx +++ /dev/null @@ -1,95 +0,0 @@ ---- -description: Learn when to use eBPF, and when not to. ---- - -# eBPF use cases - -## Big picture - -Learn when to use eBPF (and when not to). - -## What is eBPF? - -eBPF is a feature available in Linux kernels that allows you to run a virtual machine inside the kernel. This virtual machine allows you to safely load programs into the kernel, to customize its operation. Why is this important? - -In the past, making changes to the kernel was difficult: there were APIs you could call to get data, but you couldn’t influence what was inside the kernel or execute code. Instead, you had to submit a patch to the Linux community and wait for it to be approved. With eBPF, you can load a program into the kernel and instruct the kernel to execute your program if, for example, a certain packet is seen or another event occurs. - -With eBPF, the kernel and its behavior become highly customizable, instead of being fixed. This can be extremely beneficial, when used under the right circumstances. - -## $[prodname] and eBPF - -$[prodname] offers an eBPF data plane as an alternative to our standard Linux data plane (which is iptables based). While the standard data plane focuses on compatibility by working together with kube-proxy and your own iptables rules, the eBPF data plane focuses on performance, latency, and improving user experience with features that aren’t possible with the standard data plane. - -But $[prodname] doesn’t only support standard Linux and eBPF; it currently supports a total of three data planes, including Windows HNS, and has plans to add support for even more data planes in the near future. $[prodname] enables you, the user, to decide what works best for what you want to do. - -If you enable eBPF within $[prodname] but have existing iptables flows, we won’t touch them. Because maybe you want to use connect-time load balancing, but leave iptables as is. With $[prodname], it’s not an all-or-nothing deal—we allow you to easily load and unload our eBPF data plane to suit your needs, which means you can quickly try it out before making a decision. $[prodname] offers you the ability to leverage eBPF as needed, as an additional control to build your Kubernetes cluster security. - -## Use cases - -There are several use cases for eBPF, including traffic control, creating network policy, and connect-time load balancing. 
- -### Traffic control - -Without eBPF, packets use the standard Linux networking path on their way to a final destination. If a packet shows up at point A, and you know that the packet needs to go to point B, you can optimize the network path in the Linux kernel by sending it straight to point B. With eBPF, you can leverage additional context to make these changes in the kernel so that packets bypass complex routing and simply arrive at their final destination. - -This is especially relevant in a Kubernetes container environment, where you have numerous networks. (In addition to the host network stack, each container has its own mini network stack.) When traffic comes in, it is usually routed to a container stack and must travel a complex path as it makes its way there from the host stack. This routing can be bypassed using eBPF. - -### Creating network policy - -When creating network policy, there are two instances where eBPF can be used: - -- **eXpress Data Path (XDP)** – As a raw packet buffer enters the system, eBPF gives you an efficient way to examine that buffer and make quick decisions about what to do with it. - -- **Network policy** – eBPF allows you to efficiently examine a packet and apply network policy, both for pods and hosts. - -### Connect-time load balancing - -When load balancing service connections in Kubernetes, a pod needs to talk to a service and therefore network address translation (NAT) must occur. A packet is sent to a virtual IP, and that virtual IP translates it to the destination IP of the pod backing the service; the pod then responds to the virtual IP and the return packet is translated back to the source. - -With eBPF, you can avoid this packet translation by using an eBPF program that you’ve loaded into the kernel and load balancing at the source of the connection. All NAT overhead from service connections is removed because destination network address translation (DNAT) does not need to take place on the packet processing path. - -## The price of performance - -So is eBPF more efficient than standard Linux iptables? The short answer: it depends. - -If you were to micro-benchmark how iptables works when applying network policies with a large number of IP addresses (i.e. ipsets), iptables in many cases is better than eBPF. But if you want to do something in the Linux kernel where you need to alter the packet flow in the kernel, eBPF would be the better choice. Standard Linux iptables is a complex system and certainly has its limitations, but at the same time it provides options to manipulate traffic; if you know how to program iptables rules, you can achieve a lot. eBPF allows you to load your own programs into the kernel to influence behavior that can be customized to your needs, so it is more flexible than iptables as it is not limited to one set of rules. - -Something else to consider is that, while eBPF allows you to run a program, add logic, redirect flows, and bypass processing—which is a definite win—it’s a virtual machine and as such must be translated to bytecode. By comparison, the Linux kernel’s iptables is already compiled to code. - -As you can see, comparing eBPF to iptables is not a straight apples-to-apples comparison. What we need to assess is performance, and the two key factors to look at here are latency (speed) and expense. If eBPF is very fast but takes up 80% of your resources, then it’s like a Lamborghini—an expensive, fast car. And if that works for you, great (maybe you really like expensive, fast cars). 
Just keep in mind that more CPU usage means more money spent with your cloud providers. So while a Lamborghini might be faster than a lot of other cars, it might not be the best use of money if you need to comply with speed limits on your daily commute. - -## When to use eBPF (and when not to) - -With eBPF, you get performance—but it comes at a cost. You need to find a balance between the two by figuring out the price of performance, and deciding if it’s acceptable to you from an eBPF perspective. - -Let’s look at some specific cases where it would make sense to use eBPF, and some where it would not. - -### When not to use eBPF - -### ✘ Packet-by-packet processing - -Using eBPF to perform CPU intensive or packet-by-packet processing, such as decryption and re-encryption for encrypted flows, would not be efficient because you would need to build a structure and do a lookup for every packet, which is expensive. - -### When to use eBPF - -### ✔ XDP - -eBPF provides an efficient way to examine raw packet buffers as they enter the system, allowing you to make quick decisions about what to do with them. - -### ✔ Connect-time load balancing - -With eBPF, you can load balance at the source using a program you’ve loaded into the kernel, instead of using a virtual IP. Since DNAT does not need to take place on the packet processing path, all NAT overhead from service connections is removed. - -### ✔ Building a service mesh control plane - -Service mesh relies on proxies like Envoy. A lot of thought has gone into designing this process over the years. The main reason for doing it this way is that, in many cases, it is not viable to do inline processing for application protocols like HTTP at the high speeds seen inside a cluster. Therefore, you should think of using eBPF to route traffic to a proxy like Envoy in an efficient way, rather than using it to replace the proxy itself. However, you do need to turn off connect-time load balancing (CTLB) so sidecars can see the service addresses. Given you are already taking a performance hit by the extra hop to the sidecar, not using CTLB performance optimization to avoid NAT overhead is likely not a big deal. - -## Summary - -Is eBPF a replacement for iptables? Not exactly. It’s hard to imagine everything working as efficiently with eBPF as it does with iptables. For now, the two co-exist and it’s up to the user to weigh the price-performance tradeoff and decide which feature to use when, given their specific needs. - -We believe the right solution is to leverage eBPF, along with existing mechanisms in the Linux kernel, to achieve your desired outcome. That’s why $[prodname] offers support for multiple data planes, including standard Linux, Windows HNS, and Linux eBPF. Since we have established that both eBPF and iptables are useful, the only logical thing to do in our opinion is to support both. $[prodname] gives you the choice so you can choose the best tool for the job. - -## Additional resources - -To learn more and see performance metrics from our test environment, see the blog, [Introducing the eBPF data plane](https://www.projectcalico.org/introducing-the-calico-ebpf-dataplane/). 
diff --git a/calico-enterprise_versioned_docs/version-3.19-2/operations/index.mdx b/calico-enterprise_versioned_docs/version-3.19-2/operations/index.mdx deleted file mode 100644 index adcc11f46d..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/operations/index.mdx +++ /dev/null @@ -1,93 +0,0 @@ ---- -description: Post-installation tasks for managing Calico including upgrading and troubleshooting. ---- - -import { DocCardLink, DocCardLinkLayout } from '/src/___new___/components'; - -# Operations - -Post-installation tasks for managing Calico Enterprise. - -## Configuring the web console - - - - - - - - -## calicoctl and calicoq - - - - - - - - - - -## Securing component communications - - - - - - - - - - - - - - - -## Storage - - - - - - - - -## Monitoring - - - - - - - - - - - - - -## eBPF - - - - - - - - -## Troubleshooting - - - - - - - - -## Other operations tasks - - - - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/operations/license-options.mdx b/calico-enterprise_versioned_docs/version-3.19-2/operations/license-options.mdx deleted file mode 100644 index 3ff4aec4b5..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/operations/license-options.mdx +++ /dev/null @@ -1,19 +0,0 @@ ---- -description: Review options to track your Calico Enterprise license expiration. ---- - -# License expiration and renewal - -import License from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_license.mdx'; - -## Big picture - -Review options for tracking $[prodname] license expiration. - -## Concepts - -We highly recommend using the [license agent using Prometheus](monitor/metrics/license-agent.mdx) to get alerts on your $[prodname] license expiration day to avoid disruption to services. Regardless of whether you using the alerting feature, here are some things you should know. - -### FAQ - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/operations/logstorage/adjust-log-storage-size.mdx b/calico-enterprise_versioned_docs/version-3.19-2/operations/logstorage/adjust-log-storage-size.mdx deleted file mode 100644 index d95544708f..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/operations/logstorage/adjust-log-storage-size.mdx +++ /dev/null @@ -1,74 +0,0 @@ ---- -description: Adjust the log storage size during or after installation. ---- - -# Adjust log storage size - -## Big picture - -Adjust the size of the $[prodname] log storage during or after installation. - -## Value - -By default, $[prodname] creates the log storage with a single node. This makes it easy to get started using $[prodname]. -Generally, a single node for logs is fine for test or development purposes. Before going to production, you should scale -the number of nodes, replicas, CPU, and memory to reflect a production environment. - -## Concepts - -### Log storage terms - -| Term | Description | -| ------- | --------------------------------------------------------------------------------------------------------------------------------------------------- | -| node | A running instance of the log storage. | -| cluster | A collection of nodes. Multiple nodes protect the cluster from any single node failing, and lets you scale resources (CPU, memory, storage space) . | -| replica | A copy of data. Replicas protect against data loss if a node fails. The number of replicas must be less than the number of nodes. | - -## Before you begin... 
- -**Review log storage recommendations** - -Review [Log storage recommendations](log-storage-recommendations.mdx) for guidance on the number of nodes and resources to configure for your environment. - -## How to - -- [Adjusting LogStorage](#adjusting-logstorage) - -:::caution - -If you are not using a dynamic provisioner, make sure there is an available persistent volume before updating the resource requirements (cpu, memory, storage) in this section. To check that a persistent volume has the status of `Available`, run this command: `kubectl get pv | grep tigera-elasticsearch` - -::: - -### Adjusting LogStorage - -In the following example, $[prodname] is configured to install 3 nodes that have 200Gi of storage each with 1 replica. Whenever the storage size is modified, resourceRequirements must be revisited respectively to support these changes. - -```yaml -apiVersion: operator.tigera.io/v1 -kind: LogStorage -metadata: - name: tigera-secure -spec: - indices: - replicas: 1 - nodes: - count: 3 - # This section sets the resource requirements for each individual Elasticsearch node. - resourceRequirements: - limits: - cpu: 1000m - memory: 16Gi - requests: - cpu: 1000m - memory: 16Gi - storage: 200Gi - componentResources: - - componentName: ECKOperator - # This section sets the resource requirements for the operator that bootstraps the Elasticsearch cluster. - resourceRequirements: - limits: - memory: 512Mi - requests: - memory: 512Mi -``` diff --git a/calico-enterprise_versioned_docs/version-3.19-2/operations/logstorage/advanced-node-scheduling.mdx b/calico-enterprise_versioned_docs/version-3.19-2/operations/logstorage/advanced-node-scheduling.mdx deleted file mode 100644 index 190338dcff..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/operations/logstorage/advanced-node-scheduling.mdx +++ /dev/null @@ -1,104 +0,0 @@ ---- -description: Control where Elasticsearch pods and replicas are scheduled. ---- - -# Advanced Node Scheduling - -## Big picture - -$[prodname] can leverage advanced Kubernetes node scheduling options and advanced Elasticsearch shard scheduling -options to allow you to customize the distribution of the Elasticsearch pods and replicas across Kubernetes nodes with -specific attributes. - -## Value - -In some scenarios you may want to control where the Elasticsearch pods and replicas are scheduled. DataNodeSelectors and -SelectionAttributes have been designed to add this flexibility. - -To make sure that all the Elasticsearch pods are scheduled on Kubernetes nodes with specific attributes, use dataNodeSelectors -and specify the labels a Kubernetes node must have in order for it to be eligible for an Elasticsearch pod to be scheduled on it. - -To protect the Elasticsearch cluster from failing if certain Kubernetes nodes fail, use SelectionAttributes. Elasticsearch -pods and replicas will be distributed across nodes with different node labels, ensuring that in the event of Kubernetes -node failure all the Elasticsearch data will still be available. - -## How to - -### Schedule LogStorage using dataNodeSelectors - -The $[prodname] [LogStorageSpec](../../reference/installation/api.mdx) has the field `dataNodeSelector`, -which is a map where the keys are label names and values are label values. The Elasticsearch pods used by $[prodname] -will only be scheduled on Kubernetes nodes that match **all** labels. - -1. Add the labels to your `LogStorage`. - `batch kubectl apply -f - < label1=value1 - kubectl label nodes label2=value2 - ``` -1. Monitor the progress. 
- ```bash - kubectl get pod -o wide -n tigera-elasticsearch -w - ``` - -### Make LogStorage zone-aware using selectionAttributes - -If you are in a situation where your cluster has nodes across three regions, you can make sure your Elasticsearch pods -and replicas are spread evenly among the three regions to ensure the Elasticsearch cluster is intact during zone failure. -Consider these Kubernetes nodes: - -- Nodes in zone A have label `failure-domain.beta.kubernetes.io/zone: zone-a`. -- Nodes in zone B have label `failure-domain.beta.kubernetes.io/zone: zone-b`. -- Nodes in zone C have label `failure-domain.beta.kubernetes.io/zone: zone-c`. - -We can now use a LogStorage with three node sets that will all form one cluster. For each node set add `selectionAttributes` object, -such that: - -- **NodeLabel** represents a label that should be found on a Kubernetes node for it to be scheduled on. -- **Value** represents the value for `nodeLabel`. -- **Name** represents a unique label for this selection attribute. - -It is important to have at least one replica of each shard to make sure that no data can get lost. - -1. Apply the following `LogStorage` manifest. - ```bash - kubectl apply -f - < - tigera-secure-es-dee415120001c937-0-1 1/1 Running 0 15m 192.168.8.196 ip-192-168-26-221.zone-a.compute.internal - tigera-secure-es-dee415120001c937-1-0 1/1 Running 0 15m 192.168.43.28 ip-192-168-36-19.zone-b.compute.internal - tigera-secure-es-dee415120001c937-1-1 1/1 Running 0 14m 192.168.50.138 ip-192-168-36-19.zone-b.compute.internal - tigera-secure-es-dee415120001c937-2-0 1/1 Running 0 15m 192.168.52.226 ip-192-168-43-89.zone-c.compute.internal - tigera-secure-es-dee415120001c937-2-1 1/1 Running 0 14m 192.168.36.225 ip-192-168-43-89.zone-c.compute.internal - ``` diff --git a/calico-enterprise_versioned_docs/version-3.19-2/operations/logstorage/create-storage.mdx b/calico-enterprise_versioned_docs/version-3.19-2/operations/logstorage/create-storage.mdx deleted file mode 100644 index eb50ff70ef..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/operations/logstorage/create-storage.mdx +++ /dev/null @@ -1,163 +0,0 @@ ---- -description: Configure persistent storage for flow logs, DNS logs, audit logs, and compliance reports. ---- - -# Configure storage for logs and reports - -import PersistentStorageTerms from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_persistent-storage-terms.mdx'; - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - -## Big picture - -Before installing $[prodname], you must configure persistent storage for flow logs, DNS logs, audit logs, and compliance reports. - -## Concepts - -Before configuring a storage class for $[prodname], the following terms will help you understand storage interactions. - - - -## Before you begin... - -**Review log storage recommendations** - -Review [Log storage recommendations](log-storage-recommendations.mdx) for guidance on the number of nodes and resources to configure for your environment. - -**Determine storage support** - -Determine the storage types that are available on your cluster. If you are using dynamic provisioning, verify it is supported. -If you are using local disks, you may find the [sig-storage local static provisioner](https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner) useful. It creates and manages PersistentVolumes by watching for disks mounted in a configured directory. - -:::caution - -Do not use the host path storage provisioner. 
This provisioner is not suitable for production and results in scalability issues, instability, and data loss. - -::: - -:::caution - -Do not use shared network file systems, such as AWS' EFS or Azure's azure-file. These file systems may result in decreases of performance and data loss. - -::: - -## How to - -### Create a storage class - -Before installing $[prodname], create a storage class named, `tigera-elasticsearch`. - -**Examples** - -#### Pre-provisioned local disks - -In the following example, we create a **StorageClass** to use when explicitly adding **PersistentVolumes** for local disks. This can be performed manually, or using the [sig-storage local static provisioner](https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner). - -```yaml -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: tigera-elasticsearch -provisioner: kubernetes.io/no-provisioner -volumeBindingMode: WaitForFirstConsumer -reclaimPolicy: Retain -``` - -:::note - -If local persistent volumes are provisioned on an SELinux-enabled host, you can use the `/mnt/tigera` host path created by the $[prodname] policy package. - -::: - -#### AWS EBS disks - -In the following example for an AWS cloud provider integration, the **StorageClass** is based on [how your EBS disks are provisioned](https://docs.aws.amazon.com/eks/latest/userguide/ebs-csi.html): - - - - -Make sure the CSI plugin is enabled in your cluster and apply the following manifest. - -```yaml -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: tigera-elasticsearch -provisioner: ebs.csi.aws.com -reclaimPolicy: Retain -allowVolumeExpansion: true -volumeBindingMode: WaitForFirstConsumer -``` - - - - -```yaml -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: tigera-elasticsearch -provisioner: ebs.csi.aws.com -parameters: - type: gp2 - fsType: ext4 -reclaimPolicy: Retain -allowVolumeExpansion: true -volumeBindingMode: WaitForFirstConsumer -``` - - - - -#### AKS Azure Files storage - -In the following example for an AKS cloud provider integration, the **StorageClass** tells $[prodname] to use LRS disks for log storage. -:::note - -Premium Storage is recommended for databases greater than 100GiB and for production installations. - -::: - -```yaml -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: tigera-elasticsearch -provisioner: disk.csi.azure.com -parameters: - cachingmode: ReadOnly - kind: Managed - storageaccounttype: StandardSSD_LRS -reclaimPolicy: Retain -volumeBindingMode: WaitForFirstConsumer -allowVolumeExpansion: true -``` - -#### GCP Persistent Disks - -In the following example for a GKE cloud provider integration, the **StorageClass** tells $[prodname] to use the GCE Persistent Disks for log storage. - -:::note - -There are currently two types available `pd-standard` and `pd-ssd`. For production deployments, we recommend using the `pd-ssd` storage type. 
- -::: - -```yaml -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: tigera-elasticsearch -provisioner: pd.csi.storage.gke.io -parameters: - type: pd-ssd - replication-type: none -reclaimPolicy: Retain -volumeBindingMode: WaitForFirstConsumer -allowVolumeExpansion: true -``` - -## Additional resources - -- [Adjust size of Elasticsearch cluster](adjust-log-storage-size.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/operations/logstorage/index.mdx b/calico-enterprise_versioned_docs/version-3.19-2/operations/logstorage/index.mdx deleted file mode 100644 index 0198401665..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/operations/logstorage/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: Configure your log storage. -hide_table_of_contents: true ---- - -# Configure log storage - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/operations/logstorage/log-storage-recommendations.mdx b/calico-enterprise_versioned_docs/version-3.19-2/operations/logstorage/log-storage-recommendations.mdx deleted file mode 100644 index 69f781d6a6..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/operations/logstorage/log-storage-recommendations.mdx +++ /dev/null @@ -1,83 +0,0 @@ ---- -description: Guidelines for setting up your log storage. ---- - -# Log storage recommendations - -## Log storage recommendations - -$[prodname] installs an Elasticsearch cluster for storing logs internally. - -We use -[Elastic Cloud on Kubernetes](https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-overview.html) -to install and manage the cluster. All you need to do is provide at least one node with suitable storage. - -The cluster is configured to use a StorageClass called `tigera-elasticsearch`. You must -set up this StorageClass before installing $[prodname]. - -We recommend using local disks for storage when possible (this offers the best performance), -but high performance remote storage can also be used. Examples of suitable remote storage include -AWS SSD type EBS disks, or GCP PD-SSDs. - -:::caution - -Do not use Amazon EFS, it is not compatible with Elasticsearch. - -::: - -For information on how to configure storage, please consult the [Kubernetes](https://kubernetes.io/docs/concepts/storage/storage-classes/) -or [OpenShift](https://docs.openshift.com/container-platform/4.2/storage/understanding-persistent-storage.html) documentation. - -- If you're going to use local disks, you may find the [sig-storage local static provisioner](https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner) - useful. It can create and manage PersistentVolumes by watching for disks mounted in a certain directory. -- If you're planning on using cloud storage, ensure you have set up the cloud provider integration. - -## Sizing - -Many factors need to be considered when sizing this cluster: - -- Scale and nature of the traffic patterns in the cluster -- Retention periods for logs -- Aggregation and export interval configuration -- Desired replication factor for data - -For tailored recommendations on sizing the cluster, please contact Tigera support. - -### Example production topology - -5 Elasticsearch nodes, 2 replicas, each node with 32GB RAM, 4 CPU cores and 1TB of storage. 
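As a sketch only (the figures simply mirror the example topology above and must be tuned for your own workload and retention needs), the corresponding `LogStorage` resource could look like the following:

```yaml
apiVersion: operator.tigera.io/v1
kind: LogStorage
metadata:
  name: tigera-secure
spec:
  indices:
    replicas: 2
  nodes:
    count: 5
    # Each Elasticsearch node is given 4 CPU cores, 32GiB of memory, and roughly 1TB of storage,
    # matching the example production topology described above.
    resourceRequirements:
      limits:
        cpu: 4000m
        memory: 32Gi
      requests:
        cpu: 4000m
        memory: 32Gi
      storage: 1000Gi
```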
- -## Recommended settings - -We recommend that you configure your cluster to have: - -- At least 3 nodes -- At least 1 replica -- At least 8GB or ram per node, preferably higher -- The node count should exceed the replica count by at least 2 - -**Relationship between nodes and replicas** - -Elasticsearch stores data in shards. A replica adds a copy of each shard to the cluster. It must not be on the same node as -the original, therefore you need a node count higher than the replica count. - -**Replicas and cluster health** - -An Elasticsearch cluster health gives a user a quick impression of the state of the cluster: - -- Green health: the cluster is fully functional and every shard has at least the number of configured replicas -- Yellow health: the cluster is available for most actions, but there are unavailable shards or shards with too few replicas -- Red health: There is a serious issue with your cluster, check the [troubleshoot page](../../observability/elastic/troubleshoot.mdx) - -**Pod disruption** - -The ECK Operator will automatically modify the pod [disruption budget](https://kubernetes.io/docs/tasks/run-application/configure-pdb/) -according to the cluster health. When the Elasticsearch cluster is yellow or red, it will not tolerate the removal of any -extra node. For this reason we recommend that the node count is at least 2 higher than the replica count. Otherwise, there -is no tolerance for removing/restarting a node. - -## Next Steps - -- Read the [LogStorage overview page](../logstorage/index.mdx) -- [Troubleshooting Elasticsearch](../../observability/elastic/troubleshoot.mdx) -- [LogStorage Specification](../../reference/installation/api.mdx#logstorage) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/operations/monitor/index.mdx b/calico-enterprise_versioned_docs/version-3.19-2/operations/monitor/index.mdx deleted file mode 100644 index 9f5f788a25..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/operations/monitor/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: Tools for scraping useful metrics. -hide_table_of_contents: true ---- - -# Monitoring - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/operations/monitor/metrics/bgp-metrics.mdx b/calico-enterprise_versioned_docs/version-3.19-2/operations/monitor/metrics/bgp-metrics.mdx deleted file mode 100644 index dacb660116..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/operations/monitor/metrics/bgp-metrics.mdx +++ /dev/null @@ -1,152 +0,0 @@ ---- -description: Monitor BGP peering and route exchange in your cluster and get alerts by defining rules and thresholds. ---- - -# BGP metrics - -## Big picture - -Use Prometheus configured for $[prodname] `$[noderunning]` to monitor the health of BGP peers within your cluster. - -## Value - -Using the open-source Prometheus monitoring and alerting toolkit, you can view time-series metrics from $[prodname] components in the Prometheus or Grafana interfaces. - -$[prodname] adds the ability to monitor high-level operations between BGP peers in your cluster. By defining a set of simple rules and thresholds, you can monitor peer-to-peer connection health between your nodes as well as the number of routes being exchanged and receive alerts when it exceeds configured thresholds. 
- -## Concepts - -``` - +-------------------+ - | Host | - | +-------------------+ +------------+ +------------+ - | | Host |------------->--| | | |--->-- - | | +-------------------+ policy | Prometheus | | Prometheus | alert - +-| | Host |----------->--| Server |-->--| Alert- |--->-- - | | +-------------+ | metrics | | | manager | mechanisms - +-| | BGP Metrics |-------------->--| | | |--->-- - | | Server | | | | | | - | +-------------+ | +------------+ +------------+ - +-------------------+ ^ ^ - | | - Collect and store metrics. Web UI for accessing alert - WebUI for accessing and states. - querying metrics. Configure fan out - Configure alerting rules. notifications to different - alert receivers. -``` - -BGP metric reporting is accomplished using three key pieces: - -- BGP Metrics Server -- Prometheus Server -- Prometheus Alertmanager - -### About Prometheus - -The Prometheus scrapes various instrumented jobs (endpoints) to collect time series data for a given set of metrics. Time series data can then be queried and rules can be setup to monitor specific thresholds to trigger alerts. The data can also be visualized (such as using Grafana). - -Prometheus Server deployed as part of the $[prodname] scrapes every configured `$[noderunning]` target. Alerting rules querying BGP metrics can be configured in Prometheus and when triggered, fire alerts to the Prometheus Alertmanager. - -Prometheus Alertmanager (or simply Alertmanager), deployed as part of the $[prodname], receives alerts from Prometheus and forwards alerts to various alerting mechanisms such as _Pager Duty_, or _OpsGenie_. - -### About $[prodname] `$[noderunning]` - -`$[noderunning]` bundles together the components required for networking containers with $[prodname]. The key components are: - -- Felix -- BIRD -- confd - -Its critical function means that it runs on every machine that provides endpoints. A binary running inside `$[noderunning]` monitors the BIRD daemon for peering and routing activity and reports these statics to Prometheus. - -## How to - -BGP metrics are generated within `$[noderunning]` every 5 seconds using statistics pulled from the BIRD daemon. - -The metrics generated are: - -- `bgp_peers` - Total number of peers with a specific BGP connection status. -- `bgp_routes_imported` - Current number of routes successfully imported into the routing table. -- `bgp_route_updates_received` - Total number of route updates received over time (since startup). - -$[prodname] will run BGP metrics for Prometheus by default. Metrics are directly available on each compute node at `http://:9900/metrics`. - -Refer to [Configuring Prometheus](../prometheus/index.mdx) for information on how to create a new Alerting rule or updating the scraping interval for how often Prometheus collects the metrics. - -### BGP peers metric - -The metric `bgp_peers` has the relevant labels `instance`, `status` and `ip_version`. Using this metric, you can identify how many peers have a specific BGP connection status with a given node instance and IP version. This metric will be available as a combination of `{instance, status, ip_version}`. - -Example queries: - -- Total number of peers currently with a BGP connection to the node instance “calico-node-1”, with status “Established”, for IP version “IPv4”. - -``` -bgp_peers{instance="calico-node-1", status="Established", ip_version="IPv4"} -``` - -- Total number of peers currently with a BGP connection to the node instance “calico-node-1”, with status “Down”, for IP version “IPv6”. 
- -``` -bgp_peers{instance="calico-node-1", status="Down", ip_version="IPv6"} -``` - -- Total number of peers currently with a BGP connection to any node instance, with a status that is not “Established”, for IP version “IPv4”. - -``` -bgp_peers{status!="Established", ip_version="IPv4"} -``` - -Valid BGP connection statuses are: "Idle", "Connect", "Active", "OpenSent", "OpenConfirm", "Established", "Close", "Down" and "Passive". - -### BGP routes imported metric - -The metric `bgp_routes_imported` has the relevant labels `instance` and `ip_version`. Using this metric, you can identify how many routes are being successfully imported into a given node instance's routing table at a specific point in time. This number can increase or decrease depending on how BGP rules process incoming routes. This metric will be available as a combination of `{instance, ip_version}`. - -Example queries: - -- Computes the per-second rate for the number of routes imported by a specific node instance “calico-node-1” looking up to 120 seconds back (using the two most recent data points). - -``` -irate(bgp_routes_imported{instance="calico-node-1",ip_version="IPv4"}[120s]) -``` - -- Computes the per-second rate for the number of routes imported across all node instances looking up to 120 seconds back (using the two most recent data points). - -``` -irate(bgp_routes_imported{ip_version="IPv4"}[120s]) -``` - -### BGP route updates received metric - -The metric `bgp_route_updates_received` has the relevant labels `instance` and `ip_version`. Using this metric, you can identify the total number of BGP routes received by a given node over time. This number includes all routes that have been accepted & imported into the routing table, as well as any routes that were rejected as invalid, rejected by filters or rejected as already in the route table. This total number should only increase over time. This metric will be available as a combination of `{instance, ip_version}`. - -Example queries: - -- Computes the per-second rate for the number of routes received by a specific node instance “calico-node-1” looking up to 5 minutes back (using the two most recent data points). - -``` -irate(bgp_route_updates_received{instance="calico-node-1",ip_version="IPv4"}[5m]) -``` - -- Computes the per-second rate for the number of routes received across all node instances looking up to 5 minutes back (using the two most recent data points). - -``` -irate(bgp_route_updates_received{ip_version="IPv4"}[5m]) -``` - -### BGP metrics on $[prodnameWindows] - -By default, the Windows firewall blocks listening on ports. For $[prodname] to manage the Prometheus metrics ports Windows firewall rules, enable the `windowsManageFirewallRules` setting in FelixConfiguration: - -```bash -kubectl patch felixConfiguration default --type merge --patch '{"spec":{"windowsManageFirewallRules": "Enabled"}}' -``` - -[See the FelixConfiguration reference for more details](../../../reference/resources/felixconfig.mdx). You can also add a Windows firewall rule that allows listening on the Prometheus BGP metrics port instead of having $[prodname] manage it. 
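To turn the metrics above into alerts, you can define a PrometheusRule in the same way as for the other $[prodname] metrics described in this documentation. The following is a sketch only; the alert name, threshold, and duration are illustrative and should be adapted to your environment:

```yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  name: tigera-prometheus-bgp-monitoring
  namespace: tigera-prometheus
  labels:
    role: tigera-prometheus-rules
    prometheus: calico-node-prometheus
spec:
  groups:
    - name: tigera-bgp.rules
      rules:
        # Fires when a node has reported one or more BGP peers that are not in the
        # Established state for five minutes.
        - alert: BGPPeerNotEstablished
          expr: sum by (instance) (bgp_peers{status!="Established"}) > 0
          for: 5m
          labels:
            severity: Warning
          annotations:
            summary: 'Node {{$labels.instance}} has BGP peers that are not Established'
            description: 'One or more BGP sessions on {{$labels.instance}} have not reached the Established state in the last 5 minutes.'
```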
- -## Additional resources - -- [Secure $[prodname] Prometheus endpoints](../../comms/secure-metrics.mdx) -- [Configuring Prometheus](../prometheus/index.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/operations/monitor/metrics/elasticsearch-and-fluentd-metrics.mdx b/calico-enterprise_versioned_docs/version-3.19-2/operations/monitor/metrics/elasticsearch-and-fluentd-metrics.mdx deleted file mode 100644 index bc9e34ec01..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/operations/monitor/metrics/elasticsearch-and-fluentd-metrics.mdx +++ /dev/null @@ -1,158 +0,0 @@ ---- -description: Monitor Elasticsearch and Fluentd metrics, and get alerts on log storage or collection issues. ---- - -# Elasticsearch and Fluentd metrics - -## Big picture - -Use the Prometheus monitoring and alerting tool for Fluentd and Elasticsearch metrics to ensure continuous network visibility. - -## Value - -Platform engineering teams rely on logs, such as flow logs and DNS logs, for visibility into their networks. If collecting or storing logs are disrupted, this can impact network visibility. Prometheus can monitor log collection and storage metrics so platform engineering teams are alerted about problems before they occur. - -## Concepts - -| Component | Description | -| ------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| Prometheus | Monitoring tool that scrapes metrics from instrumented jobs and displays time series data in a visualizer (such as Grafana). For $[prodname], the “jobs” that Prometheus can harvest metrics from are the Elasticsearch and Fluentd components. | -| Elasticsearch | Stores $[prodname] logs. | -| Fluentd | Sends $[prodname] logs to Elasticsearch for storage. | - -**Multi-cluster management users**: Elasticsearch metrics are collected only from the management cluster. Because managed clusters do not have Elasticsearch clusters, do not monitor Elasticsearch for managed clusters. However, managed clusters do feed Fluentd logs to Elasticsearch, so you should monitor fluentd for managed clusters. - -## How to - -- [Create Prometheus alerts for Elasticsearch](#create-prometheus-alerts-for-elasticsearch) -- [Create Prometheus alerts for Fluentd](#create-prometheus-alerts-for-elasticsearch) - -### Create Prometheus alerts for Elasticsearch - -The following example creates Prometheus rules to monitor some important Elasticsearch metrics, and alert when they have -crossed certain thresholds: - -:::note - -The Elasticsearch Prometheus rules are only applicable to standalone and management cluster types, not the -managed cluster type. - -::: - -:::note - -The ElasticsearchHighMemoryUsage alert is an absolute value. This must be configured before applying the -rules. 
- -::: - -```yaml noValidation -apiVersion: monitoring.coreos.com/v1 -kind: PrometheusRule -metadata: - name: tigera-prometheus-log-storage-monitoring - namespace: tigera-prometheus - labels: - role: tigera-prometheus-rules - prometheus: calico-node-prometheus -spec: - groups: - - name: tigera-elasticsearch.rules - rules: - - alert: ElasticsearchClusterStatusRed - expr: elasticsearch_cluster_health_status{color="red"} == 1 - labels: - severity: Critical - annotations: - summary: "Elasticsearch cluster {{$labels.cluster}}'s status is red" - description: "The Elasticsearch cluster {{$labels.cluster}} is very unhealthy and immediate action must be -taken. Check the pod logs for the {{$labels.cluster}} Elasticsearch cluster to start the investigation." - - alert: ElasticsearchClusterStatusYellow - expr: elasticsearch_cluster_health_status{color="yellow"} == 1 - labels: - severity: Warning - annotations: - summary: "Elasticsearch cluster {{$labels.cluster}} status is yellow" - description: "The Elasticsearch cluster {{$labels.cluster}} may be unhealthy and could become very unhealthy if -the issue isn't resolved. Check the pod logs for the {{$labels.cluster}} Elasticsearch cluster to start the -investigation." - - alert: ElasticsearchPodCriticallyLowDiskSpace - expr: elasticsearch_filesystem_data_available_bytes / elasticsearch_filesystem_data_size_bytes < 0.10 - labels: - severity: Critical - annotations: - summary: "Elasticsearch pod {{$labels.name}}'s disk space is critically low." - description: "Elasticsearch pod {{$labels.name}} in Elasticsearch cluster {{$labels.name}} has less than 10% of -free disk space left. To avoid service disruption review the LogStorage resource limits and curation settings." - - alert: ElasticsearchPodLowDiskSpace - expr: elasticsearch_filesystem_data_available_bytes / elasticsearch_filesystem_data_size_bytes < 0.25 - labels: - severity: Warning - annotations: - summary: "Elasticsearch pod {{$labels.name}}'s disk space is getting low." - description: "Elasticsearch pod {{$labels.name}} in Elasticsearch cluster {{$labels.name}} has less than 25% of -free disk space left. To avoid service disruption review the LogStorage resource limits and curation settings." - - alert: ElasticsearchConsistentlyHighCPUUsage - expr: avg_over_time(elasticsearch_os_cpu_percent[10m]) > 90 - labels: - severity: Warning - annotations: - summary: "Elasticsearch pod {{$labels.name}}'s CPU usage is consistently high." - description: "Elasticsearch pod {{$labels.name}} in Elasticsearch cluster {{$labels.cluster}} has been using -above 90% of it's available CPU for the last 10 minutes. To avoid service disruption review the LogStorage resource -limits." - - alert: ElasticsearchHighMemoryUsage - expr: avg_over_time(elasticsearch_jvm_memory_pool_used_bytes[10m]) > 1000000000 - labels: - severity: Warning - annotations: - summary: "Elasticsearch pod {{$labels.name}}'s memory usage is consistently high." - description: "Elasticsearch pod {{$labels.name}} in Elasticsearch cluster {{$labels.cluster}} has been using -an average of {{$labels.value}} bytes of memory over the past 10 minutes. To avoid service disruption review the -LogStorage resource limits." 
-``` - -#### The alerts created in the example are described in the following table: - -| Alert | Severity | Requires | Issue/reason | -| ------------------------------------------- | --------------------- | --------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| ElasticsearchClusterStatusRed | Critical | Immediate action to avoid service disruption and restore the service. | Elasticsearch cluster is unhealthy. | -| ElasticsearchPodCriticallyLowDiskSpace | Critical | | Disk space for pod is less than 10% of total available space. The LogStorage resource settings for disk space are not high enough or logs are not being correctly curated. | -| ElasticsearchClusterStatusYellow | Non-critical, warning | Immediate investigation to rule out critical issue. | Early warning of cluster problem. | -| ElasticsearchPodLowDiskSpace | Non-critical, warning | | Disk space for an Elasticsearch pod is less than 25% of total available space. The LogStorage resource settings for disk space are not high enough or logs are not being correctly curated. | -| ElasticsearchPodConsistentlyHighCPUUsage | Non-critical, warning | | An Elasticsearch pod is averaging above 90% of its CPU over the last 10 minutes. | -| ElasticsearchPodConsistentlyHighMemoryUsage | Non-critical, warning | | An Elasticsearch pod is averaging above the set memory threshold over the last 10 minutes. | - -### Create Prometheus alerts for Fluentd - -The following example creates a Prometheus a rule to monitor some important Fluentd metrics, and alert when they -have crossed certain thresholds: - -```yaml noValidation -apiVersion: monitoring.coreos.com/v1 -kind: PrometheusRule -metadata: - name: tigera-prometheus-log-collection-monitoring - namespace: tigera-prometheus - labels: - role: tigera-prometheus-rules - prometheus: calico-node-prometheus -spec: - groups: - - name: tigera-log-collection.rules - rules: - - alert: FluentdPodConsistentlyLowBufferSpace - expr: avg_over_time(fluentd_output_status_buffer_available_space_ratio[5m]) < 75 - labels: - severity: Warning - annotations: - summary: "Fluentd pod {{$labels.pod}}'s buffer space is consistently below 75 percent capacity." - description: "Fluentd pod {{$labels.pod}} has very low buffer space. There may be connection issues between Elasticsearch -and Fluentd or there are too many logs to write out, check the logs for the Fluentd pod." -``` - -#### The alerts created in the example are described as follows: - -| Alert | Severity | Requires | Issue/reason | -| ---------------------------------------- | --------------------- | -------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| **FluentdPodConsistentlyLowBufferSpace** | Non-critical, warning | Immediate investigation to ensure logs are being gathered correctly. | A Fluentd pod’s available buffer size has averaged less than 75% over the last 5 minutes.

    This could mean Fluentd is having trouble communicating with the Elasticsearch cluster, the Elasticsearch cluster is down, or there are simply too many logs to process. | diff --git a/calico-enterprise_versioned_docs/version-3.19-2/operations/monitor/metrics/index.mdx b/calico-enterprise_versioned_docs/version-3.19-2/operations/monitor/metrics/index.mdx deleted file mode 100644 index 1a154f4983..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/operations/monitor/metrics/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: Configure Prometheus metrics. -hide_table_of_contents: true ---- - -# Metrics - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/operations/monitor/metrics/license-agent.mdx b/calico-enterprise_versioned_docs/version-3.19-2/operations/monitor/metrics/license-agent.mdx deleted file mode 100644 index ea37f6cb05..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/operations/monitor/metrics/license-agent.mdx +++ /dev/null @@ -1,92 +0,0 @@ ---- -description: Monitor Calico Enterprise license metrics such as nodes used, nodes available, and days until license expires. ---- - -# License metrics - -import License from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_license.mdx'; - -## Big picture - -Use the Prometheus monitoring and alerting tool to get $[prodname] license metrics. - -## Value - -Platform engineering teams need to report licensing usage on third-party software (like $[prodname]) for their CaaS/Kubernetes platforms. This is often driven by compliance, but also to mitigate risks from license expiration or usage that may impact operations. For teams to easily access these vital metrics, $[prodname] provides license metrics using the Prometheus monitoring and alerting tool. - -## Concepts - -### About Prometheus - -The Prometheus monitoring tool scrapes metrics from instrumented jobs and displays time series data in a visualizer (such as Grafana). For $[prodname], the “jobs” that Prometheus can harvest metrics from the License Agent component. - -### About License Agent - -The **License Agent** is a containerized application that monitors the following $[prodname] licensing information from the Kubernetes cluster, and exports the metrics through the Prometheus server: - -- Days till expiration - -### FAQ - - - -## How to - -- [Add license agent in your Kubernetes cluster](#add-license-agent-in-your-kubernetes-cluster) -- [Create alerts using Prometheus metrics](#create-alerts-using-prometheus-metrics) - -### Add license agent in your Kubernetes cluster - -To add the license-agent component in a Kubernetes cluster for license metrics, install the pull secret and apply the license-agent manifest. - -1. Create a namespace for the license-agent. - ``` - kubectl create namespace tigera-license-agent - ``` -1. Install your pull secret. - ``` - kubectl create secret generic tigera-pull-secret \ - --type=kubernetes.io/dockerconfigjson -n tigera-license-agent \ - --from-file=.dockerconfigjson= - ``` -1. Apply the manifest. - ``` - kubectl apply -f $[filesUrl]/manifests/licenseagent.yaml - ``` - -### Create alerts using Prometheus metrics - -In the following example, an alert is configured when the license expiry is fewer than 15 days. 
- -```yaml -apiVersion: monitoring.coreos.com/v1 -kind: PrometheusRule -metadata: - name: calico-prometheus-license - namespace: tigera-prometheus - labels: - role: tigera-prometheus-rules - prometheus: calico-node-prometheus -spec: - groups: - - name: tigera-license.rules - rules: - - alert: CriticalLicenseExpiry - expr: license_number_of_days < 15 - labels: - severity: Warning - annotations: - summary: 'Calico Enterprise License expires in less than 15 days' - description: 'Calico Enterprise License expires in less than 15 days' -``` - -:::note - -If the Kubernetes api-server serves on any port other than 6443 or 443, add that port in the Egress policy of the license agent manifest. - -::: - -## Additional resources - -- [LicenseKey resource](../../../reference/resources/licensekey.mdx) -- [Configure Alertmanager](../prometheus/alertmanager.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/operations/monitor/metrics/policy-metrics.mdx b/calico-enterprise_versioned_docs/version-3.19-2/operations/monitor/metrics/policy-metrics.mdx deleted file mode 100644 index 9a7afc6f47..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/operations/monitor/metrics/policy-metrics.mdx +++ /dev/null @@ -1,142 +0,0 @@ ---- -description: Monitor the effects of policy in your cluster and received alerts by defining rules and thresholds. ---- - -# Policy metrics - -$[prodname] adds the ability to monitor effects of policies configured in your cluster. -By defining a set of simple rules and thresholds, you can monitor traffic metrics and receive -alerts when it exceeds configured thresholds. - -``` - +------------+ - | | - | TSEE | - | Manager | - | | - | | - | | - +------------+ - ^ - | - | - | - +-----------------+ | - | Host | | - | +-----------------+ +------------+ +------------+ - | | Host |------------->--| | | |--->-- - | | +-----------------+ policy | Prometheus | | Prometheus | alert - +-| | Host |----------->--| Server |-->--| Alert |--->-- - | | +----------+ | metrics | | | Manager | mechanisms - +-| | Felix |-------------->--| | | |--->-- - | +----------+ | +------------+ +------------+ - +-----------------+ ^ ^ - | | - Collect and store metrics. Web UI for accessing alert - WebUI for accessing and states. - querying metrics. Configure fan out - Configure alerting rules. notifications to different - alert receivers. -``` - -Policy inspection and reporting is accomplished using four key pieces: - -- A $[prodname] specific Felix binary running inside `$[noderunning]` container - monitors the host for denied/allowed packets and collects metrics. -- Prometheus Server(s) deployed as part of the $[prodname] manifest scrapes - every configured `$[noderunning]` target. Alerting rules querying denied packet - metrics are configured in Prometheus and when triggered, fire alerts to - the Prometheus Alertmanager. -- Prometheus Alertmanager (or simply Alertmanager), deployed as part of - the $[prodname] manifest, receives alerts from Prometheus and forwards - alerts to various alerting mechanisms such as _Pager Duty_, or _OpsGenie_. -- the $[prodname] web console, also deployed as part of the $[prodname] manifest, - processes the metrics using pre-defined Prometheus queries and provides dashboards and associated workflows. - -Metrics will only be generated at a node when there are packets directed at an endpoint that are being actively profiled by a policy. -Once generated they stay alive for 60 seconds. 
- -Once Prometheus scrapes a node and collects policy metrics, it will be -available at Prometheus until the metric is considered _stale_, i.e., -Prometheus has not seen any updates to this metric for some time. This time is -configurable. Refer to -[Configuring Prometheus configuration](../prometheus/index.mdx) -for more information. - -Because of metrics being expired, as just described, it is entirely possible -for a GET on a metrics query URL to return no information. This is expected -if there have not been any packets being processed by a policy on that node, in -the last 60 seconds. - -Metrics generated by each $[prodname] node are: - -- `calico_denied_packets` - Total number of packets denied by $[prodname] policies. -- `calico_denied_bytes` - Total number of bytes denied by $[prodname] policies. -- `cnx_policy_rule_packets` - Sum of allowed/denied packets over rules processed by - $[prodname] policies. -- `cnx_policy_rule_bytes` - Sum of allowed/denied bytes over rules processed by - $[prodname] policies. -- `cnx_policy_rule_connections` - Sum of connections over rules processed by $[prodname] - policies. - -The metrics `calico_denied_packets` and `calico_denied_bytes` have the labels `policy` and `srcIP`. -Using these two metrics, one can identify the policy that denied packets as well as -the source IP address of the packets that were denied by this policy. Using -Prometheus terminology, `calico_denied_packets` is the metric name and `policy` -and `srcIP` are labels. Each one of these metrics will be available as a -combination of `{policy, srcIP}`. - -Example queries: - -- Total number of bytes, denied by $[prodname] policies, originating from the IP address "10.245.13.133" - by `k8s_ns.ns-0` profile. - -``` -calico_denied_bytes{policy="profile|k8s_ns.ns-0|0|deny", srcIP="10.245.13.133"} -``` - -- Total number of packets denied by $[prodname] policies, originating from the IP address "10.245.13.149" - by `k8s_ns.ns-0` profile. - -``` -calico_denied_packets{policy="profile|k8s_ns.ns-0|0|deny", srcIP="10.245.13.149"}} -``` - -The metrics `cnx_policy_rule_packets`, `cnx_policy_rule_bytes` and `cnx_policy_rule_connections` have the -labels: `tier`, `policy`, `namespace`, `rule_index`, `action`, `traffic_direction`, `rule_direction`. - -Using these metrics, one can identify allow, and denied byte rate and packet rate, both inbound and outbound, indexed by both policy and rule. the $[prodname] web console Dashboard makes heavy usage of these metrics. -Staged policy names are prefixed with "staged:". 
- -Example queries: - -- Query counts for rules: Packet rates for specific rule by traffic_direction - -``` -sum(irate(cnx_policy_rule_packets{namespace="namespace-2",policy="policy-0",rule_direction="ingress",rule_index="rule-5",tier="tier-0"}[30s])) without (instance) -``` - -- Query counts for rules: Packet rates for each rule in a policy by traffic_direction - -``` -sum(irate(cnx_policy_rule_packets{namespace="namespace-2",policy="policy-0",tier="tier-0"}[30s])) without (instance) -``` - -- Query counts for a single policy by traffic_direction and action - -``` -sum(irate(cnx_policy_rule_packets{namespace="namespace-2",policy="policy-0",tier="tier-0"}[30s])) without (instance,rule_index,rule_direction) -``` - -- Query counts for all policies across all tiers by traffic_direction and action - -``` -sum(irate(cnx_policy_rule_packets[30s])) without (instance,rule_index,rule_direction) -``` - -See the -[Felix configuration reference](../../../reference/component-resources/node/felix/configuration.mdx#calico-enterprise-specific-configuration) for -the settings that control the reporting of these metrics. $[prodname] manifests -normally set `PrometheusReporterEnabled=true` and -`PrometheusReporterPort=9081`, so these metrics are available on each compute -node at `http://:9081/metrics`. diff --git a/calico-enterprise_versioned_docs/version-3.19-2/operations/monitor/metrics/recommended-metrics.mdx b/calico-enterprise_versioned_docs/version-3.19-2/operations/monitor/metrics/recommended-metrics.mdx deleted file mode 100644 index cae29d4310..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/operations/monitor/metrics/recommended-metrics.mdx +++ /dev/null @@ -1,562 +0,0 @@ ---- -description: Recommended Prometheus metrics for monitoring Calico Enterprise components. ---- - -# Recommended Prometheus metrics - -## Big picture - -Monitor the $[prodname] Typha, Felix, and policy component metrics to ensure optimal cluster operation. - -## Concepts - -$[prodname] Typha, Felix, and policy components are the most critical to monitor because they are responsible for ensuring networking and security functions are up-to-date and working as expected. - -### Typha - -Typha is a caching datastore proxy that sits between calico-nodes and Kubernetes API Server. Its primary function is to allow for increased cluster scale by reducing the load on Kubernetes API Server. Without Typha, large clusters (200+ nodes) would need a considerable amount of memory to correspond to the continuous watches and requests from calico-nodes running in the cluster. - -Typha maintains a single datastore connection on behalf of all of its clients (processes running in the calico-node pods, with Felix being Typhas’ main client). Typha watches for node, pod, network policy, bgp configuration, and other events on the Kubernetes API Server, caches and deduplicates this data, and fans out these events to its clients. - -### Felix - -Felix is a component of calico-node and is responsible for $[prodname] network policy. -Felix must be continuously in sync with the datastore to ensure the correct set of policies are applied to the node it is running on. - -![Typha-felix](/img/calico-enterprise/typha-felix.png) - -### About metrics - -Each $[prodname] component that you want to connect to Prometheus for endpoint metrics has its own configuration (bgp, license, policy, felix, and typha). 
-
-Note that Felix is a separate application with its own metrics endpoints: core metrics for monitoring Felix itself, and a separate port for a second, policy metrics endpoint.
-
-## Metrics
-
-This section provides metrics recommendations for maintaining optimal cluster operations. Note the following:
-
-- Threshold values for each metric depend on the cluster size and churn rate.
-- Threshold recommendations are provided where possible, but because each cluster is different and metrics depend on cluster churn rate and scale, we recommend that you baseline the cluster to establish numbers that represent normal figures for your cluster.
-- Metrics that start increasing rapidly from the established baseline need attention.
-
-Typha
-- [Typha general metrics](#typha-general-metrics)
-- [Typha cluster mesh metrics](#typha-cluster-mesh-metrics)
-- [Typha client metrics](#typha-client-metrics)
-- [Typha cache internals](#typha-cache-internals)
-- [Typha snapshot details](#typha-snapshot-details)
-
-Felix
-- [Policy metrics](#policy-metrics)
-- [Felix cluster state metrics](#felix-cluster-state-metrics)
-- [Felix error metrics](#felix-error-metrics)
-- [Felix time-based metrics](#felix-time-based-metrics)
-
-## Typha general metrics
-
-### Datastore cache size
-
-| Datastore cache size | |
-| -------------------------------- | ------------------------------------------------------------ |
-| Metric | Note: Syncer (type) is Typha's internal name for a client (type).&#10;
    Individual syncer values:
    (typha_cache_size\{syncer="bgp"\})
    (typha_cache_size\{syncer="dpi"\})
    (typha_cache_size\{syncer="felix"\})
    (typha_cache_size\{syncer="node-status"\})
    (typha_cache_size\{syncer="tunnel-ip-allocation"\})

    Sum of all syncers:
    The sum of all cache sizes (each syncer type has a cache).
    sum by (instance) (typha_cache_size)

    Largest syncer:
    max by (instance) (typha_cache_size) | -| Example value | Example of: max by (instance) (typha_cache_size\{syncer="felix"\})

    \{instance="10.0.1.20:9093"\} 661
    \{instance="10.0.1.31:9093"\} 661 | -| Explanation | The total number of key/value pairs in Typha's in-memory cache.This metric represents the scale of the $[prodname] datastore as it tracks how many WEPs (pods and services), HEPs (hostendpoints), networksets, globalnetworksets, $[prodname] Network Policies etc that Typha is aware of across the entire Calico Federation.You can use this metric to monitor individual syncers to Typha (like Felix, BGP etc), or to get a sum of all syncers. We recommend that you monitor the largest syncer but it is completely up to you. This is a good metric to understand how much data is in Typha. Note: If all Typhas are in sync then they should have the same value for this metric. | -| Threshold value recommendation | The value of this metric will depend on the scale of the Calico Federation and will always increase as WEPs, $[prodname] network policies and clusters are added. Achieve a baseline first, then monitor for any unexpected increases from the baseline. | -| Threshold breach symptoms | Unexpected increases may indicate memory leaks and performance issues with Typha. | -| Threshold breach recommendations | Check CPU usage on Typha pods and Kubernetes nodes. Increase resources if needed, rollout and restart Typha(s) if needed. | -| Priority level | Optional. | - -### CPU usage - -| CPU usage | | -| -------------------------------- | ------------------------------------------------------------ | -| Metric | rate(process_cpu_seconds_total\{30s\}) \* 100 | -| Example value | \{endpoint="metrics-port", instance="10.0.1.20:9093", job="typha-metrics-svc", namespace="calico-system", pod="calico-typha-6c6cc9fcf7-csbdl", service="typha-metrics-svc"\} 0.27999999999999403 | -| Explanation | CPU in use by Typha represented as a percentage of a core. | -| Threshold value recommendation | A spike at startup is normal. It is recommended to achieve a baseline first, then monitor for any unexpected increases from this baseline. A rule of thumb is to investigate maintained CPU usage above 90%. | -| Threshold breach symptoms | Unexpected maintained CPU usage could cause Typha to fall behind in updating its clients (for example, Felix) and could cause delays to policy updates. | -| Threshold breach recommendations | Check CPU usage on Kubernetes nodes. If needed, increase resources, and rollout restart Typha(s). | -| Priority level | Recommended. | - -### Memory usage - -| Memory usage | | -| -------------------------------- | ------------------------------------------------------------ | -| Metric | process_resident_memory_bytes | -| Example value | process_resident_memory_bytes\{endpoint="metrics-port", instance="10.0.1.20:9093", job="typha-metrics-svc", namespace="calico-system", pod="calico-typha-6c6cc9fcf7-csbdl", service="typha-metrics-svc"\} 80515072 | -| Explanation | Amount of memory used by Typha. | -| Threshold value recommendation | It is recommended to achieve a baseline first, then monitor for any unexpected increases from this baseline. A rule of thumb is to investigate if maintained memory usage is above 90% of what is available from the underlying node. The metric can also be used for memory leaks. In this case, the metric would show Typhas' memory consumption rising over time, even though the cluster is in a stable state. | -| Threshold breach symptoms | Unexpected maintained memory usage could cause Typha to fall behind in updating its clients (for example, Felix) and could cause delays to policy updates. 
| -| Threshold breach recommendations | Check memory usage on Kubernetes nodes. Increase resources if needed, and rollout restart Typha(s) if needed. | -| Priority level | Recommended. | - -## Typha cluster mesh metrics - -The following metrics are applicable only if you have implemented [Cluster mesh](multicluster/federation/overview.mdx). - -Note that this metric requires a count syntax because you will have a copy of the metric per RemoteClusterConfiguration. As shown in the table, the value `2 = In Sync` reflects good connections. - -``` -remote_cluster_connection_status\{cluster="foo"\} = 2 -remote_cluster_connection_status\{cluster="bar"\} = 2 -remote_cluster_connection_status\{cluster="baz"\} = 1 -``` - -### Remote cluster connections (in-sync) - -| Remote cluster connections (in-sync) | | -| ------------------------------------ | ------------------------------------------------------------ | -| Metric | count by (instance) (remote_cluster_connection_status == 2) | -| Explanation | This represents the number of remote cluster connections that are connected and in sync. Each remote cluster will report a *connection_status* value from the following list:
    - 0 = Not Connected
    - 1 = Connecting
    - 2 = In Sync
    - 3 = Resync in Process
    - 4 = Config Change Restart Required

    We suggest the count syntax because there will be one copy of *remote_cluster_connection_status* per cluster: - remote_cluster_connection_status[cluster="foo"] = 2
    remote_cluster_connection_status[cluster="bar"] = 2
    remote_cluster_connection_status[cluster="baz"] = 1&#10;

    Counting the number of metrics with value 2 returns the number of In Sync clusters. | -| Threshold value recommendation | When remote cluster connections are initializing, *connection_status* values will fluctuate. After the connection is established, this value should be equal to the number of remote clusters in the environment (if everything is in sync). | -| Threshold breach symptoms | N/A
    For out-of-sync symptoms, see the out-of-sync metric. | -| Threshold breach recommendations | N/A
    For out-of-sync recommendations, see the out-of-sync metric. | -| Priority level | Recommended. | - -### Remote cluster connections (out-of-sync) - -The following metrics are applicable only if you have implemented [Cluster mesh](multicluster/federation/overview.mdx). - -| Remote cluster connections (out-of-sync) | | -| ---------------------------------------- | ------------------------------------------------------------ | -| Metric | count by (instance) (remote_cluster_connection_status != 2) | -| Explanation | Number of remote cluster connections that are not in sync (i.e. resyncing or failing to connect). Each remote cluster will report a *connection_status* value from the following list:
    - 0 = Not Connected
    - 1 = Connecting
    - 2 = In Sync
    - 3 = Resync in Process
    - 4 = Config Change Restart Required |
-| Threshold value recommendation | Each remote cluster's *connection_status* should be 2 (In Sync), so this count should be 0 when everything is in sync. Note: At Typha startup, it is normal to see non-2 status values, but they should stabilize at 2 after connections come up. |
-| Threshold breach symptoms | Typha will not receive updates from the relevant remote clusters. Connected clients will see stale or partial data from remote clusters. |
-| Threshold breach recommendations | Investigate Typha's logs where remote cluster connectivity events are logged. Ensure the networking between clusters is not experiencing issues. |
-| Priority level | Recommended. |
-
-## Typha client metrics
-
-### Total connections accepted
-
-| Total connections accepted | |
-| -------------------------------- | ------------------------------------------------------------ |
-| Metric | typha_connections_accepted |
-| Example value | typha_connections_accepted\{endpoint="metrics-port", instance="10.0.1.20:9093", job="typha-metrics-svc", namespace="calico-system", pod="calico-typha-6c6cc9fcf7-csbdl", service="typha-metrics-svc"\} 10 |
-| Explanation | Total number of connections accepted over time. This value always increases. |
-| Threshold value recommendation | A steady increase over time is normal. It is also normal for counters to rise after a Felix or Typha restart (as clients get rebalanced). Investigate connection counters that rise rapidly with no Felix or Typha restarts. |
-| Threshold breach symptoms | Counters rising when there are no Felix or Typha restarts, or no action that could cause restarts (an upgrade, for example), could indicate unexpected Felix or Typha restarts or issues. |
-| Threshold breach recommendations | Check resource usage on Typha(s) and Kubernetes nodes. Increase resources if needed. |
-| Priority level | Optional. |
-
-### Client connections actively streaming
-
-| Client connections actively streaming | |
-| ------------------------------------- | ------------------------------------------------------------ |
-| Metric | sum by (instance) (typha_connections_streaming) |
-| Example value | \{instance="10.0.1.20:9093"\} 10&#10;
    \{instance="10.0.1.31:9093"\} 5 | -| Explanation | Current number of active connections that are "streaming" (have completed the handshake), to this Typha. After a connection has been Accepted (reported in the previous metric), there will be a handshake before the connection is deemed to be actively streaming. This indicates how many clients are connected to a Typha. The sum reflects per-cache metrics as well. | -| Threshold value recommendation | Compare the value for Total Connections Accepted and Client Connections Actively Streaming. The fluctuation of these values should be in-sync with each other if Accepted Connections are turning into Actively Streamed connections. If there is a discrepancy , you should investigate. Note: As always, it is recommended to baseline the relationship between these two metrics to have a sense of what is normal. It is also worth noting that in smaller clusters, it is normal for Typha to be unbalanced. Typha can handle hundreds of connections so it is of no concern if all nodes in a 10-node cluster (for example) connect to the same Typha. | -| Threshold breach symptoms | Felix is not getting updates from Typha. $[prodname] network policies are out-of-sync. | -| Threshold breach recommendations | Check Typha and Felix logs, and rollout restart Typha(s) if needed. | -| Priority level | Recommended. | - -### Rebalanced client connections - -| Rebalanced client connections | | -| -------------------------------- | ------------------------------------------------------------ | -| Metric | rate(typha_connections_dropped\{$_rate_interval\}) | -| Example value | \{endpoint="metrics-port", instance="10.0.1.20:9093", job="typha-metrics-svc", namespace="calico-system", pod="calico-typha-6c6cc9fcf7-csbdl", service="typha-metrics-svc"\} | -| Explanation | Number of client connections dropped to rebalance and share the load across different Typhas. | -| Threshold value recommendation | It is normal to see this value increasing sometimes. Investigate if connection dropped counters is rising constantly. If all Typhas are dropping connections because all Typhas believe they have too much load, this also warrants investigation. | -| Threshold breach symptoms | Dropping connections is rate limited so it should not affect the cluster as a whole. Typha clients, like Felix, will get dropped sometimes (but not constantly), and could result in periodic delays to policy updates. | -| Threshold breach recommendations | Ensure that the Kubernetes nodes have enough resources. | -| Priority level | Optional. | - -### 99 percentile client fall-behind - -| 99 percentile client fall-behind | | -| -------------------------------- | ------------------------------------------------------------ | -| Metric | max by (instance) (typha_client_latency_secs\{quantile='0.99'\}) | -| Example value | \{instance="10.0.1.20:9093"\} 0.1234
    \{instance="10.0.1.31:9093"\} 0.1234 | -| Explanation | This metric measures how far behind Typha's client-handling threads are at reading updates.This metric will increase if:
    a) The client (e.g., Felix) is slow or overloaded and cannot keep up with what Typha is sending, or&#10;
    b) Typha is overloaded and it cannot keep up with writes to all its clients.

    This metric is a good indication of your cluster, Felix, and Typha health. | -| Threshold value recommendation | It is normal for this to spike when new clients connect; they must download and process the snapshot, during which time they will fall slightly behind. Investigate of latency persists. | -| Threshold breach symptoms | Typha clients receiving updates from Typha will be behind in time. Potential symptoms could include $[prodname] network policies being out-of-sync. | -| Threshold breach recommendations | Check Typha and Felix logs and resource usage. It is recommended to focus on Felix logs and resource usage first, as there is generally more overhead with Felix and thus more of a chance of overload. Rollout restart Typha(s) and calico-node(s) if needed. | -| Priority level | Recommended. | - -### 99 percentile client write latency - -| 99 percentile client write latency | | -| ---------------------------------- | ------------------------------------------------------------ | -| Metric | max by (instance) (typha_client_write_latency_secs) | -| Example value | \{instance="10.0.1.20:9093"\} 0.007450815 | -| Explanation | Time for Typha to write to a client's socket (for example, Felix). | -| Threshold value recommendation | If the write latency is increasing, this indicates that a client (for example, Felix) is having an issue, or the network is having an issue. It is normal for intermittent spikes. Investigate any persistent latency. | -| Threshold breach symptoms | Typha clients will lag behind in receiving updates that Typha is sending. Potential symptoms include $[prodname] network policies being out-of-sync. | -| Threshold breach recommendations | Check Felix logs and resource usage. | -| Priority level | Recommended. | - -### 99 percentile client ping latency - -| 99 percentile client ping latency | | -| --------------------------------- | ------------------------------------------------------------ | -| Metric | max by (instance) (typha_ping_latency\{quantile="0.99"\}) | -| Example value | \{instance="10.0.1.20:9093"\} 0.034285331 | -| Explanation | This metric tracks the round-trip-time from Typha to a client. How long it takes for Typha's clients to respond to pings over the Typha protocol. | -| Threshold value recommendation | An increase in this metric above 1 second indicates that the clients, network or Typha are more heavily loaded. It is normal for intermittent spikes. Persistent latency above 1 second warrants investigation. | -| Threshold breach symptoms | Typha clients could be behind in time on updates Typha is sending. Potential symptoms include $[prodname] network policies being out-of-sync. | -| Threshold breach recommendations | Check Typha and Felix logs and resource usage. It is recommended to focus on Felix logs and resource usage first, as there is generally more overhead with Felix and thus more of a chance of overload. Check if the node is overloaded and review/increase calico-node/Typha CPU requests if needed. If needed, rollout restart Typha(s) and calico-node(s). | -| Priority level | Recommended. | - -## Typha cache internals - -### 99 percentile breadcrumb size - -| 99 percentile breadcrumb size | | -| -------------------------------- | ------------------------------------------------------------ | -| Metric | max by (instance) (typha_breadcrumb_size\{quantile="0.99"\}) | -| Explanation | Typha stores datastore changes as a series of blocks called breadcrumbs. 
Typha will store updates inside of these breadcrumbs (for example if a pod churned, this would be a single update). Typha can store multiple updates in a single breadcrumb with the default maximum size number being 100. | -| Threshold value recommendation | Typha generating blocks of size 100 during start up is normal. Investigate if Typha is consistently generating blocks of size 90+, which can indicate Typha is overloaded. | -| Threshold breach symptoms | Maintained block of sizes of 100 can indicate that Typha is falling behind on information and updates contained in the datastore. This will lead to Typha clients also falling behind (for example, $[prodname] network policy object may not be current). | -| Threshold breach recommendations | Check Typha logs and resource usage. Check if there is a lot of activity within the cluster that would cause Typha to send large breadcrumbs (for example, a huge amount of pod churn). If possible, reduce churn rate of resources on the cluster. | -| Priority level | Recommended. | - -### Non-blocking breadcrumbs fraction - -| Non-blocking breadcrumb fraction | | -| -------------------------------- | ------------------------------------------------------------ | -| Metric | (sum by (instance) (rate(typha_breadcrumb_non_block\{30s\})))/((sum by (instance) (rate(typha_breadcrumb_non_block\{30s\})))+(sum by (instance) (rate(typha_breadcrumb_block\{30s\})))) | -| Example value | \{instance="10.0.1.20:9093"\} NaN | -| Explanation | Typha stores datastore changes as a series of blocks called "breadcrumbs". Each client "follows the breadcrumbs" either by blocking and waiting, or skipping to the next one (non-blocking) if it is already available. Non-blocking breadcrumb actions indicates that Typha is constantly sending breadcrumbs to keep up with the datastore. Blocking breadcrumb actions indicate that Typha and the client have caught up, are up-to-date, and are waiting on the next breadcrumb. This metric will give a ratio between blocking and non-blocking actions that can indicate the health of Typha, its clients, and the cluster. | -| Threshold value recommendation | As the load on Typha increases, the ratio of skip-ahead, non-blocking reads, increases. If it approaches 100% then Typha may be overloaded (since clients only do non-blocking reads when they're behind). | -| Threshold breach symptoms | Consistent non-blocking breadcrumbs could indicate that Typha is falling behind on information and updates contained in the datastore. This will lead to Typha clients also being behind (for example, $[prodname] network policy object may not be current). | -| Threshold breach recommendations | Check Typha and Felix logs and resource usage. Check if there is a lot of activity within the cluster that would cause Typha to continuously send non-blocking breadcrumbs. | -| Priority level | Recommended. | - -### Datastore updates total - -| Datastore updates total | | -| -------------------------------- | ------------------------------------------------------------ | -| Metric | sum by (instance) (rate(typha_updates_total\{30s\})) | -| Example value | \{instance="10.0.1.20:9093"\} 0 | -| Explanation | The rate of updates from the datastore(s). For example, updates to Pods/Nodes/Policies/etc. | -| Threshold value recommendation | Intermittent spikes are expected. Constant updates indicates a very busy cluster (for example, lots of pod churn). | -| Threshold breach symptoms | Constant updates could lead to overloaded Typhas whereTyphas clients could fall behind. 
| -| Threshold breach recommendations | Ensure Typha has enough resources to handle a very dynamic cluster. | -| Priority level | Optional. | - -### Datastore update skipped (no-ops) - -| Datastore update skipped (no-ops) | | -| --------------------------------- | ------------------------------------------------------------ | -| Metric | sum by (instance) (rate(typha_updates_skipped\{30s\})) | -| Example value | \{instance="10.0.1.20:9093"\} 0 | -| Explanation | The number of updates from the datastore that Typha detected were no-ops. For example, an update to a Kubernetes node resource that did not touch any values that is of interest to $[prodname]. Such updates are not propagated to clients, which saves resources. | -| Threshold value recommendation | N/A | -| Threshold breach symptoms | N/A | -| Threshold breach recommendations | N/A | -| Priority level | Optional. | - -## Typha snapshot details - -### Snapshot send time - -| Median snapshot send time | | -| -------------------------------- | ------------------------------------------------------------ | -| Metric | max by (instance) (typha_client_snapshot_send_secs\{quantile="0.5"\}) | -| Example value | \{instance="10.0.1.20:9093"\} NaN | -| Explanation | The median time to stream the initial datastore snapshot to each client. It is useful to know the time it takes for a client to receive the data when it connects; it does not include time to process the data. | -| Threshold value recommendation | Investigate if this value is moving towards 10s of seconds. | -| Threshold breach symptoms | High values of this metric could indicate that newly-started clients are taking a long time to get the latest snapshot of the datastore, increasing the window of time where networking/policy updates are not being applied to the data plane during a restart/upgrade. Typha has a write timeout for writing the snapshot; if a client cannot receive the snapshot within that timeout, it is disconnected. Clients falling behind on information and updates contained in the datastore (for example, $[prodname] network policy object may not be current). | -| Threshold breach recommendations | Check Typha and calico-node logs and resource usage. Check for network congestion. Investigate why a particular calico-node is slow; it is likely on an overloaded node with insufficient CPU). | -| Priority level | Optional. | - -### Clients requiring grace period - -| Clients requiring grace period | | -| -------------------------------- | ------------------------------------------------------------ | -| Metric | sum by (instance) (typha_connections_grace_used) | -| Example value | \{instance="10.0.1.20:9093"\} 0 | -| Explanation | The number of Typhas with clients that required a grace period. After sending the snapshot to the client, Typha allows a grace period for the client to catch up to the most recent data. Typha sending the initial snapshot should take < 1 second, but the processing of the snapshot could take longer, so this grace period is there to allow the newly connected client to process the snapshot. | -| Threshold value recommendation | If this metric is constantly increasing, it can indicate potential performance issues with Typha and clients. It can indicate that performance is being impacted and may warrant investigation. | -| Threshold breach symptoms | High values of this metric could indicate clients falling behind on information and updates contained in the datastore (for example, $[prodname] network policy object may not be current). 
| -| Threshold breach recommendations | Check Typha and calico-node logs and resource usage. Check for network congestion, and determine the root cause. | -| Priority level | Optional. | - -### Max snapshot size (raw) - -| Max snapshot size (raw) | | -| -------------------------------- | ------------------------------------------------------------ | -| Metric | max(typha_snapshot_raw_bytes) | -| Example value | \{\} 557359 | -| Explanation | The raw size in bytes of snapshots sent from Typha to clients. | -| Threshold value recommendation | N/A | -| Threshold breach symptoms | N/A | -| Threshold breach recommendations | N/A | -| Priority Level | Optional. | - -### Max snapshot size (compressed) - -| Max snapshot size (compressed) | | -| -------------------------------- | ------------------------------------------------------------ | -| Metric | max(typha_snapshot_compressed_bytes) | -| Example value | \{\}134845 | -| Explanation | The compressed size in bytes of snapshots sent from Typha to clients. | -| Threshold value recommendation | This metric can be helpful for customers to estimate the bandwidth requirements for Felix to startup. For example, if the compressed snapshot size is 20MB in size on average, and 1000 Felix/calico-nodes start up, the bandwidth requirements could be estimated at 20GB between the pool of Typha and the set of Felixes across the network. | -| Threshold breach symptoms | N/A | -| Threshold breach recommendations | N/A | -| Priority Level | Optional. | - -## Policy metrics - -:::note -The following policy metrics are a separate endpoint exposed by Felix that are used in the web console. They require special Prometheus configuration to scrape the metrics. For details, see [Policy metrics](./policy-metrics). - -::: - -### Denied traffic - -| Denied traffic | | -| -------------------------------- | ------------------------------------------------------------ | -| Metric | calico_denied_packets
    calico_denied_bytes
    |
-| Example value | calico_denied_packets\{endpoint="calico-metrics-port", instance="ip-10-0-1-30.ca-central-1.compute.internal", job="calico-node-metrics", namespace="calico-system", pod="calico-node-6pcqm", policy="default |
-| Explanation | Number of packets or bytes that have been dropped by explicit or implicit deny rules. Note that you'll get one instance of `calico_denied_packets/bytes` for each policy rule that is denying traffic. For example: calico_denied_packets\{policy="tier1\|fv/policy1\|0\|deny\|-1",srcIP="10.245.13.133"\} |
-| Threshold value recommendation | The general rule of thumb is that this metric should report zero in a stable state. Any deviation means that policy and traffic have diverged. Achieving a zero state depends on the stability and maturity of your cluster and policy. |
-| Threshold breach symptoms | Either unexpected traffic is being denied because of an attack (one example), or expected traffic is being denied because of a misconfiguration in a policy. |
-| Threshold breach recommendations | If this metric indicates that policy and traffic have diverged, the recommended steps are: Determine if an attack is causing the metric to spike, or if these flows should be allowed. If the flow should indeed be allowed, update the policy or a preceding policy to allow this traffic. |
-| Priority level | Recommended. |
-
-### Traffic per rule
-
-| Traffic per rule | |
-| -------------------------------- | ------------------------------------------------------------ |
-| Metric | cnx_policy_rule_bytes&#10;
    cnx_policy_rule_packets | -| Example value | cnx_policy_rule_bytes\{action="allow", endpoint="calico-metrics-port", instance="ip-10-0-1-20.ca-central-1.compute.internal", job="calico-node-metrics", namespace="calico-system", pod="calico-node-qzpkt", policy="es-kube-controller-access", rule_direction="egress", rule_index="1", service="calico-node-metrics", tier="allow-tigera", traffic_direction="inbound"\} | -| Explanation | Number of bytes or packets handled by $[prodname] network policy rules. | -| Threshold value recommendation | This metric should usually be non-zero (unless expected). A zero value indicates the rule is not matching any packets, and could be surplus to requirements. | -| Threshold breach symptoms | N/A | -| Threshold breach recommendations | If this metrics consistently reports a zero value over an acceptable period of time, you can consider removing the policy rule. | -| Priority Level | Optional. | - -### Connections per policy rule - -| Connections per policy rule | | -| -------------------------------- | ------------------------------------------------------------ | -| Metric | cnx_policy_rule_connections | -| Example value | cnx_policy_rule_connections\{endpoint="calico-metrics-port", instance="ip-10-0-1-20.ca-central-1.compute.internal", job="calico-node-metrics", namespace="calico-system", pod="calico-node-qzpkt", policy="es-kube-controller-access", rule_direction="egress", rule_index="0", service="calico-node-metrics", tier="allow-tigera", traffic_direction="outbound"\} | -| Explanation | Number connections handled by $[prodname] policy rules. | -| Threshold value recommendation | This metric is similar to *Traffic per Rule* but this deals more with flow monitoring. This metric should usually be non-zero. A zero value indicates that the rule is not matching any packets and could be surplus to requirements. | -| Threshold breach symptoms | N/A | -| Threshold breach recommendations | If this metrics consistently reports a zero value over an acceptable period of time, this policy rule can be considered for removal. | -| Priority Level | Optional. | -## Felix cluster-state metrics - -### CPU usage - -| CPU usage | | -| -------------------------------- | ------------------------------------------------------------ | -| Metric | rate(process_cpu_seconds_total\{30s\}) \* 100 | -| Example value | \{endpoint="metrics-port", instance="10.0.1.20:9091", job="felix-metrics-svc", namespace="calico-system", pod="calico-node-qzpkt", service="felix-metrics-svc"\}3.1197504199664072 | -| Explanation | CPU in use by calico-node represented as a percentage of a core. | -| Threshold value recommendation | A spike at startup is normal. It is recommended to first achieve a baseline and then monitor for any unexpected increases from this baseline. Investigate if maintained CPU usage goes above 90%. | -| Threshold breach symptoms | Unexpected maintained CPU usage could cause Felix to fall behind and could cause delays to policy updates. | -| Threshold breach recommendations | Check CPU usage on Kubernetes nodes. Increase resources if needed, rollout restart calico-node(s) if needed. | -| Priority level | Recommended. 
| - -### Memory usage - -| Memory usage | | -| -------------------------------- | ------------------------------------------------------------ | -| Metric | process_resident_memory_bytes | -| Example value | process_resident_memory_bytes\{endpoint="metrics-port", instance="10.0.1.20:9091", job="felix-metrics-svc", namespace="calico-system", pod="calico-node-qzpkt", service="felix-metrics-svc"\} 98996224 | -| Explanation | Amount of memory in use by calico-node. | -| Threshold value recommendation | Recommended to achieve a baseline first, then monitor for any unexpected increases from this baseline. Investigate if maintained CPU usage goes above 90% of what is available from the underlying node. | -| Threshold breach symptoms | Unexpected, maintained, memory usage could cause Felix to fall behind and could cause delays to policy updates. | -| Threshold breach recommendations | Check memory usage on Kubernetes nodes. Increase resources if needed, rollout restart typha(s) if needed. | -| Priority level | Recommended. | - -### Active hosts on each endpoint - -| Active hosts on each endpoint | | -| -------------------------------- | ------------------------------------------------------------ | -| Metric | felix_active_local_endpoints | -| Example value | felix_active_local_endpoints\{endpoint="metrics-port", instance="10.0.1.30:9091", job="felix-metrics-svc", namespace="calico-system", pod="calico-node-6pcqm", service="felix-metrics-svc"\} 36 | -| Explanation | Number of active pod-networked pods, and HEPs, on this node. | -| Threshold value recommendation | Threshold relates to resource limits on the node for example kubelet's max pods setting. | -| Threshold breach symptoms | Suggests Felix is getting out of sync. | -| Threshold breach recommendations | Rolling restart calico-node and report issue to support. | -| Priority level | Optional. | - -### Active calico nodes - -| Active calico nodes | | -| -------------------------------- | ------------------------------------------------------------ | -| Metric | max(felix_cluster_num_hosts) | -| Example value | \{\} 3 | -| Explanation | Total number of nodes in the cluster that have calico-node deployed and running. | -| Threshold value recommendation | This value should be equal to the number of nodes in the cluster. If there are discrepancies, then calico-nodes on some nodes are having issues. | -| Threshold breach symptoms | $[prodname] network policies on affected nodes could be out-of-sync. | -| Threshold breach recommendations | Check calico-node logs, rollout restart calico-node if needed. | -| Priority level | Recommended. | - -### Felix cluster policies - -| Felix cluster policies | | -| -------------------------------- | ------------------------------------------------------------ | -| Metric | felix_cluster_num_policies | -| Example value | felix_cluster_num_policies\{endpoint="metrics-port", instance="10.0.1.20:9091", job="felix-metrics-svc", namespace="calico-system", pod="calico-node-qzpkt", service="felix-metrics-svc"\} 58 | -| Explanation | Total number of $[prodname] network policies in the cluster. | -| Threshold value recommendation | Because $[prodname] is a distributed system, the number of policies should be generally consistent across all nodes. It is expected to have some skew between nodes for a short period of time while they sync, however they should never be out of sync for very long. 
| -| Threshold breach symptoms | If nodes are out of sync for long time, calico-nodes may be having issues or experiencing resource contention. Check the Errors Plot to see if there are any iptables errors reported. | -| Threshold breach recommendations | Redeploy calico-node if issues are seen, and increase resources if needed. | -| Priority level | Optional. | - -### Felix active local policies - -| Felix active local policies | | -| -------------------------------- | ------------------------------------------------------------ | -| Metric | felix_active_local_policies | -| Example value | felix_active_local_policies\{endpoint="metrics-port", instance="10.0.1.30:9091", job="felix-metrics-svc", namespace="calico-system", pod="calico-node-6pcqm", service="felix-metrics-svc"\} 44 | -| Explanation | Total number of network policies deployed on per node basis. | -| Threshold value recommendation | There is no hard limit to active policies. We can handle 1000+ active policies, but it impacts performance, especially if there's pod churn. The best solution is to optimize policies by combining multiple rules into one policy, and make sure that top-level policy selectors are being used. | -| Threshold breach symptoms | N/A | -| Threshold breach recommendations | Redeploy calico-node if issues are seen, and increase resources if needed. | -| Priority level | Recommended. | - -### Felix open FDS - -| Felix open FDS | | -| -------------------------------- | ------------------------------------------------------------ | -| Metric | sum by (pod) (process_open_fds\{pod=~"calico-node.*"\}) | -| Example value | \{pod="calico-node-6pcqm"\} 90 | -| Explanation | Number of opened file descriptors per calico-node pod. | -| Threshold value recommendation | Alert on this metric when it approaches the ulimit (as reported in `process_max_fds` value). You should not be anywhere near the maximum. | -| Threshold breach symptoms | Felix may become unstable/crash or fail to apply updates as it should. These failures and issues are logged. | -| Threshold breach recommendations | Check Felix logs, redeploy calico-node if you see log issues, and increase `max_fds value` if possible. | -| Priority Level | Optional. | - -### Felix max FDS - -| Felix max FDS | | -| -------------------------------- | ------------------------------------------------------------ | -| Metric | sum by (pod) (process_max_fds\{pod=~"calico-node.*"\}) | -| Example value | \{pod="calico-node-qzpkt"\} 1048576 | -| Explanation | Maximum number of opened file descriptors allowed per calico-node pod. | -| Threshold value recommendation | N/A | -| Threshold breach symptoms | N/A | -| Threshold breach recommendations | N/A | -| Priority level | Optional. | - -### Felix resync started - -| Felix resync started | | -| -------------------------------- | ------------------------------------------------------------ | -| Metric | sum(rate(felix_resyncs_started\{5m\})) | -| Explanation | This is the number of times that Typha has reported to Felix that it is re-connecting with the datastore. | -| Threshold value recommendation | Occasional resyncs are normal. Investigate resync counters that rapidly rise. | -| Threshold breach symptoms | Typha pods may be having issues or experiencing resource contention. Some calico-nodes that are paired with Typha pods experiencing issues will not be able to sync with the datastore. | -| Threshold breach recommendations | Investigate the root cause to avoid redeploying Typha (which can be very disruptive). 
Check resource contention and network connectivity from Typha to the datastore to see if Typha is working fine or if the API server is overloaded. | -| Priority level | Recommended. | - -### Felix dropped logs - -| Felix dropped logs | | -| -------------------------------- | ------------------------------------------------------------ | -| Metric | felix_logs_dropped | -| Example value | felix_logs_dropped\{endpoint="metrics-port", instance="10.0.1.20:9091", job="felix-metrics-svc", namespace="calico-system", pod="calico-node-qzpkt", service="felix-metrics-svc"\} 0 | -| Explanation | The number of logs Felix has dropped. Note that this metric does not count flow-logs; it counts logs to stdout. | -| Threshold value recommendation | Occasional drops are normal. Investigate if drop counters rapidly rise. | -| Threshold breach symptoms | Felix will drop logs if it cannot keep up with writing them out. These are ordinary code logs, not flow logs. Calico-node may be under resource constraints. | -| Threshold breach recommendations | Check CPU usage on calico-nodes and Kubernetes nodes. Increase resources if needed, and rollout restart calico-node(s) if needed. | -| Priority level | Optional. | - -## Felix error metrics - -### IPset errors - -| IPset errors | | -| -------------------------------- | ------------------------------------------------------------ | -| Metric | sum(rate(felix_ipset_errors\{5m\})) | -| Example value | \{\} 0 | -| Explanation | Number of ipset creation, modification, and deletion command failures. This metric reports how many times the ipset command has failed when Felix tried to run it. An error can occur when Felix sends bad ipset command data, or the kernel throws an error (potentially because it was too busy to handle this request at that time). | -| Threshold value recommendation | Occasional errors are normal. Investigate error counters that rapidly rise. | -| Threshold breach symptoms | $[prodname] network policies may not scope all endpoints in network policy rules. Cluster nodes may be under resource contention, which may result in other _error and _seconds metrics rising. Repeated errors could mean some persistent problem (for example, some other process has created an IP set with that name, which is incompatible). | -| Threshold breach recommendations | See the Errors Plot graph to determine if the scope is cluster-wide or node-local. Check calico-node logs. Check resource usage and contention on Kubernetes nodes and calico-nodes. Add nodes/resources if needed. If resource contention is not seen, restart calico-node(s) and monitor. Ensure that other process using IPtables are not blocking $[prodname] network policy management. | -| Priority level | Optional. | - -### Iptables restore errors - -| Iptables restore errors | | -| -------------------------------- | ------------------------------------------------------------ | -| Metric | sum(rate(felix_iptables_restore_errors\{5m\})) | -| Explanation | The number of iptables-restore errors over five minutes. The iptables-restore command is used when $[prodname] makes a change to iptables. For example, a new WEP or HEP is created, changes to a WEP or HEP or a change to a policy that affects a WEP or HEP. | -| Threshold value recommendation | Occasional errors are normal. Investigate error counters that rapidly rise. | -| Threshold breach symptoms | $[prodname] network policies are not up to date. Cluster nodes may be under resource contention, which may result in other _error and _seconds metrics rising. 
| -| Threshold breach recommendations | See the Errors Plot graph to determine if the scope is cluster-wide or node-local. Check calico-node logs. Check resource usage and contention on Kubernetes nodes and calico-nodes. Add nodes/resources if needed. If no resource contention is seen, restart calico-node and monitor. | -| Priority level | Optional. | - -### Iptables save errors - -| Iptables save errors | | -| -------------------------------- | ------------------------------------------------------------ | -| Metric | sum(rate(felix_iptables_save_errors\{5m\})) | -| Example value | \{\} 0 | -| Explanation | Number of iptables-save errors. The iptables-save command is run before every iptables-restore command so that $[prodname] has the current state of iptables. | -| Threshold value recommendation | Occasional errors are normal. Investigate error counters that rapidly rise. | -| Threshold breach symptoms | $[prodname] network policies are not up to date. Cluster nodes may be under resource contention, which may result in other _error and _seconds metrics rising. Repeated errors could mean some persistent problem (for example, some other process has creating iptables rules that $[prodname] cannot decode with the version of iptables-save in use). | -| Threshold breach recommendations | See the Errors Plot graph to determine if the scope is cluster-wide or node-local. Check calico-node logs. Check resource usage and contention on Kubernetes nodes and calico-nodes. Add nodes/resources if needed. If no resource contention is seen, restart calico-node and monitor. | -| Priority level | Optional. | - -### Felix log errors - -| Felix log errors | | -| -------------------------------- | ------------------------------------------------------------ | -| Metric | sum(rate(felix_log_errors\{5m\})) | -| Example value | \{\} 0 | -| Explanation | The number of times Felix fails to write out a log because the log buffer is full. | -| Threshold value recommendation | Occasional errors are normal. Investigate error counters that rapidly rise. | -| Threshold breach symptoms | Calico-node may be under resource contention, which may result in other _error and _seconds metrics rising. | -| Threshold breach recommendations | See the Errors Plot graph to determine if the scope is cluster-wide or node-local. Check resource usage and contention on Kubernetes nodes and calico-nodes. Add nodes/resources if needed. If no resource contention is seen, restart calico-node and monitor. | -| Priority level | Optional. | - -### Monitor Felix metrics using a graph - -| Errors plot graph | | -| -------------------------------- | ------------------------------------------------------------ | -| Metric | rate(felix_ipset_errors\{5m\}) \|\| rate(felix_iptables_restore_errors[5m]) \|\| rate(felix_iptables_save_errors[5m]) \|\| rate(felix_log_errors\{5m\}) | -| Example value | \{endpoint="metrics-port", instance="10.0.1.20:9091", job="felix-metrics-svc", namespace="calico-system", pod="calico-node-qzpkt", service="felix-metrics-svc"\} 0 | -| Explanation | Checks if there have been any iptables-save, iptables-restore, or ipset command errors in the past five minutes. Keeps track of what node is reporting which error. | -| Threshold value recommendation | Occasional errors are normal. Investigate error counters that rapidly rise. For this specific metric it is worth focusing on the metric that is spiking, and referencing that metric information. 
| -| Threshold breach symptoms | Dependent on the specific metric that is logging errors. | -| Threshold breach recommendations | If more than one metric is rising, check if all rising metrics are related to a specific calico-node. If this is the case, then the issue is local to that calico-node. Check calico-node logs. Check resource usage for the node and calico-node pod. If more than one metric is rising rapidly across all calico-nodes, then it is a cluster-wide issue and cluster health must be checked. Check cluster resource usage, cluster networking/infrastructure health, and restart calico-nodes and calico-typha pods. | -| Priority level | Recommended. | - -## Felix time-based metrics - -### Data plane apply time quantile 0.5/0.9/0.99 - -| Data plane apply time quantile 0.5/0.9/0.99 | | -| ------------------------------------------ | ------------------------------------------------------------ | -| Metric | felix_int_dataplane_apply_time_seconds\{quantile="0.5"\}
    felix_int_dataplane_apply_time_seconds\{quantile="0.9"\}
    felix_int_dataplane_apply_time_seconds\{quantile="0.99"\} |
-| Example value | felix_int_dataplane_apply_time_seconds\{quantile="0.5"\}:felix_int_dataplane_apply_time_seconds\{endpoint="metrics-port", instance="10.0.1.30:9091", job="felix-metrics-svc", namespace="calico-system", pod="calico-node-6pcqm", quantile="0.5", service="felix-metrics-svc"\} 0.020859218 |
-| Explanation | Time in seconds that it took to apply a data plane update, viewed at the median, 90th percentile, and 99th percentile. |
-| Threshold value recommendation | Thresholds will vary depending on cluster size and rate of churn. It is recommended that a baseline be set to determine a normal threshold value. In the field we have seen >10s in extremely high-scale clusters with 100k+ endpoints and lots of policy/Kubernetes services. |
-| Threshold breach symptoms | Large time-to-apply values will cause a delay between $[prodname] network policy commits and enforcement in the data plane. This is partly dependent on how long $[prodname] waits for kube-proxy to release the iptables lock, which is influenced by the number of services in use. |
-| Threshold breach recommendations | Increase cluster resources, and reduce the number of Kubernetes services if possible. |
-| Priority level | Recommended. |
-
-### Felix route table list seconds quantile 0.5/0.9/0.99
-
-| Felix route table list seconds quantile 0.5/0.9/0.99 | |
-| ---------------------------------------------------- | ------------------------------------------------------------ |
-| Metric | felix_route_table_list_seconds\{quantile="0.5"\}&#10;
    felix_route_table_list_seconds\{quantile="0.9"\}
    felix_route_table_list_seconds\{quantile="0.99"\} | -| Example value | felix_route_table_list_seconds\{quantile="0.5"\}:felix_route_table_list_seconds\{endpoint="metrics-port",instance="10.0.1.30:9091",job="felix-metrics-svc",namespace="calico-system", pod="calico-node-6pcqm",quantile="0.5", service="felix-metrics-svc"\} 0.000860426 | -| Explanation | Time to list all the interfaces during a resync, viewed at the median, 90th percentile and 99th percentile. | -| Threshold value recommendation | Thresholds will vary depending on the number of cali interfaces per node. It is recommended that a baseline be set to determine a normal threshold value. | -| Threshold breach symptoms | High values indicate high CPU usage in felix and slow data plane updates. | -| Threshold breach recommendations | Increase cluster resources. Reduce the number of cali interfaces per node where possible. | -| Priority level | Optional. | - -### Felix graph update time quantile 0.5/0.9/0/99 - -| Felix graph update time seconds quantile 0.5/0.9/0.99 | | -| ----------------------------------------------------- | ------------------------------------------------------------ | -| Metric | felix_calc_graph_update_time_seconds\{quantile="0.5"\}
    felix_calc_graph_update_time_seconds\{quantile="0.9"\}
    felix_calc_graph_update_time_seconds\{quantile="0.99"\} | -| Example value | felix_calc_graph_update_time_seconds\{quantile="0.5"\}:felix_calc_graph_update_time_seconds\{endpoint="metrics-port",instance="10.0.1.30:9091", job="felix-metrics-svc",namespace="calico-system", pod="calico-node-6pcqm",quantile="0.5", service="felix-metrics-svc"\} 0.00007129 | -| Explanation | This metric reports the time taken to update the calculation graph for each datastore on an update call, viewed at the median, 90th percentile and 99th percentile. The calculation graph is the Felix component that takes all the policies/workload endpoints/host endpoints information that it has received from Typha, and distills it down to data plane updates that are relevant for this node. | -| Threshold value recommendation | After *start of day* (where we will typically get a large update), then values should be sub 1 second (with occasional blips to 1+ seconds). Should be measured in milliseconds with the occasional blip to a second or two. Investigate if the result is constantly in values of seconds. | -| Threshold breach symptoms | High values indicate high CPU usage in felix and slow data plane updates. | -| Threshold breach recommendations | Increase cluster resources. Check calico-node logs. Rollout restart calico-node(s) if needed. | -| Priority level | Recommended. | diff --git a/calico-enterprise_versioned_docs/version-3.19-2/operations/monitor/prometheus/alertmanager.mdx b/calico-enterprise_versioned_docs/version-3.19-2/operations/monitor/prometheus/alertmanager.mdx deleted file mode 100644 index 6dfbb692b0..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/operations/monitor/prometheus/alertmanager.mdx +++ /dev/null @@ -1,103 +0,0 @@ ---- -description: Configure Alertmanager, a Prometheus feature that routes alerts. ---- - -# Configure Alertmanager - -Alertmanager is used by $[prodname] to route alerts from Prometheus to the administrators. -It handles routing, deduplicating, grouping, silencing and inhibition of alerts. - -More detailed information about Alertmanager is available in the [upstream documentation](https://prometheus.io/docs/alerting/latest/configuration). - -### Updating the AlertManager config - -- Save the current alertmanager secret, usually named `alertmanager-`. - Our manifests will end up creating a secret called: `alertmanager-calico-node-alertmanager`. - - ```bash - kubectl -n tigera-operator get secrets alertmanager-calico-node-alertmanager -o yaml > alertmanager-secret.yaml - ``` - -- The current alertmanager.yaml file is encoded and stored inside the - `alertmanager.yaml` key under the `data` field. You can decode it by - copying the value of `alertmanager.yaml` and using the `base64` command. - - ```bash - echo "" | base64 --decode > alertmanager-config.yaml - ``` - -- Make necessary changes to `alertmanager-config.yaml`. Once this is done, - you have to re-encode and save it to `alertmanager-secret.yaml`. You can do - this by (in Linux): - - ```bash - cat alertmanager-config.yaml | base64 -w 0 - ``` - -- Paste the output of the running the command above back in `alertmanager-secret.yaml` - replacing the value present in `alertmanager.yaml` field. Then apply this - updated manifest. - - ```bash - kubectl -n tigera-operator apply -f alertmanager-secret.yaml - ``` - -Your changes should be applied in a few seconds by the config-reloader -container inside the alertmanager pod launched by the prometheus-operator -(usually named `alertmanager-`). 
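-
-If you prefer to script the decode, edit, and re-encode steps above, the following
-is one possible sketch. It assumes `alertmanager.yaml` is the only key you need to
-preserve in the `alertmanager-calico-node-alertmanager` secret; adjust the names if
-your secret differs.
-
-```bash
-# Decode the current config from the secret (the key name contains a dot,
-# so a go-template lookup is used here).
-kubectl -n tigera-operator get secret alertmanager-calico-node-alertmanager \
-  -o go-template='{{ index .data "alertmanager.yaml" }}' | base64 --decode > alertmanager-config.yaml
-
-# Edit alertmanager-config.yaml, then regenerate the secret manifest and apply it.
-kubectl -n tigera-operator create secret generic alertmanager-calico-node-alertmanager \
-  --from-file=alertmanager.yaml=alertmanager-config.yaml \
-  --dry-run=client -o yaml | kubectl apply -f -
-```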
- -For more advice on writing alertmanager configuration files, see the -[alertmanager configuration](https://prometheus.io/docs/alerting/latest/configuration/) documentation. - -### Configure Inhibition Rules - -Alertmanager has a feature to suppress certain notifications according to -defined rules. A typical use case for defining `inhibit` rules is to suppress -notifications from a lower priority alert when one with a higher priority is -firing. These inhibition rules are defined in the alertmanager configuration -file. You can define one by adding this configuration snippet to your -`alertmanager.yaml`. - -```yaml noValidation -[...] -inhibit_rules: -- source_match: - severity: 'critical' - target_match: - severity: 'info' - # Apply inhibition for alerts generated by the same alerting rule - # and on the same node. - equal: ['alertname', 'instance'] -[...] -``` - -### Configure Grouping of Alerts - -Alertmanager also has a feature to group alerts based on labels and fine tune -how often to resend an alert and so on. In the case of Denied Packet metrics, -simply defining a Prometheus alerting rule would mean that you will get an -page (if so defined in your alertmanager configuration) for every policy on -every node for every Source IP. All these alerts can be combined into a single -alert by configuring grouping. The Alertmanager configuration file that is -provided with $[prodname] by default, groups alerts on a -per-node basis. Instead, if the goal is to group all alerts with the same -name, edit (and apply) the alertmanager configuration file like so: - -```yaml -global: - resolve_timeout: 5m -route: - group_by: ['alertname'] - group_wait: 30s - group_interval: 1m - repeat_interval: 5m - receiver: 'webhook' -receivers: - - name: 'webhook' - webhook_configs: - - url: 'http://calico-alertmanager-webhook:30501/' -``` - -More information, including descriptions of the various options can be found under the -[route section](https://prometheus.io/docs/alerting/latest/configuration/#route) -of the Alertmanager Configuration guide. diff --git a/calico-enterprise_versioned_docs/version-3.19-2/operations/monitor/prometheus/byo-prometheus.mdx b/calico-enterprise_versioned_docs/version-3.19-2/operations/monitor/prometheus/byo-prometheus.mdx deleted file mode 100644 index 5f054ad01c..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/operations/monitor/prometheus/byo-prometheus.mdx +++ /dev/null @@ -1,431 +0,0 @@ ---- -description: Steps to get Calico Enterprise metrics using your own Prometheus. ---- - -# Bring your own Prometheus - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - -## Big picture - -Scrape $[prodname] metrics for Bring Your Own (BYO) Prometheus. - -## Value - -$[prodname] uses the Prometheus monitoring tool to scrape metrics from instrumented jobs, and displays time-series data in a visualizer such as Grafana. You can scrape the following time-series metrics for $[prodname] components to your own Prometheus: - -- elasticsearch -- fluentd -- calico-node -- kube-controllers -- felix -- typha (not enabled by default) - -## Before you begin - -**Supported** - -For the supported version of Prometheus in this release, see the [Release Notes](../../../release-notes/index.mdx) (`coreos-prometheus`). 
- -## How to - -- [Scrape all enabled metrics](#scrape-all-enabled-metrics) -- [Scrape metrics from specific components directly](#scrape-metrics-from-specific-components-directly) -- [Verify BYO Prometheus](#verify-byo-prometheus) -- [Create policy to secure traffic between pods](#create-policy-to-secure-traffic-between-pods) -- [Troubleshooting](#troubleshooting) - -### Scrape all enabled metrics - -In this section we create a service monitor that scrapes all enabled metrics. To enable metrics that -are not enabled by default, please consult the [next section](#scrape-metrics-from-specific-components). - -The following example shows a Prometheus server installed in namespace "external-prometheus" with a `serviceMonitorSelector` that selects all service monitors with the label `k8s-app=tigera-external-prometheus`. - -1. Save the following configuration in a file called `monitor.yaml`. - - ```yaml - apiVersion: operator.tigera.io/v1 - kind: Monitor - metadata: - name: tigera-secure - spec: - externalPrometheus: - namespace: external-prometheus - serviceMonitor: - labels: - k8s-app: tigera-external-prometheus - ``` - For a list of all configuration options, see the [Installation API reference](../../../reference/installation/api.mdx). - -2. Apply the manifest to your cluster. - - ```bash - kubectl apply -f monitor.yaml - ``` - -3. Verify that the new configuration has been added to your cluster - ```bash - export NS=external-prometheus - kubectl get servicemonitor -n $NS tigera-external-prometheus - kubectl get serviceaccount -n $NS tigera-external-prometheus - kubectl get secret -n $NS tigera-external-prometheus - kubectl get clusterrole tigera-external-prometheus - kubectl get clusterrolebinding tigera-external-prometheus - ``` - That's it. You should be seeing the new metrics show up in your Prometheus instance within a minute. For more information on verifying metrics, see the section, [Verify BYO Prometheus](#verify-byo-prometheus). - -### Scrape metrics from specific components directly - -We recommend the previous section for scraping all enabled metrics. Read on if you wish to scrape metrics from specific -components directly using mTLS, or if you wish to enable metrics that are disabled by default. - - - - -**Configure TLS certificates** - -1. Copy the required secret and configmap to your namespace. -2. Save the manifest of the required TLS secret and CA configmap. - - ```bash - kubectl get secret calico-node-prometheus-client-tls -n tigera-prometheus -o yaml > calico-node-prometheus-client-tls.yaml - ``` - - ```bash - kubectl get configmap -n tigera-prometheus tigera-ca-bundle -o yaml > tigera-ca-bundle.yaml - ``` - -3. Edit `calico-node-prometheus-client-tls.yaml` and `tigera-ca-bundle.yaml` by changing the namespace to the namespace where your prometheus instance is running. -4. Apply the manifests to your cluster. - - ```bash - kubectl apply -f calico-node-prometheus-client-tls.yaml - ``` - - ```bash - kubectl apply -f tigera-ca-bundle.yaml - ``` - -**Create the service monitor** - -Apply the ServiceMonitor to the namespace where Prometheus is running. - -```bash -export NAMESPACE= -``` - -```bash -kubectl apply -f $[filesUrl]/manifests/prometheus/elasticsearch-metrics-service-monitor.yaml -n $NAMESPACE -``` - -The .yamls have no namespace defined so when you apply `kubectl`, it is applied in the $NAMESPACE. - - - - -**Configure TLS certificates** - -1. Copy the required secret and configmap to your namespace. -2. Save the manifest of the required TLS secret and CA configmap. 
- - ```bash - kubectl get secret calico-node-prometheus-client-tls -n tigera-prometheus -o yaml > calico-node-prometheus-client-tls.yaml - ``` - - ```bash - kubectl get configmap -n tigera-prometheus tigera-ca-bundle -o yaml > tigera-ca-bundle.yaml - ``` - -3. Edit `calico-node-prometheus-client-tls.yaml` and `tigera-ca-bundle.yaml` and change the namespace to the namespace where your prometheus instance is running. -4. Apply the manifests to your cluster. - - ```bash - kubectl apply -f calico-node-prometheus-client-tls.yaml - ``` - - ```bash - kubectl apply -f tigera-ca-bundle.yaml - ``` - -**Create the service monitor** - -Apply the ServiceMonitor to the namespace where Prometheus is running. - -```bash -export NAMESPACE= -``` - -```bash -kubectl apply -f $[filesUrl]/manifests/prometheus/fluentd-metrics-service-monitor.yaml -n $NAMESPACE -``` - -The .yamls have no namespace defined so when you apply `kubectl`, it is applied in the $NAMESPACE. - - - - -**Configure TLS certificates** - -1. Copy the required secret and configmap to your namespace. -2. Save the manifest of the required TLS secret and CA configmap. - - ```bash - kubectl get secret calico-node-prometheus-client-tls -n tigera-prometheus -o yaml > calico-node-prometheus-client-tls.yaml - ``` - - ```bash - kubectl get configmap -n tigera-prometheus tigera-ca-bundle -o yaml > tigera-ca-bundle.yaml - ``` - -3. Edit `calico-node-prometheus-client-tls.yaml` and `tigera-ca-bundle.yaml` by changing the namespace to the namespace where your prometheus instance is running. -4. Apply the manifests to your cluster. - - ```bash - kubectl apply -f calico-node-prometheus-client-tls.yaml - ``` - - ```bash - kubectl apply -f tigera-ca-bundle.yaml - ``` - -**Create the service monitor** - -Apply the ServiceMonitor to the namespace where Prometheus is running. - -```bash -export NAMESPACE= -``` - -```bash -kubectl apply -f $[filesUrl]/manifests/prometheus/calico-node-monitor-service-monitor.yaml -n $NAMESPACE -``` - -The .yamls have no namespace defined so when you apply `kubectl`, it is applied in $NAMESPACE. - - - - -**Configure TLS certificates** - -1. Copy the required secret and configmap to your namespace. -2. Save the manifest of the required TLS secret and CA configmap. - - ```bash - kubectl get secret calico-node-prometheus-client-tls -n tigera-prometheus -o yaml > calico-node-prometheus-client-tls.yaml - ``` - - ```bash - kubectl get configmap -n tigera-prometheus tigera-ca-bundle -o yaml > tigera-ca-bundle.yaml - ``` - -3. Edit `calico-node-prometheus-client-tls.yaml` and `tigera-ca-bundle.yaml` by changing the namespace to the namespace where your prometheus instance is running. -4. Apply the manifests to your cluster. - - ```bash - kubectl apply -f calico-node-prometheus-client-tls.yaml - ``` - - ```bash - kubectl apply -f tigera-ca-bundle.yaml - ``` - -**Create the service monitor** - -Apply the ServiceMonitor to the namespace where Prometheus is running. - -```bash -export NAMESPACE= -``` - -```bash -kubectl apply -f $[filesUrl]/manifests/prometheus/kube-controller-metrics-service-monitor.yaml -n $NAMESPACE -``` - -The .yamls have no namespace defined so when you apply `kubectl`, it is applied in the $NAMESPACE. - - - - -**Enable metrics** - -Felix metrics are not enabled by default. - -By default, Felix uses **port 9091 TCP** to publish metrics. - -Use the following command to enable Felix metrics. 
- -```bash -kubectl patch felixconfiguration default --type merge --patch '{"spec":{"prometheusMetricsEnabled": true}}' -``` - -You should see a result similar to: - -``` -felixconfiguration.projectcalico.org/default patched -``` - -For all Felix configuration values, see [Felix configuration](../../../reference/component-resources/node/felix/configuration.mdx). - -For all Prometheus Felix configuration values, see [Felix Prometheus](../../../reference/component-resources/node/felix/prometheus.mdx). - -**Create a service to expose Felix metrics** - -```bash -kubectl apply -f - < -``` - -```bash -kubectl apply -f $[filesUrl]/manifests/prometheus/felix-metrics-service-monitor.yaml -n $NAMESPACE -``` - -The .yamls have no namespace defined so when you apply `kubectl`, it is applied in the $NAMESPACE. - - - - -**Enable metrics** - -Typha metrics are not enabled by default. - -By default, Typha uses **port 9091** TCP to publish metrics. However, if $[prodname] is installed using the Amazon yaml file, this port will be 9093 because it is set manually using the **TYPHA_PROMETHEUSMETRICSPORT** environment variable. - -Use the following command to enable Typha metrics. - -```bash -kubectl patch installation default --type=merge -p '{"spec": {"typhaMetricsPort":9093}}' -``` - -You should see a result similar to: - -```bash -installation.operator.tigera.io/default patched -``` - -**Create the service monitor** - -Apply the ServiceMonitor to the namespace where Prometheus is running. - -```bash -export NAMESPACE= -``` - -```bash -kubectl apply -f $[filesUrl]/manifests/prometheus/typha-metrics-service-monitor.yaml -n $NAMESPACE -``` - -The .yamls have no namespace defined so when you apply `kubectl`, it is applied in the $NAMESPACE. - - - - -### Verify BYO Prometheus - -1. Access the Prometheus dashboard using the port-forwarding feature. - - ```bash - kubectl port-forward pod/byo-prometheus-pod 9090:9090 -n $NAMESPACE - ``` - -1. Browse to the Prometheus dashboard: http://localhost:9090. - -1. In the Expression text box, enter your metric name and click the **Execute** button. - - The Console table is populated with all of your nodes with the number of endpoints. - -### Troubleshooting - -This section is applicable only if you experience issues with mTLS after following the [Scrape metrics from specific components directly](#scrape-metrics-from-specific-components) -section. - -1. Use the following command to retrieve the tls.key and tls.cert. - - ```bash - export NAMESPACE= - ``` - - ```bash - kubectl get secret -n $NAMESPACE calico-node-prometheus-client-tls -o yaml - ``` - -1. Save the tls.key and tls.cert content into key and cert after base64 decode. - - ```bash - $:tls_key= - $:echo $tls_key|base64 -d >key.pem - - $:tls_cert= - $:echo $cert|base64 -d>cert.pem - ``` - -1. Get the ca-bundle certificate using this command: - - ```bash - kubectl get cm -n $NAMESPACE tigera-ca-bundle -o yaml - ``` - -1. Open a new file (bundle.pem) in your favorite editor, and paste the content from "BEGIN CERTIFICATE" to "END CERTIFICATE". - -1. Port-forward the prometheus pods and run this command with the forwarded port. - - ```bash - curl --cacert bundle.pem --key key.pem --cert cert.pem https://localhost:8080/metrics - ``` - -You should be able to see the metrics. - -### Create policy to secure traffic between pods - -To support zero trust, we recommend that you create $[prodname] network policy to allow the traffic between BYO Prometheus pods, and the respective metrics pods. 
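As a starting point, the sketch below shows what such a policy might look like. It assumes a BYO Prometheus pod labelled `app: byo-prometheus` running in the `external-prometheus` namespace and scraping the Felix metrics port (9091) on `calico-node` pods in `calico-system`; the labels, namespace, and port are illustrative, so adjust them to match your deployment.

```yaml
# Illustrative only: allow the BYO Prometheus pods to reach the calico-node
# metrics endpoints. Labels, namespace, and port are assumptions.
apiVersion: projectcalico.org/v3
kind: NetworkPolicy
metadata:
  name: allow-byo-prometheus-scrape
  namespace: external-prometheus
spec:
  selector: app == 'byo-prometheus'
  types:
    - Egress
  egress:
    - action: Allow
      protocol: TCP
      destination:
        namespaceSelector: projectcalico.org/name == 'calico-system'
        selector: k8s-app == 'calico-node'
        ports:
          - 9091
```

A matching ingress rule on the metrics pods' side is also needed if their namespaces enforce a default-deny policy.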
For samples of ingress and egress policies, see [Get started with Calico network policy](../../../network-policy/beginners/calico-network-policy.mdx). diff --git a/calico-enterprise_versioned_docs/version-3.19-2/operations/monitor/prometheus/configure-prometheus.mdx b/calico-enterprise_versioned_docs/version-3.19-2/operations/monitor/prometheus/configure-prometheus.mdx deleted file mode 100644 index 5ff021d239..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/operations/monitor/prometheus/configure-prometheus.mdx +++ /dev/null @@ -1,342 +0,0 @@ ---- -description: Configure rules for alerts and denied packets, for persistent storage. ---- - -# Configure Prometheus - -## Updating Denied Packets Rules - -This is an example of how to modify the sample rule created by the sample manifest. -The process of updating rules is the same as for user created rules (documented below). - -- Save the current alert rule: - - ```bash - kubectl -n tigera-prometheus get prometheusrule -o yaml > calico-prometheus-alert-rule-dp.yaml - ``` - -- Make necessary edits to the alerting rules then apply the updated manifest. - - ```bash - kubectl apply -f calico-prometheus-alert-rule-dp.yaml - ``` - -Your changes should be applied in a few seconds by the prometheus-config-reloader -container inside the prometheus pod launched by the prometheus-operator -(usually named `prometheus-`). - -As an example, the range query in this Manifest is 10 seconds. - -```yaml -apiVersion: monitoring.coreos.com/v1 -kind: PrometheusRule -metadata: - name: calico-prometheus-dp-rate - namespace: tigera-prometheus - labels: - role: tigera-prometheus-rules - prometheus: calico-node-prometheus -spec: - groups: - - name: calico.rules - rules: - - alert: DeniedPacketsRate - expr: rate(calico_denied_packets[10s]) > 50 - labels: - severity: critical - annotations: - summary: 'Instance {{$labels.instance}} - Large rate of packets denied' - description: '{{$labels.instance}} with calico-node pod {{$labels.pod}} has been denying packets at a fast rate {{$labels.sourceIp}} by policy {{$labels.policy}}.' -``` - -To update this alerting rule, to say, execute the query with a range of -20 seconds modify the manifest to this: - -```yaml -apiVersion: monitoring.coreos.com/v1 -kind: PrometheusRule -metadata: - name: calico-prometheus-dp-rate - namespace: tigera-prometheus - labels: - role: tigera-prometheus-rules - prometheus: calico-node-prometheus -spec: - groups: - - name: calico.rules - rules: - - alert: DeniedPacketsRate - expr: rate(calico_denied_packets[20s]) > 50 - labels: - severity: critical - annotations: - summary: 'Instance {{$labels.instance}} - Large rate of packets denied' - description: '{{$labels.instance}} with calico-node pod {{$labels.pod}} has been denying packets at a fast rate {{$labels.sourceIp}} by policy {{$labels.policy}}.' -``` - -## Creating a New Alerting Rule - -Creating a new alerting rule is straightforward once you figure out what you -want your rule to look for. Check [alerting rules](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) -and [Queries](https://prometheus.io/docs/querying/examples/) for more -information. - -### New Alerting Rule for Monitoring Calico Node - -To add the new alerting rule to our Prometheus instance, define a PrometheusRule manifest -in the `tigera-prometheus` namespace with the labels -`role: tigera-prometheus-rules` and `prometheus: calico-node-prometheus`. 
The -labels should match the labels defined by the `ruleSelector` field of the -Prometheus manifest. - -As an example, to fire a alert when a $[noderunning] instance has been down for -more than 5 minutes, save the following to a file, say `calico-node-down-alert.yaml`. - -```yaml -apiVersion: monitoring.coreos.com/v1 -kind: PrometheusRule -metadata: - name: calico-prometheus-calico-node-down - namespace: tigera-prometheus - labels: - role: tigera-prometheus-rules - prometheus: calico-node-prometheus -spec: - groups: - - name: calico.rules - rules: - - alert: CalicoNodeInstanceDown - expr: up == 0 - for: 5m - labels: - severity: warning - annotations: - summary: 'Instance {{$labels.instance}} Pod: {{$labels.pod}} is down' - description: '{{$labels.instance}} of job {{$labels.job}} has been down for more than 5 minutes' -``` - -Then create/apply this manifest in kubernetes. - -```bash -kubectl apply -f calico-node-down-alert.yaml -``` - -Your changes should be applied in a few seconds by the prometheus-config-reloader -container inside the prometheus pod launched by the prometheus-operator -(usually named `prometheus-`). - -### New Alerting Rule for Monitoring BGP Peers - -Let’s look at an example of a new alerting rule to our Prometheus instance with respect to monitoring BGP -peering health. Define a PrometheusRule manifest in the tigera-prometheus namespace with the labels -`role: tigera-prometheus-rules` and `prometheus: calico-node-prometheus`. The labels should match the labels -defined by the `ruleSelector` field of the Prometheus manifest. - -As an example, to fire an alert when the number of peering connections with a status other than “Established” -is increasing at a non-zero rate in the cluster (over the last 5 minutes), save the following to a file, say -`tigera-peer-status-not-established.yaml`. - -```yaml -apiVersion: monitoring.coreos.com/v1 -kind: PrometheusRule -metadata: - labels: - prometheus: calico-node-prometheus - role: tigera-prometheus-rules - name: tigera-prometheus-peer-status-not-established - namespace: tigera-prometheus -spec: - groups: - - name: calico.rules - rules: - - alert: CalicoNodePeerStatusNotEstablished - annotations: - description: '{{$labels.instance}} has at least one peer connection that is - no longer up.' - summary: Instance {{$labels.instance}} has peer connection that is no longer - up - expr: rate(bgp_peers{status!~"Established"}[5m]) > 0 - labels: - severity: critical -``` - -Then create/apply this manifest in kubernetes. - -```bash -kubectl apply -f tigera-peer-status-not-established.yaml -``` - -Your changes should be applied in a few seconds by the prometheus-config-reloader -container inside the prometheus pod launched by the prometheus-operator -(usually named `prometheus-`). - -## Additional Alerting Rules - -The Alerting Rules installed by the $[prodname] install manifest is a simple -one that fires an alert when the rate of denied packets denied by a policy on -a node from a particular Source IP exceeds a certain packets per second -threshold. 
The Prometheus query used for this (ignoring the threshold value 20) is: - -``` -rate(calico_denied_packets[10s]) -``` - -and this query will return results something along the lines of: - -``` -{endpoint="calico-metrics-port",instance="10.240.0.81:9081",job="calico-node-metrics",namespace="kube-system",pod="calico-node-hn0kl",policy="profile/k8s_ns.test/0/deny",service="calico-node-metrics",srcIP="192.168.167.129"} 0.6 -{endpoint="calico-metrics-port",instance="10.240.0.84:9081",job="calico-node-metrics",namespace="kube-system",pod="calico-node-97m3g",policy="profile/k8s_ns.test/0/deny",service="calico-node-metrics",srcIP="192.168.167.175"} 0.2 -{endpoint="calico-metrics-port",instance="10.240.0.84:9081",job="calico-node-metrics",namespace="kube-system",pod="calico-node-97m3g",policy="profile/k8s_ns.test/0/deny",service="calico-node-metrics",srcIP="192.168.252.157"} 0.4 -{endpoint="calico-metrics-port",instance="10.240.0.81:9081",job="calico-node-metrics",namespace="kube-system",pod="calico-node-hn0kl",policy="profile/k8s_ns.test/0/deny",service="calico-node-metrics",srcIP="192.168.167.175"} 1 -{endpoint="calico-metrics-port",instance="10.240.0.84:9081",job="calico-node-metrics",namespace="kube-system",pod="calico-node-97m3g",policy="profile/k8s_ns.test/0/deny",service="calico-node-metrics",srcIP="192.168.167.129"} 0.4 -{endpoint="calico-metrics-port",instance="10.240.0.81:9081",job="calico-node-metrics",namespace="kube-system",pod="calico-node-hn0kl",policy="profile/k8s_ns.test/0/deny",service="calico-node-metrics",srcIP="192.168.167.159"} 0.4 -{endpoint="calico-metrics-port",instance="10.240.0.81:9081",job="calico-node-metrics",namespace="kube-system",pod="calico-node-hn0kl",policy="profile/k8s_ns.test/0/deny",service="calico-node-metrics",srcIP="192.168.252.175"} 0.4 -{endpoint="calico-metrics-port",instance="10.240.0.84:9081",job="calico-node-metrics",namespace="kube-system",pod="calico-node-97m3g",policy="profile/k8s_ns.test/0/deny",service="calico-node-metrics",srcIP="192.168.252.175"} 0.6 -{endpoint="calico-metrics-port",instance="10.240.0.81:9081",job="calico-node-metrics",namespace="kube-system",pod="calico-node-hn0kl",policy="profile/k8s_ns.test/0/deny",service="calico-node-metrics",srcIP="192.168.252.157"} 0.6 -{endpoint="calico-metrics-port",instance="10.240.0.84:9081",job="calico-node-metrics",namespace="kube-system",pod="calico-node-97m3g",policy="profile/k8s_ns.test/0/deny",service="calico-node-metrics",srcIP="192.168.167.159"} 0.6 -``` - -We can modify this query to find out all packets dropped by different policies -on every node. - -``` -(sum by (instance,policy) (rate(calico_denied_packets[10s]))) -``` - -This query will aggregate the results from all different Source IPs, and -preserve the `policy` and `instance` labels. Note that the `instance` label -represents the calico node's IP Address and `PrometheusReporterPort`. 
This -query will return results like so: - -``` -{instance="10.240.0.84:9081",policy="profile/k8s_ns.test/0/deny"} 2 -{instance="10.240.0.81:9081",policy="profile/k8s_ns.test/0/deny"} 2.8 -``` - -To include the pod name in these results, add the label `pod` to the labels -listed in the `by` expression like so: - -``` -(sum by (instance,pod,policy) (rate(calico_denied_packets[10s]))) -``` - -which will return the following results: - -``` -{instance="10.240.0.84:9081",pod="calico-node-97m3g",policy="profile/k8s_ns.test/0/deny"} 2 -{instance="10.240.0.81:9081",pod="calico-node-hn0kl",policy="profile/k8s_ns.test/0/deny"} 2.8 -``` - -An interesting use case is when a rogue Pod is using tools such as nmap to -scan a subnet for open ports. To do this, we have to execute a query that will -aggregate across all policies on all instances while preserving the source IP -address. This can be done using this query: - -``` -(sum by (srcIP) (rate(calico_denied_packets[10s]))) -``` - -which will return results, different source IP address: - -``` -{srcIP="192.168.167.159"} 1.0000000000000002 -{srcIP="192.168.167.129"} 1.2000000000000002 -{srcIP="192.168.252.175"} 1.4000000000000001 -{srcIP="192.168.167.175"} 0.4 -{srcIP="192.168.252.157"} 1.0000000000000002 -``` - -To use these queries as Alerting Rules, follow the instructions defined in the -[Creating a new Alerting Rule](#creating-a-new-alerting-rule) section and create -a ConfigMap with the appropriate query. - -## Updating the scrape interval - -You may wish to modify the scrape interval (time between Prometheus polling each node for new denied packet information). -Increasing the interval reduces load on Prometheus and the amount of storage required, but decreases the detail of the collected metrics. - -The scrape interval of endpoints ($[noderunning] in our case) is defined as part of -the ServiceMonitor manifest. To change the interval: - -- Save the current ServiceMonitor manifest: - - ```bash - kubectl -n tigera-prometheus get servicemonitor calico-node-monitor -o yaml > calico-node-monitor.yaml - ``` - -- Update the `interval` field under `endpoints` to desired settings and - apply the updated manifest. - - ```bash - kubectl apply -f calico-node-monitor.yaml - ``` - -Your changes should be applied in a few seconds by the prometheus-config-reloader -container inside the prometheus pod launched by the prometheus-operator -(usually named `prometheus-`). - -As an example on what to update, the interval in this ServiceMonitor manifest -is 5 seconds (`5s`). - -```yaml -apiVersion: monitoring.coreos.com/v1alpha1 -kind: ServiceMonitor -metadata: - name: calico-node-monitor - namespace: tigera-prometheus - labels: - team: network-operators -spec: - selector: - matchLabels: - k8s-app: calico-node - namespaceSelector: - matchNames: - - kube-system - endpoints: - - port: calico-metrics-port - interval: 5s -``` - -To update $[prodname] Prometheus' scrape interval to 10 seconds modify the manifest -to this: - -```yaml -apiVersion: monitoring.coreos.com/v1alpha1 -kind: ServiceMonitor -metadata: - name: calico-node-monitor - namespace: tigera-prometheus - labels: - team: network-operators -spec: - selector: - matchLabels: - k8s-app: calico-node - namespaceSelector: - matchNames: - - kube-system - endpoints: - - port: calico-metrics-port - interval: 10s -``` - -## Troubleshooting Config Updates - -Check config reloader logs to see if they detected any recent activity. 
- -- For prometheus run: - - ```bash - kubectl -n tigera-prometheus logs prometheus- prometheus-config-reloader - ``` - -- For alertmanager run: - - ```bash - kubectl -n tigera-prometheus logs alertmanager- config-reloader - ``` - -The config-reloaders watch each pods file-system for updated config from -ConfigMap's or Secret's and will perform steps necessary for reloading -the configuration. diff --git a/calico-enterprise_versioned_docs/version-3.19-2/operations/monitor/prometheus/index.mdx b/calico-enterprise_versioned_docs/version-3.19-2/operations/monitor/prometheus/index.mdx deleted file mode 100644 index 67a21e239a..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/operations/monitor/prometheus/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: Configure open-source toolkit for systems monitoring and alerting. -hide_table_of_contents: true ---- - -# Prometheus - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/operations/monitor/prometheus/support.mdx b/calico-enterprise_versioned_docs/version-3.19-2/operations/monitor/prometheus/support.mdx deleted file mode 100644 index 94cdb176f0..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/operations/monitor/prometheus/support.mdx +++ /dev/null @@ -1,31 +0,0 @@ ---- -description: Prometheus support in Calico Enterprise. ---- - -# Prometheus support - -## Big picture - -$[prodname] uses the open-source [Prometheus monitoring and alerting toolkit](https://prometheus.io/docs/introduction/overview/). With these tools, you can view time-series metrics from $[prodname] components in the Prometheus and Grafana interfaces, or scrape the metrics for a BYO Prometheus deployment. - -## Install options - -### Use Prometheus operator managed by Tigera Operator - -You install the $[prodname] Prometheus operator and CRDs during $[prodname] installation. $[prodname] metrics and alerts are available in the web console. You configure alerts through Prometheus AlertManager. - -If you want to specify your own Prometheus operator during installation for management by the Tigera Operator, the required operator version must be **v0.40.0 or higher**. Because $[prodname] creates AlertManager and Prometheus CRs in the `tigera-prometheus` namespace, all you need to do is verify that your Prometheus operator is configured to manage Prometheus and AlertManager instances in the `tigera-prometheus` namespace. - -#### Prometheus on $[prodnameWindows] - -By default, the Windows firewall blocks listening on ports. For $[prodname] to manage the Prometheus metrics ports Windows firewall rules, enable the `windowsManageFirewallRules` setting in FelixConfiguration: - -```bash -kubectl patch felixConfiguration default --type merge --patch '{"spec": {"windowsManageFirewallRules": "Enabled"}}' -``` - -[See the FelixConfiguration reference for more details](../../../reference/resources/felixconfig.mdx). You can also add a Windows firewall rule that allows listening on the Prometheus ports instead of having $[prodname] manage it. - -### Bring your own Prometheus - -For details, see [Bring your own Prometheus](byo-prometheus.mdx). 
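Whichever install option you use, a quick sanity check that the operator-managed monitoring stack is healthy is to look at the Tigera Operator status and at the resources in the `tigera-prometheus` namespace. The commands below assume the default resource names created by the operator.

```bash
# Overall component status reported by the Tigera Operator.
kubectl get tigerastatus

# The Prometheus and Alertmanager instances, and their pods, managed in tigera-prometheus.
kubectl get prometheus,alertmanager -n tigera-prometheus
kubectl get pods -n tigera-prometheus
```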
diff --git a/calico-enterprise_versioned_docs/version-3.19-2/operations/troubleshoot/commands.mdx b/calico-enterprise_versioned_docs/version-3.19-2/operations/troubleshoot/commands.mdx deleted file mode 100644 index f78e06273e..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/operations/troubleshoot/commands.mdx +++ /dev/null @@ -1,471 +0,0 @@ ---- -description: Learn basic commands to verify cluster and components are working. ---- - -# Troubleshooting commands - -## Big picture - -Use command line tools to get status and troubleshoot. - -- [Hosts](#hosts) -- [Kubernetes](#kubernetes) -- [Calico components](#calico-components) -- [Routing](#routing) -- [Network policy](#network-policy) - -:::note - -`calico-system` is used for operator-based commands and examples; for manifest-based install, use `kube-system`. - -::: - -See [Calico architecture and components](../../reference/architecture/overview.mdx) for help with components. - -## Hosts - -### Verify number of nodes in a cluster - -```bash -kubectl get nodes -``` - -``` - -NAME STATUS ROLES AGE VERSION -ip-10-0-0-10 Ready master 27h v1.18.0 -ip-10-0-0-11 Ready 27h v1.18.0 -ip-10-0-0-12 Ready 27h v1.18.0 - -``` - -### Verify calico-node pods are running on every node, and are in a healthy state - -```bash -kubectl get pods -n calico-system -o wide -``` - -``` -NAME READY STATUS RESTARTS AGE IP NODE -calico-node-77zgj 1/1 Running 0 27h 10.0.0.10 ip-10-0-0-10 -calico-node-nz8k2 1/1 Running 0 27h 10.0.0.11 ip-10-0-0-11 -calico-node-7trv7 1/1 Running 0 27h 10.0.0.12 ip-10-0-0-12 -``` - -### Exec into pod for further troubleshooting - -```bash -kubectl run multitool --image=praqma/network-multitool - -kubectl exec -it multitool -- bash -``` - -``` -bash-5.0 ping 8.8.8.8 -PING 8.8.8.8 (8.8.8.8) 56(84) bytes of data. 
-64 bytes from 8.8.8.8: icmp_seq=1 ttl=97 time=6.61 ms -64 bytes from 8.8.8.8: icmp_seq=2 ttl=97 time=6.64 ms -``` - -### Collect $[prodname] diagnostic logs - -```bash -sudo calicoctl node diags -``` - -``` -Collecting diagnostics -Using temp dir: /tmp/calico194224816 -Dumping netstat -Dumping routes (IPv4) -Dumping routes (IPv6) -Dumping interface info (IPv4) -Dumping interface info (IPv6) -Dumping iptables (IPv4) -Dumping iptables (IPv6) - -Diags saved to /tmp/calico194224816/diags-20201127_010117.tar.gz -``` - -## Kubernetes - -### Verify all pods are running - -```bash -kubectl get pods -A -``` - -``` -kube-system coredns-66bff467f8-dxbtl 1/1 Running 0 27h -kube-system coredns-66bff467f8-n95vq 1/1 Running 0 27h -kube-system etcd-ip-10-0-0-10 1/1 Running 0 27h -kube-system kube-apiserver-ip-10-0-0-10 1/1 Running 0 27h -``` - -### Verify Kubernetes API server is running - -```bash -kubectl cluster-info -``` - -``` -Kubernetes master is running at https://10.0.0.10:6443 -KubeDNS is running at https://10.0.0.10:6443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy -ubuntu@master:~$ kubectl get svc -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -kubernetes ClusterIP 10.49.0.1 443/TCP 2d2h -``` - -### Verify Kubernetes kube-dns is working - -```bash -kubectl get svc -``` - -``` -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -kubernetes ClusterIP 10.49.0.1 443/TCP 2d2h -``` - -```bash -kubectl exec -it multitool bash -``` - -``` -bash-5.0 curl -I -k https://kubernetes -HTTP/2 403 -cache-control: no-cache, private -content-type: application/json -x-content-type-options: nosniff -content-length: 234 -``` - -```bash -bash-5.0 nslookup google.com -``` - -``` -Server: 10.49.0.10 -Address: 10.49.0.10#53 -Non-authoritative answer: -Name: google.com -Address: 172.217.14.238 -Name: google.com -Address: 2607:f8b0:400a:804::200e -``` - -### Verify that kubelet is running on the node with the correct flags - -```bash -systemctl status kubelet -``` - -If there is a problem, check the journal - -```bash -journalctl -u kubelet | head -``` - -### Check the status of other system pods - -Look especially at coredns; if they are not getting an IP, something is wrong with the CNI - -```bash -kubectl get pod -n kube-system -o wide -``` - -But if other pods fail, it is likely a different issue. Perform normal Kubernetes troubleshooting. For example: - -```bash -kubectl describe pod kube-scheduler-ip-10-0-1-20.eu-west-1.compute.internal -n kube-system | tail -15 -``` - -## Calico components - -### View Calico CNI configuration on a node - -```bash -cat /etc/cni/net.d/10-calico.conflist -``` - -### Verify calicoctl matches cluster - -The cluster version and type must match the calicoctl version. 
- -```bash -calicoctl version -``` - -For syntax: - -```bash -calicoctl version -help -``` - -### Check Tigera Operator status - -```bash -kubectl get tigerastatus -``` - -``` -NAME AVAILABLE PROGRESSING DEGRADED SINCE -calico True False False 27h -``` - -### Check if operator pod is running - -```bash -kubectl get pod -n tigera-operator -``` - -### View calico nodes - -```bash -kubectl get pod -n calico-system -o wide -``` - -### View $[prodname] installation parameters - -```bash -kubectl get installation -o yaml -``` - -```yaml -apiVersion: v1 -items: -- apiVersion: operator.tigera.io/v1 - kind: Installation - metadata: - - apiVersion: operator.tigera.io/v1 - spec: - calicoNetwork: - bgp: Enabled - hostPorts: Enabled - ipPools: - - blockSize: 26 - cidr: 10.48.0.0/16 - encapsulation: VXLANCrossSubnet - natOutgoing: Enabled - nodeSelector: all() - multiInterfaceMode: None - nodeAddressAutodetectionV4: - firstFound: true - cni: - ipam: - type: Calico - type: Calico -``` - -### Run commands across multiple nodes - -```bash -export THE_COMMAND_TO_RUN=date && for calinode in `kubectl get pod -o wide -n calico-system | grep calico-node | awk '{print $1}'`; do echo $calinode; echo "-----"; kubectl exec -n calico-system $calinode -- $THE_COMMAND_TO_RUN; printf "\n"; done -``` - -```bash -calico-node-87lpx ------ -Defaulted container "calico-node" out of: calico-node, flexvol-driver (init), install-cni (init) -Thu Apr 28 13:48:06 UTC 2022 - -calico-node-x5fmm ------ -Defaulted container "calico-node" out of: calico-node, flexvol-driver (init), install-cni (init) -Thu Apr 28 13:48:07 UTC 2022 - -``` - -### View pod info - -```bash -kubectl describe pods `` -n ` ` -``` - -```bash -kubectl describe pods busybox -n default -``` - -``` -Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal Scheduled 21s default-scheduler Successfully assigned default/busybox to ip-10-0-0-11 - Normal Pulling 20s kubelet, ip-10-0-0-11 Pulling image "busybox" - Normal Pulled 19s kubelet, ip-10-0-0-11 Successfully pulled image "busybox" - Normal Created 19s kubelet, ip-10-0-0-11 Created container busybox - Normal Started 18s kubelet, ip-10-0-0-11 Started container busybox -``` - -### View logs of a pod - -```bash -kubectl logs `` -n `` -``` - -```bash -kubectl logs busybox -n default -``` - -### View kubelet logs - -```bash -journalctl -u kubelet -``` - -## Routing - -### Verify routing table on the node - -```bash -ip route -``` - -``` -default via 10.0.0.1 dev eth0 proto dhcp src 10.0.0.10 metric 100 -10.0.0.0/24 dev eth0 proto kernel scope link src 10.0.0.10 -10.0.0.1 dev eth0 proto dhcp scope link src 10.0.0.10 metric 100 -10.48.66.128/26 via 10.0.0.12 dev eth0 proto 80 onlink -10.48.231.0/26 via 10.0.0.11 dev eth0 proto 80 onlink -172.17.0.0/16 dev docker0 proto kernel scope link src 172.17.0.1 linkdown -``` - -### Verify BGP peer status - -```bash -sudo calicoctl node status -``` - -``` -Calico process is running. 
- -IPv4 BGP status -+--------------+-------------------+-------+------------+-------------+ -| PEER ADDRESS | PEER TYPE | STATE | SINCE | INFO | -+--------------+-------------------+-------+------------+-------------+ -| 10.0.0.12 | node-to-node mesh | up | 2020-11-25 | Established | -| 10.0.0.11 | node-to-node mesh | up | 2020-11-25 | Established | -+--------------+-------------------+-------+------------+-------------+ -``` - -### Verify overlay configuration - -```bash -kubectl get ippools default-ipv4-ippool -o yaml -``` - -```yaml - ---- -spec: - ipipMode: Always - vxlanMode: Never -``` - -### Verify bgp learned routes - -```bash -ip r | grep bird -``` - -``` -192.168.66.128/26 via 10.0.0.12 dev tunl0 proto bird onlink -192.168.180.192/26 via 10.0.0.10 dev tunl0 proto bird onlink -blackhole 192.168.231.0/26 proto bird -``` - -### Verify BIRD routing table - -**Note**: The BIRD routing table gets pushed to node routing tables. - -```bash -kubectl exec -it -n calico-system calico-node-8cfc8 -- /bin/bash -``` - -``` -[root@ip-10-0-0-11 /] birdcl -BIRD v0.3.3+birdv1.6.8 ready. -bird> show route -0.0.0.0/0 via 10.0.0.1 on eth0 [kernel1 18:13:33] * (10) -10.0.0.0/24 dev eth0 [direct1 18:13:32] * (240) -10.0.0.1/32 dev eth0 [kernel1 18:13:33] * (10) -10.48.231.2/32 dev calieb874a8ef0b [kernel1 18:13:41] * (10) -10.48.231.1/32 dev caliaeaa173109d [kernel1 18:13:35] * (10) -10.48.231.0/26 blackhole [static1 18:13:32] * (200) -10.48.231.0/32 dev vxlan.calico [direct1 18:13:32] * (240) -10.48.180.192/26 via 10.0.0.10 on eth0 [Mesh_10_0_0_10 18:13:34] * (100/0) [i] - via 10.0.0.10 on eth0 [Mesh_10_0_0_12 18:13:41 from 10.0.0.12] (100/0) [i] - via 10.0.0.10 on eth0 [kernel1 18:13:33] (10) -10.48.66.128/26 via 10.0.0.12 on eth0 [Mesh_10_0_0_10 18:13:36 from 10.0.0.10] * (100/0) [i] - via 10.0.0.12 on eth0 [Mesh_10_0_0_12 18:13:41] (100/0) [i] - via 10.0.0.12 on eth0 [kernel1 18:13:36] (10) -``` - -### Capture traffic - -For example, - -```bash -sudo tcpdump -i calicofac0017c3 icmp -``` - -## Network policy - -### Verify existing Kubernetes network policies - -```bash -kubectl get networkpolicy --all-namespaces -``` - -``` -NAMESPACE NAME POD-SELECTOR AGE -client allow-ui 20m -client default-deny 4h51m -stars allow-ui 20m -stars backend-policy role=backend 20m -stars default-deny 4h51m -``` - -### Verify existing $[prodname] network policies - -```bash -calicoctl get networkpolicy --all-namespaces -o wide -``` - -``` -NAMESPACE NAME ORDER SELECTOR -calico-demo allow-busybox 50 app == 'porter' -client knp.default.allow-ui 1000 projectcalico.org/orchestrator == 'k8s' -client knp.default.default-deny 1000 projectcalico.org/orchestrator == 'k8s' -stars knp.default.allow-ui 1000 projectcalico.org/orchestrator == 'k8s' -stars knp.default.backend-policy 1000 projectcalico.org/orchestrator == 'k8s' -stars knp.default.default-deny 1000 projectcalico.org/orchestrator == 'k8s' -``` - -### Verify existing $[prodname] global network policies - -```bash -calicoctl get globalnetworkpolicy -o wide -``` - -``` -NAME ORDER SELECTOR -default-app-policy 100 -egress-lockdown 600 -default-node-policy 100 has(kubernetes.io/hostname) -nodeport-policy 100 has(kubernetes.io/hostname) -``` - -### Check policy selectors and order - -For example, - -```bash -calicoctl get np -n yaobank -o wide -``` - -If the selectors should match, check the endpoint IP and the node where it is running. 
For example, - -```bash -kubectl get pod -l app=customer -n yaobank -``` diff --git a/calico-enterprise_versioned_docs/version-3.19-2/operations/troubleshoot/component-logs.mdx b/calico-enterprise_versioned_docs/version-3.19-2/operations/troubleshoot/component-logs.mdx deleted file mode 100644 index 2c0cc96e2c..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/operations/troubleshoot/component-logs.mdx +++ /dev/null @@ -1,121 +0,0 @@ ---- -description: Where to find component logs. ---- - -# Component logs - -## Big picture - -View and collect $[prodname] logs. - -## Value - -It is useful to view logs to monitor component health and diagnose potential issues. - -## Concepts - -### $[nodecontainer] logs - -The $[nodecontainer] logs contain log output from the following subcomponents: - -- Per-node startup logic -- BGP agent -- Felix policy agent - -Components log either to disk within `/var/log/calico`, to stdout, or both. - -For components that log to disk, files are automatically rotated, and by default 10 files of 1MB each are kept. The current log file is called `current` and rotated files have @ followed by a timestamp detailing when the files was rotated in [tai64n](http://cr.yp.to/libtai/tai64.html#tai64n) format. - -## How to - -## View logs for a $[nodecontainer] instance - -You can view logs for a node using the `kubectl logs` command. This will show logs for all subcomponents of the given node. - -For example: - -``` -kubectl logs -n calico-system calico-node-xxxx -``` - -## View logs from the CNI plugin - -CNI plugin logs are not available through kubectl and are instead logged both to the host machine's disk as well as stderr. - -By default, these logs can be found at `/var/log/calico/cni/` on the host machine. - -The container runtime may also display the CNI plugin logs within its own log output. - -## Configure BGP agent log level - -BGP log level is configured via the [BGPConfiguration](../../reference/resources/bgpconfig.mdx) API, and can be one of the following values: - -- `Debug`: enables "debug all" logging for BIRD. The most verbose logging level. -- `Info`: enables logging for protocol state changes. This is the default log level. -- `Warning`: disables BIRD logging, emits warning level configuration logs only. -- `Error`: disables BIRD logging, emits error level configuration logs only. -- `Fatal`: disables BIRD logging, emits fatal level configuration logs only. - -To modify the BGP log level: - -1. Get the current bgpconfig settings. - - ```bash - kubectl get bgpconfiguration.projectcalico.org -o yaml > bgp.yaml - ``` - -1. Modify logSeverityScreen to the desired value. - - ```bash - vim bgp.yaml - ``` - - :::tip - - For a global change set the name to "default". - For a node-specific change set the name to the node name prefixed with "node.", e.g., "node.node-1". - - ::: - -1. Replace the current bgpconfig settings. - - ```bash - kubectl replace -f bgp.yaml - ``` - -## Configure Felix log level - -Felix log level is configured via the [FelixConfiguration](../../reference/resources/felixconfig.mdx) API, and can be one of the following values: - -- `Debug`: The most verbose logging level - for development and debugging. -- `Info`: The default log level. Shows important state changes. -- `Warning`: Shows warnings only. -- `Error`: Shows errors only. -- `Fatal`: Shows fatal errors only. - -To modify Felix's log level: - -1. Get the current felixconfig settings. 
- - ```bash - kubectl get felixconfiguration.projectcalico.org default -o yaml > felix.yaml - ``` - -1. Modify logSeverityScreen to desired value. - - ```bash - vim felixconfig.yaml - ``` - - :::tip - - For a global change set the name to "default". - For a node-specific change set the name to the node name prefixed with "node.", e.g., "node.node-1". - - ::: - -1. Replace the current felixconfig settings. - - ``` - kubectl replace -f felixconfig.yaml - ``` diff --git a/calico-enterprise_versioned_docs/version-3.19-2/operations/troubleshoot/index.mdx b/calico-enterprise_versioned_docs/version-3.19-2/operations/troubleshoot/index.mdx deleted file mode 100644 index a067f1b844..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/operations/troubleshoot/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: Troubleshooting, logs, and diagnostics. -hide_table_of_contents: true ---- - -# Troubleshooting - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/operations/troubleshoot/troubleshooting.mdx b/calico-enterprise_versioned_docs/version-3.19-2/operations/troubleshoot/troubleshooting.mdx deleted file mode 100644 index 06efe50bb1..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/operations/troubleshoot/troubleshooting.mdx +++ /dev/null @@ -1,168 +0,0 @@ ---- -description: View logs and diagnostics, common issues, and where to report issues in github. ---- - -# Troubleshooting and diagnostics - -## Logs and diagnostics - -To collect diagnostics, download and install `calicoctl` somewhere in your `$PATH`. We recommend installing it as a kubectl plugin by [following these directions](../clis/calicoctl/install.mdx#install-calicoctl-as-a-kubectl-plugin-on-a-single-host). - -Assuming you installed the binary as a kubectl plugin, you can then create a diagnostics bundle by running the following command: - -``` -kubectl calico cluster diags -``` - -By default the command collects all logs. Optionally, you can select only those logs that are newer than a relative duration (specified in seconds, minutes or hours). For example: - -``` -kubectl calico cluster diags --since=1h -``` - -To report a problem, contact Tigera Support. - -## Alert diagnostics - -The developer console can provide diagnostic information if you -have an issue with your alerts and need to contact Support. Please -execute the following queries and provide the output along with your -alert definition. - -``` -GET _watcher/watch/tigera_secure_ee.. -POST _watcher/watch/tigera_secure_ee../_execute -{ - "action_modes": { - "index_events": "force_simulate" - } -} -``` - -### Check BGP peer status - -If you have connectivity between containers on the same host, and between -containers and the Internet, but not between containers on different hosts, it -probably indicates a problem in your BGP configuration. - -Look at `calicoctl node status` on each host. It should include output like this: - -``` -Calico process is running. - -IPv4 BGP status -+--------------+-------------------+-------+----------+-------------+ -| PEER ADDRESS | PEER TYPE | STATE | SINCE | INFO | -+--------------+-------------------+-------+----------+-------------+ -| 172.17.8.102 | node-to-node mesh | up | 23:30:04 | Established | -+--------------+-------------------+-------+----------+-------------+ - -IPv6 BGP status -No IPv6 peers found. 
-``` - -Alternatively, you can create a [`CalicoNodeStatus` resource](../../reference/resources/caliconodestatus.mdx) to get BGP session status for the node. - -If you do not see this, please check the following. - -- Make sure there is IP connectivity between your hosts. - -- Make sure your network allows the requisite BGP traffic on TCP port 179. - -### Configure NetworkManager - -Configure [NetworkManager](https://help.ubuntu.com/community/NetworkManager) before -attempting to use $[prodname] networking. - -NetworkManager manipulates the routing table for interfaces in the default network -namespace where $[prodname] veth pairs are anchored for connections to containers. -This can interfere with the $[prodname] agent's ability to route correctly. - -The procedure for configuring NetworkManager to ignore $[prodname] interfaces -varies by Linux distribution. The following steps work best on Ubuntu systems. - -1. Create the following configuration file at `/etc/NetworkManager/conf.d/calico.conf`. - - ```conf - [keyfile] - unmanaged-devices=interface-name:cali*;interface-name:tunl*;interface-name:vxlan.calico;interface-name:vxlan-v6.calico;interface-name:wireguard.cali;interface-name:wg-v6.cali - ``` - -1. Restart NetworkManager. - - ```bash - sudo service network-manager stop - sudo service network-manager start - ``` - -1. Install $[prodname]. - -1. Check the interfaces that NetworkManager ignores. - - ```bash - nmcli dev status - ``` - - It should return output indicating that the `cali` and `tunl` interfaces - are `unmanaged`. - - If this does not to prevent NetworkManager from interfering with $[prodname] networking, try disabling NetworkManager. If disabling NetworkManager does not stop it from interfering with $[prodname] networking, you may need to remove NetworkManager. This will require manual network configuration. - -## Errors when running sudo calicoctl - -If you use `sudo` for commands, remember that your environment variables are not transferred to the `sudo` environment. You must run `sudo` with the `-E` flag to include your environment variables: - -```bash -sudo -E calicoctl node diags -``` - -or you can set environment variables for `sudo` commands like this: - -```bash -sudo DATASTORE_TYPE=kubernetes KUBECONFIG=~/.kube/config calicoctl node run -``` - -Also be aware that connection information can be specified as a config file rather than using environment variables. See [Installing calicoctl](../clis/calicoctl/install.mdx) -for details. - -## Error: $[nodecontainer] is not ready: BIRD is not ready: BGP not established with 10.0.0.1 - -In most cases, this "unready" status error in Kubernetes means that a particular peer is unreachable in the cluster. Check that BGP connectivity between the two peers is allowed in the environment. - -This error can also occur if inactive Node resources are configured for node-to-node mesh. To fix this, [decommission the stale nodes](../decommissioning-a-node.mdx). - -## Linux conntrack table is out of space - -A common problem on Linux systems is running out of space in the conntrack table, which can cause poor iptables performance. This can -happen if you run a lot of workloads on a given host, or if your workloads create a lot of TCP connections or bidirectional UDP streams. 
To avoid this problem, we recommend increasing the conntrack table size using the following commands: - -```bash -sysctl -w net.netfilter.nf_conntrack_max=1000000 -echo "net.netfilter.nf_conntrack_max=1000000" >> /etc/sysctl.conf -``` - -## Compliance report is not generating at expected time - -By design, reports are scheduled to generate 30 minutes after the specified end time. The reason for this is to allow a certain amount of -time to pass for all the relevant data within the specified start and end time to be fully processed and stored. This delay can be modified -by setting the `TIGERA_COMPLIANCE_JOB_START_DELAY` environment variable on the `compliance-controller` deployment to the -desired [Golang duration](https://godoc.org/time#Duration). - -## GlobalAlert reports error "Trying to create too many buckets" - -``` -"Trying to create too many buckets. Must be less than or equal to: [10000] but was [10001]. This limit can be set by changing the [search.max_buckets] cluster level setting." -``` - -The GlobalAlert system has a hard limit of 10000 aggregation keys per -query, and will fail to generate alerts if nested aggregations -result in the number of keys exceeding this limit. The “healthy“ status -of the GlobalAlert will be set to false until the number of aggregation -keys returned by the query no longer exceeds this limit. - -Careful selection of queries and `aggregateBy` keys will mitigate this issue. -GlobalAlerts should consider the size of the keyspace used in the -`aggregateBy` field and order from least expansive to most. For example: -Namespace should precede pod name. Avoid aggregating by source or destination -port unless the query selects specific ports. Ephemeral ports used by clients -number in the tens of thousands and a single host can trigger this condition. diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/api.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/api.mdx deleted file mode 100644 index 680e0df767..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/api.mdx +++ /dev/null @@ -1,12 +0,0 @@ ---- -description: Learn about the Tigera client library and how to use it. ---- - -# Tigera Client library - -$[prodname] provides and consumes a client library in Go that allows developers to work with $[prodname] resources. The -client library can be used to get, list, watch, create, delete and update custom resources, such as network policies much -like the [client-go project](https://github.com/kubernetes/client-go) does for native Kubernetes resources. - -To learn more about the Tigera client library and how to use it, see the Tigera API project [README](https://github.com/tigera/api/blob/master/README.md) or -the [github.com/tigera/api Go module page](https://pkg.go.dev/github.com/tigera/api). diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/architecture/data-path.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/architecture/data-path.mdx deleted file mode 100644 index b20e161252..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/architecture/data-path.mdx +++ /dev/null @@ -1,63 +0,0 @@ ---- -description: Learn how packets flow between workloads in a datacenter, or between a workload and the internet. ---- - -# 'The Calico Enterprise data path: IP routing and iptables' - -One of $[prodname]’s key features is how packets flow between workloads in a -data center, or between a workload and the Internet, without additional -encapsulation. 
- -In the $[prodname] approach, IP packets to or from a workload are routed and -firewalled by the Linux routing table and iptables or eBPF infrastructure on the -workload’s host. For a workload that is sending packets, $[prodname] ensures -that the host is always returned as the next hop MAC address regardless -of whatever routing the workload itself might configure. For packets -addressed to a workload, the last IP hop is that from the destination -workload’s host to the workload itself. - -![Calico datapath](/img/calico-enterprise/calico-datapath.png) - -Suppose that IPv4 addresses for the workloads are allocated from a -datacenter-private subnet of 10.65/16, and that the hosts have IP -addresses from 172.18.203/24. If you look at the routing table on a host: - -```bash -route -n -``` - -You will see something like this: - -``` -Kernel IP routing table -Destination Gateway Genmask Flags Metric Ref Use Iface -0.0.0.0 172.18.203.1 0.0.0.0 UG 0 0 0 eth0 -10.65.0.0 0.0.0.0 255.255.0.0 U 0 0 0 ns-db03ab89-b4 -10.65.0.21 172.18.203.126 255.255.255.255 UGH 0 0 0 eth0 -10.65.0.22 172.18.203.129 255.255.255.255 UGH 0 0 0 eth0 -10.65.0.23 172.18.203.129 255.255.255.255 UGH 0 0 0 eth0 -10.65.0.24 0.0.0.0 255.255.255.255 UH 0 0 0 tapa429fb36-04 -172.18.203.0 0.0.0.0 255.255.255.0 U 0 0 0 eth0 -``` - -There is one workload on this host with IP address 10.65.0.24, and -accessible from the host via a TAP (or veth, etc.) interface named -tapa429fb36-04. Hence there is a direct route for 10.65.0.24, through -tapa429fb36-04. Other workloads, with the .21, .22 and .23 addresses, -are hosted on two other hosts (172.18.203.126 and .129), so the routes -for those workload addresses are via those hosts. - -The direct routes are set up by a $[prodname] agent named Felix when it is -asked to provision connectivity for a particular workload. A BGP client -(such as BIRD) then notices those and distributes them – perhaps via a -route reflector – to BGP clients running on other hosts, and hence the -indirect routes appear also. - -## Is that all? - -As far as the static data path is concerned, yes. It’s just a -combination of responding to workload ARP requests with the host MAC, IP -routing and iptables or eBPF. There’s a great deal more to $[prodname] in terms of -how the required routing and security information is managed, and for -handling dynamic things such as workload migration – but the basic data -path really is that simple. diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/architecture/design/index.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/architecture/design/index.mdx deleted file mode 100644 index 717ac9f295..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/architecture/design/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: Deep dive into using Calico over Ethernet and IP fabrics. 
-hide_table_of_contents: true ---- - -# Network design - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/architecture/design/l2-interconnect-fabric.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/architecture/design/l2-interconnect-fabric.mdx deleted file mode 100644 index a797d36554..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/architecture/design/l2-interconnect-fabric.mdx +++ /dev/null @@ -1,117 +0,0 @@ ---- -description: Understand the interconnect fabric options in a Calico network. ---- - -# Calico over Ethernet fabrics - -Any technology that is capable of transporting IP packets can be used as the interconnect fabric in a $[prodname] network. This means that the standard tools used to transport IP, such as MPLS and Ethernet can be used in a $[prodname] network. - -The focus of this article is on Ethernet as the interconnect network. Most at-scale cloud operators have converted to IP fabrics, and that infrastructure will work for $[prodname] as well. However, the concerns that drove most of those operators to IP as the interconnection network in their pods are largely ameliorated by $[prodname], allowing Ethernet to be viably considered as a $[prodname] interconnect, even in large-scale deployments. - -## Concerns over Ethernet at scale - -It has been acknowledged by the industry for years that, beyond a certain size, classical Ethernet networks are unsuitable for production deployment. Although there have been [multiple](https://en.wikipedia.org/wiki/Provider_Backbone_Bridge_Traffic_Engineering) [attempts](https://web.archive.org/web/20150923231827/https://www.cisco.com/web/about/ac123/ac147/archived_issues/ipj_14-3/143_trill.html) [to address](https://en.wikipedia.org/wiki/Virtual_Private_LAN_Service) these issues, the scale-out networking community has largely abandoned Ethernet for anything other than providing physical point-to-point links in the networking fabric. The principle reasons for Ethernet failures at large scale are: - -- Large numbers of _endpoints_ ([note 1](#note-1)) - - Each switch in an Ethernet network must learn the path to all Ethernet endpoints that are connected to the Ethernet network. Learning this amount of state can become a substantial task when we are talking about hundreds of thousands of _endpoints_. - -- High rate of _churn_ or change in the network - - With that many endpoints, most of them being ephemeral (such as virtual machines or containers), there is a large amount of _churn_ in the network. That load of re-learning paths can be a substantial burden on the control plane processor of most Ethernet switches. - -- High volumes of broadcast traffic - - As each node on the Ethernet network must use Broadcast packets to locate peers, and many use broadcast for other purposes, the resultant packet replication to each and every endpoint can lead to _broadcast storms_ in large Ethernet networks, effectively consuming most, if not all resources in the network and the attached endpoints. - -- Spanning tree - - Spanning tree is the protocol used to keep an Ethernet network from forming loops. The protocol was designed in the era of smaller, simpler networks, and it has not aged well. As the number of links and interconnects in an Ethernet network goes up, many implementations of spanning tree become more _fragile_. 
Unfortunately, when spanning tree fails in an Ethernet network, the effect is a catastrophic loop or partition (or both) in the network, and, in most cases, difficult to troubleshoot or resolve. - -Although many of these issues are crippling at _VM scale_ (tens of thousands of endpoints that live for hours, days, weeks), they will be absolutely lethal at _container scale_ (hundreds of thousands of endpoints that live for seconds, minutes, days). - -If you weren't ready to turn off your Ethernet data center network before this, I bet you are now. Before you do, however, let's look at how $[prodname] can mitigate these issues, even in very large deployments. - -## How does $[prodname] tame the Ethernet daemons? - -First, let's look at how $[prodname] uses an Ethernet interconnect fabric. It's important to remember that an Ethernet network _sees_ nothing on the other side of an attached IP router, the Ethernet network just _sees_ the router itself. This is why Ethernet switches can be used at Internet peering points, where large fractions of Internet traffic is exchanged. The switches only see the routers from the various ISPs, not those ISPs' customers' nodes. We leverage the same effect in $[prodname]. - -To take the issues outlined above, let's revisit them in a $[prodname] -context. - -- Large numbers of endpoints - - In a $[prodname] network, the Ethernet interconnect fabric only sees the routers/compute servers, not the - endpoint. In a standard cloud model, where there is tens of VMs per server (or hundreds of containers), this reduces the number of nodes that the Ethernet sees (and has to learn) by one to two orders - of magnitude. Even in very large pods (say twenty thousand servers), the Ethernet network would still only see a few tens of thousands of endpoints. Well within the scale of any competent data center - Ethernet top of rack (ToR) switch. - -- High rate of churn - - In a classical Ethernet data center fabric, there is a _churn_ event each time an endpoint is created, - destroyed, or moved. In a large data center, with hundreds of thousands of endpoints, this _churn_ could run into tens of events per second, every second of the day, with peaks easily in the hundreds or thousands of events per second. In a $[prodname] network, however, the _churn_ is very low. The only event that would lead to _churn_ - orders of magnitude more than what is normally experienced), there would only be two thousand events per **day**. Any switch that cannot handle that volume of change in the network should not be used - for any application. - -- High volume of broadcast traffic - - Because the first (and last) hop for any traffic in a $[prodname] network is an IP hop, and IP hops terminate - broadcast traffic, there is no endpoint broadcast network in the Ethernet fabric, period. In fact, the only broadcast traffic that should be seen in the Ethernet fabric is the ARPs of the compute servers locating each other. If the traffic pattern is fairly consistent, the steady-state ARP rate should be almost zero. Even in a pathological case, the ARP rate should be well within normal accepted boundaries. - -- Spanning tree - - Depending on the architecture chosen for the Ethernet fabric, it may even be possible to turn off spanning tree. However, even if it is left on, due to the reduction in node count, and reduction in churn, most competent spanning tree implementations should be able to handle the load without stress. 
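One practical corollary of the first and last hop being an IP hop on the compute servers is that a $[prodname] cluster on a pure Ethernet fabric typically needs no overlay at all. A minimal sketch of an unencapsulated IP pool is shown below; the pool name and CIDR are examples only, and this assumes the `projectcalico.org/v3` API is available in your cluster.

```yaml
# Illustrative only: an IP pool with both IP-in-IP and VXLAN encapsulation
# disabled, suitable when the fabric routes pod traffic natively.
apiVersion: projectcalico.org/v3
kind: IPPool
metadata:
  name: ethernet-fabric-pool
spec:
  cidr: 10.65.0.0/16
  ipipMode: Never
  vxlanMode: Never
  natOutgoing: true
```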
- -With these considerations in mind, it should be evident that an Ethernet interconnect fabric for $[prodname] is not only possible but practical, and should be seriously considered as the interconnect fabric for a $[prodname] -network. - -As mentioned in the IP fabric article, an IP fabric is also quite feasible for $[prodname], but there are more considerations that must be taken into account. The Ethernet fabric option has fewer architectural considerations in its design. - -## A brief note about Ethernet topology - -As mentioned elsewhere in the $[prodname] documentation, because $[prodname] can use most of the standard IP tooling, some interesting options regarding fabric topology become possible. - -We assume that an Ethernet fabric for $[prodname] would most likely be constructed as a _leaf/spine_ architecture. Other options are possible, but the _leaf/spine_ is the predominant architectural model in use in -scale-out infrastructure today. - -Because $[prodname] is an IP routed fabric, a $[prodname] network can use [ECMP](https://en.wikipedia.org/wiki/Equal-cost_multi-path_routing) to distribute traffic across multiple links (instead of using Ethernet techniques such as MLAG). By leveraging ECMP load balancing on the $[prodname] compute servers, it is possible to build the fabric out of multiple _independent_ leaf/spine planes using no technologies other than IP routing in the $[prodname] nodes, and basic Ethernet switching in the interconnect fabric. These planes would operate completely independently and could be designed such that they would not share a fault domain. This would allow for the catastrophic failure of one (or more) plane(s) of Ethernet interconnect fabric without the loss of the pod (the failure would just decrease the amount of interconnect bandwidth in the pod). This is a gentler failure mode than the pod-wide IP or Ethernet failure that is possible with today's designs. - -You might find this [Facebook blog post](https://engineering.fb.com/2014/11/14/production-engineering/introducing-data-center-fabric-the-next-generation-facebook-data-center-network/) - on their fabric approach interesting. A graphic to visualize the idea is shown below. - -![Ethernet spine planes](/img/calico-enterprise/l2-spine-planes.png) - -The endpoints are not shown in this diagram, and they would be unaware of anything in the fabric (as noted above). - -In this diagram, each ToR is segmented into four logical switches (possibly by using 'port VLANs') ([note 2](#note-2)), and each compute server has a connection to each of those logical switches. We will identify those logical switches by their color. Each ToR would then have a blue, green, orange, and red logical switch. Those 'colors' would be members of a given _plane_, so there would be a blue plane, a green plane, an orange plane, and a red plane. Each plane would have a dedicated spine switch, and each ToR in a given plane would be connected to its spine, and only its spine. - -Each plane would constitute an IP network, so the blue plane would be 2001:db8:1000::/36, the green would be 2001:db8:2000::/36, and the orange and red planes would be 2001:db8:3000::/36 and 2001:db8:4000::/36 respectively ([note 3](#note-3)). - -Each IP network (plane) requires its own BGP route reflectors. Those route reflectors need to be peered with each other within the plane, but the route reflectors in different planes do not need to be peered with one another. Therefore, a fabric of four planes would have four route reflector meshes. Each compute server, border router, _etc._ would need -to be a route reflector client of at least one route reflector in each plane, and preferably two or more in each plane.
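-As a concrete (and purely illustrative) sketch, the per-plane peerings could be expressed on the $[prodname] side as a pair of BGPPeer resources per plane. The example below covers only the blue plane and assumes $[prodname]'s default AS number (64512) and hypothetical route reflector addresses inside the blue plane's 2001:db8:1000::/36 network; the green, orange, and red planes would each need an equivalent pair pointing at their own route reflectors.
-
-```yaml
-# Illustrative only: the route reflector addresses and resource names are hypothetical.
-apiVersion: projectcalico.org/v3
-kind: BGPPeer
-metadata:
-  name: blue-plane-rr-1
-spec:
-  peerIP: 2001:db8:1000::101 # first route reflector in the blue plane
-  asNumber: 64512 # same AS as the compute servers, so this is an iBGP session
----
-apiVersion: projectcalico.org/v3
-kind: BGPPeer
-metadata:
-  name: blue-plane-rr-2
-spec:
-  peerIP: 2001:db8:1000::102 # second route reflector, for redundancy
-  asNumber: 64512
-```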
The following diagram visualizes the route reflector environment. - -![route-reflector](/img/calico-enterprise/l2-rr-spine-planes.png) - -These route reflectors could be dedicated hardware connected to the spine switches (or the spine switches themselves), or physical or virtual route reflectors connected to the necessary logical leaf switches (blue, green, orange, and red). The latter could even be a route reflector running on a compute server and connected directly to the correct plane link, rather than routed through the vRouter, to avoid the chicken-and-egg problem that would occur if the route reflector were "behind" the $[prodname] network. - -Other physical and logical configurations and counts are, of course, possible; this is just an example. - -In the logical configuration, each compute server would then have an address on each plane's subnet, and announce its endpoints on each subnet. If ECMP is then turned on, the compute servers would distribute the load across all planes. - -If a plane were to fail (say due to a spanning tree failure), then only that one plane would fail. The remaining planes would stay running. - -### Footnotes - -### Note 1 - -In this document (and in all $[prodname] documents) we tend to use the term _endpoint_ to refer to a virtual machine, container, appliance, bare metal server, or any other entity that is connected to a $[prodname] network. If we are referring to a specific type of endpoint, we will call that out (such as referring to the behavior of VMs as distinct from containers). - -### Note 2 - -We are using logical switches in this example. Physical ToRs could also be used, or a mix of the two (say 2 logical switches hosted on each physical switch). - -### Note 3 - -We use IPv6 here purely as an example. IPv4 would be configured similarly. diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/architecture/design/l3-interconnect-fabric.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/architecture/design/l3-interconnect-fabric.mdx deleted file mode 100644 index f94a762ce7..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/architecture/design/l3-interconnect-fabric.mdx +++ /dev/null @@ -1,282 +0,0 @@ ---- -description: Understand considerations for implementing interconnect fabrics with Calico. ---- - -# Calico over IP fabrics - -$[prodname] provides an end-to-end IP network that interconnects the endpoints ([note 1](#note-1)) in a scale-out or cloud environment. To do that, it needs an _interconnect fabric_ to provide the physical networking layer on which $[prodname] operates ([note 2](#note-2)). - -Although $[prodname] is designed to work with any underlying interconnect fabric that can support IP traffic, the fabric with the fewest considerations attached to its implementation is an Ethernet fabric as -discussed in [Calico over Ethernet fabrics](l2-interconnect-fabric.mdx). - -In most cases, the Ethernet fabric is the appropriate choice, but there are infrastructures where L3 (an IP fabric) has already been deployed, or will be deployed, and it makes sense for $[prodname] to operate in those -environments.
- -However, because $[prodname] is, itself, a routed infrastructure, there are more engineering, architecture, and operations considerations that have to be weighed when running $[prodname] with an IP routed interconnection -fabric. We will briefly outline those in the rest of this article. That said, $[prodname] operates equally well with Ethernet or IP interconnect fabrics. - -## Background - -### Basic $[prodname] architecture overview - -A description of the $[prodname] architecture can be found in our [architectural overview](../overview.mdx). However, a brief discussion of the routing and data paths is useful for -the discussion. - -In a $[prodname] network, each compute server acts as a router for all of the endpoints that are hosted on that compute server. We call that function a vRouter. The data path is provided by the Linux kernel, the control -plane by a BGP protocol server, and the management plane by $[prodname]'s on-server agent, _Felix_. - -Each endpoint can only communicate through its local vRouter, and the first and last _hop_ in any $[prodname] packet flow is an IP router hop through a vRouter. Each vRouter uses BGP to announce the endpoints attached to it to all the other vRouters and other routers on the infrastructure fabric, usually with BGP route reflectors to -increase scale. A discussion of why we use BGP can be found in [Why BGP?](https://www.tigera.io/blog/why-bgp/). - -Access control lists (ACLs) enforce security (and other) policy as directed by whatever cloud orchestrator is in use. There are other components in the $[prodname] architecture, but they are irrelevant to the interconnect network fabric discussion. - -### Overview of current common IP scale-out fabric architectures - -There are two approaches to building an IP fabric for a scale-out infrastructure. However, both of them, to date, have assumed that the edge router in the infrastructure is the top of rack (TOR) switch. In the $[prodname] model, that function is pushed to the compute server itself. - -The two approaches are: - -**Routing infrastructure is based on some form of IGP** - -Due to the limitations in scale of IGP networks, the $[prodname] team does not believe that using an IGP to distribute endpoint reachability information will adequately scale in a $[prodname] environment. However, it is possible to use a combination of IGP and BGP in the interconnect fabric, where an IGP communicates the path to the _next-hop_ router (in $[prodname], this is often the destination compute server) and BGP is used to distribute the actual next-hop for a given endpoint. This is a valid model, and, in fact, is the most common approach in a widely distributed IP network (say a carrier's backbone network). The design of these networks is somewhat complex though, and will not be addressed further in this article ([note 3](#note-3)). - -**Routing infrastructure is based entirely on BGP** - -In this model, the IP network is "tight enough" or has a small enough diameter that BGP can be used to distribute endpoint routes, and the paths to the next-hops for those routes are known to all of the routers in the network (in a $[prodname] network this includes the compute servers). This is the network model that this note will address. - -In this article, we will cover the second option because it is more common in the scale-out world. - -### BGP-only interconnect fabrics - -There are multiple methods to build a BGP-only interconnect fabric. We will focus on three models, each with two widely viable variations.
There are other options, and we will briefly touch on why we didn't include some of them in [Other Options](#other-options). - -The first two models are: - -- A BGP fabric where each of the TOR switches (and their subsidiary compute servers) is a unique Autonomous System (AS) - and they are interconnected via either an Ethernet switching plane provided by the spine switches in a [leaf/spine](http://bradhedlund.com/2012/10/24/video-a-basic-introduction-to-the-leafspine-data-center-networking-fabric-design/) architecture, or via a set of spine switches, each of which is also a unique AS. We'll refer to this as the _AS per rack_ model. This model is detailed in [IETF RFC 7938](https://datatracker.ietf.org/doc/html/rfc7938). - -- A BGP fabric where each of the compute servers is a unique AS, and the TOR switches make up a transit AS. We'll refer to this as the _AS per server_ model. - -Each of these models can have either an Ethernet or an IP spine. In the case of an Ethernet spine, each spine switch provides an isolated Ethernet connection _plane_ as in the $[prodname] Ethernet interconnect fabric model, and each TOR switch is connected to each spine switch. - -The other variant is where each spine switch is a unique AS, and each TOR switch BGP peers with each spine switch. In both cases, the TOR switches use ECMP to load-balance traffic between all available spine switches. - -### BGP network design considerations - -Contrary to popular opinion, BGP is actually a fairly simple protocol. For example, the BGP configuration on a $[prodname] compute server is approximately sixty lines long, not counting comments. The perceived complexity is due to the things that you can _do_ with BGP. Many uses of BGP involve complex policy rules, where the behavior of BGP can be modified to meet technical (or business, financial, political, etc.) requirements. A default $[prodname] network does not venture into those areas ([note 4](#note-4)), and is therefore fairly straightforward. - -That said, there are a few design rules for BGP that need to be kept in mind when designing an IP fabric that will interconnect nodes in a $[prodname] network. These BGP design requirements _can_ be worked around, if necessary, but doing so takes the designer out of the standard BGP _envelope_ and should only be done by an implementer who is _very_ comfortable with advanced BGP design. - -These considerations are: - -- AS continuity or _AS puddling_ - - Any router in an AS _must_ be able to communicate with any other router in that same AS without transiting another AS. - -- Next hop behavior - - By default, a BGP router does not change the _next hop_ of a route if it is peering with another router in the same AS. The inverse is also true: a BGP router will set itself as the _next hop_ of a route if it is peering with a router in another AS. - -- Route reflection - - All BGP routers in a given AS must _peer_ with all the other routers in that AS. This is referred to as a _complete BGP mesh_. This can become problematic as the number of routers in the AS scales up. The use of _route reflectors_ reduces the need for the complete BGP mesh. However, route reflectors also have scaling considerations. - -- Endpoints - - In a $[prodname] network, each endpoint is a route. Hardware networking platforms are constrained by the number of routes they can learn. This is usually in the range of 10,000s to 100,000s of routes.
Route aggregation can help, but that is usually dependent on the capabilities of the scheduler used by the orchestration software (_e.g._ OpenStack). - -A deeper discussion of these considerations can be found in the [IP Fabric Design Considerations](#ip-fabric-design-considerations). - -The designs discussed below address these considerations. - -### The AS Per Rack model - -This model is the closest to the model suggested by [IETF RFC 7938](https://datatracker.ietf.org/doc/html/rfc7938). - -As mentioned earlier, there are two versions of this model, one with a set of Ethernet planes interconnecting the ToR switches, and the other where the core planes are also routers. The following diagrams may be useful for the discussion. - -![](/img/calico-enterprise/l3-fabric-diagrams-as-rack-l2-spine.png) - -The diagram above shows the **AS per rack model** where the ToR switches are physically meshed via a set of Ethernet switching planes. - -![](/img/calico-enterprise/l3-fabric-diagrams-as-rack-l3-spine.png) - -The diagram above shows the **AS per rack model** where the ToR switches are physically meshed via a set of discrete BGP spine routers, each in their own AS. - -In this approach, every ToR-ToR or ToR-Spine (in the case of an AS per spine) link is an eBGP peering, which means that there is no route-reflection possible (using standard BGP route reflectors) _north_ of the ToR switches. - -If the L2 spine option is used, the result is that each ToR must peer with every other ToR switch in the cluster (which could be hundreds of peers). - -If the AS per spine option is used, then each ToR only has to peer with each spine (there are usually somewhere between two and sixteen spine switches in a pod). However, the spine switches must peer with all ToR -switches (again, that would be hundreds, but most spine switches have more control plane capacity than the average ToR, so this might be more scalable in many circumstances). - -Within the rack, the configuration is the same for both variants, and is somewhat different from the configuration north of the ToR. - -Every router within the rack, which, in the case of $[prodname], is every compute server, shares the same AS as the ToR that it is connected to. That connection is in the form of an Ethernet switching layer. Each router in the rack must be directly connected to enable the AS to remain contiguous. The ToR's _router_ function is then connected to that Ethernet switching layer as well. The actual configuration of this is dependent on the ToR in use, but usually it means that the ports that are connected to the compute servers are treated as _subnet_ or _segment_ ports, and then the ToR's _router_ function has a single interface into that subnet. - -This configuration allows each compute server to connect to each other compute server in the rack without going through the ToR router, but it will, of course, go through the ToR switching function. The compute servers and the ToR router could all be directly meshed, or a route reflector could be used within the rack, either hosted on the ToR -itself, or as a virtual function hosted on one or more compute servers within the rack. - -The ToR, as the eBGP router, redistributes all of the routes from other ToRs as well as routes external to the data center to the compute servers that are in its AS, and announces all of the routes from within -the AS (rack) to the other ToRs and the larger world. This means that each compute server will see the ToR as the next hop for all external routes, and the individual compute servers are the next hop for all routes internal to the rack.
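-As a rough, non-authoritative sketch of how the in-rack peering could be expressed on the $[prodname] side, the hypothetical BGPPeer resource below tells every node labelled as belonging to rack 1 to peer with that rack's ToR. The rack label, ToR address, and AS number are invented for illustration; the matching AS assignment would also have to be made for the nodes themselves.
-
-```yaml
-# Illustrative only: label, address, and AS number are hypothetical.
-apiVersion: projectcalico.org/v3
-kind: BGPPeer
-metadata:
-  name: rack-1-tor
-spec:
-  nodeSelector: rack == 'rack-1' # every compute server in rack 1
-  peerIP: 10.1.1.1 # the ToR's router interface on the rack subnet
-  asNumber: 65001 # the AS shared by rack 1's servers and its ToR
-```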
### The AS per Compute Server model - -This model takes the concept of an AS per rack to its logical conclusion. In the earlier referenced [IETF RFC 7938](https://datatracker.ietf.org/doc/html/rfc7938), the assumption in the overall model is that the ToR is the first tier aggregating and routing element. In $[prodname], the ToR, if it is an L3 router, is actually the second tier. Remember, in $[prodname], the compute server is always the first/last router for an endpoint, and is also the first/last point of aggregation. - -Therefore, if we follow the architecture of the RFC, the compute server, not the ToR, should be the AS boundary. The differences can be seen in the following two diagrams. - -![](/img/calico-enterprise/l3-fabric-diagrams-as-server-l2-spine.png) - -The diagram above shows the _AS per compute server model_ where the ToR -switches are physically meshed via a set of Ethernet switching planes. - -![](/img/calico-enterprise/l3-fabric-diagrams-as-server-l3-spine.png) - -The diagram above shows the _AS per compute server model_ where the ToR switches are physically connected to a set of independent routing planes. - -As can be seen in these diagrams, there are still the same two variants as in the _AS per rack_ model, one where the spine switches provide a set of independent Ethernet planes to interconnect the ToR switches, and the other where that is done by a set of independent routers. - -The real difference in this model is that the compute servers as well as the ToR switches are all independent autonomous systems. To make this work at scale, the use of four byte AS numbers, as discussed in [RFC 4893](http://www.faqs.org/rfcs/rfc4893.html), is required. Without -using four byte AS numbering, the total number of ToRs and compute servers in a $[prodname] fabric would be limited to the approximately five thousand available private AS ([note 5](#note-5)) numbers. If four byte AS numbers are used, there are approximately ninety-two million private AS numbers available. This should be sufficient for any given $[prodname] fabric. - -The other difference in this model _vs._ the AS per rack model is that no route reflectors are used, as all BGP peerings are eBGP. In this case, each compute server in a given rack peers with its ToR switch, which is also acting as an eBGP router. For two servers within the same rack to communicate, they will be routed through the ToR. Therefore, each server will have one peering to each ToR it is connected to, and each ToR will have a peering with each compute server that it is connected to (normally, all the compute servers in the rack). - -The inter-ToR connectivity considerations are the same in scale and scope as in the AS per rack model.
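-To make the per-server AS assignment concrete, the sketch below shows what it could look like on a single node's $[prodname] node resource, using a 4-byte AS number from the private range. The node name, address, and AS number are hypothetical; in practice the node resource is created automatically and only its BGP fields would be adjusted.
-
-```yaml
-# Illustrative only: in the AS-per-server model, every compute server
-# gets its own (4-byte, private-range) AS number.
-apiVersion: projectcalico.org/v3
-kind: Node
-metadata:
-  name: compute-server-001
-spec:
-  bgp:
-    asNumber: 4200000001 # 4-byte private AS number
-    ipv4Address: 10.1.1.11/24 # this server's address on its rack subnet
-```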
### The Downward Default model - -The final model is a bit different. Whereas, in the previous models, all of the routers in the infrastructure carry full routing tables, and leave their AS paths intact, this model ([note 6](#note-6)) removes the AS numbers at -each stage of the routing path. This is done to prevent routes from other nodes in the network from being rejected as coming from the _local_ AS (since the source and the destination of such a route would share the same AS number). - -The following diagram shows the AS relationships in this model. - -![](/img/calico-enterprise/l3-fabric-downward-default.png) - -In the diagram above, we are showing that all $[prodname] nodes share the same AS number, as do all ToR switches. However, those ASs are different (_A1_ is not the same network as _A2_, even though they both share the -same AS number _A_). - -Although the use of a single AS for all ToR switches, and another for all compute servers, simplifies deployment (standardized configuration), the real benefit comes in the offloading of the routing tables in the ToR -switches. - -In this model, each router announces all of its routes to its upstream peer (the $[prodname] routers to their ToR, the ToRs to the spine switches). However, in return, the upstream router only announces a default route. -In this case, a given $[prodname] router only has routes for the endpoints that are locally hosted on it, as well as the default from the ToR. Because the ToR is the only path from the $[prodname] nodes to the rest of the -network, this matches reality. The same happens between the ToR switches and the spine. This means that the ToR only has to install the routes that are for endpoints that are hosted on its downstream $[prodname] nodes. -Even if we were to host 200 endpoints per $[prodname] node, and stuff 80 $[prodname] nodes in each rack, that would still limit the routing table on the ToR to a maximum of 16,000 entries (well within the capabilities of -even the most modest of switches). - -Because the default originates at the spine, there is no chance for a downward announced route to originate from the recipient's AS, preventing the **AS puddling** problem. - -There is one (minor) drawback to this model, in that all traffic that is destined for an invalid destination (the destination IP does not exist) will be forwarded to the spine switches before it is dropped. - -It should also be noted that the spine switches do need to carry all of the $[prodname] network routes, just as they do in the routed spines in the previous examples. In short, this model imposes no more load on the -spines than they already would have, and substantially reduces the amount of routing table space used on the ToR switches. It also reduces the number of routes in the $[prodname] nodes, but, as we have discussed -before, that is not a concern in most deployments as the amount of memory consumed by a full routing table in $[prodname] is a fraction of the total memory available on a modern compute server. - -## Recommendation - -The $[prodname] team recommends the use of the [AS per rack](#the-as-per-rack-model) model if the resultant routing table size can be accommodated by the ToR and spine switches, remembering to account for projected growth. - -If there is concern about the route table size in the ToR switches, the $[prodname] team recommends the [Downward Default](#the-downward-default-model) model. - -If there are concerns about both the spine and ToR switch route table capacity, or there is a desire to run a very simple L2 fabric to connect the $[prodname] nodes, then the user should consider the Ethernet fabric as -detailed in [Calico over Ethernet fabrics](l2-interconnect-fabric.mdx). - -If you are interested in the AS per compute server, the $[prodname] team would be very interested in discussing the deployment of that model.
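-Whichever model is chosen, a common first step on the $[prodname] side is to turn off the default full node-to-node BGP mesh so that the explicit ToR or route reflector peerings described above carry the routes instead. A minimal sketch, assuming the AS per rack model and a hypothetical default AS number that individual racks or nodes would then override:
-
-```yaml
-# Illustrative only: disables the default node-to-node mesh so that
-# explicit peerings (such as the per-rack ToR peering above) take over.
-apiVersion: projectcalico.org/v3
-kind: BGPConfiguration
-metadata:
-  name: default
-spec:
-  nodeToNodeMeshEnabled: false
-  asNumber: 65001 # hypothetical default; per-node AS settings override it
-```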
- -## Other options - -In the way the physical and logical connectivity is laid out in this article, and in the [Ethernet fabric](l2-interconnect-fabric.mdx), the next hop router for a given route is always directly connected to the router receiving that route. This makes another protocol to distribute the next hop routes unnecessary. - -However, in many (or most) WAN BGP networks, the routers within a given AS may not be directly adjacent. Therefore, a router may receive a route with a next hop address that it is not directly adjacent to. In those cases, an IGP, such as OSPF or IS-IS, is used by the routers within a given AS to determine the path to the BGP next hop route. - -There may be $[prodname] architectures where there are similar models where the routers within a given AS are not directly adjacent. In those models, the use of an IGP in $[prodname] may be warranted. The configuration -of those protocols is, however, beyond the scope of this technical -note. - -### IP fabric design considerations - -**AS puddling** - -The first consideration is that an AS must be kept contiguous. This means that any two nodes in a given AS must be able to communicate without traversing any other AS. If this rule is not observed, the effect is often referred to as _AS puddling_ and the network will _not_ function correctly. - -A corollary of that rule is that any two administrative regions that share the same AS number are in the same AS, even if that was not the desire of the designer. BGP has no way of identifying if an AS is local or foreign other than the AS number. Therefore, re-use of an AS number for two _networks_ that are not directly connected, but only connected -through another _network_ or AS, will not work without a lot of policy changes to the BGP routers. - -Another corollary of that rule is that a BGP router will not propagate a route to a peer if the route has an AS in its path that is the same AS as the peer. This prevents loops from forming in the network. The effect of this is to prevent two routers in the same AS from transiting another router (either in that AS or not). - -**Next hop behavior** - -Another consideration is based on the differences between iBGP and eBGP. BGP operates in two modes: if two routers are BGP peers but share the same AS number, then they are considered to be in an _internal_ BGP (or iBGP) peering relationship. If they are members of different ASs, then they are in an _external_ or eBGP relationship. - -BGP's original design model was that all BGP routers within a given AS would know how to get to one another (via static routes, IGP ([note 7](#note-7)) routing protocols, or the like), and that routers in different ASs would -not know how to reach one another unless they were directly connected. - -Based on that design point, routers in an iBGP peering relationship assume that they do not transit traffic for other iBGP routers in a given AS (i.e. A can communicate with C, and therefore will not need to route through B), and therefore, do not change the _next hop_ attribute in BGP ([note 8](#note-8)). - -A router with an eBGP peering, on the other hand, assumes that its eBGP peer will not know how to reach the next hop route, and will substitute its own address in the next hop field. This is often referred -to as _next hop self_.
- -In the $[prodname] [Ethernet fabric](l2-interconnect-fabric.mdx) -model, all of the compute servers (the routers in a $[prodname] network) are directly connected over one or more Ethernet network(s) and therefore are directly reachable. In this case, a router in the $[prodname] network -does not need to set _next hop self_ within the $[prodname] fabric. - -The models we present in this article ensure that all routes that may traverse a non-$[prodname] router are eBGP routes, and therefore _next hop self_ is automatically set correctly. If a deployment of $[prodname] in -an IP interconnect fabric does not satisfy that constraint, then _next hop self_ must be appropriately configured. - -**Route reflection** - -As mentioned above, BGP expects that all of the iBGP routers in a network can see (and speak) directly to one another; this is referred to as a _BGP full mesh_. In small networks this is not a problem, but it does become interesting as the number of routers increases. For example, if you have 99 BGP routers in an AS and wish to add one more, you would -have to configure the peering to that new router on each of the 99 existing routers. Not only is this a problem at configuration time, it also means that each router is maintaining 100 protocol adjacencies, which can start being a drain on constrained resources in a router. While this might be _interesting_ at 100 routers, it becomes an impossible task -with thousands or tens of thousands of routers (the potential size of a $[prodname] network). - -Conveniently, large scale/Internet scale networks solved this problem almost 20 years ago by deploying BGP route reflection as described in [RFC 1966](http://www.faqs.org/rfcs/rfc1966.html). This is a technique supported by almost all BGP routers today. In a large network, a number of route reflectors ([note 9](#note-9)) are evenly distributed and each iBGP router is _peered_ with one or more route reflectors (usually 2 or 3). Each route reflector can handle tens or hundreds of route reflector clients (in $[prodname]'s case, the compute servers), depending on the route reflector being used. Those route reflectors are, in turn, peered with each other. This means that there are an order of magnitude fewer route reflectors that need to be completely meshed, and each route reflector client is only configured to peer to 2 or 3 route reflectors. This is much easier to manage. - -Other route reflector architectures are possible, but those are beyond the scope of this document. - -**Endpoints** - -The final consideration is the number of endpoints in a $[prodname] network. In the [Ethernet fabric](l2-interconnect-fabric.mdx) case, the number of endpoints is not constrained by the interconnect fabric, as the interconnect fabric does not _see_ the actual endpoints; it only _sees_ the vRouters, or compute servers. This is not the case in an IP fabric, however. IP networks forward by using the -destination IP address in the packet, which, in $[prodname]'s case, is the destination endpoint. That means that the IP fabric nodes (ToR switches and/or spine switches, for example) must know the routes to each endpoint in the network. They learn this by participating as route reflector clients in the BGP mesh, just as the $[prodname] vRouter/compute server does. - -However, unlike a compute server, which has a relatively unconstrained amount of memory, a physical switch is either memory constrained or quite expensive. This means that the physical switch has a limit on how many _routes_ it can handle.
The current industry standard for modern commodity switches is in the range of 128,000 routes. This means that, -without other routing _tricks_, such as aggregation, a $[prodname] installation that uses an IP fabric will be limited to the routing table size of its constituent network hardware, with a reasonable upper limit -today of 128,000 endpoints. - -### Footnotes - -### Note 1 - -In $[prodname]'s terminology, an endpoint is an IP address and interface. It could refer to a VM, a container, or even a process bound to an IP address running on a bare metal server. - -### Note 2 - -This interconnect fabric provides the connectivity between the $[prodname] (v)Router (in almost all cases, the compute servers) nodes, as well as any other elements in the fabric (_e.g._ bare metal servers, border routers, and appliances). - -### Note 3 - -If there is interest in a discussion of this approach, please let us know. The $[prodname] team could either arrange a discussion, or if there was enough interest, publish a follow-up tech note. - -### Note 4 - -However those tools are available if a given $[prodname] instance needs to utilize those policy constructs. - -### Note 5 - -The two byte AS space reserves approximately the last five thousand AS numbers for private use. There is no technical reason why other AS numbers could not be used. However the re-use of global scope AS numbers within a private infrastructure is strongly discouraged. The chance for routing system failure or incorrect routing is substantial, and not restricted to the entity that is doing the reuse. - -### Note 6 - -We first saw this design in a customer's lab, and thought it innovative enough to share (we asked them first, of course). Similar **AS Path Stripping** approaches are used in ISP networks, however. - -### Note 7 - -An Interior Gateway Protocol is a local routing protocol that does not cross an AS boundary. The primary IGPs in use today are OSPF and IS-IS. While complex iBGP networks still use IGP routing protocols, a data center is normally a fairly simple network, even if it has many routers in it. Therefore, in the data center case, the use of an IGP can often be disposed of. - -### Note 8 - -A Next hop is an attribute of a route announced by a routing protocol. In simple terms a route is defined by a _target_, or the destination that is to be reached, and a _next hop_, which is the next router in the path to reach that target. There are many other characteristics in a route, but those are well beyond the scope of this post. - -### Note 9 - -A route reflector may be a physical router, a software appliance, or simply a BGP daemon. It only processes routing messages, and does not pass actual data plane traffic. However, some route reflectors are co-resident on regular routers that do pass data plane traffic. Although they may sit on one platform, the functions are distinct. diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/architecture/index.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/architecture/index.mdx deleted file mode 100644 index cbaca7c5bd..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/architecture/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: Calico Enterprise component architecture diagram, network design, and the data path between workloads. 
-hide_table_of_contents: true ---- - -# Architecture - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/architecture/overview.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/architecture/overview.mdx deleted file mode 100644 index 22550a7ffd..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/architecture/overview.mdx +++ /dev/null @@ -1,251 +0,0 @@ ---- -description: Understand the Calico Enterprise components and the basics of BGP networking. ---- - -# Component architecture - -import ModalImage from 'react-modal-image'; - -## About $[prodname] architecture - -The following diagram shows the components that comprise a Kubernetes on-premises deployment using the $[prodname] CNI for networking and network policy. - -**Tip**: For best visibility, right-click on the image below and select "Open image in new tab" - - - -Calico open-source components are the foundation of $[prodname]. $[prodname] provides value-added components for visibility and troubleshooting, compliance, policy lifecycle management, threat detection, and multi-cluster management. - -## $[prodname] components - -- [calicoq](#calicoq) -- [Compliance](#compliance) -- [Linseed API and ES gateway](#linseed-api-and-es-gateway) -- [Intrusion detection](#intrusion-detection) -- [kube-controllers](#kube-controllers) -- [Manager](#manager) -- [Packet capture API](#packet-capture-api) -- [Prometheus API service](#prometheus-api-service) - -## Bundled third-party components - -- [fluentd](#fluentd) -- [Elasticsearch and Kibana](#elasticsearch-and-kibana) -- [Prometheus](#prometheus) - -## Calico open-source components - -- [API server](#api-server) -- [Felix](#felix) -- [BIRD](#bird) -- [calicoctl](#calicoctl) -- [calico-node](#calico-node) -- [confd](#confd) -- [CNI plugin](#cni-plugin) -- [Datastore plugin](#datastore-plugin) -- [IPAM plugin](#ipam-plugin) -- [Typha](#typha) - -## Kubernetes components - -- [Kubernetes API server](#kubernetes-api-server) -- [kubectl](#kubectl) - -## Cloud orchestrator plugins (not pictured) - -Translates the orchestrator APIs for managing networks to the $[prodname] data-model and datastore. - -For cloud providers, $[prodname] has a separate plugin for each major cloud orchestration platform. This allows $[prodname] to tightly bind to the orchestrator, so users can manage the $[prodname] network using their orchestrator tools. When required, the orchestrator plugin provides feedback from the $[prodname] network to the orchestrator. For example, providing information about Felix liveness, and marking specific endpoints as failed if network setup fails. - -## $[prodname] components - -### calicoq - -**Main task**: A command line tool for policy inspection to ensure policies are configured as intended. For example, you can determine which endpoints a selector or policy matches, or which policies apply to an endpoint. Requires a separate installation. [calicoq](../clis/calicoq/index.mdx). - -### Compliance - -**Main task**: Generates compliance reports for the Kubernetes cluster. Report are based on archived flow and audit logs for $[prodname] resources, plus any audit logs you’ve configured for Kubernetes resources in the Kubernetes API server. 
Compliance reports provide the following high-level information: - -- Protection - - Endpoints explicitly protected using ingress or egress policy -- Policies and services - - Policies and services associated with endpoints - - Policy audit logs -- Traffic - - Allowed ingress/egress traffic to/from namespaces, and to/from the internet - -Compliance is composed of these components: - -**compliance-snapshotter** - -Handles listing of required Kubernetes and $[prodname] configuration and pushes snapshots to Elasticsearch. Snapshots give you visibility into configuration changes, and how the cluster-wide configuration has evolved within a reporting interval. - -**compliance-reporter** - -Handles report generation. Reads configuration history from Elasticsearch and determines the time evolution of cluster-wide configuration, including relationships between policies, endpoints, services, and networksets. Data is then passed through a zero-trust aggregator to determine the "worst-case outliers" in the reporting interval. - -**compliance-controller** - -Reads report configuration, and manages creation, deletion, and monitoring of report generation jobs. - -**compliance-server** - -Provides the API for listing, downloading, and rendering reports, and RBAC by performing authentication and authorization through the Kubernetes API server. RBAC is determined from the user’s RBAC for the GlobalReportType and GlobalReport resources. - -**compliance-benchmarker** - -A daemonset that runs the CIS Kubernetes Benchmark checks on each node so you can see if Kubernetes is securely deployed. - -### Linseed API and ES gateway - -The Linseed API uses mTLS to connect to clients, and provides an API to access Elasticsearch data. The ES gateway proxies requests to Elasticsearch, and provides backwards-compatibility for managed clusters that run versions before 3.17. - -### Intrusion detection - -**Main task**: Consists of a controller that handles integrations with threat intelligence feeds and $[prodname] custom alerts, and an installer that installs the Kibana dashboards for viewing jobs through the Kibana UI. - -### kube-controllers - -**Main task**: Monitors the Kubernetes API and performs actions based on cluster state. The $[prodname] kube-controllers container includes these controllers: - -- Node -- Service -- Federated services -- Authorization -- Managed cluster (for management clusters only) - -### Manager - -**Main task**: Provides network traffic visibility, centralized multi-cluster management, threat-defense troubleshooting, policy lifecycle management, and compliance using a browser-based UI for multiple roles/stakeholders. [Manager](../installation/api.mdx#manager). - -### Packet capture API - -**Main task**: Retrieves capture files (pcap format) generated by a packet capture for use with network protocol analysis tools like Wireshark. The packet capture feature is installed by default in all cluster types. Packet capture data is visible in the web console’s service graph. - -### Prometheus API service - -**Main task**: A proxy querying service that checks a user’s token RBAC to validate its scope and forwards the query to the Prometheus monitoring component. - -## Bundled third-party components - -### Elasticsearch and Kibana - -**Main task**: Built-in third-party search engine and visualization dashboard, which provide logs for visibility into workloads and for troubleshooting Kubernetes clusters. Installed and configured by default. [Elasticsearch](../../observability/index.mdx).
- -### fluentd - -**Main task**: Collects and forwards $[prodname] logs (flows, DNS, L7) to Elasticsearch. Open source data collector for unified logging. [fluentd open source](https://www.fluentd.org/). - -### Prometheus - -**Main task**: The default monitoring component for collecting $[prodname] policy metrics. It can also be used to collect metrics on calico/nodes from Felix. Prometheus is an open-source toolkit for systems monitoring and alerting. [Prometheus metrics](../component-resources/node/felix/prometheus.mdx), and [Configure Prometheus](../../operations/monitor/index.mdx). - -## Calico open-source components - -### API server - -**Main task**: Allows users to manage $[prodname] resources such as policies and tiers through `kubectl` or the Kubernetes API. `kubectl` has significant advantages over `calicoctl`, including audit logging, RBAC using Kubernetes Roles and RoleBindings, and not needing to provide privileged Kubernetes CRD access to anyone who needs to manage resources. [API server](../installation/api.mdx#apiserver). - -### BIRD - -**Main task**: Gets routes from Felix and distributes them to BGP peers on the network for inter-host routing. Runs on each node that hosts a Felix agent. Open source internet routing daemon. [BIRD](../component-resources/node/configuration.mdx#content-main). - -The BGP client is responsible for: - -- **Route distribution** - - When Felix inserts routes into the Linux kernel FIB, the BGP client distributes them to other nodes in the deployment. This ensures efficient traffic routing for the deployment. - -- **BGP route reflector configuration** - - BGP route reflectors are often configured for large deployments rather than a standard BGP client. (Standard BGP requires that every BGP client be connected to every other BGP client in a mesh topology, which is difficult to maintain.) - For redundancy, you can seamlessly deploy multiple BGP route reflectors. Note that BGP route reflectors are involved only in control of the network: endpoint data does not pass through them. When the $[prodname] BGP client advertises - routes from its FIB to the route reflector, the route reflector advertises those routes to the other nodes in the deployment. - -### calicoctl - -**Main task**: Command line interface used largely during pre-installation for CRUD operations on $[prodname] objects. `kubectl` is the recommended CLI for CRUD operations. calicoctl is available on any host with network access to the $[prodname] datastore as either a binary or a container. Requires separate installation. [calicoctl](../clis/calicoctl/index.mdx). - -### calico-node - -**Main task**: Bundles key components that are required for networking containers with $[prodname]: - -- Felix -- BIRD -- confd - -The calico repository contains the Dockerfile for calico-node, along with various configuration files to configure and “glue” these components together. In addition, we use runit for logging (svlogd) and init (runsv) services. [calico-node](../component-resources/node/configuration.mdx). - -### CNI plugin - -**Main task**: Provides $[prodname] networking for Kubernetes clusters. - -The Calico CNI plugin allows you to use Calico networking for any orchestrator that makes use of the CNI networking specification. The Calico binary that presents this API to Kubernetes is called the CNI plugin, and must be installed on every node in the Kubernetes cluster.
It is configured through the standard [CNI configuration mechanism](https://github.com/containernetworking/cni/blob/master/SPEC.md#network-configuration). For configuration details, see [Calico CNI plugin](../component-resources/configuration.mdx). - -### confd - -**Main task**: Monitors the $[prodname] datastore for changes to BGP configuration and global defaults such as AS number, logging levels, and IPAM information. An open source, lightweight configuration management tool. - -Confd dynamically generates BIRD configuration files based on updates to data in the datastore. When the configuration file changes, confd triggers BIRD to load the new files. [Configure confd](../component-resources/node/configuration.mdx#content-main), and [confd project](https://github.com/kelseyhightower/confd). - -### Datastore plugin - -**Main task**: The datastore for the $[prodname] CNI plugin. The Kubernetes API datastore: - -- Is simple to manage because it does not require an extra datastore -- Uses Kubernetes RBAC to control access to Calico resources -- Uses Kubernetes audit logging to generate audit logs of changes to $[prodname] resources - -### Felix - -**Main task**: Programs routes and ACLs, and anything else required on the host to provide desired connectivity for the endpoints on that host. Runs on each machine that hosts endpoints. Runs as an agent daemon. [Felix resource](../resources/felixconfig.mdx). - -Depending on the specific orchestrator environment, Felix is responsible for: - -- **Interface management** - - Programs information about interfaces into the kernel so the kernel can correctly handle the traffic from that endpoint. In particular, it ensures that the host responds to ARP requests from each workload with the MAC of the host, and enables IP forwarding for interfaces that it manages. It also monitors interfaces to ensure that the programming is applied at the appropriate time. - -- **Route programming** - - Programs routes to the endpoints on its host into the Linux kernel FIB (Forwarding Information Base). This ensures that packets destined for those endpoints that arrive at the host are forwarded accordingly. - -- **ACL programming** - - Programs ACLs into the Linux kernel to ensure that only valid traffic can be sent between endpoints, and that endpoints cannot circumvent $[prodname] security measures. - -- **State reporting** - - Provides network health data. In particular, it reports errors and problems when configuring its host. This data is written to the datastore so it is visible to other components and operators of the network. - -:::note - -`$[nodecontainer]` can be run in _policy only mode_ where Felix runs without BIRD and confd. This provides policy management without route distribution between hosts, and is used for deployments like managed cloud providers. - -::: - -### IPAM plugin - -**Main task**: Uses $[prodname]’s IP pool resource to control how IP addresses are allocated to pods within the cluster. It is the default plugin used by most $[prodname] installations. It is one of the $[prodname] [CNI plugins](../component-resources/configuration.mdx).
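-As an illustration of the IP pool resource the IPAM plugin allocates from, a minimal, hypothetical pool might look like the following. The name, CIDR, and block size are examples only, not values to copy verbatim.
-
-```yaml
-# Illustrative only: a pod address pool that calico-ipam can allocate from.
-apiVersion: projectcalico.org/v3
-kind: IPPool
-metadata:
-  name: example-ipv4-pool
-spec:
-  cidr: 192.168.0.0/16 # pod addresses are carved out of this range
-  blockSize: 26 # nodes are assigned /26 blocks from the pool on demand
-  natOutgoing: true # SNAT traffic leaving the pool for external networks
-```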
### Typha - -**Main task**: Increases scale by reducing each node’s impact on the datastore. Runs as a daemon between the datastore and instances of Felix. Installed by default, but not configured. [Typha description](https://github.com/projectcalico/typha), and [Typha component](../component-resources/typha/index.mdx). - -Typha maintains a single datastore connection on behalf of all of its clients, such as Felix and confd. It caches the datastore state and deduplicates events so that they can be fanned out to many listeners. Because one Typha instance can support hundreds of Felix instances, it reduces the load on the datastore by a large factor. And because Typha can filter out updates that are not relevant to Felix, it also reduces Felix’s CPU usage. In a high-scale (100+ node) Kubernetes cluster, this is essential because the number of updates generated by the API server scales with the number of nodes. - -## Kubernetes components - -### Kubernetes API server - -**Main task**: A Kubernetes component that validates and configures data for the API objects (for example, pods, services, and others). Requests for $[prodname] API resources are proxied through its aggregation layer to the $[prodname] API server. - -### kubectl - -**Main task**: The recommended command line interface for CRUD operations on $[prodname] and Calico objects. [kubectl](https://kubernetes.io/docs/reference/kubectl/overview/). diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/attribution.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/attribution.mdx deleted file mode 100644 index 156f3e6ae6..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/attribution.mdx +++ /dev/null @@ -1,3394 +0,0 @@ ---- -description: Attribution report ---- - -# Attribution - -## $[prodname] attribution report - -24 Feb 2021 - -$[prodname] incorporates various open source software. The following open source components used in the product, and their respective licenses, are provided for your information. -In the table below, you can see the details of each project and the license associated with it.
| Project | Artifact Name | LICENSE |
| --- | --- | --- |
| abbrev | abbrev-1.1.1.tgz | ISC |
| algoliasearch | algoliasearch.umd-4.2.0.js | MIT |
| ansi-regex | ansi-regex-2.1.1.tgz | MIT |
| ansi-styles | ansi-styles-3.2.1.tgz | MIT |
| @types/anymatch | anymatch-1.3.1.tgz | MIT |
| app-policy | app-policy-v3.16.6 | Tigera Proprietary |
| argparse | argparse-1.0.9.tgz | MIT |
| arr-diff | arr-diff-4.0.0.tgz | MIT |
| arr-flatten | arr-flatten-1.1.0.tgz | MIT |
| arr-union | arr-union-3.1.0.tgz | MIT |
| array-unique | array-unique-0.3.2.tgz | MIT |
| asap | asap-2.0.6.tgz | MIT |
| assign-symbols | assign-symbols-1.0.0.tgz | MIT |
| atob | atob-2.1.2.tgz | Apache 2.0 |
| atomicwrites | atomicwrites-1.4.0-py2.py3-none-any.whl | MIT |
| attrs | attrs-20.3.0-py2.py3-none-any.whl | MIT |
| avsdf-base | avsdf-base-1.0.0.tgz | MIT |
| babel-standalone | babel-6.26.0.min.js | MIT |
| babel-runtime | babel-runtime-6.26.0.tgz | MIT |
| backports.functools-lru-cache | backports.functools_lru_cache-1.6.1-py2.py3-none-any.whl | MIT |
| balanced-match | balanced-match-0.4.2.tgz | MIT |
| balanced-match | balanced-match-1.0.0.tgz | MIT |
| base | base-0.11.2.tgz | MIT |
| beautifulsoup4 | beautifulsoup4-4.9.3-py2-none-any.whl | MIT |
| big.js | big.js-5.2.2.tgz | MIT |
| boolbase | boolbase-1.0.0.tgz | ISC |
| twitter-bootstrap | bootstrap-3.3.7.min.js | MIT |
| bootstrap | bootstrap-3.4.1.tgz | MIT |
| brace-expansion | brace-expansion-1.1.11.tgz | MIT |
| braces | braces-2.3.2.tgz | MIT |
| bs4 | bs4-0.0.1.tar.gz | MIT |
| buffer-from | buffer-from-1.1.1.tgz | MIT |
| cache-base | cache-base-1.0.1.tgz | MIT |
| cachetools | cachetools-3.1.1-py2.py3-none-any.whl | MIT |
| projectcalico | calico-v3.17.1 | Apache 2.0 |
| projectcalico | calicoctl-v3.17.1 | Apache 2.0 |
| camel-case | camel-case-4.1.1.tgz | MIT |
| certifi | certifi-2020.12.5-py2.py3-none-any.whl | Mozilla 2.0 |
| chain-function | chain-function-1.0.0.tgz | MIT |
| chalk | chalk-2.4.2.tgz | MIT |
| chardet | chardet-3.0.4-py2.py3-none-any.whl | LGPL 3.0 |
| chardet | chardet-4.0.0-py2.py3-none-any.whl | LGPL 3.0 |
| chevrotain | chevrotain-6.5.0.tgz | Apache 2.0 |
| cidr-regex | cidr-regex-2.0.10.tgz | BSD 2 |
| class-transformer | class-transformer-0.3.1.tgz | MIT |
| class-utils | class-utils-0.3.6.tgz | MIT |
| class-validator | class-validator-0.9.1.tgz | MIT |
| classnames | classnames-2.2.5.tgz | MIT |
| classnames | classnames-2.2.6.tgz | MIT |
| clean-css | clean-css-4.2.3.tgz | MIT |
| clipboard.js | clipboard-2.0.0.min.js | MIT |
| cni-plugin | cni-plugin-v3.16.6 | Tigera Proprietary |
| @babel/code-frame | code-frame-7.10.1.tgz | MIT |
| codemirror | codemirror-5.57.0.js | MIT |
| codemirror | codemirror-5.57.0.tgz | MIT |
| collection-visit | collection-visit-1.0.0.tgz | MIT |
| color-convert | color-convert-1.9.3.tgz | MIT |
| color-name | color-name-1.1.3.tgz | MIT |
| commander | commander-2.13.0.tgz | MIT |
| commander | commander-2.20.3.tgz | MIT |
| commander | commander-4.1.1.tgz | MIT |
| component-emitter | component-emitter-1.3.0.tgz | MIT |
| concat-map | concat-map-0.0.1.tgz | MIT |
| configparser | configparser-4.0.2-py2.py3-none-any.whl | MIT |
| connected-react-router | connected-react-router-6.5.2.tgz | MIT |
| contextlib2 | contextlib2-0.6.0.post1-py2.py3-none-any.whl | Python 2.0 |
| copy-descriptor | copy-descriptor-0.1.1.tgz | MIT |
| @popperjs/core | core-2.4.4.tgz | MIT |
| core-js | core-js-1.2.7.tgz | MIT |
| core-js | core-js-2.5.1.tgz | MIT |
| core-js | core-js-2.5.7.tgz | MIT |
| core-js | core-js-3.6.5.tgz | MIT |
| cose-base | cose-base-1.0.3.tgz | MIT |
| create-react-class | create-react-class-15.6.2.tgz | MIT |
| css-box-model | css-box-model-1.1.1.tgz | MIT |
| css-select | css-select-1.2.0.tgz | BSD 2 |
| css-what | css-what-2.1.3.tgz | BSD 2 |
| @ungap/custom-elements | custom-elements-0.1.12.tgz | ISC |
| cytoscape | cytoscape-3.15.2.tgz | MIT |
| cytoscape | cytoscape-3.18.0.min.js | MIT |
| cytoscape-avsdf | cytoscape-avsdf-1.0.0.tgz | MIT |
| cytoscape-cise | cytoscape-cise-1.0.0.tgz | MIT |
| cytoscape-context-menus | cytoscape-context-menus-4.0.0.tgz | MIT |
| cytoscape-cose-bilkent | cytoscape-cose-bilkent-4.1.0.tgz | MIT |
| cytoscape-dagre-cluster-fix | cytoscape-dagre-cluster-fix-2.2.5.tgz | MIT |
    cytoscape-expand-collapsecytoscape-expand-collapse-4.0.0.tgzMIT
    cytoscape-fcosecytoscape-fcose-1.2.3.tgzMIT
    cytoscape-layerscytoscape-layers-2.1.0.tgzMIT
    cytoscape-navigatorcytoscape-navigator-2.0.1.tgzMIT
    cytoscape-poppercytoscape-popper-1.0.7.jsMIT
    cytoscape-poppercytoscape-popper-1.0.7.tgzMIT
    d3d3-5.5.0.tgzBSD 3
    d3-arrayd3-array-1.2.1.tgzBSD 3
    d3-arrayd3-array-1.2.4.tgzBSD 3
    d3-axisd3-axis-1.0.8.tgzBSD 3
    d3-brushd3-brush-1.0.4.tgzBSD 3
    d3-chordd3-chord-1.0.4.tgzBSD 3
    d3-collectiond3-collection-1.0.4.tgzBSD 3
    d3-collectiond3-collection-1.0.7.tgzBSD 3
    d3-colord3-color-1.0.3.tgzBSD 3
    d3-contourd3-contour-1.2.0.tgzBSD 3
    d3-dispatchd3-dispatch-1.0.3.tgzBSD 3
    d3-dragd3-drag-1.2.1.tgzBSD 3
    d3-dsvd3-dsv-1.0.8.tgzBSD 3
    d3-eased3-ease-1.0.3.tgzBSD 3
    d3-fetchd3-fetch-1.1.0.tgzBSD 3
    d3-forced3-force-1.1.0.tgzBSD 3
    d3-formatd3-format-1.2.2.tgzBSD 3
    d3-geod3-geo-1.10.0.tgzBSD 3
    d3-hierarchyd3-hierarchy-1.1.6.tgzBSD 3
    d3-interpolated3-interpolate-1.1.6.tgzBSD 3
    d3-interpolated3-interpolate-1.3.2.tgzBSD 3
    d3-pathd3-path-1.0.5.tgzBSD 3
    d3-polygond3-polygon-1.0.3.tgzBSD 3
    d3-quadtreed3-quadtree-1.0.3.tgzBSD 3
    d3-randomd3-random-1.1.0.tgzBSD 3
    d3-sankey-circulard3-sankey-circular-0.34.0.tgzMIT
    d3-scaled3-scale-2.0.0.tgzBSD 3
    d3-scaled3-scale-2.1.2.tgzBSD 3
    d3-scale-chromaticd3-scale-chromatic-1.3.0.tgzBSD 3
    d3-selectiond3-selection-1.3.0.tgzBSD 3
    d3-shaped3-shape-1.2.0.tgzBSD 3
    d3-shaped3-shape-1.2.3.tgzBSD 3
    d3-shaped3-shape-1.3.7.tgzBSD 3
    d3-timed3-time-1.0.8.tgzBSD 3
    d3-time-formatd3-time-format-2.1.1.tgzBSD 3
    d3-timerd3-timer-1.0.7.tgzBSD 3
    d3-transitiond3-transition-1.1.1.tgzBSD 3
    d3-voronoid3-voronoi-1.1.2.tgzBSD 3
    d3-zoomd3-zoom-1.7.1.tgzBSD 3
    dagredagre-0.7.4.jsMIT
    dagre-cluster-fixdagre-cluster-fix-0.9.3.tgzMIT
    debugdebug-2.6.9.tgzMIT
    decimal.js-lightdecimal.js-light-2.5.0.tgzMIT
    decode-uri-componentdecode-uri-component-0.2.0.tgzMIT
    deepdiffdeepdiff-3.3.0-py2-none-any.whlMIT
    deepmergedeepmerge-2.1.1.tgzMIT
    define-propertiesdefine-properties-1.1.2.tgzMIT
    define-propertiesdefine-properties-1.1.3.tgzMIT
    define-propertydefine-property-0.2.5.tgzMIT
    define-propertydefine-property-1.0.0.tgzMIT
    define-propertydefine-property-2.0.2.tgzMIT
    diffdiff-3.5.0.tgzBSD 3
    diff2htmldiff2html-2.4.0.tgzMIT
    dom-converterdom-converter-0.2.0.tgzMIT
    dom-helpersdom-helpers-3.3.1.tgzMIT
    dom-helpersdom-helpers-3.4.0.tgzMIT
    dom-serializerdom-serializer-0.2.2.tgzMIT
    dom-walkdom-walk-0.1.2.tgzMIT
    domelementtypedomelementtype-1.3.1.tgzBSD 2
    domelementtypedomelementtype-2.0.1.tgzBSD 2
    domhandlerdomhandler-2.4.2.tgzBSD 2
    domutilsdomutils-1.5.1.tgzBSD 2
    domutilsdomutils-1.7.0.tgzBSD 2
    dot-casedot-case-3.0.3.tgzMIT
    elasticsearchelasticsearch-6.8.1-py2.py3-none-any.whlApache 2.0
    elementary-circuits-directed-graphelementary-circuits-directed-graph-1.2.0.tgzMIT
    emojis-listemojis-list-3.0.0.tgzMIT
    encodingencoding-0.1.12.tgzMIT
    entitiesentities-1.1.2.tgzBSD 2
    entitiesentities-2.0.3.tgzBSD 2
    es-abstractes-abstract-1.17.5.tgzMIT
    es-to-primitivees-to-primitive-1.2.1.tgzMIT
    es5-shimes5-shim-4.3.1.jsMIT
    escape-string-regexpescape-string-regexp-1.0.5.tgzMIT
    esprimaesprima-4.0.0.tgzBSD 2
    expand-bracketsexpand-brackets-2.1.4.tgzMIT
    extend-shallowextend-shallow-2.0.1.tgzMIT
    extend-shallowextend-shallow-3.0.2.tgzMIT
    extglobextglob-2.0.4.tgzMIT
    fast-levenshteinfast-levenshtein-2.0.6.tgzMIT
    fbjsfbjs-0.8.16.tgzMIT
    felixfelix-v3.17.2Tigera Proprietary
    file-saverfile-saver-2.0.1.tgzMIT
    fill-rangefill-range-4.0.0.tgzMIT
    @fortawesome/fontawesome-common-typesfontawesome-common-types-0.2.32.tgzMIT
    @fortawesome/fontawesome-svg-corefontawesome-svg-core-1.2.10.tgzMIT
    for-infor-in-1.0.2.tgzMIT
    foreachforeach-2.0.5.tgzMIT
    fork-ts-checker-webpack-pluginfork-ts-checker-webpack-plugin-4.1.6.tgzMIT
    formikformik-2.1.3.tgzMIT
    fragment-cachefragment-cache-0.2.1.tgzMIT
    @fortawesome/free-brands-svg-iconsfree-brands-svg-icons-5.6.1.tgzCC BY 4.0
    @fortawesome/free-regular-svg-iconsfree-regular-svg-icons-5.15.1.tgzCC BY 4.0
    @fortawesome/free-solid-svg-iconsfree-solid-svg-icons-5.6.1.tgzCC BY 4.0
    funcsigsfuncsigs-1.0.2-py2.py3-none-any.whlApache 2.0
    function-bindfunction-bind-1.1.1.tgzMIT
    get-valueget-value-2.0.6.tgzMIT
    github.com/alecthomas/participlegithub.com/alecthomas/participle-v0.3.0MIT
    github.com/apparentlymart/go-cidr/cidrgithub.com/apparentlymart/go-cidr/cidr-v1.0.1MIT
    github.com/aquasecurity/kube-bench/checkgithub.com/aquasecurity/kube-bench/check-v0.0.34Apache 2.0
    github.com/araddon/dateparsegithub.com/araddon/dateparse-262228af701ebf3932b8b8488da6781b9d585c88MIT
    github.com/avast/retry-gogithub.com/avast/retry-go-v2.2.0MIT
    github.com/aws/aws-lambda-go/eventsgithub.com/aws/aws-lambda-go/events-v1.13.3Apache 2.0
    github.com/aws/aws-sdk-go/awsgithub.com/aws/aws-sdk-go/aws-v1.25.8Apache 2.0
    github.com/bmizerany/patgithub.com/bmizerany/pat-6226ea591a40176dd3ff9cd8eff81ed6ca721a00MIT
    github.com/bronze1man/gostrongswanvicigithub.com/bronze1man/gostrongswanvici-27d02f80ba4008de552efb746b3f6eaa7718b518MIT
    github.com/buger/jsonparsergithub.com/buger/jsonparser-v1.0.0MIT
    github.com/burntsushi/tomlgithub.com/burntsushi/toml-v0.3.1MIT
    github.com/caimeo/iniflagsgithub.com/caimeo/iniflags-ef4ae6c5cd79d20db0b18bc5ebd8657fac7260e5BSD 2
    github.com/cloudflare/cfssl/loggithub.com/cloudflare/cfssl/log-v1.4.1BSD 2
    github.com/containernetworking/cni/libcnigithub.com/containernetworking/cni/libcni-v0.8.0Apache 2.0
    github.com/containernetworking/plugins/pkg/hnsgithub.com/containernetworking/plugins/pkg/hns-v0.8.5Apache 2.0
    github.com/coreos/go-oidcgithub.com/coreos/go-oidc-v2.1.0Apache 2.0
    github.com/coreos/go-semver/semvergithub.com/coreos/go-semver/semver-v0.3.0Apache 2.0
    github.com/davecgh/go-spew/spewgithub.com/davecgh/go-spew/spew-v1.1.1ISC
    github.com/docker/docker/api/typesgithub.com/docker/docker/api/types-v1.13.1Apache 2.0
    github.com/docker/docker/clientgithub.com/docker/docker/client-v1.13.1Apache 2.0
    github.com/docker/go-connections/natgithub.com/docker/go-connections/nat-v0.4.0Apache 2.0
    github.com/docopt/docopt-gogithub.com/docopt/docopt-go-ee0de3bc6815ee19d4a46c7eb90f829db0e014b1MIT
    github.com/elastic/go-elasticsearch/v7github.com/elastic/go-elasticsearch/v7-v7.3.0Apache 2.0
    github.com/envoyproxy/data-plane-api/envoy/api/v2/coregithub.com/envoyproxy/data-plane-api/envoy/api/v2/core-ffd420ef8a9ad148642236aa6d89e2855b41c821Apache 2.0
    github.com/fsnotify/fsnotifygithub.com/fsnotify/fsnotify-v1.4.9BSD 3
    github.com/gavv/monotimegithub.com/gavv/monotime-30dba43534243e3484a34676a0f068d12b989f84Apache 2.0
    github.com/getlantern/deepcopygithub.com/getlantern/deepcopy-v1Apache 2.0
    github.com/ghodss/yamlgithub.com/ghodss/yaml-v1.0.0BSD 3
    github.com/go-ini/inigithub.com/go-ini/ini-v1.43.0Apache 2.0
    github.com/go-logr/logrgithub.com/go-logr/logr-v0.3.0Apache 2.0
    github.com/go-openapi/specgithub.com/go-openapi/spec-v0.19.3Apache 2.0
    github.com/go-sql-driver/mysqlgithub.com/go-sql-driver/mysql-v1.4.1Mozilla 2.0
    github.com/gofrs/flockgithub.com/gofrs/flock-v0.8.0BSD 3
    github.com/gogo/googleapis/google/rpcgithub.com/gogo/googleapis/google/rpc-v1.2.0Apache 2.0
    github.com/gogo/protobuf/protogithub.com/gogo/protobuf/proto-v1.3.1BSD 3
    github.com/golang-collections/collections/stackgithub.com/golang-collections/collections/stack-604e922904d35e97f98a774db7881f049cd8d970MIT
    github.com/google/go-cmp/cmpgithub.com/google/go-cmp/cmp-v0.4.0BSD 3
    github.com/google/gofuzzgithub.com/google/gofuzz-v1.1.0Apache 2.0
    github.com/google/gopacketgithub.com/google/gopacket-v1.1.18BSD 3
    github.com/google/netstack/tcpip/headergithub.com/google/netstack/tcpip/header-55fcc16cd0eb096d8418f7bc5162483c31a4e82bApache 2.0
    github.com/hashicorp/go-versiongithub.com/hashicorp/go-version-v1.2.1Mozilla 2.0
    github.com/hashicorp/golang-lrugithub.com/hashicorp/golang-lru-v0.5.1Mozilla 2.0
    github.com/hashicorp/yamuxgithub.com/hashicorp/yamux-2f1d1f20f75d5404f53b9edf6b53ed5505508675Mozilla 2.0
    github.com/howeyc/fsnotifygithub.com/howeyc/fsnotify-v0.9.0BSD 3
    github.com/hpcloud/tailgithub.com/hpcloud/tail-v1.0.0MIT
    github.com/ishidawataru/sctpgithub.com/ishidawataru/sctp-00ab2ac2db07a138417639ef3f39672c65dbb9a0BSD 3
    github.com/jarcoal/httpmockgithub.com/jarcoal/httpmock-v1.0.5MIT
    github.com/jinzhu/copiergithub.com/jinzhu/copier-v0.1.0MIT
    github.com/jmespath/go-jmespathgithub.com/jmespath/go-jmespath-0.3.0Apache 2.0
    github.com/joho/godotenvgithub.com/joho/godotenv-v1.3.0MIT
    github.com/jpillora/backoffgithub.com/jpillora/backoff-v1.0.0MIT
    github.com/json-iterator/gogithub.com/json-iterator/go-v1.1.10MIT
    github.com/juju/clockgithub.com/juju/clock-9c5c9712527c7986f012361e7d13756b4d99543dLGPL 3.0
    github.com/juju/errorsgithub.com/juju/errors-3fe23663418fc1d724868c84f21b7519bbac7441LGPL 3.0
    github.com/juju/mutexgithub.com/juju/mutex-d21b13acf4bfd8a8b0482a3a78e44d98880b40d3LGPL 3.0
    github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/utilsgithub.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/utils-v1.1.0Apache 2.0
    github.com/kardianos/osextgithub.com/kardianos/osext-2bc1f35cddc0cc527b4bc3dce8578fc2a6c11384BSD 3
    github.com/kelseyhightower/envconfiggithub.com/kelseyhightower/envconfig-v1.4.0MIT
    github.com/kelseyhightower/memkvgithub.com/kelseyhightower/memkv-v0.1.1MIT
    github.com/konsorten/go-windows-terminal-sequencesgithub.com/konsorten/go-windows-terminal-sequences-v1.0.1MIT
    github.com/lestrrat-go/file-rotatelogsgithub.com/lestrrat-go/file-rotatelogs-v2.4.0MIT
    github.com/libp2p/go-reuseportgithub.com/libp2p/go-reuseport-v0.0.1ISC
    github.com/lithammer/dedentgithub.com/lithammer/dedent-v1.1.0MIT
    github.com/mailru/easyjsongithub.com/mailru/easyjson-v0.7.0MIT
    github.com/masterminds/spriggithub.com/masterminds/sprig-v2.19.0MIT
    github.com/mcuadros/go-versiongithub.com/mcuadros/go-version-92cdf37c5b7579ebaf7a036da94b40995972088dMIT
    github.com/microsoft/hcsshimgithub.com/microsoft/hcsshim-v0.8.6MIT
    github.com/mipearson/rfwgithub.com/mipearson/rfw-6f0a6f3266ba1058df9ef0c94cda1cecd2e62852MIT
    github.com/mitchellh/go-homedirgithub.com/mitchellh/go-homedir-v1.1.0MIT
    github.com/modern-go/concurrentgithub.com/modern-go/concurrent-1.0.3Apache 2.0
    github.com/modern-go/reflect2github.com/modern-go/reflect2-v1.0.1Apache 2.0
    github.com/natefinch/atomicgithub.com/natefinch/atomic-a62ce929ffcc871a51e98c6eba7b20321e3ed62dMIT
    github.com/nmrshll/go-cpgithub.com/nmrshll/go-cp-61436d3b7cfa1bc1e8e455c35d8f60b8e51ccc2eMIT
    github.com/nxadm/tailgithub.com/nxadm/tail-v1.4.4MIT
    github.com/olekukonko/tablewritergithub.com/olekukonko/tablewriter-v0.0.2MIT
    github.com/olivere/elastic/v7github.com/olivere/elastic/v7-v7.0.6MIT
    github.com/onsi/ginkgogithub.com/onsi/ginkgo-v1.15.0MIT
    github.com/onsi/gomegagithub.com/onsi/gomega-v1.7.0MIT
    github.com/openshift/api/config/v1github.com/openshift/api/config/v1-d0822898eabb929c40c5146116252477abab8d18Apache 2.0
    github.com/openshift/library-go/pkg/cryptogithub.com/openshift/library-go/pkg/crypto-9350cd67a9110bcaf9a85d391fa264afbbff1342Apache 2.0
    github.com/osrg/gobgp/clientgithub.com/osrg/gobgp/client-v1.22Apache 2.0
    github.com/paloaltonetworks/pangogithub.com/paloaltonetworks/pango-v0.1.1ISC
    github.com/patrickmn/go-cachegithub.com/patrickmn/go-cache-v2.1.0MIT
    github.com/pkg/errorsgithub.com/pkg/errors-v0.8.1BSD 2
    github.com/projectcalico/cni-plugingithub.com/projectcalico/cni-pluginApache 2.0
    github.com/projectcalico/felixgithub.com/projectcalico/felixApache 2.0
    github.com/projectcalico/go-jsongithub.com/projectcalico/go-json/json-6219dc7339ba20ee4c57df0a8baac62317d19cb1BSD 2
    github.com/projectcalico/go-yaml-wrappergithub.com/projectcalico/go-yaml-wrapper-090425220c545f6d179db17af395f5aac30b6926BSD 3
    kube-controllerskube-controllersTigera Proprietary
    github.com/projectcalico/libcalico-gogithub.com/projectcalico/libcalico-go/libApache 2.0
    github.com/projectcalico/pod2daemongithub.com/projectcalico/pod2daemonApache 2.0
    typhatyphaTigera Proprietary
    github.com/prometheus/client_golang/github.com/prometheus/client_golang/prometheus-v1.7.1Apache 2.0
    github.com/rakelkar/gonetsh/netshgithub.com/rakelkar/gonetsh/netsh-e5c5ffe4bdf04bc060fc45ff4aca2349f51c94a7Apache 2.0
    github.com/robfig/crongithub.com/robfig/cron-v1.2.0MIT
    github.com/satori/go.uuidgithub.com/satori/go.uuid-v1.2.0MIT
    github.com/shirou/gopsutil/processgithub.com/shirou/gopsutil/process-v2.19.03BSD 3
    github.com/sirupsen/logrusgithub.com/sirupsen/logrus-v1.4.2MIT
    github.com/sirupsen/logrusgithub.com/sirupsen/logrus-v1.6.0MIT
    github.com/spf13/cobragithub.com/spf13/cobra-v0.0.3Apache 2.0
    github.com/spf13/cobragithub.com/spf13/cobra-v1.0.0Apache 2.0
    github.com/spf13/pflaggithub.com/spf13/pflag-v1.0.5BSD 3
    github.com/spf13/vipergithub.com/spf13/viper-v1.6.1MIT
    github.com/stretchr/testifygithub.com/stretchr/testify/mock-v1.4.0MIT
    github.com/termie/go-shutilgithub.com/termie/go-shutil-bcacb06fecaeec8dc42af03c87c6949f4a05c74cMIT
    github.com/tigera/apigithub.com/tigera/api/Apache 2.0
    github.com/vishvananda/netlinkgithub.com/vishvananda/netlink-v1.1.0Apache 2.0
    github.com/willf/bitsetgithub.com/willf/bitset-v1.1.11BSD 3
    github.com/workiva/go-datastructures/trie/ctriegithub.com/workiva/go-datastructures/trie/ctrie-v1.0.50Apache 2.0
    github.com/x-cray/logrus-prefixed-formattergithub.com/x-cray/logrus-prefixed-formatter-v0.5.2MIT
    github.com/yalp/jsonpathgithub.com/yalp/jsonpath-5cc68e5049a040829faef3a44c00ec4332f6dec7BSD 3
    globalglobal-4.4.0.tgzMIT
    go.etcd.io/etcd/go.etcd.io/etcd/client-v0.5.0-alpha.5.0.20201125193152-8a03d2e9614bApache 2.0
    go.uber.org/zapgo.uber.org/zap-v1.15.0MIT
    golang.org/x/cryptogolang.org/x/crypto/BSD 3
    golang.org/x/netgolang.org/x/net/BSD 3
    golang.org/x/sync/golang.org/x/sync/BSD 3
    golang.org/x/sysgolang.org/x/sysBSD 3
    github.com/golang/textgolang.org/x/textGolang BSD + Patents
    golang.zx2c4.com/wireguard/golang.zx2c4.com/wireguard/MIT
    google-libphonenumbergoogle-libphonenumber-3.2.2.tgzApache 2.0
    google.golang.org/grpcgoogle.golang.org/grpc-v1.27.0Apache 2.0
    google-authgoogle_auth-1.26.1-py2.py3-none-any.whlApache 2.0
    gopkg.in/fsnotify/fsnotify.v1gopkg.in/fsnotify/fsnotify.v1-v1.4.7BSD 3
    gopkg.in/go-playground/validator.v9gopkg.in/go-playground/validator.v9-v9.30.2MIT
    gopkg.in/inf.v0gopkg.in/inf.v0-v0.9.0BSD 3
    gopkg.in/natefinch/lumberjack.v2gopkg.in/natefinch/lumberjack.v2-v2.0.0MIT
    gopkg.in/square/go-jose.v2gopkg.in/square/go-jose.v2-v2.2.3Apache 2.0
    gopkg.in/tchap/go-patricia.v2/patriciagopkg.in/tchap/go-patricia.v2/patricia-v2.3.0MIT
    gopkg.in/tomb.v1gopkg.in/tomb.v1-dd632973f1e7218eb1089048e0798ec9ae7dceb8BSD 3
    gopkg.in/yaml.v2gopkg.in/yaml.v2-v2.4.0Apache 2.0
    graphlibgraphlib-2.1.8.tgzMIT
    gudgud-1.0.0.tgzMIT
    hashas-1.0.3.tgzMIT
    has-flaghas-flag-3.0.0.tgzMIT
    has-symbolshas-symbols-1.0.1.tgzMIT
    has-valuehas-value-0.3.1.tgzMIT
    has-valuehas-value-1.0.0.tgzMIT
    has-valueshas-values-0.1.4.tgzMIT
    has-valueshas-values-1.0.0.tgzMIT
    hehe-1.2.0.tgzMIT
    heapheap-0.2.6.tgzPython 2.0
    @babel/helper-validator-identifierhelper-validator-identifier-7.10.1.tgzMIT
    @babel/highlighthighlight-7.10.1.tgzMIT
    historyhistory-4.9.0.tgzMIT
    twitterhogan.jsApache 2.0
    hogan.jshogan.js-3.0.2.tgzApache 2.0
    hoist-non-react-staticshoist-non-react-statics-3.1.0.tgzBSD 3
    hoist-non-react-staticshoist-non-react-statics-3.3.0.tgzBSD 3
    @types/html-minifier-terserhtml-minifier-terser-5.1.0.tgzMIT
    html-minifier-terserhtml-minifier-terser-5.1.1.tgzMIT
    html-webpack-pluginhtml-webpack-plugin-4.3.0.tgzMIT
    htmlparser2htmlparser2-3.10.1.tgzMIT
    humpshumps-2.0.1.tgzMIT
    icepickicepick-1.3.0.tgzMIT
    iconv-liteiconv-lite-0.4.23.tgzMIT
    idnaidna-2.10-py2.py3-none-any.whlBSD 3
    idnaidna-2.7-py2.py3-none-any.whlBSD 2
    immutableimmutable-3.8.2.tgzMIT
    importlib-metadataimportlib_metadata-2.1.1-py2.py3-none-any.whlApache 2.0
    inheritsinherits-2.0.4.tgzISC
    instantsearch.jsinstantsearch.production-4.4.1.min.jsMIT
    invariantinvariant-2.2.2.tgzBSD 3
    invariantinvariant-2.2.4.tgzMIT
    ip-regexip-regex-2.1.0.tgzMIT
    ip-regexip-regex-4.1.0.tgzMIT
    ipaddr.jsipaddr.js-1.9.1.tgzMIT
    ipaddressipaddress-1.0.23-py2.py3-none-any.whlPython 2.0
    is-accessor-descriptoris-accessor-descriptor-0.1.6.tgzMIT
    is-accessor-descriptoris-accessor-descriptor-1.0.0.tgzMIT
    is-bufferis-buffer-1.1.6.tgzMIT
    is-callableis-callable-1.1.5.tgzMIT
    is-cidris-cidr-3.1.0.tgzBSD 2
    is-data-descriptoris-data-descriptor-0.1.4.tgzMIT
    is-data-descriptoris-data-descriptor-1.0.0.tgzMIT
    is-date-objectis-date-object-1.0.1.tgzMIT
    is-descriptoris-descriptor-0.1.6.tgzMIT
    is-descriptoris-descriptor-1.0.2.tgzMIT
    is-extendableis-extendable-0.1.1.tgzMIT
    is-extendableis-extendable-1.0.1.tgzMIT
    is-ipis-ip-3.1.0.tgzMIT
    is-numberis-number-3.0.0.tgzMIT
    is-plain-objectis-plain-object-2.0.4.tgzMIT
    is-regexis-regex-1.0.5.tgzMIT
    is-streamis-stream-1.1.0.tgzMIT
    is-symbolis-symbol-1.0.3.tgzMIT
    is-windowsis-windows-1.0.2.tgzMIT
    isarrayisarray-0.0.1.tgzMIT
    isarrayisarray-1.0.0.tgzMIT
    isobjectisobject-2.1.0.tgzMIT
    isobjectisobject-3.0.1.tgzMIT
    isomorphic-fetchisomorphic-fetch-2.2.1.tgzMIT
    jqueryjquery-2.2.0.min.jsMIT
    jqueryjquery-3.4.0.min.jsMIT
    js-cookiejs-cookie-2.2.1.tgzMIT
    js-tokensjs-tokens-3.0.2.tgzMIT
    js-tokensjs-tokens-4.0.0.tgzMIT
    js-yamljs-yaml-3.14.0.tgzMIT
    jsanjsan-3.1.13.tgzMIT
    json5json5-1.0.1.tgzMIT
    jsonpicklejsonpickle-2.0.0-py2.py3-none-any.whlBSD 2
    jsrsasignjsrsasign-5.1.0.tgzMIT
    k8s.io/apik8s.io/apiApache 2.0
    k8s.io/apiextensions-apiserver/k8s.io/apiextensions-apiserver/Apache 2.0
    k8s.io/apimachineryk8s.io/apimachinery/Apache 2.0
    k8s.io/apiserverk8s.io/apiserverApache 2.0
    k8s.io/client-gok8s.io/client-go/Apache 2.0
    k8s.io/component-base/k8s.io/component-base/Apache 2.0
    k8s.io/klogk8s.io/klog-v1.0.0Apache 2.0
    k8s.io/kube-aggregator/k8s.io/kube-aggregator/Apache 2.0
    k8s.io/kube-openapi/k8s.io/kube-openapiApache 2.0
    k8s.io/kubernetes/k8s.io/kubernetes/Apache 2.0
    k8s.io/utils/stringsk8s.io/utils/stringsApache 2.0
    keycodekeycode-2.1.9.tgzMIT
    kind-ofkind-of-3.2.2.tgzMIT
    kind-ofkind-of-4.0.0.tgzMIT
    kind-ofkind-of-5.1.0.tgzMIT
    kind-ofkind-of-6.0.3.tgzMIT
    kube-controllerskube-controllers-v3.0.11Tigera Proprietary
    kube-controllerskube-controllers-v3.16.6Tigera Proprietary
    kuberneteskubernetes-12.0.1-py2.py3-none-any.whlApache 2.0
    layout-baselayout-base-1.0.2.tgzMIT
    projectcalicolibcalico-go-v3.18.0-0.devApache 2.0
    loader-utilsloader-utils-1.4.0.tgzMIT
    lodashlodash-4.17.19.tgzMIT
    lodashlodash-4.17.20.tgzMIT
    lodash-eslodash-es-4.17.15.tgzMIT
    lodash.debouncelodash.debounce-4.0.8.tgzMIT
    lodash.getlodash.get-4.4.2.tgzMIT
    lodash.isequallodash.isequal-4.5.0.tgzMIT
    lodash.throttlelodash.throttle-4.1.1.tgzMIT
    lodash.topathlodash.topath-4.5.2.tgzMIT
    pimterryloglevel-v1.6.8MIT
    loose-envifyloose-envify-1.3.1.tgzMIT
    loose-envifyloose-envify-1.4.0.tgzMIT
    lower-caselower-case-2.0.1.tgzMIT
    map-cachemap-cache-0.2.2.tgzMIT
    map-visitmap-visit-1.0.0.tgzMIT
    math-expression-evaluatormath-expression-evaluator-1.2.17.tgzMIT
    megacubomegacubo-br-Megacubo_15.4.7_linux_ia32LGPL 2.1
    memoize-onememoize-one-4.0.3.tgzMIT
    microevent.tsmicroevent.ts-0.1.1.tgzMIT
    micromatchmicromatch-3.1.10.tgzMIT
    min-documentmin-document-2.19.0.tgzMIT
    mini-create-react-contextmini-create-react-context-0.3.2.tgzMIT
    minimatchminimatch-3.0.4.tgzISC
    minimistminimist-1.2.5.tgzMIT
    mixin-deepmixin-deep-1.3.2.tgzMIT
    mkdirpmkdirp-0.3.0.tgzMIT X11
    mochamocha-1.6.0.jsMIT
    momentmoment-2.22.2.tgzMIT
    more-itertoolsmore_itertools-5.0.0-py2-none-any.whlMIT
    msms-2.0.0.tgzMIT
    nanoidnanoid-2.1.7.tgzMIT
    nanomatchnanomatch-1.2.13.tgzMIT
    netaddrnetaddr-0.7.19-py2.py3-none-any.whlBSD 3
    no-caseno-case-3.0.3.tgzMIT
    @types/nodenode-9.3.0.tgzMIT
    node-fetchnode-fetch-1.7.3.tgzMIT
    nodejsnode-v10.23.1Node.js
    nodenode-v3.17.2Tigera Proprietary
    noptnopt-1.0.10.tgzMIT
    nose-timernose-timer-0.7.1.tar.gzMIT
    nose-parameterizednose_parameterized-0.6.0-py2.py3-none-any.whlBSD 3
    nth-checknth-check-1.0.2.tgzBSD 2
    oauthliboauthlib-3.1.0-py2.py3-none-any.whlBSD 3
    object-assignobject-assign-4.1.1.tgzMIT
    object-copyobject-copy-0.1.0.tgzMIT
    object-inspectobject-inspect-1.7.0.tgzMIT
    object-keysobject-keys-1.0.11.tgzMIT
    object-keysobject-keys-1.1.1.tgzMIT
    object-visitobject-visit-1.0.1.tgzMIT
    object.assignobject.assign-4.1.0.tgzMIT
    object.getownpropertydescriptorsobject.getownpropertydescriptors-2.1.0.tgzMIT
    object.pickobject.pick-1.3.0.tgzMIT
    oidc-clientoidc-client-1.4.1.tgzApache 2.0
    openshiftorigin-v3.6.1Apache 2.0
    packagingpackaging-20.9-py2.py3-none-any.whlBSD 2
    param-caseparam-case-3.0.3.tgzMIT
    parse-durationparse-duration-0.4.4.tgzMIT
    pascal-casepascal-case-3.1.1.tgzMIT
    pascalcasepascalcase-0.1.1.tgzMIT
    path-to-regexppath-to-regexp-1.7.0.tgzMIT
    pathlib2pathlib2-2.3.5-py2.py3-none-any.whlMIT
    pegjspegjs-0.10.0.tgzMIT
    performance-nowperformance-now-2.1.0.tgzMIT
    pluggypluggy-0.13.1-py2.py3-none-any.whlMIT
    popper.jspopper-1.16.0.jsMIT
    popper.jspopper.js-1.16.1.tgzMIT
    posix-character-classesposix-character-classes-0.1.1.tgzMIT
    pretty-errorpretty-error-2.1.1.tgzMIT
    processprocess-0.11.10.tgzMIT
    promisepromise-7.3.1.tgzMIT
    prop-typesprop-types-15.5.10.jsBSD 3
    prop-typesprop-types-15.6.0.tgzMIT
    prop-typesprop-types-15.6.2.jsBSD 3
    prop-typesprop-types-15.6.2.tgzMIT
    prop-typesprop-types-15.7.2.tgzMIT
    prop-types-extraprop-types-extra-1.0.1.tgzMIT
    pypy-1.10.0-py2.py3-none-any.whlMIT
    pyasn1pyasn1-0.4.8-py2.py3-none-any.whlBSD 2
    pyasn1-modulespyasn1_modules-0.2.8-py2.py3-none-any.whlBSD 2
    pyparsingpyparsing-2.4.7-py2.py3-none-any.whlMIT
    pytestpytest-4.6.11-py2.py3-none-any.whlMIT
    python-dateutilpython_dateutil-2.8.1-py2.py3-none-any.whlBSD 3
    PyYAMLPyYAML-5.4.1-cp27-cp27mu-manylinux1_x86_64.whlMIT
    rafraf-3.4.0.tgzMIT
    raf-schdraf-schd-4.0.0.tgzMIT
    raven-jsraven-js-3.22.1.tgzBSD 2
    reactreact-15.6.1.jsMIT
    reactreact-16.13.1.tgzMIT
    reactreact-16.8.6.tgzMIT
    react-beautiful-dndreact-beautiful-dnd-10.0.2.tgzApache 2.0
    react-bootstrapreact-bootstrap-0.32.1.tgzMIT
    react-codemirror2react-codemirror2-5.1.0.tgzMIT
    react-confirmreact-confirm-0.1.16.tgzMIT
    react-cytoscapejsreact-cytoscapejs-1.2.1.tgzMIT
    plotlyreact-cytoscapejs-v1.2.1MIT
    react-day-pickerreact-day-picker-7.4.8.tgzMIT
    react-domreact-dom-15.6.1.jsMIT
    react-domreact-dom-16.13.1.tgzMIT
    react-domreact-dom-16.8.6.tgzMIT
    react-draggablereact-draggable-3.0.5.tgzMIT
    react-fast-comparereact-fast-compare-2.0.4.tgzMIT
    react-filter-boxreact-filter-box-3.4.1.tgzMIT
    @fortawesome/react-fontawesomereact-fontawesome-0.1.3.tgzMIT
    react-grid-layoutreact-grid-layout-0.16.3.tgzMIT
    react-hot-loaderreact-hot-loader-4.12.21.tgzMIT
    react-input-autosizereact-input-autosize-2.1.2.tgzMIT
    JedWatsonreact-input-autosize-v2.1.2MIT
    react-isreact-is-16.6.3.tgzMIT
    react-isreact-is-16.8.6.tgzMIT
    react-isreact-is-16.9.0.tgzMIT
    react-json-prettyreact-json-pretty-2.2.0.tgzMIT
    react-lifecycles-compatreact-lifecycles-compat-3.0.4.tgzMIT
    react-native-segmented-control-tabreact-native-segmented-control-tab-3.2.1.tgzMIT
    react-new-windowreact-new-window-0.1.2.tgzMIT
    react-notification-systemreact-notification-system-0.2.17.tgzMIT
    react-overlaysreact-overlays-0.8.3.tgzMIT
    react-prop-typesreact-prop-types-0.4.0.tgzMIT
    react-querybuilderreact-querybuilder-3.0.0.tgzMIT
    react-reduxreact-redux-5.1.1.tgzMIT
    react-reduxreact-redux-7.1.1.tgzMIT
    react-redux-formreact-redux-form-1.16.5.tgzMIT
    react-resizablereact-resizable-1.7.5.tgzMIT
    react-resize-detectorreact-resize-detector-2.3.0.tgzMIT
    react-routerreact-router-5.0.1.tgzMIT
    react-router-domreact-router-dom-5.0.1.tgzMIT
    react-showreact-show-2.0.4.tgzMIT
    react-smoothreact-smooth-1.0.2.tgzMIT
    react-split-panereact-split-pane-0.1.92.tgzMIT
    react-style-proptypereact-style-proptype-3.2.2.tgzMIT
    react-switchreact-switch-5.0.0.tgzMIT
    react-table-6react-table-6-6.11.0.tgzMIT
    react-tablereact-table-7.5.1.tgzMIT
    react-tooltipreact-tooltip-4.2.11.tgzMIT
    react-transition-groupreact-transition-group-2.2.1.tgzBSD 3
    react-transition-groupreact-transition-group-2.7.1.tgzBSD 3
    readable-streamreadable-stream-3.6.0.tgzMIT
    rechartsrecharts-1.5.0.tgzMIT
    recharts-scalerecharts-scale-0.4.2.tgzMIT
    reduce-css-calcreduce-css-calc-1.3.0.tgzMIT
    reduce-function-callreduce-function-call-1.0.2.tgzMIT
    reduxredux-4.0.1.tgzMIT
    reduxredux-4.0.4.tgzMIT
    redux-immutableredux-immutable-4.0.0.tgzBSD 3
    redux-merge-immutable-reducersredux-merge-immutable-reducers-0.1.4.tgzMIT
    redux-thunkredux-thunk-2.3.0.tgzMIT
    reflect-metadatareflect-metadata-0.1.13.tgzApache 2.0
    regenerator-runtimeregenerator-runtime-0.11.1.tgzMIT
    regenerator-runtimeregenerator-runtime-0.12.1.tgzMIT
    regenerator-runtimeregenerator-runtime-0.13.3.tgzMIT
    regenerator-runtimeregenerator-runtime-0.13.5.tgzMIT
    regex-notregex-not-1.0.2.tgzMIT
    regexp-to-astregexp-to-ast-0.4.0.tgzMIT
    relateurlrelateurl-0.2.7.tgzMIT
    remotedev-serializeremotedev-serialize-0.1.8.tgzMIT
    renderkidrenderkid-2.0.3.tgzMIT
    repeat-elementrepeat-element-1.1.3.tgzMIT
    repeat-stringrepeat-string-1.6.1.tgzMIT
    requestsrequests-2.20.1-py2.py3-none-any.whlApache 2.0
    requestsrequests-2.25.1-py2.py3-none-any.whlApache 2.0
    requests-oauthlibrequests_oauthlib-1.3.0-py2.py3-none-any.whlISC
    reselectreselect-2.5.4.tgzMIT
    @juggle/resize-observerresize-observer-3.2.0.tgzApache 2.0
    resize-observer-polyfillresize-observer-polyfill-1.5.1.tgzMIT
    resolve-pathnameresolve-pathname-2.2.0.tgzMIT
    resolve-urlresolve-url-0.2.1.tgzMIT
    retret-0.1.15.tgzMIT
    rsarsa-4.5-py2.py3-none-any.whlApache 2.0
    @babel/runtimeruntime-7.1.5.tgzMIT
    @babel/runtimeruntime-7.5.5.tgzMIT
    @babel/runtime-corejs2runtime-corejs2-7.1.5.tgzMIT
    rwrw-1.3.3.tgzBSD 3
    safe-buffersafe-buffer-5.2.1.tgzMIT
    safe-regexsafe-regex-1.1.0.tgzMIT
    safer-buffersafer-buffer-2.1.2.tgzMIT
    scandirscandir-1.10.0.tar.gzBSD 3
    schedulerscheduler-0.13.6.tgzMIT
    schedulerscheduler-0.18.0.tgzMIT
    schedulerscheduler-0.19.1.tgzMIT
    seamless-immutableseamless-immutable-7.1.4.tgzBSD 3
    semversemver-5.7.1.tgzISC
    set-valueset-value-2.0.1.tgzMIT
    setimmediatesetimmediate-1.0.5.tgzMIT
    setuptoolssetuptools-44.1.1-py2.py3-none-any.whlMIT
    shallow-compareshallow-compare-1.2.2.tgzMIT
    shallowequalshallowequal-1.1.0.tgzMIT
    sigs.k8s.io/controller-runtimesigs.k8s.io/controller-runtime-v0.7.0Apache 2.0
    sigs.k8s.io/kind/pkg/errorssigs.k8s.io/kind/pkg/errors-v0.9.0Apache 2.0
    sigs.k8s.io/yamlsigs.k8s.io/yaml-v1.2.0BSD 3
    simplejsonsimplejson-3.13.2.tar.gzAcademic 2.1
    sixsix-1.15.0-py2.py3-none-any.whlMIT
    snapdragonsnapdragon-0.8.2.tgzMIT
    snapdragon-nodesnapdragon-node-2.1.1.tgzMIT
    snapdragon-utilsnapdragon-util-3.0.1.tgzMIT
    soupsievesoupsieve-1.9.6-py2.py3-none-any.whlMIT
    @types/source-list-mapsource-list-map-0.1.2.tgzMIT
    source-mapsource-map-0.5.7.tgzBSD 3
    source-mapsource-map-0.6.1.tgzBSD 3
    source-mapsource-map-0.7.3.tgzBSD 3
    source-map-resolvesource-map-resolve-0.5.3.tgzMIT
    source-map-supportsource-map-support-0.5.19.tgzMIT
    source-map-urlsource-map-url-0.4.0.tgzMIT
    split-stringsplit-string-3.1.0.tgzMIT
    sprintfsprintf-1.0.3.jsBSD
    sprintf-jssprintf-js-1.0.3.tgzBSD 3
    static-extendstatic-extend-0.1.2.tgzMIT
    string.prototype.trimendstring.prototype.trimend-1.0.1.tgzMIT
    string.prototype.trimleftstring.prototype.trimleft-2.1.2.tgzMIT
    string.prototype.trimrightstring.prototype.trimright-2.1.2.tgzMIT
    string.prototype.trimstartstring.prototype.trimstart-1.0.1.tgzMIT
    string_decoderstring_decoder-1.3.0.tgzMIT
    strip-ansistrip-ansi-3.0.1.tgzMIT
    strongly-connected-componentsstrongly-connected-components-1.0.1.tgzMIT
    supports-colorsupports-color-5.5.0.tgzMIT
    swagger-uiswagger-ui-bundle-3.37.0.jsApache 2.0
    swagger-uiswagger-ui-standalone-preset-3.37.0.jsApache 2.0
    symbol-observablesymbol-observable-1.2.0.tgzMIT
    @types/tapabletapable-1.0.5.tgzMIT
    tapabletapable-1.1.3.tgzMIT
    JstarfishTechnical-Learning-609d9d75ca68e30aee8757b26f52bf132c644be7ISC
    termcolortermcolor-1.1.0.tar.gzMIT
    terserterser-4.7.0.tgzBSD 2
    tiny-invarianttiny-invariant-1.0.3.tgzMIT
    tiny-invarianttiny-invariant-1.0.6.tgzMIT
    tiny-warningtiny-warning-1.0.2.tgzMIT
    tiny-warningtiny-warning-1.0.3.tgzMIT
    tippy.jstippy-bundle.iife-5.2.1.min.jsMIT
    tippy.jstippy.js-6.2.5.tgzMIT
    to-object-pathto-object-path-0.3.0.tgzMIT
    to-regexto-regex-3.0.2.tgzMIT
    to-regex-rangeto-regex-range-2.1.1.tgzMIT
    tslibtslib-1.10.0.jsApache 2.0
    tslibtslib-1.10.0.tgzApache 2.0
    tslibtslib-1.13.0.jsApache 2.0
    tslibtslib.es6-1.10.0.jsApache 2.0
    tslibtslib.es6-1.13.0.jsApache 2.0
    typescript-fsatypescript-fsa-2.5.0.tgzMIT
    typescript-fsa-reducerstypescript-fsa-reducers-0.4.5.tgzMIT
    ua-parser-jsua-parser-js-0.7.18.tgzMIT
    @types/uglify-jsuglify-js-3.9.2.tgzMIT
    uncontrollableuncontrollable-4.1.0.tgzMIT
    union-valueunion-value-1.0.1.tgzMIT
    unset-valueunset-value-1.0.0.tgzMIT
    urixurix-0.1.0.tgzMIT
    urllib3urllib3-1.24.3-py2.py3-none-any.whlMIT
    urllib3urllib3-1.26.3-py2.py3-none-any.whlMIT
    useuse-3.1.1.tgzMIT
    util-deprecateutil-deprecate-1.0.2.tgzMIT
    util.promisifyutil.promisify-1.0.0.tgzMIT
    utilautila-0.4.0.tgzMIT
    uuiduuid-7.0.3.tgzMIT
    validatorvalidator-10.4.0.tgzMIT
    value-equalvalue-equal-0.4.0.tgzMIT
    warningwarning-3.0.0.tgzBSD 3
    wcwidthwcwidth-0.2.5-py2.py3-none-any.whlMIT
    @types/webpackwebpack-4.41.17.tgzMIT
    @types/webpack-sourceswebpack-sources-1.4.0.tgzMIT
    websocket_clientwebsocket_client-0.57.0-py2.py3-none-any.whlBSD 3
    whatwg-fetchwhatwg-fetch-2.0.4.tgzMIT
    worker-rpcworker-rpc-0.1.1.tgzMIT
    zippzipp-1.2.0-py2.py3-none-any.whlMIT
    github.com/projectcalico/birdgithub.com/projectcalico/bird/blob/v0.3.3GPL
    egress-gatewayegress-gatewayTigera Proprietary
diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/apply.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/apply.mdx deleted file mode 100644 index 8046ecf4d8..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/apply.mdx +++ /dev/null @@ -1,141 +0,0 @@ ---- -description: Command to apply a policy. ---- - -# calicoctl apply - -This section describes the `calicoctl apply` command. - -Read the [calicoctl command line interface user reference](overview.mdx) -for a full list of calicoctl commands. - -:::note - -The available actions for a specific resource type may be -limited based on the datastore used for $[prodname] (Kubernetes API). -Please refer to the -[Resources section](../../resources/overview.mdx) -for details about each resource type. - -::: - -## Displaying the help text for 'calicoctl apply' command - -Run `calicoctl apply --help` to display the following help menu for the -command. - -``` -Usage: - calicoctl apply --filename= [--recursive] [--skip-empty] [--config=] [--namespace=] - -Examples: - # Apply a policy using the data in policy.yaml. - calicoctl apply -f ./policy.yaml - - # Apply a policy based on the JSON passed into stdin. - cat policy.json | calicoctl apply -f - - -Options: - -h --help Show this screen. - -f --filename= Filename to use to apply the resource. If set to - "-" loads from stdin. If filename is a directory, this command is - invoked for each .json .yaml and .yml file within that directory, - terminating after the first failure. - -R --recursive Process the filename specified in -f or --filename recursively. - --skip-empty Do not error if any files or directory specified using -f or --filename contain no - data. - -c --config= Path to the file containing connection - configuration in YAML or JSON format. - [default: /etc/calico/calicoctl.cfg] - -n --namespace= Namespace of the resource. - Only applicable to NetworkPolicy, NetworkSet, and WorkloadEndpoint. - Uses the default namespace if not specified. - --context= The name of the kubeconfig context to use. - -Description: - The apply command is used to create or replace a set of resources by filename - or stdin. JSON and YAML formats are accepted. - - Valid resource types are: - - * bgpConfiguration - * bgpPeer - * felixConfiguration - * globalNetworkPolicy - * hostEndpoint - * ipPool - * tier - * networkPolicy - * networkSet - * node - * profile - * workloadEndpoint - - When applying a resource: - - if the resource does not already exist (as determined by its primary - identifiers) then it is created. - - if the resource already exists then the specification for that resource is - replaced in its entirety by the new resource specification. - - The output of the command indicates how many resources were successfully - applied, and the error reason if an error occurred. - - The resources are applied in the order they are specified. In the event of a - failure applying a specific resource, it is possible to work out which - resource failed based on the number of resources successfully applied. - - When applying a resource to perform an update, the complete resource spec - must be provided; it is not sufficient to supply only the fields that are - being updated. -``` - -### Examples - -1. Apply a set of resources (of mixed type) using the data in resources.yaml. 
- - ```bash - calicoctl apply -f ./resources.yaml - ``` - - Results indicate that 8 resources were successfully applied. - - ``` - Successfully applied 8 resource(s) - ``` - -1. Apply two policy resources based on the JSON passed into stdin. - - ```bash - cat policy.json | calicoctl apply -f - - ``` - - Results indicate success. - - ``` - Successfully applied 2 'policy' resource(s) - ``` - -### Options - -``` --f --filename= Filename to use to apply the resource. If set to - "-" loads from stdin. --n --namespace= Namespace of the resource. - Only applicable to NetworkPolicy and WorkloadEndpoint. - Uses the default namespace if not specified. -``` - -### General options - -``` --c --config= Path to the file containing connection - configuration in YAML or JSON format. - [default: /etc/calico/calicoctl.cfg] -``` - -## See also - -- [Installing calicoctl](../../../operations/clis/calicoctl/install.mdx) -- [Resources](../../resources/overview.mdx) for details on all valid resources, including file format - and schema -- [NetworkPolicy](../../resources/networkpolicy.mdx) for details on the $[prodname] selector-based policy model diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/bgp/index.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/bgp/index.mdx deleted file mode 100644 index 8487f70e8c..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/bgp/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: calicoctl BGP commands. -hide_table_of_contents: true ---- - -# bgp - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/bgp/overview.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/bgp/overview.mdx deleted file mode 100644 index 8bf9151265..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/bgp/overview.mdx +++ /dev/null @@ -1,37 +0,0 @@ ---- -description: Commands for calicoctl bgp. ---- - -# calicoctl bgp - -This section describes the `calicoctl bgp` commands. - -Read the [calicoctl Overview](../overview.mdx) -for a full list of calicoctl commands. - -## Displaying the help text for 'calicoctl bgp' commands - -Run `calicoctl bgp --help` to display the following help menu for the -commands. - -``` -Usage: - calicoctl bgp [...] - - peers Display information about BGP peers for a specific node. - -Options: - -h --help Show this screen. - -Description: - Commands for accessing BGP related information. - - See 'calicoctl bgp --help' to read about a specific subcommand. -``` - -## BGP specific commands - -Details on the `calicoctl bgp` commands are described in the documents linked below, -organized by subcommand. - -- [calicoctl bgp peers](peers.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/bgp/peers.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/bgp/peers.mdx deleted file mode 100644 index 4d04fe2370..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/bgp/peers.mdx +++ /dev/null @@ -1,52 +0,0 @@ ---- -description: Command to display information about BGP peers for a specific node. ---- - -# calicoctl bgp peers - -This section describes the `calicoctl bgp peers` command. 
- -Read the [calicoctl overview](../overview.mdx) -for a full list of calicoctl commands. - -Note, this command can be run from any location that has access to the cluster -(e.g. anywhere with kubeconfig). - -## Displaying the help text for 'calicoctl bgp peers' command - -Run `calicoctl bgp peers --help` to display the following help menu for the -command. - -``` -Usage: - calicoctl bgp peers [--config=] - -Options: - -h --help Show this screen. - -c --config= Path to the file containing connection configuration in - YAML or JSON format. - [default: /etc/calico/calicoctl.cfg] - -Description: - The bgp peers command prints BGP related information about a given node's peers. For the - NAME parameter, you can provide either the node name or pod name of the node instance. -``` - -### Examples - -```bash -calicoctl bgp peers name-of-node -calicoctl bgp peers name-of-pod -``` - -### General options - -``` - -c --config= Path to the file containing connection configuration in - YAML or JSON format. - [default: /etc/calico/calicoctl.cfg] -``` - -## See also - -- [Installing calicoctl](../../../../operations/clis/calicoctl/install.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/captured-packets.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/captured-packets.mdx deleted file mode 100644 index 2173484dcf..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/captured-packets.mdx +++ /dev/null @@ -1,188 +0,0 @@ ---- -description: Command to access capture files generated by a PacketCapture. ---- - -# calicoctl captured-packets - -This section describes the `calicoctl captured-packets` command. - -Read the [calicoctl command line interface user reference](overview.mdx) -for a full list of calicoctl commands. - -## Displaying the help text for 'calicoctl captured-packets' command - -Run `calicoctl captured-packets --help` to display the following help menu for the -command. - -``` -Usage: - calicoctl captured-packets ( copy | clean ) - [--config=] [--namespace=] [--all-namespaces] [--dest=] - -Examples: - # Copies capture files for packet capture from default namespace in the current directory. - calicoctl captured-packets copy my-capture - # Delete capture files for packet capture from default namespace still left on the system - calicoctl captured-packets clean my-capture - -Options: - -n --namespace= Namespace of the packet capture. - Uses the default namespace if not specified. [default: default] - -a --all-namespaces If present, list the requested packet capture(s) across all namespaces. - -d --dest= If present, uses the directory specified as the destination. [default: .] - -h --help Show this screen. - -c --config= Path to the file containing connection configuration in - YAML or JSON format. - [default: /etc/calico/calicoctl.cfg] - -Description: - Commands for accessing Capture related information. - - See 'calicoctl captured-packets --help' to read about a specific subcommand. 
- -``` - -## Configure RBAC for calicoctl - -To authorize users to copy/clean captures files via `calicoctl captured-packets`, the following cluster role and cluster role binding can be used: - -```yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: calicoctl-captured-packets -rules: - - apiGroups: ['crd.projectcalico.org'] - resources: ['felixconfigurations'] - verbs: ['get', 'list'] - - apiGroups: [''] - resources: ['pods'] - verbs: ['get', 'list'] - - apiGroups: [''] - resources: ['pods/exec'] - verbs: ['create'] ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: calicoctl-captured-packets -subjects: - - kind: User - name: dave - apiGroup: rbac.authorization.k8s.io -roleRef: - kind: ClusterRole - name: calicoctl-captured-packets - apiGroup: rbac.authorization.k8s.io -``` - -## Examples of 'calicoctl captured-packets' command - -1. Copy capture files generated by PacketCapture my-capture from namespace default. - - ```bash - calicoctl captured-packets copy my-capture - ``` - - Results indicate that capture files generated by default/my-capture were successfully copied to the current directory. - - ``` - Copy capture files for default/my-capture to . - ``` - -1. Copy capture files generated by PacketCapture my-capture from namespace default to /tmp. - - ```bash - calicoctl captured-packets copy my-capture --destination /tmp - ``` - - Results indicate that capture files generated by default/my-capture were successfully copied to /tmp directory. - - ``` - Copy capture files for default/my-capture to /tmp - ``` - -1. Copy capture files generated by PacketCapture my-capture from namespace my-namespace. - - ```bash - calicoctl captured-packets copy my-capture --namespace my-namespace - ``` - - Results indicate that capture files generated by my-namespace/my-capture were successfully copied to the current directory. - - ``` - Copy capture files for my-namespace/my-capture to . - ``` - -1. Copy capture files generated by PacketCapture my-capture from all namespaces. - - ```bash - calicoctl captured-packets copy my-capture --all-namespaces - ``` - - Results indicate that capture files generated by my-namespace/my-capture and default/my-capture were successfully copied to the current directory. - - ``` - Copy capture files for my-namespace/my-capture to . - Copy capture files for default/my-capture to . - ``` - -1. Clean capture files generated by PacketCapture my-capture from namespace my-namespace. - - ```bash - calicoctl captured-packets clean my-capture --namespace my-namespace - ``` - - Results indicate that capture files generated by my-namespace/my-capture were successfully deleted. - - ``` - Clean capture files for my-namespace/my-capture - ``` - -1. Clean capture files generated by PacketCapture my-capture from all namespaces. - - ```bash - calicoctl captured-packets clean my-capture --all-namespaces - ``` - - Results indicate that capture files generated by my-namespace/my-capture and default/my-capture were successfully cleaned. - - ``` - Clean capture files for my-namespace/my-capture to . - Clean capture files for default/my-capture to . - ``` - -### Setting the log level - -``` - -l --log-level= Set the log level (one of panic, fatal, error, - warn, info, debug) [default: panic] -``` - -```bash -calicoctl -l debug captured-packets copy my-capture -``` - -### Options - -``` - -n --namespace= Namespace of the packet capture. - Uses the default namespace if not specified. 
[default: default] - -a --all-namespaces If present, list the requested packet capture(s) across all namespaces. - -d --dest= If present, uses the directory specified as the destination. [default: .] -``` - -### General options - -``` - -h --help Show this screen. - -c --config= Path to the file containing connection configuration in - YAML or JSON format. - [default: ` + constants.DefaultConfigPath + `] -``` - -## See also - -- [Installing calicoctl](../../../operations/clis/calicoctl/install.mdx) -- [Resources](../../resources/overview.mdx) for details on all valid resources, including file format - and schema diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/cluster/diags.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/cluster/diags.mdx deleted file mode 100644 index ab36c4effe..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/cluster/diags.mdx +++ /dev/null @@ -1,373 +0,0 @@ ---- -description: Command to get diagnostics from a Calico cluster. ---- - -# calicoctl cluster diags - -This section describes the `calicoctl cluster diags` command. - -Read the [calicoctl Overview](../overview.mdx) -for a full list of calicoctl commands. - -Note, this command can be run from any location that has access to the cluster -(e.g. anywhere with kubeconfig). - -## Displaying the help text for 'calicoctl cluster diags' command - -Run `calicoctl cluster diags --help` to display the following help menu for the -command. - -``` -Usage: - calicoctl cluster diags [options] - -Options: - -h --help Show this screen. - --since= Only collect logs newer than provided relative - duration, in seconds (s), minutes (m) or hours (h). - --max-logs= Only collect up to this number of logs, for each - kind of Calico component. [default: 5] - --focus-nodes= Comma-separated list of nodes from which we should - try first to collect logs. - -c --config= Path to connection configuration file. - [default: /etc/calico/calicoctl.cfg] - -Description: - The cluster diags command collects a snapshot of diagnostic info and logs related - to Calico for the given cluster. It generates a .tar.gz file containing all the - diags. - - By default, to keep the .tar.gz file to a reasonable size, this command - only collects up to 5 sets of logs for each kind of Calico pod (for example, - for calico-node, or Typha, or the intrusion detection controller). To collect - more (or fewer) sets of logs, use the --max-logs option. - - To tell calicoctl to try to collect logs first from particular nodes of interest, - set the --focus-nodes option to the relevant node names, comma-separated. For a - Calico component with pods on multiple nodes, calicoctl will first collect logs - from the pods (if any) on the focus nodes, then from other nodes in the cluster. - - To collect logs only for the last few hours, minutes, or seconds, set the --since - option to indicate the desired period. -``` - -### Examples - -```bash -calicoctl cluster diags -calicoctl cluster diags --since=1h -calicoctl cluster diags --focus-nodes=infra1,control2 --max-logs=2 -``` - -An example response follows. - -``` -==== Begin collecting diagnostics. ==== -Collecting kubernetes version... -Collect kubernetes Client and Server version -Collecting Calico resources... 
-Collect Calico clusterinformations -Collect Calico clusterinformations -Collect Calico felixconfigurations -Collect Calico felixconfigurations -Collect Calico bgppeers -Collect Calico bgppeers -Collect Calico bgpconfigurations -Collect Calico bgpconfigurations -Collect Calico ipamblocks -Collect Calico ipamblocks -Collect Calico blockaffinities -Collect Calico blockaffinities -Collect Calico ipamhandles -Collect Calico ipamhandles -Collect Calico tiers -Collect Calico tiers -Collect Calico networkpolicies -Collect Calico networkpolicies -Collect Calico clusterinformations -Collect Calico clusterinformations -Collect Calico hostendpoints -Collect Calico hostendpoints -Collect Calico ippools -Collect Calico ippools -Collect Calico licensekeys -Collect Calico licensekeys -Collect Calico networksets -Collect Calico networksets -Collect Calico globalnetworksets -Collect Calico globalnetworksets -Collect Calico globalnetworkpolicies -Collect Calico globalnetworkpolicies -Collecting Tigera Operator details ... -Collect tigerastatuses -Collect tigerastatuses -Collect installations -Collect installations -Collect apiservers -Collect apiservers -Collect compliances -Collect compliances -Collect intrusiondetections -Collect intrusiondetections -Collect managers -Collect managers -Collect logcollectors -Collect logcollectors -Collect logstorages -Collect logstorages -Collect managementclusterconnections -Collect managementclusterconnections -Collecting core kubernetes resources... -Collect nodes -Collect nodes -Collect pods -Collect pods -Collect deployments -Collect deployments -Collect daemonsets -Collect daemonsets -Collect services -Collect services -Collect endpoints -Collect endpoints -Collect configmaps -Collect configmaps -Collect persistent volume claim -Collect persistent volume claim -Collect persistent volume -Collect persistent volume -Collect storage class -Collect storage class -Collect all namespaces -Collect all namespaces -Collecting detailed diags for namespace calico-cloud... -Collecting detailed diags for pod calico-cloud-controller-manager-594bb58687-dn86k in namespace calico-cloud on node ip-172-16-101-83.us-west-2.compute.internal... -Collecting diags for pod: calico-cloud-controller-manager-594bb58687-dn86k -Collect logs for pod calico-cloud-controller-manager-594bb58687-dn86k -Collect describe for pod calico-cloud-controller-manager-594bb58687-dn86k -Collecting detailed diags for namespace calico-system... -Collecting detailed diags for pod calico-node-tz6w9 in namespace calico-system on node ip-172-16-101-171.us-west-2.compute.internal... 
-Collecting diags for pod: calico-node-tz6w9 -Collect logs for pod calico-node-tz6w9 -Collect describe for pod calico-node-tz6w9 -Collecting diags for calico-node: calico-node-tz6w9 -Collect iptables for node calico-node-tz6w9 -Collect ip routes for node calico-node-tz6w9 -Collect ipv6 routes for node calico-node-tz6w9 -Collect ip rule for node calico-node-tz6w9 -Collect ip route show table all for node calico-node-tz6w9 -Collect ip addr for node calico-node-tz6w9 -Collect ip link for node calico-node-tz6w9 -Collect ip neigh for node calico-node-tz6w9 -Collect ipset list for node calico-node-tz6w9 -Collect eBPF conntrack for node calico-node-tz6w9 -Collect eBPF ipsets for node calico-node-tz6w9 -Collect eBPF nat for node calico-node-tz6w9 -Collect eBPF routes for node calico-node-tz6w9 -Collect eBPF prog for node calico-node-tz6w9 -Collect eBPF map for node calico-node-tz6w9 -Collect tc qdisc for node calico-node-tz6w9 -Collect eBPF map id 16 dumps for node calico-node-tz6w9 -Collect eBPF map id 17 dumps for node calico-node-tz6w9 -Collect eBPF map id 18 dumps for node calico-node-tz6w9 -Collect eBPF map id 19 dumps for node calico-node-tz6w9 -Collect eBPF map id 20 dumps for node calico-node-tz6w9 -Collect eBPF map id 22 dumps for node calico-node-tz6w9 -Collect eBPF map id 23 dumps for node calico-node-tz6w9 -Collect eBPF map id 24 dumps for node calico-node-tz6w9 -Collect eBPF map id 25 dumps for node calico-node-tz6w9 -Collect eBPF map id 26 dumps for node calico-node-tz6w9 -Collect eBPF map id 27 dumps for node calico-node-tz6w9 -Collect eBPF map id 28 dumps for node calico-node-tz6w9 -Collect eBPF map id 29 dumps for node calico-node-tz6w9 -Collect eBPF map id 30 dumps for node calico-node-tz6w9 -Collect eBPF map id 41 dumps for node calico-node-tz6w9 -Collect eBPF map id 46 dumps for node calico-node-tz6w9 -Collect CNI log @400000006316fe6f28fbff2c.u for the node calico-node-tz6w9 -Collect CNI log @4000000063184f691b61d1ac.u for the node calico-node-tz6w9 -Collect CNI log @400000006321838524b6cbcc.u for the node calico-node-tz6w9 -Collect CNI log @400000006333d3853a9c824c.u for the node calico-node-tz6w9 -Collect CNI log @40000000634675eb2a11c874.u for the node calico-node-tz6w9 -Collect CNI log cni.log for the node calico-node-tz6w9 -Collect CNI log config for the node calico-node-tz6w9 -Collect CNI log current for the node calico-node-tz6w9 -Collect CNI log lock for the node calico-node-tz6w9 -Collecting detailed diags for pod calico-node-tqdjj in namespace calico-system on node ip-172-16-101-210.us-west-2.compute.internal... 
-Collecting diags for pod: calico-node-tqdjj -Collect logs for pod calico-node-tqdjj -Collect describe for pod calico-node-tqdjj -Collecting diags for calico-node: calico-node-tqdjj -Collect iptables for node calico-node-tqdjj -Collect ip routes for node calico-node-tqdjj -Collect ipv6 routes for node calico-node-tqdjj -Collect ip rule for node calico-node-tqdjj -Collect ip route show table all for node calico-node-tqdjj -Collect ip addr for node calico-node-tqdjj -Collect ip link for node calico-node-tqdjj -Collect ip neigh for node calico-node-tqdjj -Collect ipset list for node calico-node-tqdjj -Collect eBPF conntrack for node calico-node-tqdjj -Collect eBPF ipsets for node calico-node-tqdjj -Collect eBPF nat for node calico-node-tqdjj -Collect eBPF routes for node calico-node-tqdjj -Collect eBPF prog for node calico-node-tqdjj -Collect eBPF map for node calico-node-tqdjj -Collect tc qdisc for node calico-node-tqdjj -Collect eBPF map id 15 dumps for node calico-node-tqdjj -Collect eBPF map id 16 dumps for node calico-node-tqdjj -Collect eBPF map id 17 dumps for node calico-node-tqdjj -Collect eBPF map id 18 dumps for node calico-node-tqdjj -Collect eBPF map id 19 dumps for node calico-node-tqdjj -Collect eBPF map id 21 dumps for node calico-node-tqdjj -Collect eBPF map id 22 dumps for node calico-node-tqdjj -Collect eBPF map id 23 dumps for node calico-node-tqdjj -Collect eBPF map id 24 dumps for node calico-node-tqdjj -Collect eBPF map id 25 dumps for node calico-node-tqdjj -Collect eBPF map id 26 dumps for node calico-node-tqdjj -Collect eBPF map id 27 dumps for node calico-node-tqdjj -Collect eBPF map id 28 dumps for node calico-node-tqdjj -Collect eBPF map id 29 dumps for node calico-node-tqdjj -Collect eBPF map id 34 dumps for node calico-node-tqdjj -Collect eBPF map id 39 dumps for node calico-node-tqdjj -Collect CNI log @4000000063201beb19d77c74.u for the node calico-node-tqdjj -Collect CNI log @400000006321838436856b9c.u for the node calico-node-tqdjj -Collect CNI log @400000006322d03b119c4724.u for the node calico-node-tqdjj -Collect CNI log @400000006333d383173b8c44.u for the node calico-node-tqdjj -Collect CNI log @40000000634675f5134a8f7c.u for the node calico-node-tqdjj -Collect CNI log cni.log for the node calico-node-tqdjj -Collect CNI log config for the node calico-node-tqdjj -Collect CNI log current for the node calico-node-tqdjj -Collect CNI log lock for the node calico-node-tqdjj -Collecting detailed diags for pod calico-node-4vp2g in namespace calico-system on node ip-172-16-101-83.us-west-2.compute.internal... 
-Collecting diags for pod: calico-node-4vp2g -Collect logs for pod calico-node-4vp2g -Collect describe for pod calico-node-4vp2g -Collecting diags for calico-node: calico-node-4vp2g -Collect iptables for node calico-node-4vp2g -Collect ip routes for node calico-node-4vp2g -Collect ipv6 routes for node calico-node-4vp2g -Collect ip rule for node calico-node-4vp2g -Collect ip route show table all for node calico-node-4vp2g -Collect ip addr for node calico-node-4vp2g -Collect ip link for node calico-node-4vp2g -Collect ip neigh for node calico-node-4vp2g -Collect ipset list for node calico-node-4vp2g -Collect eBPF conntrack for node calico-node-4vp2g -Collect eBPF ipsets for node calico-node-4vp2g -Collect eBPF nat for node calico-node-4vp2g -Collect eBPF routes for node calico-node-4vp2g -Collect eBPF prog for node calico-node-4vp2g -Collect eBPF map for node calico-node-4vp2g -Collect tc qdisc for node calico-node-4vp2g -Collect eBPF map id 15 dumps for node calico-node-4vp2g -Collect eBPF map id 16 dumps for node calico-node-4vp2g -Collect eBPF map id 17 dumps for node calico-node-4vp2g -Collect eBPF map id 18 dumps for node calico-node-4vp2g -Collect eBPF map id 19 dumps for node calico-node-4vp2g -Collect eBPF map id 21 dumps for node calico-node-4vp2g -Collect eBPF map id 22 dumps for node calico-node-4vp2g -Collect eBPF map id 23 dumps for node calico-node-4vp2g -Collect eBPF map id 24 dumps for node calico-node-4vp2g -Collect eBPF map id 25 dumps for node calico-node-4vp2g -Collect eBPF map id 26 dumps for node calico-node-4vp2g -Collect eBPF map id 27 dumps for node calico-node-4vp2g -Collect eBPF map id 28 dumps for node calico-node-4vp2g -Collect eBPF map id 29 dumps for node calico-node-4vp2g -Collect eBPF map id 36 dumps for node calico-node-4vp2g -Collect eBPF map id 43 dumps for node calico-node-4vp2g -Collect CNI log @4000000063184f4a15365dd4.u for the node calico-node-4vp2g -Collect CNI log @4000000063201c162df1601c.u for the node calico-node-4vp2g -Collect CNI log @40000000632183870cf442e4.u for the node calico-node-4vp2g -Collect CNI log @400000006333d36d010a8c54.u for the node calico-node-4vp2g -Collect CNI log @40000000634675f20f8bf524.u for the node calico-node-4vp2g -Collect CNI log cni.log for the node calico-node-4vp2g -Collect CNI log config for the node calico-node-4vp2g -Collect CNI log current for the node calico-node-4vp2g -Collect CNI log lock for the node calico-node-4vp2g -Collecting detailed diags for pod runtime-reporter-csztr in namespace calico-system on node ip-172-16-101-171.us-west-2.compute.internal... -Collecting diags for pod: runtime-reporter-csztr -Collect logs for pod runtime-reporter-csztr -Collect describe for pod runtime-reporter-csztr -Collecting detailed diags for pod runtime-reporter-9bmj8 in namespace calico-system on node ip-172-16-101-210.us-west-2.compute.internal... -Collecting diags for pod: runtime-reporter-9bmj8 -Collect logs for pod runtime-reporter-9bmj8 -Collect describe for pod runtime-reporter-9bmj8 -Collecting detailed diags for pod runtime-reporter-4jwjq in namespace calico-system on node ip-172-16-101-83.us-west-2.compute.internal... -Collecting diags for pod: runtime-reporter-4jwjq -Collect logs for pod runtime-reporter-4jwjq -Collect describe for pod runtime-reporter-4jwjq -Collecting detailed diags for pod calico-kube-controllers-667d5b588c-hkwnc in namespace calico-system on node ip-172-16-101-171.us-west-2.compute.internal... 
-Collecting diags for pod: calico-kube-controllers-667d5b588c-hkwnc -Collect logs for pod calico-kube-controllers-667d5b588c-hkwnc -Collect describe for pod calico-kube-controllers-667d5b588c-hkwnc -Collecting detailed diags for pod calico-typha-6494b5f6cc-xjv2h in namespace calico-system on node ip-172-16-101-171.us-west-2.compute.internal... -Collecting diags for pod: calico-typha-6494b5f6cc-xjv2h -Collect logs for pod calico-typha-6494b5f6cc-xjv2h -Collect describe for pod calico-typha-6494b5f6cc-xjv2h -Collecting detailed diags for pod calico-typha-6494b5f6cc-5d26c in namespace calico-system on node ip-172-16-101-83.us-west-2.compute.internal... -Collecting diags for pod: calico-typha-6494b5f6cc-5d26c -Collect logs for pod calico-typha-6494b5f6cc-5d26c -Collect describe for pod calico-typha-6494b5f6cc-5d26c -Collecting detailed diags for namespace tigera-access... -Collecting detailed diags for namespace tigera-dex... -Collecting detailed diags for namespace tigera-elasticsearch... -Collecting detailed diags for namespace tigera-fluentd... -Collecting detailed diags for pod fluentd-node-dncwx in namespace tigera-fluentd on node ip-172-16-101-171.us-west-2.compute.internal... -Collecting diags for pod: fluentd-node-dncwx -Collect logs for pod fluentd-node-dncwx -Collect describe for pod fluentd-node-dncwx -Collecting detailed diags for pod fluentd-node-cgtk9 in namespace tigera-fluentd on node ip-172-16-101-210.us-west-2.compute.internal... -Collecting diags for pod: fluentd-node-cgtk9 -Collect logs for pod fluentd-node-cgtk9 -Collect describe for pod fluentd-node-cgtk9 -Collecting detailed diags for pod fluentd-node-lht88 in namespace tigera-fluentd on node ip-172-16-101-83.us-west-2.compute.internal... -Collecting diags for pod: fluentd-node-lht88 -Collect logs for pod fluentd-node-lht88 -Collect describe for pod fluentd-node-lht88 -Collecting detailed diags for namespace tigera-guardian... -Collecting detailed diags for pod tigera-guardian-b97bf7d57-blqsn in namespace tigera-guardian on node ip-172-16-101-171.us-west-2.compute.internal... -Collecting diags for pod: tigera-guardian-b97bf7d57-blqsn -Collect logs for pod tigera-guardian-b97bf7d57-blqsn -Collect describe for pod tigera-guardian-b97bf7d57-blqsn -Collecting detailed diags for namespace tigera-image-assurance... -Collecting detailed diags for namespace tigera-license... -Collecting detailed diags for namespace tigera-manager... -Collecting detailed diags for namespace tigera-operator... -Collecting detailed diags for pod tigera-operator-5774454fc5-mjrx8 in namespace tigera-operator on node ip-172-16-101-83.us-west-2.compute.internal... -Collecting diags for pod: tigera-operator-5774454fc5-mjrx8 -Collect logs for pod tigera-operator-5774454fc5-mjrx8 -Collect describe for pod tigera-operator-5774454fc5-mjrx8 -Collecting detailed diags for namespace tigera-packetcapture... -Collecting detailed diags for pod tigera-packetcapture-758ff8c7db-slmd7 in namespace tigera-packetcapture on node ip-172-16-101-83.us-west-2.compute.internal... -Collecting diags for pod: tigera-packetcapture-758ff8c7db-slmd7 -Collect logs for pod tigera-packetcapture-758ff8c7db-slmd7 -Collect describe for pod tigera-packetcapture-758ff8c7db-slmd7 -Collecting detailed diags for namespace tigera-prometheus... -Collecting detailed diags for pod calico-prometheus-operator-78d9b7f47c-q2d9v in namespace tigera-prometheus on node ip-172-16-101-171.us-west-2.compute.internal... 
-Collecting diags for pod: calico-prometheus-operator-78d9b7f47c-q2d9v -Collect logs for pod calico-prometheus-operator-78d9b7f47c-q2d9v -Collect describe for pod calico-prometheus-operator-78d9b7f47c-q2d9v -Collecting detailed diags for namespace tigera-skraper... -Collecting detailed diags for namespace tigera-system... -Collecting detailed diags for pod tigera-apiserver-6f5ddf5697-6qfgj in namespace tigera-system on node ip-172-16-101-171.us-west-2.compute.internal... -Collecting diags for pod: tigera-apiserver-6f5ddf5697-6qfgj -Collect logs for pod tigera-apiserver-6f5ddf5697-6qfgj -Collect describe for pod tigera-apiserver-6f5ddf5697-6qfgj -Collecting detailed diags for pod tigera-apiserver-6f5ddf5697-x69r7 in namespace tigera-system on node ip-172-16-101-83.us-west-2.compute.internal... -Collecting diags for pod: tigera-apiserver-6f5ddf5697-x69r7 -Collect logs for pod tigera-apiserver-6f5ddf5697-x69r7 -Collect describe for pod tigera-apiserver-6f5ddf5697-x69r7 - -==== Producing a diagnostics bundle. ==== -Diagnostic bundle available at ./calico-diagnostics.tar.gz -``` - -### Options - -``` - --since= Only collect logs newer than provided relative duration, - in seconds (s), minutes (m) or hours (h) - -c --config= Path to the file containing connection configuration in - YAML or JSON format. - [default: /etc/calico/calicoctl.cfg] -``` diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/cluster/index.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/cluster/index.mdx deleted file mode 100644 index 8dba781deb..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/cluster/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: calicoctl cluster commands. -hide_table_of_contents: true ---- - -# cluster - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/cluster/overview.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/cluster/overview.mdx deleted file mode 100644 index 1a6a15230f..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/cluster/overview.mdx +++ /dev/null @@ -1,37 +0,0 @@ ---- -description: Commands for calicoctl cluster. ---- - -# calicoctl cluster - -This section describes the `calicoctl cluster` commands. - -Read the [calicoctl Overview](../overview.mdx) -for a full list of calicoctl commands. - -## Displaying the help text for 'calicoctl cluster' commands - -Run `calicoctl cluster --help` to display the following help menu for the -commands. - -``` -Usage: - calicoctl cluster [...] - - diags Collect snapshot of diagnostic info and logs related to Calico at the cluster-level. - -Options: - -h --help Show this screen. - -Description: - Commands for accessing Cluster related information. - - See 'calicoctl cluster --help' to read about a specific subcommand. -``` - -## Cluster specific commands - -Details on the `calicoctl cluster` commands are described in the documents linked below -organized by sub command. 
- -- [calicoctl cluster diags](diags.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/convert.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/convert.mdx deleted file mode 100644 index 5ed5394d6d..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/convert.mdx +++ /dev/null @@ -1,86 +0,0 @@ ---- -description: Command to convert contents of policy.yaml to v3 policy. ---- - -# calicoctl convert - -This sections describes the `calicoctl convert` command. - -Read the [calicoctl command line interface user reference](overview.mdx) -for a full list of calicoctl commands. - -## Displaying the help text for 'calicoctl convert' command - -Run `calicoctl convert --help` to display the following help menu for the -command. - -``` -Usage: - calicoctl convert --filename= - [--output=] [--ignore-validation] - -Examples: - # Convert the contents of policy.yaml to a Calico v3 policy. - calicoctl convert -f ./policy.yaml -o yaml - - # Convert a policy based on the JSON passed into stdin. - cat policy.json | calicoctl convert -f - - -Options: - -h --help Show this screen. - -f --filename= Filename to use to create the resource. If set to - "-" loads from stdin. - -o --output= Output format. One of: yaml or json. - [Default: yaml] - --ignore-validation Skip validation on the converted manifest. - -Description: - Convert config files from Calico v1 or Kubernetes to Calico v3 API versions. Both YAML and JSON formats are accepted. - - The default output will be printed to stdout in YAML format. -``` - -:::note - -Currently the only Kubernetes API resource supported for conversion is NetworkPolicy. - -::: - -### Examples - -1. Convert a set of resources (of mixed type) from Calico v1 or Kubernetes to Calico v3 APIs using the data in resources.yaml. - - ```bash - calicoctl convert -f multi-resource-v1.yaml -o yaml > multi-resource-v3.yaml - ``` - - :::tip - - By default convert command outputs the converted resources to stdout, but it can be redirected to a file. - - ::: - -1. Convert a policy based on the JSON passed into stdin. - - ```bash - cat policy.json | calicoctl convert -f - - ``` - - Result will be printed to stdout. - -### Options - -``` --f --filename= Filename to use to convert the resource. If set to - "-" loads from stdin. --o --output= Output format. One of: yaml or json. - [Default: yaml] ---ignore-validation Skip validation on the converted manifest. -``` - -## See also - -- [Installing calicoctl](../../../operations/clis/calicoctl/install.mdx) -- [Resources](../../resources/overview.mdx) for details on all valid resources, including file format - and schema -- [calicoctl get](get.mdx) for details on `calicoctl get` command to get the resources. diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/create.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/create.mdx deleted file mode 100644 index 78d55e43d0..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/create.mdx +++ /dev/null @@ -1,130 +0,0 @@ ---- -description: Command to create a policy. ---- - -# calicoctl create - -This sections describes the `calicoctl create` command. - -Read the [calicoctl command line interface user reference](overview.mdx) -for a full list of calicoctl commands. - -## Displaying the help text for 'calicoctl create' command - -Run `calicoctl create --help` to display the following help menu for the -command. 
- -``` -Usage: - calicoctl create --filename= [--recursive] [--skip-empty] [--skip-exists] [--config=] [--namespace=] - -Examples: - # Create a policy using the data in policy.yaml. - calicoctl create -f ./policy.yaml - - # Create a policy based on the JSON passed into stdin. - cat policy.json | calicoctl create -f - - -Options: - -h --help Show this screen. - -f --filename= Filename to use to create the resource. If set to - "-" loads from stdin. If filename is a directory, this command is - invoked for each .json .yaml and .yml file within that directory, - terminating after the first failure. - -R --recursive Process the filename specified in -f or --filename recursively. - --skip-empty Do not error if any files or directory specified using -f or --filename contain no - data. - --skip-exists Skip over and treat as successful any attempts to - create an entry that already exists. - -c --config= Path to the file containing connection - configuration in YAML or JSON format. - [default: /etc/calico/calicoctl.cfg] - -n --namespace= Namespace of the resource. - Only applicable to NetworkPolicy, NetworkSet, and WorkloadEndpoint. - Uses the default namespace if not specified. - --context= The name of the kubeconfig context to use. - -Description: - The create command is used to create a set of resources by filename or stdin. - JSON and YAML formats are accepted. - - Valid resource types are: - - * bgpConfiguration - * bgpPeer - * felixConfiguration - * globalNetworkPolicy - * hostEndpoint - * ipPool - * tier - * networkSet - * node - * profile - * workloadEndpoint - - Attempting to create a resource that already exists is treated as a - terminating error unless the --skip-exists flag is set. If this flag is set, - resources that already exist are skipped. - - The output of the command indicates how many resources were successfully - created, and the error reason if an error occurred. If the --skip-exists - flag is set then skipped resources are included in the success count. - - The resources are created in the order they are specified. In the event of a - failure creating a specific resource it is possible to work out which - resource failed based on the number of resources successfully created. -``` - -### Examples - -1. Create a set of resources (of mixed type) using the data in resources.yaml. - - ```bash - calicoctl create -f ./resources.yaml - ``` - - Results indicate that 8 resources were successfully created. - - ``` - Successfully created 8 resource(s) - ``` - -1. Create the same set of resources reading from stdin. - - ```bash - cat resources.yaml | calicoctl create -f - - ``` - - Results indicate failure because the first resource (in this case a Profile) - already exists. - - ``` - Failed to create any resources: resource already exists: Profile(name=profile1) - ``` - -### Options - -``` --f --filename= Filename to use to create the resource. If set to - "-" loads from stdin. - --skip-exists Skip over and treat as successful any attempts to - create an entry that already exists. --n --namespace= Namespace of the resource. - Only applicable to NetworkPolicy and WorkloadEndpoint. - Uses the default namespace if not specified. -``` - -### General options - -``` --c --config= Path to the file containing connection - configuration in YAML or JSON format. 
- [default: /etc/calico/calicoctl.cfg] -``` - -## See also - -- [Installing calicoctl](../../../operations/clis/calicoctl/install.mdx) -- [Resources](../../resources/overview.mdx) for details on all valid resources, including file format - and schema -- [NetworkPolicy](../../resources/networkpolicy.mdx) for details on the $[prodname] selector-based policy model diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/datastore/index.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/datastore/index.mdx deleted file mode 100644 index b8957eb3c9..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/datastore/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: calicoctl datastore commands. -hide_table_of_contents: true ---- - -# datastore - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/datastore/migrate/index.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/datastore/migrate/index.mdx deleted file mode 100644 index f8edbc2600..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/datastore/migrate/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: calicoctl datastore migrate commands. -hide_table_of_contents: true ---- - -# migrate - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/datastore/migrate/lock.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/datastore/migrate/lock.mdx deleted file mode 100644 index ef21c64252..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/datastore/migrate/lock.mdx +++ /dev/null @@ -1,54 +0,0 @@ ---- -description: Command and options for locking a datastore for migration. ---- - -# calicoctl datastore migrate lock - -This section describes the `calicoctl datastore migrate lock` command. - -Read the [calicoctl Overview](../../overview.mdx) -for a full list of calicoctl commands. - -## Display the help text for 'calicoctl datastore migrate lock' command - -Run `calicoctl datastore migrate lock --help` to display the following help menu for the -command. - -``` -Usage: - calicoctl datastore migrate lock [--config=] - -Options: - -h --help Show this screen. - -c --config= Path to the file containing connection - configuration in YAML or JSON format. - [default: /etc/calico/calicoctl.cfg] - -Description: - Lock the datastore to prepare it for migration. This prevents any new - Calico resources from affecting the cluster but does not prevent updating - or creating new Calico resources. -``` - -### Examples - -Lock the datastore to prepare it for migration so that any changes to the -data will not affect the cluster during migration. - -```bash -calicoctl datastore migrate lock -``` - -### General options - -``` --c --config= Path to the file containing connection - configuration in YAML or JSON format.
- [default: /etc/calico/calicoctl.cfg] -``` - -## See also - -- [Install calicoctl](../../../../../operations/clis/calicoctl/install.mdx) -- [Resources](../../../../resources/overview.mdx) for details on all valid resources, including file format - and schema diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/datastore/migrate/overview.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/datastore/migrate/overview.mdx deleted file mode 100644 index 135dda3081..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/datastore/migrate/overview.mdx +++ /dev/null @@ -1,39 +0,0 @@ ---- -description: Commands for calicoctl datastore migrate. ---- - -# calicoctl datastore migrate - -This section describes the `calicoctl datastore migrate` commands. - -Read the [calicoctl Overview](../../overview.mdx) -for a full list of calicoctl commands. - -## Display the help text for 'calicoctl datastore migrate' commands - -Run `calicoctl datastore migrate --help` to display the following help menu for the -commands. - -``` -Usage: - calicoctl datastore migrate [...] - - lock Lock the datastore to prevent changes from occurring during datastore migration. - unlock Unlock the datastore to allow changes once the migration is completed. - -Options: - -h --help Show this screen. - -Description: - Migration specific commands for calicoctl. - - See 'calicoctl datastore migrate --help' to read about a specific subcommand. -``` - -## Migrate specific commands - -Details on the `calicoctl datastore migrate` commands are described in the documents linked below -organized by sub command. - -- [calicoctl datastore migrate lock](lock.mdx) -- [calicoctl datastore migrate unlock](unlock.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/datastore/migrate/unlock.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/datastore/migrate/unlock.mdx deleted file mode 100644 index 15fe494687..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/datastore/migrate/unlock.mdx +++ /dev/null @@ -1,53 +0,0 @@ ---- -description: Command and options for unlocking a datastore after migration. ---- - -# calicoctl datastore migrate unlock - -This section describes the `calicoctl datastore migrate unlock` command. - -Read the [calicoctl Overview](../../overview.mdx) -for a full list of calicoctl commands. - -## Display the help text for 'calicoctl datastore migrate unlock' command - -Run `calicoctl datastore migrate unlock --help` to display the following help menu for the -command. - -``` -Usage: - calicoctl datastore migrate unlock [--config=] - -Options: - -h --help Show this screen. - -c --config= Path to the file containing connection - configuration in YAML or JSON format. - [default: /etc/calico/calicoctl.cfg] - -Description: - Unlock the datastore to complete migration. This once again allows - Calico resources to take effect in the cluster. -``` - -### Examples - -Unlock the datastore after migration to allow the Calico resources to affect -the cluster. - -```bash -calicoctl datastore migrate unlock -``` - -### General options - -``` --c --config= Path to the file containing connection - configuration in YAML or JSON format.
- [default: /etc/calico/calicoctl.cfg] -``` - -## See also - -- [Install calicoctl](../../../../../operations/clis/calicoctl/install.mdx) -- [Resources](../../../../resources/overview.mdx) for details on all valid resources, including file format - and schema diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/datastore/overview.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/datastore/overview.mdx deleted file mode 100644 index f1975d552f..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/datastore/overview.mdx +++ /dev/null @@ -1,37 +0,0 @@ ---- -description: Commands for calicoctl datastore ---- - -# calicoctl datastore - -This section describes the `calicoctl datastore` commands. - -Read the [calicoctl Overview](../overview.mdx) -for a full list of calicoctl commands. - -## Display the help text for 'calicoctl datastore' commands - -Run `calicoctl datastore --help` to display the following help menu for the -commands. - -``` -Usage: - calicoctl datastore [...] - - migrate Migrate the contents of an etcdv3 datastore to a Kubernetes datastore. - -Options: - -h --help Show this screen. - -Description: - Datastore specific commands for calicoctl. - - See 'calicoctl datastore --help' to read about a specific subcommand. -``` - -## Datastore specific commands - -Details on the `calicoctl datastore` commands are described in the documents linked below -organized by sub command. - -- [calicoctl datastore migrate](migrate/overview.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/delete.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/delete.mdx deleted file mode 100644 index c7d3074d07..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/delete.mdx +++ /dev/null @@ -1,151 +0,0 @@ ---- -description: Command to delete a policy. ---- - -# calicoctl delete - -This section describes the `calicoctl delete` command. - -Read the [calicoctl command line interface user reference](overview.mdx) -for a full list of calicoctl commands. - -:::note - -The available actions for a specific resource type may be -limited based on the datastore used for $[prodname] (Kubernetes API). -Please refer to the -[Resources section](../../resources/overview.mdx) -for details about each resource type. - -::: - -## Displaying the help text for 'calicoctl delete' command - -Run `calicoctl delete --help` to display the following help menu for the -command. - -``` -Usage: - calicoctl delete ( ( []) | - --filename=) [--recursive] [--skip-empty] - [--skip-not-exists] [--config=] [--namespace=] - -Examples: - # Delete a policy using the type and name specified in policy.yaml. - calicoctl delete -f ./policy.yaml - - # Delete a policy based on the type and name in the YAML passed into stdin. - cat policy.yaml | calicoctl delete -f - - - # Delete policy with name "foo" - calicoctl delete policy foo - -Options: - -h --help Show this screen. - -s --skip-not-exists Skip over and treat as successful, resources that - don't exist. - -f --filename= Filename to use to delete the resource. If set to - "-" loads from stdin. If filename is a directory, this command is - invoked for each .json .yaml and .yml file within that directory, - terminating after the first failure. - -R --recursive Process the filename specified in -f or --filename recursively.
- --skip-empty Do not error if any files or directory specified using -f or --filename contain no - data. - -c --config= Path to the file containing connection - configuration in YAML or JSON format. - [default: /etc/calico/calicoctl.cfg] - -n --namespace= Namespace of the resource. - Only applicable to NetworkPolicy, NetworkSet, and WorkloadEndpoint. - Uses the default namespace if not specified. - --context= The name of the kubeconfig context to use. - -Description: - The delete command is used to delete a set of resources by filename or stdin, - or by type and identifiers. JSON and YAML formats are accepted for file and - stdin format. - - Valid resource types are: - - * bgpConfiguration - * bgpPeer - * felixConfiguration - * globalNetworkPolicy - * hostEndpoint - * ipPool - * tier - * networkPolicy - * node - * profile - * workloadEndpoint - - The resource type is case-insensitive and may be pluralized. - - Attempting to delete a resource that does not exists is treated as a - terminating error unless the --skip-not-exists flag is set. If this flag is - set, resources that do not exist are skipped. - - When deleting resources by type, only a single type may be specified at a - time. The name is required along with any and other identifiers required to - uniquely identify a resource of the specified type. - - The output of the command indicates how many resources were successfully - deleted, and the error reason if an error occurred. If the --skip-not-exists - flag is set then skipped resources are included in the success count. - - The resources are deleted in the order they are specified. In the event of a - failure deleting a specific resource it is possible to work out which - resource failed based on the number of resources successfully deleted. -``` - -### Examples - -1. Delete a set of resources (of mixed type) using the data in resources.yaml. - - ```bash - calicoctl delete -f ./resources.yaml - ``` - - Results indicate that 8 resources were successfully deleted. - - ``` - Successfully deleted 8 resource(s) - ``` - -1. Delete a policy resource by name. The policy is called "policy1". - - ```bash - bin/calicoctl delete policy policy1 - ``` - - Results indicate success. - - ``` - Successfully deleted 1 'policy' resource(s) - ``` - -### Options - -``` --s --skip-not-exists Skip over and treat as successful, resources that - don't exist. --f --filename= Filename to use to delete the resource. If set to - "-" loads from stdin. --n --namespace= Namespace of the resource. - Only applicable to NetworkPolicy and WorkloadEndpoint. - Uses the default namespace if not specified. -``` - -### General options - -``` --c --config= Path to the file containing connection - configuration in YAML or JSON format. - [default: /etc/calico/calicoctl.cfg] -``` - -## See also - -- [Installing calicoctl](../../../operations/clis/calicoctl/install.mdx). -- [Resources](../../resources/overview.mdx) for details on all valid resources, including file format - and schema -- [NetworkPolicy](../../resources/networkpolicy.mdx) for details on the $[prodname] selector-based policy model diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/get.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/get.mdx deleted file mode 100644 index 985606eb57..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/get.mdx +++ /dev/null @@ -1,272 +0,0 @@ ---- -description: Command to list policies in the default output format. 
---- - -# calicoctl get - -This sections describes the `calicoctl get` command. - -Read the [calicoctl command line interface user reference](overview.mdx) -for a full list of calicoctl commands. - -:::note - -The available actions for a specific resource type may be -limited based on the datastore used for $[prodname] (Kubernetes API). -Please refer to the -[Resources section](../../resources/overview.mdx) -for details about each resource type. - -::: - -## Displaying the help text for 'calicoctl get' command - -Run `calicoctl get --help` to display the following help menu for the -command. - -``` -Usage: - calicoctl get ( ( []) | - --filename=) [--recursive] [--skip-empty] - [--output=] [--config=] [--namespace=] [--all-namespaces] - -Examples: - # List all policy in default output format. - calicoctl get policy - - # List specific policies in YAML format - calicoctl get -o yaml policy my-policy-1 my-policy-2 - -Options: - -h --help Show this screen. - -f --filename= Filename to use to get the resource. If set to - "-" loads from stdin. If filename is a directory, this command is - invoked for each .json .yaml and .yml file within that directory, - terminating after the first failure. - -R --recursive Process the filename specified in -f or --filename recursively. - --skip-empty Do not error if any files or directory specified using -f or --filename contain no - data. - -o --output= Output format. One of: yaml, json, ps, wide, - custom-columns=..., go-template=..., - go-template-file=... [Default: ps] - -c --config= Path to the file containing connection - configuration in YAML or JSON format. - [default: /etc/calico/calicoctl.cfg] - -n --namespace= Namespace of the resource. - Only applicable to NetworkPolicy, NetworkSet, and WorkloadEndpoint. - Uses the default namespace if not specified. - -A --all-namespaces If present, list the requested object(s) across - all namespaces. - --export If present, returns the requested object(s) stripped of - cluster-specific information. This flag will be ignored - if is not specified. - --context= The name of the kubeconfig context to use. - -Description: - The get command is used to display a set of resources by filename or stdin, - or by type and identifiers. JSON and YAML formats are accepted for file and - stdin format. - - Valid resource types are: - - * bgpConfiguration - * bgpPeer - * felixConfiguration - * globalNetworkPolicy - * hostEndpoint - * ipPool - * tier - * networkSet - * node - * profile - * workloadEndpoint - - The resource type is case-insensitive and may be pluralized. - - Attempting to get resources that do not exist will simply return no results. - - When getting resources by type, only a single type may be specified at a - time. The name and other identifiers (hostname, scope) are optional, and are - wildcarded when omitted. Thus if you specify no identifiers at all (other - than type), then all configured resources of the requested type will be - returned. - - By default the results are output in a ps-style table output. There are - alternative ways to display the data using the --output option: - - ps Display the results in ps-style output. - wide As per the ps option, but includes more headings. - custom-columns As per the ps option, but only display the columns - that are requested in the comma-separated list. - golang-template Display the results using the specified golang - template. This can be used to filter results, for - example to return a specific value. 
- golang-template-file Display the results using the golang template that is - contained in the specified file. - yaml Display the results in YAML output format. - json Display the results in JSON output format. - - Note that the data output using YAML or JSON format is always valid to use as - input to all of the resource management commands (create, apply, replace, - delete, get). - - Please refer to the docs at https://projectcalico.docs.tigera.io for more details on - the output formats, including example outputs, resource structure (required - for the golang template definitions) and the valid column names (required for - the custom-columns option). -``` - -### Options - -``` --h --help Show this screen. --f --filename= Filename to use to get the resource. If set to - "-" loads from stdin. --o --output= Output format. One of: yaml, json, ps, wide, - custom-columns=..., go-template=..., - go-template-file=... [Default: ps] --n --namespace= Namespace of the resource. - Only applicable to NetworkPolicy, NetworkSet, and WorkloadEndpoint. - Uses the default namespace if not specified. --A --all-namespaces If present, list the requested object(s) across - all namespaces. ---export If present, returns the requested object(s) stripped of - cluster-specific information. This flag will be ignored - if the resource name is not specified. -``` - -### General options - -``` --c --config= Path to the file containing connection - configuration in YAML or JSON format. - [default: /etc/calico/calicoctl.cfg] -``` - -### Output options - -#### `ps` - -This is the default output format. It displays output in ps-style table output with sufficient columns to -uniquely identify the resource. - -The headings displayed for each resource type are fixed. However, you can use the `wide` option to display additional -columns, and `custom-columns` to select which columns to display. - -Example: - -```bash -calicoctl get hostEndpoint -``` - -Response: - -``` -NAME NODE -endpoint1 host1 -myhost-eth0 myhost -``` - -#### `wide` - -Similar to the `ps` format, the `wide` option displays output in ps-style table output but with additional columns. - -The headings displayed for each resource type are fixed. See `custom-columns` for selecting which columns to display. - -Example: - -```bash -calicoctl get hostEndpoint --output=wide -``` - -Response: - -``` -NAME NODE INTERFACE IPS PROFILES -endpoint1 host1 1.2.3.4,0:bb::aa prof1,prof2 -myhost-eth0 myhost profile1 -``` - -#### `custom-columns` - -Similar to the `ps` format, the `custom-columns` option displays output in ps-style table output but allows the user -to specify an ordered, comma-separated list of columns to display in the output. The valid heading names for each -resource type are documented in the [Resources](../../resources/overview.mdx) guide. - -Example: - -```bash -calicoctl get hostEndpoint --output=custom-columns=NAME,IPS -``` - -Response: - -``` -NAME IPS -endpoint1 1.2.3.4,0:bb::aa -myhost-eth0 -``` - -#### `yaml / json` - -The `yaml` and `json` options display the output as a list of YAML documents or JSON dictionaries. The fields for -each resource type are documented in the [Resources](../../resources/overview.mdx) guide. - -The output from either of these formats may be used as input for all of the resource management commands.
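- -For instance, because the YAML output is valid input to the other resource management commands, you can round-trip resources through a file and re-submit them with `calicoctl apply`. A minimal sketch (the file name `hostendpoints.yaml` is only an illustration, and `calicoctl` is assumed to be configured for your cluster): - -```bash -# Save the current HostEndpoint resources as YAML. -calicoctl get hostEndpoint --output=yaml > hostendpoints.yaml - -# Edit the file as needed, then re-submit it; apply creates or updates the resources contained in the file. -calicoctl apply -f hostendpoints.yaml -```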
- -Example: - -```bash -calicoctl get hostEndpoint --output=yaml -``` - -Response: - -```yaml -- apiVersion: projectcalico.org/v3 - kind: HostEndpoint - metadata: - labels: - type: database - name: endpoint1 - spec: - node: host1 - expectedIPs: - - 1.2.3.4 - - 0:bb::aa - profiles: - - prof1 - - prof2 -- apiVersion: projectcalico.org/v3 - kind: HostEndpoint - metadata: - name: myhost-eth0 - spec: - node: myhost - profiles: - - profile1 -``` - -#### `go-template / go-template-file` - -The `go-template` and `go-template-file` options display the output using a golang template specified as a string -on the CLI, or defined in a separate file. -When writing a template, be aware that the data passed to the template is a golang slice of resource-lists. The -resource-lists are defined in the [libcalico API](../../resources/overview.mdx) and there is a resource-list defined for -each resource type. A resource-list contains an Items field which is itself a slice of resources. Thus, to output -the "Name" field from the supplied data, it is necessary to enumerate over the slice of resource-lists and the items -within that list. - -Example: - -```bash -bin/calicoctl get hostEndpoint --output=go-template="{{range .}}{{range .Items}}{{.ObjectMeta.Name}},{{end}}{{end}}" -endpoint1,eth0, -``` - -## See also - -- [Installing calicoctl](../../../operations/clis/calicoctl/install.mdx). -- [Resources](../../resources/overview.mdx) for details on all valid resources, including file format - and schema -- [NetworkPolicy](../../resources/networkpolicy.mdx) for details on the $[prodname] selector-based policy model diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/index.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/index.mdx deleted file mode 100644 index 3bfb0ada02..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: Optional command line interface (CLI) to manage Calico resources. -hide_table_of_contents: true ---- - -# calicoctl - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/ipam/check.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/ipam/check.mdx deleted file mode 100644 index beda092ade..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/ipam/check.mdx +++ /dev/null @@ -1,68 +0,0 @@ ---- -description: Command to check IPAM status ---- - -# calicoctl ipam check - -This section describes the `calicoctl ipam check` command. - -Read the [calicoctl overview](../overview.mdx) for a full list of calicoctl commands. - -## Displaying the help text for 'calicoctl ipam check' command - -Run `calicoctl ipam check --help` to display the following help menu for the command. - -``` -Usage: - calicoctl ipam check [--config=] [--show-all-ips] [--show-problem-ips] [-o ] - -Options: - -h --help Show this screen. - -o --output= Path to output report file. - --show-all-ips Print all IPs that are checked. - --show-problem-ips Print all IPs that are leaked or not allocated properly. - -c --config= Path to the file containing connection configuration in - YAML or JSON format. - [default: /etc/calico/calicoctl.cfg] - -Description: - The ipam check command checks the integrity of the IPAM datastructures against Kubernetes. 
-``` - -### Examples - -Example workflow for checking consistency and releasing leaked addresses. - -**Lock the data store** - -```bash -calicoctl datastore migrate lock -``` - -:::note - -Once the data store is locked, new pods will not be able to be launched until the data store is unlocked. - -::: - -**Generate a report using the check command** - -```bash -calicoctl ipam check -o report.json -``` - -**Release any unnecessary addresses** - -```bash -calicoctl ipam release --from-report report.json -``` - -**Unlock the data store** - -```bash -calicoctl datastore migrate unlock -``` - -## See also - -- [Installing calicoctl](../../../../operations/clis/calicoctl/install.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/ipam/configure.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/ipam/configure.mdx deleted file mode 100644 index 486080ab6c..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/ipam/configure.mdx +++ /dev/null @@ -1,48 +0,0 @@ ---- -description: Command to change IPAM configuration. ---- - -# calicoctl ipam configure - -This section describes the `calicoctl ipam configure` command. - -Read the [calicoctl overview](../overview.mdx) for a full list of calicoctl commands. - -## Displaying the help text for 'calicoctl ipam configure' command - -Run `calicoctl ipam configure --help` to display the following help menu for the -command. - -``` -Usage: - calicoctl ipam configure --strictaffinity= [--config=] - -Options: - -h --help Show this screen. - --strictaffinity= Set StrictAffinity to true/false. When StrictAffinity - is true, borrowing IP addresses is not allowed. - -c --config= Path to the file containing connection configuration in - YAML or JSON format. - [default: /etc/calico/calicoctl.cfg] - -Description: - Modify configuration for Calico IP address management. -``` - -### Examples - -```bash -calicoctl ipam configure --strictaffinity=true -``` - -### General options - -``` --c --config= Path to the file containing connection - configuration in YAML or JSON format. - [default: /etc/calico/calicoctl.cfg] -``` - -## See also - -- [Installing calicoctl](../../../../operations/clis/calicoctl/install.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/ipam/index.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/ipam/index.mdx deleted file mode 100644 index 1049df9db0..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/ipam/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: calicoctl IPAM commands for Calico-assigned IP addresses. -hide_table_of_contents: true ---- - -# ipam - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/ipam/overview.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/ipam/overview.mdx deleted file mode 100644 index 9eed90a72c..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/ipam/overview.mdx +++ /dev/null @@ -1,41 +0,0 @@ ---- -description: Commands for calicoctl IP address management (IPAM). ---- - -# calicoctl ipam - -This section describes the `calicoctl ipam` commands. - -Read the [calicoctl Overview](../overview.mdx) for a full list of calicoctl commands. 
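- -As a quick orientation, a few representative invocations are sketched below; the flags are documented on the subcommand pages listed later in this section, and the addresses shown are placeholders: - -```bash -# Summarize overall IP usage for each IP pool. -calicoctl ipam show - -# Report whether a specific address is in use. -calicoctl ipam show --ip=192.168.1.2 - -# Disallow borrowing of IP addresses between nodes. -calicoctl ipam configure --strictaffinity=true - -# Release an address that was not cleanly freed. -calicoctl ipam release --ip=192.168.1.2 -```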
- -## Displaying the help text for 'calicoctl ipam' commands - -Run `calicoctl ipam --help` to display the following help menu for the -commands. - -``` -Usage: - calicoctl ipam [...] - - release Release a Calico assigned IP address. - show Show details of a Calico assigned IP address, - or of overall IP usage. - configure Configure IPAM - -Options: - -h --help Show this screen. - -Description: - IP Address Management specific commands for calicoctl. - - See 'calicoctl ipam --help' to read about a specific subcommand. -``` - -## IPAM specific commands - -Details on the `calicoctl ipam` commands are described in the documents linked below -organized by sub command. - -- [calicoctl ipam release](release.mdx) -- [calicoctl ipam show](show.mdx) -- [calicoctl ipam configure](configure.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/ipam/release.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/ipam/release.mdx deleted file mode 100644 index 39da4689c8..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/ipam/release.mdx +++ /dev/null @@ -1,58 +0,0 @@ ---- -description: Command to release an IP address from Calico Enterprise IP management. ---- - -# calicoctl ipam release - -This section describes the `calicoctl ipam release` command. - -Read the [calicoctl overview](../overview.mdx) for a full list of calicoctl commands. - -## Displaying the help text for 'calicoctl ipam release' command - -Run `calicoctl ipam release --help` to display the following help menu for the -command. - -``` -Usage: - calicoctl ipam release [--ip=] [--from-report=] [--config=] - -Options: - -h --help Show this screen. - --ip= IP address to release. - --from-report= Release all leaked addresses from the report. - -c --config= Path to the file containing connection configuration in - YAML or JSON format. - [default: /etc/calico/calicoctl.cfg] - -Description: - The ipam release command releases an IP address from the Calico IP Address - Manager that was been previously assigned to an endpoint. When an IP address - is released, it becomes available for assignment to any endpoint. - - Note that this does not remove the IP from any existing endpoints that may be - using it, so only use this command to clean up addresses from endpoints that - were not cleanly removed from Calico. -``` - -### Examples - -```bash -calicoctl ipam release --ip=192.168.1.2 -``` - -```bash -calicoctl ipam release --from-report=./report.json -``` - -### General options - -``` --c --config= Path to the file containing connection - configuration in YAML or JSON format. - [default: /etc/calico/calicoctl.cfg] -``` - -## See also - -- [Installing calicoctl](../../../../operations/clis/calicoctl/install.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/ipam/show.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/ipam/show.mdx deleted file mode 100644 index acb4eeece6..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/ipam/show.mdx +++ /dev/null @@ -1,154 +0,0 @@ ---- -description: Command to see if IP address is being used. ---- - -# calicoctl ipam show - -This section describes the `calicoctl ipam show` command. - -Read the [calicoctl Overview](../overview.mdx) for a full list of calicoctl commands. 
- -## Displaying the help text for 'calicoctl ipam show' command - -Run `calicoctl ipam show --help` to display the following help menu for the -command. - -``` -Usage: - calicoctl ipam show [--ip= | --show-blocks] [--config=] - -Options: - -h --help Show this screen. - --ip= Report whether this specific IP address is in use. - --show-blocks Show detailed information for IP blocks as well as pools. - --show-borrowed Show detailed information for "borrowed" IP addresses. - --show-configuration Show current Calico IPAM configuration. - -c --config= Path to the file containing connection - configuration in YAML or JSON format. - [default: /etc/calico/calicoctl.cfg] - -Description: - The ipam show command prints information about a given IP address, or about - overall IP usage. -``` - -### Examples - -1. Print the information associated with an IP address. - - ```bash - calicoctl ipam show --ip=192.168.1.2 - ``` - - The following result indicates that the IP is not assigned to an endpoint. - - ``` - IP 192.168.1.2 is not currently assigned - ``` - -1. Print the information associated with a different IP address. - - ```bash - calicoctl ipam show --ip=10.244.118.70 - ``` - - For a Kubernetes pod IP, attributes indicate the pod name and namespace: - - ``` - IP 10.244.118.70 is in use - Attributes: - pod: nano-66d4c99f8b-jm5s9 - namespace: default - node: ip-172-16-101-160.us-west-2.compute.internal - ``` - -1. Print a summary of IP usage. - - ```bash - calicoctl ipam show - ``` - - The table shows usage for each IP Pool: - - ``` - +----------+-------------------+------------+------------+-------------------+ - | GROUPING | CIDR | IPS TOTAL | IPS IN USE | IPS FREE | - +----------+-------------------+------------+------------+-------------------+ - | IP Pool | 10.65.0.0/16 | 65536 | 5 (0%) | 65531 (100%) | - | IP Pool | fd5f:abcd:64::/48 | 1.2089e+24 | 7 (0%) | 1.2089e+24 (100%) | - +----------+-------------------+------------+------------+-------------------+ - ``` - -1. Print more detailed IP usage by blocks. - - ```bash - calicoctl ipam show --show-blocks - ``` - - As well as the total usage per IP Pool, the table shows usage for block that has been allocated from those pools: - - ``` - +----------+-------------------------------------------+------------+------------+-------------------+ - | GROUPING | CIDR | IPS TOTAL | IPS IN USE | IPS FREE | - +----------+-------------------------------------------+------------+------------+-------------------+ - | IP Pool | 10.65.0.0/16 | 65536 | 5 (0%) | 65531 (100%) | - | Block | 10.65.79.0/26 | 64 | 5 (8%) | 59 (92%) | - | IP Pool | fd5f:abcd:64::/48 | 1.2089e+24 | 7 (0%) | 1.2089e+24 (100%) | - | Block | fd5f:abcd:64:4f2c:ec1b:27b9:1989:77c0/122 | 64 | 7 (11%) | 57 (89%) | - +----------+-------------------------------------------+------------+------------+-------------------+ - ``` - -1. Print more detailed information about borrowed IP addresses. 
- - ```bash - calicoctl ipam show --show-borrowed - ``` - - The table shows which IP addresses have been borrowed by which node out of which block and the entity consuming it: - - ``` - +------------+-----------------+---------------+---------------+------+------------------------------------+ - | IP | BORROWING-NODE | BLOCK | BLOCK OWNER | TYPE | ALLOCATED-TO | - +------------+-----------------+---------------+---------------+------+------------------------------------+ - | 172.16.0.1 | worker-node-1 | 172.16.0.0/29 | worker-node-2 | pod | external-ns/nginx-6db489d4b7-gln7h | - | 172.16.0.2 | worker-node-3 | 172.16.0.0/29 | worker-node-2 | pod | external-ns/nginx-6db489d4b7-kzkbv | - +------------+-----------------+---------------+---------------+------+------------------------------------+ - ``` - -1. Print current IPAM configuration. - - ```bash - calicoctl ipam show --show-configuration - ``` - - The table shows the current IPAM configuration: - - ``` - +--------------------+-------+ - | PROPERTY | VALUE | - +--------------------+-------+ - | StrictAffinity | false | - | AutoAllocateBlocks | true | - +--------------------+-------+ - ``` - -### Options - -``` ---ip= Specific IP address to show. ---show-blocks Show detailed information for IP blocks as well as pools. ---show-borrowed Show detailed information for "borrowed" IP addresses. ---show-configuration Show current Calico IPAM configuration -``` - -### General options - -``` --c --config= Path to the file containing connection - configuration in YAML or JSON format. - [default: /etc/calico/calicoctl.cfg] -``` - -## See also - -- [Installing calicoctl](../../../../operations/clis/calicoctl/install.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/ipam/split.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/ipam/split.mdx deleted file mode 100644 index da0e5b4315..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/ipam/split.mdx +++ /dev/null @@ -1,84 +0,0 @@ ---- -description: Command and options for splitting an existing IP pool ---- - -# calicoctl ipam split - -This section describes the `calicoctl ipam split` command. - -Read the [calicoctl Overview](../overview.mdx) -for a full list of calicoctl commands. - -## Display the help text for `calicoctl ipam split` command - -Run `calicoctl ipam split --help` to display the following help menu for the command. - -``` -Usage: - ipam split [--cidr=] [--name=] [--config=] [--allow-version-mismatch] - -Options: - -h --help Show this screen. - -c --config= Path to the file containing connection configuration in - YAML or JSON format. - [default: /etc/calico/calicoctl.cfg] - --cidr= CIDR of the IP pool to split. - --name= Name of the IP pool to split. - --allow-version-mismatch Allow client and cluster versions mismatch. - -Description: - The ipam split command splits an IP pool specified by the specified CIDR or name - into the specified number of smaller IP pools. Each child IP pool will be of equal - size. IP pools can only be split into a number of smaller pools that is a power - of 2. - -Examples: - # Split the IP pool specified by 172.0.0.0/8 into 2 smaller pools - ipam split --cidr=172.0.0.0/8 2 -``` - -### Prerequisites - -To split an IP pool, you will first need to lock the Calico database -so that no IPAM data can change during the split. This is accomplished by using the -[`calicoctl datastore migrate lock` command](../datastore/migrate/lock.mdx).
-To continue normal IPAM operation, you will need to unlock the calico datastore -after the split with the -[`calicoctl datastore migrate unlock` command](../datastore/migrate/unlock.mdx). - -### Examples - -Lock the Calico datastore. - -```bash -calicoctl datastore migrate lock -``` - -Split the IP pool specified by 172.0.0.0/15 into 2 smaller pools. - -```bash -calicoctl ipam split --cidr=172.0.0.0/15 2 -``` - -This should create 2 IP pools, one covering CIDR `172.0.0.0/16` -and one covering CIDR `172.1.0.0/16`. - -Unlock the Calico datastore to restore normal IPAM operation. - -```bash -calicoctl datastore migrate unlock -``` - -### General options - -``` - -c --config= Path to the file containing connection configuration in - YAML or JSON format. - [default: /etc/calico/calicoctl.cfg] -``` - -## See also - -- [Install calicoctl](../../../../operations/clis/calicoctl/install.mdx) -- [Resources](../../../resources/overview.mdx) for details on all valid resources, including file format - and schema diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/label.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/label.mdx deleted file mode 100644 index ca6a93b3c8..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/label.mdx +++ /dev/null @@ -1,154 +0,0 @@ ---- -description: Command to change labels for workload endpoints or nodes. ---- - -# calicoctl label - -This section describes the `calicoctl label` command. - -Read the [calicoctl command line interface user reference](overview.mdx) -for a full list of calicoctl commands. - -:::note - -The available actions for a specific resource type may be -limited based on the datastore used for $[prodname] (Kubernetes API). -Please refer to the -[Resources section](../../resources/overview.mdx) -for details about each resource type. - -::: - -## Displaying the help text for 'calicoctl label' command - -Run `calicoctl label --help` to display the following help menu for the -command. - -``` -Usage: - calicoctl label ( - ( = [--overwrite] | - --remove ) - [--config=] [--namespace=]) - -Examples: - # Label a workload endpoint - calicoctl label workloadendpoints nginx --namespace=default app=web - - # Label a node and overwrite the original value of key 'cluster' - calicoctl label nodes node1 cluster=frontend --overwrite - - # Remove label with key 'cluster' of the node - calicoctl label nodes node1 cluster --remove - -Options: - -h --help Show this screen. - -c --config= Path to the file containing connection - configuration in YAML or JSON format. - [default: /etc/calico/calicoctl.cfg] - -n --namespace= Namespace of the resource. - Only applicable to NetworkPolicy, NetworkSet, and WorkloadEndpoint. - Uses the default namespace if not specified. - --overwrite If true, overwrite the value when the key is already - present in labels. Otherwise reports error when the - labeled resource already have the key in its labels. - Can not be used with --remove. - --remove If true, remove the specified key in labels of the - resource. Reports error when specified key does not - exist. Can not be used with --overwrite. - --context= The name of the kubeconfig context to use. - -Description: - The label command is used to add or update a label on a resource. 
Resource types - that can be labeled are: - - * bgpConfiguration - * bgpPeer - * felixConfiguration - * globalNetworkPolicy - * globalNetworkSet - * hostEndpoint - * ipPool - * networkPolicy - * networkSet - * node - * profile - * workloadEndpoint - - The resource type is case-insensitive and may be pluralized. - - Attempting to label resources that do not exist will get an error. - - Attempting to remove a label that does not exist in the resource will get an error. - - When labeling a resource on an existing key: - - gets an error if option --overwrite is not provided. - - value of the key updates to specified value if option --overwrite is provided. -``` - -### Examples - -1. Label a node. - - ```bash - calicoctl label nodes node1 cluster=backend - ``` - - Results indicate that label was successfully applied. - - ``` - Successfully set label cluster on nodes node1 - ``` - -1. Label a node and overwrite the original value of key `cluster`. - - ```bash - calicoctl label nodes node1 cluster=frontend --overwrite - ``` - - Results indicate that label was successfully overwritten. - - ``` - Successfully updated label cluster on nodes node1 - ``` - -1. Remove label with key `cluster` from the node. - - ```bash - calicoctl label nodes node1 cluster --remove - ``` - - Results indicate that the label was successfully removed. - - ``` - Successfully removed label cluster from nodes node1. - ``` - -### Options - -``` - -n --namespace= Namespace of the resource. - Only applicable to NetworkPolicy and WorkloadEndpoint. - Uses the default namespace if not specified. - --overwrite If true, overwrite the value when the key is already - present in labels. Otherwise reports error when the - labeled resource already have the key in its labels. - Can not be used with --remove. - --remove If true, remove the specified key in labels of the - resource. Reports error when specified key does not - exist. Can not be used with --overwrite. -``` - -### General options - -``` - -c --config= Path to the file containing connection - configuration in YAML or JSON format. - [default: /etc/calico/calicoctl.cfg] -``` - -## See also - -- [Installing calicoctl](../../../operations/clis/calicoctl/install.mdx) -- [Resources](../../resources/overview.mdx) for details on all valid resources, including file format - and schema diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/node/checksystem.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/node/checksystem.mdx deleted file mode 100644 index a005110637..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/node/checksystem.mdx +++ /dev/null @@ -1,66 +0,0 @@ ---- -description: Command to check compatibility of host to run a Calico node instance. ---- - -# calicoctl node checksystem - -This section describes the `calicoctl node checksystem` command. - -Read the [calicoctl Overview](../overview.mdx) -for a full list of calicoctl commands. - -## Displaying the help text for 'calicoctl node checksystem' command - -Run `calicoctl node checksystem --help` to display the following help menu for the -command. - -``` -Usage: - calicoctl node checksystem [--kernel-config=] - -Options: - -h --help Show this screen. - -f --kernel-config= Override the Kernel config file location. - Expected format is plain text. 
- default search locations: - "/usr/src/linux/.config", - "/boot/config-kernelVersion, - "/usr/src/linux-kernelVersion/.config", - "/usr/src/linux-headers-kernelVersion/.config", - "/lib/modules/kernelVersion/build/.config" - -Description: - Check the compatibility of this compute host to run a Calico node instance. -``` - -### Procedure - -These are the steps that `calicoctl` takes to pinpoint what modules are available in your system. - -1. `calicoctl` checks the kernel version. -2. By executing `lsmod` it tries to find out what modules are enabled. -3. Modules without a match in step 2 will be checked against `/lib/modules//modules.dep` file. -4. Modules without a match in step 2 & 3 will be checked against `/lib/modules//modules.builtin` file. -5. Modules without a match in previous steps will be tested against `kernelconfig` file `/usr/src/linux/.config`. -6. Any remaining module will be tested against loaded iptables modules in `/proc/net/ip_tables_matches`. - -### Examples - -```bash -calicoctl node checksystem -``` - -An example response follows. - -``` -xt_conntrack OK -xt_u32 OK -WARNING: Unable to detect the xt_set module. Load with `modprobe xt_set` -WARNING: Unable to detect the ipip module. Load with `modprobe ipip` -``` - -It is possible to override the `kernel-config` file using `--kernel-config` argument. In this case `calicoctl` will try to resolve the modules against the provided file and skip the default locations. - -```bash -calicoctl node checksystem --kernel-config /root/MYKERNELFILE -``` diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/node/diags.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/node/diags.mdx deleted file mode 100644 index 86f87a07b8..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/node/diags.mdx +++ /dev/null @@ -1,76 +0,0 @@ ---- -description: Command to get diagnostics from a Calico node. ---- - -# calicoctl node diags - -This section describes the `calicoctl node diags` command. - -Read the [calicoctl Overview](../overview.mdx) -for a full list of calicoctl commands. - -## Displaying the help text for 'calicoctl node diags' command - -Run `calicoctl node diags --help` to display the following help menu for the -command. - -``` -Usage: - calicoctl node diags [--log-dir=] - -Options: - -h --help Show this screen. - --log-dir= The directory containing Calico logs - [default: /var/log/calico] - -Description: - This command is used to gather diagnostic information from a Calico node. - This is usually used when trying to diagnose an issue that may be related to - your Calico network. - - The output of the command explains how to automatically upload the - diagnostics to http://transfer.sh for easy sharing of the data. Note that the - uploaded files will be deleted after 14 days. - - This command must be run on the specific Calico node that you are gathering - diagnostics for. -``` - -### Examples - -```bash -sudo calicoctl node diags -``` - -An example response follows. 
- -``` -Collecting diagnostics -Using temp dir: /tmp/calico676127473 -Dumping netstat -Dumping routes (IPv4) -Dumping routes (IPv6) -Dumping interface info (IPv4) -Dumping interface info (IPv6) -Dumping iptables (IPv4) -Dumping iptables (IPv6) -Dumping ipsets -exit status 1 -Dumping ipsets (container) -Copying journal for calico-node.service -Dumping felix stats -Copying Calico logs - -Diags saved to /tmp/calico676127473/diags-20170522_151219.tar.gz -If required, you can upload the diagnostics bundle to a file sharing service -such as transfer.sh using curl or similar. For example: - - curl --upload-file /tmp/calico676127473/diags-20170522_151219.tar.gz https://transfer.sh//tmp/calico676127473/diags-20170522_151219.tar.gz -``` - -### Options - -``` - --log-dir= The directory containing Calico logs. - [default: /var/log/calico] -``` diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/node/index.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/node/index.mdx deleted file mode 100644 index 625828c02d..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/node/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: calicoctl node commands. -hide_table_of_contents: true ---- - -# node - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/node/overview.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/node/overview.mdx deleted file mode 100644 index 57880633f6..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/node/overview.mdx +++ /dev/null @@ -1,47 +0,0 @@ ---- -description: Commands for calicoctl node. ---- - -# calicoctl node - -This section describes the `calicoctl node` commands. - -Read the [calicoctl Overview](../overview.mdx) -for a full list of calicoctl commands. - -Note that if you run `calicoctl` in a container, `calicoctl node ...` commands will -not work (they need access to parts of the host filesystem). - -## Displaying the help text for 'calicoctl node' commands - -Run `calicoctl node --help` to display the following help menu for the -commands. - -``` -Usage: - calicoctl node [...] - - run Run the Calico node container image - status View the current status of a Calico node. - diags Gather a diagnostics bundle for a Calico node. - checksystem Verify the compute host is able to run a Calico node instance. - -Options: - -h --help Show this screen. - -Description: - Node specific commands for calicoctl. These commands must be run directly on - the compute host running the Calico node instance. - - See 'calicoctl node --help' to read about a specific subcommand. -``` - -## Node specific commands - -Details on the `calicoctl node` commands are described in the documents linked below -organized by sub command. 
- -- [calicoctl node run](run.mdx) -- [calicoctl node status](status.mdx) -- [calicoctl node diags](diags.mdx) -- [calicoctl node checksystem](checksystem.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/node/run.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/node/run.mdx deleted file mode 100644 index 74f989db45..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/node/run.mdx +++ /dev/null @@ -1,375 +0,0 @@ ---- -description: Command and options for running a Calico node. ---- - -# calicoctl node run - -This sections describes the `calicoctl node run` command. - -Read the [calicoctl Overview](../overview.mdx) -for a full list of calicoctl commands. - -## Displaying the help text for 'calicoctl node run' command - -Run `calicoctl node run --help` to display the following help menu for the -command. - -``` -Usage: - calicoctl node run [--ip=] [--ip6=] [--as=] - [--name=] - [--ip-autodetection-method=] - [--ip6-autodetection-method=] - [--log-dir=] - [--node-image=] - [--backend=(bird|none)] - [--config=] - [--felix-config=] - [--no-default-ippools] - [--dryrun] - [--init-system] - [--disable-docker-networking] - [--docker-networking-ifprefix=] - [--use-docker-networking-container-labels] - -Options: - -h --help Show this screen. - --name= The name of the Calico node. If this is not - supplied it defaults to the host name. - --as= Set the AS number for this node. If omitted, it - will use the value configured on the node resource. - If there is no configured value and --as option is - omitted, the node will inherit the global AS number - (see 'calicoctl config' for details). - --ip= Set the local IPv4 routing address for this node. - If omitted, it will use the value configured on the - node resource. If there is no configured value - and the --ip option is omitted, the node will - attempt to autodetect an IP address to use. Use a - value of 'autodetect' to always force autodetection - of the IP each time the node starts. - --ip6= Set the local IPv6 routing address for this node. - If omitted, it will use the value configured on the - node resource. If there is no configured value - and the --ip6 option is omitted, the node will not - route IPv6. Use a value of 'autodetect' to force - autodetection of the IP each time the node starts. - --ip-autodetection-method= - Specify the autodetection method for detecting the - local IPv4 routing address for this node. The valid - options are: - > first-found - Use the first valid IP address on the first - enumerated interface (common known exceptions are - filtered out, e.g. the docker bridge). It is not - recommended to use this if you have multiple - external interfaces on your host. - > can-reach= - Use the interface determined by your host routing - tables that will be used to reach the supplied - destination IP or domain name. - > interface= - Use the first valid IP address found on interfaces - named as per the first matching supplied interface - name regex. Regexes are separated by commas - (e.g. eth.*,enp0s.*). - > skip-interface= - Use the first valid IP address on the first - enumerated interface (same logic as first-found - above) that does NOT match with any of the - specified interface name regexes. Regexes are - separated by commas (e.g. eth.*,enp0s.*). - [default: first-found] - --ip6-autodetection-method= - Specify the autodetection method for detecting the - local IPv6 routing address for this node. 
See - ip-autodetection-method flag for valid options. - [default: first-found] - --log-dir= The directory containing Calico logs. - [default: /var/log/calico] - --node-image= - Docker image to use for Calico's per-node container. - [default: $[registry]$[imageNames.node]:latest] - --backend=(bird|none) - Specify which networking backend to use. When set - to "none", Calico node runs in policy only mode. - [default: bird] - --dryrun Output the appropriate command, without starting the - container. - --init-system Run the appropriate command to use with an init - system. - --no-default-ippools Do not create default pools upon startup. - Default IP pools will be created if this is not set - and there are no pre-existing Calico IP pools. - --disable-docker-networking - Disable Docker networking. - --docker-networking-ifprefix= - Interface prefix to use for the network interface - within the Docker containers that have been networked - by the Calico driver. - [default: cali] - --use-docker-networking-container-labels - Extract the Calico-namespaced Docker container labels - (org.projectcalico.label.*) and apply them to the - container endpoints for use with Calico policy. - This option is only valid when using Calico Docker - networking, and when enabled traffic must be - explicitly allowed by configuring Calico policies. - -c --config= Path to the file containing connection - configuration in YAML or JSON format. - [default: /etc/calico/calicoctl.cfg] - --felix-config= - Path to the file containing Felix - configuration in YAML or JSON format. - -Description: - This command is used to start a $[nodecontainer] container instance which provides - Calico networking and network policy on your compute host. -``` - -### Kubernetes as the datastore - -When $[prodname] is configured to use the Kubernetes API as the datastore, BGP routing is _currently_ -not supported. Many of the command line options related to BGP routing will -have no effect. These include: - -- `--ip`, `--ip6`, `--ip-autodetection-method`, `--ip6-autodetection-method` -- `--as` -- `--backend` - -### Examples - -Start the $[nodecontainer] with a pre-configured IPv4 address for BGP. - -```bash -sudo calicoctl node run -``` - -An example response follows. - -``` -Running command to load modules: modprobe -a xt_set ip6_tables -Enabling IPv4 forwarding -Enabling IPv6 forwarding -Increasing conntrack limit -Running the following command: - -docker run --net=host --privileged --name=$[noderunning] -d --restart=always -e ETCD_SCHEME=http -e HOSTNAME=calico -e ETCD_AUTHORITY=127.0.0.1:2379 -e AS= -e NO_DEFAULT_POOLS= -e ETCD_ENDPOINTS= -e IP= -e IP6= -e CALICO_NETWORKING_BACKEND=bird -v /var/run/docker.sock:/var/run/docker.sock -v /var/run/calico:/var/run/calico -v /lib/modules:/lib/modules -v /var/log/calico:/var/log/calico -v /run/docker/plugins:/run/docker/plugins $[registry]$[imageNames.node]:$[releaseTitle] - -Waiting for etcd connection... -Using configured IPv4 address: 192.0.2.0 -No IPv6 address configured -Using global AS number -WARNING: Could not confirm that the provided IPv4 address is assigned to this host. -Calico node name: calico -Calico node started successfully -``` - -#### IP Autodetection method examples - -The node resource includes IPv4 and IPv6 routing IP addresses that should -match those on one of the host interfaces. 
These IP addresses may be -configured in advance by configuring the node resource prior to starting the -`$[nodecontainer]` service, alternatively, the addresses may either be explicitly -specified or autodetected through options on the `calicoctl run` command. - -There are different autodetection methods available and you should use the one -best suited to your deployment. If you are able to explicitly specify the IP -addresses, that is always preferred over autodetection. This section describes -the available methods for autodetecting the hosts IP addresses. - -An IPv4 address is always required, and so if no address was previously -configured in the node resource, and no address was specified on the CLI, then -we will attempt to autodetect an IPv4 address. An IPv6 address, however, will -only be autodetected when explicitly requested. - -To force autodetection of an IPv4 address, use the option `--ip=autodetect`. To -force autodetection of an IPv6 address, use the option `--ip6=autodetect`. - -To set the autodetection method for IPv4, use the `--ip-autodetection-method` option. -To set the autodetection method for IPv6, use the `--ip6-autodetection-method` option. - -:::note - -If you are starting the `$[nodecontainer]` container directly (and not using the -`calicoctl run` helper command), the options are passed in environment -variables. These are described in -[Configuring `$[nodecontainer]`](../../../component-resources/node/configuration.mdx). - -::: - -**first-found** - -The `first-found` option enumerates all interface IP addresses and returns the -first valid IP address (based on IP version and type of address) on -the first valid interface. Certain known "local" interfaces -are omitted, such as the docker bridge. The order that both the interfaces -and the IP addresses are listed is system dependent. - -This is the default detection method. However, since this method only makes a -very simplified guess, it is recommended to either configure the node with a -specific IP address, or to use one of the other detection methods. - -An example with first-found auto detection method explicitly specified follows - -```bash -sudo calicoctl node run --ip autodetect --ip-autodetection-method first-found -``` - -**can-reach=DESTINATION** - -The `can-reach` method uses your local routing to determine which IP address -will be used to reach the supplied destination. Both IP addresses and domain -names may be used. - -An example with IP detection using a can-reach IP address: - -```bash -sudo calicoctl node run --ip autodetect --ip-autodetection-method can-reach=8.8.8.8 -``` - -An example with IP detection using a can-reach domain name: - -```bash -sudo calicoctl node run --ip autodetect --ip-autodetection-method can-reach=www.google.com -``` - -**interface=INTERFACE-REGEX,INTERFACE-REGEX,...** - -The `interface` method uses the supplied interface regular expressions (golang -syntax) to enumerate matching interfaces and to return the first IP address on -the first interface that matches any of the interface regexes provided. The -order that both the interfaces and the IP addresses are listed is system -dependent. - -Example with IP detection on interface eth0: - -```bash -sudo calicoctl node run --ip autodetect --ip-autodetection-method interface=eth0 -``` - -Example with IP detection on interfaces eth0, eth1, eth2 etc.: - -```bash -sudo calicoctl node run --ip autodetect --ip-autodetection-method interface=eth.* -``` - -An example with IP detection on interfaces eth0, eth1, eth2 etc. 
and wlp2s0: - -```bash -sudo calicoctl node run --ip-autodetect --ip-autodetection-method interface=eth.*,wlp2s0 -``` - -**skip-interface=INTERFACE-REGEX,INTERFACE-REGEX,...** - -The `skip-interface` method uses the supplied interface regular expressions (golang -syntax) to enumerate all interface IP addresses and returns the first valid IP address -(based on IP version and type of address) that does not match the listed regular -expressions. Like the `first-found` option, it also skips by default certain known -"local" interfaces such as the docker bridge. The order that both the interfaces -and the IP addresses are listed is system dependent. - -This method has the ability to take in multiple regular expressions separated by `,`. -Specifying only one regular expression for interfaces to skip will also work and a -terminating `,` character does not need to be specified for those cases. - -### Options - -``` - --name= The name of the Calico node. If this is not - supplied it defaults to the host name. - --as= Set the AS number for this node. If omitted, it - will use the value configured on the node resource. - If there is no configured value and --as option is - omitted, the node will inherit the global AS number - (see 'calicoctl config' for details). - --ip= Set the local IPv4 routing address for this node. - If omitted, it will use the value configured on the - node resource. If there is no configured value - and the --ip option is omitted, the node will - attempt to autodetect an IP address to use. Use a - value of 'autodetect' to always force autodetection - of the IP each time the node starts. - --ip6= Set the local IPv6 routing address for this node. - If omitted, it will use the value configured on the - node resource. If there is no configured value - and the --ip6 option is omitted, the node will not - route IPv6. Use a value of 'autodetect' to force - autodetection of the IP each time the node starts. - --ip-autodetection-method= - Specify the autodetection method for detecting the - local IPv4 routing address for this node. The valid - options are: - > first-found - Use the first valid IP address on the first - enumerated interface (common known exceptions are - filtered out, e.g. the docker bridge). It is not - recommended to use this if you have multiple - external interfaces on your host. - > can-reach= - Use the interface determined by your host routing - tables that will be used to reach the supplied - destination IP or domain name. - > interface= - Use the first valid IP address found on interfaces - named as per the first matching supplied interface - name regex. Regexes are separated by commas - (e.g. eth.*,enp0s.*). - > skip-interface= - Use the first valid IP address on the first - enumerated interface (same logic as first-found - above) that does NOT match with any of the - specified interface name regexes. Regexes are - separated by commas (e.g. eth.*,enp0s.*). - [default: first-found] - --ip6-autodetection-method= - Specify the autodetection method for detecting the - local IPv6 routing address for this node. See - ip-autodetection-method flag for valid options. - [default: first-found] - --log-dir= The directory containing Calico logs. - [default: /var/log/calico] - --node-image= - Docker image to use for Calico's per-node container. - [default: $[registry]$[imageNames.node]:latest] - --backend=(bird|none) - Specify which networking backend to use. When set - to "none", Calico node runs in policy only mode. 
- [default: bird] - --dryrun Output the appropriate command, without starting the - container. - --init-system Run the appropriate command to use with an init - system. - --no-default-ippools Do not create default pools upon startup. - Default IP pools will be created if this is not set - and there are no pre-existing Calico IP pools. - --disable-docker-networking - Disable Docker networking. - --docker-networking-ifprefix= - Interface prefix to use for the network interface - within the Docker containers that have been networked - by the Calico driver. - [default: cali] - --use-docker-networking-container-labels - Extract the Calico-namespaced Docker container labels - (org.projectcalico.label.*) and apply them to the - container endpoints for use with Calico policy. - This option is only valid when using Calico Docker - networking, and when enabled traffic must be - explicitly allowed by configuring Calico policies. -``` - -### General options - -``` --c --config= Path to the file containing connection - configuration in YAML or JSON format. - [default: /etc/calico/calicoctl.cfg] -``` - -## See also - -- [Installing calicoctl](../../../../operations/clis/calicoctl/install.mdx) -- [Resources](../../../resources/overview.mdx) for details on all valid resources, including file format - and schema -- [Policy](../../../resources/networkpolicy.mdx) for details on the $[prodname] selector-based policy model diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/node/status.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/node/status.mdx deleted file mode 100644 index 6024a56cc6..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/node/status.mdx +++ /dev/null @@ -1,51 +0,0 @@ ---- -description: Command to check status of a Calico node instance. ---- - -# calicoctl node status - -This sections describes the `calicoctl node status` command. - -Read the [calicoctl Overview](../overview.mdx) -for a full list of calicoctl commands. - -## Displaying the help text for 'calicoctl node status' command - -Run `calicoctl node status --help` to display the following help menu for the -command. - -``` -Usage: - calicoctl node status - -Options: - -h --help Show this screen. - -Description: - Check the status of the Calico node instance. This includes the status and - uptime of the node instance, and BGP peering states. -``` - -### Examples - -Check the status of a $[prodname] instance. - -```bash -sudo calicoctl node status -``` - -Some sample results follow,. - -``` -Calico process is running. - -IPv4 BGP status -+--------------+-------------------+-------+----------+-------------+ -| PEER ADDRESS | PEER TYPE | STATE | SINCE | INFO | -+--------------+-------------------+-------+----------+-------------+ -| 172.17.8.102 | node-to-node mesh | up | 23:30:04 | Established | -+--------------+-------------------+-------+----------+-------------+ - -IPv6 BGP status -No IPv6 peers found. -``` diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/overview.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/overview.mdx deleted file mode 100644 index 6dcaa2251f..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/overview.mdx +++ /dev/null @@ -1,147 +0,0 @@ ---- -description: The command line interface tool (CLI) to manage Calico Enterprise network and security policy. 
---- - -# calicoctl user reference - -The command line tool, `calicoctl`, makes it easy to manage $[prodname] -network and security policy, as well as other $[prodname] configurations. - -The full list of resources that can be managed, including a description of each, -is described in the [Resource definitions](../../resources/overview.mdx) -section. - -:::note - -This section provides full reference information for `calicoctl`. To learn -how to install and configure `calicoctl`, refer to -[Installing calicoctl](../../../operations/clis/calicoctl/install.mdx). - -::: - -The calicoctl command line interface provides a number of resource management -commands to allow you to create, modify, delete, and view the different -$[prodname] resources. This section is a command line reference for -`calicoctl`, organized based on the command hierarchy. - -## Top level help - -Run `calicoctl --help` to display the following help menu for the top level -calicoctl commands. - -```bash -Usage: - calicoctl [options] [...] - - create Create a resource by file, directory or stdin. - replace Replace a resource by file, directory or stdin. - apply Apply a resource by file, directory or stdin. This creates a resource - if it does not exist, and replaces a resource if it does exists. - patch Patch a pre-existing resource in place. - delete Delete a resource identified by file, directory, stdin or resource type and - name. - get Get a resource identified by file, directory, stdin or resource type and - name. - label Add or update labels of resources. - convert Convert config files between different API versions. - ipam IP address management. - cluster Access cluster information. - bgp Access BGP related information. - node Calico node management. - captured-packets Capture packet file command - version Display the version of calicoctl. - -Options: - -h --help Show this screen. - -l --log-level= Set the log level (one of panic, fatal, error, - warn, info, debug) [default: panic] - --context= The name of the kubeconfig context to use. - --allow-version-mismatch Allow client and cluster versions mismatch. - -Description: - The calicoctl command line tool is used to manage Calico network and security - policy, to view and manage endpoint configuration, and to manage a Calico - node instance. - - See 'calicoctl --help' to read about a specific subcommand. -``` - -:::note - -In a multi cluster environment if you have a [kubeconfig](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) file with multiple cluster contexts it is possible to directly change the context using calicoctl `--context` argument. - -::: - -:::note - -The versions for Calico and calicoctl should be the same and calls to calicoctl will fail if the versions do not match. If needed, this can be overridden by using the `--allow-version-mismatch` argument. - -::: - -## Top level command line options - -Details on the `calicoctl` commands are described in the documents linked below -organized by top level command. 
- -- [calicoctl create](create.mdx) -- [calicoctl captured-packets](captured-packets.mdx) -- [calicoctl replace](replace.mdx) -- [calicoctl apply](apply.mdx) -- [calicoctl patch](patch.mdx) -- [calicoctl delete](delete.mdx) -- [calicoctl get](get.mdx) -- [calicoctl label](label.mdx) -- [calicoctl convert](convert.mdx) -- [calicoctl ipam](ipam/overview.mdx) -- [calicoctl node](node/index.mdx) -- [calicoctl version](version.mdx) - -## Multiple networks support - -If multiple networks is enabled, the environment variable `MULTI_INTERFACE_MODE=multus` must be set to view details of these additional networks, -such as their workload endpoints. - -For more information, see the [multiple-networks how-to guide](../../../networking/configuring/multiple-networks.mdx). - -## Modifying low-level component configurations - -To update low-level Felix or BGP settings (`FelixConfiguration` and `BGPConfiguration` resource types): -1. Get the appropriate resource and store the yaml output in a file using `calicoctl get -o yaml --export > config.yaml`. -1. Modify the saved resource file. -1. Update the resource using `apply` or `replace` command: `calicoctl replace -f config.yaml`. - -See [Configuring Felix](../../component-resources/node/felix/configuration.mdx) for more details. - -## Supported resource definition aliases - -The following table lists supported aliases for $[prodname] resources when using `calicoctl`. Note that all aliases -are **case-insensitive**. - -| Resource definition | Supported calicoctl aliases | -| :----------------------------------- | :----------------------------------------------------------- | -| BGP configuration | `bgpconfig`, `bgpconfigurations`, `bgpconfigs` | -| Deep packet inspection | `deeppacketinspection`, `deeppacketinspections` -| BGP peer | `bgppeer`, `bgppeers`, `bgpp`, `bgpps`, `bp`, `bps` | -| Felix configuration | `felixconfiguration`, `felixconfig`, `felixconfigurations`, `felixconfigs` | -| Global alert | `globalalert`, `globalalerts` -| Global network policy | `globalnetworkpolicy`, `globalnetworkpolicies`, `gnp`, `gnps` | -| Global network set | `globalnetworkset`, `globalnetworksets` | -| Global report | not supported | -| Global threatfeed | `globalthreatfeed`, `globalthreatfeeds` | -| Host endpoint | `hostendpoint`, `hostendpoints`, `hep`, `heps` | -| IP pool | `ippool`, `ippools`, `ipp`, `ipps`, `pool`, `pools` | -| IP reservation | `ipreservation`, `ipreservations`, `reservation`, `reservations` | -| Kubernetes controllers configuration | `kubecontrollersconfiguration`, `kubecontrollersconfig` | -| License key | not supported | -| Managed cluster | not supported | -| Network policy | `networkpolicy`, `networkpolicies`, `policy`, `np`, `policies`, `pol`, `pols` | -| Network set | `networkset`, `networksets`, `netsets` -| Node | `node`, `nodes`, `no`, `nos` | -| Packet capture | `packetcapture`, `packetcaptures` | -| Profiles | `profile`, `profiles`, `pro`, `pros` | -| Remote cluster configuration. | `remoteclusterconfiguration`, `remoteclusterconfigurations`, `remoteclusterconfig`, `remoteclusterconfigs`, `rcc` | -| Staged global network policy | `stagedglobalnetworkpolicy`, `stagedglobalnetworkpolicies`, `sgnp`, `sgnps` | -| Staged Kubernetes network policy. | `stagedkubernetesnetworkpolicy`, `stagedkubernetesnetworkpolicies`, `stagedkubernetespolicy`, `sknp`, `stagedkubernetespolicies`, `skpol`, `skpols` | -| Staged network policy. 
| `stagednetworkpolicy`, `stagednetworkpolicies`, `stagedpolicy`, `snp`, `stagedpolicies`, `spol`, `spols` | -| Tier | `tier`, `tiers` | -| Workload endpoint | `workloadendpoint`, `workloadendpoints`, `wep`, `weps` | diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/patch.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/patch.mdx deleted file mode 100644 index db187d25bc..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/patch.mdx +++ /dev/null @@ -1,111 +0,0 @@ ---- -description: Command to update a node with a patch. ---- - -# calicoctl patch - -This sections describes the `calicoctl patch` command. - -Read the [calicoctl command line interface user reference](overview.mdx) -for a full list of calicoctl commands. - -## Displaying the help text for 'calicoctl patch' command - -Run `calicoctl patch --help` to display the following help menu for the -command. - -``` -Usage: - calicoctl patch --patch= [--type=] [--config=] [--namespace=] - -Examples: - # Partially update a node using a strategic merge patch. - calicoctl patch node node-0 --patch '{"spec":{"bgp": {"routeReflectorClusterID": "CLUSTER_ID"}}}' - - # Partially update a node using a json merge patch. - calicoctl patch node node-0 --patch '{"spec":{"bgp": {"routeReflectorClusterID": "CLUSTER_ID"}}}' --type json - -Options: - -h --help Show this screen. - -p --patch= Spec to use to patch the resource. - -t --type= Format of patch type: - strategic Strategic merge patch (default) - json JSON Patch, RFC 6902 (not yet implemented) - merge JSON Merge Patch, RFC 7386 (not yet implemented) - -c --config= Path to the file containing connection - configuration in YAML or JSON format. - [default: ` + constants.DefaultConfigPath + `] - -n --namespace= Namespace of the resource. - Only applicable to NetworkPolicy, NetworkSet, and WorkloadEndpoint. - Uses the default namespace if not specified. - --context= The name of the kubeconfig context to use. - -Description: - The patch command is used to patch a specific resource by type and identifiers in place. - Currently, only JSON format is accepted. - - Valid resource types are: - - * bgpConfiguration - * bgpPeer - * felixConfiguration - * globalNetworkPolicy - * globalNetworkSet - * hostEndpoint - * ipPool - * networkPolicy - * networkSet - * node - * profile - * workloadEndpoint - - The resource type is case-insensitive and may be pluralized. - Attempting to patch a resource that does not exists is treated as a - terminating error unless the --skip-not-exists flag is set. If this flag is - set, resources that do not exist are skipped. - - When patching resources by type, only a single type may be specified at a - time. The name is required along with any and other identifiers required to - uniquely identify a resource of the specified type. -``` - -### Examples - -1. Patch an IP Pool to enable outgoing NAT: - - ```bash - calicoctl patch ippool ippool1 -p '{"spec":{"natOutgoing": true}}' - ``` - - Results indicate that a resource was successfully patched: - - ``` - Successfully patched 1 'ipPool' resource - ``` - -### Options - -``` --p --patch= Spec to use to patch the resource. --t --type= Format of patch type: - strategic Strategic merge patch (default) - json JSON Patch, RFC 6902 (not yet implemented) - merge JSON Merge Patch, RFC 7386 (not yet implemented) --n --namespace= Namespace of the resource. - Only applicable to NetworkPolicy and WorkloadEndpoint. 
- Uses the default namespace if not specified. -``` - -### General options - -``` --c --config= Path to the file containing connection - configuration in YAML or JSON format. - [default: /etc/calico/calicoctl.cfg] -``` - -## See also - -- [Installing calicoctl](../../../operations/clis/calicoctl/install.mdx) -- [Resources](../../resources/overview.mdx) for details on all valid resources, including file format - and schema diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/replace.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/replace.mdx deleted file mode 100644 index 9f5594e285..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/replace.mdx +++ /dev/null @@ -1,123 +0,0 @@ ---- -description: Command to replace an existing policy with a different one. ---- - -# calicoctl replace - -This sections describes the `calicoctl replace` command. - -Read the [calicoctl command line interface user reference](overview.mdx) -for a full list of calicoctl commands. - -## Displaying the help text for 'calicoctl replace' command - -Run `calicoctl replace --help` to display the following help menu for the -command. - -``` -Usage: - calicoctl replace --filename= [--recursive] [--skip-empty] [--config=] [--namespace=] - -Examples: - # Replace a policy using the data in policy.yaml. - calicoctl replace -f ./policy.yaml - - # Replace a policy based on the JSON passed into stdin. - cat policy.json | calicoctl replace -f - - -Options: - -h --help Show this screen. - -f --filename= Filename to use to replace the resource. If set - to "-" loads from stdin. If filename is a directory, this command is - invoked for each .json .yaml and .yml file within that directory, - terminating after the first failure. - -R --recursive Process the filename specified in -f or --filename recursively. - --skip-empty Do not error if any files or directory specified using -f or --filename contain no - data. - -c --config= Path to the file containing connection - configuration in YAML or JSON format. - [default: /etc/calico/calicoctl.cfg] - -n --namespace= Namespace of the resource. - Only applicable to NetworkPolicy, NetworkSet, and WorkloadEndpoint. - Uses the default namespace if not specified. - --context= The name of the kubeconfig context to use. - -Description: - The replace command is used to replace a set of resources by filename or - stdin. JSON and YAML formats are accepted. - - Valid resource types are: - - * bgpConfiguration - * bgpPeer - * felixConfiguration - * globalNetworkPolicy - * hostEndpoint - * ipPool - * tier - * networkSet - * node - * profile - * workloadEndpoint - - Attempting to replace a resource that does not exist is treated as a - terminating error. - - The output of the command indicates how many resources were successfully - replaced, and the error reason if an error occurred. - - The resources are replaced in the order they are specified. In the event of - a failure replacing a specific resource it is possible to work out which - resource failed based on the number of resources successfully replaced. - - When replacing a resource, the complete resource spec must be provided, it is - not sufficient to supply only the fields that are being updated. -``` - -### Examples - -1. Replace a set of resources (of mixed type) using the data in resources.yaml. - - ```bash - calicoctl replace -f ./resources.yaml - ``` - - Results indicate that 8 resources were successfully replaced. 
- - ``` - Successfully replaced 8 resource(s) - ``` - -1. Replace a policy based on the JSON passed into stdin. - - ```bash - cat policy.json | calicoctl replace -f - - ``` - - Results indicate the policy does not exist. - - ``` - Failed to replace any 'policy' resources: resource does not exist: Policy(name=dbPolicy) - ``` - -### Options - -``` --f --filename= Filename to use to replace the resource. If set - to "-" loads from stdin. -``` - -### General options - -``` --c --config= Path to the file containing connection - configuration in YAML or JSON format. - [default: /etc/calico/calicoctl.cfg] -``` - -## See also - -- [Installing calicoctl](../../../operations/clis/calicoctl/install.mdx) -- [Resources](../../resources/overview.mdx) for details on all valid resources, including file format - and schema -- [NetworkPolicy](../../resources/networkpolicy.mdx) for details on the $[prodname] selector-based policy model diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/version.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/version.mdx deleted file mode 100644 index da6852e86b..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoctl/version.mdx +++ /dev/null @@ -1,46 +0,0 @@ ---- -description: Command to display the calicoctl CLI version. ---- - -# calicoctl version - -import CalicoctlVersion from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_calicoctl-version.mdx'; - -This section describes the `calicoctl version` command. - -Read the [calicoctl Overview](overview.mdx) -for a full list of calicoctl commands. - -## Displaying the help text for 'calicoctl version' commands - -Run `calicoctl version --help` to display the following help menu for the -commands. - -``` -Usage: - calicoctl version [--config=] [--poll=] - -Options: - -h --help Show this screen. - -c --config= Path to the file containing connection configuration in - YAML or JSON format. - [default: /etc/calico/calicoctl.cfg] - --poll= Poll for changes to the cluster information at a frequency specified using POLL duration - (e.g. 1s, 10m, 2h etc.). A value of 0 (the default) disables polling. - -Description: - Display the version of calicoctl. -``` - -### Example - -Use `calicoctl version` to obtain the following data. - - - -\* To obtain these values, you must configure `calicoctl` -[to connect to your datastore](../../../operations/clis/calicoctl/configure/overview.mdx). - -## See also - -- [Installing calicoctl](../../../operations/clis/calicoctl/install.mdx). diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoq/endpoint.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoq/endpoint.mdx deleted file mode 100644 index 79e5fa0bc5..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoq/endpoint.mdx +++ /dev/null @@ -1,109 +0,0 @@ ---- -description: Command to list policies and profiles for selected endpoints. ---- - -# calicoq endpoint - -`calicoq endpoint ` shows you the $[prodname] policies and profiles that -relate to endpoints whose full ID includes ``. It displays, for -each endpoint: - -- the policies and profiles that apply to that endpoint (that $[prodname] uses to - police traffic that is arriving at or departing from that endpoint), in the - order that they apply - -- the policies and profiles whose rule selectors match that endpoint (that - allow or disallow that endpoint as a traffic source or destination). 
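-
-For example, to display this information for every endpoint whose full ID
-contains the substring `nginx` (a hypothetical workload name), you could run:
-
-```bash
-calicoq endpoint nginx
-```
-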
- -The rule matches can be suppressed by giving the `-r` option. - -`` can be any substring of an endpoint's full ID, which is formed -as `///`. - -## Options - -``` --r --hide-rule-matches Don't show the list of policies and profiles whose - rule selectors match each endpoint as an allowed or - disallowed source/destination. - --s --hide-selectors Don't show the detailed selector expressions involved - (that cause each displayed profile or policy to match - each endpoint). - --o --output= Set the output format. Should be one of yaml, json, or - ps. If nothing is set, defaults to ps. -``` - -## Examples - -Here is an example with three workloads in a namespace, named with a prefix that -specifies the namespace; so `calicoq endpoint` with that prefix returns information -about all three endpoints. - -To retrieve the policies and profiles for endpoint `ns1`: - -``` -calicoq endpoint ns1 -``` - -Sample output follows. - -``` -Policies and profiles for endpoints matching "ns1": - -Workload endpoint k8s/namespace1.ns1wep1/eth0 - Policies: - Policy "namespace1/policy1" (order 500; selector "(projectcalico.org/namespace == 'namespace1') && projectcalico.org/namespace == 'namespace1'") - Profiles: - Profile "profile1" - Rule matches: - Policy "namespace1/policy1" outbound rule 1 destination match; selector "(projectcalico.org/namespace == 'namespace1') && (projectcalico.org/namespace == 'namespace1')" - -Workload endpoint k8s/namespace1.ns1wep2/eth0 - Policies: - Policy "namespace1/policy1" (order 500; selector "(projectcalico.org/namespace == 'namespace1') && projectcalico.org/namespace == 'namespace1'") - Profiles: - Profile "profile1" - Rule matches: - Policy "namespace1/policy1" outbound rule 1 destination match; selector "(projectcalico.org/namespace == 'namespace1') && (projectcalico.org/namespace == 'namespace1')" - -Workload endpoint k8s/namespace1.ns1wep3/eth0 - Policies: - Policy "namespace1/policy1" (order 500; selector "(projectcalico.org/namespace == 'namespace1') && projectcalico.org/namespace == 'namespace1'") - Profiles: - Profile "profile1" - Rule matches: - Policy "namespace1/policy1" outbound rule 1 destination match; selector "(projectcalico.org/namespace == 'namespace1') && (projectcalico.org/namespace == 'namespace1')" -``` - -Here is an example of a workload to which both normal and untracked policy -applies. The untracked policy is listed first because $[prodname] enforces -untracked policies before normal ones. - -``` -calicoq endpoint tigera-lwr-kubetest-02 --hide-rule-matches -``` - -Sample output follows. - -``` -Policies and profiles for endpoints matching "tigera-lwr-kubetest-02": - -Workload endpoint k8s/advanced-policy-demo.nginx-2371676037-bk6v2/eth0 - Policies: - Policy "donottrack" (order 500; selector "projectcalico.org/namespace == 'advanced-policy-demo'") [untracked] - Policy "advanced-policy-demo/abcdefghijklmnopqrstuvwxyz" (order 400; selector "(projectcalico.org/namespace == 'advanced-policy-demo') && projectcalico.org/namespace == 'advanced-policy-demo'") - Profiles: - Profile "k8s-ns.advanced-policy-demo" - Rule matches: - Policy "advanced-policy-demo/abcdefghijklmnopqrstuvwxyz" outbound rule 1 destination match; selector "(projectcalico.org/namespace == 'advanced-policy-demo') && (projectcalico.org/namespace == 'advanced-policy-demo')" -``` - -## See also - -- [NetworkPolicy](../../resources/networkpolicy.mdx) and - [GlobalNetworkPolicy](../../resources/globalnetworkpolicy.mdx) - for more information about the $[prodname] policy model. 
-- [Untracked policy](../../host-endpoints/index.mdx) for - more information about untracked policy. diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoq/eval.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoq/eval.mdx deleted file mode 100644 index 6e9c568b83..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoq/eval.mdx +++ /dev/null @@ -1,60 +0,0 @@ ---- -description: Command to list endpoints matched by a selector. ---- - -# calicoq eval - -`calicoq eval ` is used to display the endpoints that are matched by -``. - -## Examples - -To find all endpoints that match the `role=='frontend'` selector, i.e. that -have a `role` label with value `frontend`: - -``` -calicoq eval "role=='frontend'" -``` - -Sample output follows. - -``` -Endpoints matching selector role=='frontend': - Host endpoint webserver1/eth0 - Host endpoint webserver2/eth0 -``` - -To find all endpoints that have an `app` label (with any value): - -``` -calicoq eval "has(app)" -``` - -Sample output follows. - -``` -Endpoints matching selector has(app): - Workload endpoint rack1-host1/k8s/default.frontend-5gs43/eth0 -``` - -(In this case the answer is a Kubernetes pod.) - -To find endpoint for a selector that does not match any endpoints: - -``` -calicoq eval "role=='endfront'" -``` - -Sample output follows. - -``` -Endpoints matching selector role=='endfront': -``` - -## See also - -- [NetworkPolicy](../../resources/networkpolicy.mdx) and - [GlobalNetworkPolicy](../../resources/globalnetworkpolicy.mdx) - for more information about the $[prodname] policy model. -- [calicoq and selectors](selectors.mdx) for - a recap on how selectors are used in $[prodname] policy. diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoq/host.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoq/host.mdx deleted file mode 100644 index c784301419..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoq/host.mdx +++ /dev/null @@ -1,62 +0,0 @@ ---- -description: Command to list endpoints, policies, and profiles on the selected host. ---- - -# calicoq host - -`calicoq host ` shows you the endpoints that are hosted on -`` and all the $[prodname] policies and profiles that relate to those -endpoints. It is equivalent to running `calicoq endpoint ` for -each `` that is hosted on ``. - -## Options - -``` --r --hide-rule-matches Don't show the list of policies and profiles whose - rule selectors match each endpoint as an allowed or - disallowed source/destination. - --s --hide-selectors Don't show the detailed selector expressions involved - (that cause each displayed profile or policy to match - each endpoint). - --o --output= Set the output format. Should be one of yaml, json, or - ps. If nothing is set, defaults to ps. -``` - -## Example - -``` -DATASTORE_TYPE=kubernetes KUBECONFIG=/home/user/.kube/config calicoq host tigera-kubetest-01 -``` - -Sample output follows. 
- -``` -Policies and profiles for each endpoint on host "tigera-kubetest-01": - -Workload endpoint k8s/tigera-prometheus.alertmanager-calico-node-alertmanager-0/eth0 - Policies: - Profiles: - Profile "ns.projectcalico.org/tigera-prometheus" - -Workload endpoint k8s/kube-system.kube-dns-3913472980-fgf9m/eth0 - Policies: - Profiles: - Profile "ns.projectcalico.org/kube-system" - -Workload endpoint k8s/policy-demo.nginx-2371676037-j2vmh/eth0 - Policies: - Profiles: - Profile "ns.projectcalico.org/policy-demo" - Rule matches: - Policy "policy-demo/abcdefghijklmnopqrstuvwxyz" outbound rule 1 destination match; selector "projectcalico.org/namespace == 'policy-demo'" -``` - -## See also - -- [calicoq endpoint](endpoint.mdx) for - the related `calicoq endpoint` command. -- [NetworkPolicy](../../resources/networkpolicy.mdx) and - [GlobalNetworkPolicy](../../resources/globalnetworkpolicy.mdx) - for more information about the $[prodname] policy model. diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoq/index.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoq/index.mdx deleted file mode 100644 index f71b0a9308..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoq/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: CLI to check Calico Enterprise security policies. -hide_table_of_contents: true ---- - -# calicoq - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoq/overview.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoq/overview.mdx deleted file mode 100644 index ac29036e9a..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoq/overview.mdx +++ /dev/null @@ -1,116 +0,0 @@ ---- -description: CLI to verify that your security policies are configured as intended. ---- - -# calicoq - -`calicoq` is the $[prodname] policy query utility. It is a command line tool that -makes it easy to check your $[prodname] security policies. -See [Installing calicoq](../../../operations/clis/calicoq/index.mdx) for -how to download and install `calicoq`. - -## Datastore configuration - -`calicoq` works by querying the $[prodname] datastore. For this configuration calicoq uses exactly the same -setup as `calicoctl`, which means that: - -- You can create a YAML or JSON config file, and specify that with `calicoq`'s - `-c` option. This is the best option if you have already created that file - for use with `calicoctl`. - -- Or you can set environment variables to specify the datastore type and - location: `DATASTORE_TYPE` and so on. - -For more detail, see -[Configuring calicoq](../../../operations/clis/calicoq/configure/index.mdx). - -## Commands - -The `calicoq` command line interface provides a number of policy inspection -commands to allow you to confirm that your security policies are configured -as intended. - -- The [endpoint](endpoint.mdx) - command shows you the $[prodname] policies and profiles that relate to specified - endpoints. -- The [eval](eval.mdx) command - displays the endpoints that a selector selects. -- The [host](host.mdx) command - displays the policies and profiles that are relevant to all endpoints on a - given host. -- The [policy](policy.mdx) - command shows the endpoints that are relevant to a given policy. -- The [version](version.mdx) - command displays the version of the tool. 
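-
-As a quick orientation, the following commands (with illustrative selector, host,
-and policy names) show how these subcommands are typically invoked:
-
-```bash
-# List the endpoints matched by a label selector.
-calicoq eval "role=='frontend'"
-
-# Show the policies and profiles relevant to the endpoints hosted on one node
-# (hypothetical host name).
-calicoq host worker-node-1
-
-# Show the endpoints that a namespaced Kubernetes network policy applies to.
-calicoq policy demo-ns/knp.default.test-policy
-```
-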
- -## Overview of usage and options - -To access the help: - -``` -calicoq -h -``` - -The help output follows. - -``` -Calico query tool. - -Usage: - calicoq [--debug] [--config=] eval - calicoq [--debug] [--config=] policy [--hide-selectors|-s] [--hide-rule-matches|-r] - calicoq [--debug] [--config=] endpoint [--hide-selectors|-s] [--hide-rule-matches|-r] - calicoq [--debug] [--config=] host [--hide-selectors|-s] [--hide-rule-matches|-r] - calicoq [--debug] version - -Description: - The calicoq command line tool is used to check Calico security policies. - - calicoq eval is used to display the endpoints that are matched by . - - calicoq policy shows the endpoints that are relevant to the named policy, - comprising: - - the endpoints that the policy applies to (for which ingress or egress traffic is policed - according to the rules in that policy) - - the endpoints that match the policy's rule selectors (that are allowed or disallowed as data - sources or destinations). - - calicoq endpoint shows you the Calico policies and profiles that relate to endpoints - whose full ID includes . - - calicoq host shows you the endpoints that are hosted on and all the Calico - policies and profiles that relate to those endpoints. - -Notes: - When specifying a namespaced NetworkPolicy name, the namespace should also be included by - specifying the in the format "/". If the namespace is - omitted it is assumed the name refers to a GlobalNetworkPolicy. - - When a Calico policy is mapped from a Kubernetes resource, the name will be prefixed with - "knp.default". For example to query the Kubernetes NetworkPolicy "test-policy" in the Namespace - "demo-ns" use the following command: - calicoq policy demo-ns/knp.default.test-policy - - For an endpoint, the full Calico ID is "///". - In the Kubernetes case "" is always "k8s", "" is ".", and "" is always "eth0". - -Options: - -c --config= Path to the file containing connection - configuration in YAML or JSON format. - [default: /etc/calico/calicoctl.cfg] - - -r --hide-rule-matches Don't show the list of policies and profiles whose - rule selectors match the specified endpoint (or an - endpoint on the specified host) as an allowed or - disallowed source/destination. - - -s --hide-selectors Don't show the detailed selector expressions involved - (that cause each displayed policy or profile to apply to or match - various endpoints). - - -d --debug Log debugging information to stderr. - - -o --output= Output format. Either yaml, json, or ps. - [default: ps] -``` diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoq/policy.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoq/policy.mdx deleted file mode 100644 index 3165636154..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoq/policy.mdx +++ /dev/null @@ -1,114 +0,0 @@ ---- -description: Command to list endpoints for a policy. ---- - -# calicoq policy - -`calicoq policy ` shows the endpoints that are relevant to the -named policy, comprising: - -- the endpoints that the policy applies to (for which ingress or egress traffic - is policed according to the rules in that policy) - -- the endpoints that match the policy's rule selectors (that are allowed or - disallowed as data sources or destinations). - -(For example, if you have a database and a webserver, you might have a policy -that says `policy selector: role=='db'; rule: allow from role == 'webserver'`. 
- -Then the "policy applies to" selector is `role == 'db'` and the "policy's rule -selector" is `role == 'webserver'`.) - -It shows output that is equivalent to running `calicoq eval ` for the -policy's `spec.selector` and for any `selector` or `notSelector` expressions in -the `source` or `destination` of the policy's rules. - -## Options - -``` --r --hide-rule-matches Don't show the list of endpoints that match the - policy's rules as allowed or disallowed sources or - destinations. - --s --hide-selectors Don't show the detailed selector expressions involved - (that cause the policy to apply to or match various - endpoints). - --o --output= Set the output format. Should be one of yaml, json, or - ps. If nothing is set, defaults to ps. -``` - -## Examples - -In this example there are three endpoints in one namespace "namespace1". Policy "policy1" -applies to all of the endpoints in the namespace, and its rules reference -them as possible (allowed or denied) sources or destinations: - -``` -calicoq policy namespace1/policy1 -``` - -Sample output follows. - -``` -Policy "namespace1/policy1" applies to these endpoints: - Workload endpoint host1/k8s/namespace1.ns1wep1/eth0; selector "(projectcalico.org/namespace == 'namespace1') && projectcalico.org/namespace == 'namespace1'" - Workload endpoint host1/k8s/namespace1.ns1wep2/eth0; selector "(projectcalico.org/namespace == 'namespace1') && projectcalico.org/namespace == 'namespace1'" - Workload endpoint host1/k8s/namespace1.ns1wep3/eth0; selector "(projectcalico.org/namespace == 'namespace1') && projectcalico.org/namespace == 'namespace1'" - -Endpoints matching Policy "namespace1/policy1" rules: - Workload endpoint host1/k8s/namespace1.ns1wep1/eth0 - outbound rule 1 destination match; selector "(projectcalico.org/namespace == 'namespace1') && (projectcalico.org/namespace == 'namespace1')" - Workload endpoint host1/k8s/namespace1.ns1wep2/eth0 - outbound rule 1 destination match; selector "(projectcalico.org/namespace == 'namespace1') && (projectcalico.org/namespace == 'namespace1')" - Workload endpoint host1/k8s/namespace1.ns1wep3/eth0 - outbound rule 1 destination match; selector "(projectcalico.org/namespace == 'namespace1') && (projectcalico.org/namespace == 'namespace1')" -``` - -You can simplify that output by specifying `--hide-selectors`: - -``` -calicoq policy namespace1/policy1 --hide-selectors -``` - -Sample output follows. - -``` -Policy "namespace1/policy1" applies to these endpoints: - Workload endpoint host1/k8s/namespace1.ns1wep1/eth0 - Workload endpoint host1/k8s/namespace1.ns1wep2/eth0 - Workload endpoint host1/k8s/namespace1.ns1wep3/eth0 - -Endpoints matching Policy "namespace1/policy1" rules: - Workload endpoint host1/k8s/namespace1.ns1wep1/eth0 - outbound rule 1 destination match - Workload endpoint host1/k8s/namespace1.ns1wep2/eth0 - outbound rule 1 destination match - Workload endpoint host1/k8s/namespace1.ns1wep3/eth0 - outbound rule 1 destination match -``` - -If you only wanted to know the endpoints whose ingress or egress traffic is -policed according to that policy, you could simplify the output further by -adding `--hide-rule-matches`: - -``` -calicoq policy namespace1/policy1 --hide-rule-matches --hide-selectors -``` - -Sample output follows. 
- -``` -Policy "namespace1/policy1" applies to these endpoints: - Workload endpoint host1/k8s/namespace1.ns1wep1/eth0 - Workload endpoint host1/k8s/namespace1.ns1wep2/eth0 - Workload endpoint host1/k8s/namespace1.ns1wep3/eth0 -``` - -## See also - -- [calicoq eval](eval.mdx) for - more detail about the related `calico eval` command. -- [NetworkPolicy](../../resources/networkpolicy.mdx) and - [GlobalNetworkPolicy](../../resources/globalnetworkpolicy.mdx) - for more information about the $[prodname] selector-based policy model. diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoq/selectors.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoq/selectors.mdx deleted file mode 100644 index a478e89192..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoq/selectors.mdx +++ /dev/null @@ -1,30 +0,0 @@ ---- -description: Use selectors to match criteria in endpoints, policies, or profiles . ---- - -# calicoq and selectors - -The queries that you can make with `calicoq` all involve computing the matches -between endpoints and policies or profiles, via selectors. - -Selectors can be used in the following three contexts in $[prodname] -security policy definitions: - -- A selector can be used in the definition of each $[prodname] Policy object, - to specify the endpoints (pods) that that Policy applies to (`spec.selector`). - -- A selector can be used in each ingress Rule, to specify that the Rule only - matches packets sent from a particular set of endpoints (`source.selector`), - or packets from all endpoints except a particular set (`source.notSelector`). - -- A selector can be used in each egress Rule, to specify that the Rule only - matches packets sent to a particular set of endpoints - (`destination.selector`), or packets to all endpoints except a particular set - (`destination.notSelector`). - -Note: the use of selectors in $[prodname] policy is described in detail by -[NetworkPolicy](../../resources/networkpolicy.mdx) and -[GlobalNetworkPolicy](../../resources/globalnetworkpolicy.mdx). - -Kubernetes NetworkPolicy definitions are similar but less general: they do -not support egress rules or the `notSelector` options. diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoq/version.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoq/version.mdx deleted file mode 100644 index f1e1f4f0f4..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/calicoq/version.mdx +++ /dev/null @@ -1,23 +0,0 @@ ---- -description: Command to list version of the calicoq CLI. ---- - -# calicoq version - -`calicoq version` shows the version number of the calicoq tool, plus -information about the source code it was built from, and when that was. - -## Example - -```bash -calicoq version -``` - -Sample output follows. - -``` -Version: v2.0.0-cnx -Build date: 2018-01-10T21:40:16+0000 -Git tag ref: v2.0.0-cnx-rc1-12-g9157612 -Git commit: 9157612 -``` diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/index.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/index.mdx deleted file mode 100644 index 0d035958bf..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/clis/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: calicoctl and calicoq command line interfaces. 
-hide_table_of_contents: true ---- - -# CLIs - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/component-resources/configuration.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/component-resources/configuration.mdx deleted file mode 100644 index ced568373d..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/component-resources/configuration.mdx +++ /dev/null @@ -1,643 +0,0 @@ ---- -description: Details for configuring the Calico Enterprise CNI plugins. ---- - -# Configuring the Calico Enterprise CNI plugins - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - - - - -The $[prodname] CNI plugins do not need to be configured directly when installed by the operator. For a complete operator -configuration reference, see [the installation API reference documentation][installation]. - - - - -The $[prodname] CNI plugin is configured through the standard CNI -[configuration mechanism](https://github.com/containernetworking/cni/blob/master/SPEC.md#network-configuration) - -A minimal configuration file that uses $[prodname] for networking -and IPAM looks like this - -```json -{ - "name": "any_name", - "cniVersion": "0.1.0", - "type": "calico", - "ipam": { - "type": "calico-ipam" - } -} -``` - -If the `$[nodecontainer]` container on a node registered with a `NODENAME` other than the node hostname, the CNI plugin on this node must be configured with the same `nodename`: - -```json -{ - "name": "any_name", - "nodename": "", - "type": "calico", - "ipam": { - "type": "calico-ipam" - } -} -``` - -Additional configuration can be added as detailed below. - -## Generic - -### Datastore type - -The $[prodname] CNI plugin supports the following datastore: - -- `datastore_type` (kubernetes) - -### Logging - -Logging is always to `stderr`. Logs are also written to `/var/log/calico/cni/cni.log` on each host by default. - -Logging can be configured using the following options in the netconf. - -| Option name | Default | Description | -| -------------------- | ----------------------------- | --------------------------------------------------------------------------------------------------------- | -| `log_level` | INFO | Logging level. Allowed levels are `ERROR`, `WARNING`, `INFO`, and `DEBUG`. | -| `log_file_path` | `/var/log/calico/cni/cni.log` | Location on each host to write CNI log files to. Logging to file can be disabled by removing this option. | -| `log_file_max_size` | 100 | Max file size in MB log files can reach before they are rotated. | -| `log_file_max_age` | 30 | Max age in days that old log files will be kept on the host before they are removed. | -| `log_file_max_count` | 10 | Max number of rotated log files allowed on the host before they are cleaned up. | - -```json -{ - "name": "any_name", - "cniVersion": "0.1.0", - "type": "calico", - "log_level": "DEBUG", - "log_file_path": "/var/log/calico/cni/cni.log", - "ipam": { - "type": "calico-ipam" - } -} -``` - -### IPAM - -When using $[prodname] IPAM, the following flags determine what IP addresses should be assigned. NOTE: These flags are strings and not boolean values. - -- `assign_ipv4` (default: `"true"`) -- `assign_ipv6` (default: `"false"`) - -A specific IP address can be chosen by using [`CNI_ARGS`](https://github.com/appc/cni/blob/master/SPEC.md#parameters) and setting `IP` to the desired value. 
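-
-For example, a runtime integration that invokes the plugin directly could request a specific address by passing it in the `CNI_ARGS` environment variable; the address below is illustrative, and on Kubernetes the per-pod annotations described later in this document are the usual way to do this:
-
-```bash
-# Illustrative only: ask the plugin to assign 192.168.0.112 to this container.
-CNI_ARGS="IP=192.168.0.112"
-```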
- -By default, $[prodname] IPAM will assign IP addresses from all the available IP pools. - -Optionally, the list of possible IPv4 and IPv6 pools can also be specified via the following properties: - -- `ipv4_pools`: An array of CIDR strings or pool names. (e.g., `"ipv4_pools": ["10.0.0.0/24", "20.0.0.0/16", "default-ipv4-ippool"]`) -- `ipv6_pools`: An array of CIDR strings or pool names. (e.g., `"ipv6_pools": ["2001:db8::1/120", "namedpool"]`) - -Example CNI config: - -```json -{ - "name": "any_name", - "cniVersion": "0.1.0", - "type": "calico", - "ipam": { - "type": "calico-ipam", - "assign_ipv4": "true", - "assign_ipv6": "true", - "ipv4_pools": ["10.0.0.0/24", "20.0.0.0/16", "default-ipv4-ippool"], - "ipv6_pools": ["2001:db8::1/120", "default-ipv6-ippool"] - } -} -``` - -:::note - -`ipv6_pools` will be respected only when `assign_ipv6` is set to `"true"`. - -::: - -Any IP pools specified in the CNI config must have already been created. It is an error to specify IP pools in the config that do not exist. - -### Container settings - -The following options allow configuration of settings within the container namespace. - -- allow_ip_forwarding (default is `false`) - -```json -{ - "name": "any_name", - "cniVersion": "0.1.0", - "type": "calico", - "ipam": { - "type": "calico-ipam" - }, - "container_settings": { - "allow_ip_forwarding": true - } -} -``` - -### Readiness Gates - -The following option makes CNI plugin wait for specified endpoint(s) to be ready before configuring pod networking. - -- `readiness_gates` - -This is an optional property that takes an array of URLs. Each URL specified will be polled for readiness and pod networking will continue startup once all readiness_gates are ready. - -Example CNI config: - -```json -{ - "name": "any_name", - "cniVersion": "0.1.0", - "type": "calico", - "ipam": { - "type": "calico-ipam" - }, - "readiness_gates": ["http://localhost:9099/readiness", "http://localhost:8888/status"] -} -``` - -## Kubernetes specific - -When using the $[prodname] CNI plugin with Kubernetes, the plugin must be able to access the Kubernetes API server to find the labels assigned to the Kubernetes pods. The recommended way to configure access is through a `kubeconfig` file specified in the `kubernetes` section of the network config. e.g. - -```json -{ - "name": "any_name", - "cniVersion": "0.1.0", - "type": "calico", - "kubernetes": { - "kubeconfig": "/path/to/kubeconfig" - }, - "ipam": { - "type": "calico-ipam" - } -} -``` - -As a convenience, the API location can also be configured directly, e.g. - -```json -{ - "name": "any_name", - "cniVersion": "0.1.0", - "type": "calico", - "kubernetes": { - "k8s_api_root": "http://127.0.0.1:8080" - }, - "ipam": { - "type": "calico-ipam" - } -} -``` - -### Enabling Kubernetes policy - -If you wish to use the Kubernetes `NetworkPolicy` resource then you must set a policy type in the network config. -There is a single supported policy type, `k8s`. When set, -you must also run `$[imageNames.kubeControllers]` with the policy, profile, and workloadendpoint controllers enabled. - -```json -{ - "name": "any_name", - "cniVersion": "0.1.0", - "type": "calico", - "policy": { - "type": "k8s" - }, - "kubernetes": { - "kubeconfig": "/path/to/kubeconfig" - }, - "ipam": { - "type": "calico-ipam" - } -} -``` - -When using `type: k8s`, the $[prodname] CNI plugin requires read-only Kubernetes API access to the `Pods` resource in all namespaces. 
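-
-The exact RBAC configuration depends on the identity used by the plugin's kubeconfig, but as a minimal sketch, a role granting that read-only access could look like the following (the name is illustrative):
-
-```yaml
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
-  # Illustrative name; bind this role to the identity the CNI plugin's kubeconfig uses.
-  name: calico-cni-plugin-pod-reader
-rules:
-  - apiGroups: ['']
-    resources: ['pods']
-    verbs: ['get', 'list', 'watch']
-```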
- - - - - -### Enabling policy setup timeout - -The $[prodname] CNI plugin can be configured to prevent new pods from starting their containers until one of the following conditions occurs: - -- The pod's policy has finished being programmed. -- A specified amount of time has elapsed. - -By enabling this feature, you can avoid errors that can occur when a pod tries to start before the pod's policy is programmed by its host. - - - - -The policy setup timeout can be configured by setting the `linuxPolicySetupTimeoutSeconds` field in the [calicoNetwork spec](../installation/api#caliconetworkspec) of the default `operator.tigera.io/v1/installation` resource. - -The following example configures the CNI to delay a pod from starting its containers for up to 10 seconds, or until the pod's data plane has been programmed: - -```yaml -kind: Installation -apiVersion: operator.tigera.io/v1 -metadata: - name: default -spec: - calicoNetwork: - linuxPolicySetupTimeoutSeconds: 10 -``` - - - - -The policy setup timeout can be configured by setting the `policy_setup_timeout_seconds` option in the CNI config. - -Example CNI config: - -```json -{ - "name": "any_name", - "cniVersion": "0.1.0", - "type": "calico", - "policy_setup_timeout_seconds": 10, - "ipam": { - "type": "calico-ipam" - } -} -``` - -The $[prodname] CNI plugin reads Felix's `endpoint-status` directory to determine when the data plane has been programmed for a pod. -If left unset, the $[prodname] CNI plugin will look for the directory at `/var/run/calico/endpoint-status`. The path `/var/run/calico` is commonly mounted to the $[prodname] DaemonSet, meaning it can be written to by the Felix container, and read by the (host-namespace) $[prodname] CNI plugin. -To enable the `endpoint-status` directory, and adjust which directory of the Felix container it is written to, the `endpointStatusPathPrefix` option must be configured for [Felix](node/felix/configuration.mdx). - -To adjust where the $[prodname] CNI plugin looks for the `endpoint-status` directory in the host filesystem, you must set the `endpoint_status_dir` option. - -Example CNI config: - -```json -{ - "name": "any_name", - "cniVersion": "0.1.0", - "type": "calico", - "policy_setup_timeout_seconds": 10, - "endpoint_status_dir": "/path/to/endpoint-status", - "ipam": { - "type": "calico-ipam" - } -} -``` - - - -## IPAM - -### Using host-local IPAM - -Calico can be configured to use [host-local IPAM](https://www.cni.dev/plugins/current/ipam/host-local/) instead of the default `calico-ipam`. Host -local IPAM uses a pre-determined CIDR per-host, and stores allocations locally on each node. This is in contrast to Calico IPAM, which dynamically -allocates blocks of addresses and single addresses alike in response to cluster needs. - -Host local IPAM is generally only used on clusters where integration with the Kubernetes [route controller](https://kubernetes.io/docs/concepts/architecture/cloud-controller/#route-controller) is necessary. -Note that some Calico features - such as the ability to request a specific address or pool for a pod - require Calico IPAM to function, and will not work with host-local IPAM enabled. - - - - -The `host-local` IPAM plugin can be configured by setting the `Spec.CNI.IPAM.Plugin` field to `HostLocal` on the [operator.tigera.io/Installation](../installation/api.mdx#installation) API. 
- -Calico will use the `host-local` IPAM plugin to allocate IPv4 addresses from the node's IPv4 pod CIDR if there is an IPv4 pool configured in `Spec.IPPools`, and an IPv6 address from the node's IPv6 pod CIDR if -there is an IPv6 pool configured in `Spec.IPPools`. - -The following example configures Calico to assign dual-stack IPs to pods using the host-local IPAM plugin. - -```yaml -kind: Installation -apiVersion: operator.tigera.io/v1 -metadata: - name: default -spec: - calicoNetwork: - ipPools: - - cidr: 192.168.0.0/16 - - cidr: 2001:db8::/64 - cni: - type: Calico - ipam: - type: HostLocal -``` - - - - -When using the CNI `host-local` IPAM plugin, two special values - `usePodCidr` and `usePodCidrIPv6` - are allowed for the subnet field (either at the top-level, or in a "range"). This tells the plugin to determine the subnet to use from the Kubernetes API based on the Node.podCIDR field. $[prodname] does not use the `gateway` field of a range so that field is not required and it will be ignored if present. - -:::note - -`usePodCidr` and `usePodCidrIPv6` can only be used as the value of the `subnet` field, it cannot be used in -`rangeStart` or `rangeEnd` so those values are not useful if `subnet` is set to `usePodCidr`. - -::: - -$[prodname] supports the host-local IPAM plugin's `routes` field as follows: - -- If there is no `routes` field, $[prodname] will install a default `0.0.0.0/0`, and/or `::/0` route into the pod (depending on whether the pod has an IPv4 and/or IPv6 address). - -- If there is a `routes` field then $[prodname] will program _only_ the routes in the routes field into the pod. Since $[prodname] implements a point-to-point link into the pod, the `gw` field is not required and it will be ignored if present. All routes that $[prodname] installs will have $[prodname]'s link-local IP as the next hop. - -$[prodname] CNI plugin configuration: - -- `node_name` - - The node name to use when looking up the CIDR value (defaults to current hostname) - -```json -{ - "name": "any_name", - "cniVersion": "0.1.0", - "type": "calico", - "kubernetes": { - "kubeconfig": "/path/to/kubeconfig", - "node_name": "node-name-in-k8s" - }, - "ipam": { - "type": "host-local", - "ranges": [[{ "subnet": "usePodCidr" }], [{ "subnet": "usePodCidrIPv6" }]], - "routes": [{ "dst": "0.0.0.0/0" }, { "dst": "2001:db8::/96" }] - } -} -``` - -When making use of the `usePodCidr` or `usePodCidrIPv6` options, the $[prodname] CNI plugin requires read-only Kubernetes API access to the `Nodes` resource. - -#### Configuring node and typha - -When using `host-local` IPAM with the Kubernetes API datastore, you must configure both $[nodecontainer] and the Typha deployment to use the `Node.podCIDR` field by setting the environment variable `USE_POD_CIDR=true` in each. - - - - -### Using Kubernetes annotations - -#### Specifying IP pools on a per-namespace or per-pod basis - -In addition to specifying IP pools in the CNI config as discussed above, $[prodname] IPAM supports specifying IP pools per-namespace or per-pod using the following [Kubernetes annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/). - -- `cni.projectcalico.org/ipv4pools`: A list of configured IPv4 Pools from which to choose an address for the pod. - - Example: - - ```yaml - annotations: - 'cni.projectcalico.org/ipv4pools': '["default-ipv4-ippool"]' - ``` - -- `cni.projectcalico.org/ipv6pools`: A list of configured IPv6 Pools from which to choose an address for the pod. 
- - Example: - - ```yaml - annotations: - 'cni.projectcalico.org/ipv6pools': '["2001:db8::1/120"]' - ``` - -If provided, these IP pools will override any IP pools specified in the CNI config. - -:::note - -This requires the IP pools to exist before `ipv4pools` or -`ipv6pools` annotations are used. Requesting a subset of an IP pool -is not supported. IP pools requested in the annotations must exactly -match a configured [IPPool](../resources/ippool.mdx) resource. - -::: - -:::note - -The $[prodname] CNI plugin supports specifying an annotation per namespace. -If both the namespace and the pod have this annotation, the pod information will be used. -Otherwise, if only the namespace has the annotation the annotation of the namespace will -be used for each pod in it. - -::: - -#### Requesting a specific IP address - -You can also request a specific IP address through [Kubernetes annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) with $[prodname] IPAM. -There are two annotations to request a specific IP address: - -- `cni.projectcalico.org/ipAddrs`: A list of IPv4 and/or IPv6 addresses to assign to the Pod. The requested IP addresses will be assigned from $[prodname] IPAM and must exist within a configured IP pool. - - Example: - - ```yaml - annotations: - 'cni.projectcalico.org/ipAddrs': '["192.168.0.1"]' - ``` - -- `cni.projectcalico.org/ipAddrsNoIpam`: A list of IPv4 and/or IPv6 addresses to assign to the Pod, bypassing IPAM. Any IP conflicts and routing have to be taken care of manually or by some other system. - $[prodname] will only distribute routes to a Pod if its IP address falls within a $[prodname] IP pool using BGP mode. Calico will not distribute ipAddrsNoIpam routes when operating in VXLAN mode. If you assign an IP address that is not in a $[prodname] IP pool or if its IP address falls within a $[prodname] IP pool that uses VXLAN encapsulation, you must ensure that routing to that IP address is taken care of through another mechanism. - - Example: - - ```yaml - annotations: - 'cni.projectcalico.org/ipAddrsNoIpam': '["10.0.0.1"]' - ``` - - The ipAddrsNoIpam feature is disabled by default. It can be enabled in the feature_control section of the CNI network config: - - ```json - { - "name": "any_name", - "cniVersion": "0.1.0", - "type": "calico", - "ipam": { - "type": "calico-ipam" - }, - "feature_control": { - "ip_addrs_no_ipam": true - } - } - ``` - - :::caution - - This feature allows for the bypassing of network policy via IP spoofing. - Users should make sure the proper admission control is in place to prevent users from selecting arbitrary IP addresses. - - ::: - -:::note - -- The `ipAddrs` and `ipAddrsNoIpam` annotations can't be used together. -- You can only specify one IPv4/IPv6 or one IPv4 and one IPv6 address with these annotations. -- When `ipAddrs` or `ipAddrsNoIpam` is used with `ipv4pools` or `ipv6pools`, `ipAddrs` / `ipAddrsNoIpam` take priority. - -::: - -#### Requesting a floating IP - -You can request a floating IP address for a pod through [Kubernetes annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) with $[prodname]. - -:::note - -The specified address must belong to an IP Pool for advertisement to work properly. - -::: - -- `cni.projectcalico.org/floatingIPs`: A list of floating IPs which will be assigned to the pod's workload endpoint. 
- - Example: - - ```yaml - annotations: - 'cni.projectcalico.org/floatingIPs': '["10.0.0.1"]' - ``` - - The floatingIPs feature is disabled by default. It can be enabled in the feature_control section of the CNI network config: - - ```json - { - "name": "any_name", - "cniVersion": "0.1.0", - "type": "calico", - "ipam": { - "type": "calico-ipam" - }, - "feature_control": { - "floating_ips": true - } - } - ``` - - :::caution - - This feature can allow pods to receive traffic which may not have been intended for that pod. - Users should make sure the proper admission control is in place to prevent users from selecting arbitrary floating IP addresses. - - ::: - -### Using IP pools node selectors - -Nodes will only assign workload addresses from IP pools which select them. By -default, IP pools select all nodes, but this can be configured using the -`nodeSelector` field. Check out the [IP pool resource document](../resources/ippool.mdx) -for more details. - -Example: - -1. Create (or update) an IP pool that only allocates IPs for nodes where it - contains a label `rack=0`. - - ```bash - kubectl create -f -< diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/component-resources/kube-controllers/configuration.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/component-resources/kube-controllers/configuration.mdx deleted file mode 100644 index a5846410ab..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/component-resources/kube-controllers/configuration.mdx +++ /dev/null @@ -1,91 +0,0 @@ ---- -description: Calico Enterprise Kubernetes controllers monitor the Kubernetes API and perform actions based on cluster state. ---- - -# Configuring the Calico Enterprise Kubernetes controllers - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - -The $[prodname] Kubernetes controllers are deployed in a Kubernetes cluster. The different controllers monitor the Kubernetes API -and perform actions based on cluster state. - - - - -If you have installed Calico using the operator, see the [KubeControllersConfiguration](../../resources/kubecontrollersconfig.mdx) resource instead. - - - - -The controllers are primarily configured through environment variables. When running -the controllers as a Kubernetes pod, this is accomplished through the pod manifest `env` -section. - -## The $[imageNames.kubeControllers] container - -The `$[imageNames.kubeControllers]` container includes the following controllers: - -1. node controller: watches for the removal of Kubernetes nodes and removes corresponding data from $[prodname], and optionally watches for node updates to create and sync host endpoints for each node. -1. federation controller: watches Kubernetes services and endpoints locally and across all remote clusters, and programs - Kubernetes endpoints for any locally configured service that specifies a service federation selector annotation. - -### Configuring datastore access - -The datastore type can be configured via the `DATASTORE_TYPE` environment variable. Only supported value is `kubernetes`. - -#### kubernetes - -When running the controllers as a Kubernetes pod, Kubernetes API access is [configured automatically][in-cluster-config] and -no additional configuration is required. However, the controllers can also be configured to use an explicit [kubeconfig][kubeconfig] file override to -configure API access if needed. 
- -| Environment | Description | Schema | -| ------------ | ------------------------------------------------------------------ | ------ | -| `KUBECONFIG` | Path to a Kubernetes kubeconfig file mounted within the container. | path | - -### Other configuration - -:::note - -Whenever possible, prefer configuring the kube-controllers component using the [KubeControllersConfiguration](../../resources/kubecontrollersconfig.mdx) API resource, -Some configuration options may not be available through environment variables. - -::: - -The following environment variables can be used to configure the $[prodname] Kubernetes controllers. - -| Environment | Description | Schema | Default | -| --------------------- | --------------------------------------------------------------------------- | --------------------------------------------------------- | ----------------------------------------------------- | -| `DATASTORE_TYPE` | Which datastore type to use | etcdv3, kubernetes | kubernetes | -| `ENABLED_CONTROLLERS` | Which controllers to run | namespace, node, policy, serviceaccount, workloadendpoint | policy,namespace,serviceaccount,workloadendpoint,node | -| `LOG_LEVEL` | Minimum log level to be displayed. | debug, info, warning, error | info | -| `KUBECONFIG` | Path to a kubeconfig file for Kubernetes API access | path | -| `SYNC_NODE_LABELS` | When enabled, Kubernetes node labels will be copied to Calico node objects. | boolean | true | -| `AUTO_HOST_ENDPOINTS` | When set to enabled, automatically create a host endpoint for each node. | enabled, disabled | disabled | - -## About each controller - -### Node controller - -The node controller has several functions. - -- Garbage collects IP addresses. -- Automatically provisions host endpoints for Kubernetes nodes. - -### Federation controller - -The federation controller syncs Kubernetes federated endpoint changes to the $[prodname] datastore. -The controller must have read access to the Kubernetes API to monitor `Service` and `Endpoints` events, and must -also have write access to update `Endpoints`. - -The federation controller is disabled by default if `ENABLED_CONTROLLERS` is not explicitly specified. - -This controller is valid for all $[prodname] datastore types. For more details refer to the -[Configuring federated services](../../../multicluster/federation/services-controller.mdx) usage guide. 
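-
-Putting the configuration above together, a minimal sketch of the `env` section of the kube-controllers container might look like the following; the values are illustrative and taken from the table above:
-
-```yaml
-# Fragment of the kube-controllers container spec.
-env:
-  - name: DATASTORE_TYPE
-    value: kubernetes
-  - name: ENABLED_CONTROLLERS
-    value: node
-  - name: LOG_LEVEL
-    value: info
-  - name: AUTO_HOST_ENDPOINTS
-    value: enabled
-```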
- - - - -[in-cluster-config]: https://kubernetes.io/docs/tasks/access-application-cluster/access-cluster/#accessing-the-api-from-a-pod -[kubeconfig]: https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/ diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/component-resources/kube-controllers/index.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/component-resources/kube-controllers/index.mdx deleted file mode 100644 index 303cb85423..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/component-resources/kube-controllers/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: kube-controllers is a set of Kubernetes controllers for Calico -hide_table_of_contents: true ---- - -# kube-controllers - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/component-resources/kube-controllers/prometheus.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/component-resources/kube-controllers/prometheus.mdx deleted file mode 100644 index b3d1799ed7..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/component-resources/kube-controllers/prometheus.mdx +++ /dev/null @@ -1,86 +0,0 @@ ---- -description: Review metrics for the kube-controllers component if you are using Prometheus. ---- - -# Prometheus metrics - -kube-controllers can be configured to report a number of metrics through Prometheus. This reporting is enabled by default on port 9094. See the -[configuration reference](../../resources/kubecontrollersconfig.mdx) for how to change metrics reporting configuration (or disable it completely). - -## Metric reference - -#### kube-controllers specific - -kube-controllers exports a number of Prometheus metrics. The current set is as follows. Since some metrics -may be tied to particular implementation choices inside kube-controllers we can't make any hard guarantees that -metrics will persist across releases. However, we aim not to make any spurious changes to -existing metrics. - -| Metric Name | Labels | Description | -| ------------------------------------ | ------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `ipam_allocations_in_use` | ippool, node | Number of Calico IP allocations currently in use by a workload or interface. | -| `ipam_allocations_borrowed` | ippool, node | Number of Calico IP allocations currently in use where the allocation was borrowed from a block affine to another node. | -| `ipam_allocations_gc_candidates` | ippool, node | Number of Calico IP allocations currently marked by the GC as potential leaks. This metric returns to zero under normal GC operation. | -| `ipam_allocations_gc_reclamations` | ippool, node | Count of Calico IP allocations that have been reclaimed by the GC. Increase of this counter corresponds with a decrease of the candidates gauge under normal operation. | -| `ipam_blocks` | ippool, node | Number of IPAM blocks. | -| `ipam_ippool_size` | ippool | Number of IP addresses in the IP Pool CIDR. | -| `ipam_blocks_per_node` | node | Number of IPAM blocks, indexed by the node to which they have affinity. Prefer `ipam_blocks` for new integrations. 
| -| `ipam_allocations_per_node` | node | Number of Calico IP allocations, indexed by node on which the allocation was made. Prefer `ipam_allocations_in_use` for new integrations. | -| `ipam_allocations_borrowed_per_node` | node | Number of Calico IP allocations borrowed from a non-affine block, indexed by node on which the allocation was made. Prefer `ipam_allocations_borrowed` for new integrations. | -| `remote_cluster_connection_status` | remote_cluster_name | Status of the remote cluster connection in federation. Represented as numeric values 0 (NotConnecting) ,1 (Connecting), 2 (InSync), 3 (ReSyncInProgress), 4 (ConfigChangeRestartRequired), 5 (ConfigInComplete). | - -Labels can be interpreted as follows: - -| Label Name | Description | -| --------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `node` | For allocation metrics, the node on which the allocation was made. For block metrics, the node for which the block has affinity. If the block has no affinity, value will be `no_affinity`. | -| `ippool` | The IP Pool that the IPAM block occupies. If there is no IP Pool which matches the block, value will be `no_ippool`. | -| `remote_cluster_name` | Name of the remote cluster in federation. | - -Prometheus metrics are self-documenting, with metrics turned on, `curl` can be used to list the -metrics along with their help text and type information. - -```bash -curl -s http://localhost:9094/metrics | head -``` - -#### CPU / memory metrics - -kube-controllers also exports the default set of metrics that Prometheus makes available. Currently, those -include: - -| Name | Description | -| -------------------------------------------- | ------------------------------------------------------------------ | -| `go_gc_duration_seconds` | A summary of the GC invocation durations. | -| `go_goroutines` | Number of goroutines that currently exist. | -| `go_memstats_alloc_bytes` | Number of bytes allocated and still in use. | -| `go_memstats_alloc_bytes_total` | Total number of bytes allocated, even if freed. | -| `go_memstats_buck_hash_sys_bytes` | Number of bytes used by the profiling bucket hash table. | -| `go_memstats_frees_total` | Total number of frees. | -| `go_memstats_gc_sys_bytes` | Number of bytes used for garbage collection system metadata. | -| `go_memstats_heap_alloc_bytes` | Number of heap bytes allocated and still in use. | -| `go_memstats_heap_idle_bytes` | Number of heap bytes waiting to be used. | -| `go_memstats_heap_inuse_bytes` | Number of heap bytes that are in use. | -| `go_memstats_heap_objects` | Number of allocated objects. | -| `go_memstats_heap_released_bytes_total` | Total number of heap bytes released to OS. | -| `go_memstats_heap_sys_bytes` | Number of heap bytes obtained from system. | -| `go_memstats_last_gc_time_seconds` | Number of seconds since 1970 of last garbage collection. | -| `go_memstats_lookups_total` | Total number of pointer lookups. | -| `go_memstats_mallocs_total` | Total number of mallocs. | -| `go_memstats_mcache_inuse_bytes` | Number of bytes in use by mcache structures. | -| `go_memstats_mcache_sys_bytes` | Number of bytes used for mcache structures obtained from system. | -| `go_memstats_mspan_inuse_bytes` | Number of bytes in use by mspan structures. | -| `go_memstats_mspan_sys_bytes` | Number of bytes used for mspan structures obtained from system. 
| -| `go_memstats_next_gc_bytes` | Number of heap bytes when next garbage collection will take place. | -| `go_memstats_other_sys_bytes` | Number of bytes used for other system allocations. | -| `go_memstats_stack_inuse_bytes` | Number of bytes in use by the stack allocator. | -| `go_memstats_stack_sys_bytes` | Number of bytes obtained from system for stack allocator. | -| `go_memstats_sys_bytes` | Number of bytes obtained by system. Sum of all system allocations. | -| `process_cpu_seconds_total` | Total user and system CPU time spent in seconds. | -| `process_max_fds` | Maximum number of open file descriptors. | -| `process_open_fds` | Number of open file descriptors. | -| `process_resident_memory_bytes` | Resident memory size in bytes. | -| `process_start_time_seconds` | Start time of the process since unix epoch in seconds. | -| `process_virtual_memory_bytes` | Virtual memory size in bytes. | -| `promhttp_metric_handler_requests_in_flight` | Current number of scrapes being served. | -| `promhttp_metric_handler_requests_total` | Total number of scrapes by HTTP status code. | diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/component-resources/node/configuration.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/component-resources/node/configuration.mdx deleted file mode 100644 index b43262ddbf..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/component-resources/node/configuration.mdx +++ /dev/null @@ -1,314 +0,0 @@ ---- -description: Customize cnx-node using environment variables. ---- - -# Configuring cnx-node - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - -The `$[nodecontainer]` container is deployed to every node (on Kubernetes, by a DaemonSet), and runs three internal daemons: - -- Felix, the Calico daemon that runs on every node and provides endpoints. -- BIRD, the BGP daemon that distributes routing information to other nodes. -- confd, a daemon that watches the Calico datastore for config changes and updates BIRD’s config files. - -For manifest-based installations, `$[nodecontainer]` is primarily configured through environment -variables, typically set in the deployment manifest. Individual nodes may also be updated through the Node -custom resource. `$[nodecontainer]` can also be configured through the Calico Operator. - -The rest of this page lists the available configuration options, and is followed by specific considerations for -various settings. - - - - -`$[nodecontainer]` does not need to be configured directly when installed by the operator. For a complete operator -configuration reference, see [the installation API reference documentation][installation]. - - - - -## Environment variables - -### Configuring the default IP pool(s) - -Calico uses IP pools to configure how addresses are allocated to pods, and how networking works for certain -sets of addresses. You can see the full schema for IP pools here. - -`$[nodecontainer]` can be configured to create a default IP pool for you, but only if none already -exist in the cluster. The following options control the parameters on the created pool. 
- -| Environment | Description | Schema | -| ---------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | -| CALICO_IPV4POOL_CIDR | The IPv4 Pool to create if none exists at start up. It is invalid to define this variable and NO_DEFAULT_POOLS. [Default: First not used in locally of (192.168.0.0/16, 172.16.0.0/16, .., 172.31.0.0/16) ] | IPv4 CIDR | -| CALICO_IPV4POOL_BLOCK_SIZE | Block size to use for the IPv4 Pool created at startup. Block size for IPv4 should be in the range 20-32 (inclusive) [Default: `26`] | int | -| CALICO_IPV4POOL_IPIP | IPIP Mode to use for the IPv4 Pool created at start up. If set to a value other than `Never`, `CALICO_IPV4POOL_VXLAN` should not be set. [Default: `Always`] | Always, CrossSubnet, Never ("Off" is also accepted as a synonym for "Never") | -| CALICO_IPV4POOL_VXLAN | VXLAN Mode to use for the IPv4 Pool created at start up. If set to a value other than `Never`, `CALICO_IPV4POOL_IPIP` should not be set. [Default: `Never`] | Always, CrossSubnet, Never | -| CALICO_IPV4POOL_NAT_OUTGOING | Controls NAT Outgoing for the IPv4 Pool created at start up. [Default: `true`] | boolean | -| CALICO_IPV4POOL_NODE_SELECTOR | Controls the NodeSelector for the IPv4 Pool created at start up. [Default: `all()`] | [selector](../../resources/ippool.mdx#node-selector) | -| CALICO_IPV6POOL_CIDR | The IPv6 Pool to create if none exists at start up. It is invalid to define this variable and NO_DEFAULT_POOLS. [Default: ``] | IPv6 CIDR | -| CALICO_IPV6POOL_BLOCK_SIZE | Block size to use for the IPv6 POOL created at startup. Block size for IPv6 should be in the range 116-128 (inclusive) [Default: `122`] | int | -| CALICO_IPV6POOL_VXLAN | VXLAN Mode to use for the IPv6 Pool created at start up. [Default: `Never`] | Always, CrossSubnet, Never | -| CALICO_IPV6POOL_NAT_OUTGOING | Controls NAT Outgoing for the IPv6 Pool created at start up. [Default: `false`] | boolean | -| CALICO_IPV6POOL_NODE_SELECTOR | Controls the NodeSelector for the IPv6 Pool created at start up. [Default: `all()`] | [selector](../../resources/ippool.mdx#node-selector) | -| CALICO_IPV4POOL_DISABLE_BGP_EXPORT | Disable exporting routes over BGP for the IPv4 Pool created at start up. [Default: `false`] | boolean | -| CALICO_IPV6POOL_DISABLE_BGP_EXPORT | Disable exporting routes over BGP for the IPv6 Pool created at start up. [Default: `false`] | boolean | -| NO_DEFAULT_POOLS | Prevents $[prodname] from creating a default pool if one does not exist. [Default: `false`] | boolean | - -### Configuring BGP Networking - -BGP configuration for Calico nodes is normally configured through the [Node](../../resources/node.mdx), [BGPConfiguration](../../resources/bgpconfig.mdx), and [BGPPeer](../../resources/bgppeer.mdx) resources. -`$[nodecontainer]` also exposes some options to allow setting certain fields on these objects, as described -below. 
- -| Environment | Description | Schema | -| ------------------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------- | -| NODENAME | A unique identifier for this host. See [node name determination](#node-name-determination) for more details. | lowercase string | -| IP | The IPv4 address to assign this host or detection behavior at startup. Refer to [IP setting](#ip-setting) for the details of the behavior possible with this field. | IPv4 | -| IP6 | The IPv6 address to assign this host or detection behavior at startup. Refer to [IP setting](#ip-setting) for the details of the behavior possible with this field. | IPv6 | -| IP_AUTODETECTION_METHOD | The method to use to autodetect the IPv4 address for this host. This is only used when the IPv4 address is being autodetected. See [IP Autodetection methods](#ip-autodetection-methods) for details of the valid methods. [Default: `first-found`] | string | -| IP6_AUTODETECTION_METHOD | The method to use to autodetect the IPv6 address for this host. This is only used when the IPv6 address is being autodetected. See [IP Autodetection methods](#ip-autodetection-methods) for details of the valid methods. [Default: `first-found`] | string | -| AS | The AS number for this node. When specified, the value is saved in the node resource configuration for this host, overriding any previously configured value. When omitted, if an AS number has been previously configured in the node resource, that AS number is used for the peering. When omitted, if an AS number has not yet been configured in the node resource, the node will use the global value (see [example modifying Global BGP settings](../../../networking/configuring/bgp.mdx) for details.) | int | -| CALICO_ROUTER_ID | Sets the `router id` to use for BGP if no IPv4 address is set on the node. For an IPv6-only system, this may be set to `hash`. It then uses the hash of the nodename to create a 4 byte router id. See note below. [Default: ``] | string | -| CALICO_K8S_NODE_REF | The name of the corresponding node object in the Kubernetes API. When set, used for correlating this node with events from the Kubernetes API. | string | - -### Configuring Datastore Access - -| Environment | Description | Schema | -| -------------- | ------------------------------------------ | ------------------ | -| DATASTORE_TYPE | Type of datastore. [Default: `kubernetes`] | kubernetes, etcdv3 | - -#### Configuring Kubernetes Datastore Access - -| Environment | Description | Schema | -| ---------------- | ------------------------------------------------------------------------------ | ------ | -| KUBECONFIG | When using the Kubernetes datastore, the location of a kubeconfig file to use. | string | -| K8S_API_ENDPOINT | Location of the Kubernetes API. Not required if using kubeconfig. | string | -| K8S_CERT_FILE | Location of a client certificate for accessing the Kubernetes API. | string | -| K8S_KEY_FILE | Location of a client key for accessing the Kubernetes API. | string | -| K8S_CA_FILE | Location of a CA for accessing the Kubernetes API. 
| string | - -:::note - -When $[prodname] is configured to use the Kubernetes API as the datastore, the environments -used for BGP configuration are ignored—this includes selection of the node AS number (AS) -and all of the IP selection options (IP, IP6, IP_AUTODETECTION_METHOD, IP6_AUTODETECTION_METHOD). - -::: - -### Configuring Logging - -| Environment | Description | Schema | -| --------------------------- | -------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------- | -| CALICO_DISABLE_FILE_LOGGING | Disables logging to file. [Default: "false"] | string | -| CALICO_STARTUP_LOGLEVEL | The log severity above which startup `$[nodecontainer]` logs are sent to the stdout. [Default: `ERROR`] | DEBUG, INFO, WARNING, ERROR, CRITICAL, or NONE (case-insensitive) | - -### Configuring CNI Plugin - -`$[nodecontainer]` has a few options that are configurable based on the CNI plugin and CNI plugin -configuration used on the cluster. - -| Environment | Description | Schema | -| ----------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- | -| USE_POD_CIDR | Use the Kubernetes `Node.Spec.PodCIDR` field when using host-local IPAM. Requires Kubernetes API datastore. This field is required when using the Kubernetes API datastore with host-local IPAM. [Default: false] | boolean | -| CALICO_MANAGE_CNI | Tells Calico to update the kubeconfig file at /host/etc/cni/net.d/calico-kubeconfig on credentials change. [Default: true] | boolean | - -### Other Environment Variables - -| Environment | Description | Schema | -| ------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------- | -| DISABLE_NODE_IP_CHECK | Skips checks for duplicate Node IPs. This can reduce the load on the cluster when a large number of Nodes are restarting. [Default: `false`] | boolean | -| WAIT_FOR_DATASTORE | Wait for connection to datastore before starting. If a successful connection is not made, node will shutdown. [Default: `false`] | boolean | -| CALICO_NETWORKING_BACKEND | The networking backend to use. In `bird` mode, Calico will provide BGP networking using the BIRD BGP daemon; VXLAN networking can also be used. In `vxlan` mode, only VXLAN networking is provided; BIRD and BGP are disabled. If set to `none` (also known as policy-only mode), both BIRD and VXLAN are disabled. [Default: `bird`] | bird, vxlan, none | -| CLUSTER_TYPE | Contains comma delimited list of indicators about this cluster. e.g. k8s, mesos, kubeadm, canal, bgp | string | - -## Appendix - -### Node name determination - -The `$[nodecontainer]` must know the name of the node on which it is running. The node name is used to -retrieve the [Node resource](../../resources/node.mdx) configured for this node if it exists, or to create a new node resource representing the node if it does not. It is -also used to associate the node with per-node [BGP configuration](../../resources/bgpconfig.mdx), [felix configuration](../../resources/felixconfig.mdx), and endpoints. 
- -When launched, the `$[nodecontainer]` container sets the node name according to the following order of precedence: - -1. The value specified in the `NODENAME` environment variable, if set. -1. The value specified in `/var/lib/calico/nodename`, if it exists. -1. The value specified in the `HOSTNAME` environment variable, if set. -1. The hostname as returned by the operating system, converted to lowercase. - -Once the node has determined its name, the value will be cached in `/var/lib/calico/nodename` for future use. - -For example, if given the following conditions: - -- `NODENAME=""` -- `/var/lib/calico/nodename` does not exist -- `HOSTNAME="host-A"` -- The operating system returns "host-A.internal.myorg.com" for the hostname - -$[nodecontainer] will use "host-a" for its name and will write the value in `/var/lib/calico/nodename`. If $[nodecontainer] -is then restarted, it will use the cached value of "host-a" read from the file on disk. - -### IP setting - -The IP (for IPv4) and IP6 (for IPv6) environment variables are used to set, -force autodetection, or disable auto detection of the address for the -appropriate IP version for the node. When the environment variable is set, -the address is saved in the -[node resource configuration](../../resources/node.mdx) -for this host, overriding any previously configured value. - -calico/node will attempt to detect subnet information from the host, and augment the provided address -if possible. - -#### IP setting special case values - -There are several special case values that can be set in the IP(6) environment variables, they are: - -- Not set or empty string: Any previously set address on the node - resource will be used. If no previous address is set on the node resource - the two versions behave differently: - - IP will do autodetection of the IPv4 address and set it on the node - resource. - - IP6 will not do autodetection. -- `autodetect`: Autodetection will always be performed for the IP address and - the detected address will overwrite any value configured in the node - resource. -- `none`: Autodetection will not be performed (this is useful to disable IPv4). - -### IP autodetection methods - -When $[prodname] is used for routing, each node must be configured with an IPv4 -address and/or an IPv6 address that will be used to route between -nodes. To eliminate node specific IP address configuration, the `$[nodecontainer]` -container can be configured to autodetect these IP addresses. In many systems, -there might be multiple physical interfaces on a host, or possibly multiple IP -addresses configured on a physical interface. In these cases, there are -multiple addresses to choose from and so autodetection of the correct address -can be tricky. - -The IP autodetection methods are provided to improve the selection of the -correct address, by limiting the selection based on suitable criteria for your -deployment. - -The following sections describe the available IP autodetection methods. - -#### first-found - -The `first-found` option enumerates all interface IP addresses and returns the -first valid IP address (based on IP version and type of address) on -the first valid interface. Certain known "local" interfaces -are omitted, such as the docker bridge. The order that both the interfaces -and the IP addresses are listed is system dependent. - -This is the default detection method. 
However, since this method only makes a -very simplified guess, it is recommended to either configure the node with a -specific IP address, or to use one of the other detection methods. - -e.g. - -``` -IP_AUTODETECTION_METHOD=first-found -IP6_AUTODETECTION_METHOD=first-found -``` - -#### kubernetes-internal-ip - -The `kubernetes-internal-ip` method will select the first internal IP address listed in the Kubernetes node's `Status.Addresses` field - -Example: - -``` -IP_AUTODETECTION_METHOD=kubernetes-internal-ip -IP6_AUTODETECTION_METHOD=kubernetes-internal-ip -``` - -#### can-reach=DESTINATION - -The `can-reach` method uses your local routing to determine which IP address -will be used to reach the supplied destination. Both IP addresses and domain -names may be used. - -Example using IP addresses: - -``` -IP_AUTODETECTION_METHOD=can-reach=8.8.8.8 -IP6_AUTODETECTION_METHOD=can-reach=2001:4860:4860::8888 -``` - -Example using domain names: - -``` -IP_AUTODETECTION_METHOD=can-reach=www.google.com -IP6_AUTODETECTION_METHOD=can-reach=www.google.com -``` - -#### interface=INTERFACE-REGEX - -The `interface` method uses the supplied interface [regular expression](https://pkg.go.dev/regexp) -to enumerate matching interfaces and to return the first IP address on -the first matching interface. The order that both the interfaces -and the IP addresses are listed is system dependent. - -Example with valid IP address on interface eth0, eth1, eth2 etc.: - -``` -IP_AUTODETECTION_METHOD=interface=eth.* -IP6_AUTODETECTION_METHOD=interface=eth.* -``` - -#### skip-interface=INTERFACE-REGEX - -The `skip-interface` method uses the supplied interface [regular expression](https://pkg.go.dev/regexp) -to exclude interfaces and to return the first IP address on the first -interface that does not match. The order that both the interfaces -and the IP addresses are listed is system dependent. - -Example with valid IP address on interface exclude enp6s0f0, eth0, eth1, eth2 etc.: - -``` -IP_AUTODETECTION_METHOD=skip-interface=enp6s0f0,eth.* -IP6_AUTODETECTION_METHOD=skip-interface=enp6s0f0,eth.* -``` - -#### cidr=CIDR - -The `cidr` method will select any IP address from the node that falls within the given CIDRs. For example: - -Example: - -``` -IP_AUTODETECTION_METHOD=cidr=10.0.1.0/24,10.0.2.0/24 -IP6_AUTODETECTION_METHOD=cidr=2001:4860::0/64 -``` - -### Node readiness - -The `calico/node` container supports an exec readiness endpoint. - -To access this endpoint, use the following command. - -```bash -docker exec calico-node /bin/calico-node [flag] -``` - -Substitute `[flag]` with one or more of the following. - -- `-bird-ready` -- `-bird6-ready` -- `-felix-ready` - -The BIRD readiness endpoint ensures that the BGP mesh is healthy by verifying that all BGP peers are established and -no graceful restart is in progress. If the BIRD readiness check is failing due to unreachable peers that are no longer -in the cluster, see [decommissioning a node](../../../operations/decommissioning-a-node.mdx). - -### Setting `CALICO_ROUTER_ID` for IPv6 only system - -Setting CALICO_ROUTER_ID to value `hash` will use a hash of the configured nodename for the router ID. This should only be used in IPv6-only systems with no IPv4 address to use for the router ID. Since each node chooses its own router ID in isolation, it is possible for two nodes to pick the same ID resulting in a clash. The probability of such a clash grows with cluster size so this feature should not be used in a large cluster (500+ nodes). 
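-
-For example, in a manifest-based installation this is set in the environment of the `$[nodecontainer]` container; a minimal sketch (only relevant for IPv6-only clusters) follows:
-
-```yaml
-# Fragment of the node container spec; only set this when the node has no IPv4 address.
-env:
-  - name: CALICO_ROUTER_ID
-    value: hash
-```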
- - - - - -[installation]: ../../installation/api.mdx diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/component-resources/node/felix/configuration.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/component-resources/node/felix/configuration.mdx deleted file mode 100644 index 81e16f8166..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/component-resources/node/felix/configuration.mdx +++ /dev/null @@ -1,447 +0,0 @@ ---- -description: Configure Felix, the daemon that runs on every machine that provides endpoints. ---- - -# Configuring Felix - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - - - - -If you have installed Calico using the operator, you cannot modify the environment provided to felix directly. To configure felix, see the [FelixConfiguration](../../../resources/felixconfig.mdx) resource instead. - - - - -:::note - -The following tables detail the configuration file and -environment variable parameters. For `FelixConfiguration` resource settings, -refer to [Felix Configuration Resource](../../../resources/felixconfig.mdx). - -::: - -Configuration for Felix is read from one of four possible locations, in order, as follows. - -1. Environment variables. -2. The Felix configuration file. -3. Host-specific `FelixConfiguration` resources (`node.`). -4. The global `FelixConfiguration` resource (`default`). - -The value of any configuration parameter is the value read from the -_first_ location containing a value. For example, if an environment variable -contains a value, it takes top precedence. - -If not set in any of these locations, most configuration parameters have -defaults, and it should be rare to have to explicitly set them. - -The full list of parameters which can be set is as follows. - -### General configuration - -| Configuration file parameter | Environment variable | Description | Schema | -| ----------------------------------- | ---------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------- | -| `DataplaneWatchdogTimeout` | `FELIX_DATAPLANEWATCHDOGTIMEOUT` | Deprecated: superseded by `HealthTimeoutOverrides`. Timeout before the main data plane goroutine is determined to have hung and Felix will report non-live and non-ready. Can be increased if the liveness check incorrectly fails (for example if Felix is running slowly on a heavily loaded system). [Default: `90`] | int | -| `AwsSrcDstCheck` | `FELIX_AWSSRCDSTCHECK` | Set the [source-destination-check](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_NAT_Instance.html#EIP_Disable_SrcDestCheck) when using AWS EC2 instances. 
Check [IAM role and profile configuration](../../../resources/felixconfig.mdx#aws-iam-rolepolicy-for-source-destination-check-configuration) for setting the necessary permission for this setting to work. [Default: `DoNothing`] | `DoNothing`, `Disable`, `Enable` | -| `AWSSecondaryIPSupport` | `FELIX_AWSSECONDARYIPSUPPORT` | Controls whether Felix will create secondary AWS ENIs for AWS-backed IP pools. This feature is documented in the [egress gateways on AWS guide](../../../../networking/egress/egress-gateway-aws.mdx). Should only be enabled on AWS. [Default: "Disabled"] | `Enabled`, `EnabledENIPerWorkload`, `Disabled` | -| `AWSSecondaryIPRoutingRulePriority` | `FELIX_AWSSECONDARYIPROUTINGRULEPRIORITY` | Controls the priority of the policy-based routing rules used to implement AWS-backed IP addresses. Should only be changed to avoid conflicts if your nodes have additional policy based routing rules. [Default: 101] | int | -| `AWSRequestTimeout` | `FELIX_AWSREQUESTTIMEOUT` Timeout used for communicating with the AWS API (seconds). [Default: "30"] | int | -| `DatastoreType` | `FELIX_DATASTORETYPE` | The datastore that Felix should read endpoints and policy information from. [Default: `etcdv3`] | `etcdv3`, `kubernetes` | -| `DeviceRouteSourceAddress` | `FELIX_DEVICEROUTESOURCEADDRESS` | IPv4 address to use as the source hint on device routes programmed by Felix [Default: No source hint is set on programmed routes and for local traffic from host to workload the source address will be chosen by the kernel.] | `` | -| `DeviceRouteSourceAddressIPv6` | `FELIX_DEVICEROUTESOURCEADDRESSIPV6` | IPv6 address to use as the source hint on device routes programmed by Felix [Default: No source hint is set on programmed routes and for local traffic from host to workload the source address will be chosen by the kernel.] | `` | -| `DeviceRouteProtocol` | `FELIX_DEVICEROUTEPROTOCOL` | This defines the route protocol added to programmed device routes. [Default: `RTPROT_BOOT`] | int | -| `DisableConntrackInvalidCheck` | `FELIX_DISABLECONNTRACKINVALIDCHECK` | Disable the dropping of packets that aren't either a valid handshake or part of an established connection. [Default: `false`] | boolean | -| `EndpointReportingDelaySecs` | `FELIX_ENDPOINTREPORTINGDELAYSECS` | Set the endpoint reporting delay between status check intervals, in seconds. Only used if endpoint reporting is enabled. [Default: `1`] | int | -| `EndpointReportingEnabled` | `FELIX_ENDPOINTREPORTINGENABLED` | Enable the endpoint status reporter. [Default: `false`] | boolean | -| `EndpointStatusPathPrefix` | `FELIX_ENDPOINTSTATUSPATHPREFIX` | Path to the directory where Felix should create the `endpoint-status` directory. Choosing a mounted volume such as `/var/run/calico` is recommended as the directory can then be monitored by host processes such as the Calico CNI. Leaving this field empty disables endpoint-status files. [Default: ""] | string | -| `ExternalNodesCIDRList` | `FELIX_EXTERNALNODESCIDRLIST` | Comma-delimited list of IPv4 or CIDR of external-non-calico-nodes from which IPIP traffic is accepted by calico-nodes. [Default: ""] | string | -| `FailsafeInboundHostPorts` | `FELIX_FAILSAFEINBOUNDHOSTPORTS` | Comma-delimited list of UDP/TCP/SCTP ports and CIDRs that Felix will allow incoming traffic to host endpoints on irrespective of the security policy. This is useful to avoid accidentally cutting off a host with incorrect configuration. For backwards compatibility, if the protocol is not specified, it defaults to "tcp". 
If a CIDR is not specified, it will allow traffic from all addresses. To disable all inbound host ports, use the value `none`. The default value allows ssh access, DHCP, BGP, etcd and the Kubernetes API. [Default: `tcp:22, udp:68, tcp:179, tcp:2379, tcp:2380, tcp:5473, tcp:6443, tcp:6666, tcp:6667`] | string | -| `FailsafeOutboundHostPorts` | `FELIX_FAILSAFEOUTBOUNDHOSTPORTS` | Comma-delimited list of UDP/TCP/SCTP ports and CIDRs that Felix will allow outgoing traffic from host endpoints to irrespective of the security policy. This is useful to avoid accidentally cutting off a host with incorrect configuration. For backwards compatibility, if the protocol is not specified, it defaults to "tcp". If a CIDR is not specified, it will allow traffic from all addresses. To disable all outbound host ports, use the value `none`. The default value opens etcd's standard ports to ensure that Felix does not get cut off from etcd as well as allowing DHCP, DNS, BGP and the Kubernetes API. [Default: `udp:53, udp:67, tcp:179, tcp:2379, tcp:2380, tcp:5473, tcp:6443, tcp:6666, tcp:6667`] | string | -| `FelixHostname` | `FELIX_FELIXHOSTNAME` | The hostname Felix reports to the plugin. Should be used if the hostname Felix autodetects is incorrect or does not match what the plugin will expect. [Default: `socket.gethostname()`] | string | -| `HealthEnabled` | `FELIX_HEALTHENABLED` | When enabled, exposes felix health information via an http endpoint. | boolean | -| `HealthHost` | `FELIX_HEALTHHOST` | The address on which Felix will respond to health requests. [Default: `localhost`] | string | -| `HealthPort` | `FELIX_HEALTHPORT` | The port on which Felix will respond to health requests. [Default: `9099`] | int | -| `HealthTimeoutOverrides` | `FELIX_HEALTHTIMEOUTOVERRIDES` | Allows the internal watchdog timeouts of individual subcomponents to be overridden; example: "InternalDataplaneMainLoop=30s,CalculationGraph=2m". This is useful for working around "false positive" liveness timeouts that can occur in particularly stressful workloads or if CPU is constrained. For a list of active subcomponents, see Felix's logs. [Default: ``] | Comma-delimited list of key/value pairs where the values are durations: `1s`, `10s`, `5m`, etc. | -| `IpInIpEnabled` | `FELIX_IPINIPENABLED` | Optional, you shouldn't need to change this setting as Felix calculates if IPIP should be enabled based on the existing IP Pools. When set, this overrides whether Felix should configure an IPinIP interface on the host. When explicitly disabled in FelixConfiguration, Felix will not clean up addresses from the `tunl0` interface (use this if you need to add addresses to that interface and don't want to have them removed). [Default: unset] | optional boolean | -| `IpInIpMtu` | `FELIX_IPINIPMTU` | The MTU to set on the IPIP tunnel device. Zero value means auto-detect. See [Configuring MTU](../../../../networking/configuring/mtu.mdx) [Default: `0`] | int | -| `IPForwarding` | `FELIX_IPFORWARDING` | _Added in: v3.19.3._ Controls whether Felix sets the host sysctls to enable IP forwarding. IP forwarding is required when using Calico for workload networking. This should be disabled only on hosts where Calico is used for host protection. | `Enabled` or `Disabled` | -| `IPv4VXLANTunnelAddr` | | IP address of the IPv4 VXLAN tunnel. This is system configured and should not be updated manually. | string | -| `LogFilePath` | `FELIX_LOGFILEPATH` | The full path to the Felix log. Set to `none` to disable file logging. 
[Default: `/var/log/calico/felix.log`] | string | -| `LogSeverityFile` | `FELIX_LOGSEVERITYFILE` | The log severity above which logs are sent to the log file. [Default: `Info`] | `Debug`, `Info`, `Warning`, `Error`, `Fatal` | -| `LogSeverityScreen` | `FELIX_LOGSEVERITYSCREEN` | The log severity above which logs are sent to the stdout. [Default: `Info`] | `Debug`, `Info`, `Warning`, `Error`, `Fatal` | -| `LogSeveritySys` | `FELIX_LOGSEVERITYSYS` | The log severity above which logs are sent to the syslog. Set to `none` for no logging to syslog. [Default: `Info`] | `Debug`, `Info`, `Warning`, `Error`, `Fatal` | -| `LogDebugFilenameRegex` | `FELIX_LOGDEBUGFILENAMEREGEX` | Controls which source code files have their Debug log output included in the logs. Only logs from files with names that match the given regular expression are included. The filter only applies to Debug level logs. [Default: `""`] | regex | -| `PolicySyncPathPrefix` | `FELIX_POLICYSYNCPATHPREFIX` | File system path where Felix notifies services of policy changes over Unix domain sockets. This is required only if you're configuring [L7 logs](../../../../observability/elastic/l7/configure.mdx), or [egress gateways](../../../../networking/egress/index.mdx). Set to `""` to disable. [Default: `""`] | string | -| `PrometheusGoMetricsEnabled` | `FELIX_PROMETHEUSGOMETRICSENABLED` | Set to `false` to disable Go runtime metrics collection, which the Prometheus client does by default. This reduces the number of metrics reported, reducing Prometheus load. [Default: `true`] | boolean | -| `PrometheusMetricsEnabled` | `FELIX_PROMETHEUSMETRICSENABLED` | Set to `true` to enable the Prometheus metrics server in Felix. [Default: `false`] | boolean | -| `PrometheusMetricsHost` | `FELIX_PROMETHEUSMETRICSHOST` | TCP network address that the Prometheus metrics server should bind to. [Default: `""`] | string | -| `PrometheusMetricsPort` | `FELIX_PROMETHEUSMETRICSPORT` | TCP port that the Prometheus metrics server should bind to. [Default: `9091`] | int | -| `PrometheusProcessMetricsEnabled` | `FELIX_PROMETHEUSPROCESSMETRICSENABLED` | Set to `false` to disable process metrics collection, which the Prometheus client does by default. This reduces the number of metrics reported, reducing Prometheus load. [Default: `true`] | boolean | -| `PrometheusWireguardMetricsEnabled` | `FELIX_PROMETHEUSWIREGUARDMETRICSENABLED` | Set to `false` to disable wireguard device metrics collection, which Felix does by default. [Default: `true`] | boolean | -| `RemoveExternalRoutes` | `FELIX_REMOVEEXTERNALROUTES` | Whether or not to remove device routes that have not been programmed by Felix. Disabling this will allow external applications to also add device routes. [Default: `true`] | bool | -| `ReportingIntervalSecs` | `FELIX_REPORTINGINTERVALSECS` | Interval at which Felix reports its status into the datastore. 0 means disabled and is correct for Kubernetes-only clusters. Must be non-zero in OpenStack deployments. [Default: `30`] | int | -| `ReportingTTLSecs` | `FELIX_REPORTINGTTLSECS` | Time-to-live setting for process-wide status reports. [Default: `90`] | int | -| `RouteTableRange` | `FELIX_ROUTETABLERANGE` | _deprecated in favor of `RouteTableRanges`_ Calico programs additional Linux route tables for various purposes. `RouteTableRange` specifies the indices of the route tables that Calico should use. [Default: `""`] | `-` | -| `RouteTableRanges` | `FELIX_ROUTETABLERANGES` | Calico programs additional Linux route tables for various purposes. 
`RouteTableRanges` specifies a set of table index ranges that Calico should use. Deprecates `RouteTableRange`, overrides `RouteTableRange`. [Default: `"1-250"`] | `-,-,...` | -| `RouteSyncDisabled` | `FELIX_ROUTESYNCDISABLED` | Set to `true` to disable Calico programming routes to local workloads. [Default: `false`] | boolean | -| `VXLANEnabled` | `FELIX_VXLANENABLED` | Optional, you shouldn't need to change this setting as Felix calculates if VXLAN should be enabled based on the existing IP Pools. When set, this overrides whether Felix should create the VXLAN tunnel device for VXLAN networking. [Default: unset] | optional boolean | -| `VXLANMTU` | `FELIX_VXLANMTU` | The MTU to set on the IPv4 VXLAN tunnel device. Zero value means auto-detect. Also controls NodePort MTU when eBPF enabled. See [Configuring MTU](../../../../networking/configuring/mtu.mdx) [Default: `0`] | int | -| `VXLANMTUV6` | `FELIX_VXLANMTUV6` | The MTU to set on the IPv6 VXLAN tunnel device. Zero value means auto-detect. Also controls NodePort MTU when eBPF enabled. See [Configuring MTU](../../../../networking/configuring/mtu.mdx) [Default: `0`] | int | -| `VXLANPort` | `FELIX_VXLANPORT` | The UDP port to use for VXLAN. [Default: `4789`] | int | -| `VXLANTunnelMACAddr` | | MAC address of the IPv4 VXLAN tunnel. This is system configured and should not be updated manually. | string | -| `VXLANVNI` | `FELIX_VXLANVNI` | The virtual network ID to use for VXLAN. [Default: `4096`] | int | -| `AllowVXLANPacketsFromWorkloads` | `FELIX_ALLOWVXLANPACKETSFROMWORKLOADS` | Set to `true` to allow VXLAN encapsulated traffic from workloads. [Default: `false`] | boolean | -| `AllowIPIPPacketsFromWorkloads` | `FELIX_ALLOWIPIPPACKETSFROMWORKLOADS` | Set to `true` to allow IPIP encapsulated traffic from workloads. [Default: `false`] | boolean | -| `TyphaAddr` | `FELIX_TYPHAADDR` | IPv4 address at which Felix should connect to Typha. [Default: none] | string | -| `TyphaK8sServiceName` | `FELIX_TYPHAK8SSERVICENAME` | Name of the Typha Kubernetes service | string | -| `Ipv6Support` | `FELIX_IPV6SUPPORT` | Enable $[prodname] networking and security for IPv6 traffic as well as for IPv4. | boolean | -| `RouteSource` | `FELIX_ROUTESOURCE` | Where Felix gets is routing information from for VXLAN and the BPF data plane. The CalicoIPAM setting is more efficient because it supports route aggregation, but it only works when Calico's IPAM or host-local IPAM is in use. Use the WorkloadIPs setting if you are using Calico's VXLAN or BPF data plane and not using Calico IPAM or host-local IPAM. [Default: "CalicoIPAM"] | 'CalicoIPAM', or 'WorkloadIPs' | -| `mtuIfacePattern` | `FELIX_MTUIFACEPATTERN` | Pattern used to discover the host's interface for MTU auto-detection. [Default: `^((en\|wl\|ww\|sl\|ib)[Pcopsvx].*\|(eth\|wlan\|wwan).*)`] | regex | -| `TPROXYMode` | `FELIX_TPROXYMODE` | Sets transparent proxying mode. [Default: "Disabled"] | 'Disabled', 'Enabled' | -| `TPROXYPort` | `FELIX_TPROXYPORT` | What local ports is the proxied traffic sent to. [Default: `16001`] | int | -| `FeatureDetectOverride` | `FELIX_FEATUREDETECTOVERRIDE` | Is used to override the feature detection. Values are specified in a comma separated list with no spaces, example; "SNATFullyRandom=true,MASQFullyRandom=false,RestoreSupportsLock=true,IPIPDeviceIsL3=true. "true" or "false" will force the feature, empty or omitted values are auto-detected. [Default: `""`] | string | -| `FeatureGates` | `FELIX_FEATUREGATES` | Is used to enable or disable tech-preview Calico features. 
Values are specified in a comma separated list with no spaces. This is used to enable features that are not fully production ready. Example; "AbC=enabled,XyZ=false" [Default: not set] | string - -#### Feature Gates - -* `BPFConnectTimeLoadBalancingWorkaround` - Use when connect-time loadbalancer (CTLB) is turned off or if you want to turn it off for UDP only. When CTLB is turned off, host networked processes cannot always reach services. This workaround makes sure that they can. When CTLB is turned on, UDP clients may get stuck sending traffic to endpoint that does not exist anymore. So CTLB needs to be turned off sometimes. - * `enabled` - when CTLB is turned off make sure that services are always accessible. - * `udp` - turns off CTLB for UDP only and makes sure that services are always accessible. Preferred setting to make sure that DNS works. - -### etcd datastore configuration - -| Configuration parameter | Environment variable | Description | Schema | -| ----------------------- | --------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------- | -| `EtcdCaFile` | `FELIX_ETCDCAFILE` | Path to the file containing the root certificate of the certificate authority (CA) that issued the etcd server certificate. Configures Felix to trust the CA that signed the root certificate. The file may contain multiple root certificates, causing Felix to trust each of the CAs included. To disable authentication of the server by Felix, set the value to `none`. [Default: `/etc/ssl/certs/ca-certificates.crt`] | string | -| `EtcdCertFile` | `FELIX_ETCDCERTFILE` | Path to the file containing the client certificate issued to Felix. Enables Felix to participate in mutual TLS authentication and identify itself to the etcd server. Example: `/etc/felix/cert.pem` (optional) | string | -| `EtcdEndpoints` | `FELIX_ETCDENDPOINTS` | Comma-delimited list of etcd endpoints to connect to. Example: `http://127.0.0.1:2379,http://127.0.0.2:2379`. | `://:` | -| `EtcdKeyFile` | `FELIX_ETCDKEYFILE` | Path to the file containing the private key matching Felix's client certificate. Enables Felix to participate in mutual TLS authentication and identify itself to the etcd server. Example: `/etc/felix/key.pem` (optional) | string | - -### Kubernetes API datastore configuration - -The Kubernetes API datastore driver reads its configuration from Kubernetes-provided environment variables. 
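For example, when Felix runs in a pod and `DatastoreType` is set to `kubernetes`, the API server location is typically taken from the standard service environment variables that Kubernetes injects into every pod (a minimal sketch; the host and port values below are illustrative):

```
# Select the Kubernetes API datastore driver.
FELIX_DATASTORETYPE=kubernetes
# Injected automatically by Kubernetes; they point at the in-cluster API server.
KUBERNETES_SERVICE_HOST=10.96.0.1
KUBERNETES_SERVICE_PORT=443
```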
- -### iptables data plane configuration - -| Configuration parameter | Environment variable | Description | Schema | -| ------------------------------------ | ------------------------------------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------- | -| `ChainInsertMode` | `FELIX_CHAININSERTMODE` | Controls whether Felix hooks the kernel's top-level iptables chains by inserting a rule at the top of the chain or by appending a rule at the bottom. `Insert` is the safe default since it prevents $[prodname]'s rules from being bypassed. If you switch to `Append` mode, be sure that the other rules in the chains signal acceptance by falling through to the $[prodname] rules, otherwise the $[prodname] policy will be bypassed. In particular `Append` mode is incompatible with DNS Policy unless kube-proxy is modified to fall through to $[prodname] rules. [Default: `Insert`] | `Insert`, `Append` | -| `DefaultEndpointToHostAction` | `FELIX_DEFAULTENDPOINTTOHOSTACTION` | This parameter controls what happens to traffic that goes from a workload endpoint to the host itself (after the traffic hits the endpoint egress policy). By default $[prodname] blocks traffic from workload endpoints to the host itself with an iptables `Drop` action. If you want to allow some or all traffic from endpoint to host, set this parameter to `Return` or `Accept`. Use `Return` if you have your own rules in the iptables "INPUT" chain; $[prodname] will insert its rules at the top of that chain, then `Return` packets to the "INPUT" chain once it has completed processing workload endpoint egress policy. Use `Accept` to unconditionally accept packets from workloads after processing workload endpoint egress policy. [Default: `Drop`] | `Drop`, `Return`, `Accept` | -| `GenericXDPEnabled` | `FELIX_GENERICXDPENABLED` | When enabled, Felix can fallback to the non-optimized `generic` XDP mode. This should only be used for testing since it doesn't improve performance over the non-XDP mode. [Default: `false`] | boolean | -| `InterfaceExclude` | `FELIX_INTERFACEEXCLUDE` | A comma-separated list of interface names that should be excluded when Felix is resolving host endpoints. The default value ensures that Felix ignores Kubernetes' internal `kube-ipvs0` device. If you want to exclude multiple interface names using a single value, the list supports regular expressions. For regular expressions you must wrap the value with `/`. For example having values `/^kube/,veth1` will exclude all interfaces that begin with `kube` and also the interface `veth1`. [Default: `kube-ipvs0`] | string | -| `IpsetsRefreshInterval` | `FELIX_IPSETSREFRESHINTERVAL` | Period, in seconds, at which Felix re-checks the IP sets in the data plane to ensure that no other process has accidentally broken $[prodname]'s rules. 
Set to 0 to disable IP sets refresh. [Default: `10`] | int | -| `IptablesBackend` | `FELIX_IPTABLESBACKEND` | This parameter controls which variant of iptables Felix uses. Set this to `Auto` for auto detection of the backend. If a specific backend is needed then use `nft` for hosts using a netfilter backend or `Legacy` for others. [Default: `Auto`] | `Legacy`, `nft`, `Auto` | -| `IptablesFilterAllowAction` | `FELIX_IPTABLESFILTERALLOWACTION` | This parameter controls what happens to traffic that is allowed by a Felix policy chain in the iptables filter table (i.e., a normal policy chain). The default will immediately `Accept` the traffic. Use `Return` to send the traffic back up to the system chains for further processing. [Default: `Accept`] | `Accept`, `Return` | -| `IptablesLockFilePath` | `FELIX_IPTABLESLOCKFILEPATH` | _Deprecated:_ For iptables versions prior to v1.6.2, location of the iptables lock file (later versions of iptables always use value "/run/xtables.lock"). You may need to change this if the lock file is not in its standard location (for example if you have mapped it into Felix's container at a different path). [Default: `/run/xtables.lock`] | string | -| `IptablesLockProbeIntervalMillis` | `FELIX_IPTABLESLOCKPROBEINTERVALMILLIS` | Time, in milliseconds, that Felix will wait between attempts to acquire the iptables lock if it is not available. Lower values make Felix more responsive when the lock is contended, but use more CPU. [Default: `50`] | int | -| `IptablesLockTimeoutSecs` | `FELIX_IPTABLESLOCKTIMEOUTSECS` | Time, in seconds, that Felix will wait for the iptables lock. Versions of iptables prior to v1.6.2 support disabling the iptables lock by setting this value to 0; v1.6.2 and above do not so Felix will default to 10s if a non-positive number is used. To use this feature, Felix must share the iptables lock file with all other processes that also take the lock. When running Felix inside a container, this typically requires the file /run/xtables.lock on the host to be mounted into the `$[nodecontainer]` or `calico/felix` container. [Default: `0` disabled for iptables <v1.6.2 or 10s for later versions] | int | -| `IptablesMangleAllowAction` | `FELIX_IPTABLESMANGLEALLOWACTION` | This parameter controls what happens to traffic that is allowed by a Felix policy chain in the iptables mangle table (i.e., a pre-DNAT policy chain). The default will immediately `Accept` the traffic. Use `Return` to send the traffic back up to the system chains for further processing. [Default: `Accept`] | `Accept`, `Return` | -| `IptablesMarkMask` | `FELIX_IPTABLESMARKMASK` | Mask that Felix selects its IPTables Mark bits from. Should be a 32 bit hexadecimal number with at least 8 bits set, none of which clash with any other mark bits in use on the system. When using $[prodname] with Kubernetes' `kube-proxy` in IPVS mode, [we recommend allowing at least 16 bits](#ipvs-bits). [Default: `0xffff0000`] | netmask | -| `IptablesNATOutgoingInterfaceFilter` | `FELIX_IPTABLESNATOUTGOINGINTERFACEFILTER` | This parameter can be used to limit the host interfaces on which Calico will apply SNAT to traffic leaving a Calico IPAM pool with "NAT outgoing" enabled. This can be useful if you have a main data interface, where traffic should be SNATted and a secondary device (such as the docker bridge) which is local to the host and doesn't require SNAT. This parameter uses the iptables interface matching syntax, which allows `+` as a wildcard. Most users will not need to set this. 
Example: if your data interfaces are eth0 and eth1 and you want to exclude the docker bridge, you could set this to `eth+` | string | -| `IptablesPostWriteCheckIntervalSecs` | `FELIX_IPTABLESPOSTWRITECHECKINTERVALSECS` | Period, in seconds, after Felix has done a write to the data plane that it schedules an extra read back to check the write was not clobbered by another process. This should only occur if another application on the system doesn't respect the iptables lock. [Default: `1`] | int | -| `IptablesRefreshInterval` | `FELIX_IPTABLESREFRESHINTERVAL` | Period, in seconds, at which Felix re-checks all iptables state to ensure that no other process has accidentally broken $[prodname]'s rules. Set to 0 to disable iptables refresh. [Default: `90`] | int | -| `LogPrefix` | `FELIX_LOGPREFIX` | The log prefix that Felix uses when rendering LOG rules. [Default: `calico-packet`] | string | -| `MaxIpsetSize` | `FELIX_MAXIPSETSIZE` | Maximum size for the ipsets used by Felix. Should be set to a number that is greater than the maximum number of IP addresses that are ever expected in a selector. [Default: `1048576`] | int | -| `NATPortRange` | `FELIX_NATPORTRANGE` | Port range used by iptables for port mapping when doing outgoing NAT. (Example: `32768:65000`). [Default: iptables maps source ports below 512 to other ports below 512: those between 512 and 1023 inclusive will be mapped to ports below 1024, and other ports will be mapped to 1024 or above. Where possible, no port alteration will occur.] | string | -| `NATOutgoingAddress` | `FELIX_NATOUTGOINGADDRESS` | Source address used by iptables for an SNAT rule when doing outgoing NAT. [Default: an iptables `MASQUERADE` rule is used for outgoing NAT which will use the address on the interface traffic is leaving on.] | `` | -| `NetlinkTimeoutSecs` | `FELIX_NETLINKTIMEOUTSECS` | Time, in seconds, that Felix will wait for netlink (i.e. routing table list/update) operations to complete before giving up and retrying. [Default: `10`] | float | -| `RouteRefreshInterval` | `FELIX_ROUTEREFRESHINTERVAL` | Period, in seconds, at which Felix re-checks the routes in the data plane to ensure that no other process has accidentally broken $[prodname]'s rules. Set to 0 to disable route refresh. [Default: `90`] | int | -| `ServiceLoopPrevention` | `FELIX_SERVICELOOPPREVENTION` | When [service IP advertisement is enabled](../../../../networking/configuring/advertise-service-ips.mdx), prevent routing loops to service IPs that are not in use, by dropping or rejecting packets that do not get DNAT'd by kube-proxy. Unless set to "Disabled", in which case such routing loops continue to be allowed. [Default: `Drop`] | `Drop`, `Reject`, `Disabled` | -| `WorkloadSourceSpoofing` | `FELIX_WORKLOADSOURCESPOOFING` | Controls whether pods can enable source IP address spoofing with the `cni.projectcalico.org/allowedSourcePrefixes` annotation. When set to `Any`, pods can use this annotation to send packets from any IP address. [Default: `Disabled`] | `Any`, `Disabled` | -| `XDPRefreshInterval` | `FELIX_XDPREFRESHINTERVAL` | Period, in seconds, at which Felix re-checks the XDP state in the data plane to ensure that no other process has accidentally broken $[prodname]'s rules. Set to 0 to disable XDP refresh. [Default: `90`] | int | -| `XDPEnabled` | `FELIX_XDPENABLED` | Enable XDP acceleration for host endpoint policies. 
[Default: `true`] | boolean | - -### eBPF data plane configuration - -eBPF data plane mode uses the Linux Kernel's eBPF virtual machine to implement networking and policy instead of iptables. When BPFEnabled is set to `true`, Felix will: - -- Require a v5.3 Linux kernel. -- Implement policy with eBPF programs instead of iptables. -- Activate its embedded implementation of `kube-proxy` to implement Kubernetes service load balancing. -- Disable support for IPv6. - -See [Enable the eBPF data plane](../../../../operations/ebpf/enabling-ebpf.mdx) for step-by step instructions to enable this feature. - -| Configuration parameter / Environment variable | Description | Schema | Default | -| ----------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------ | -------------- | ------ | ------ | ----------------- | -| BPFEnabled /
    FELIX_BPFENABLED | Enable eBPF data plane mode. eBPF mode has a number of limitations, see the [HOWTO guide](../../../../operations/ebpf/enabling-ebpf.mdx). | true, false | false | -| BPFDisableUnprivileged /
    FELIX_BPFDISABLEUNPRIVILEGED | If true, Felix sets the kernel.unprivileged_bpf_disabled sysctl to disable unprivileged use of BPF. This ensures that unprivileged users cannot access Calico's BPF maps and cannot insert their own BPF programs to interfere with the ones that $[prodname] installs. | true, false | true | -| BPFLogLevel /
    FELIX_BPFLOGLEVEL | The log level used by the BPF programs. The logs are emitted to the BPF trace pipe, accessible with the command `tc exec BPF debug`. | Off,Info,Debug | Off | -| BPFDataIfacePattern /
    FELIX_BPFDATAIFACEPATTERN | Controls which interfaces Felix should attach BPF programs to catch traffic to/from the external network. This needs to match the interfaces that Calico workload traffic flows over as well as any interfaces that handle incoming traffic to NodePorts and services from outside the cluster. It should not match the workload interfaces (usually named cali...).. | regular expression | ^((en|wl|ww|sl|ib)[Popsx].\*|(eth|wlan|wwan|bond).\*|tunl0$|vxlan.calico$|vxlan-v6.calico$|wireguard.cali$|wg-v6.cali$) | -| BPFConnectTimeLoadBalancingEnabled /
    FELIX_BPFCONNECTTIMELOADBALANCINGENABLED | Controls whether Felix installs the connect-time load balancer. In the current release, the connect-time load balancer is required for the host to reach kubernetes services. | true,false | true | -| BPFExternalServiceMode /
    FELIX_BPFEXTERNALSERVICEMODE | Controls how traffic from outside the cluster to NodePorts and ClusterIPs is handled. In Tunnel mode, the packet is tunneled from the ingress host to the host with the backing pod and back again. In DSR mode, traffic is tunneled to the host with the backing pod and then returned directly; this requires a network that allows direct return. | Tunnel,DSR | Tunnel | -| BPFDSROptoutCIDRs /
    FELIX_BPFDSROPTOUTCIDRS | BPFDSROptoutCIDRs is a list of CIDRs that are excluded from DSR. That is, clients in those CIDRs will access nodeports as if BPFExternalServiceMode was set to Tunnel. | string | "" | -| BPFExtToServiceConnmark /
    FELIX_BPFEXTTOSERVICECONNMARK | Controls a 32-bit mark that is set on connections from an external client to a local service. This mark allows us to control how packets of that connection are routed within the host and how routing is interpreted by the RPF check. | int | 0 | -| BPFKubeProxyIptablesCleanupEnabled /
    FELIX_BPFKUBEPROXYIPTABLESCLEANUPENABLED | Controls whether Felix will clean up the iptables rules created by the Kubernetes `kube-proxy`; should only be enabled if `kube-proxy` is not running. | true,false | true | -| BPFKubeProxyMinSyncPeriod /
    FELIX_BPFKUBEPROXYMINSYNCPERIOD | Controls the minimum time between data plane updates for Felix's embedded `kube-proxy` implementation. | seconds | `1` | -| BPFKubeProxyEndpointSlicesEnabled /
    FELIX_BPFKUBEPROXYENDPOINTSLICESENABLED | Controls whether Felix's embedded kube-proxy derives its services from Kubernetes' EndpointSlices resources. Using EndpointSlices is more efficient but it requires EndpointSlices support to be enabled at the Kubernetes API server. | true,false | false | -| BPFMapSizeConntrack /
    FELIX_BPFMapSizeConntrack | Controls the size of the conntrack map. This map must be large enough to hold an entry for each active connection. Warning: changing the size of the conntrack map can cause disruption. | int | 512000 | -| BPFMapSizeNATFrontend /
    FELIX_BPFMapSizeNATFrontend | Controls the size of the NAT frontend map. FrontendMap should be large enough to hold an entry for each nodeport, external IP and each port in each service. | int | 65536 | -| BPFMapSizeNATBackend /
    FELIX_BPFMapSizeNATBackend | Controls the size of the NAT backend map. This map holds one entry per service endpoint, so it is typically larger than the number of services. | int | 262144 | -| BPFMapSizeNATAffinity /
    FELIX_BPFMapSizeNATAffinity | Controls the size of the NAT affinity map. | int | 65536 | -| BPFMapSizeIPSets /
    FELIX_BPFMapSizeIPSets | Controls the size of the IPSets map. The IP sets map must be large enough to hold an entry for each endpoint matched by every selector in the source/destination matches in network policy. Selectors such as "all()" can result in large numbers of entries (one entry per endpoint in that case). | int | 1048576 | -| BPFMapSizeRoute /
    FELIX_BPFMapSizeRoute | Controls the size of the route map. The routes map should be large enough to hold one entry per workload and a handful of entries per host (enough to cover its own IPs and tunnel IPs). | int | 262144 | -| BPFHostConntrackBypass /
    FELIX_BPFHostConntrackBypass | Controls whether to bypass Linux conntrack in BPF mode for workloads and services. | true,false | true | -| BPFPolicyDebugEnabled /
    FELIX_BPFPOLICYDEBUGENABLED | In eBPF data plane mode, Felix records detailed information about the BPF policy programs, which can be examined with the calico-bpf command-line tool. | true, false | true | - -### Windows-specific configuration - -| Configuration parameter | Environment variable | Description | Schema | Default | -| ------------------------------- | ------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- | ------------------------------------------- | -| windowsFlowLogsFileDirectory |
    FELIX_WINDOWSFLOWLOGSFILEDIRECTORY | Set the directory where flow logs files are stored on Windows nodes. This parameter only takes effect when `flowLogsFileEnabled` is set to `true`. | string | `c:\\TigeraCalico\\flowlogs` | -| windowsFlowLogsPositionFilePath |
    FELIX_WINDOWSFLOWLOGSPOSITIONFILEPATH | Specify the position of the external pipeline that reads flow logs on Windows nodes. This parameter only takes effect when `FlowLogsDynamicAggregationEnabled` is set to `true`. | string | `c:\\TigeraCalico\\flowlogs\\flows.log.pos` | -| windowsStatsDumpFilePath |
    FELIX_WINDOWSTATSDUMPFILEPATH | Specify the position of the file used for dumping flow log statistics on Windows nodes. Note this is an internal setting that users shouldn't need to modify. | string | `c:\\TigeraCalico\\stats\\dump` | -| WindowsDNSCacheFile |
    FELIX_WINDOWSDNSCACHEFILE | Specify the name of the file that Felix uses to preserve learned DNS information when restarting. | string | `c:\\TigeraCalico\\felix-dns-cache.txt` | -| WindowsDNSExtraTTL |
    FELIX_WINDOWSDNSEXTRATTL | Specify extra time in seconds to keep IPs and alias names that are learned from DNS, in addition to each name or IP's advertised TTL. | seconds | `120` | - -### Kubernetes-specific configuration - -| Configuration parameter | Environment variable | Description | Schema | -| ----------------------- | -------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------ | -| `KubeNodePortRanges` | `FELIX_KUBENODEPORTRANGES` | A list of port ranges that Felix should treat as Kubernetes node ports. Only when `kube-proxy` is configured to use IPVS mode: Felix assumes that traffic arriving at the host of one of these ports will ultimately be forwarded instead of being terminated by a host process. [Default: `30000:32767`]
    | Comma-delimited list of `:` port ranges or single ports. | -| `KubeMasqueradeBit` | `FELIX_KUBEMASQUERADEBIT` | KubeMasqueradeBit should be set to the same value as --iptables-masquerade-bit of kube-proxy when TPROXY is used. This defaults to the corresponding kube-proxy default value so it only needs to change if kube-proxy is using a non-standard setting. Must be within the range of 0-31. OpenShift sets the bit to 0 by default. [Default: 14] | integer | - -:::note - - When using $[prodname] with Kubernetes' `kube-proxy` in IPVS mode, $[prodname] uses additional -iptables mark bits to store an ID for each local $[prodname] endpoint. For example, the default `IptablesMarkMask` value, -`0xffff0000` gives $[prodname] 16 bits, up to 6 of which are used for internal purposes, leaving 10 bits for endpoint -IDs. 10 bits is enough for 1024 different values and $[prodname] uses 2 of those for internal purposes, leaving enough -for 1022 endpoints on the host. - -::: - -### Bare metal specific configuration - -| Configuration parameter | Environment variable | Description | Schema | -| ----------------------- | ----------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------ | -| `InterfacePrefix` | `FELIX_INTERFACEPREFIX` | The interface name prefix that identifies workload endpoints and so distinguishes them from host endpoint interfaces. Accepts more than one interface name prefix in comma-delimited format, e.g., `tap,cali`. Note: in environments other than bare metal, the orchestrators configure this appropriately. For example our Kubernetes and Docker integrations set the `cali` value, and our OpenStack integration sets the `tap` value. [Default: `cali`] | string | - -### $[prodname] specific configuration - -| Setting | Environment variable | Default | Meaning | -| --------------------------------------- | --------------------------------------------- | ------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `DropActionOverride` | `FELIX_DROPACTIONOVERRIDE` | `Drop` | How to treat packets that are disallowed by the current $[prodname] policy. For more detail please see below. | -| `LogDropActionOverride` | `FELIX_LOGDROPACTIONOVERRIDE` | `false` | Set to `true` to add the `DropActionOverride` to the syslog entries. For more detail please see below. | -| `PrometheusReporterEnabled` | `FELIX_PROMETHEUSREPORTERENABLED` | `false` | Set to `true` to enable Prometheus reporting of denied packet metrics. For more detail please see below. 
| -| `PrometheusReporterPort` | `FELIX_PROMETHEUSREPORTERPORT` | `9092` | The TCP port on which to report denied packet metrics. | -| `PrometheusReporterCertFile` | `FELIX_PROMETHEUSREPORTERCERTFILE` | None | Certificate for encrypting Prometheus denied packet metrics. | -| `PrometheusReporterKeyFile` | `FELIX_PROMETHEUSREPORTERKEYFILE` | None | Private key for encrypting Prometheus denied packet metrics. | -| `PrometheusReporterCAFile` | `FELIX_PROMETHEUSREPORTERCAFILE` | None | Trusted CA file for clients attempting to read Prometheus denied packet metrics. | -| `PrometheusMetricsCertFile` | `FELIX_PROMETHEUSMETRICSCERTFILE` | None | Certificate for encrypting general Felix Prometheus metrics. | -| `PrometheusMetricsKeyFile` | `FELIX_PROMETHEUSMETRICSKEYFILE` | None | Private key for encrypting general Felix Prometheus metrics. | -| `PrometheusMetricsCAFile` | `FELIX_PROMETHEUSMETRICSCAFILE` | None | Trusted CA file for clients attempting to read general Felix Prometheus metrics. | -| `IPSecMode` | `FELIX_IPSECMODE` | None | Controls which mode IPsec is operating on. The only supported value is `PSK`. An empty value means IPsec is not enabled. | -| `IPSecAllowUnsecuredTraffic` | `FELIX_IPSECALLOWUNSECUREDTRAFFIC` | `false` | When set to false, only IPsec-protected traffic will be allowed on the packet paths where IPsec is supported. When set to true, IPsec will be used but non-IPsec traffic will be accepted. In general, setting this to `true` is less safe since it allows an attacker to inject packets. However, it is useful when transitioning from non-IPsec to IPsec since it allows traffic to flow while the cluster negotiates the IPsec mesh. | -| `IPSecIKEAlgorithm` | `FELIX_IPSECIKEALGORITHM` | `aes128gcm16-prfsha256-ecp256` | IPsec IKE algorithm. Default is NIST suite B recommendation. | -| `IPSecESPAlgorithm` | `FELIX_IPSECESPALGORITHM` | `aes128gcm16-ecp256` | IPsec ESP algorithm. Default is NIST suite B recommendation. | -| `IPSecLogLevel` | `FELIX_IPSECLOGLEVEL` | `Info` | Controls log level for IPsec components. Set to `None` for no logging. Other valid values are `Notice`, `Info`, `Debug` and `Verbose`. | -| `IPSecPSKFile` | `FELIX_IPSECPSKFILE` | None | The path to the pre shared key file for IPsec. | -| `FlowLogsFileEnabled` | `FELIX_FLOWLOGSFILEENABLED` | `false` | Set to `true`, enables flow logs. If set to `false` no flow logging will occur. Flow logs are written to a file `flows.log` and sent to Elasticsearch. The location of this file can be configured using the `FlowLogsFileDirectory` field. File rotation settings for this `flows.log` file can be configured using the fields `FlowLogsFileMaxFiles` and `FlowLogsFileMaxFileSizeMB`. Note that flow log exports to Elasticsearch are dependent on flow logs getting written to this file. Setting this parameter to `false` will disable flow logs. | -| `FlowLogsFileIncludeLabels` | `FELIX_FLOWLOGSFILEINCLUDELABELS` | `false` | Set to `true` to include endpoint label information in flow logs. This parameter only takes effect when `FlowLogsFileEnabled` is set to `true`. | -| `FlowLogsFileIncludePolicies` | `FELIX_FLOWLOGSFILEINCLUDEPOLICIES` | `false` | Set to `true` to include policy match information in flow logs. This parameter only takes effect when `FlowLogsFileEnabled` is set to `true`. | -| `FlowLogsFileIncludeService` | `FELIX_FLOWLOGSFILEINCLUDESERVICE` | `false` | Set to `true` to include destination service information in flow logs. 
The service information is derived from pre-DNAT destination IP and is therefore only available on the node where DNAT occurs. This parameter only takes effect when `FlowLogsFileEnabled` is set to `true`. | -| `FlowLogsFileDirectory` | `FELIX_FLOWLOGSFILEDIRECTORY` | `/var/log/calico/flowlogs` | The directory where flow logs files are stored. This parameter only takes effect when `FlowLogsFileEnabled` is set to `true`. | -| `FlowLogsFileMaxFiles` | `FELIX_FLOWLOGSFILEMAXFILES` | `5` | The number of files to keep when rotating flow log files. This parameter only takes effect when `FlowLogsFileEnabled` is set to `true`. | -| `FlowLogsFileMaxFileSizeMB` | `FELIX_FLOWLOGSFILEMAXFILESIZEMB` | `100` | The max size in MB of flow logs files before rotation. This parameter only takes effect when `FlowLogsFileEnabled` is set to `true`. | -| `FlowLogsFlushInterval` | `FELIX_FLOWLOGSFLUSHINTERVAL` | `300` | The period, in seconds, at which Felix exports the flow logs. | -| `FlowLogsEnableNetworkSets` | `FELIX_FLOWLOGSENABLENETWORKSETS` | `false` | Whether to specify the network set a flow log originates from. | -| `FlowLogsFileAggregationKindForAllowed` | `FELIX_FLOWLOGSFILEAGGREGATIONKINDFORALLOWED` | `2` | How much to aggregate the flow logs sent to Elasticsearch for allowed traffic. Bear in mind that changing this value may have a dramatic impact on the volume of flow logs sent to Elasticsearch. `0` means no aggregation, `1` means aggregate all flows that share a source port on each node, `2` means aggregate all flows that share source ports or are from the same ReplicaSet and `3` means aggregate all flows that share destination and source ports and are from the same ReplicateSet | -| `FlowLogsFileAggregationKindForDenied` | `FELIX_FLOWLOGSFILEAGGREGATIONKINDFORDENIED` | `1` | How much to aggregate the flow logs sent to Elasticsearch for denied traffic. Bear in mind that changing this value may have a dramatic impact on the volume of flow logs sent to Elasticsearch. `0` means no aggregation, `1` means aggregate all flows that share a source port on each node, and `2` means aggregate all flows that share source ports or are from the same ReplicaSet and `3` means aggregate all flows that share destination and source ports and are from the same ReplicateSet. | -| `FlowLogsDynamicAggregationEnabled` | `FELIX_FLOWLOGSDYNAMICAGGREGATIONENABLED` | `false` | Enable dynamic aggregation for flow logs. This will increase aggregation up to the maximum level allowed (which is 3 and means aggregate all flows that share destination and source ports and are from the same ReplicateSet) when it detects the pipeline for reading flow logs is stalled. It will revert to its initial aggregation level when this condition changes. The initial aggregation level can be specified using `FlowLogsFileAggregationKindForAllowed` and `FlowLogsFileAggregationKindForDenied`. If these values are not specified, default values of `2` and `1` will be used. | -| `FlowLogsPositionFilePath` | `FELIX_FLOWLOGSPOSITIONPATH` | `/var/log/calico/flows.log.pos` | Default path of the position file. It is used to read the current state of pipeline for flow logs. This parameter will be used only when `FlowLogsDynamicAggregationEnabled` is set to `true` | -| `FlowLogsAggregationThresholdBytes` | `FELIX_FLOWLOGSAGGREGATIONTHRESHOLDBYTES` | `8192` | Default threshold to determine how far behind the pipeline for flow logs can get before aggregation starts in. 
Detecting a difference of 8192 bytes means increase 1 level, while a difference of 16384 means increasing two levels. This parameter will be used only when `FlowLogsDynamicAggregationEnabled` is set to `true`. | -| `FlowLogsCollectProcessInfo` | `FELIX_FLOWLOGSCOLLECTPROCESSINFO` | `true` | If enabled Felix will load the kprobe BPF programs to collect process info. | -| `FlowLogsCollectTcpStats` | `FELIX_FLOWLOGSCOLLECTTCPSTATS` | `true` | If enabled Felix will collect TCP socket stats using BPF and requires a recent kernel that supports BPF | -| `FlowLogsCollectProcessPath` | `FELIX_FLOWLOGSCOLLECTPROCESSPATH` | `true` | If enabled, along with FlowLogsCollectProcessInfo, each flow log will contain the full path of the process executable and the arguments with which the executable was invoked. If path or arguments cannot be determined, Felix will fallback to using task names and arguments will be empty. For full functionality, this feature should be enabled via operator see [Enabling process path](../../../../observability/elastic/flow/processpath.mdx) | -| `FlowLogsFilePerFlowProcessLimit` | `FELIX_FLOWLOGSFILEPERFLOWPROCESSLIMIT` | `2` | Specify the maximum number of flow log entries with distinct process information beyond which process information will be aggregated. | -| `FlowLogsFilePerFlowProcessArgsLimit` | `FELIX_FLOWLOGSFILEPERFLOWPROCESSARGSLIMIT` | `5` | Specify the maximum number of arguments beyond which the process arguments will be aggregated. | -| `DNSCacheFile` | `FELIX_DNSCACHEFILE` | `/var/run/calico/felix-dns-cache.txt` | The name of the file that Felix uses to preserve learned DNS information when restarting. | -| `DNSCacheSaveInterval` | `FELIX_DNSCACHESAVEINTERVAL` | `60` | The periodic interval at which Felix saves learned DNS information to the cache file. | -| `DNSCacheEpoch` | `FELIX_DNSCACHEEPOCH` | `0` | An arbitrary number that can be changed, at runtime, to tell Felix to discard all its learned DNS information. | -| `DNSExtraTTL` | `FELIX_DNSEXTRATTL` | `0` | Extra time, in seconds, to keep IPs and alias names that are learned from DNS, in addition to each name or IP's advertised TTL. | -| `DNSTrustedServers` | `FELIX_DNSTRUSTEDSERVERS` | `k8s-service:kube-dns` | The DNS servers that Felix should trust. Each entry here must be `[:]` - indicating an explicit DNS server IP - or `k8s-service:[/][:port]` - indicating a Kubernetes DNS service. `` defaults to the first service port, or 53 for an IP, and `` to `kube-system`. An IPv6 address with a port must use the square brackets convention, for example `[fd00:83a6::12]:5353`. Note that Felix (calico-node) will need RBAC permission to read the details of each service specified by a `k8s-service:...` form. | -| `DNSLogsFileEnabled` | `FELIX_DNSLOGSFILEENABLED` | `false` | Set to `true`, enables DNS logs. If set to `false` no DNS logging will occur. DNS logs are written to a file `dns.log` and sent to Elasticsearch. The location of this file can be configured using the `DNSLogsFileDirectory` field. File rotation settings for this `dns.log` file can be configured using the fields `DNSLogsFileMaxFiles` and `DNSLogsFileMaxFileSizeMB`. Note that DNS log exports to Elasticsearch are dependent on DNS logs getting written to this file. Setting this parameter to `false` will disable DNS logs. | -| `DNSLogsFileDirectory` | `FELIX_DNSLOGSFILEDIRECTORY` | `/var/log/calico/dnslogs` | The directory where DNS logs files are stored. This parameter only takes effect when `DNSLogsFileEnabled` is `true`. 
| -| `DNSLogsFileMaxFiles` | `FELIX_DNSLOGSFILEMAXFILES` | `5` | The number of files to keep when rotating DNS log files. This parameter only takes effect when `DNSLogsFileEnabled` is `true`. | -| `DNSLogsFileMaxFileSizeMB` | `FELIX_DNSLOGSFILEMAXFILESIZEMB` | `100` | The max size in MB of DNS log files before rotation. This parameter only takes effect when `DNSLogsFileEnabled` is `true`. | -| `DNSLogsFlushInterval` | `FELIX_DNSLOGSFLUSHINTERVAL` | `300` | The period, in seconds, at which Felix exports DNS logs. | -| `DNSLogsFileAggregationKind` | `FELIX_DNSLOGSFILEAGGREGATIONKIND` | `1` | How much to aggregate DNS logs. Bear in mind that changing this value may have a dramatic impact on the volume of flow logs sent to Elasticsearch. `0` means no aggregation, `1` means aggregate similar DNS logs from workloads in the same ReplicaSet. | -| `DNSLogsFileIncludeLabels` | `FELIX_DNSLOGSFILEINCLUDELABELS` | `true` | Whether to include client and server workload labels in DNS logs. | -| `DNSLogsFilePerNodeLimit` | `FELIX_DNSLOGSFILEPERNODELIMIT` | `0` (no limit) | Limit on the number of DNS logs that can be emitted within each flush interval. When this limit has been reached, Felix counts the number of unloggable DNS responses within the flush interval, and emits a WARNING log with that count at the same time as it flushes the buffered DNS logs. | -| `DNSLogsLatency` | `FELIX_DNSLOGSLATENCY` | `true` | Indicates to include measurements of DNS request/response latency in each DNS log. | -| `EgressIPSupport` | `FELIX_EGRESSIPSUPPORT` | `Disabled` | Defines three different support modes for egress gateway function. `Disabled` means egress gateways are not supported. `EnabledPerNamespace` means egress gateway function is enabled and can be configured on a per-namespace basis (but per-pod egress annotations are ignored). `EnabledPerNamespaceOrPerPod` means egress gateway function is enabled and can be configured per-namespace or per-pod (with per-pod egress annotations overriding namespace annotations). | -| `EgressIPVXLANPort` | `FELIX_EGRESSIPVXLANPORT` | `4097` | Port to use for egress gateway VXLAN traffic. A value of `0` means "use the kernel default". | -| `EgressIPVXLANVNI` | `FELIX_EGRESSIPVXLANVNI` | `4790` | Virtual network ID to use for egress gateway VXLAN traffic. A value of `0` means "use the kernel default". | -| `EgressIPRoutingRulePriority` | `FELIX_EGRESSIPROUTINGRULEPRIORITY` | `100` | Priority value to use for the egress gateway routing rule. | -| `L7LogsFileEnabled` | `FELIX_L7LOGSFILEENABLED` | `true` | If set to `false` no L7 logging will occur. L7 logs are written to a file `l7.log` and sent to Elasticsearch. The location of this file can be configured using the `L7LogsFileDirectory` field. File rotation settings for this `l7.log` file can be configured using the fields `L7LogsFileMaxFiles` and `L7LogsFileMaxFileSizeMB`. Note that L7 log exports to Elasticsearch are dependent on L7 logs getting written to this file. | -| `L7LogsFileDirectory` | `FELIX_L7LOGSFILEDIRECTORY` | `/var/log/calico/l7logs` | The directory where L7 log files are stored. This parameter only takes effect when `L7LogsFileEnabled` is `true`. | -| `L7LogsFileMaxFiles` | `FELIX_L7LOGSFILEMAXFILES` | `5` | The number of files to keep when rotating L7 log files. This parameter only takes effect when `L7LogsFileEnabled` is `true`. | -| `L7LogsFileMaxFileSizeMB` | `FELIX_L7LOGSFILEMAXFILESIZEMB` | `100` | The max size in MB of L7 log files before rotation. 
This parameter only takes effect when `L7LogsFileEnabled` is `true`. | -| `L7LogsFlushInterval` | `FELIX_L7LOGSFLUSHINTERVAL` | `300` | The period, in seconds, at which Felix exports L7 logs. | -| `L7LogsFileAggregationHTTPHeaderInfo` | `FELIX_L7LOGSFILEAGGREGATIONHTTPHEADERINFO` | `ExcludeL7HTTPHeaderInfo` | How to handle HTTP header information for aggregating L7 logs. Bear in mind that changing this value may have a dramatic impact on the volume of L7 logs sent to Elasticsearch. Possible values include `ExcludeL7HTTPHeaderInfo` and `IncludeL7HTTPHeaderInfo`. | -| `L7LogsFileAggregationHTTPMethod` | `FELIX_L7LOGSFILEAGGREGATIONHTTPMETHOD` | `IncludeL7HTTPMethod` | How to handle HTTP method data for aggregating L7 logs. Bear in mind that changing this value may have a dramatic impact on the volume of L7 logs sent to Elasticsearch. Possible values include `ExcludeL7HTTPMethod` and `IncludeL7HTTPMethod`. | -| `L7LogsFileAggregationServiceInfo` | `FELIX_L7LOGSFILEAGGREGATIONSERVICEINFO` | `IncludeL7ServiceInfo` | How to handle service information for aggregating L7 logs. Bear in mind that changing this value may have a dramatic impact on the volume of L7 logs sent to Elasticsearch. Possible values include `ExcludeL7ServiceInfo` and `IncludeL7ServiceInfo`. | -| `L7LogsFileAggregationDestinationInfo` | `FELIX_L7LOGSFILEAGGREGATIONDESTINATIONINFO` | `IncludeL7DestinationInfo` | How to handle destination metadata for aggregating L7 logs. Bear in mind that changing this value may have a dramatic impact on the volume of L7 logs sent to Elasticsearch. Possible values include `ExcludeL7DestinationInfo` and `IncludeL7DestinationInfo`. | -| `L7LogsFileAggregationSourceInfo` | `FELIX_L7LOGSFILEAGGREGATIONSOURCEINFO` | `IncludeL7SourceInfoNoPort` | How to handle source metadata for aggregating L7 logs. Bear in mind that changing this value may have a dramatic impact on the volume of L7 logs sent to Elasticsearch. Possible values include `ExcludeL7SourceInfo`, `IncludeL7SourceInfoNoPort`, and `IncludeL7SourceInfo`. | -| `L7LogsFileAggregationResponseCode` | `FELIX_L7LOGSFILEAGGREGATIONRESPONSECODE` | `IncludeL7ResponseCode` | How to handle response code data for aggregating L7 logs. Bear in mind that changing this value may have a dramatic impact on the volume of L7 logs sent to Elasticsearch. Possible values include `ExcludeL7ResponseCode` and `IncludeL7ResponseCode`. | -| `L7LogsFileAggregationTrimURL` | `FELIX_L7LOGSFILEAGGREGATIONTRIMURL` | `IncludeL7FullURL` | How to handle URL data for aggregating L7 logs. Bear in mind that changing this value may have a dramatic impact on the volume of L7 logs sent to Elasticsearch. Possible values include `ExcludeL7URL`, `TrimURLQuery`, `TrimURLQueryAndPath`, and `IncludeL7FullURL`. | -| `L7LogsFileAggregationNumURLPath` | `FELIX_L7LOGSFILEAGGREGATIONNUMURLPATH` | `5` | How many components in the path to limit the URL by. This parameter only takes effect when `L7LogsFileAggregationTrimURL` is set to `IncludeL7FullURL`. Bear in mind that changing this value may have a dramatic impact on the volume of L7 logs sent to Elasticsearch. Negative values set the limit to infinity. | - -DropActionOverride controls what happens to each packet that is denied by -the current $[prodname] policy - i.e. by the ordered combination of all the -configured policies and profiles that apply to that packet. 
It may be -set to one of the following values: - -- `Drop` -- `Accept` -- `LogAndDrop` -- `LogAndAccept` - -Normally the `Drop` or `LogAndDrop` value should be used, as dropping a -packet is the obvious implication of that packet being denied. However when -experimenting, or debugging a scenario that is not behaving as you expect, the -`Accept` and `LogAndAccept` values can be useful: then the packet will be -still be allowed through. - -When set to `LogAndDrop` or `LogAndAccept`, each denied packet is logged in -syslog, with an entry like this: - -``` -May 18 18:42:44 ubuntu kernel: [ 1156.246182] calico-drop: IN=tunl0 OUT=cali76be879f658 MAC= SRC=192.168.128.30 DST=192.168.157.26 LEN=60 TOS=0x00 PREC=0x00 TTL=62 ID=56743 DF PROTO=TCP SPT=56248 DPT=80 WINDOW=29200 RES=0x00 SYN URGP=0 MARK=0xa000000 -``` - -If the `LogDropActionOverride` flag is set, then the `DropActionOverride` will also appear in the syslog entry: - -``` -May 18 18:42:44 ubuntu kernel: [ 1156.246182] calico-drop LOGandDROP: IN=tunl0 OUT=cali76be879f658 MAC= SRC=192.168.128.30 DST=192.168.157.26 LEN=60 TOS=0x00 PREC=0x00 TTL=62 ID=56743 DF PROTO=TCP SPT=56248 DPT=80 WINDOW=29200 RES=0x00 SYN URGP=0 MARK=0xa000000 -``` - -When the reporting of denied packet metrics is enabled, Felix keeps counts of -recently denied packets and publishes these as Prometheus metrics on the port -configured by the `PrometheusReporterPort` setting. - -Note that denied packet metrics are independent of the DropActionOverride -setting. Specifically, if packets that would normally be denied are being -allowed through by a setting of `Accept` or `LogAndAccept`, those packets -still contribute to the denied packet metrics as just described. - -### Felix-Typha Configuration - -| Configuration parameter | Environment variable | Description | Schema | -| ----------------------- | --------------------------- | ----------------------------------------------------------------------------------------- | ------ | -| `TyphaAddr` | `FELIX_TYPHAADDR` | Address of the Typha Server when running outside a K8S Cluster, in the format IP:PORT | string | -| `TyphaK8sServiceName` | `FELIX_TYPHAK8SSERVICENAME` | Service Name of Typha Deployment when running inside a K8S Cluster | string | -| `TyphaK8sNamespace` | `FELIX_TYPHAK8SNAMESPACE` | Namespace of Typha Deployment when running inside a K8S Cluster. [Default: `kube-system`] | string | -| `TyphaReadTimeout` | `FELIX_TYPHAREADTIMEOUT` | Timeout of Felix when reading information from Typha, in seconds. [Default: 30] | int | -| `TyphaWriteTimeout` | `FELIX_TYPHAWRITETIMEOUT` | Timeout of Felix when writing information to Typha, in seconds. [Default: 30] | int | - -### Felix-Typha TLS configuration - -| Configuration parameter | Environment variable | Description | Schema | -| ----------------------- | --------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------ | -| `TyphaCAFile` | `FELIX_TYPHACAFILE` | Path to the file containing the root certificate of the CA that issued the Typha server certificate. 
Configures Felix to trust the CA that signed the root certificate. The file may contain multiple root certificates, causing Felix to trust each of the CAs included. Example: `/etc/felix/ca.pem` | string | -| `TyphaCertFile` | `FELIX_TYPHACERTFILE` | Path to the file containing the client certificate issued to Felix. Enables Felix to participate in mutual TLS authentication and identify itself to the Typha server. Example: `/etc/felix/cert.pem` | string | -| `TyphaCN` | `FELIX_TYPHACN` | If set, the `Common Name` that Typha's certificate must have. If you have enabled TLS on the communications from Felix to Typha, you must set a value here or in `TyphaURISAN`. You can set values in both, as well, such as to facilitate a migration from using one to the other. If either matches, the communication succeeds. [Default: none] | string | -| `TyphaKeyFile` | `FELIX_TYPHAKEYFILE` | Path to the file containing the private key matching the Felix client certificate. Enables Felix to participate in mutual TLS authentication and identify itself to the Typha server. Example: `/etc/felix/key.pem` (optional) | string | -| `TyphaURISAN` | `FELIX_TYPHAURISAN` | If set, a URI SAN that Typha's certificate must have. We recommend populating this with a [SPIFFE](https://github.com/spiffe/spiffe/blob/master/standards/SPIFFE-ID.md#2-spiffe-identity) string that identifies Typha. All Typha instances should use the same SPIFFE ID. If you have enabled TLS on the communications from Felix to Typha, you must set a value here or in `TyphaCN`. You can set values in both, as well, such as to facilitate a migration from using one to the other. If either matches, the communication succeeds. [Default: none] | string | - -For more information on how to use and set these variables, refer to -[Connections from Node to Typha (Kubernetes)](../../../../operations/comms/crypto-auth.mdx#connections-from-node-to-typha-kubernetes). - -### PacketCapture configuration - -The following parameters fine-tune packet capture rotation: - -| Configuration parameter | Environment variable | Description | Schema | -| ------------------------ | --------------------------------- | --------------------------------------------------------------------------------------------- | ------ | -| `CaptureDir` | `FELIX_CAPTUREDIR` | Controls the directory where packet capture files are stored. Example: `/var/log/calico/pcap` | string | -| `CaptureMaxSizeBytes` | `FELIX_CAPTUREMAXSIZEBYTES` | Controls the maximum size in bytes for a packet capture file before rotation. | int | -| `CaptureRotationSeconds` | `FELIX_CAPTUREMAXROTATIONSECONDS` | Controls the rotation period in seconds for a packet capture file. | int | -| `CaptureMaxFiles` | `FELIX_CAPTUREMAXFILES` | Controls the maximum number of rotated packet capture files. | int | - -### WireGuard configuration - -| Configuration parameter | Description | Accepted values | Schema | Default | -| ------------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------- | ------- | -------------- | -| wireguardEnabled | Enable encryption for IPv4 on WireGuard supported nodes in cluster. When enabled, pod to pod traffic will be sent over encrypted tunnels between the nodes. 
| `true`, `false` | boolean | `false` | -| wireguardEnabledV6 | Enable encryption for IPv6 on WireGuard supported nodes in cluster. When enabled, pod to pod traffic will be sent over encrypted tunnels between the nodes. | `true`, `false` | boolean | `false` | -| wireguardInterfaceName | Name of the IPv4 WireGuard interface created by Felix. If you change the name, and want to clean up the previously-configured interface names on each node, this is a manual process. | string | string | wireguard.cali | -| wireguardInterfaceNameV6 | Name of the IPv6 WireGuard interface created by Felix. If you change the name, and want to clean up the previously-configured interface names on each node, this is a manual process. | string | string | wg-v6.cali | -| wireguardListeningPort | Port used by IPv4 WireGuard tunnels. Felix sets up an IPv4 WireGuard tunnel on each node specified by this port. Available for configuration only in the global FelixConfiguration resource; setting it per host, config-file or environment variable will not work. | 1-65535 | int | 51820 | -| wireguardListeningPortV6 | Port used by IPv6 WireGuard tunnels. Felix sets up an IPv6 WireGuard tunnel on each node specified by this port. Available for configuration only in the global FelixConfiguration resource; setting it per host, config-file or environment variable will not work. | 1-65535 | int | 51821 | -| wireguardMTU | MTU set on the IPv4 WireGuard interface created by Felix. Zero value means auto-detect. See [Configuring MTU](../../../../networking/configuring/mtu.mdx). | int | int | 0 | -| wireguardMTUV6 | MTU set on the IPv6 WireGuard interface created by Felix. Zero value means auto-detect. See [Configuring MTU](../../../../networking/configuring/mtu.mdx). | int | int | 0 | -| wireguardRoutingRulePriority | WireGuard routing rule priority value set up by Felix. If you change the default value, set it to a value most appropriate to routing rules for your nodes. | 1-32765 | int | 99 | -| wireguardHostEncryptionEnabled | **Experimental**: Adds host-namespace workload IP's to WireGuard's list of peers. Should **not** be enabled when WireGuard is enabled on a cluster's control plane node, as networking deadlock can occur. | true, false | boolean | false | -| wireguardKeepAlive | WireguardKeepAlive controls Wireguard PersistentKeepalive option. Set 0 to disable. [Default: 0] | int | int | 25 | - -For more information on encrypting in-cluster traffic with WireGuard, refer to -[Encrypt cluster pod traffic](../../../../compliance/encrypt-cluster-pod-traffic.mdx) - -## Environment variables - -The highest priority of configuration is that read from environment -variables. To set a configuration parameter via an environment variable, -set the environment variable formed by taking `FELIX_` and appending the -uppercase form of the variable name. For example, to set the etcd -address, set the environment variable `FELIX_ETCDADDR`. Other examples -include `FELIX_ETCDSCHEME`, `FELIX_ETCDKEYFILE`, `FELIX_ETCDCERTFILE`, -`FELIX_ETCDCAFILE`, `FELIX_FELIXHOSTNAME`, `FELIX_LOGFILEPATH` and -`FELIX_METADATAADDR`. - -:::note -To set a parameter to an empty value using an environment variable, assign an empty string to it (e.g., `FELIX_FAILSAFEINBOUNDHOSTPORTS=""`). -::: - -## Configuration file - -On startup, Felix reads an ini-style configuration file. The path to -this file defaults to `/etc/calico/felix.cfg` but can be overridden -using the `-c` or `--config-file` options on the command line. 
If the -file exists, then it is read (ignoring section names) and all parameters -are set from it. - -In OpenStack, we recommend putting all configuration into configuration -files, since the etcd database is transient (and may be recreated by the -OpenStack plugin in certain error cases). However, in a Docker -environment the use of environment variables or etcd is often more -convenient. - -## Datastore - -Felix also reads configuration parameters from the datastore. It supports -a global setting and a per-host override. - -1. Get the current felixconfig settings. - - ```bash - kubectl get felixconfiguration.projectcalico.org default -o yaml --export > felix.yaml - ``` - -1. Modify logFilePath to your intended path, e.g. "/tmp/felix.log" - - ```bash - vim felix.yaml - ``` - - :::tip - - For a global change set name to "default". - For a node-specific change: set name to `node.`, e.g. "node.$[prodname]-node-1" - - ::: - -1. Replace the current felixconfig settings - - ```bash - kubectl replace -f felix.yaml - ``` - -For more information, see [Felix Configuration Resource](../../../resources/felixconfig.mdx). - - - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/component-resources/node/felix/index.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/component-resources/node/felix/index.mdx deleted file mode 100644 index b7c1e2d1c1..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/component-resources/node/felix/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: Felix is a Calico component that runs on every machine that provides endpoints. -hide_table_of_contents: true ---- - -# Felix - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/component-resources/node/felix/prometheus.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/component-resources/node/felix/prometheus.mdx deleted file mode 100644 index 8ee5371b9e..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/component-resources/node/felix/prometheus.mdx +++ /dev/null @@ -1,226 +0,0 @@ ---- -description: Review metrics for the Felix component if you are using Prometheus. ---- - -# Prometheus metrics - -Felix can be configured to report a number of metrics through Prometheus. See the -[configuration reference](configuration.mdx) for how to enable metrics reporting. - -## Metric reference - -### Felix specific - -Felix exports a number of Prometheus metrics. The current set is as follows. Since some metrics -are tied to particular implementation choices inside Felix we can't make any hard guarantees that -metrics will persist across releases. However, we aim not to make any spurious changes to -existing metrics. - -| Name | Description | -| ---------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `felix_active_local_endpoints` | Number of active endpoints on this host. | -| `felix_active_local_policies` | Number of active policies on this host. | -| `felix_active_local_selectors` | Number of active selectors on this host. | -| `felix_active_local_tags` | Number of active tags on this host. | -| `felix_bpf_conntrack_cleaned` | Number of entries cleaned during a conntrack table sweep. 
| -| `felix_bpf_conntrack_cleaned_total` | Total number of entries cleaned during conntrack table sweeps, incremented for each clean individually. | -| `felix_bpf_conntrack_expired` | Number of entries cleaned during a conntrack table sweep due to expiration. | -| `felix_bpf_conntrack_expired_total` | Total number of entries cleaned during conntrack table sweep due to expiration - by reason. | -| `felix_bpf_conntrack_inforeader_blocks` | Conntrack InfoReader would-blocks. | -| `felix_bpf_conntrack_stale_nat` | Number of entries cleaned during a conntrack table sweep due to stale NAT. | -| `felix_bpf_conntrack_stale_nat_total` | Total number of entries cleaned during conntrack table sweeps due to stale NAT. | -| `felix_bpf_conntrack_sweeps` | Number of conntrack table sweeps made so far. | -| `felix_bpf_conntrack_used` | Number of used entries visited during a conntrack table sweep. | -| `felix_bpf_conntrack_sweep_duration` | Conntrack sweep execution time (ns). | -| `felix_bpf_num_ip_sets` | Number of BPF IP sets managed in the data plane. | -| `felix_calc_graph_output_events` | Number of events emitted by the calculation graph. | -| `felix_calc_graph_update_time_seconds` | Seconds to update calculation graph for each datastore OnUpdate call. | -| `felix_calc_graph_updates_processed` | Number of datastore updates processed by the calculation graph. | -| `felix_cluster_num_host_endpoints` | Total number of host endpoints cluster-wide. | -| `felix_cluster_num_hosts` | Total number of $[prodname] hosts in the cluster. | -| `felix_cluster_num_policies` | Total number of policies in the cluster. | -| `felix_cluster_num_profiles` | Total number of profiles in the cluster. | -| `felix_cluster_num_tiers` | Total number of $[prodname] tiers in the cluster. | -| `felix_cluster_num_workload_endpoints` | Total number of workload endpoints cluster-wide. | -| `felix_egress_gateway_remote_polls{status="total"}` | Total number of remote egress gateway pods that Felix is polling for health/connectivity. Only egress gateways with a named "health" port will be polled. | -| `felix_egress_gateway_remote_polls{status="up"}` | Total number of remote egress gateway pods that have successful probes. | -| `felix_egress_gateway_remote_polls{status="probe-failed"}` | Total number of remote egress gateway pods that have failed probes. | -| `felix_exec_time_micros` | Summary of time taken to fork/exec child processes | -| `felix_int_dataplane_addr_msg_batch_size` | Number of interface address messages processed in each batch. Higher values indicate we're doing more batching to try to keep up. | -| `felix_int_dataplane_apply_time_seconds` | Time in seconds that it took to apply a data plane update. | -| `felix_int_dataplane_failures` | Number of times data plane updates failed and will be retried. | -| `felix_int_dataplane_iface_msg_batch_size` | Number of interface state messages processed in each batch. Higher values indicate we're doing more batching to try to keep up. | -| `felix_int_dataplane_messages` | Number data plane messages by type. | -| `felix_int_dataplane_msg_batch_size` | Number of messages processed in each batch. Higher values indicate we're doing more batching to try to keep up. | -| `felix_ipsec_bindings_total` | Total number of ipsec bindings. | -| `felix_ipsec_errors` | Number of ipsec command failures. | -| `felix_ipset_calls` | Number of ipset commands executed. | -| `felix_ipset_errors` | Number of ipset command failures. | -| `felix_ipset_lines_executed` | Number of ipset operations executed. 
| -| `felix_ipsets_calico` | Number of active $[prodname] IP sets. | -| `felix_ipsets_total` | Total number of active IP sets. | -| `felix_iptables_chains` | Number of active iptables chains. | -| `felix_iptables_lines_executed` | Number of iptables rule updates executed. | -| `felix_iptables_lock_acquire_secs` | Time taken to acquire the iptables lock. | -| `felix_iptables_lock_retries` | Number of times the iptables lock was already held and felix had to retry to acquire it. | -| `felix_iptables_restore_calls` | Number of iptables-restore calls. | -| `felix_iptables_restore_errors` | Number of iptables-restore errors. | -| `felix_iptables_rules` | Number of active iptables rules. | -| `felix_iptables_save_calls` | Number of iptables-save calls. | -| `felix_iptables_save_errors` | Number of iptables-save errors. | -| `felix_log_errors` | Number of errors encountered while logging. | -| `felix_logs_dropped` | Number of logs dropped because the output stream was blocked. | -| `felix_reporter_log_errors` | Number of errors encountered while logging in the Syslog. | -| `felix_reporter_logs_dropped` | Number of logs dropped because the output was blocked in the Syslog reporter. | -| `felix_resync_state` | Current datastore state. | -| `felix_resyncs_started` | Number of times Felix has started resyncing with the datastore. | -| `felix_route_table_list_seconds` | Time taken to list all the interfaces during a resync. | -| `felix_route_table_per_iface_sync_seconds` | Time taken to sync each interface | - -Prometheus metrics are self-documenting, with metrics turned on, `curl` can be used to list the -metrics along with their help text and type information. - -```bash -curl -s http://localhost:9091/metrics | head -``` - -Example response: - -``` -# HELP felix_active_local_endpoints Number of active endpoints on this host. -# TYPE felix_active_local_endpoints gauge -felix_active_local_endpoints 91 -# HELP felix_active_local_policies Number of active policies on this host. -# TYPE felix_active_local_policies gauge -felix_active_local_policies 0 -# HELP felix_active_local_selectors Number of active selectors on this host. -# TYPE felix_active_local_selectors gauge -felix_active_local_selectors 82 -... -``` - -### Label indexing metrics - -The label index is a subcomponent of Felix that is responsible for calculating the set of endpoints and network sets -that match each selector that is in an active policy rule. Policy rules are active on a particular node if the policy -they belong to selects a workload or host endpoint on that node with its top-level selector (in `spec.selector`). -Inactive policies have minimal CPU cost because their selectors do not get indexed. - -Since the label index must match the active selectors against _all_ endpoints and network sets in the cluster, its -performance is critical and it supports various optimizations to minimize CPU usage. Its metrics can be used to -check that the optimizations are active for your policy set. - -#### `felix_label_index_num_endpoints` - -Reports the total number of endpoints (and similar objects such as network sets) being tracked by the index. -This should match the number of endpoints and network sets in your cluster. - -#### `felix_label_index_num_active_selectors{optimized="true|false"}` - -Reports the total number of active selectors, broken into `optimized="true"` and `optimized="false"` sub-totals. - -The `optimized="true"` total tracks the number of selectors that the label index was able to optimize. 
Those -selectors should be calculated efficiently even in clusters with hundreds of thousands of endpoints. In general, the -CPU used to calculate them should be proportional to the number of endpoints that match them and the churn rate of -_those_ endpoints. - -The `optimized="false"` total tracks the number of selectors that could not be optimized. Unoptimized selectors are -much more costly to calculate; the CPU used to calculate them is proportional to the number of endpoints -in the cluster and their churn rate. It is generally OK to have a handful of unoptimized selectors, -but if many selectors are unoptimized the CPU usage can be substantial at high scale. - -For more information on writing selectors that can be optimized, see [this](../../../resources/networkpolicy.mdx#selector-performance-in-entityrules) -section of the `NetworkPolicy` reference. - -#### `felix_label_index_selector_evals{result="true|false"}` - -Counts the total number of times that a selector was evaluated against an endpoint to determine whether it matches, broken -down by match (`true`) or no-match (`false`). The ratio of match to no-match shows how effective the selector -indexing optimizations are for your policy set. The more effectively the label index can optimize the selectors, -the fewer "no-match" results it will report relative to "match". - -If you have more than a handful of active selectors and `felix_label_index_selector_evals{result="false"}` is many -times `felix_label_index_selector_evals{result="true"}`, then it is likely that some selectors in the policy set are -not being optimized effectively. - -#### `felix_label_index_strategy_evals{strategy="..."}` - -This is a technical statistic that shows how many times the label index has employed each optimization -strategy that it has available. The strategies will likely evolve over time but, at time of writing, they are -as follows: - -- `endpoint-full-scan`: the least efficient fallback strategy for unoptimized selectors. The index - scanned _all_ endpoints to find the matches for a selector. - -- `endpoint|parent-no-match`: the most efficient strategy; the index was able to prove that nothing matched the - selector so it was able to skip the scan entirely. - -- `endpoint|parent-single-value`: the label index was able to limit the scan to only those endpoints/parents that - have a particular label and value combination. For example, selector `label == "value"` would only scan items that - had exactly that label set to "value". - -- `endpoint|parent-multi-value`: the label index was able to limit the scan to only those endpoints/parents that - have a particular label and one of a few values. For example, selector `label in {"a", "b"}` would only scan items - that had exactly that label with one of the given values. - -- `endpoint|parent-label-name`: the label index was able to limit the scan to only those endpoints/parents that - have a particular label (but was unable to limit it to a particular subset of values). For example, `has(label)` - would result in that kind of scan. - -Terminology: here "endpoint" means "endpoint or NetworkSet" and "parent" is Felix's internal name for resources like -Kubernetes Namespaces. A "parent" scan means that the label index scanned all endpoints that have a parent -matching the strategy. - -### CPU / memory metrics - -Felix also exports the default set of metrics that Prometheus makes available. 
Currently, those -include: - -| Name | Description | -| ---------------------------------- | ------------------------------------------------------------------------------------------- | -| `go_gc_duration_seconds` | A summary of the GC invocation durations. | -| `go_goroutines` | Number of goroutines that currently exist. | -| `go_info` | Go version. | -| `go_memstats_alloc_bytes` | Number of bytes allocated and still in use. | -| `go_memstats_alloc_bytes_total` | Total number of bytes allocated, even if freed. | -| `go_memstats_buck_hash_sys_bytes` | Number of bytes used by the profiling bucket hash table. | -| `go_memstats_frees_total` | Total number of frees. | -| `go_memstats_gc_cpu_fraction` | The fraction of this program’s available CPU time used by the GC since the program started. | -| `go_memstats_gc_sys_bytes` | Number of bytes used for garbage collection system metadata. | -| `go_memstats_heap_alloc_bytes` | Number of heap bytes allocated and still in use. | -| `go_memstats_heap_idle_bytes` | Number of heap bytes waiting to be used. | -| `go_memstats_heap_inuse_bytes` | Number of heap bytes that are in use. | -| `go_memstats_heap_objects` | Number of allocated objects. | -| `go_memstats_heap_released_bytes` | Number of heap bytes released to OS. | -| `go_memstats_heap_sys_bytes` | Number of heap bytes obtained from system. | -| `go_memstats_last_gc_time_seconds` | Number of seconds since 1970 of last garbage collection. | -| `go_memstats_lookups_total` | Total number of pointer lookups. | -| `go_memstats_mallocs_total` | Total number of mallocs. | -| `go_memstats_mcache_inuse_bytes` | Number of bytes in use by mcache structures. | -| `go_memstats_mcache_sys_bytes` | Number of bytes used for mcache structures obtained from system. | -| `go_memstats_mspan_inuse_bytes` | Number of bytes in use by mspan structures. | -| `go_memstats_mspan_sys_bytes` | Number of bytes used for mspan structures obtained from system. | -| `go_memstats_next_gc_bytes` | Number of heap bytes when next garbage collection will take place. | -| `go_memstats_other_sys_bytes` | Number of bytes used for other system allocations. | -| `go_memstats_stack_inuse_bytes` | Number of bytes in use by the stack allocator. | -| `go_memstats_stack_sys_bytes` | Number of bytes obtained from system for stack allocator. | -| `go_memstats_sys_bytes` | Number of bytes obtained by system. Sum of all system allocations. | -| `go_threads` | Number of OS threads created. | -| `process_cpu_seconds_total` | Total user and system CPU time spent in seconds. | -| `process_max_fds` | Maximum number of open file descriptors. | -| `process_open_fds` | Number of open file descriptors. | -| `process_resident_memory_bytes` | Resident memory size in bytes. | -| `process_start_time_seconds` | Start time of the process since unix epoch in seconds. | -| `process_virtual_memory_bytes` | Virtual memory size in bytes. | -| `process_virtual_memory_max_bytes` | Maximum amount of virtual memory available in bytes. | - -### Wireguard Metrics - -Felix also exports wireguard device stats if found/detected. Can be disabled via Felix configuration. - -| Name | Description | -| ------------------------------------ | ------------------------------------------------------------------------------------------------- | -| `wireguard_meta` | Gauge. Device / interface information for a felix/calico node, values are in this metric's labels | -| `wireguard_bytes_rcvd` | Counter. 
Current bytes received from a peer identified by a peer public key and endpoint | -| `wireguard_bytes_sent` | Counter. Current bytes sent to a peer identified by a peer public key and endpoint | -| `wireguard_latest_handshake_seconds` | Gauge. Last handshake with a peer, unix timestamp in seconds. | diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/component-resources/node/index.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/component-resources/node/index.mdx deleted file mode 100644 index c468cc112a..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/component-resources/node/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: Learn about the components that comprise the cnx-node. -hide_table_of_contents: true ---- - -# Calico Enterprise node (cnx-node) - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/component-resources/typha/configuration.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/component-resources/typha/configuration.mdx deleted file mode 100644 index 039acdcd07..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/component-resources/typha/configuration.mdx +++ /dev/null @@ -1,106 +0,0 @@ ---- -description: Configure Typha for scaling Kubernetes API datastore (kdd). ---- - -# Configuring Typha - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - - - - -Typha configuration cannot be modified when Calico Enterprise is installed via the operator. - - - - -Configuration for Typha is read from one of two possible locations, in -order, as follows. - -1. Environment variables, prefixed with `TYPHA_`. - -2. The Typha configuration file. The path to this file defaults to - `/etc/calico/typha.cfg` but can be overridden using the `-c` or - `--config-file` options on the command line. - -The value of any configuration parameter is the value read from the -_first_ location containing a value. For example, if an environment variable -contains a value, it takes precedence. - -If not set in any of these locations, most configuration parameters have -defaults, and it should be rare to have to explicitly set them. - -The full list of parameters which can be set is as follows. - -### General configuration - -| Configuration parameter | Environment variable | Description | Schema | -| --------------------------------------- | --------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------- | -| `DatastoreType` | `TYPHA_DATASTORETYPE` | The datastore that Typha should read endpoints and policy information from. [Default: `etcdv3`] | `etcdv3`, `kubernetes` | -| `HealthEnabled` | `TYPHA_HEALTHENABLED` | When enabled, exposes Typha health information via an http endpoint. | boolean | -| `HealthPort` | `TYPHA_HEALTHPORT` | The port that Typha will serve health information over. [Default: `9098`] | int | -| `HealthHost` | `TYPHA_HEALTHHOST` | The address that Typha will bind its health endpoint to. [Default: `localhost`] | string | -| `LogFilePath` | `TYPHA_LOGFILEPATH` | The full path to the Typha log. Set to `none` to disable file logging. 
[Default: `/var/log/calico/typha.log`] | string | -| `LogSeverityFile` | `TYPHA_LOGSEVERITYFILE` | The log severity above which logs are sent to the log file. [Default: `Info`] | `Debug`, `Info`, `Warning`, `Error`, `Fatal` | -| `LogSeverityScreen` | `TYPHA_LOGSEVERITYSCREEN` | The log severity above which logs are sent to the stdout. [Default: `Info`] | `Debug`, `Info`, `Warning`, `Error`, `Fatal` | -| `LogSeveritySys` | `TYPHA_LOGSEVERITYSYS` | The log severity above which logs are sent to the syslog. Set to `""` for no logging to syslog. [Default: `Info`] | `Debug`, `Info`, `Warning`, `Error`, `Fatal` | -| `PrometheusGoMetricsEnabled` | `TYPHA_PROMETHEUSGOMETRICSENABLED` | Set to `false` to disable Go runtime metrics collection, which the Prometheus client does by default. This reduces the number of metrics reported, reducing Prometheus load. [Default: `true`] | boolean | -| `PrometheusMetricsEnabled` | `TYPHA_PROMETHEUSMETRICSENABLED` | Set to `true` to enable the Prometheus metrics server in Typha. [Default: `false`] | boolean | -| `PrometheusMetricsHost` | `TYPHA_PROMETHEUSMETRICSHOST` | TCP network address that the Prometheus metrics server should bind to. [Default: `""`] | string | -| `PrometheusMetricsPort` | `TYPHA_PROMETHEUSMETRICSPORT` | TCP port that the Prometheus metrics server should bind to. [Default: `9091`] | int | -| `PrometheusProcessMetricsEnabled` | `TYPHA_PROMETHEUSPROCESSMETRICSENABLED` | Set to `false` to disable process metrics collection, which the Prometheus client does by default. This reduces the number of metrics reported, reducing Prometheus load. [Default: `true`] | boolean | -| `ServerHandshakeTimeoutSecs` | `TYPHA_SERVERHANDSHAKETIMEOUTSECS` | Maximum time that Typha server should wait for the client TLS handshake to be performed. | int | -| `ShutdownTimeoutSecs` | `TYPHA_SHUTDOWNTIMEOUTSECS` | Maximum time that Typha should take to do a graceful shut down. In Kubernetes, this should match Typha's `terminationGracePeriodSeconds`. | int | -| `ShutdownConnectionDropIntervalMaxSecs` | `TYPHA_SHUTDOWNCONNECTIONDROPINTERVALMAXSECS` | Maximum time between terminating two connections when doing a graceful shutdown. Prevents very slow shut downs if `ShutdownTimeoutSecs` is large but Typha only has a small number of clients. | int | - -:::note - -By default, if the health endpoint is enabled Typha listens on localhost. However, if Typha is used in -Kubernetes, the kubelet will do health checks using the pod IP. To work around this discrepancy, the Typha image -supports a health-check CLI command that fetches the health endpoint: -`calico-typha check (readiness|liveness) --port=`. If you modify the health port, you will need to add the -`--port=` argument to the liveness and readiness probe commands in the manifest. - -::: - -### Kubernetes API datastore configuration - -The Kubernetes API datastore driver reads its configuration from Kubernetes-provided environment variables. - -### $[prodname] specific configuration - -| Setting | Environment variable | Default | Meaning | -| --------------------------- | --------------------------------- | ------- | ------------------------------------------------------------------------ | -| `PrometheusMetricsCertFile` | `TYPHA_PROMETHEUSMETRICSCERTFILE` | None | Certificate for encrypting Typha Prometheus metrics. | -| `PrometheusMetricsKeyFile` | `TYPHA_PROMETHEUSMETRICSKEYFILE` | None | Private key for encrypting Typha Prometheus metrics. 
| -| `PrometheusMetricsCAFile` | `TYPHA_PROMETHEUSMETRICSCAFILE` | None | Trusted CA file for clients attempting to read Typha Prometheus metrics. | - -When the `PrometheusMetrics...File` parameters are set, Typha's -Prometheus port is TLS-secured such that only a validated client can -read Prometheus metrics, and the data is encrypted in transit. A -valid client must then connect over HTTPS and present a certificate -that is signed by one of the trusted CAs in the -`PrometheusMetricsCAFile` setting. - -#### Environment variables - -| Environment | Description | Schema | -| ------------ | --------------------------------------------------------------------------------------------------------------------------------------------------- | ------- | -| USE_POD_CIDR | Use the Kubernetes `Node.Spec.PodCIDR` field. This field is required when using the Kubernetes API datastore with host-local IPAM. [Default: false] | boolean | - -### Felix-Typha TLS configuration - -| Configuration parameter | Environment variable | Description | Schema | -| ----------------------- | ---------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------ | -| `CAFile` | `TYPHA_CAFILE` | Path to the file containing the root certificate of the CA that issued the Felix client certificate. Configures Typha to trust the CA that signed the Felix client certificate. The file may contain multiple root certificates, causing Typha to trust each of the CAs included. Example: `/etc/typha/ca.pem` | string | -| `ClientCN` | `TYPHA_CLIENTCN` | If set, the `Common Name` that Felix's certificate must have. If you have enabled TLS on the communications from Felix to Typha, you must set a value here or in `ClientURISAN`. You can set values in both, as well, such as to facilitate a migration from using one to the other. If either matches, the communication succeeds. [Default: none] | string | -| `ClientURISAN` | `TYPHA_CLIENTURISAN` | If set, a URI SAN that Felix's certificate must have. We recommend populating this with a [SPIFFE](https://github.com/spiffe/spiffe/blob/master/standards/SPIFFE-ID.md#2-spiffe-identity) string that identifies Felix. All Felix instances should use the same SPIFFE ID. If you have enabled TLS on the communications from Felix to Typha, you must set a value here or in `ClientCN`. You can set values in both, as well, such as to facilitate a migration from using one to the other. If either matches, the communication succeeds. [Default: none] | string | -| `ServerCertFile` | `TYPHA_SERVERCERTFILE` | Path to the file containing the server certificate issued to Typha. Typha presents this to Felix clients during the TLS handshake. Example: `/etc/typha/cert.pem` | string | -| `ServerKeyFile` | `TYPHA_SERVERKEYFILE` | Path to the file containing the private key matching the Typha server certificate. 
Example: `/etc/typha/key.pem` (optional) | string | - -For more information on how to use and set these variables, refer to -[Connections from Node to Typha (Kubernetes)](../../../operations/comms/crypto-auth.mdx#connections-from-node-to-typha-kubernetes). - - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/component-resources/typha/index.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/component-resources/typha/index.mdx deleted file mode 100644 index 7c686c6303..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/component-resources/typha/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: Typha is the Calico component for scaling Kubernetes deployments. -hide_table_of_contents: true ---- - -# Typha - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/component-resources/typha/overview.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/component-resources/typha/overview.mdx deleted file mode 100644 index fc3c771648..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/component-resources/typha/overview.mdx +++ /dev/null @@ -1,12 +0,0 @@ ---- -description: Increase scale and reduce node impact on the datastore using Typha. ---- - -# Reduce node impacts on datastore - -The Typha daemon sits between the datastore (such as the Kubernetes API server) and many instances of Felix. Typha's main purpose is to increase scale by reducing each node's impact on the datastore. Services such as [Felix](https://github.com/projectcalico/felix) and [confd](https://github.com/projectcalico/confd) connect to Typha instead of connecting directly to the datastore as Typha maintains a single datastore connection on behalf of all its clients. It caches the datastore state and deduplicates events so that they can be fanned out to many listeners. - -## Advantages - -- Since one Typha instance can support hundreds of Felix instances, it reduces the load on the datastore by a large factor. -- Since Typha can filter out updates that are not relevant to Felix, it also reduces Felix's CPU usage. In a high-scale (100+ node) Kubernetes cluster, this is essential because the number of updates generated by the API server scales with the number of nodes. diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/component-resources/typha/prometheus.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/component-resources/typha/prometheus.mdx deleted file mode 100644 index c8c2c4886a..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/component-resources/typha/prometheus.mdx +++ /dev/null @@ -1,120 +0,0 @@ ---- -description: Review metrics for the Typha component if you are using Prometheus. ---- - -# Prometheus metrics - -Typha can be configured to report a number of metrics through Prometheus. The Prometheus port can be controlled -via the `typhaPrometheusPort` field in the operator's [`Installation` resource](../../installation/api.mdx#installation). - -## Metric reference - -#### Typha specific - -Typha exports a number of Prometheus metrics. The current set is as follows. Since some metrics -are tied to particular implementation choices inside Typha we can't make any hard guarantees that -metrics will persist across releases. However, we aim not to make any spurious changes to -existing metrics. 
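Before wiring these metrics into dashboards or alerts, it can be useful to confirm which of them your own Typha instances actually expose. The following is only a sketch: the `calico-system` namespace, the `k8s-app=calico-typha` label, and port `9091` are assumptions based on a typical operator-managed install and may need adjusting for your cluster.

```bash
# Pick one Typha pod (namespace and label are assumptions; adjust as needed).
TYPHA_POD=$(kubectl get pods -n calico-system -l k8s-app=calico-typha -o name | head -n 1)

# Forward the assumed metrics port locally, then list the Typha-specific metrics.
kubectl -n calico-system port-forward "$TYPHA_POD" 9091:9091 &
sleep 2
curl -s http://localhost:9091/metrics | grep '^typha_'
```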
- -##### Terminology - -**Syncer:** Many of Typha's metrics are now parameterised by "syncer type"; Typha runs one "syncer" for each -type of client that it supports. The "syncer" is the component that synchronises Typha's local cache -of the datastore with the upstream datastore. The syncer type is attached to the metrics via a -Prometheus label `syncer="..."`. - -**Breadcrumb:** Typha's internal cache stores a series of snapshots of the state of the datastore along with -a list of changes when compared to the previous snapshot. We call the combination of a snapshot and the list of -changes a "breadcrumb". Breadcrumbs are linked together into a linked list as they are created. When a client -connects, Typha sends the snapshot from the most recent breadcrumb to the client; then, it "follows the breadcrumbs" -on behalf of that client, sending it the change list from each breadcrumb. - -| Name | Description | -| ------------------------------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `typha_cache_size` | The total number of key/value pairs in Typha's in-memory cache. | -| `typha_snapshots_generated` | The total number of binary snapshots generated by Typha. Binary snapshots are generated once and then shared between multiple clients for performance. | -| `typha_snapshots_reused` | The number of binary snapshots that Typha was able to reuse for multiple clients, thus reducing CPU usage. | -| `typha_snapshot_raw_bytes` | The size of the most recent binary snapshot in bytes pre-compression. | -| `typha_snapshot_compressed_bytes` | The size of the most recent binary snapshot in bytes post-compression. | -| `typha_breadcrumb_block` | Count of the number of times Typha got the next Breadcrumb after blocking. | -| `typha_breadcrumb_non_block` | typha_breadcrumb_non_block Count of the number of times Typha got the next Breadcrumb without blocking. | -| `typha_breadcrumb_seq_number` | Current (server-local) sequence number; number of snapshot deltas processed. | -| `typha_breadcrumb_size` | Number of KVs recorded in each breadcrumb. | -| `typha_client_latency_secs` | Per-client latency. I.e. how far behind the current state is each client. | -| `typha_client_snapshot_send_secs` | How long it took to send the initial snapshot to each client. | -| `typha_client_write_latency_secs` | Per-client write. How long each write call is taking. | -| `typha_connections_accepted` | Total number of connections accepted over time. | -| `typha_connections_active` | Number of open client connections (including connections that have not completed the handshake). | -| `typha_connections_streaming` | Number of client connections that are actively streaming (i.e. connections that successfully completed the handshake). | -| `typha_connections_dropped` | Total number of connections dropped due to rebalancing. | -| `typha_kvs_per_msg` | Number of KV pairs sent in each message. | -| `typha_log_errors` | Number of errors encountered while logging. | -| `typha_logs_dropped` | Number of logs dropped because the output stream was blocked. | -| `typha_next_breadcrumb_latency_secs` | Time to retrieve next breadcrumb when already behind. | -| `typha_ping_latency` | Round-trip ping/pong latency to client. 
Typha's protocol includes a regular ping/pong keepalive to verify that the connection is still up. | -| `typha_updates_skipped` | Total number of updates skipped because the datastore change was not relevant. (For example, an update to a Kubernetes Pod field that $[prodname] does not read.) | -| `typha_updates_total` | Total number of updates received from the datastore. | -| `remote_cluster_connection_status` | Status of the remote cluster connection in federation. Represented as numeric values 0 (NotConnecting) ,1 (Connecting), 2 (InSync), 3 (ReSyncInProgress), 4 (ConfigChangeRestartRequired), 5 (ConfigInComplete). Uses `remote_cluster_name` label to represent name of the remote cluster in federation.| - -Prometheus metrics are self-documenting, with metrics turned on, `curl` from a node running Typha can be used to list the -metrics along with their help text and type information. - -```bash -curl -s http://localhost:9091/metrics | head -``` - -Example response: - -``` -# HELP typha_breadcrumb_block Count of the number of times Typha got the next Breadcrumb after blocking. -# TYPE typha_breadcrumb_block counter -typha_breadcrumb_block 57 -# HELP typha_breadcrumb_non_block Count of the number of times Typha got the next Breadcrumb without blocking. -# TYPE typha_breadcrumb_non_block counter -typha_breadcrumb_non_block 0 -# HELP typha_breadcrumb_seq_number Current (server-local) sequence number; number of snapshot deltas processed. -# TYPE typha_breadcrumb_seq_number gauge -typha_breadcrumb_seq_number 22215 -... -``` - -#### CPU / memory metrics - -Typha also exports the default set of metrics that Prometheus makes available. Currently, those -include: - -| Name | Description | -| -------------------------------------------- | ------------------------------------------------------------------ | -| `go_gc_duration_seconds` | A summary of the GC invocation durations. | -| `go_goroutines` | Number of goroutines that currently exist. | -| `go_memstats_alloc_bytes` | Number of bytes allocated and still in use. | -| `go_memstats_alloc_bytes_total` | Total number of bytes allocated, even if freed. | -| `go_memstats_buck_hash_sys_bytes` | Number of bytes used by the profiling bucket hash table. | -| `go_memstats_frees_total` | Total number of frees. | -| `go_memstats_gc_sys_bytes` | Number of bytes used for garbage collection system metadata. | -| `go_memstats_heap_alloc_bytes` | Number of heap bytes allocated and still in use. | -| `go_memstats_heap_idle_bytes` | Number of heap bytes waiting to be used. | -| `go_memstats_heap_inuse_bytes` | Number of heap bytes that are in use. | -| `go_memstats_heap_objects` | Number of allocated objects. | -| `go_memstats_heap_released_bytes_total` | Total number of heap bytes released to OS. | -| `go_memstats_heap_sys_bytes` | Number of heap bytes obtained from system. | -| `go_memstats_last_gc_time_seconds` | Number of seconds since 1970 of last garbage collection. | -| `go_memstats_lookups_total` | Total number of pointer lookups. | -| `go_memstats_mallocs_total` | Total number of mallocs. | -| `go_memstats_mcache_inuse_bytes` | Number of bytes in use by mcache structures. | -| `go_memstats_mcache_sys_bytes` | Number of bytes used for mcache structures obtained from system. | -| `go_memstats_mspan_inuse_bytes` | Number of bytes in use by mspan structures. | -| `go_memstats_mspan_sys_bytes` | Number of bytes used for mspan structures obtained from system. | -| `go_memstats_next_gc_bytes` | Number of heap bytes when next garbage collection will take place. 
| -| `go_memstats_other_sys_bytes` | Number of bytes used for other system allocations. | -| `go_memstats_stack_inuse_bytes` | Number of bytes in use by the stack allocator. | -| `go_memstats_stack_sys_bytes` | Number of bytes obtained from system for stack allocator. | -| `go_memstats_sys_bytes` | Number of bytes obtained by system. Sum of all system allocations. | -| `process_cpu_seconds_total` | Total user and system CPU time spent in seconds. | -| `process_max_fds` | Maximum number of open file descriptors. | -| `process_open_fds` | Number of open file descriptors. | -| `process_resident_memory_bytes` | Resident memory size in bytes. | -| `process_start_time_seconds` | Start time of the process since unix epoch in seconds. | -| `process_virtual_memory_bytes` | Virtual memory size in bytes. | -| `promhttp_metric_handler_requests_in_flight` | Current number of scrapes being served. | -| `promhttp_metric_handler_requests_total` | Total number of scrapes by HTTP status code. | diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/component-versions.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/component-versions.mdx deleted file mode 100644 index 5bbdfe55ac..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/component-versions.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: A list of component versions for Calico Enterprise ---- - -import ComponentVersions from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/ComponentVersions'; - -# Component versions - -This page lists the specific component versions that go into each release of $[prodname]. - - \ No newline at end of file diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/faq.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/faq.mdx deleted file mode 100644 index 47f7ea4995..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/faq.mdx +++ /dev/null @@ -1,440 +0,0 @@ ---- -description: Common questions that users ask about Calico Enterprise. ---- - -# Frequently asked questions - -## Why use $[prodname]? - -The problem $[prodname] tries to solve is the networking of workloads (VMs, -containers, etc) in a high scale environment. Existing L2-based methods -for solving this problem have problems at high scale. Compared to these, -we think $[prodname] is more scalable, simpler, and more flexible. We think -you should look into it if you have more than a handful of nodes on a -single site. - -$[prodname] also provides a rich network security model that -allows operators and developers to declare intent-based network security -policy that is automatically rendered into distributed firewall rules -across a cluster of containers, VMs, and/or servers. - -For a more detailed discussion of this topic, see our blog post at -[Why Calico?](https://www.projectcalico.org/why-calico/). - -## Does $[prodname] work with IPv6? - -Yes! $[prodname]'s core components support IPv6 out of the box. However, -not all orchestrators that we integrate with support IPv6 yet. - -## Why does my container have a route to 169.254.1.1? - -In a $[prodname] network, each host acts as a gateway router for the -workloads that it hosts. In container deployments, $[prodname] uses -169.254.1.1 as the address for the $[prodname] router. By using a -link-local address, $[prodname] saves precious IP addresses and avoids -burdening the user with configuring a suitable address. 
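For illustration, the routing table inside a $[prodname]-networked container typically looks something like the following (run from inside the container; the interface name may differ in your environment):

```bash
ip route
```

The output will look something like this:

```
default via 169.254.1.1 dev eth0
169.254.1.1 dev eth0 scope link
```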
- -While the routing table may look a little odd to someone who is used to -configuring LAN networking, using explicit routes rather than -subnet-local gateways is fairly common in WAN networking. - -## Why isn't $[prodname] working with a containerized Kubelet? - -$[prodname] hosted install places the necessary CNI binaries and config on each -Kubernetes node in a directory on the host as specified in the manifest. By -default it places binaries in /opt/cni/bin and config /etc/cni/net.d. - -When running the kubelet as a container using hyperkube, -you need to make sure that the containerized kubelet can see the CNI network -plugins and config that have been installed by mounting them into the kubelet container. - -For example add the following arguments to the kubelet-wrapper service: - -``` ---volume /etc/cni/net.d:/etc/cni/net.d \ ---volume /opt/cni/bin:/opt/cni/bin \ -``` - -Without the above volume mounts, the kubelet will not call the $[prodname] CNI binaries, and so -$[prodname] [workload endpoints](resources/workloadendpoint.mdx) will -not be created, and $[prodname] policy will not be enforced. - -## How do I view $[prodname] CNI logs? - -The $[prodname] CNI plugin emits logs to stderr, which are then logged out by the kubelet. Where these logs end up -depend on how your kubelet is configured. For deployments using `systemd`, you can do this via `journalctl`. - -The log level can be configured via the CNI network configuration file, by changing the value of the -key `log_level`. See [Configuring the $[prodname] CNI plugins](component-resources/configuration.mdx) for more information. - -CNI plugin logs can also be found in `/var/log/calico/cni`. - -## How do I configure the pod IP range? - -When using $[prodname] IPAM, IP addresses are assigned from [IP Pools](resources/ippool.mdx). - -By default, all enabled IP pools are used. However, you can specify which IP pools to use for IP address management in the [CNI network config](component-resources/configuration.mdx#ipam), -or on a per-pod basis using [Kubernetes annotations](component-resources/configuration.mdx#using-kubernetes-annotations). - -## How do I assign a specific IP address to a pod? - -For most use cases it's not necessary to assign specific IP addresses to a Kubernetes pod and it's recommended to use Kubernetes services instead. -However, if you do need to assign a particular address to a pod, $[prodname] provides two ways of doing this: - -- You can request an IP that is available in $[prodname] IPAM using the `cni.projectcalico.org/ipAddrs` annotation. -- You can request an IP using the `cni.projectcalico.org/ipAddrsNoIpam` annotation. Note that this annotation bypasses the configured IPAM plugin, and thus in most cases it is recommended to use the above annotation. - -See the [Requesting a specific IP address](component-resources/configuration.mdx#requesting-a-specific-ip-address) section in the CNI plugin reference documentation for more details. - -## Why can't I see the 169.254.1.1 address mentioned above on my host? - -$[prodname] tries hard to avoid interfering with any other configuration -on the host. Rather than adding the gateway address to the host side -of each workload interface, $[prodname] sets the `proxy_arp` flag on the -interface. This makes the host behave like a gateway, responding to -ARPs for 169.254.1.1 without having to actually allocate the IP address -to the interface. - -## Why do all cali\* interfaces have the MAC address ee:ee:ee:ee:ee:ee? 
- -In some setups the kernel is unable to generate a persistent MAC address and so -$[prodname] assigns a MAC address itself. Since $[prodname] uses -point-to-point routed interfaces, traffic does not reach the data link layer -so the MAC Address is never used and can therefore be the same for all the -cali\* interfaces. - -## Can I prevent my Kubernetes pods from initiating outgoing connections? - -Yes! The Kubernetes [`NetworkPolicy`](https://kubernetes.io/docs/concepts/services-networking/network-policies/) -API added support for egress policies in v1.8. You can also use `calicoctl` -to configure egress policy to prevent Kubernetes pods from initiating outgoing -connections based on the full set of supported $[prodname] policy primitives -including labels, Kubernetes namespaces, CIDRs, and ports. - -## I've heard $[prodname] uses proxy ARP, doesn't proxy ARP cause a lot of problems? - -It can, but not in the way that $[prodname] uses it. - -In container deployments, $[prodname] only uses proxy ARP for resolving the -169.254.1.1 address. The routing table inside the container ensures -that all traffic goes via the 169.254.1.1 gateway so that is the only -IP that will be ARPed by the container. - -## Is $[prodname] compliant with PCI/DSS requirements? - -PCI certification applies to the whole end-to-end system, of which -$[prodname] would be a part. We understand that most current solutions use -VLANs, but after studying the PCI requirements documents, we believe -that $[prodname] does meet those requirements and that nothing in the -documents _mandates_ the use of VLANs. - -## How do I enable IP-in-IP and NAT outgoing on an IP pool? - -1. Retrieve current IP pool config. - - ```bash - calicoctl get ipPool --export -o yaml > pool.yaml - ``` - -2. Modify IP pool config. - - Modify the pool's spec to enable IP-in-IP and NAT outgoing. (See - [IP pools](resources/ippool.mdx) - for other settings that can be edited.) - - ```shell - - apiVersion: projectcalico.org/v3 - kind: IPPool - metadata: - name: ippool-1 - spec: - cidr: 192.168.0.0/16 - ipipMode: Always - natOutgoing: true - ``` - -3. Load the modified file. - - ```bash - kubectl replace -f pool.yaml - ``` - -## How does $[prodname] maintain saved state? - -State is saved in a few places in a $[prodname] deployment, depending on -whether it's global or local state. - -Local state is state that belongs on a single compute host, associated -with a single running Felix instance (things like kernel routes, tap -devices etc.). Local state is entirely stored by the Linux kernel on the -host, with Felix storing it only as a temporary mirror. This makes Felix -effectively stateless, with the kernel acting as a backing data store on -one side and Kubernetes (kdd) as a data source on the other. - -If Felix is restarted, it learns current local state by interrogating -the kernel at start up. It then reads from the etcd datastore all the local state -which it should have, and updates the kernel to match. This approach has -strong resiliency benefits, in that if Felix restarts you don't suddenly -lose access to your VMs or containers. As long as the Linux kernel is -running, you've still got full functionality. - -The bulk of global state is mastered in whatever component hosts the -plugin. - -- In certain cases, `etcd` itself contains the master copy of - the data. 
This is because some Docker deployments have an `etcd` - cluster that has the required resiliency characteristics, used to - store all system configuration and so `etcd` is configured so as to - be a suitable store for critical data. -- In other orchestration systems, it may be stored in distributed - databases, either owned directly by the plugin or by the - orchestrator itself. - -The only other state storage in a $[prodname] network is in the BGP sessions, -which approximate a distributed database of routes. This BGP state is -simply a replicated copy of the per-host routes configured by Felix -based on the global state provided by the orchestrator. - -This makes the $[prodname] design very simple, because we store very little -state. All of our components can be shut down and restarted without risk, -because they resynchronize state as necessary. This makes modeling -their behavior extremely simple, reducing the complexity of bugs. - -## I heard $[prodname] is suggesting layer 2: I thought you were layer 3! What's happening? - -It's important to distinguish what $[prodname] provides to the workloads -hosted in a data center (a purely layer 3 network) with what the $[prodname] -project _recommends_ operators use to build their underlying network -fabric. - -$[prodname]'s core principle is that _applications_ and _workloads_ -overwhelmingly need only IP connectivity to communicate. For this reason -we build an IP-forwarded network to connect the tenant applications and -workloads to each other and the broader world. - -However, the underlying physical fabric obviously needs to be set up -too. Here, $[prodname] has discussed how both a layer 2 (see -[here](architecture/design/l2-interconnect-fabric.mdx)) -or a layer 3 (see -[here](architecture/design/l3-interconnect-fabric.mdx)) -fabric -could be integrated with $[prodname]. This is one of the great strengths of -the $[prodname] model: it allows the infrastructure to be decoupled from what -we show to the tenant applications and workloads. - -We have some thoughts on different interconnect approaches (as noted -above), but just because we say that there are layer 2 and layer 3 ways -of building the fabric, and that those decisions may have an impact on -route scale, does not mean that $[prodname] is "going back to Ethernet" or -that we're recommending layer 2 for tenant applications. In all cases we -forward on IP packets, no matter what architecture is used to build the -fabric. - -## How do I control policy/connectivity without virtual/physical firewalls? - -$[prodname] provides an extremely rich security policy model, applying policy at the first and last hop -of the routed traffic within the $[prodname] network (the source and -destination compute hosts). - -This model is substantially more robust to failure than a centralized -firewall-based model. In particular, the $[prodname] approach has no -single point of failure: if the device enforcing the firewall has failed -then so has one of the workloads involved in the traffic (because the -firewall is enforced by the compute host). - -This model is also extremely amenable to scaling out. Because we have a -central repository of policy configuration, but apply it at the edges of -the network (the hosts) where it is needed, we automatically ensure that -the rules match the topology of the data center. 
This allows easy -scaling out, and gives us all the advantages of a single firewall (one -place to manage the rules), but none of the disadvantages (single points -of failure, state sharing, hairpinning of traffic, etc.). - -Lastly, we decouple the reachability of nodes and the policy applied to -them. We use BGP to distribute the topology of the network, telling -every node how to get to every endpoint in case two endpoints need to -communicate. We use policy to decide _if_ those two nodes should -communicate, and if so, how. If policy changes and two endpoints should -now communicate, where before they shouldn’t have, all we have to do is -update policy: the reachability information does not change. If later -they should be denied the ability to communicate, the policy is updated -again, and again the reachability doesn’t have to change. - -## Why isn't the `-p` flag on `docker run` working as expected? - -The `-p` flag tells Docker to set up port mapping to connect a port on the -Docker host to a port on your container via the `docker0` bridge. - -If a host's containers are connected to the `docker0` bridge interface, $[prodname] -would be unable to enforce security rules between workloads on the same host; -all containers on the bridge would be able to communicate with one other. - -## Can $[prodname] containers use any IP address within a pool, even subnet network/broadcast addresses? - -Yes! $[prodname] is fully routed, so all IP address within a $[prodname] pool are usable as -private IP addresses to assign to a workload. This means addresses commonly -reserved in a L2 subnet, such as IPv4 addresses ending in .0 or .255, are perfectly -okay to use. - -## How do I get network traffic into and out of my $[prodname] cluster? - -The recommended way to get traffic to/from your $[prodname] network is by peering to -your existing data center L3 routers using BGP and by assigning globally -routable IPs (public IPs) to containers that need to be accessed from the internet. -This allows incoming traffic to be routed directly to your containers without the -need for NAT. This flat L3 approach delivers exceptional network scalability -and performance. - -A common scenario is for your container hosts to be on their own -isolated layer 2 network, like a rack in your server room or an entire data -center. Access to that network is via a router, which also is the default -router for all the container hosts. - -If this describes your infrastructure, -[Configure outgoing NAT](../networking/configuring/workloads-outside-cluster.mdx explains in more detail -what to do. Otherwise, if you have a layer 3 (IP) fabric, then there are -detailed datacenter networking recommendations given -in [$[prodname] over IP fabrics](architecture/design/l3-interconnect-fabric.mdx). -We'd also encourage you to [get in touch](https://www.projectcalico.org/contact) -to discuss your environment. - -### How can I enable NAT for outgoing traffic from containers with private IP addresses? - -If you want to allow containers with private IP addresses to be able to access the -internet then you can use your data center's existing outbound NAT capabilities -(typically provided by the data center's border routers). - -Alternatively you can use $[prodname]'s built in outbound NAT capability by enabling it on any -$[prodname] IP pool. In this case $[prodname] will perform outbound NAT locally on the compute -node on which each container is hosted. 
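As a sketch of what that looks like, the IP pool just needs `natOutgoing` enabled (the pool name and CIDR below are placeholders for your own values):

```bash
cat <<EOF | calicoctl apply -f -
apiVersion: projectcalico.org/v3
kind: IPPool
metadata:
  name: ippool-1
spec:
  cidr: 192.168.0.0/16
  natOutgoing: true
EOF
```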
- -```bash -cat < - natOutgoing: true -EOF -``` - -Where `` is the CIDR of your IP pool, for example `192.168.0.0/16`. - -Remember: the security profile for the container will need to allow traffic to the -internet as well. Refer to the appropriate guide for your orchestration -system for details on how to configure policy. - -### How can I enable NAT for incoming traffic to containers with private IP addresses? - -As discussed, the recommended way to get traffic to containers that -need to be accessed from the internet is to give them public IP addresses and -to configure $[prodname] to peer with the data center's existing L3 routers. - -In cases where this is not possible then you can configure incoming NAT -(also known as DNAT) on your data centers existing border routers. Alternatively -you can configure incoming NAT with port mapping on the host on which the container -is running on. - -1. Create a new chain called `expose-ports` to hold the NAT rules. - - ```bash - iptables -t nat -N expose-ports - ``` - -1. Jump to that chain from the `OUTPUT` and `PREROUTING` chains. - - ```bash - iptables -t nat -A OUTPUT -j expose-ports - iptables -t nat -A PREROUTING -j expose-ports - ``` - - :::tip - - The `OUTPUT` chain is hit by traffic originating on the host itself; - the `PREROUTING` chain is hit by traffic coming from elsewhere. - - ::: - -1. For each port you want to expose, add a rule to the - expose-ports chain, replacing `` with the host IP that you - want to use to expose the port and `` with the host port. - - ```bash - iptables -t nat -A expose-ports -p tcp --destination \ - --dport -j DNAT --to : - ``` - -For example, you have a container to which you've assigned the `CALICO_IP` -of 192.168.7.4, and you have NGINX running on port 8080 inside the container. -If you want to expose this service on port 80 and your host has IP 192.0.2.1, -then you could run the following commands: - -```bash -iptables -t nat -N expose-ports -iptables -t nat -A OUTPUT -j expose-ports -iptables -t nat -A PREROUTING -j expose-ports - -iptables -t nat -A expose-ports -p tcp --destination 192.0.2.1 --dport 80 -j DNAT --to 192.168.7.4:8080 -``` - -The commands will need to be run each time the host is restarted. - -Remember: the security profile for the container will need to allow traffic to the exposed port as well. -Refer to the appropriate guide for your orchestration system for details on how to configure policy. - -### Can I run $[prodname] in a public cloud environment? - -Yes. If you are running in a public cloud that doesn't allow either L3 peering or L2 connectivity between $[prodname] hosts then you can enable IP-in-IP in your $[prodname] IP pool: - -```bash -cat < - ipipMode: Always - natOutgoing: true -EOF -``` - -$[prodname] will then route traffic between $[prodname] hosts using IP-in-IP. - -For best performance in AWS, you can disable [Source/Destination Check](resources/felixconfig.mdx#spec) instead of using IP-in-IP or VXLAN; but only if all your instances are in the same subnet of your VPC. The setting must be `Disable` for the EC2 instance(s) to process traffic not matching the host interface IP address. This is also applicable if your cluster is spread across multiple subnets. If your cluster traffic crosses subnets, set `ipipMode` (or `vxlanMode`) to `CrossSubnet` to reduce the encapsulation overhead. Check [configuring overlay networking](../networking/configuring/vxlan-ipip.mdx) for the details. 
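For illustration, a pool that only encapsulates traffic crossing subnet boundaries might look like the following sketch (the pool name and CIDR are placeholders):

```bash
cat <<EOF | calicoctl apply -f -
apiVersion: projectcalico.org/v3
kind: IPPool
metadata:
  name: ippool-1
spec:
  cidr: 192.168.0.0/16
  ipipMode: CrossSubnet
  natOutgoing: true
EOF
```

With `CrossSubnet` mode, traffic between nodes in the same subnet is sent unencapsulated; IP-in-IP is used only when a packet must cross a subnet boundary.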
- -You can disable Source/Destination Check using [Felix configuration](resources/felixconfig.mdx), the AWS CLI, or the EC2 console. For example, using the AWS CLI: - -```bash -aws ec2 modify-instance-attribute --instance-id --source-dest-check "{\"Value\": false}" - -cat < - natOutgoing: true -EOF -``` - -### On AWS with IP-in-IP, why do I see no connectivity between workloads or only see connectivity if I ping in both directions? - -By default, AWS security groups block incoming IP-in-IP traffic. - -However, if an instance has recently sent some IP-in-IP traffic out when it receives some incoming IP-in-IP traffic, -then AWS sees that as a response to an outgoing connection and it allows the incoming traffic. This leads to some very -confusing behavior where traffic can be blocked and then suddenly start working! - -To resolve the issue, add a rule to your security groups that allows inbound and outbound IP-in-IP traffic (IP protocol -number 4) between your hosts. - -## Can Calico do IP multicast? - -Calico is a routed L3 network where each pod gets a /32. There's no broadcast domain for pods. -That means that multicast doesn't just work as a side effect of broadcast. To get multicast to -work, the host needs to act as a multicast gateway of some kind. Calico's architecture was designed -to extend to cover that case but it's not part of the product as yet. diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/host-endpoints/connectivity.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/host-endpoints/connectivity.mdx deleted file mode 100644 index ce65240507..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/host-endpoints/connectivity.mdx +++ /dev/null @@ -1,91 +0,0 @@ ---- -description: Customize the Calico failsafe policy to protect host endpoints. ---- - -# Creating policy for basic connectivity - -When a host endpoint is added, if there is no security policy for that -endpoint, $[prodname] will default to denying traffic to/from that endpoint, -except for traffic that is allowed by the [failsafe rules](failsafe.mdx). - -While the [failsafe rules](failsafe.mdx) provide protection against removing all -connectivity to a host: - -- They are overly broad in allowing inbound SSH on any interface and - allowing traffic out to etcd's ports on any interface. - -- Depending on your network, they may not cover all the ports that are - required; for example, your network may rely on allowing ICMP, - or DHCP. - -Therefore, we recommend creating a failsafe $[prodname] security policy that -is tailored to your environment. The example command below shows one -example of how you might do that; the command uses `calicoctl` to create a single -policy resource, which: - -- Applies to all known endpoints. -- Allows inbound ssh access from a defined “management” subnet. -- Allows outbound connectivity to etcd on a particular IP; if you have multiple etcd servers you should duplicate the rule for each destination. -- Allows inbound ICMP. -- Allows outbound UDP on port 67, for DHCP. - -When running this command, replace the placeholders in angle brackets with -appropriate values for your deployment. 
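A sketch of such a policy is shown below; the management CIDR, etcd IP, and etcd ports used here are placeholders rather than recommendations:

```bash
calicoctl create -f - <<EOF
apiVersion: projectcalico.org/v3
kind: GlobalNetworkPolicy
metadata:
  name: failsafe
spec:
  selector: all()
  order: 0
  ingress:
    - action: Allow
      protocol: TCP
      source:
        nets: ["10.240.0.0/16"]
      destination:
        ports: [22]
    - action: Allow
      protocol: ICMP
  egress:
    - action: Allow
      protocol: TCP
      destination:
        nets: ["10.240.0.10/32"]
        ports: [2379, 2380]
    - action: Allow
      protocol: UDP
      destination:
        ports: [67]
EOF
```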
- -{/* */} - -```bash -cat <" - destination: - ports: [22] - - action: Allow - protocol: ICMP - egress: - - action: Allow - protocol: TCP - destination: - nets: [/32] - ports: [] - - action: Allow - protocol: TCP - destination: - nets: [] - - action: Allow - protocol: UDP - destination: - ports: [67] -EOF -``` - -Once you have such a policy in place, you may want to disable the -[failsafe rules](failsafe.mdx). - -:::note - -Packets that reach the end of the list of rules fall-through to the -next policy (sorted by the `order` field). -The selector in the policy, `all()`, will match _all_ endpoints, -including any workload endpoints. If you have workload endpoints as -well as host endpoints then you may wish to use a more restrictive -selector. For example, you could label management interfaces with -label `endpoint_type = management` and then use selector -`endpoint_type == "management"` -If you are using $[prodname] for networking workloads, you should add -inbound and outbound rules to allow BGP: add an ingress and egress rule -to allow TCP traffic to destination port 179. - -::: diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/host-endpoints/conntrack.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/host-endpoints/conntrack.mdx deleted file mode 100644 index c7d6fa1255..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/host-endpoints/conntrack.mdx +++ /dev/null @@ -1,29 +0,0 @@ ---- -description: Workaround for Linux conntrack if Calico policy is not working as it should. ---- - -# Connection tracking - -$[prodname] uses Linux's connection tracking ('conntrack') as an important -optimization to its processing. It generally means that $[prodname] only needs to -check its policies for the first packet in an allowed flow—between a pair of -IP addresses and ports—and then conntrack automatically allows further -packets in the same flow, without $[prodname] rechecking every packet. - -This can, however, make it look like a $[prodname] policy is not working as it -should, if policy is changed to disallow a flow that was previously allowed. -If packets were recently exchanged on the previously allowed flow, and so there -is conntrack state for that flow that has not yet expired, that conntrack state -will allow further packets between the same IP addresses and ports, even after -the $[prodname] policy has been changed. - -Per $[prodname]'s current implementation, there are two workarounds for this: - -- Somehow ensure that no further packets flow between the relevant IP - addresses and ports until the conntrack state has expired (typically about - a minute). - -- Use the 'conntrack' tool to delete the relevant conntrack state; for example - `conntrack -D -p tcp --orig-port-dst 80`. - -Then you should observe that the new $[prodname] policy is enforced for new packets. diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/host-endpoints/failsafe.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/host-endpoints/failsafe.mdx deleted file mode 100644 index d203c50af4..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/host-endpoints/failsafe.mdx +++ /dev/null @@ -1,39 +0,0 @@ ---- -description: Avoid cutting off connectivity to hosts because of incorrect network policies. ---- - -# Failsafe rules - -To avoid completely cutting off a host via incorrect or malformed -policy, $[prodname] has a failsafe mechanism that keeps various pinholes open -in the firewall. 
- -By default, $[prodname] keeps the following ports open on _all_ host endpoints: - -| Port | Protocol | Direction | Purpose | -| ---- | -------- | ------------------ | ------------------------------ | -| 22 | TCP | Inbound | SSH access | -| 53 | UDP | Outbound | DNS queries | -| 67 | UDP | Outbound | DHCP access | -| 68 | UDP | Inbound | DHCP access | -| 179 | TCP | Inbound & Outbound | BGP access (Calico networking) | -| 6443 | TCP | Inbound & Outbound | Kubernetes API server access | - -The lists of failsafe ports can be configured via the configuration parameters -`FailsafeInboundHostPorts` and `FailsafeOutboundHostPorts` -described in [Configuring Felix](../component-resources/node/felix/configuration.mdx) -. They -can be disabled by setting each configuration value to "[]". - -:::note - -Removing the inbound failsafe rules can leave a host inaccessible. - -Removing the outbound failsafe rules can leave Felix unable to connect -to the datastore. - -Before disabling the failsafe rules, we recommend creating a policy to -replace it with more-specific rules for your environment: see -[Creating policy for basic connectivity](connectivity.mdx). - -::: diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/host-endpoints/forwarded.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/host-endpoints/forwarded.mdx deleted file mode 100644 index 92b6c0e087..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/host-endpoints/forwarded.mdx +++ /dev/null @@ -1,80 +0,0 @@ ---- -description: Learn the subtleties using the applyOnForward option in host endpoint policies. ---- - -# Apply on forwarded traffic - -If `applyOnForward` is `false`, the host endpoint policy applies to traffic to/from -local processes only. - -If `applyOnForward` is `true`, the host endpoint policy also applies to forwarded traffic: - -- Traffic that comes in via a host endpoint and is forwarded to a local workload (container/pod/VM). -- Traffic from a local workload that is forwarded out via a host endpoint. -- Traffic that comes in via a host endpoint and is forwarded out via another host endpoint. - -By default, `applyOnForward` is `false`. - -Untracked policies and pre-DNAT policies must have `applyOnForward` set to `true` -because they apply to all forwarded traffic. - -Forwarded traffic is allowed by default if no policies apply to the endpoint and direction. In -other words, if a host endpoint is configured, but there are no policies with `applyOnForward` -set to `true` that apply to that host endpoint and traffic direction, forwarded traffic is -allowed in that direction. For example if a forwarded flow is incoming via a host endpoint, but there are -no Ingress policies with `applyOnForward: true` that apply to that host endpoint, the flow is -allowed. If there are `applyOnForward: true` policies that select the host endpoint and direction, -but no rules in the policies allow the traffic, the traffic is denied. - -This is different from how $[prodname] treats traffic to or from a local process: -if a host endpoint is configured and there are no policies that select the host endpoint in -the traffic direction, or no rules that allow the traffic, the traffic is denied. - -Traffic that traverses a host endpoint and is forwarded to a workload endpoint must also pass -the applicable workload endpoint policy, if any. That is to say, if an `applyOnForward: true` host -endpoint policy allows the traffic, but workload endpoint policy denies it, the packet is still dropped. 
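As an illustration, the following sketch (the policy name, selector label, and port are illustrative) shows an `applyOnForward: true` policy that allows forwarded TCP traffic to port 80 through host endpoints labelled `role: gateway`:

```bash
calicoctl apply -f - <<EOF
apiVersion: projectcalico.org/v3
kind: GlobalNetworkPolicy
metadata:
  name: allow-forwarded-http
spec:
  selector: role == 'gateway'
  order: 100
  applyOnForward: true
  ingress:
    - action: Allow
      protocol: TCP
      destination:
        ports: [80]
EOF
```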
- -Traffic that ingresses one host endpoint, is forwarded, and egresses host endpoint must -pass ingress policy on the first host endpoint and egress policy on the second host endpoint. - -:::note - -$[prodname]'s handling of host endpoint policy has changed, since before -Calico v3.0, in two ways: - -- It will not apply at all to forwarded traffic, by default. If you have an existing - policy and you want it to apply to forwarded traffic, you need to add `applyOnForward: true` to the policy. -- Even with `applyOnForward: true`, the treatment is not quite the same in - Calico v3.0 as in previous releases, because–once a host endpoint is configured– - Calico v3.0 allows forwarded traffic through that endpoint by default, whereas - previous releases denied forwarded traffic through that endpoint by default. - If you want to maintain the default-deny behavior for all host-endpoint forwarded - traffic, you can create an empty policy with `applyOnForward` set to `true` - that applies to all traffic on all host endpoints. - -::: - -```bash -calicoctl apply -f - < diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/host-endpoints/objects.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/host-endpoints/objects.mdx deleted file mode 100644 index 881d42708c..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/host-endpoints/objects.mdx +++ /dev/null @@ -1,124 +0,0 @@ ---- -description: To protect a host interface, start by creating a host endpoint object in etcd. ---- - -# Creating host endpoint objects - -For each host endpoint that you want $[prodname] to secure, you'll need to -create a host endpoint object in etcd. Use the `calicoctl create` command -to create a host endpoint resource (`HostEndpoint`). - -There are two ways to specify the interface that a host endpoint should -refer to. You can either specify the name of the interface or its -expected IP address. In either case, you'll also need to know the name given to -the $[prodname] node running on the host that owns the interface; in most cases this -will be the same as the hostname of the host. - -For example, to secure the interface named `eth0` with IP 10.0.0.1 on -host `my-host`, run the command below. The name of the endpoint is an -arbitrary name required for endpoint identification. - -When running this command, replace the placeholders in angle brackets with -appropriate values for your deployment. - -```bash -calicoctl create -f - < - labels: - role: webserver - environment: production - spec: - interfaceName: eth0 - node: - profiles: [] - expectedIPs: ["10.0.0.1"] -EOF -``` - -:::note - -Felix tries to detect the correct hostname for a system. It logs -out the value it has determined at start-of-day in the following -format: -`2015-10-20 17:42:09,813 \[INFO\]\[30149/5\] calico.felix.config 285: Parameter FelixHostname (Felix compute host hostname) has value 'my-hostname' read from None` -The value (in this case `'my-hostname'`) needs to match the hostname -used in etcd. Ideally, the host's system hostname should be set -correctly but if that's not possible, the Felix value can be -overridden with the FelixHostname configuration setting. See -configuration for more details. - -::: - -Where `` is an optional list of security profiles -to apply to the endpoint and labels contains a set of arbitrary -key/value pairs that can be used in selector expressions. - -{/* TODO(smc) data-model: Link to new data model docs. 
*/} - -:::note - -When rendering security rules on other hosts, $[prodname] uses the -`expectedIPs` field to resolve label selectors -to IP addresses. If the `expectedIPs` field is omitted -then security rules that use labels will fail to match -this endpoint. -Or, if you knew that the IP address should be 10.0.0.1, but not the name -of the interface: - -```bash -calicoctl create -f - < - labels: - role: webserver - environment: production - spec: - node: - profiles: [] - expectedIPs: ["10.0.0.1"] -EOF -``` - -::: - -After you create host endpoint objects, Felix will start policing -traffic to/from that interface. If you have no policy or profiles in -place, then you should see traffic being dropped on the interface. - -:::note - -By default, $[prodname] has a failsafe in place that allows certain -traffic such as ssh. See below for more details on -disabling/configuring the failsafe rules. - -::: - -If you don't see traffic being dropped, check the hostname, IP address -and (if used) the interface name in the configuration. If there was -something wrong with the endpoint data, Felix will log a validation -error at `WARNING` level and it will ignore the endpoint: - -A `grep` through the Felix logs for the string "Validation failed" should allow -you to locate the error. - -```bash -grep "Validation failed" /var/log/calico/felix.log -``` - -An example error follows. - -``` -2016-05-31 12:16:21,651 [WARNING][8657/3] calico.felix.fetcd 1017: - Validation failed for host endpoint HostEndpointId, treating as - missing: 'name' or 'expected_ipvX_addrs' must be present.; - '{ "labels": {"foo": "bar"}, "profile_ids": ["prof1"]}' -``` - -The error can be quite long but it should log the precise cause of the -rejection; in this case `'name' or 'expected\_ipvX\_addrs' must be present` tells us that either the interface's name or its expected IP -address must be specified. diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/host-endpoints/overview.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/host-endpoints/overview.mdx deleted file mode 100644 index 6b2b005849..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/host-endpoints/overview.mdx +++ /dev/null @@ -1,57 +0,0 @@ ---- -description: Secure host network interfaces. ---- - -# Host endpoints - -This guide describes how to use $[prodname] to secure the network interfaces -of the host itself (as opposed to those of any container/VM workloads -that are present on the host). We call such interfaces "host endpoints", -to distinguish them from "workload endpoints" (such as containers or VMs). - -$[prodname] supports the same rich security policy model for host endpoints (host -endpoint policy) that it supports for workload endpoints. Host endpoints can -have labels, and their labels are in the same "namespace" as those of workload -endpoints. This allows security rules for either type of endpoint to refer to -the other type (or a mix of the two) using labels and selectors. - -$[prodname] does not support setting IPs or policing MAC addresses for host -interfaces, it assumes that the interfaces are configured by the -underlying network fabric. - -$[prodname] distinguishes workload endpoints from host endpoints by a configurable -prefix. Unless you happen to have host interfaces whose name matches the -default for that prefix (`cali`), you won't need to change it. 
In case you do, -see the `InterfacePrefix` configuration value at [Configuring Felix](../component-resources/node/felix/configuration.mdx) -. -Interfaces that start with a value listed in `InterfacePrefix` are assumed to -be workload interfaces. Others are treated as host interfaces. - -$[prodname] blocks all traffic to/from workload interfaces by default; -allowing traffic only if the interface is known and policy is in place. -However, for host endpoints, $[prodname] is more lenient; it only polices -traffic to/from interfaces that it's been explicitly told about. Traffic -to/from other interfaces is left alone. - -You can use host endpoint policy to secure a NAT gateway or router. $[prodname] -supports selector-based policy when running on a gateway or router, allowing for -rich, dynamic security policy based on the labels attached to your host endpoints. - -You can apply host endpoint policies to three types of traffic: - -- Traffic that is terminated locally. -- Traffic that is forwarded between host endpoints. -- Traffic that is forwarded between a host endpoint and a workload endpoint on the - same host. - -Set the `applyOnForward` flag to `true` to apply a policy to forwarded traffic. -See [GlobalNetworkPolicy spec](../resources/globalnetworkpolicy.mdx#spec). - -:::note - -Both traffic forwarded between host endpoints and traffic forwarded -between a host endpoint and a workload endpoint on the same host is regarded as -`forwarded traffic`. -![](/img/calico-enterprise/bare-metal-packet-flows.svg) - -::: diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/host-endpoints/pre-dnat.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/host-endpoints/pre-dnat.mdx deleted file mode 100644 index 298b001ce8..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/host-endpoints/pre-dnat.mdx +++ /dev/null @@ -1,46 +0,0 @@ ---- -description: Apply rules in a host endpoint policy before any DNAT. ---- - -# Pre-DNAT policy - -Policy for host endpoints can be marked as `preDNAT`. This means that rules in -that policy should be applied before any DNAT (Destination Network Address -Translation), which is useful if it is more convenient to specify $[prodname] policy -in terms of a packet's original destination IP address and port, than in terms -of that packet's destination IP address and port after it has been DNAT'd. - -An example is securing access to Kubernetes NodePorts from outside the cluster. -Traffic from outside is addressed to any node's IP address, on a known -NodePort, and Kubernetes (kube-proxy) then DNATs that to the IP address of one -of the pods that provides the corresponding service, and the relevant port -number on that pod (which is usually different from the NodePort). - -As NodePorts are the externally advertised way of connecting to services (and a -NodePort uniquely identifies a service, whereas an internal port number may -not), it makes sense to express $[prodname] policy to expose or secure particular -Services in terms of the corresponding NodePorts. But that is only possible if -the $[prodname] policy is applied before DNAT changes the NodePort to something -else. Hence this kind of policy needs `preDNAT` set to `true`. 
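For example, a pre-DNAT policy that admits external traffic to a NodePort might look like the following sketch (the NodePort number, source CIDR, and the assumption that your host endpoints carry a `host-endpoint` label are all illustrative):

```bash
calicoctl apply -f - <<EOF
apiVersion: projectcalico.org/v3
kind: GlobalNetworkPolicy
metadata:
  name: allow-nodeport
spec:
  selector: has(host-endpoint)
  order: 100
  preDNAT: true
  applyOnForward: true
  ingress:
    - action: Allow
      protocol: TCP
      source:
        nets: ["10.240.0.0/16"]
      destination:
        ports: [30080]
EOF
```

Because the rules are evaluated before kube-proxy's DNAT, the destination port here is the NodePort itself rather than the backing pod's port.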
- -In addition to being applied before any DNAT, the enforcement of pre-DNAT -policy differs from that of normal host endpoint policy in three key details, -reflecting that it is designed for the policing of incoming traffic from -outside the cluster: - -- Pre-DNAT policy may only have ingress rules, not egress. (When incoming - traffic is allowed by the ingress rules, standard connection tracking is - sufficient to allow the return path traffic.) - -- Pre-DNAT policy is enforced for all traffic arriving through a host - endpoint, regardless of where that traffic is going, and - in particular - - even if that traffic is routed to a local workload on the same host. - (Whereas normal host endpoint policy is skipped, for traffic going to a - local workload.) - -- There is no 'default drop' semantic for pre-DNAT policy (as there is for - normal host endpoint policy). In other words, if a host endpoint is defined - but has no pre-DNAT policies that explicitly allow or deny a particular - incoming packet, that packet is allowed to continue on its way, and will - then be accepted or dropped according to workload policy (if it is going to - a local workload) or to normal host endpoint policy (if not). diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/host-endpoints/selector.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/host-endpoints/selector.mdx deleted file mode 100644 index 9fc2d1bc4d..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/host-endpoints/selector.mdx +++ /dev/null @@ -1,30 +0,0 @@ ---- -description: Apply ordered policies to endpoints that match specific label selectors. ---- - -# Selector-based policies - -We recommend using selector-based security policy with -host endpoints. This allows ordered policy to be applied to -endpoints that match particular label selectors. - -For example, you could add a second policy for webserver access: - -```bash -cat < - - - - - - - -## calicoctl reference - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -## calicoq reference - - - - - - - - - - - -## Resource definitions - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -## Host endpoints - - - - - - - - - - - - -## Architecture - - - - - - -## Other reference topics - - - - - - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/installation/_README.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/installation/_README.mdx deleted file mode 100644 index a35570764b..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/installation/_README.mdx +++ /dev/null @@ -1,7 +0,0 @@ -# Generating API reference docs - -The api.html doc in this directory is generated using https://github.com/tmjd/gen-crd-api-reference-docs/tree/kb_v2. - -To generate an updated file, change to the root of the docs repository and run -the appropriate Makefile target. See the `README.md` file for more details on -how to list available targets and which ones to run. 
\ No newline at end of file diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/installation/_api.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/installation/_api.mdx deleted file mode 100644 index 765b8717c4..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/installation/_api.mdx +++ /dev/null @@ -1,4722 +0,0 @@ -Packages -- [operator.tigera.io/v1](#operatortigeraiov1) - - -{/* vale off */} - -## operator.tigera.io/v1 - -API Schema definitions for configuring the installation of Calico and Calico Enterprise - -Package v1 contains API Schema definitions for the operator v1 API group - -Resource Types -- [APIServer](#apiserver) -- [ApplicationLayer](#applicationlayer) -- [Authentication](#authentication) -- [Compliance](#compliance) -- [EgressGateway](#egressgateway) -- [ImageSet](#imageset) -- [Installation](#installation) -- [IntrusionDetection](#intrusiondetection) -- [LogCollector](#logcollector) -- [LogStorage](#logstorage) -- [ManagementCluster](#managementcluster) -- [ManagementClusterConnection](#managementclusterconnection) -- [Manager](#manager) -- [Monitor](#monitor) -- [PacketCaptureAPI](#packetcaptureapi) -- [PolicyRecommendation](#policyrecommendation) -- [TigeraStatus](#tigerastatus) - - - -### APIServer - - - -APIServer installs the Tigera API server and related resources. At most one instance -of this resource is supported. It must be named "default" or "tigera-secure". - -| Field | Description | -| --- | --- | -| `apiVersion` _string_ | `operator.tigera.io/v1` | -| `kind` _string_ | `APIServer` | -| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | -| `spec` _[APIServerSpec](#apiserverspec)_ | Specification of the desired state for the Tigera API server. | -| `status` _[APIServerStatus](#apiserverstatus)_ | Most recently observed status for the Tigera API server. | - - -### APIServerDeployment - - - -APIServerDeployment is the configuration for the API server Deployment. - -_Appears in:_ -- [APIServerSpec](#apiserverspec) - -| Field | Description | -| --- | --- | -| `metadata` _[Metadata](#metadata)_ | (Optional) Refer to Kubernetes API documentation for fields of `metadata`. | -| `spec` _[APIServerDeploymentSpec](#apiserverdeploymentspec)_ | (Optional) Spec is the specification of the API server Deployment. | - - -### APIServerDeploymentContainer - - - -APIServerDeploymentContainer is an API server Deployment container. - -_Appears in:_ -- [APIServerDeploymentPodSpec](#apiserverdeploymentpodspec) - -| Field | Description | -| --- | --- | -| `name` _string_ | Name is an enum which identifies the API server Deployment container by name.
    Supported values are: calico-apiserver, tigera-queryserver | -| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core)_ | (Optional) Resources allows customization of limits and requests for compute resources such as cpu and memory. If specified, this overrides the named API server Deployment container's resources. If omitted, the API server Deployment will use its default value for this container's resources. If used in conjunction with the deprecated ComponentResources, then this value takes precedence. | - - -### APIServerDeploymentInitContainer - - - -APIServerDeploymentInitContainer is an API server Deployment init container. - -_Appears in:_ -- [APIServerDeploymentPodSpec](#apiserverdeploymentpodspec) - -| Field | Description | -| --- | --- | -| `name` _string_ | Name is an enum which identifies the API server Deployment init container by name.
    Supported values are: calico-apiserver-certs-key-cert-provisioner | -| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core)_ | (Optional) Resources allows customization of limits and requests for compute resources such as cpu and memory. If specified, this overrides the named API server Deployment init container's resources. If omitted, the API server Deployment will use its default value for this init container's resources. | - - -### APIServerDeploymentPodSpec - - - -APIServerDeploymentDeploymentPodSpec is the API server Deployment's PodSpec. - -_Appears in:_ -- [APIServerDeploymentPodTemplateSpec](#apiserverdeploymentpodtemplatespec) - -| Field | Description | -| --- | --- | -| `initContainers` _[APIServerDeploymentInitContainer](#apiserverdeploymentinitcontainer) array_ | (Optional) InitContainers is a list of API server init containers. If specified, this overrides the specified API server Deployment init containers. If omitted, the API server Deployment will use its default values for its init containers. | -| `containers` _[APIServerDeploymentContainer](#apiserverdeploymentcontainer) array_ | (Optional) Containers is a list of API server containers. If specified, this overrides the specified API server Deployment containers. If omitted, the API server Deployment will use its default values for its containers. | -| `affinity` _[Affinity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#affinity-v1-core)_ | (Optional) Affinity is a group of affinity scheduling rules for the API server pods. If specified, this overrides any affinity that may be set on the API server Deployment. If omitted, the API server Deployment will use its default value for affinity.
    WARNING: Please note that this field will override the default API server Deployment affinity. | -| `nodeSelector` _object (keys:string, values:string)_ | NodeSelector is the API server pod's scheduling constraints. If specified, each of the key/value pairs are added to the API server Deployment nodeSelector provided the key does not already exist in the object's nodeSelector. If used in conjunction with ControlPlaneNodeSelector, that nodeSelector is set on the API server Deployment and each of this field's key/value pairs are added to the API server Deployment nodeSelector provided the key does not already exist in the object's nodeSelector. If omitted, the API server Deployment will use its default value for nodeSelector.
    WARNING: Please note that this field will modify the default API server Deployment nodeSelector. | -| `topologySpreadConstraints` _[TopologySpreadConstraint](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#topologyspreadconstraint-v1-core) array_ | (Optional) TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. All topologySpreadConstraints are ANDed. | -| `tolerations` _[Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#toleration-v1-core) array_ | (Optional) Tolerations is the API server pod's tolerations. If specified, this overrides any tolerations that may be set on the API server Deployment. If omitted, the API server Deployment will use its default value for tolerations.
    WARNING: Please note that this field will override the default API server Deployment tolerations. | -| `priorityClassName` _string_ | (Optional) PriorityClassName allows to specify a PriorityClass resource to be used. | - - -### APIServerDeploymentPodTemplateSpec - - - -APIServerDeploymentPodTemplateSpec is the API server Deployment's PodTemplateSpec - -_Appears in:_ -- [APIServerDeploymentSpec](#apiserverdeploymentspec) - -| Field | Description | -| --- | --- | -| `metadata` _[Metadata](#metadata)_ | (Optional) Refer to Kubernetes API documentation for fields of `metadata`. | -| `spec` _[APIServerDeploymentPodSpec](#apiserverdeploymentpodspec)_ | (Optional) Spec is the API server Deployment's PodSpec. | - - -### APIServerDeploymentSpec - - - -APIServerDeploymentSpec defines configuration for the API server Deployment. - -_Appears in:_ -- [APIServerDeployment](#apiserverdeployment) - -| Field | Description | -| --- | --- | -| `minReadySeconds` _integer_ | (Optional) MinReadySeconds is the minimum number of seconds for which a newly created Deployment pod should be ready without any of its container crashing, for it to be considered available. If specified, this overrides any minReadySeconds value that may be set on the API server Deployment. If omitted, the API server Deployment will use its default value for minReadySeconds. | -| `template` _[APIServerDeploymentPodTemplateSpec](#apiserverdeploymentpodtemplatespec)_ | (Optional) Template describes the API server Deployment pod that will be created. | - - -### APIServerSpec - - - -APIServerSpec defines the desired state of Tigera API server. - -_Appears in:_ -- [APIServer](#apiserver) - -| Field | Description | -| --- | --- | -| `apiServerDeployment` _[APIServerDeployment](#apiserverdeployment)_ | APIServerDeployment configures the calico-apiserver (or tigera-apiserver in Enterprise) Deployment. If used in conjunction with ControlPlaneNodeSelector or ControlPlaneTolerations, then these overrides take precedence. | - - -### APIServerStatus - - - -APIServerStatus defines the observed state of Tigera API server. - -_Appears in:_ -- [APIServer](#apiserver) - -| Field | Description | -| --- | --- | -| `state` _string_ | State provides user-readable status. | -| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#condition-v1-meta) array_ | (Optional) Conditions represents the latest observed set of conditions for the component. A component may be one or more of Ready, Progressing, Degraded or other customer types. | - - -### AWSEgressGateway - - - -AWSEgressGateway defines the configurations for deploying EgressGateway in AWS - -_Appears in:_ -- [EgressGatewaySpec](#egressgatewayspec) - -| Field | Description | -| --- | --- | -| `nativeIP` _[NativeIP](#nativeip)_ | (Optional) NativeIP defines if EgressGateway is to use an AWS backed IPPool.
    Default: Disabled | -| `elasticIPs` _string array_ | (Optional) ElasticIPs defines the set of elastic IPs that can be used for Egress Gateway pods. NativeIP must be Enabled if elastic IPs are set. | - - -### AdditionalLogSourceSpec - - - - - -_Appears in:_ -- [LogCollectorSpec](#logcollectorspec) - -| Field | Description | -| --- | --- | -| `eksCloudwatchLog` _[EksCloudwatchLogsSpec](#ekscloudwatchlogsspec)_ | (Optional) If specified with EKS Provider in Installation, enables fetching EKS audit logs. | - - -### AdditionalLogStoreSpec - - - - - -_Appears in:_ -- [LogCollectorSpec](#logcollectorspec) - -| Field | Description | -| --- | --- | -| `s3` _[S3StoreSpec](#s3storespec)_ | (Optional) If specified, enables exporting of flow, audit, and DNS logs to Amazon S3 storage. | -| `syslog` _[SyslogStoreSpec](#syslogstorespec)_ | (Optional) If specified, enables exporting of flow, audit, and DNS logs to syslog. | -| `splunk` _[SplunkStoreSpec](#splunkstorespec)_ | (Optional) If specified, enables exporting of flow, audit, and DNS logs to splunk. | - - -### AlertManager - - - - - -_Appears in:_ -- [MonitorSpec](#monitorspec) - -| Field | Description | -| --- | --- | -| `spec` _[AlertManagerSpec](#alertmanagerspec)_ | (Optional) Spec is the specification of the Alertmanager. | - - -### AlertManagerSpec - - - - - -_Appears in:_ -- [AlertManager](#alertmanager) - -| Field | Description | -| --- | --- | -| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core)_ | Define resources requests and limits for single Pods. | - - -### AnomalyDetectionSpec - - - - - -_Appears in:_ -- [IntrusionDetectionSpec](#intrusiondetectionspec) - -| Field | Description | -| --- | --- | -| `storageClassName` _string_ | (Optional) StorageClassName is now deprecated, and configuring it has no effect. | - - -### ApplicationLayer - - - -ApplicationLayer is the Schema for the applicationlayers API - -| Field | Description | -| --- | --- | -| `apiVersion` _string_ | `operator.tigera.io/v1` | -| `kind` _string_ | `ApplicationLayer` | -| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | -| `spec` _[ApplicationLayerSpec](#applicationlayerspec)_ | | -| `status` _[ApplicationLayerStatus](#applicationlayerstatus)_ | | - - -### ApplicationLayerPolicyStatusType - -_Underlying type:_ _string_ - - - -_Appears in:_ -- [ApplicationLayerSpec](#applicationlayerspec) - -| Value | Description | -| --- | --- | -| `Enabled` | | -| `Disabled` | | - - -### ApplicationLayerSpec - - - -ApplicationLayerSpec defines the desired state of ApplicationLayer - -_Appears in:_ -- [ApplicationLayer](#applicationlayer) - -| Field | Description | -| --- | --- | -| `webApplicationFirewall` _[WAFStatusType](#wafstatustype)_ | WebApplicationFirewall controls whether or not ModSecurity enforcement is enabled for the cluster. When enabled, Services may opt-in to having ingress traffic examed by ModSecurity. | -| `logCollection` _[LogCollectionSpec](#logcollectionspec)_ | Specification for application layer (L7) log collection. | -| `applicationLayerPolicy` _[ApplicationLayerPolicyStatusType](#applicationlayerpolicystatustype)_ | Application Layer Policy controls whether or not ALP enforcement is enabled for the cluster. 
When enabled, NetworkPolicies with HTTP Match rules may be defined to opt-in workloads for traffic enforcement on the application layer. | -| `envoy` _[EnvoySettings](#envoysettings)_ | User-configurable settings for the Envoy proxy. | -| `l7LogCollectorDaemonSet` _[L7LogCollectorDaemonSet](#l7logcollectordaemonset)_ | (Optional) L7LogCollectorDaemonSet configures the L7LogCollector DaemonSet. | - - -### ApplicationLayerStatus - - - -ApplicationLayerStatus defines the observed state of ApplicationLayer - -_Appears in:_ -- [ApplicationLayer](#applicationlayer) - -| Field | Description | -| --- | --- | -| `state` _string_ | State provides user-readable status. | -| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#condition-v1-meta) array_ | (Optional) Conditions represents the latest observed set of conditions for the component. A component may be one or more of Ready, Progressing, Degraded or other customer types. | - - - - -### Authentication - - - -Authentication is the Schema for the authentications API - -| Field | Description | -| --- | --- | -| `apiVersion` _string_ | `operator.tigera.io/v1` | -| `kind` _string_ | `Authentication` | -| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | -| `spec` _[AuthenticationSpec](#authenticationspec)_ | | -| `status` _[AuthenticationStatus](#authenticationstatus)_ | | - - -### AuthenticationLDAP - - - -AuthenticationLDAP is the configuration needed to setup LDAP. - -_Appears in:_ -- [AuthenticationSpec](#authenticationspec) - -| Field | Description | -| --- | --- | -| `host` _string_ | The host and port of the LDAP server. Example: ad.example.com:636 | -| `startTLS` _boolean_ | (Optional) StartTLS whether to enable the startTLS feature for establishing TLS on an existing LDAP session. If true, the ldap:// protocol is used and then issues a StartTLS command, otherwise, connections will use the ldaps:// protocol. | -| `userSearch` _[UserSearch](#usersearch)_ | User entry search configuration to match the credentials with a user. | -| `groupSearch` _[GroupSearch](#groupsearch)_ | (Optional) Group search configuration to find the groups that a user is in. | - - -### AuthenticationOIDC - - - -AuthenticationOIDC is the configuration needed to setup OIDC. - -_Appears in:_ -- [AuthenticationSpec](#authenticationspec) - -| Field | Description | -| --- | --- | -| `issuerURL` _string_ | IssuerURL is the URL to the OIDC provider. | -| `usernameClaim` _string_ | UsernameClaim specifies which claim to use from the OIDC provider as the username. | -| `requestedScopes` _string array_ | (Optional) RequestedScopes is a list of scopes to request from the OIDC provider. If not provided, the following scopes are requested: ["openid", "email", "profile", "groups", "offline_access"]. | -| `usernamePrefix` _string_ | (Optional) Deprecated. Please use Authentication.Spec.UsernamePrefix instead. | -| `groupsClaim` _string_ | (Optional) GroupsClaim specifies which claim to use from the OIDC provider as the group. | -| `groupsPrefix` _string_ | (Optional) Deprecated. Please use Authentication.Spec.GroupsPrefix instead. | -| `emailVerification` _[EmailVerificationType](#emailverificationtype)_ | (Optional) Some providers do not include the claim "email_verified" when there is no verification in the user enrollment process or if they are acting as a proxy for another identity provider. 
By default those tokens are deemed invalid. To skip this check, set the value to "InsecureSkip".
    Default: Verify | -| `promptTypes` _[PromptType](#prompttype) array_ | (Optional) PromptTypes is an optional list of string values that specifies whether the identity provider prompts the end user for re-authentication and consent. See the RFC for more information on prompt types: https://openid.net/specs/openid-connect-core-1_0.html.
    Default: "Consent" | -| `type` _[OIDCType](#oidctype)_ | (Optional)
    Default: "Dex" | - - -### AuthenticationOpenshift - - - -AuthenticationOpenshift is the configuration needed to setup Openshift. - -_Appears in:_ -- [AuthenticationSpec](#authenticationspec) - -| Field | Description | -| --- | --- | -| `issuerURL` _string_ | IssuerURL is the URL to the Openshift OAuth provider. Ex.: https://api.my-ocp-domain.com:6443 | - - -### AuthenticationSpec - - - -AuthenticationSpec defines the desired state of Authentication - -_Appears in:_ -- [Authentication](#authentication) - -| Field | Description | -| --- | --- | -| `managerDomain` _string_ | ManagerDomain is the domain name of the Manager | -| `usernamePrefix` _string_ | (Optional) If specified, UsernamePrefix is prepended to each user obtained from the identity provider. Note that Kibana does not support a user prefix, so this prefix is removed from Kubernetes User when translating log access ClusterRoleBindings into Elastic. | -| `groupsPrefix` _string_ | (Optional) If specified, GroupsPrefix is prepended to each group obtained from the identity provider. Note that Kibana does not support a groups prefix, so this prefix is removed from Kubernetes Groups when translating log access ClusterRoleBindings into Elastic. | -| `oidc` _[AuthenticationOIDC](#authenticationoidc)_ | (Optional) OIDC contains the configuration needed to setup OIDC authentication. | -| `openshift` _[AuthenticationOpenshift](#authenticationopenshift)_ | (Optional) Openshift contains the configuration needed to setup Openshift OAuth authentication. | -| `ldap` _[AuthenticationLDAP](#authenticationldap)_ | (Optional) LDAP contains the configuration needed to setup LDAP authentication. | -| `dexDeployment` _[DexDeployment](#dexdeployment)_ | (Optional) DexDeployment configures the Dex Deployment. | - - -### AuthenticationStatus - - - -AuthenticationStatus defines the observed state of Authentication - -_Appears in:_ -- [Authentication](#authentication) - -| Field | Description | -| --- | --- | -| `state` _string_ | State provides user-readable status. | -| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#condition-v1-meta) array_ | (Optional) Conditions represents the latest observed set of conditions for the component. A component may be one or more of Ready, Progressing, Degraded or other customer types. | - - -### BGPOption - -_Underlying type:_ _string_ - -BGPOption describes the mode of BGP to use. - -One of: Enabled, Disabled - -_Appears in:_ -- [CalicoNetworkSpec](#caliconetworkspec) - -| Value | Description | -| --- | --- | -| `Enabled` | | -| `Disabled` | | - - -### CAType - -_Underlying type:_ _string_ - -CAType specifies which verification method the tunnel client should use to verify the tunnel server's identity. - -One of: Tigera, Public - -_Appears in:_ -- [ManagementClusterTLS](#managementclustertls) - -| Value | Description | -| --- | --- | -| `Tigera` | | -| `Public` | | - - -### CNILogging - - - - - -_Appears in:_ -- [Logging](#logging) - -| Field | Description | -| --- | --- | -| `logSeverity` _[LogLevel](#loglevel)_ | (Optional)
    Default: Info | -| `logFileMaxSize` _[Quantity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#quantity-resource-api)_ | (Optional)
    Default: 100Mi | -| `logFileMaxAgeDays` _integer_ | (Optional)
    Default: 30 (days) | -| `logFileMaxCount` _integer_ | (Optional)
    Default: 10 | - - -### CNIPluginType - -_Underlying type:_ _string_ - -CNIPluginType describes the type of CNI plugin used. - -One of: Calico, GKE, AmazonVPC, AzureVNET - -_Appears in:_ -- [CNISpec](#cnispec) - -| Value | Description | -| --- | --- | -| `Calico` | | -| `GKE` | | -| `AmazonVPC` | | -| `AzureVNET` | | - - -### CNISpec - - - -CNISpec contains configuration for the CNI plugin. - -_Appears in:_ -- [InstallationSpec](#installationspec) - -| Field | Description | -| --- | --- | -| `type` _[CNIPluginType](#cniplugintype)_ | Specifies the CNI plugin that will be used in the Calico or Calico Enterprise installation. * For KubernetesProvider GKE, this field defaults to GKE. * For KubernetesProvider AKS, this field defaults to AzureVNET. * For KubernetesProvider EKS, this field defaults to AmazonVPC. * If aws-node daemonset exists in kube-system when the Installation resource is created, this field defaults to AmazonVPC. * For all other cases this field defaults to Calico. For the value Calico, the CNI plugin binaries and CNI config will be installed as part of deployment, for all other values the CNI plugin binaries and CNI config is a dependency that is expected to be installed separately.
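For orientation, the sketch below shows how the `cni` section of an Installation resource might be written; the resource name and the explicit values are illustrative assumptions rather than values taken from this reference.

```yaml
# Illustrative sketch only: explicitly selecting the Calico CNI plugin and Calico IPAM.
apiVersion: operator.tigera.io/v1
kind: Installation
metadata:
  name: default
spec:
  cni:
    type: Calico      # one of Calico, GKE, AmazonVPC, AzureVNET
    ipam:
      type: Calico    # pod IP address management used by the installation
```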
    Default: Calico | -| `ipam` _[IPAMSpec](#ipamspec)_ | (Optional) IPAM specifies the pod IP address management that will be used in the Calico or Calico Enterprise installation. | - - -### CSINodeDriverDaemonSet - - - -CSINodeDriverDaemonSet is the configuration for the csi-node-driver DaemonSet. - -_Appears in:_ -- [InstallationSpec](#installationspec) - -| Field | Description | -| --- | --- | -| `metadata` _[Metadata](#metadata)_ | (Optional) Refer to Kubernetes API documentation for fields of `metadata`. | -| `spec` _[CSINodeDriverDaemonSetSpec](#csinodedriverdaemonsetspec)_ | (Optional) Spec is the specification of the csi-node-driver DaemonSet. | - - -### CSINodeDriverDaemonSetContainer - - - -CSINodeDriverDaemonSetContainer is a csi-node-driver DaemonSet container. - -_Appears in:_ -- [CSINodeDriverDaemonSetPodSpec](#csinodedriverdaemonsetpodspec) - -| Field | Description | -| --- | --- | -| `name` _string_ | Name is an enum which identifies the csi-node-driver DaemonSet container by name.
    Supported values are: calico-csi, csi-node-driver-registrar. | -| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core)_ | (Optional) Resources allows customization of limits and requests for compute resources such as cpu and memory. If specified, this overrides the named csi-node-driver DaemonSet container's resources. If omitted, the csi-node-driver DaemonSet will use its default value for this container's resources. | - - -### CSINodeDriverDaemonSetPodSpec - - - -CSINodeDriverDaemonSetPodSpec is the csi-node-driver DaemonSet's PodSpec. - -_Appears in:_ -- [CSINodeDriverDaemonSetPodTemplateSpec](#csinodedriverdaemonsetpodtemplatespec) - -| Field | Description | -| --- | --- | -| `containers` _[CSINodeDriverDaemonSetContainer](#csinodedriverdaemonsetcontainer) array_ | (Optional) Containers is a list of csi-node-driver containers. If specified, this overrides the specified csi-node-driver DaemonSet containers. If omitted, the csi-node-driver DaemonSet will use its default values for its containers. | -| `affinity` _[Affinity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#affinity-v1-core)_ | (Optional) Affinity is a group of affinity scheduling rules for the csi-node-driver pods. If specified, this overrides any affinity that may be set on the csi-node-driver DaemonSet. If omitted, the csi-node-driver DaemonSet will use its default value for affinity.
    WARNING: Please note that this field will override the default csi-node-driver DaemonSet affinity. | -| `nodeSelector` _object (keys:string, values:string)_ | (Optional) NodeSelector is the csi-node-driver pod's scheduling constraints. If specified, each of the key/value pairs are added to the csi-node-driver DaemonSet nodeSelector provided the key does not already exist in the object's nodeSelector. If omitted, the csi-node-driver DaemonSet will use its default value for nodeSelector.
    WARNING: Please note that this field will modify the default csi-node-driver DaemonSet nodeSelector. | -| `tolerations` _[Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#toleration-v1-core) array_ | (Optional) Tolerations is the csi-node-driver pod's tolerations. If specified, this overrides any tolerations that may be set on the csi-node-driver DaemonSet. If omitted, the csi-node-driver DaemonSet will use its default value for tolerations.
    WARNING: Please note that this field will override the default csi-node-driver DaemonSet tolerations. | - - -### CSINodeDriverDaemonSetPodTemplateSpec - - - -CSINodeDriverDaemonSetPodTemplateSpec is the csi-node-driver DaemonSet's PodTemplateSpec - -_Appears in:_ -- [CSINodeDriverDaemonSetSpec](#csinodedriverdaemonsetspec) - -| Field | Description | -| --- | --- | -| `metadata` _[Metadata](#metadata)_ | (Optional) Refer to Kubernetes API documentation for fields of `metadata`. | -| `spec` _[CSINodeDriverDaemonSetPodSpec](#csinodedriverdaemonsetpodspec)_ | (Optional) Spec is the csi-node-driver DaemonSet's PodSpec. | - - -### CSINodeDriverDaemonSetSpec - - - -CSINodeDriverDaemonSetSpec defines configuration for the csi-node-driver DaemonSet. - -_Appears in:_ -- [CSINodeDriverDaemonSet](#csinodedriverdaemonset) - -| Field | Description | -| --- | --- | -| `minReadySeconds` _integer_ | (Optional) MinReadySeconds is the minimum number of seconds for which a newly created DaemonSet pod should be ready without any of its container crashing, for it to be considered available. If specified, this overrides any minReadySeconds value that may be set on the csi-node-driver DaemonSet. If omitted, the csi-node-driver DaemonSet will use its default value for minReadySeconds. | -| `template` _[CSINodeDriverDaemonSetPodTemplateSpec](#csinodedriverdaemonsetpodtemplatespec)_ | (Optional) Template describes the csi-node-driver DaemonSet pod that will be created. | - - -### CalicoKubeControllersDeployment - - - -CalicoKubeControllersDeployment is the configuration for the calico-kube-controllers Deployment. - -_Appears in:_ -- [InstallationSpec](#installationspec) - -| Field | Description | -| --- | --- | -| `metadata` _[Metadata](#metadata)_ | (Optional) Refer to Kubernetes API documentation for fields of `metadata`. | -| `spec` _[CalicoKubeControllersDeploymentSpec](#calicokubecontrollersdeploymentspec)_ | (Optional) Spec is the specification of the calico-kube-controllers Deployment. | - - -### CalicoKubeControllersDeploymentContainer - - - -CalicoKubeControllersDeploymentContainer is a calico-kube-controllers Deployment container. - -_Appears in:_ -- [CalicoKubeControllersDeploymentPodSpec](#calicokubecontrollersdeploymentpodspec) - -| Field | Description | -| --- | --- | -| `name` _string_ | Name is an enum which identifies the calico-kube-controllers Deployment container by name.
    Supported values are: calico-kube-controllers | -| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core)_ | (Optional) Resources allows customization of limits and requests for compute resources such as cpu and memory. If specified, this overrides the named calico-kube-controllers Deployment container's resources. If omitted, the calico-kube-controllers Deployment will use its default value for this container's resources. If used in conjunction with the deprecated ComponentResources, then this value takes precedence. | - - -### CalicoKubeControllersDeploymentPodSpec - - - -CalicoKubeControllersDeploymentPodSpec is the calico-kube-controller Deployment's PodSpec. - -_Appears in:_ -- [CalicoKubeControllersDeploymentPodTemplateSpec](#calicokubecontrollersdeploymentpodtemplatespec) - -| Field | Description | -| --- | --- | -| `containers` _[CalicoKubeControllersDeploymentContainer](#calicokubecontrollersdeploymentcontainer) array_ | (Optional) Containers is a list of calico-kube-controllers containers. If specified, this overrides the specified calico-kube-controllers Deployment containers. If omitted, the calico-kube-controllers Deployment will use its default values for its containers. | -| `affinity` _[Affinity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#affinity-v1-core)_ | (Optional) Affinity is a group of affinity scheduling rules for the calico-kube-controllers pods. If specified, this overrides any affinity that may be set on the calico-kube-controllers Deployment. If omitted, the calico-kube-controllers Deployment will use its default value for affinity.
    WARNING: Please note that this field will override the default calico-kube-controllers Deployment affinity. | -| `nodeSelector` _object (keys:string, values:string)_ | NodeSelector is the calico-kube-controllers pod's scheduling constraints. If specified, each of the key/value pairs are added to the calico-kube-controllers Deployment nodeSelector provided the key does not already exist in the object's nodeSelector. If used in conjunction with ControlPlaneNodeSelector, that nodeSelector is set on the calico-kube-controllers Deployment and each of this field's key/value pairs are added to the calico-kube-controllers Deployment nodeSelector provided the key does not already exist in the object's nodeSelector. If omitted, the calico-kube-controllers Deployment will use its default value for nodeSelector.
    WARNING: Please note that this field will modify the default calico-kube-controllers Deployment nodeSelector. | -| `tolerations` _[Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#toleration-v1-core) array_ | (Optional) Tolerations is the calico-kube-controllers pod's tolerations. If specified, this overrides any tolerations that may be set on the calico-kube-controllers Deployment. If omitted, the calico-kube-controllers Deployment will use its default value for tolerations.
    WARNING: Please note that this field will override the default calico-kube-controllers Deployment tolerations. | - - -### CalicoKubeControllersDeploymentPodTemplateSpec - - - -CalicoKubeControllersDeploymentPodTemplateSpec is the calico-kube-controllers Deployment's PodTemplateSpec - -_Appears in:_ -- [CalicoKubeControllersDeploymentSpec](#calicokubecontrollersdeploymentspec) - -| Field | Description | -| --- | --- | -| `metadata` _[Metadata](#metadata)_ | (Optional) Refer to Kubernetes API documentation for fields of `metadata`. | -| `spec` _[CalicoKubeControllersDeploymentPodSpec](#calicokubecontrollersdeploymentpodspec)_ | (Optional) Spec is the calico-kube-controllers Deployment's PodSpec. | - - -### CalicoKubeControllersDeploymentSpec - - - -CalicoKubeControllersDeploymentSpec defines configuration for the calico-kube-controllers Deployment. - -_Appears in:_ -- [CalicoKubeControllersDeployment](#calicokubecontrollersdeployment) - -| Field | Description | -| --- | --- | -| `minReadySeconds` _integer_ | (Optional) MinReadySeconds is the minimum number of seconds for which a newly created Deployment pod should be ready without any of its container crashing, for it to be considered available. If specified, this overrides any minReadySeconds value that may be set on the calico-kube-controllers Deployment. If omitted, the calico-kube-controllers Deployment will use its default value for minReadySeconds. | -| `template` _[CalicoKubeControllersDeploymentPodTemplateSpec](#calicokubecontrollersdeploymentpodtemplatespec)_ | (Optional) Template describes the calico-kube-controllers Deployment pod that will be created. | - - -### CalicoNetworkSpec - - - -CalicoNetworkSpec specifies configuration options for Calico provided pod networking. - -_Appears in:_ -- [InstallationSpec](#installationspec) - -| Field | Description | -| --- | --- | -| `linuxDataplane` _[LinuxDataplaneOption](#linuxdataplaneoption)_ | (Optional) LinuxDataplane is used to select the dataplane used for Linux nodes. In particular, it causes the operator to add required mounts and environment variables for the particular dataplane. If not specified, iptables mode is used.
    Default: Iptables | -| `windowsDataplane` _[WindowsDataplaneOption](#windowsdataplaneoption)_ | (Optional) WindowsDataplane is used to select the dataplane used for Windows nodes. In particular, it causes the operator to add required mounts and environment variables for the particular dataplane. If not specified, it is disabled and the operator will not render the Calico Windows nodes daemonset.
    Default: Disabled | -| `bgp` _[BGPOption](#bgpoption)_ | (Optional) BGP configures whether or not to enable Calico's BGP capabilities. | -| `ipPools` _[IPPool](#ippool) array_ | (Optional) IPPools contains a list of IP pools to create if none exist. At most one IP pool of each address family may be specified. If omitted, a single pool will be configured if needed. | -| `mtu` _integer_ | (Optional) MTU specifies the maximum transmission unit to use on the pod network. If not specified, Calico will perform MTU auto-detection based on the cluster network. | -| `nodeAddressAutodetectionV4` _[NodeAddressAutodetection](#nodeaddressautodetection)_ | (Optional) NodeAddressAutodetectionV4 specifies an approach to automatically detect node IPv4 addresses. If not specified, will use default auto-detection settings to acquire an IPv4 address for each node. | -| `nodeAddressAutodetectionV6` _[NodeAddressAutodetection](#nodeaddressautodetection)_ | (Optional) NodeAddressAutodetectionV6 specifies an approach to automatically detect node IPv6 addresses. If not specified, IPv6 addresses will not be auto-detected. | -| `hostPorts` _[HostPortsType](#hostportstype)_ | (Optional) HostPorts configures whether or not Calico will support Kubernetes HostPorts. Valid only when using the Calico CNI plugin.
    Default: Enabled | -| `multiInterfaceMode` _[MultiInterfaceMode](#multiinterfacemode)_ | (Optional) MultiInterfaceMode configures what will configure multiple interfaces per pod. Only valid for Calico Enterprise installations using the Calico CNI plugin.
    Default: None | -| `containerIPForwarding` _[ContainerIPForwardingType](#containeripforwardingtype)_ | (Optional) ContainerIPForwarding configures whether IP forwarding will be enabled for containers in the CNI configuration.
    Default: Disabled | -| `sysctl` _[Sysctl](#sysctl) array_ | (Optional) Sysctl configures sysctl parameters for the tuning plugin | -| `linuxPolicySetupTimeoutSeconds` _integer_ | (Optional) LinuxPolicySetupTimeoutSeconds delays new pods from running containers until their policy has been programmed in the dataplane. The specified delay defines the maximum amount of time that the Calico CNI plugin will wait for policy to be programmed. Only applies to pods created on Linux nodes. * A value of 0 disables pod startup delays.
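To show how several of the CalicoNetworkSpec fields above fit together, here is a hedged Installation sketch; the CIDR, encapsulation, and MTU are example values chosen for illustration, not recommendations from this reference.

```yaml
# Illustrative sketch only: tuning Calico pod networking through calicoNetwork.
apiVersion: operator.tigera.io/v1
kind: Installation
metadata:
  name: default
spec:
  calicoNetwork:
    linuxDataplane: Iptables      # default dataplane for Linux nodes
    bgp: Enabled
    mtu: 1440
    ipPools:
      - cidr: 192.168.0.0/16
        encapsulation: IPIPCrossSubnet
        natOutgoing: Enabled
```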
    Default: 0 | - - -### CalicoNodeDaemonSet - - - -CalicoNodeDaemonSet is the configuration for the calico-node DaemonSet. - -_Appears in:_ -- [InstallationSpec](#installationspec) - -| Field | Description | -| --- | --- | -| `metadata` _[Metadata](#metadata)_ | (Optional) Refer to Kubernetes API documentation for fields of `metadata`. | -| `spec` _[CalicoNodeDaemonSetSpec](#caliconodedaemonsetspec)_ | (Optional) Spec is the specification of the calico-node DaemonSet. | - - -### CalicoNodeDaemonSetContainer - - - -CalicoNodeDaemonSetContainer is a calico-node DaemonSet container. - -_Appears in:_ -- [CalicoNodeDaemonSetPodSpec](#caliconodedaemonsetpodspec) - -| Field | Description | -| --- | --- | -| `name` _string_ | Name is an enum which identifies the calico-node DaemonSet container by name.
    Supported values are: calico-node | -| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core)_ | (Optional) Resources allows customization of limits and requests for compute resources such as cpu and memory. If specified, this overrides the named calico-node DaemonSet container's resources. If omitted, the calico-node DaemonSet will use its default value for this container's resources. If used in conjunction with the deprecated ComponentResources, then this value takes precedence. | - - -### CalicoNodeDaemonSetInitContainer - - - -CalicoNodeDaemonSetInitContainer is a calico-node DaemonSet init container. - -_Appears in:_ -- [CalicoNodeDaemonSetPodSpec](#caliconodedaemonsetpodspec) - -| Field | Description | -| --- | --- | -| `name` _string_ | Name is an enum which identifies the calico-node DaemonSet init container by name.
    Supported values are: install-cni, hostpath-init, flexvol-driver, mount-bpffs, node-certs-key-cert-provisioner, calico-node-prometheus-server-tls-key-cert-provisioner | -| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core)_ | (Optional) Resources allows customization of limits and requests for compute resources such as cpu and memory. If specified, this overrides the named calico-node DaemonSet init container's resources. If omitted, the calico-node DaemonSet will use its default value for this container's resources. If used in conjunction with the deprecated ComponentResources, then this value takes precedence. | - - -### CalicoNodeDaemonSetPodSpec - - - -CalicoNodeDaemonSetPodSpec is the calico-node DaemonSet's PodSpec. - -_Appears in:_ -- [CalicoNodeDaemonSetPodTemplateSpec](#caliconodedaemonsetpodtemplatespec) - -| Field | Description | -| --- | --- | -| `initContainers` _[CalicoNodeDaemonSetInitContainer](#caliconodedaemonsetinitcontainer) array_ | (Optional) InitContainers is a list of calico-node init containers. If specified, this overrides the specified calico-node DaemonSet init containers. If omitted, the calico-node DaemonSet will use its default values for its init containers. | -| `containers` _[CalicoNodeDaemonSetContainer](#caliconodedaemonsetcontainer) array_ | (Optional) Containers is a list of calico-node containers. If specified, this overrides the specified calico-node DaemonSet containers. If omitted, the calico-node DaemonSet will use its default values for its containers. | -| `affinity` _[Affinity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#affinity-v1-core)_ | (Optional) Affinity is a group of affinity scheduling rules for the calico-node pods. If specified, this overrides any affinity that may be set on the calico-node DaemonSet. If omitted, the calico-node DaemonSet will use its default value for affinity.
    WARNING: Please note that this field will override the default calico-node DaemonSet affinity. | -| `nodeSelector` _object (keys:string, values:string)_ | (Optional) NodeSelector is the calico-node pod's scheduling constraints. If specified, each of the key/value pairs are added to the calico-node DaemonSet nodeSelector provided the key does not already exist in the object's nodeSelector. If omitted, the calico-node DaemonSet will use its default value for nodeSelector.
    WARNING: Please note that this field will modify the default calico-node DaemonSet nodeSelector. | -| `tolerations` _[Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#toleration-v1-core) array_ | (Optional) Tolerations is the calico-node pod's tolerations. If specified, this overrides any tolerations that may be set on the calico-node DaemonSet. If omitted, the calico-node DaemonSet will use its default value for tolerations.
    WARNING: Please note that this field will override the default calico-node DaemonSet tolerations. | - - -### CalicoNodeDaemonSetPodTemplateSpec - - - -CalicoNodeDaemonSetPodTemplateSpec is the calico-node DaemonSet's PodTemplateSpec - -_Appears in:_ -- [CalicoNodeDaemonSetSpec](#caliconodedaemonsetspec) - -| Field | Description | -| --- | --- | -| `metadata` _[Metadata](#metadata)_ | (Optional) Refer to Kubernetes API documentation for fields of `metadata`. | -| `spec` _[CalicoNodeDaemonSetPodSpec](#caliconodedaemonsetpodspec)_ | (Optional) Spec is the calico-node DaemonSet's PodSpec. | - - -### CalicoNodeDaemonSetSpec - - - -CalicoNodeDaemonSetSpec defines configuration for the calico-node DaemonSet. - -_Appears in:_ -- [CalicoNodeDaemonSet](#caliconodedaemonset) - -| Field | Description | -| --- | --- | -| `minReadySeconds` _integer_ | (Optional) MinReadySeconds is the minimum number of seconds for which a newly created DaemonSet pod should be ready without any of its container crashing, for it to be considered available. If specified, this overrides any minReadySeconds value that may be set on the calico-node DaemonSet. If omitted, the calico-node DaemonSet will use its default value for minReadySeconds. | -| `template` _[CalicoNodeDaemonSetPodTemplateSpec](#caliconodedaemonsetpodtemplatespec)_ | (Optional) Template describes the calico-node DaemonSet pod that will be created. | - - -### CalicoNodeWindowsDaemonSet - - - -CalicoNodeWindowsDaemonSet is the configuration for the calico-node-windows DaemonSet. - -_Appears in:_ -- [InstallationSpec](#installationspec) - -| Field | Description | -| --- | --- | -| `metadata` _[Metadata](#metadata)_ | (Optional) Refer to Kubernetes API documentation for fields of `metadata`. | -| `spec` _[CalicoNodeWindowsDaemonSetSpec](#caliconodewindowsdaemonsetspec)_ | (Optional) Spec is the specification of the calico-node-windows DaemonSet. | - - -### CalicoNodeWindowsDaemonSetContainer - - - -CalicoNodeWindowsDaemonSetContainer is a calico-node-windows DaemonSet container. - -_Appears in:_ -- [CalicoNodeWindowsDaemonSetPodSpec](#caliconodewindowsdaemonsetpodspec) - -| Field | Description | -| --- | --- | -| `name` _string_ | Name is an enum which identifies the calico-node-windows DaemonSet container by name.
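As a concrete but hypothetical illustration of the override pattern these types describe, a resource request for the calico-node container can be expressed through the Installation resource roughly as follows.

```yaml
# Illustrative sketch only: overriding calico-node container resources.
apiVersion: operator.tigera.io/v1
kind: Installation
metadata:
  name: default
spec:
  calicoNodeDaemonSet:
    spec:
      template:
        spec:
          containers:
            - name: calico-node        # must match a supported container name
              resources:
                requests:
                  cpu: 250m
                  memory: 512Mi
                limits:
                  memory: 1Gi
```

The same metadata/spec/template/spec nesting applies to the other component overrides in this reference, such as csi-node-driver and calico-kube-controllers.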
    Supported values are: calico-node-windows | -| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core)_ | (Optional) Resources allows customization of limits and requests for compute resources such as cpu and memory. If specified, this overrides the named calico-node-windows DaemonSet container's resources. If omitted, the calico-node-windows DaemonSet will use its default value for this container's resources. If used in conjunction with the deprecated ComponentResources, then this value takes precedence. | - - -### CalicoNodeWindowsDaemonSetInitContainer - - - -CalicoNodeWindowsDaemonSetInitContainer is a calico-node-windows DaemonSet init container. - -_Appears in:_ -- [CalicoNodeWindowsDaemonSetPodSpec](#caliconodewindowsdaemonsetpodspec) - -| Field | Description | -| --- | --- | -| `name` _string_ | Name is an enum which identifies the calico-node-windows DaemonSet init container by name.
    Supported values are: install-cni, hostpath-init, flexvol-driver, mount-bpffs, node-certs-key-cert-provisioner, calico-node-windows-prometheus-server-tls-key-cert-provisioner | -| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core)_ | (Optional) Resources allows customization of limits and requests for compute resources such as cpu and memory. If specified, this overrides the named calico-node-windows DaemonSet init container's resources. If omitted, the calico-node-windows DaemonSet will use its default value for this container's resources. If used in conjunction with the deprecated ComponentResources, then this value takes precedence. | - - -### CalicoNodeWindowsDaemonSetPodSpec - - - -CalicoNodeWindowsDaemonSetPodSpec is the calico-node-windows DaemonSet's PodSpec. - -_Appears in:_ -- [CalicoNodeWindowsDaemonSetPodTemplateSpec](#caliconodewindowsdaemonsetpodtemplatespec) - -| Field | Description | -| --- | --- | -| `initContainers` _[CalicoNodeWindowsDaemonSetInitContainer](#caliconodewindowsdaemonsetinitcontainer) array_ | (Optional) InitContainers is a list of calico-node-windows init containers. If specified, this overrides the specified calico-node-windows DaemonSet init containers. If omitted, the calico-node-windows DaemonSet will use its default values for its init containers. | -| `containers` _[CalicoNodeWindowsDaemonSetContainer](#caliconodewindowsdaemonsetcontainer) array_ | (Optional) Containers is a list of calico-node-windows containers. If specified, this overrides the specified calico-node-windows DaemonSet containers. If omitted, the calico-node-windows DaemonSet will use its default values for its containers. | -| `affinity` _[Affinity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#affinity-v1-core)_ | (Optional) Affinity is a group of affinity scheduling rules for the calico-node-windows pods. If specified, this overrides any affinity that may be set on the calico-node-windows DaemonSet. If omitted, the calico-node-windows DaemonSet will use its default value for affinity.
    WARNING: Please note that this field will override the default calico-node-windows DaemonSet affinity. | -| `nodeSelector` _object (keys:string, values:string)_ | (Optional) NodeSelector is the calico-node-windows pod's scheduling constraints. If specified, each of the key/value pairs are added to the calico-node-windows DaemonSet nodeSelector provided the key does not already exist in the object's nodeSelector. If omitted, the calico-node-windows DaemonSet will use its default value for nodeSelector.
    WARNING: Please note that this field will modify the default calico-node-windows DaemonSet nodeSelector. | -| `tolerations` _[Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#toleration-v1-core) array_ | (Optional) Tolerations is the calico-node-windows pod's tolerations. If specified, this overrides any tolerations that may be set on the calico-node-windows DaemonSet. If omitted, the calico-node-windows DaemonSet will use its default value for tolerations.
    WARNING: Please note that this field will override the default calico-node-windows DaemonSet tolerations. | - - -### CalicoNodeWindowsDaemonSetPodTemplateSpec - - - -CalicoNodeWindowsDaemonSetPodTemplateSpec is the calico-node-windows DaemonSet's PodTemplateSpec - -_Appears in:_ -- [CalicoNodeWindowsDaemonSetSpec](#caliconodewindowsdaemonsetspec) - -| Field | Description | -| --- | --- | -| `metadata` _[Metadata](#metadata)_ | (Optional) Refer to Kubernetes API documentation for fields of `metadata`. | -| `spec` _[CalicoNodeWindowsDaemonSetPodSpec](#caliconodewindowsdaemonsetpodspec)_ | (Optional) Spec is the calico-node-windows DaemonSet's PodSpec. | - - -### CalicoNodeWindowsDaemonSetSpec - - - -CalicoNodeWindowsDaemonSetSpec defines configuration for the calico-node-windows DaemonSet. - -_Appears in:_ -- [CalicoNodeWindowsDaemonSet](#caliconodewindowsdaemonset) - -| Field | Description | -| --- | --- | -| `minReadySeconds` _integer_ | (Optional) MinReadySeconds is the minimum number of seconds for which a newly created DaemonSet pod should be ready without any of its container crashing, for it to be considered available. If specified, this overrides any minReadySeconds value that may be set on the calico-node-windows DaemonSet. If omitted, the calico-node-windows DaemonSet will use its default value for minReadySeconds. | -| `template` _[CalicoNodeWindowsDaemonSetPodTemplateSpec](#caliconodewindowsdaemonsetpodtemplatespec)_ | (Optional) Template describes the calico-node-windows DaemonSet pod that will be created. | - - -### CalicoWindowsUpgradeDaemonSet - - - -Deprecated. The CalicoWindowsUpgradeDaemonSet is deprecated and will be removed from the API in the future. -CalicoWindowsUpgradeDaemonSet is the configuration for the calico-windows-upgrade DaemonSet. - -_Appears in:_ -- [InstallationSpec](#installationspec) - -| Field | Description | -| --- | --- | -| `metadata` _[Metadata](#metadata)_ | (Optional) Refer to Kubernetes API documentation for fields of `metadata`. | -| `spec` _[CalicoWindowsUpgradeDaemonSetSpec](#calicowindowsupgradedaemonsetspec)_ | (Optional) Spec is the specification of the calico-windows-upgrade DaemonSet. | - - -### CalicoWindowsUpgradeDaemonSetContainer - - - -CalicoWindowsUpgradeDaemonSetContainer is a calico-windows-upgrade DaemonSet container. - -_Appears in:_ -- [CalicoWindowsUpgradeDaemonSetPodSpec](#calicowindowsupgradedaemonsetpodspec) - -| Field | Description | -| --- | --- | -| `name` _string_ | Name is an enum which identifies the calico-windows-upgrade DaemonSet container by name. | -| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core)_ | (Optional) Resources allows customization of limits and requests for compute resources such as cpu and memory. If specified, this overrides the named calico-windows-upgrade DaemonSet container's resources. If omitted, the calico-windows-upgrade DaemonSet will use its default value for this container's resources. | - - -### CalicoWindowsUpgradeDaemonSetPodSpec - - - -CalicoWindowsUpgradeDaemonSetPodSpec is the calico-windows-upgrade DaemonSet's PodSpec. - -_Appears in:_ -- [CalicoWindowsUpgradeDaemonSetPodTemplateSpec](#calicowindowsupgradedaemonsetpodtemplatespec) - -| Field | Description | -| --- | --- | -| `containers` _[CalicoWindowsUpgradeDaemonSetContainer](#calicowindowsupgradedaemonsetcontainer) array_ | (Optional) Containers is a list of calico-windows-upgrade containers. 
If specified, this overrides the specified calico-windows-upgrade DaemonSet containers. If omitted, the calico-windows-upgrade DaemonSet will use its default values for its containers. | -| `affinity` _[Affinity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#affinity-v1-core)_ | (Optional) Affinity is a group of affinity scheduling rules for the calico-windows-upgrade pods. If specified, this overrides any affinity that may be set on the calico-windows-upgrade DaemonSet. If omitted, the calico-windows-upgrade DaemonSet will use its default value for affinity.
    WARNING: Please note that this field will override the default calico-windows-upgrade DaemonSet affinity. | -| `nodeSelector` _object (keys:string, values:string)_ | (Optional) NodeSelector is the calico-windows-upgrade pod's scheduling constraints. If specified, each of the key/value pairs are added to the calico-windows-upgrade DaemonSet nodeSelector provided the key does not already exist in the object's nodeSelector. If omitted, the calico-windows-upgrade DaemonSet will use its default value for nodeSelector.
    WARNING: Please note that this field will modify the default calico-windows-upgrade DaemonSet nodeSelector. | -| `tolerations` _[Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#toleration-v1-core) array_ | (Optional) Tolerations is the calico-windows-upgrade pod's tolerations. If specified, this overrides any tolerations that may be set on the calico-windows-upgrade DaemonSet. If omitted, the calico-windows-upgrade DaemonSet will use its default value for tolerations.
    WARNING: Please note that this field will override the default calico-windows-upgrade DaemonSet tolerations. | - - -### CalicoWindowsUpgradeDaemonSetPodTemplateSpec - - - -CalicoWindowsUpgradeDaemonSetPodTemplateSpec is the calico-windows-upgrade DaemonSet's PodTemplateSpec - -_Appears in:_ -- [CalicoWindowsUpgradeDaemonSetSpec](#calicowindowsupgradedaemonsetspec) - -| Field | Description | -| --- | --- | -| `metadata` _[Metadata](#metadata)_ | (Optional) Refer to Kubernetes API documentation for fields of `metadata`. | -| `spec` _[CalicoWindowsUpgradeDaemonSetPodSpec](#calicowindowsupgradedaemonsetpodspec)_ | (Optional) Spec is the calico-windows-upgrade DaemonSet's PodSpec. | - - -### CalicoWindowsUpgradeDaemonSetSpec - - - -CalicoWindowsUpgradeDaemonSetSpec defines configuration for the calico-windows-upgrade DaemonSet. - -_Appears in:_ -- [CalicoWindowsUpgradeDaemonSet](#calicowindowsupgradedaemonset) - -| Field | Description | -| --- | --- | -| `minReadySeconds` _integer_ | (Optional) MinReadySeconds is the minimum number of seconds for which a newly created Deployment pod should be ready without any of its container crashing, for it to be considered available. If specified, this overrides any minReadySeconds value that may be set on the calico-windows-upgrade DaemonSet. If omitted, the calico-windows-upgrade DaemonSet will use its default value for minReadySeconds. | -| `template` _[CalicoWindowsUpgradeDaemonSetPodTemplateSpec](#calicowindowsupgradedaemonsetpodtemplatespec)_ | (Optional) Template describes the calico-windows-upgrade DaemonSet pod that will be created. | - - -### CertificateManagement - - - -CertificateManagement configures pods to submit a CertificateSigningRequest to the certificates.k8s.io/v1beta1 API in order -to obtain TLS certificates. This feature requires that you bring your own CSR signing and approval process, otherwise -pods will be stuck during initialization. - -_Appears in:_ -- [InstallationSpec](#installationspec) - -| Field | Description | -| --- | --- | -| `caCert` _integer array_ | Certificate of the authority that signs the CertificateSigningRequests in PEM format. | -| `signerName` _string_ | When a CSR is issued to the certificates.k8s.io API, the signerName is added to the request in order to accommodate for clusters with multiple signers. Must be formatted as: `/`. | -| `keyAlgorithm` _string_ | (Optional) Specify the algorithm used by pods to generate a key pair that is associated with the X.509 certificate request.
    Default: RSAWithSize2048 | -| `signatureAlgorithm` _string_ | (Optional) Specify the algorithm used for the signature of the X.509 certificate request.
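A hedged sketch of CertificateManagement in an Installation resource follows; the signer name is a hypothetical placeholder, and a real PEM CA bundle must be supplied for caCert.

```yaml
# Illustrative sketch only: bring-your-own CSR signing and approval.
apiVersion: operator.tigera.io/v1
kind: Installation
metadata:
  name: default
spec:
  certificateManagement:
    # caCert is the certificate of the signing CA (a byte array in the API).
    caCert: "<PEM-encoded CA certificate>"
    signerName: example.com/my-signer   # hypothetical <domain>/<signer-name> value
    keyAlgorithm: RSAWithSize2048
    signatureAlgorithm: SHA256WithRSA
```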
    Default: SHA256WithRSA | - - -### CollectProcessPathOption - -_Underlying type:_ _string_ - - - -_Appears in:_ -- [LogCollectorSpec](#logcollectorspec) - -| Value | Description | -| --- | --- | -| `Enabled` | | -| `Disabled` | | - - -### CommonPrometheusFields - - - - - -_Appears in:_ -- [PrometheusSpec](#prometheusspec) - -| Field | Description | -| --- | --- | -| `containers` _[PrometheusContainer](#prometheuscontainer) array_ | (Optional) Containers is a list of Prometheus containers. If specified, this overrides the specified Prometheus Deployment containers. If omitted, the Prometheus Deployment will use its default values for its containers. | -| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core)_ | Define resources requests and limits for single Pods. | - - -### Compliance - - - -Compliance installs the components required for Tigera compliance reporting. At most one instance -of this resource is supported. It must be named "tigera-secure". - -| Field | Description | -| --- | --- | -| `apiVersion` _string_ | `operator.tigera.io/v1` | -| `kind` _string_ | `Compliance` | -| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | -| `spec` _[ComplianceSpec](#compliancespec)_ | Specification of the desired state for Tigera compliance reporting. | -| `status` _[ComplianceStatus](#compliancestatus)_ | Most recently observed state for Tigera compliance reporting. | - - -### ComplianceBenchmarkerDaemonSet - - - -ComplianceBenchmarkerDaemonSet is the configuration for the Compliance Benchmarker DaemonSet. - -_Appears in:_ -- [ComplianceSpec](#compliancespec) - -| Field | Description | -| --- | --- | -| `spec` _[ComplianceBenchmarkerDaemonSetSpec](#compliancebenchmarkerdaemonsetspec)_ | (Optional) Spec is the specification of the Compliance Benchmarker DaemonSet. | - - -### ComplianceBenchmarkerDaemonSetContainer - - - -ComplianceBenchmarkerDaemonSetContainer is a Compliance Benchmarker DaemonSet container. - -_Appears in:_ -- [ComplianceBenchmarkerDaemonSetPodSpec](#compliancebenchmarkerdaemonsetpodspec) - -| Field | Description | -| --- | --- | -| `name` _string_ | Name is an enum which identifies the Compliance Benchmarker DaemonSet container by name.
    Supported values are: compliance-benchmarker | -| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core)_ | (Optional) Resources allows customization of limits and requests for compute resources such as cpu and memory. If specified, this overrides the named Compliance Benchmarker DaemonSet container's resources. If omitted, the Compliance Benchmarker DaemonSet will use its default value for this container's resources. | - - -### ComplianceBenchmarkerDaemonSetInitContainer - - - -ComplianceBenchmarkerDaemonSetInitContainer is a Compliance Benchmarker DaemonSet init container. - -_Appears in:_ -- [ComplianceBenchmarkerDaemonSetPodSpec](#compliancebenchmarkerdaemonsetpodspec) - -| Field | Description | -| --- | --- | -| `name` _string_ | Name is an enum which identifies the Compliance Benchmarker DaemonSet init container by name.
    Supported values are: tigera-compliance-benchmarker-tls-key-cert-provisioner | -| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core)_ | (Optional) Resources allows customization of limits and requests for compute resources such as cpu and memory. If specified, this overrides the named Compliance Benchmarker DaemonSet init container's resources. If omitted, the Compliance Benchmarker DaemonSet will use its default value for this init container's resources. | - - -### ComplianceBenchmarkerDaemonSetPodSpec - - - -ComplianceBenchmarkerDaemonSetPodSpec is the Compliance Benchmarker DaemonSet's PodSpec. - -_Appears in:_ -- [ComplianceBenchmarkerDaemonSetPodTemplateSpec](#compliancebenchmarkerdaemonsetpodtemplatespec) - -| Field | Description | -| --- | --- | -| `initContainers` _[ComplianceBenchmarkerDaemonSetInitContainer](#compliancebenchmarkerdaemonsetinitcontainer) array_ | (Optional) InitContainers is a list of Compliance benchmark init containers. If specified, this overrides the specified Compliance Benchmarker DaemonSet init containers. If omitted, the Compliance Benchmarker DaemonSet will use its default values for its init containers. | -| `containers` _[ComplianceBenchmarkerDaemonSetContainer](#compliancebenchmarkerdaemonsetcontainer) array_ | (Optional) Containers is a list of Compliance benchmark containers. If specified, this overrides the specified Compliance Benchmarker DaemonSet containers. If omitted, the Compliance Benchmarker DaemonSet will use its default values for its containers. | - - -### ComplianceBenchmarkerDaemonSetPodTemplateSpec - - - -ComplianceBenchmarkerDaemonSetPodTemplateSpec is the Compliance Benchmarker DaemonSet's PodTemplateSpec - -_Appears in:_ -- [ComplianceBenchmarkerDaemonSetSpec](#compliancebenchmarkerdaemonsetspec) - -| Field | Description | -| --- | --- | -| `spec` _[ComplianceBenchmarkerDaemonSetPodSpec](#compliancebenchmarkerdaemonsetpodspec)_ | (Optional) Spec is the Compliance Benchmarker DaemonSet's PodSpec. | - - -### ComplianceBenchmarkerDaemonSetSpec - - - -ComplianceBenchmarkerDaemonSetSpec defines configuration for the Compliance Benchmarker DaemonSet. - -_Appears in:_ -- [ComplianceBenchmarkerDaemonSet](#compliancebenchmarkerdaemonset) - -| Field | Description | -| --- | --- | -| `template` _[ComplianceBenchmarkerDaemonSetPodTemplateSpec](#compliancebenchmarkerdaemonsetpodtemplatespec)_ | (Optional) Template describes the Compliance Benchmarker DaemonSet pod that will be created. | - - -### ComplianceControllerDeployment - - - -ComplianceControllerDeployment is the configuration for the compliance controller Deployment. - -_Appears in:_ -- [ComplianceSpec](#compliancespec) - -| Field | Description | -| --- | --- | -| `spec` _[ComplianceControllerDeploymentSpec](#compliancecontrollerdeploymentspec)_ | (Optional) Spec is the specification of the compliance controller Deployment. | - - -### ComplianceControllerDeploymentContainer - - - -ComplianceControllerDeploymentContainer is a compliance controller Deployment container. - -_Appears in:_ -- [ComplianceControllerDeploymentPodSpec](#compliancecontrollerdeploymentpodspec) - -| Field | Description | -| --- | --- | -| `name` _string_ | Name is an enum which identifies the compliance controller Deployment container by name.
    Supported values are: compliance-controller | -| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core)_ | (Optional) Resources allows customization of limits and requests for compute resources such as cpu and memory. If specified, this overrides the named compliance controller Deployment container's resources. If omitted, the compliance controller Deployment will use its default value for this container's resources. | - - -### ComplianceControllerDeploymentInitContainer - - - -ComplianceControllerDeploymentInitContainer is a compliance controller Deployment init container. - -_Appears in:_ -- [ComplianceControllerDeploymentPodSpec](#compliancecontrollerdeploymentpodspec) - -| Field | Description | -| --- | --- | -| `name` _string_ | Name is an enum which identifies the compliance controller Deployment init container by name.
    Supported values are: tigera-compliance-controller-tls-key-cert-provisioner | -| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core)_ | (Optional) Resources allows customization of limits and requests for compute resources such as cpu and memory. If specified, this overrides the named compliance controller Deployment init container's resources. If omitted, the compliance controller Deployment will use its default value for this init container's resources. | - - -### ComplianceControllerDeploymentPodSpec - - - -ComplianceControllerDeploymentPodSpec is the compliance controller Deployment's PodSpec. - -_Appears in:_ -- [ComplianceControllerDeploymentPodTemplateSpec](#compliancecontrollerdeploymentpodtemplatespec) - -| Field | Description | -| --- | --- | -| `initContainers` _[ComplianceControllerDeploymentInitContainer](#compliancecontrollerdeploymentinitcontainer) array_ | (Optional) InitContainers is a list of compliance controller init containers. If specified, this overrides the specified compliance controller Deployment init containers. If omitted, the compliance controller Deployment will use its default values for its init containers. | -| `containers` _[ComplianceControllerDeploymentContainer](#compliancecontrollerdeploymentcontainer) array_ | (Optional) Containers is a list of compliance controller containers. If specified, this overrides the specified compliance controller Deployment containers. If omitted, the compliance controller Deployment will use its default values for its containers. | - - -### ComplianceControllerDeploymentPodTemplateSpec - - - -ComplianceControllerDeploymentPodTemplateSpec is the compliance controller Deployment's PodTemplateSpec - -_Appears in:_ -- [ComplianceControllerDeploymentSpec](#compliancecontrollerdeploymentspec) - -| Field | Description | -| --- | --- | -| `spec` _[ComplianceControllerDeploymentPodSpec](#compliancecontrollerdeploymentpodspec)_ | (Optional) Spec is the compliance controller Deployment's PodSpec. | - - -### ComplianceControllerDeploymentSpec - - - -ComplianceControllerDeploymentSpec defines configuration for the compliance controller Deployment. - -_Appears in:_ -- [ComplianceControllerDeployment](#compliancecontrollerdeployment) - -| Field | Description | -| --- | --- | -| `template` _[ComplianceControllerDeploymentPodTemplateSpec](#compliancecontrollerdeploymentpodtemplatespec)_ | (Optional) Template describes the compliance controller Deployment pod that will be created. | - - -### ComplianceReporterPodSpec - - - -ComplianceReporterPodSpec is the ComplianceReporter PodSpec. - -_Appears in:_ -- [ComplianceReporterPodTemplateSpec](#compliancereporterpodtemplatespec) - -| Field | Description | -| --- | --- | -| `initContainers` _[ComplianceReporterPodTemplateInitContainer](#compliancereporterpodtemplateinitcontainer) array_ | (Optional) InitContainers is a list of ComplianceReporter PodSpec init containers. If specified, this overrides the specified ComplianceReporter PodSpec init containers. If omitted, the ComplianceServer Deployment will use its default values for its init containers. | -| `containers` _[ComplianceReporterPodTemplateContainer](#compliancereporterpodtemplatecontainer) array_ | (Optional) Containers is a list of ComplianceServer containers. If specified, this overrides the specified ComplianceReporter PodSpec containers. If omitted, the ComplianceServer Deployment will use its default values for its containers. 
| - - -### ComplianceReporterPodTemplate - - - -ComplianceReporterPodTemplate is the configuration for the ComplianceReporter PodTemplate. - -_Appears in:_ -- [ComplianceSpec](#compliancespec) - -| Field | Description | -| --- | --- | -| `template` _[ComplianceReporterPodTemplateSpec](#compliancereporterpodtemplatespec)_ | (Optional) Spec is the specification of the ComplianceReporter PodTemplateSpec. | - - -### ComplianceReporterPodTemplateContainer - - - -ComplianceReporterPodTemplateContainer is a ComplianceServer Deployment container. - -_Appears in:_ -- [ComplianceReporterPodSpec](#compliancereporterpodspec) - -| Field | Description | -| --- | --- | -| `name` _string_ | Name is an enum which identifies the ComplianceServer Deployment container by name.
    Supported values are: reporter | -| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core)_ | (Optional) Resources allows customization of limits and requests for compute resources such as cpu and memory. If specified, this overrides the named ComplianceServer Deployment container's resources. If omitted, the ComplianceServer Deployment will use its default value for this container's resources. | - - -### ComplianceReporterPodTemplateInitContainer - - - -ComplianceReporterPodTemplateInitContainer is a ComplianceServer Deployment init container. - -_Appears in:_ -- [ComplianceReporterPodSpec](#compliancereporterpodspec) - -| Field | Description | -| --- | --- | -| `name` _string_ | Name is an enum which identifies the ComplianceReporter PodSpec init container by name.
    Supported values are: tigera-compliance-reporter-tls-key-cert-provisioner | -| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core)_ | (Optional) Resources allows customization of limits and requests for compute resources such as cpu and memory. If specified, this overrides the named ComplianceReporter PodSpec init container's resources. If omitted, the ComplianceServer Deployment will use its default value for this init container's resources. | - - -### ComplianceReporterPodTemplateSpec - - - -ComplianceReporterPodTemplateSpec is the ComplianceReporter PodTemplateSpec. - -_Appears in:_ -- [ComplianceReporterPodTemplate](#compliancereporterpodtemplate) - -| Field | Description | -| --- | --- | -| `spec` _[ComplianceReporterPodSpec](#compliancereporterpodspec)_ | (Optional) Spec is the ComplianceReporter PodTemplate's PodSpec. | - - -### ComplianceServerDeployment - - - -ComplianceServerDeployment is the configuration for the ComplianceServer Deployment. - -_Appears in:_ -- [ComplianceSpec](#compliancespec) - -| Field | Description | -| --- | --- | -| `spec` _[ComplianceServerDeploymentSpec](#complianceserverdeploymentspec)_ | (Optional) Spec is the specification of the ComplianceServer Deployment. | - - -### ComplianceServerDeploymentContainer - - - -ComplianceServerDeploymentContainer is a ComplianceServer Deployment container. - -_Appears in:_ -- [ComplianceServerDeploymentPodSpec](#complianceserverdeploymentpodspec) - -| Field | Description | -| --- | --- | -| `name` _string_ | Name is an enum which identifies the ComplianceServer Deployment container by name.
    Supported values are: compliance-server | -| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core)_ | (Optional) Resources allows customization of limits and requests for compute resources such as cpu and memory. If specified, this overrides the named ComplianceServer Deployment container's resources. If omitted, the ComplianceServer Deployment will use its default value for this container's resources. | - - -### ComplianceServerDeploymentInitContainer - - - -ComplianceServerDeploymentInitContainer is a ComplianceServer Deployment init container. - -_Appears in:_ -- [ComplianceServerDeploymentPodSpec](#complianceserverdeploymentpodspec) - -| Field | Description | -| --- | --- | -| `name` _string_ | Name is an enum which identifies the ComplianceServer Deployment init container by name.
    Supported values are: tigera-compliance-server-tls-key-cert-provisioner | -| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core)_ | (Optional) Resources allows customization of limits and requests for compute resources such as cpu and memory. If specified, this overrides the named ComplianceServer Deployment init container's resources. If omitted, the ComplianceServer Deployment will use its default value for this init container's resources. | - - -### ComplianceServerDeploymentPodSpec - - - -ComplianceServerDeploymentPodSpec is the ComplianceServer Deployment's PodSpec. - -_Appears in:_ -- [ComplianceServerDeploymentPodTemplateSpec](#complianceserverdeploymentpodtemplatespec) - -| Field | Description | -| --- | --- | -| `initContainers` _[ComplianceServerDeploymentInitContainer](#complianceserverdeploymentinitcontainer) array_ | (Optional) InitContainers is a list of ComplianceServer init containers. If specified, this overrides the specified ComplianceServer Deployment init containers. If omitted, the ComplianceServer Deployment will use its default values for its init containers. | -| `containers` _[ComplianceServerDeploymentContainer](#complianceserverdeploymentcontainer) array_ | (Optional) Containers is a list of ComplianceServer containers. If specified, this overrides the specified ComplianceServer Deployment containers. If omitted, the ComplianceServer Deployment will use its default values for its containers. | - - -### ComplianceServerDeploymentPodTemplateSpec - - - -ComplianceServerDeploymentPodTemplateSpec is the ComplianceServer Deployment's PodTemplateSpec - -_Appears in:_ -- [ComplianceServerDeploymentSpec](#complianceserverdeploymentspec) - -| Field | Description | -| --- | --- | -| `spec` _[ComplianceServerDeploymentPodSpec](#complianceserverdeploymentpodspec)_ | (Optional) Spec is the ComplianceServer Deployment's PodSpec. | - - -### ComplianceServerDeploymentSpec - - - -ComplianceServerDeploymentSpec defines configuration for the ComplianceServer Deployment. - -_Appears in:_ -- [ComplianceServerDeployment](#complianceserverdeployment) - -| Field | Description | -| --- | --- | -| `template` _[ComplianceServerDeploymentPodTemplateSpec](#complianceserverdeploymentpodtemplatespec)_ | (Optional) Template describes the ComplianceServer Deployment pod that will be created. | - - -### ComplianceSnapshotterDeployment - - - -ComplianceSnapshotterDeployment is the configuration for the compliance snapshotter Deployment. - -_Appears in:_ -- [ComplianceSpec](#compliancespec) - -| Field | Description | -| --- | --- | -| `spec` _[ComplianceSnapshotterDeploymentSpec](#compliancesnapshotterdeploymentspec)_ | (Optional) Spec is the specification of the compliance snapshotter Deployment. | - - -### ComplianceSnapshotterDeploymentContainer - - - -ComplianceSnapshotterDeploymentContainer is a compliance snapshotter Deployment container. - -_Appears in:_ -- [ComplianceSnapshotterDeploymentPodSpec](#compliancesnapshotterdeploymentpodspec) - -| Field | Description | -| --- | --- | -| `name` _string_ | Name is an enum which identifies the compliance snapshotter Deployment container by name.
    Supported values are: compliance-snapshotter | -| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core)_ | (Optional) Resources allows customization of limits and requests for compute resources such as cpu and memory. If specified, this overrides the named compliance snapshotter Deployment container's resources. If omitted, the compliance snapshotter Deployment will use its default value for this container's resources. | - - -### ComplianceSnapshotterDeploymentInitContainer - - - -ComplianceSnapshotterDeploymentInitContainer is a compliance snapshotter Deployment init container. - -_Appears in:_ -- [ComplianceSnapshotterDeploymentPodSpec](#compliancesnapshotterdeploymentpodspec) - -| Field | Description | -| --- | --- | -| `name` _string_ | Name is an enum which identifies the compliance snapshotter Deployment init container by name.
    Supported values are: tigera-compliance-snapshotter-tls-key-cert-provisioner | -| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core)_ | (Optional) Resources allows customization of limits and requests for compute resources such as cpu and memory. If specified, this overrides the named compliance snapshotter Deployment init container's resources. If omitted, the compliance snapshotter Deployment will use its default value for this init container's resources. | - - -### ComplianceSnapshotterDeploymentPodSpec - - - -ComplianceSnapshotterDeploymentPodSpec is the compliance snapshotter Deployment's PodSpec. - -_Appears in:_ -- [ComplianceSnapshotterDeploymentPodTemplateSpec](#compliancesnapshotterdeploymentpodtemplatespec) - -| Field | Description | -| --- | --- | -| `initContainers` _[ComplianceSnapshotterDeploymentInitContainer](#compliancesnapshotterdeploymentinitcontainer) array_ | (Optional) InitContainers is a list of compliance snapshotter init containers. If specified, this overrides the specified compliance snapshotter Deployment init containers. If omitted, the compliance snapshotter Deployment will use its default values for its init containers. | -| `containers` _[ComplianceSnapshotterDeploymentContainer](#compliancesnapshotterdeploymentcontainer) array_ | (Optional) Containers is a list of compliance snapshotter containers. If specified, this overrides the specified compliance snapshotter Deployment containers. If omitted, the compliance snapshotter Deployment will use its default values for its containers. | - - -### ComplianceSnapshotterDeploymentPodTemplateSpec - - - -ComplianceSnapshotterDeploymentPodTemplateSpec is the compliance snapshotter Deployment's PodTemplateSpec - -_Appears in:_ -- [ComplianceSnapshotterDeploymentSpec](#compliancesnapshotterdeploymentspec) - -| Field | Description | -| --- | --- | -| `spec` _[ComplianceSnapshotterDeploymentPodSpec](#compliancesnapshotterdeploymentpodspec)_ | (Optional) Spec is the compliance snapshotter Deployment's PodSpec. | - - -### ComplianceSnapshotterDeploymentSpec - - - -ComplianceSnapshotterDeploymentSpec defines configuration for the compliance snapshotter Deployment. - -_Appears in:_ -- [ComplianceSnapshotterDeployment](#compliancesnapshotterdeployment) - -| Field | Description | -| --- | --- | -| `template` _[ComplianceSnapshotterDeploymentPodTemplateSpec](#compliancesnapshotterdeploymentpodtemplatespec)_ | (Optional) Template describes the compliance snapshotter Deployment pod that will be created. | - - -### ComplianceSpec - - - -ComplianceSpec defines the desired state of Tigera compliance reporting capabilities. - -_Appears in:_ -- [Compliance](#compliance) - -| Field | Description | -| --- | --- | -| `complianceControllerDeployment` _[ComplianceControllerDeployment](#compliancecontrollerdeployment)_ | (Optional) ComplianceControllerDeployment configures the Compliance Controller Deployment. | -| `complianceSnapshotterDeployment` _[ComplianceSnapshotterDeployment](#compliancesnapshotterdeployment)_ | (Optional) ComplianceSnapshotterDeployment configures the Compliance Snapshotter Deployment. | -| `complianceBenchmarkerDaemonSet` _[ComplianceBenchmarkerDaemonSet](#compliancebenchmarkerdaemonset)_ | (Optional) ComplianceBenchmarkerDaemonSet configures the Compliance Benchmarker DaemonSet. 
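For reference, a minimal hypothetical Compliance resource that overrides the compliance-controller container resources could look like the sketch below; only the resource name "tigera-secure" is mandated by this reference, and the request value is an assumption.

```yaml
# Illustrative sketch only: Compliance resource with a controller resource override.
apiVersion: operator.tigera.io/v1
kind: Compliance
metadata:
  name: tigera-secure
spec:
  complianceControllerDeployment:
    spec:
      template:
        spec:
          containers:
            - name: compliance-controller   # supported container name
              resources:
                requests:
                  memory: 256Mi
```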
| -| `complianceServerDeployment` _[ComplianceServerDeployment](#complianceserverdeployment)_ | (Optional) ComplianceServerDeployment configures the Compliance Server Deployment. | -| `complianceReporterPodTemplate` _[ComplianceReporterPodTemplate](#compliancereporterpodtemplate)_ | (Optional) ComplianceReporterPodTemplate configures the Compliance Reporter PodTemplate. | - - -### ComplianceStatus - - - -ComplianceStatus defines the observed state of Tigera compliance reporting capabilities. - -_Appears in:_ -- [Compliance](#compliance) - -| Field | Description | -| --- | --- | -| `state` _string_ | State provides user-readable status. | -| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#condition-v1-meta) array_ | (Optional) Conditions represents the latest observed set of conditions for the component. A component may be one or more of Ready, Progressing, Degraded or other customer types. | - - -### ComponentName - -_Underlying type:_ _string_ - -ComponentName represents a single component. - -One of: Node, Typha, KubeControllers - -_Appears in:_ -- [ComponentResource](#componentresource) - -| Value | Description | -| --- | --- | -| `Node` | | -| `NodeWindows` | | -| `FelixWindows` | | -| `ConfdWindows` | | -| `Typha` | | -| `KubeControllers` | | - - -### ComponentResource - - - -Deprecated. Please use component resource config fields in Installation.Spec instead. -The ComponentResource struct associates a ResourceRequirements with a component by name - -_Appears in:_ -- [InstallationSpec](#installationspec) - -| Field | Description | -| --- | --- | -| `componentName` _[ComponentName](#componentname)_ | ComponentName is an enum which identifies the component | -| `resourceRequirements` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core)_ | ResourceRequirements allows customization of limits and requests for compute resources such as cpu and memory. | - - -### ConditionStatus - -_Underlying type:_ _string_ - -ConditionStatus represents the status of a particular condition. A condition may be one of: True, False, Unknown. - -_Appears in:_ -- [TigeraStatusCondition](#tigerastatuscondition) - -| Value | Description | -| --- | --- | -| `True` | | -| `False` | | -| `Unknown` | | - - -### ContainerIPForwardingType - -_Underlying type:_ _string_ - -ContainerIPForwardingType specifies whether the CNI config for container ip forwarding is enabled. - -_Appears in:_ -- [CalicoNetworkSpec](#caliconetworkspec) - -| Value | Description | -| --- | --- | -| `Enabled` | | -| `Disabled` | | - - - - -### DashboardsJobContainer - - - -DashboardsJobContainer is the Dashboards job container. - -_Appears in:_ -- [DashboardsJobPodSpec](#dashboardsjobpodspec) - -| Field | Description | -| --- | --- | -| `name` _string_ | Name is an enum which identifies the Dashboard Job container by name.
    Supported values are: dashboards-installer | -| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core)_ | (Optional) Resources allows customization of limits and requests for compute resources such as cpu and memory. If specified, this overrides the named Dashboard Job container's resources. If omitted, the Dashboard Job will use its default value for this container's resources. | - - -### DashboardsJobPodSpec - - - -DashboardsJobPodSpec is the Dashboards job's PodSpec. - -_Appears in:_ -- [DashboardsJobPodTemplateSpec](#dashboardsjobpodtemplatespec) - -| Field | Description | -| --- | --- | -| `containers` _[DashboardsJobContainer](#dashboardsjobcontainer) array_ | (Optional) Containers is a list of dashboards job containers. If specified, this overrides the specified Dashboard job containers. If omitted, the Dashboard job will use its default values for its containers. | - - -### DashboardsJobPodTemplateSpec - - - -DashboardsJobPodTemplateSpec is the Dashboards job's PodTemplateSpec - -_Appears in:_ -- [DashboardsJobSpec](#dashboardsjobspec) - -| Field | Description | -| --- | --- | -| `spec` _[DashboardsJobPodSpec](#dashboardsjobpodspec)_ | (Optional) Spec is the Dashboard job's PodSpec. | - - -### DashboardsJobSpec - - - -DashboardsJobSpec defines configuration for the Dashboards job. - -_Appears in:_ -- [DashboardsJob](#dashboardsjob) - -| Field | Description | -| --- | --- | -| `template` _[DashboardsJobPodTemplateSpec](#dashboardsjobpodtemplatespec)_ | (Optional) Template describes the Dashboards job pod that will be created. | - - -### DataType - -_Underlying type:_ _string_ - -DataType represent the type of data stored - -_Validation:_ -- Enum: [Alerts AuditLogs BGPLogs ComplianceBenchmarks ComplianceReports ComplianceSnapshots DNSLogs FlowLogs L7Logs RuntimeReports ThreatFeedsDomainSet ThreatFeedsIPSet WAFLogs] - - -_Appears in:_ -- [Index](#index) - -| Value | Description | -| --- | --- | -| `Alerts` | | -| `AuditLogs` | | -| `BGPLogs` | | -| `ComplianceBenchmarks` | | -| `ComplianceReports` | | -| `ComplianceSnapshots` | | -| `DNSLogs` | | -| `FlowLogs` | | -| `L7Logs` | | -| `RuntimeReports` | | -| `ThreatFeedsDomainSet` | | -| `ThreatFeedsIPSet` | | -| `WAFLogs` | | - - -### DexDeployment - - - -DexDeployment is the configuration for the Dex Deployment. - -_Appears in:_ -- [AuthenticationSpec](#authenticationspec) - -| Field | Description | -| --- | --- | -| `spec` _[DexDeploymentSpec](#dexdeploymentspec)_ | (Optional) Spec is the specification of the Dex Deployment. | - - -### DexDeploymentContainer - - - -DexDeploymentContainer is a Dex Deployment container. - -_Appears in:_ -- [DexDeploymentPodSpec](#dexdeploymentpodspec) - -| Field | Description | -| --- | --- | -| `name` _string_ | Name is an enum which identifies the Dex Deployment container by name.
    Supported values are: tigera-dex | -| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core)_ | (Optional) Resources allows customization of limits and requests for compute resources such as cpu and memory. If specified, this overrides the named Dex Deployment container's resources. If omitted, the Dex Deployment will use its default value for this container's resources. | - - -### DexDeploymentInitContainer - - - -DexDeploymentInitContainer is a Dex Deployment init container. - -_Appears in:_ -- [DexDeploymentPodSpec](#dexdeploymentpodspec) - -| Field | Description | -| --- | --- | -| `name` _string_ | Name is an enum which identifies the Dex Deployment init container by name.
    Supported values are: tigera-dex-tls-key-cert-provisioner | -| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core)_ | (Optional) Resources allows customization of limits and requests for compute resources such as cpu and memory. If specified, this overrides the named Dex Deployment init container's resources. If omitted, the Dex Deployment will use its default value for this init container's resources. | - - -### DexDeploymentPodSpec - - - -DexDeploymentPodSpec is the Dex Deployment's PodSpec. - -_Appears in:_ -- [DexDeploymentPodTemplateSpec](#dexdeploymentpodtemplatespec) - -| Field | Description | -| --- | --- | -| `initContainers` _[DexDeploymentInitContainer](#dexdeploymentinitcontainer) array_ | (Optional) InitContainers is a list of Dex init containers. If specified, this overrides the specified Dex Deployment init containers. If omitted, the Dex Deployment will use its default values for its init containers. | -| `containers` _[DexDeploymentContainer](#dexdeploymentcontainer) array_ | (Optional) Containers is a list of Dex containers. If specified, this overrides the specified Dex Deployment containers. If omitted, the Dex Deployment will use its default values for its containers. | - - -### DexDeploymentPodTemplateSpec - - - -DexDeploymentPodTemplateSpec is the Dex Deployment's PodTemplateSpec - -_Appears in:_ -- [DexDeploymentSpec](#dexdeploymentspec) - -| Field | Description | -| --- | --- | -| `spec` _[DexDeploymentPodSpec](#dexdeploymentpodspec)_ | (Optional) Spec is the Dex Deployment's PodSpec. | - - -### DexDeploymentSpec - - - -DexDeploymentSpec defines configuration for the Dex Deployment. - -_Appears in:_ -- [DexDeployment](#dexdeployment) - -| Field | Description | -| --- | --- | -| `template` _[DexDeploymentPodTemplateSpec](#dexdeploymentpodtemplatespec)_ | (Optional) Template describes the Dex Deployment pod that will be created. | - - -### ECKOperatorStatefulSet - - - -ECKOperatorStatefulSet is the configuration for the ECKOperator StatefulSet. - -_Appears in:_ -- [LogStorageSpec](#logstoragespec) - -| Field | Description | -| --- | --- | -| `spec` _[ECKOperatorStatefulSetSpec](#eckoperatorstatefulsetspec)_ | (Optional) Spec is the specification of the ECKOperator StatefulSet. | - - -### ECKOperatorStatefulSetContainer - - - -ECKOperatorStatefulSetContainer is a ECKOperator StatefulSet container. - -_Appears in:_ -- [ECKOperatorStatefulSetPodSpec](#eckoperatorstatefulsetpodspec) - -| Field | Description | -| --- | --- | -| `name` _string_ | Name is an enum which identifies the ECKOperator StatefulSet container by name.
    Supported values are: manager | -| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core)_ | (Optional) Resources allows customization of limits and requests for compute resources such as cpu and memory. If specified, this overrides the named ECKOperator StatefulSet container's resources. If omitted, the ECKOperator StatefulSet will use its default value for this container's resources. | - - -### ECKOperatorStatefulSetInitContainer - - - -ECKOperatorStatefulSetInitContainer is a ECKOperator StatefulSet init container. - -_Appears in:_ -- [ECKOperatorStatefulSetPodSpec](#eckoperatorstatefulsetpodspec) - -| Field | Description | -| --- | --- | -| `name` _string_ | Name is an enum which identifies the ECKOperator StatefulSet init container by name. | -| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core)_ | (Optional) Resources allows customization of limits and requests for compute resources such as cpu and memory. If specified, this overrides the named ECKOperator StatefulSet init container's resources. If omitted, the ECKOperator StatefulSet will use its default value for this init container's resources. | - - -### ECKOperatorStatefulSetPodSpec - - - -ECKOperatorStatefulSetPodSpec is the ECKOperator StatefulSet's PodSpec. - -_Appears in:_ -- [ECKOperatorStatefulSetPodTemplateSpec](#eckoperatorstatefulsetpodtemplatespec) - -| Field | Description | -| --- | --- | -| `initContainers` _[ECKOperatorStatefulSetInitContainer](#eckoperatorstatefulsetinitcontainer) array_ | (Optional) InitContainers is a list of ECKOperator StatefulSet init containers. If specified, this overrides the specified ECKOperator StatefulSet init containers. If omitted, the ECKOperator StatefulSet will use its default values for its init containers. | -| `containers` _[ECKOperatorStatefulSetContainer](#eckoperatorstatefulsetcontainer) array_ | (Optional) Containers is a list of ECKOperator StatefulSet containers. If specified, this overrides the specified ECKOperator StatefulSet containers. If omitted, the ECKOperator StatefulSet will use its default values for its containers. | - - -### ECKOperatorStatefulSetPodTemplateSpec - - - -ECKOperatorStatefulSetPodTemplateSpec is the ECKOperator StatefulSet's PodTemplateSpec - -_Appears in:_ -- [ECKOperatorStatefulSetSpec](#eckoperatorstatefulsetspec) - -| Field | Description | -| --- | --- | -| `spec` _[ECKOperatorStatefulSetPodSpec](#eckoperatorstatefulsetpodspec)_ | (Optional) Spec is the ECKOperator StatefulSet's PodSpec. | - - -### ECKOperatorStatefulSetSpec - - - -ECKOperatorStatefulSetSpec defines configuration for the ECKOperator StatefulSet. - -_Appears in:_ -- [ECKOperatorStatefulSet](#eckoperatorstatefulset) - -| Field | Description | -| --- | --- | -| `template` _[ECKOperatorStatefulSetPodTemplateSpec](#eckoperatorstatefulsetpodtemplatespec)_ | (Optional) Template describes the ECKOperator StatefulSet pod that will be created. | - - -### EGWDeploymentContainer - - - -EGWDeploymentContainer is a Egress Gateway Deployment container. - -_Appears in:_ -- [EgressGatewayDeploymentPodSpec](#egressgatewaydeploymentpodspec) - -| Field | Description | -| --- | --- | -| `name` _string_ | Name is an enum which identifies the EGW Deployment container by name.
    Supported values are: calico-egw | -| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core)_ | (Optional) Resources allows customization of limits and requests for compute resources such as cpu and memory. If specified, this overrides the named EGW Deployment container's resources. If omitted, the EGW Deployment will use its default value for this container's resources. If used in conjunction with the deprecated ComponentResources, then this value takes precedence. | - - -### EGWDeploymentInitContainer - - - -EGWDeploymentInitContainer is a Egress Gateway Deployment init container. - -_Appears in:_ -- [EgressGatewayDeploymentPodSpec](#egressgatewaydeploymentpodspec) - -| Field | Description | -| --- | --- | -| `name` _string_ | Name is an enum which identifies the EGW Deployment init container by name.
    Supported values are: egress-gateway-init | -| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core)_ | (Optional) Resources allows customization of limits and requests for compute resources such as cpu and memory. If specified, this overrides the named EGW Deployment init container's resources. If omitted, the EGW Deployment will use its default value for this init container's resources. If used in conjunction with the deprecated ComponentResources, then this value takes precedence. | - - -### EKSLogForwarderDeployment - - - -EKSLogForwarderDeployment is the configuration for the EKSLogForwarder Deployment. - -_Appears in:_ -- [LogCollectorSpec](#logcollectorspec) - -| Field | Description | -| --- | --- | -| `spec` _[EKSLogForwarderDeploymentSpec](#ekslogforwarderdeploymentspec)_ | (Optional) Spec is the specification of the EKSLogForwarder Deployment. | - - -### EKSLogForwarderDeploymentContainer - - - -EKSLogForwarderDeploymentContainer is a EKSLogForwarder Deployment container. - -_Appears in:_ -- [EKSLogForwarderDeploymentPodSpec](#ekslogforwarderdeploymentpodspec) - -| Field | Description | -| --- | --- | -| `name` _string_ | Name is an enum which identifies the EKSLogForwarder Deployment container by name.
    Supported values are: eks-log-forwarder | -| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core)_ | (Optional) Resources allows customization of limits and requests for compute resources such as cpu and memory. If specified, this overrides the named EKSLogForwarder Deployment container's resources. If omitted, the EKSLogForwarder Deployment will use its default value for this container's resources. | - - -### EKSLogForwarderDeploymentInitContainer - - - -EKSLogForwarderDeploymentInitContainer is a EKSLogForwarder Deployment init container. - -_Appears in:_ -- [EKSLogForwarderDeploymentPodSpec](#ekslogforwarderdeploymentpodspec) - -| Field | Description | -| --- | --- | -| `name` _string_ | Name is an enum which identifies the EKSLogForwarder Deployment init container by name.
    Supported values are: eks-log-forwarder-startup | -| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core)_ | (Optional) Resources allows customization of limits and requests for compute resources such as cpu and memory. If specified, this overrides the named EKSLogForwarder Deployment init container's resources. If omitted, the EKSLogForwarder Deployment will use its default value for this init container's resources. | - - -### EKSLogForwarderDeploymentPodSpec - - - -EKSLogForwarderDeploymentPodSpec is the EKSLogForwarder Deployment's PodSpec. - -_Appears in:_ -- [EKSLogForwarderDeploymentPodTemplateSpec](#ekslogforwarderdeploymentpodtemplatespec) - -| Field | Description | -| --- | --- | -| `initContainers` _[EKSLogForwarderDeploymentInitContainer](#ekslogforwarderdeploymentinitcontainer) array_ | (Optional) InitContainers is a list of EKSLogForwarder init containers. If specified, this overrides the specified EKSLogForwarder Deployment init containers. If omitted, the EKSLogForwarder Deployment will use its default values for its init containers. | -| `containers` _[EKSLogForwarderDeploymentContainer](#ekslogforwarderdeploymentcontainer) array_ | (Optional) Containers is a list of EKSLogForwarder containers. If specified, this overrides the specified EKSLogForwarder Deployment containers. If omitted, the EKSLogForwarder Deployment will use its default values for its containers. | - - -### EKSLogForwarderDeploymentPodTemplateSpec - - - -EKSLogForwarderDeploymentPodTemplateSpec is the EKSLogForwarder Deployment's PodTemplateSpec - -_Appears in:_ -- [EKSLogForwarderDeploymentSpec](#ekslogforwarderdeploymentspec) - -| Field | Description | -| --- | --- | -| `spec` _[EKSLogForwarderDeploymentPodSpec](#ekslogforwarderdeploymentpodspec)_ | (Optional) Spec is the EKSLogForwarder Deployment's PodSpec. | - - -### EKSLogForwarderDeploymentSpec - - - -EKSLogForwarderDeploymentSpec defines configuration for the EKSLogForwarder Deployment. - -_Appears in:_ -- [EKSLogForwarderDeployment](#ekslogforwarderdeployment) - -| Field | Description | -| --- | --- | -| `template` _[EKSLogForwarderDeploymentPodTemplateSpec](#ekslogforwarderdeploymentpodtemplatespec)_ | (Optional) Template describes the EKSLogForwarder Deployment pod that will be created. | - - -### EgressGateway - - - -EgressGateway is the Schema for the egressgateways API - -| Field | Description | -| --- | --- | -| `apiVersion` _string_ | `operator.tigera.io/v1` | -| `kind` _string_ | `EgressGateway` | -| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | -| `spec` _[EgressGatewaySpec](#egressgatewayspec)_ | | -| `status` _[EgressGatewayStatus](#egressgatewaystatus)_ | | - - -### EgressGatewayDeploymentPodSpec - - - -EgressGatewayDeploymentPodSpec is the Egress Gateway Deployment's PodSpec. - -_Appears in:_ -- [EgressGatewayDeploymentPodTemplateSpec](#egressgatewaydeploymentpodtemplatespec) - -| Field | Description | -| --- | --- | -| `initContainers` _[EGWDeploymentInitContainer](#egwdeploymentinitcontainer) array_ | (Optional) InitContainers is a list of EGW init containers. If specified, this overrides the specified EGW Deployment init containers. If omitted, the EGW Deployment will use its default values for its init containers. 
| -| `containers` _[EGWDeploymentContainer](#egwdeploymentcontainer) array_ | (Optional) Containers is a list of EGW containers. If specified, this overrides the specified EGW Deployment containers. If omitted, the EGW Deployment will use its default values for its containers. | -| `affinity` _[Affinity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#affinity-v1-core)_ | (Optional) Affinity is a group of affinity scheduling rules for the EGW pods. | -| `nodeSelector` _object (keys:string, values:string)_ | (Optional) NodeSelector gives more control over the nodes on which the Egress Gateway pods will run. | -| `terminationGracePeriodSeconds` _integer_ | (Optional) TerminationGracePeriodSeconds defines the termination grace period of the Egress Gateway pods in seconds. | -| `topologySpreadConstraints` _[TopologySpreadConstraint](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#topologyspreadconstraint-v1-core) array_ | (Optional) TopologySpreadConstraints defines how the Egress Gateway pods should be spread across different AZs. | -| `tolerations` _[Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#toleration-v1-core) array_ | (Optional) Tolerations is the egress gateway pod's tolerations. If specified, this overrides any tolerations that may be set on the EGW Deployment. If omitted, the EGW Deployment will use its default value for tolerations. | -| `priorityClassName` _string_ | (Optional) PriorityClassName allows specifying a PriorityClass resource to be used. | - - -### EgressGatewayDeploymentPodTemplateSpec - - - -EgressGatewayDeploymentPodTemplateSpec is the EGW Deployment's PodTemplateSpec - -_Appears in:_ -- [EgressGatewaySpec](#egressgatewayspec) - -| Field | Description | -| --- | --- | -| `metadata` _[EgressGatewayMetadata](#egressgatewaymetadata)_ | (Optional) Refer to Kubernetes API documentation for fields of `metadata`. | -| `spec` _[EgressGatewayDeploymentPodSpec](#egressgatewaydeploymentpodspec)_ | (Optional) Spec is the EGW Deployment's PodSpec. | - - -### EgressGatewayFailureDetection - - - -EgressGatewayFailureDetection defines the fields needed for determining Egress Gateway readiness. - -_Appears in:_ -- [EgressGatewaySpec](#egressgatewayspec) - -| Field | Description | -| --- | --- | -| `healthTimeoutDataStoreSeconds` _integer_ | (Optional) HealthTimeoutDataStoreSeconds defines how long Egress Gateway can fail to connect to the datastore before reporting not ready. This value must be greater than 0.
    Default: 90 | -| `icmpProbe` _[ICMPProbe](#icmpprobe)_ | (Optional) ICMPProbe defines outgoing ICMP probes that Egress Gateway will use to verify its upstream connection. Egress Gateway will report not ready if all fail. Timeout must be greater than interval. | -| `httpProbe` _[HTTPProbe](#httpprobe)_ | (Optional) HTTPProbe defines outgoing HTTP probes that Egress Gateway will use to verify its upstream connection. Egress Gateway will report not ready if all fail. Timeout must be greater than interval. | - - -### EgressGatewayIPPool - - - - - -_Appears in:_ -- [EgressGatewaySpec](#egressgatewayspec) - -| Field | Description | -| --- | --- | -| `name` _string_ | (Optional) Name is the name of the IPPool that the Egress Gateways can use. | -| `cidr` _string_ | (Optional) CIDR is the IPPool CIDR that the Egress Gateways can use. | - - -### EgressGatewayMetadata - - - -EgressGatewayMetadata contains the standard Kubernetes labels and annotations fields. - -_Appears in:_ -- [EgressGatewayDeploymentPodTemplateSpec](#egressgatewaydeploymentpodtemplatespec) - -| Field | Description | -| --- | --- | -| `labels` _object (keys:string, values:string)_ | (Optional) Labels is a map of string keys and values that may match replica set and service selectors. Each of these key/value pairs are added to the object's labels provided the key does not already exist in the object's labels. If not specified, this defaults to projectcalico.org/egw:[name], where [name] is the name of the Egress Gateway resource. | -| `annotations` _object (keys:string, values:string)_ | (Optional) Annotations is a map of arbitrary non-identifying metadata. Each of these key/value pairs are added to the object's annotations provided the key does not already exist in the object's annotations. | - - -### EgressGatewaySpec - - - -EgressGatewaySpec defines the desired state of EgressGateway - -_Appears in:_ -- [EgressGateway](#egressgateway) - -| Field | Description | -| --- | --- | -| `replicas` _integer_ | (Optional) Replicas defines how many instances of the Egress Gateway pod will run. | -| `ipPools` _[EgressGatewayIPPool](#egressgatewayippool) array_ | IPPools defines the IP Pools that the Egress Gateway pods should be using. Either name or CIDR must be specified. IPPools must match existing IPPools. | -| `externalNetworks` _string array_ | (Optional) ExternalNetworks defines the external network names this Egress Gateway is associated with. ExternalNetworks must match existing external networks. | -| `logSeverity` _[LogLevel](#loglevel)_ | (Optional) LogSeverity defines the logging level of the Egress Gateway.
    Default: Info | -| `template` _[EgressGatewayDeploymentPodTemplateSpec](#egressgatewaydeploymentpodtemplatespec)_ | (Optional) Template describes the EGW Deployment pod that will be created. | -| `egressGatewayFailureDetection` _[EgressGatewayFailureDetection](#egressgatewayfailuredetection)_ | (Optional) EgressGatewayFailureDetection is used to configure how Egress Gateway determines readiness. If both ICMP, HTTP probes are defined, one ICMP probe and one HTTP probe should succeed for Egress Gateways to become ready. Otherwise one of ICMP or HTTP probe should succeed for Egress gateways to become ready if configured. | -| `aws` _[AWSEgressGateway](#awsegressgateway)_ | (Optional) AWS defines the additional configuration options for Egress Gateways on AWS. | - - -### EgressGatewayStatus - - - -EgressGatewayStatus defines the observed state of EgressGateway - -_Appears in:_ -- [EgressGateway](#egressgateway) - -| Field | Description | -| --- | --- | -| `state` _string_ | State provides user-readable status. | -| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#condition-v1-meta) array_ | (Optional) Conditions represents the latest observed set of conditions for the component. A component may be one or more of Ready, Progressing, Degraded or other customer types. | - - -### EksCloudwatchLogsSpec - - - -EksConfigSpec defines configuration for fetching EKS audit logs. - -_Appears in:_ -- [AdditionalLogSourceSpec](#additionallogsourcespec) - -| Field | Description | -| --- | --- | -| `region` _string_ | AWS Region EKS cluster is hosted in. | -| `groupName` _string_ | Cloudwatch log-group name containing EKS audit logs. | -| `streamPrefix` _string_ | (Optional) Prefix of Cloudwatch log stream containing EKS audit logs in the log-group.
    Default: kube-apiserver-audit- | -| `fetchInterval` _integer_ | (Optional) Cloudwatch audit logs fetching interval in seconds.
    Default: 60 | - - -### ElasticsearchMetricsDeployment - - - -ElasticsearchMetricsDeployment is the configuration for the tigera-elasticsearch-metric Deployment. - -_Appears in:_ -- [LogStorageSpec](#logstoragespec) - -| Field | Description | -| --- | --- | -| `spec` _[ElasticsearchMetricsDeploymentSpec](#elasticsearchmetricsdeploymentspec)_ | (Optional) Spec is the specification of the ElasticsearchMetrics Deployment. | - - -### ElasticsearchMetricsDeploymentContainer - - - -ElasticsearchMetricsDeploymentContainer is a ElasticsearchMetricsDeployment container. - -_Appears in:_ -- [ElasticsearchMetricsDeploymentPodSpec](#elasticsearchmetricsdeploymentpodspec) - -| Field | Description | -| --- | --- | -| `name` _string_ | Name is an enum which identifies the ElasticsearchMetricsDeployment container by name.
    Supported values are: tigera-elasticsearch-metrics | -| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core)_ | (Optional) Resources allows customization of limits and requests for compute resources such as cpu and memory. If specified, this overrides the named ElasticsearchMetricsDeployment container's resources. If omitted, the ElasticsearchMetrics Deployment will use its default value for this container's resources. | - - -### ElasticsearchMetricsDeploymentInitContainer - - - -ElasticsearchMetricsDeploymentInitContainer is a ElasticsearchMetricsDeployment init container. - -_Appears in:_ -- [ElasticsearchMetricsDeploymentPodSpec](#elasticsearchmetricsdeploymentpodspec) - -| Field | Description | -| --- | --- | -| `name` _string_ | Name is an enum which identifies the ElasticsearchMetricsDeployment init container by name.
    Supported values are: tigera-ee-elasticsearch-metrics-tls-key-cert-provisioner | -| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core)_ | (Optional) Resources allows customization of limits and requests for compute resources such as cpu and memory. If specified, this overrides the named ElasticsearchMetricsDeployment init container's resources. If omitted, the ElasticsearchMetrics Deployment will use its default value for this init container's resources. | - - -### ElasticsearchMetricsDeploymentPodSpec - - - -ElasticsearchMetricsDeploymentPodSpec is the tElasticsearchMetricsDeployment's PodSpec. - -_Appears in:_ -- [ElasticsearchMetricsDeploymentPodTemplateSpec](#elasticsearchmetricsdeploymentpodtemplatespec) - -| Field | Description | -| --- | --- | -| `initContainers` _[ElasticsearchMetricsDeploymentInitContainer](#elasticsearchmetricsdeploymentinitcontainer) array_ | (Optional) InitContainers is a list of ElasticsearchMetricsDeployment init containers. If specified, this overrides the specified ElasticsearchMetricsDeployment init containers. If omitted, the ElasticsearchMetrics Deployment will use its default values for its init containers. | -| `containers` _[ElasticsearchMetricsDeploymentContainer](#elasticsearchmetricsdeploymentcontainer) array_ | (Optional) Containers is a list of ElasticsearchMetricsDeployment containers. If specified, this overrides the specified ElasticsearchMetricsDeployment containers. If omitted, the ElasticsearchMetrics Deployment will use its default values for its containers. | - - -### ElasticsearchMetricsDeploymentPodTemplateSpec - - - -ElasticsearchMetricsDeploymentPodTemplateSpec is the ElasticsearchMetricsDeployment's PodTemplateSpec - -_Appears in:_ -- [ElasticsearchMetricsDeploymentSpec](#elasticsearchmetricsdeploymentspec) - -| Field | Description | -| --- | --- | -| `spec` _[ElasticsearchMetricsDeploymentPodSpec](#elasticsearchmetricsdeploymentpodspec)_ | (Optional) Spec is the ElasticsearchMetrics Deployment's PodSpec. | - - -### ElasticsearchMetricsDeploymentSpec - - - -ElasticsearchMetricsDeploymentSpec defines configuration for the ElasticsearchMetricsDeployment Deployment. - -_Appears in:_ -- [ElasticsearchMetricsDeployment](#elasticsearchmetricsdeployment) - -| Field | Description | -| --- | --- | -| `template` _[ElasticsearchMetricsDeploymentPodTemplateSpec](#elasticsearchmetricsdeploymentpodtemplatespec)_ | (Optional) Template describes the ElasticsearchMetrics Deployment pod that will be created. | - - -### EmailVerificationType - -_Underlying type:_ _string_ - - - -_Appears in:_ -- [AuthenticationOIDC](#authenticationoidc) - -| Value | Description | -| --- | --- | -| `Verify` | | -| `InsecureSkip` | | - - -### EncapsulationType - -_Underlying type:_ _string_ - -EncapsulationType is the type of encapsulation to use on an IP pool. - -One of: IPIP, VXLAN, IPIPCrossSubnet, VXLANCrossSubnet, None - -_Appears in:_ -- [IPPool](#ippool) - -| Value | Description | -| --- | --- | -| `IPIPCrossSubnet` | | -| `IPIP` | | -| `VXLAN` | | -| `VXLANCrossSubnet` | | -| `None` | | - - -### EncryptionOption - -_Underlying type:_ _string_ - -EncryptionOption specifies the traffic encryption mode when connecting to a Syslog server. 
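For example, the syslog forwarder configured under a LogCollector's additional stores selects this option to enable TLS to the syslog server. The snippet below is an illustrative sketch only: the `endpoint`, `logTypes`, and `encryption` field names under `additionalStores.syslog` are assumptions to be checked against the SyslogStoreSpec reference, and the supported encryption values are listed below.

```yaml
# Illustrative sketch: field names under additionalStores.syslog are assumptions;
# verify against the SyslogStoreSpec section of this reference.
apiVersion: operator.tigera.io/v1
kind: LogCollector
metadata:
  name: tigera-secure
spec:
  additionalStores:
    syslog:
      # Syslog server to forward logs to.
      endpoint: tcp://syslog.example.com:601
      logTypes:
        - Flows
        - DNS
      # EncryptionOption: None or TLS.
      encryption: TLS
```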
- -One of: None, TLS - -_Appears in:_ -- [SyslogStoreSpec](#syslogstorespec) - -| Value | Description | -| --- | --- | -| `None` | | -| `TLS` | | - - -### Endpoint - - - -Endpoint contains a subset of relevant fields from the Prometheus Endpoint struct. - -_Appears in:_ -- [ServiceMonitor](#servicemonitor) - -| Field | Description | -| --- | --- | -| `params` _object (keys:string, values:string array)_ | Optional HTTP URL parameters
    Default: scrape all metrics. | -| `bearerTokenSecret` _[SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#secretkeyselector-v1-core)_ | Secret to mount to read bearer token for scraping targets. Recommended: when unset, the operator will create a Secret, a ClusterRole and a ClusterRoleBinding. | -| `interval` _[Duration](#duration)_ | Interval at which metrics should be scraped. If not specified Prometheus' global scrape interval is used. | -| `scrapeTimeout` _[Duration](#duration)_ | Timeout after which the scrape is ended. If not specified, the Prometheus global scrape timeout is used unless it is less than `Interval` in which the latter is used. | -| `honorLabels` _boolean_ | HonorLabels chooses the metric's labels on collisions with target labels. | -| `honorTimestamps` _boolean_ | HonorTimestamps controls whether Prometheus respects the timestamps present in scraped data. | -| `metricRelabelings` _RelabelConfig array_ | MetricRelabelConfigs to apply to samples before ingestion. | -| `relabelings` _RelabelConfig array_ | RelabelConfigs to apply to samples before scraping. Prometheus Operator automatically adds relabelings for a few standard Kubernetes fields. The original scrape job's name is available via the `__tmp_prometheus_job_name` label. More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config | - - -### EnvoySettings - - - - - -_Appears in:_ -- [ApplicationLayerSpec](#applicationlayerspec) - -| Field | Description | -| --- | --- | -| `xffNumTrustedHops` _integer_ | (Optional) The number of additional ingress proxy hops from the right side of the x-forwarded-for HTTP header to trust when determining the origin client’s IP address. 0 is permitted, but >=1 is the typical setting. | -| `useRemoteAddress` _boolean_ | (Optional) If set to true, the Envoy connection manager will use the real remote address of the client connection when determining internal versus external origin and manipulating various headers. | - - -### ExternalPrometheus - - - - - -_Appears in:_ -- [MonitorSpec](#monitorspec) - -| Field | Description | -| --- | --- | -| `serviceMonitor` _[ServiceMonitor](#servicemonitor)_ | (Optional) ServiceMonitor when specified, the operator will create a ServiceMonitor object in the namespace. It is recommended that you configure labels if you want your prometheus instance to pick up the configuration automatically. The operator will configure 1 endpoint by default: - Params to scrape all metrics available in Calico Enterprise. - BearerTokenSecret (If not overridden, the operator will also create corresponding RBAC that allows authz to the metrics.) - TLSConfig, containing the caFile and serverName. | -| `namespace` _string_ | Namespace is the namespace where the operator will create resources for your Prometheus instance. The namespace must be created before the operator will create Prometheus resources. | - - -### FIPSMode - -_Underlying type:_ _string_ - - - -_Appears in:_ -- [InstallationSpec](#installationspec) - -| Value | Description | -| --- | --- | -| `Enabled` | | -| `Disabled` | | - - -### FluentdDaemonSet - - - -FluentdDaemonSet is the configuration for the Fluentd DaemonSet. - -_Appears in:_ -- [LogCollectorSpec](#logcollectorspec) - -| Field | Description | -| --- | --- | -| `spec` _[FluentdDaemonSetSpec](#fluentddaemonsetspec)_ | (Optional) Spec is the specification of the Fluentd DaemonSet. 
| - - -### FluentdDaemonSetContainer - - - -FluentdDaemonSetContainer is a Fluentd DaemonSet container. - -_Appears in:_ -- [FluentdDaemonSetPodSpec](#fluentddaemonsetpodspec) - -| Field | Description | -| --- | --- | -| `name` _string_ | Name is an enum which identifies the Fluentd DaemonSet container by name.
    Supported values are: fluentd | -| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core)_ | (Optional) Resources allows customization of limits and requests for compute resources such as cpu and memory. If specified, this overrides the named Fluentd DaemonSet container's resources. If omitted, the Fluentd DaemonSet will use its default value for this container's resources. | - - -### FluentdDaemonSetInitContainer - - - -FluentdDaemonSetInitContainer is a Fluentd DaemonSet init container. - -_Appears in:_ -- [FluentdDaemonSetPodSpec](#fluentddaemonsetpodspec) - -| Field | Description | -| --- | --- | -| `name` _string_ | Name is an enum which identifies the Fluentd DaemonSet init container by name.
    Supported values are: tigera-fluentd-prometheus-tls-key-cert-provisioner | -| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core)_ | (Optional) Resources allows customization of limits and requests for compute resources such as cpu and memory. If specified, this overrides the named Fluentd DaemonSet init container's resources. If omitted, the Fluentd DaemonSet will use its default value for this init container's resources. | - - -### FluentdDaemonSetPodSpec - - - -FluentdDaemonSetPodSpec is the Fluentd DaemonSet's PodSpec. - -_Appears in:_ -- [FluentdDaemonSetPodTemplateSpec](#fluentddaemonsetpodtemplatespec) - -| Field | Description | -| --- | --- | -| `initContainers` _[FluentdDaemonSetInitContainer](#fluentddaemonsetinitcontainer) array_ | (Optional) InitContainers is a list of Fluentd DaemonSet init containers. If specified, this overrides the specified Fluentd DaemonSet init containers. If omitted, the Fluentd DaemonSet will use its default values for its init containers. | -| `containers` _[FluentdDaemonSetContainer](#fluentddaemonsetcontainer) array_ | (Optional) Containers is a list of Fluentd DaemonSet containers. If specified, this overrides the specified Fluentd DaemonSet containers. If omitted, the Fluentd DaemonSet will use its default values for its containers. | - - -### FluentdDaemonSetPodTemplateSpec - - - -FluentdDaemonSetPodTemplateSpec is the Fluentd DaemonSet's PodTemplateSpec - -_Appears in:_ -- [FluentdDaemonSetSpec](#fluentddaemonsetspec) - -| Field | Description | -| --- | --- | -| `spec` _[FluentdDaemonSetPodSpec](#fluentddaemonsetpodspec)_ | (Optional) Spec is the Fluentd DaemonSet's PodSpec. | - - -### FluentdDaemonSetSpec - - - -FluentdDaemonSetSpec defines configuration for the Fluentd DaemonSet. - -_Appears in:_ -- [FluentdDaemonSet](#fluentddaemonset) - -| Field | Description | -| --- | --- | -| `template` _[FluentdDaemonSetPodTemplateSpec](#fluentddaemonsetpodtemplatespec)_ | (Optional) Template describes the Fluentd DaemonSet pod that will be created. | - - -### GroupSearch - - - -Group search configuration to find the groups that a user is in. - -_Appears in:_ -- [AuthenticationLDAP](#authenticationldap) - -| Field | Description | -| --- | --- | -| `baseDN` _string_ | BaseDN to start the search from. For example "cn=groups,dc=example,dc=com" | -| `filter` _string_ | (Optional) Optional filter to apply when searching the directory. For example "(objectClass=posixGroup)" | -| `nameAttribute` _string_ | The attribute of the group that represents its name. This attribute can be used to apply RBAC to a user group. | -| `userMatchers` _[UserMatch](#usermatch) array_ | Following list contains field pairs that are used to match a user to a group. It adds an additional requirement to the filter that an attribute in the group must match the user's attribute value. | - - -### GuardianDeployment - - - -GuardianDeployment is the configuration for the guardian Deployment. - -_Appears in:_ -- [ManagementClusterConnectionSpec](#managementclusterconnectionspec) - -| Field | Description | -| --- | --- | -| `spec` _[GuardianDeploymentSpec](#guardiandeploymentspec)_ | (Optional) Spec is the specification of the guardian Deployment. | - - -### GuardianDeploymentContainer - - - -GuardianDeploymentContainer is a guardian Deployment container. 
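Putting the GuardianDeployment types together, the override is set on the ManagementClusterConnection resource. The sketch below is illustrative and assumes the spec field is named `guardianDeployment`; the nesting (spec, template, spec, containers) and the resources field follow the tables in this reference, and the supported container name is listed in the table that follows.

```yaml
# Illustrative sketch: adjusts resources for the guardian container.
# Other required ManagementClusterConnection fields are omitted for brevity.
apiVersion: operator.tigera.io/v1
kind: ManagementClusterConnection
metadata:
  name: tigera-secure
spec:
  guardianDeployment:   # assumed field name for the GuardianDeployment override
    spec:
      template:
        spec:
          containers:
            - name: tigera-guardian
              resources:
                requests:
                  cpu: 100m
                  memory: 128Mi
                limits:
                  cpu: 250m
                  memory: 256Mi
```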
- -_Appears in:_ -- [GuardianDeploymentPodSpec](#guardiandeploymentpodspec) - -| Field | Description | -| --- | --- | -| `name` _string_ | Name is an enum which identifies the guardian Deployment container by name.
    Supported values are: tigera-guardian | -| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core)_ | (Optional) Resources allows customization of limits and requests for compute resources such as cpu and memory. If specified, this overrides the named guardian Deployment container's resources. If omitted, the guardian Deployment will use its default value for this container's resources. | - - -### GuardianDeploymentInitContainer - - - -GuardianDeploymentInitContainer is a guardian Deployment init container. - -_Appears in:_ -- [GuardianDeploymentPodSpec](#guardiandeploymentpodspec) - -| Field | Description | -| --- | --- | -| `name` _string_ | Name is an enum which identifies the guardian Deployment init container by name. | -| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core)_ | (Optional) Resources allows customization of limits and requests for compute resources such as cpu and memory. If specified, this overrides the named guardian Deployment init container's resources. If omitted, the guardian Deployment will use its default value for this init container's resources. | - - -### GuardianDeploymentPodSpec - - - -GuardianDeploymentPodSpec is the guardian Deployment's PodSpec. - -_Appears in:_ -- [GuardianDeploymentPodTemplateSpec](#guardiandeploymentpodtemplatespec) - -| Field | Description | -| --- | --- | -| `initContainers` _[GuardianDeploymentInitContainer](#guardiandeploymentinitcontainer) array_ | (Optional) InitContainers is a list of guardian init containers. If specified, this overrides the specified guardian Deployment init containers. If omitted, the guardian Deployment will use its default values for its init containers. | -| `containers` _[GuardianDeploymentContainer](#guardiandeploymentcontainer) array_ | (Optional) Containers is a list of guardian containers. If specified, this overrides the specified guardian Deployment containers. If omitted, the guardian Deployment will use its default values for its containers. | - - -### GuardianDeploymentPodTemplateSpec - - - -GuardianDeploymentPodTemplateSpec is the guardian Deployment's PodTemplateSpec - -_Appears in:_ -- [GuardianDeploymentSpec](#guardiandeploymentspec) - -| Field | Description | -| --- | --- | -| `spec` _[GuardianDeploymentPodSpec](#guardiandeploymentpodspec)_ | (Optional) Spec is the guardian Deployment's PodSpec. | - - -### GuardianDeploymentSpec - - - -GuardianDeploymentSpec defines configuration for the guardian Deployment. - -_Appears in:_ -- [GuardianDeployment](#guardiandeployment) - -| Field | Description | -| --- | --- | -| `template` _[GuardianDeploymentPodTemplateSpec](#guardiandeploymentpodtemplatespec)_ | (Optional) Template describes the guardian Deployment pod that will be created. | - - -### HTTPProbe - - - -HTTPProbe defines the HTTP probe configuration for Egress Gateway. - -_Appears in:_ -- [EgressGatewayFailureDetection](#egressgatewayfailuredetection) - -| Field | Description | -| --- | --- | -| `urls` _string array_ | URLs define the list of HTTP probe URLs. Egress Gateway will probe each URL periodically.If all probes fail, Egress Gateway will report non-ready. | -| `intervalSeconds` _integer_ | (Optional) IntervalSeconds defines the interval of HTTP probes. Used when URLs is non-empty.
    Default: 10 | -| `timeoutSeconds` _integer_ | (Optional) TimeoutSeconds defines the timeout value of HTTP probes. Used when URLs is non-empty.
    Default: 30 | - - -### HostPortsType - -_Underlying type:_ _string_ - -HostPortsType specifies host port support. - -One of: Enabled, Disabled - -_Appears in:_ -- [CalicoNetworkSpec](#caliconetworkspec) - -| Value | Description | -| --- | --- | -| `Enabled` | | -| `Disabled` | | - - -### ICMPProbe - - - -ICMPProbe defines the ICMP probe configuration for Egress Gateway. - -_Appears in:_ -- [EgressGatewayFailureDetection](#egressgatewayfailuredetection) - -| Field | Description | -| --- | --- | -| `ips` _string array_ | IPs define the list of ICMP probe IPs. Egress Gateway will probe each IP periodically. If all probes fail, Egress Gateway will report non-ready. | -| `intervalSeconds` _integer_ | (Optional) IntervalSeconds defines the interval of ICMP probes. Used when IPs is non-empty.
    Default: 5 | -| `timeoutSeconds` _integer_ | (Optional) TimeoutSeconds defines the timeout value of ICMP probes. Used when IPs is non-empty.
    Default: 15 | - - -### IPAMPluginType - -_Underlying type:_ _string_ - - - -_Appears in:_ -- [IPAMSpec](#ipamspec) - -| Value | Description | -| --- | --- | -| `Calico` | | -| `HostLocal` | | -| `AmazonVPC` | | -| `AzureVNET` | | - - -### IPAMSpec - - - -IPAMSpec contains configuration for pod IP address management. - -_Appears in:_ -- [CNISpec](#cnispec) - -| Field | Description | -| --- | --- | -| `type` _[IPAMPluginType](#ipamplugintype)_ | Specifies the IPAM plugin that will be used in the Calico or Calico Enterprise installation. * For CNI Plugin Calico, this field defaults to Calico. * For CNI Plugin GKE, this field defaults to HostLocal. * For CNI Plugin AzureVNET, this field defaults to AzureVNET. * For CNI Plugin AmazonVPC, this field defaults to AmazonVPC. The IPAM plugin is installed and configured only if the CNI plugin is set to Calico, for all other values of the CNI plugin the plugin binaries and CNI config is a dependency that is expected to be installed separately.
    Default: Calico | - - -### IPPool - - - - - -_Appears in:_ -- [CalicoNetworkSpec](#caliconetworkspec) - -| Field | Description | -| --- | --- | -| `name` _string_ | Name is the name of the IP pool. If omitted, this will be generated. | -| `cidr` _string_ | CIDR contains the address range for the IP Pool in classless inter-domain routing format. | -| `encapsulation` _[EncapsulationType](#encapsulationtype)_ | (Optional) Encapsulation specifies the encapsulation type that will be used with the IP Pool.
    Default: IPIP | -| `natOutgoing` _[NATOutgoingType](#natoutgoingtype)_ | (Optional) NATOutgoing specifies if NAT will be enabled or disabled for outgoing traffic.
    Default: Enabled | -| `nodeSelector` _string_ | (Optional) NodeSelector specifies the node selector that will be set for the IP Pool.
    Default: 'all()' | -| `blockSize` _integer_ | (Optional) BlockSize specifies the CIDR prefix length to use when allocating per-node IP blocks from the main IP pool CIDR.
    Default: 26 (IPv4), 122 (IPv6) | -| `disableBGPExport` _boolean_ | (Optional) DisableBGPExport specifies whether routes from this IP pool's CIDR are exported over BGP.
    Default: false | -| `allowedUses` _[IPPoolAllowedUse](#ippoolalloweduse) array_ | AllowedUse controls what the IP pool will be used for. If not specified or empty, defaults to ["Tunnel", "Workload"] for back-compatibility | - - -### IPPoolAllowedUse - -_Underlying type:_ _string_ - - - -_Appears in:_ -- [IPPool](#ippool) - -| Value | Description | -| --- | --- | -| `Workload` | | -| `Tunnel` | | - - -### Image - - - - - -_Appears in:_ -- [ImageSetSpec](#imagesetspec) - -| Field | Description | -| --- | --- | -| `image` _string_ | Image is an image that the operator deploys and instead of using the built in tag the operator will use the Digest for the image identifier. The value should be the image name without registry or tag or digest. For the image `docker.io/calico/node:v3.17.1` it should be represented as `calico/node` | -| `digest` _string_ | Digest is the image identifier that will be used for the Image. The field should not include a leading `@` and must be prefixed with `sha256:`. | - - -### ImageSet - - - -ImageSet is used to specify image digests for the images that the operator deploys. -The name of the ImageSet is expected to be in the format `-`. -The `variant` used is `enterprise` if the InstallationSpec Variant is -`TigeraSecureEnterprise` otherwise it is `calico`. -The `release` must match the version of the variant that the operator is built to deploy, -this version can be obtained by passing the `--version` flag to the operator binary. - -| Field | Description | -| --- | --- | -| `apiVersion` _string_ | `operator.tigera.io/v1` | -| `kind` _string_ | `ImageSet` | -| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | -| `spec` _[ImageSetSpec](#imagesetspec)_ | | - - -### ImageSetSpec - - - -ImageSetSpec defines the desired state of ImageSet. - -_Appears in:_ -- [ImageSet](#imageset) - -| Field | Description | -| --- | --- | -| `images` _[Image](#image) array_ | Images is the list of images to use digests. All images that the operator will deploy must be specified. | - - - - -### Indices - - - -Indices defines the configuration for the indices in an Elasticsearch cluster. - -_Appears in:_ -- [LogStorageSpec](#logstoragespec) - -| Field | Description | -| --- | --- | -| `replicas` _integer_ | (Optional) Replicas defines how many replicas each index will have. See https://www.elastic.co/guide/en/elasticsearch/reference/current/scalability.html | - - -### Installation - - - -Installation configures an installation of Calico or Calico Enterprise. At most one instance -of this resource is supported. It must be named "default". The Installation API installs core networking -and network policy components, and provides general install-time configuration. - -| Field | Description | -| --- | --- | -| `apiVersion` _string_ | `operator.tigera.io/v1` | -| `kind` _string_ | `Installation` | -| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | -| `spec` _[InstallationSpec](#installationspec)_ | Specification of the desired state for the Calico or Calico Enterprise installation. | -| `status` _[InstallationStatus](#installationstatus)_ | Most recently observed state for the Calico or Calico Enterprise installation. 
| - - -### InstallationSpec - - - -InstallationSpec defines configuration for a Calico or Calico Enterprise installation. - -_Appears in:_ -- [Installation](#installation) -- [InstallationStatus](#installationstatus) - -| Field | Description | -| --- | --- | -| `variant` _[ProductVariant](#productvariant)_ | (Optional) Variant is the product to install - one of Calico or TigeraSecureEnterprise
    Default: Calico | -| `registry` _string_ | (Optional) Registry is the default Docker registry used for component Docker images. If specified then the given value must end with a slash character (`/`) and all images will be pulled from this registry. If not specified then the default registries will be used. A special case value, UseDefault, is supported to explicitly specify the default registries will be used. Image format: `/:` This option allows configuring the `` portion of the above format. | -| `imagePath` _string_ | (Optional) ImagePath allows for the path part of an image to be specified. If specified then the specified value will be used as the image path for each image. If not specified or empty, the default for each image will be used. A special case value, UseDefault, is supported to explicitly specify the default image path will be used for each image. Image format: `/:` This option allows configuring the `` portion of the above format. | -| `imagePrefix` _string_ | (Optional) ImagePrefix allows for the prefix part of an image to be specified. If specified then the given value will be used as a prefix on each image. If not specified or empty, no prefix will be used. A special case value, UseDefault, is supported to explicitly specify the default image prefix will be used for each image. Image format: `/:` This option allows configuring the `` portion of the above format. | -| `imagePullSecrets` _[LocalObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#localobjectreference-v1-core) array_ | (Optional) ImagePullSecrets is an array of references to container registry pull secrets to use. These are applied to all images to be pulled. | -| `kubernetesProvider` _[Provider](#provider)_ | (Optional) KubernetesProvider specifies a particular provider of the Kubernetes platform and enables provider-specific configuration. If the specified value is empty, the Operator will attempt to automatically determine the current provider. If the specified value is not empty, the Operator will still attempt auto-detection, but will additionally compare the auto-detected value to the specified value to confirm they match. | -| `cni` _[CNISpec](#cnispec)_ | (Optional) CNI specifies the CNI that will be used by this installation. | -| `calicoNetwork` _[CalicoNetworkSpec](#caliconetworkspec)_ | (Optional) CalicoNetwork specifies networking configuration options for Calico. | -| `typhaAffinity` _[TyphaAffinity](#typhaaffinity)_ | (Optional) Deprecated. Please use Installation.Spec.TyphaDeployment instead. TyphaAffinity allows configuration of node affinity characteristics for Typha pods. | -| `controlPlaneNodeSelector` _object (keys:string, values:string)_ | (Optional) ControlPlaneNodeSelector is used to select control plane nodes on which to run Calico components. This is globally applied to all resources created by the operator excluding daemonsets. | -| `controlPlaneTolerations` _[Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#toleration-v1-core) array_ | (Optional) ControlPlaneTolerations specify tolerations which are then globally applied to all resources created by the operator. | -| `controlPlaneReplicas` _integer_ | (Optional) ControlPlaneReplicas defines how many replicas of the control plane core components will be deployed. This field applies to all control plane components that support High Availability. Defaults to 2. 
| -| `nodeMetricsPort` _integer_ | (Optional) NodeMetricsPort specifies which port calico/node serves prometheus metrics on. By default, metrics are not enabled. If specified, this overrides any FelixConfiguration resources which may exist. If omitted, then prometheus metrics may still be configured through FelixConfiguration. | -| `typhaMetricsPort` _integer_ | (Optional) TyphaMetricsPort specifies which port calico/typha serves prometheus metrics on. By default, metrics are not enabled. | -| `flexVolumePath` _string_ | (Optional) FlexVolumePath optionally specifies a custom path for FlexVolume. If not specified, FlexVolume will be enabled by default. If set to 'None', FlexVolume will be disabled. The default is based on the kubernetesProvider. | -| `kubeletVolumePluginPath` _string_ | (Optional) KubeletVolumePluginPath optionally specifies enablement of Calico CSI plugin. If not specified, CSI will be enabled by default. If set to 'None', CSI will be disabled.
    Default: /var/lib/kubelet | -| `nodeUpdateStrategy` _[DaemonSetUpdateStrategy](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#daemonsetupdatestrategy-v1-apps)_ | (Optional) NodeUpdateStrategy can be used to customize the desired update strategy, such as the MaxUnavailable field. | -| `componentResources` _[ComponentResource](#componentresource) array_ | (Optional) Deprecated. Please use CalicoNodeDaemonSet, TyphaDeployment, and KubeControllersDeployment. ComponentResources can be used to customize the resource requirements for each component. Node, Typha, and KubeControllers are supported for installations. | -| `certificateManagement` _[CertificateManagement](#certificatemanagement)_ | (Optional) CertificateManagement configures pods to submit a CertificateSigningRequest to the certificates.k8s.io/v1beta1 API in order to obtain TLS certificates. This feature requires that you bring your own CSR signing and approval process, otherwise pods will be stuck during initialization. | -| `nonPrivileged` _[NonPrivilegedType](#nonprivilegedtype)_ | (Optional) NonPrivileged configures Calico to be run in non-privileged containers as non-root users where possible. | -| `calicoNodeDaemonSet` _[CalicoNodeDaemonSet](#caliconodedaemonset)_ | CalicoNodeDaemonSet configures the calico-node DaemonSet. If used in conjunction with the deprecated ComponentResources, then these overrides take precedence. | -| `csiNodeDriverDaemonSet` _[CSINodeDriverDaemonSet](#csinodedriverdaemonset)_ | CSINodeDriverDaemonSet configures the csi-node-driver DaemonSet. | -| `calicoKubeControllersDeployment` _[CalicoKubeControllersDeployment](#calicokubecontrollersdeployment)_ | CalicoKubeControllersDeployment configures the calico-kube-controllers Deployment. If used in conjunction with the deprecated ComponentResources, then these overrides take precedence. | -| `typhaDeployment` _[TyphaDeployment](#typhadeployment)_ | TyphaDeployment configures the typha Deployment. If used in conjunction with the deprecated ComponentResources or TyphaAffinity, then these overrides take precedence. | -| `calicoWindowsUpgradeDaemonSet` _[CalicoWindowsUpgradeDaemonSet](#calicowindowsupgradedaemonset)_ | Deprecated. The CalicoWindowsUpgradeDaemonSet is deprecated and will be removed from the API in the future. CalicoWindowsUpgradeDaemonSet configures the calico-windows-upgrade DaemonSet. | -| `calicoNodeWindowsDaemonSet` _[CalicoNodeWindowsDaemonSet](#caliconodewindowsdaemonset)_ | CalicoNodeWindowsDaemonSet configures the calico-node-windows DaemonSet. | -| `fipsMode` _[FIPSMode](#fipsmode)_ | (Optional) FIPSMode uses images and features only that are using FIPS 140-2 validated cryptographic modules and standards.
    Default: Disabled | -| `logging` _[Logging](#logging)_ | (Optional) Logging Configuration for Components | -| `windowsNodes` _[WindowsNodeSpec](#windowsnodespec)_ | (Optional) Windows Configuration | -| `serviceCIDRs` _string array_ | (Optional) Kubernetes Service CIDRs. Specifying this is required when using Calico for Windows. | - - -### InstallationStatus - - - -InstallationStatus defines the observed state of the Calico or Calico Enterprise installation. - -_Appears in:_ -- [Installation](#installation) - -| Field | Description | -| --- | --- | -| `variant` _[ProductVariant](#productvariant)_ | Variant is the most recently observed installed variant - one of Calico or TigeraSecureEnterprise | -| `mtu` _integer_ | MTU is the most recently observed value for pod network MTU. This may be an explicitly configured value, or based on Calico's native auto-detetion. | -| `imageSet` _string_ | (Optional) ImageSet is the name of the ImageSet being used, if there is an ImageSet that is being used. If an ImageSet is not being used then this will not be set. | -| `computed` _[InstallationSpec](#installationspec)_ | (Optional) Computed is the final installation including overlaid resources. | -| `calicoVersion` _string_ | CalicoVersion shows the current running version of calico. CalicoVersion along with Variant is needed to know the exact version deployed. | -| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#condition-v1-meta) array_ | (Optional) Conditions represents the latest observed set of conditions for the component. A component may be one or more of Ready, Progressing, Degraded or other customer types. | - - -### IntrusionDetection - - - -IntrusionDetection installs the components required for Tigera intrusion detection. At most one instance -of this resource is supported. It must be named "tigera-secure". - -| Field | Description | -| --- | --- | -| `apiVersion` _string_ | `operator.tigera.io/v1` | -| `kind` _string_ | `IntrusionDetection` | -| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | -| `spec` _[IntrusionDetectionSpec](#intrusiondetectionspec)_ | Specification of the desired state for Tigera intrusion detection. | -| `status` _[IntrusionDetectionStatus](#intrusiondetectionstatus)_ | Most recently observed state for Tigera intrusion detection. | - - -### IntrusionDetectionComponentName - -_Underlying type:_ _string_ - - - -_Appears in:_ -- [IntrusionDetectionComponentResource](#intrusiondetectioncomponentresource) - -| Value | Description | -| --- | --- | -| `DeepPacketInspection` | | - - -### IntrusionDetectionComponentResource - - - -The ComponentResource struct associates a ResourceRequirements with a component by name - -_Appears in:_ -- [IntrusionDetectionSpec](#intrusiondetectionspec) - -| Field | Description | -| --- | --- | -| `componentName` _[IntrusionDetectionComponentName](#intrusiondetectioncomponentname)_ | ComponentName is an enum which identifies the component | -| `resourceRequirements` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core)_ | ResourceRequirements allows customization of limits and requests for compute resources such as cpu and memory. | - - -### IntrusionDetectionControllerDeployment - - - -IntrusionDetectionControllerDeployment is the configuration for the IntrusionDetectionController Deployment. 
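As a rough illustration of how this override nests inside the IntrusionDetection resource (a minimal sketch; the container name comes from the supported values listed below, and the resource quantities are placeholders):

```yaml
apiVersion: operator.tigera.io/v1
kind: IntrusionDetection
metadata:
  name: tigera-secure
spec:
  intrusionDetectionControllerDeployment:
    spec:
      template:
        spec:
          containers:
            # Override resources for the intrusion detection controller container.
            - name: controller
              resources:
                requests:
                  cpu: 100m
                  memory: 128Mi
```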
- -_Appears in:_ -- [IntrusionDetectionSpec](#intrusiondetectionspec) - -| Field | Description | -| --- | --- | -| `spec` _[IntrusionDetectionControllerDeploymentSpec](#intrusiondetectioncontrollerdeploymentspec)_ | (Optional) Spec is the specification of the IntrusionDetectionController Deployment. | - - -### IntrusionDetectionControllerDeploymentContainer - - - -IntrusionDetectionControllerDeploymentContainer is an IntrusionDetectionController Deployment container. - -_Appears in:_ -- [IntrusionDetectionControllerDeploymentPodSpec](#intrusiondetectioncontrollerdeploymentpodspec) - -| Field | Description | -| --- | --- | -| `name` _string_ | Name is an enum which identifies the IntrusionDetectionController Deployment container by name.
    Supported values are: controller, webhooks-processor | -| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core)_ | (Optional) Resources allows customization of limits and requests for compute resources such as cpu and memory. If specified, this overrides the named IntrusionDetectionController Deployment container's resources. If omitted, the IntrusionDetectionController Deployment will use its default value for this container's resources. | - - -### IntrusionDetectionControllerDeploymentInitContainer - - - -IntrusionDetectionControllerDeploymentInitContainer is an IntrusionDetectionController Deployment init container. - -_Appears in:_ -- [IntrusionDetectionControllerDeploymentPodSpec](#intrusiondetectioncontrollerdeploymentpodspec) - -| Field | Description | -| --- | --- | -| `name` _string_ | Name is an enum which identifies the IntrusionDetectionController Deployment init container by name.
    Supported values are: intrusion-detection-tls-key-cert-provisioner | -| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core)_ | (Optional) Resources allows customization of limits and requests for compute resources such as cpu and memory. If specified, this overrides the named IntrusionDetectionController Deployment init container's resources. If omitted, the IntrusionDetectionController Deployment will use its default value for this init container's resources. | - - -### IntrusionDetectionControllerDeploymentPodSpec - - - -IntrusionDetectionControllerDeploymentPodSpec is the IntrusionDetectionController Deployment's PodSpec. - -_Appears in:_ -- [IntrusionDetectionControllerDeploymentPodTemplateSpec](#intrusiondetectioncontrollerdeploymentpodtemplatespec) - -| Field | Description | -| --- | --- | -| `initContainers` _[IntrusionDetectionControllerDeploymentInitContainer](#intrusiondetectioncontrollerdeploymentinitcontainer) array_ | (Optional) InitContainers is a list of IntrusionDetectionController init containers. If specified, this overrides the specified IntrusionDetectionController Deployment init containers. If omitted, the IntrusionDetectionController Deployment will use its default values for its init containers. | -| `containers` _[IntrusionDetectionControllerDeploymentContainer](#intrusiondetectioncontrollerdeploymentcontainer) array_ | (Optional) Containers is a list of IntrusionDetectionController containers. If specified, this overrides the specified IntrusionDetectionController Deployment containers. If omitted, the IntrusionDetectionController Deployment will use its default values for its containers. | - - -### IntrusionDetectionControllerDeploymentPodTemplateSpec - - - -IntrusionDetectionControllerDeploymentPodTemplateSpec is the IntrusionDetectionController Deployment's PodTemplateSpec - -_Appears in:_ -- [IntrusionDetectionControllerDeploymentSpec](#intrusiondetectioncontrollerdeploymentspec) - -| Field | Description | -| --- | --- | -| `spec` _[IntrusionDetectionControllerDeploymentPodSpec](#intrusiondetectioncontrollerdeploymentpodspec)_ | (Optional) Spec is the IntrusionDetectionController Deployment's PodSpec. | - - -### IntrusionDetectionControllerDeploymentSpec - - - -IntrusionDetectionControllerDeploymentSpec defines configuration for the IntrusionDetectionController Deployment. - -_Appears in:_ -- [IntrusionDetectionControllerDeployment](#intrusiondetectioncontrollerdeployment) - -| Field | Description | -| --- | --- | -| `template` _[IntrusionDetectionControllerDeploymentPodTemplateSpec](#intrusiondetectioncontrollerdeploymentpodtemplatespec)_ | (Optional) Template describes the IntrusionDetectionController Deployment pod that will be created. | - - -### IntrusionDetectionSpec - - - -IntrusionDetectionSpec defines the desired state of Tigera intrusion detection capabilities. - -_Appears in:_ -- [IntrusionDetection](#intrusiondetection) - -| Field | Description | -| --- | --- | -| `componentResources` _[IntrusionDetectionComponentResource](#intrusiondetectioncomponentresource) array_ | (Optional) ComponentResources can be used to customize the resource requirements for each component. Only DeepPacketInspection is supported for this spec. | -| `anomalyDetection` _[AnomalyDetectionSpec](#anomalydetectionspec)_ | (Optional) AnomalyDetection is now deprecated, and configuring it has no effect. 
| -| `intrusionDetectionControllerDeployment` _[IntrusionDetectionControllerDeployment](#intrusiondetectioncontrollerdeployment)_ | (Optional) IntrusionDetectionControllerDeployment configures the IntrusionDetection Controller Deployment. | - - -### IntrusionDetectionStatus - - - -IntrusionDetectionStatus defines the observed state of Tigera intrusion detection capabilities. - -_Appears in:_ -- [IntrusionDetection](#intrusiondetection) - -| Field | Description | -| --- | --- | -| `state` _string_ | State provides user-readable status. | -| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#condition-v1-meta) array_ | (Optional) Conditions represents the latest observed set of conditions for the component. A component may be one or more of Ready, Progressing, Degraded or other customer types. | - - -### Kibana - - - -Kibana is the configuration for the Kibana. - -_Appears in:_ -- [LogStorageSpec](#logstoragespec) - -| Field | Description | -| --- | --- | -| `spec` _[KibanaSpec](#kibanaspec)_ | (Optional) Spec is the specification of the Kibana. | - - -### KibanaContainer - - - -KibanaContainer is a Kibana container. - -_Appears in:_ -- [KibanaPodSpec](#kibanapodspec) - -| Field | Description | -| --- | --- | -| `name` _string_ | Name is an enum which identifies the Kibana Deployment container by name.
    Supported values are: kibana | -| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core)_ | (Optional) Resources allows customization of limits and requests for compute resources such as cpu and memory. If specified, this overrides the named Kibana container's resources. If omitted, the Kibana Deployment will use its default value for this container's resources. | - - -### KibanaInitContainer - - - -KibanaInitContainer is a Kibana init container. - -_Appears in:_ -- [KibanaPodSpec](#kibanapodspec) - -| Field | Description | -| --- | --- | -| `name` _string_ | Name is an enum which identifies the Kibana init container by name.
    Supported values are: key-cert-provisioner | -| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core)_ | (Optional) Resources allows customization of limits and requests for compute resources such as cpu and memory. If specified, this overrides the named Kibana Deployment init container's resources. If omitted, the Kibana Deployment will use its default value for this init container's resources. If used in conjunction with the deprecated ComponentResources, then this value takes precedence. | - - -### KibanaPodSpec - - - -KibanaPodSpec is the Kibana Deployment's PodSpec. - -_Appears in:_ -- [KibanaPodTemplateSpec](#kibanapodtemplatespec) - -| Field | Description | -| --- | --- | -| `initContainers` _[KibanaInitContainer](#kibanainitcontainer) array_ | (Optional) InitContainers is a list of Kibana init containers. If specified, this overrides the specified Kibana Deployment init containers. If omitted, the Kibana Deployment will use its default values for its init containers. | -| `containers` _[KibanaContainer](#kibanacontainer) array_ | (Optional) Containers is a list of Kibana containers. If specified, this overrides the specified Kibana Deployment containers. If omitted, the Kibana Deployment will use its default values for its containers. | - - -### KibanaPodTemplateSpec - - - -KibanaPodTemplateSpec is the Kibana's PodTemplateSpec - -_Appears in:_ -- [KibanaSpec](#kibanaspec) - -| Field | Description | -| --- | --- | -| `spec` _[KibanaPodSpec](#kibanapodspec)_ | (Optional) Spec is the Kibana's PodSpec. | - - -### KibanaSpec - - - - - -_Appears in:_ -- [Kibana](#kibana) - -| Field | Description | -| --- | --- | -| `template` _[KibanaPodTemplateSpec](#kibanapodtemplatespec)_ | (Optional) Template describes the Kibana pod that will be created. | - - -### KubernetesAutodetectionMethod - -_Underlying type:_ _string_ - -KubernetesAutodetectionMethod is a method of detecting an IP address based on the Kubernetes API. - -One of: NodeInternalIP - -_Appears in:_ -- [NodeAddressAutodetection](#nodeaddressautodetection) - -| Value | Description | -| --- | --- | -| `NodeInternalIP` | NodeInternalIP detects a node IP using the first status.Addresses entry of the relevant IP family with type NodeInternalIP on the Kubernetes nodes API. | - - -### L7LogCollectorDaemonSet - - - -L7LogCollectorDaemonSet is the configuration for the L7LogCollector DaemonSet. - -_Appears in:_ -- [ApplicationLayerSpec](#applicationlayerspec) - -| Field | Description | -| --- | --- | -| `spec` _[L7LogCollectorDaemonSetSpec](#l7logcollectordaemonsetspec)_ | (Optional) Spec is the specification of the L7LogCollector DaemonSet. | - - -### L7LogCollectorDaemonSetContainer - - - -L7LogCollectorDaemonSetContainer is a L7LogCollector DaemonSet container. - -_Appears in:_ -- [L7LogCollectorDaemonSetPodSpec](#l7logcollectordaemonsetpodspec) - -| Field | Description | -| --- | --- | -| `name` _string_ | Name is an enum which identifies the L7LogCollector DaemonSet container by name.
    Supported values are: l7-collector, envoy-proxy, dikastes | -| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core)_ | (Optional) Resources allows customization of limits and requests for compute resources such as cpu and memory. If specified, this overrides the named L7LogCollector DaemonSet container's resources. If omitted, the L7LogCollector DaemonSet will use its default value for this container's resources. | - - -### L7LogCollectorDaemonSetInitContainer - - - -L7LogCollectorDaemonSetInitContainer is a L7LogCollector DaemonSet init container. - -_Appears in:_ -- [L7LogCollectorDaemonSetPodSpec](#l7logcollectordaemonsetpodspec) - -| Field | Description | -| --- | --- | -| `name` _string_ | Name is an enum which identifies the L7LogCollector DaemonSet init container by name. | -| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core)_ | (Optional) Resources allows customization of limits and requests for compute resources such as cpu and memory. If specified, this overrides the named L7LogCollector DaemonSet init container's resources. If omitted, the L7LogCollector DaemonSet will use its default value for this init container's resources. | - - -### L7LogCollectorDaemonSetPodSpec - - - -L7LogCollectorDaemonSetPodSpec is the L7LogCollector DaemonSet's PodSpec. - -_Appears in:_ -- [L7LogCollectorDaemonSetPodTemplateSpec](#l7logcollectordaemonsetpodtemplatespec) - -| Field | Description | -| --- | --- | -| `initContainers` _[L7LogCollectorDaemonSetInitContainer](#l7logcollectordaemonsetinitcontainer) array_ | (Optional) InitContainers is a list of L7LogCollector DaemonSet init containers. If specified, this overrides the specified L7LogCollector DaemonSet init containers. If omitted, the L7LogCollector DaemonSet will use its default values for its init containers. | -| `containers` _[L7LogCollectorDaemonSetContainer](#l7logcollectordaemonsetcontainer) array_ | (Optional) Containers is a list of L7LogCollector DaemonSet containers. If specified, this overrides the specified L7LogCollector DaemonSet containers. If omitted, the L7LogCollector DaemonSet will use its default values for its containers. | - - -### L7LogCollectorDaemonSetPodTemplateSpec - - - -L7LogCollectorDaemonSetPodTemplateSpec is the L7LogCollector DaemonSet's PodTemplateSpec - -_Appears in:_ -- [L7LogCollectorDaemonSetSpec](#l7logcollectordaemonsetspec) - -| Field | Description | -| --- | --- | -| `spec` _[L7LogCollectorDaemonSetPodSpec](#l7logcollectordaemonsetpodspec)_ | (Optional) Spec is the L7LogCollector DaemonSet's PodSpec. | - - -### L7LogCollectorDaemonSetSpec - - - -L7LogCollectorDaemonSetSpec defines configuration for the L7LogCollector DaemonSet. - -_Appears in:_ -- [L7LogCollectorDaemonSet](#l7logcollectordaemonset) - -| Field | Description | -| --- | --- | -| `template` _[L7LogCollectorDaemonSetPodTemplateSpec](#l7logcollectordaemonsetpodtemplatespec)_ | (Optional) Template describes the L7LogCollector DaemonSet pod that will be created. | - - -### LinseedDeployment - - - -LinseedDeployment is the configuration for the linseed Deployment. - -_Appears in:_ -- [LogStorageSpec](#logstoragespec) - -| Field | Description | -| --- | --- | -| `spec` _[LinseedDeploymentSpec](#linseeddeploymentspec)_ | (Optional) Spec is the specification of the linseed Deployment. 
| - - -### LinseedDeploymentContainer - - - -LinseedDeploymentContainer is a linseed Deployment container. - -_Appears in:_ -- [LinseedDeploymentPodSpec](#linseeddeploymentpodspec) - -| Field | Description | -| --- | --- | -| `name` _string_ | Name is an enum which identifies the linseed Deployment container by name.
    Supported values are: tigera-linseed | -| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core)_ | (Optional) Resources allows customization of limits and requests for compute resources such as cpu and memory. If specified, this overrides the named linseed Deployment container's resources. If omitted, the linseed Deployment will use its default value for this container's resources. | - - -### LinseedDeploymentInitContainer - - - -LinseedDeploymentInitContainer is a linseed Deployment init container. - -_Appears in:_ -- [LinseedDeploymentPodSpec](#linseeddeploymentpodspec) - -| Field | Description | -| --- | --- | -| `name` _string_ | Name is an enum which identifies the linseed Deployment init container by name.
    Supported values are: tigera-secure-linseed-token-tls-key-cert-provisioner,tigera-secure-linseed-cert-key-cert-provisioner | -| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core)_ | (Optional) Resources allows customization of limits and requests for compute resources such as cpu and memory. If specified, this overrides the named linseed Deployment init container's resources. If omitted, the linseed Deployment will use its default value for this init container's resources. | - - -### LinseedDeploymentPodSpec - - - -LinseedDeploymentPodSpec is the linseed Deployment's PodSpec. - -_Appears in:_ -- [LinseedDeploymentPodTemplateSpec](#linseeddeploymentpodtemplatespec) - -| Field | Description | -| --- | --- | -| `initContainers` _[LinseedDeploymentInitContainer](#linseeddeploymentinitcontainer) array_ | (Optional) InitContainers is a list of linseed init containers. If specified, this overrides the specified linseed Deployment init containers. If omitted, the linseed Deployment will use its default values for its init containers. | -| `containers` _[LinseedDeploymentContainer](#linseeddeploymentcontainer) array_ | (Optional) Containers is a list of linseed containers. If specified, this overrides the specified linseed Deployment containers. If omitted, the linseed Deployment will use its default values for its containers. | - - -### LinseedDeploymentPodTemplateSpec - - - -LinseedDeploymentPodTemplateSpec is the linseed Deployment's PodTemplateSpec - -_Appears in:_ -- [LinseedDeploymentSpec](#linseeddeploymentspec) - -| Field | Description | -| --- | --- | -| `spec` _[LinseedDeploymentPodSpec](#linseeddeploymentpodspec)_ | (Optional) Spec is the linseed Deployment's PodSpec. | - - -### LinseedDeploymentSpec - - - -LinseedDeploymentSpec defines configuration for the linseed Deployment. - -_Appears in:_ -- [LinseedDeployment](#linseeddeployment) - -| Field | Description | -| --- | --- | -| `template` _[LinseedDeploymentPodTemplateSpec](#linseeddeploymentpodtemplatespec)_ | (Optional) Template describes the linseed Deployment pod that will be created. | - - -### LinuxDataplaneOption - -_Underlying type:_ _string_ - -LinuxDataplaneOption controls which dataplane is to be used on Linux nodes. - -One of: Iptables, BPF - -_Appears in:_ -- [CalicoNetworkSpec](#caliconetworkspec) - -| Value | Description | -| --- | --- | -| `Iptables` | | -| `BPF` | | -| `VPP` | | - - -### LogCollectionSpec - - - - - -_Appears in:_ -- [ApplicationLayerSpec](#applicationlayerspec) - -| Field | Description | -| --- | --- | -| `collectLogs` _[LogCollectionStatusType](#logcollectionstatustype)_ | (Optional) This setting enables or disable log collection. Allowed values are Enabled or Disabled. | -| `logIntervalSeconds` _integer_ | (Optional) Interval in seconds for sending L7 log information for processing.
    Default: 5 sec | -| `logRequestsPerInterval` _integer_ | (Optional) Maximum number of unique L7 logs that are sent per LogIntervalSeconds. Adjust this to limit the number of L7 logs sent per LogIntervalSeconds to felix for further processing; use a negative number to ignore the limit.
    Default: -1 | - - -### LogCollectionStatusType - -_Underlying type:_ _string_ - - - -_Appears in:_ -- [LogCollectionSpec](#logcollectionspec) - -| Value | Description | -| --- | --- | -| `Disabled` | | -| `Enabled` | | - - -### LogCollector - - - -LogCollector installs the components required for Tigera flow and DNS log collection. At most one instance -of this resource is supported. It must be named "tigera-secure". When created, this installs fluentd on all nodes -configured to collect Tigera log data and export it to Tigera's Elasticsearch cluster as well as any additionally configured destinations. - -| Field | Description | -| --- | --- | -| `apiVersion` _string_ | `operator.tigera.io/v1` | -| `kind` _string_ | `LogCollector` | -| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | -| `spec` _[LogCollectorSpec](#logcollectorspec)_ | Specification of the desired state for Tigera log collection. | -| `status` _[LogCollectorStatus](#logcollectorstatus)_ | Most recently observed state for Tigera log collection. | - - -### LogCollectorSpec - - - -LogCollectorSpec defines the desired state of Tigera flow, audit, and DNS log collection. - -_Appears in:_ -- [LogCollector](#logcollector) - -| Field | Description | -| --- | --- | -| `additionalStores` _[AdditionalLogStoreSpec](#additionallogstorespec)_ | (Optional) Configuration for exporting flow, audit, and DNS logs to external storage. | -| `additionalSources` _[AdditionalLogSourceSpec](#additionallogsourcespec)_ | (Optional) Configuration for importing audit logs from managed kubernetes cluster log sources. | -| `collectProcessPath` _[CollectProcessPathOption](#collectprocesspathoption)_ | (Optional) Configuration for enabling/disabling process path collection in flowlogs. If Enabled, this feature sets hostPID to true in order to read process cmdline.
    Default: Enabled | -| `multiTenantManagementClusterNamespace` _string_ | (Optional) If running as a multi-tenant management cluster, the namespace in which the management cluster's tenant services are running. | -| `fluentdDaemonSet` _[FluentdDaemonSet](#fluentddaemonset)_ | FluentdDaemonSet configures the Fluentd DaemonSet. | -| `eksLogForwarderDeployment` _[EKSLogForwarderDeployment](#ekslogforwarderdeployment)_ | (Optional) EKSLogForwarderDeployment configures the EKSLogForwarderDeployment Deployment. | - - -### LogCollectorStatus - - - -LogCollectorStatus defines the observed state of Tigera flow and DNS log collection - -_Appears in:_ -- [LogCollector](#logcollector) - -| Field | Description | -| --- | --- | -| `state` _string_ | State provides user-readable status. | -| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#condition-v1-meta) array_ | (Optional) Conditions represents the latest observed set of conditions for the component. A component may be one or more of Ready, Progressing, Degraded or other customer types. | - - -### LogLevel - -_Underlying type:_ _string_ - - - -_Appears in:_ -- [CNILogging](#cnilogging) -- [EgressGatewaySpec](#egressgatewayspec) - -| Value | Description | -| --- | --- | -| `Trace` | | -| `Info` | | -| `Debug` | | -| `Warn` | | -| `Fatal` | | -| `Error` | | - - -### LogStorage - - - -LogStorage installs the components required for Tigera flow and DNS log storage. At most one instance -of this resource is supported. It must be named "tigera-secure". When created, this installs an Elasticsearch cluster for use by -Calico Enterprise. - -| Field | Description | -| --- | --- | -| `apiVersion` _string_ | `operator.tigera.io/v1` | -| `kind` _string_ | `LogStorage` | -| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | -| `spec` _[LogStorageSpec](#logstoragespec)_ | Specification of the desired state for Tigera log storage. | -| `status` _[LogStorageStatus](#logstoragestatus)_ | Most recently observed state for Tigera log storage. | - - -### LogStorageComponentName - -_Underlying type:_ _string_ - -LogStorageComponentName CRD enum - -_Appears in:_ -- [LogStorageComponentResource](#logstoragecomponentresource) - -| Value | Description | -| --- | --- | -| `ECKOperator` | | - - -### LogStorageComponentResource - - - -The ComponentResource struct associates a ResourceRequirements with a component by name - -_Appears in:_ -- [LogStorageSpec](#logstoragespec) - -| Field | Description | -| --- | --- | -| `componentName` _[LogStorageComponentName](#logstoragecomponentname)_ | Deprecated. Please use ECKOperatorStatefulSet. ComponentName is an enum which identifies the component | -| `resourceRequirements` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core)_ | ResourceRequirements allows customization of limits and requests for compute resources such as cpu and memory. | - - -### LogStorageSpec - - - -LogStorageSpec defines the desired state of Tigera flow and DNS log storage. - -_Appears in:_ -- [LogStorage](#logstorage) - -| Field | Description | -| --- | --- | -| `nodes` _[Nodes](#nodes)_ | Nodes defines the configuration for a set of identical Elasticsearch cluster nodes, each of type master, data, and ingest. 
| -| `indices` _[Indices](#indices)_ | (Optional) Index defines the configuration for the indices in the Elasticsearch cluster. | -| `retention` _[Retention](#retention)_ | (Optional) Retention defines how long data is retained in the Elasticsearch cluster before it is cleared. | -| `storageClassName` _string_ | (Optional) StorageClassName will populate the PersistentVolumeClaim.StorageClassName that is used to provision disks to the Tigera Elasticsearch cluster. The StorageClassName should only be modified when no LogStorage is currently active. We recommend choosing a storage class dedicated to Tigera LogStorage only. Otherwise, data retention cannot be guaranteed during upgrades. See https://docs.tigera.io/maintenance/upgrading for up-to-date instructions.
    Default: tigera-elasticsearch | -| `dataNodeSelector` _object (keys:string, values:string)_ | (Optional) DataNodeSelector gives you more control over the node that Elasticsearch will run on. The contents of DataNodeSelector will be added to the PodSpec of the Elasticsearch nodes. For the pod to be eligible to run on a node, the node must have each of the indicated key-value pairs as labels as well as access to the specified StorageClassName. | -| `componentResources` _[LogStorageComponentResource](#logstoragecomponentresource) array_ | (Optional) ComponentResources can be used to customize the resource requirements for each component. Only ECKOperator is supported for this spec. | -| `eckOperatorStatefulSet` _[ECKOperatorStatefulSet](#eckoperatorstatefulset)_ | (Optional) ECKOperatorStatefulSet configures the ECKOperator StatefulSet. If used in conjunction with the deprecated ComponentResources, then these overrides take precedence. | -| `kibana` _[Kibana](#kibana)_ | (Optional) Kibana configures the Kibana Spec. | -| `linseedDeployment` _[LinseedDeployment](#linseeddeployment)_ | LinseedDeployment configures the linseed Deployment. | -| `elasticsearchMetricsDeployment` _[ElasticsearchMetricsDeployment](#elasticsearchmetricsdeployment)_ | ElasticsearchMetricsDeployment configures the tigera-elasticsearch-metric Deployment. | - - -### LogStorageStatus - - - -LogStorageStatus defines the observed state of Tigera flow and DNS log storage. - -_Appears in:_ -- [LogStorage](#logstorage) - -| Field | Description | -| --- | --- | -| `state` _string_ | State provides user-readable status. | -| `elasticsearchHash` _string_ | ElasticsearchHash represents the current revision and configuration of the installed Elasticsearch cluster. This is an opaque string which can be monitored for changes to perform actions when Elasticsearch is modified. | -| `kibanaHash` _string_ | KibanaHash represents the current revision and configuration of the installed Kibana dashboard. This is an opaque string which can be monitored for changes to perform actions when Kibana is modified. | -| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#condition-v1-meta) array_ | (Optional) Conditions represents the latest observed set of conditions for the component. A component may be one or more of Ready, Progressing, Degraded or other customer types. | - - -### Logging - - - - - -_Appears in:_ -- [InstallationSpec](#installationspec) - -| Field | Description | -| --- | --- | -| `cni` _[CNILogging](#cnilogging)_ | (Optional) Customized logging specification for calico-cni plugin | - - -### ManagementCluster - - - -The presence of ManagementCluster in your cluster, will configure it to be the management plane to which managed -clusters can connect. At most one instance of this resource is supported. It must be named "tigera-secure". - -| Field | Description | -| --- | --- | -| `apiVersion` _string_ | `operator.tigera.io/v1` | -| `kind` _string_ | `ManagementCluster` | -| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | -| `spec` _[ManagementClusterSpec](#managementclusterspec)_ | | - - -### ManagementClusterConnection - - - -ManagementClusterConnection represents a link between a managed cluster and a management cluster. At most one -instance of this resource is supported. It must be named "tigera-secure". 
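A minimal sketch of a managed cluster's connection resource; the address reuses the illustrative value from the spec description below, and the CA choice is shown only as an example:

```yaml
apiVersion: operator.tigera.io/v1
kind: ManagementClusterConnection
metadata:
  name: tigera-secure
spec:
  # Address the managed cluster uses to reach the management cluster (illustrative).
  managementClusterAddr: "10.128.0.10:30449"
  tls:
    # 'Tigera' expects a self-signed cert in the certificate bundle; 'Public' uses system CAs.
    ca: Tigera
```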
- -| Field | Description | -| --- | --- | -| `apiVersion` _string_ | `operator.tigera.io/v1` | -| `kind` _string_ | `ManagementClusterConnection` | -| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | -| `spec` _[ManagementClusterConnectionSpec](#managementclusterconnectionspec)_ | | -| `status` _[ManagementClusterConnectionStatus](#managementclusterconnectionstatus)_ | | - - -### ManagementClusterConnectionSpec - - - -ManagementClusterConnectionSpec defines the desired state of ManagementClusterConnection - -_Appears in:_ -- [ManagementClusterConnection](#managementclusterconnection) - -| Field | Description | -| --- | --- | -| `managementClusterAddr` _string_ | (Optional) Specify where the managed cluster can reach the management cluster. Ex.: "10.128.0.10:30449". A managed cluster should be able to access this address. This field is used by managed clusters only. | -| `tls` _[ManagementClusterTLS](#managementclustertls)_ | (Optional) TLS provides options for configuring how Managed Clusters can establish an mTLS connection with the Management Cluster. | -| `guardianDeployment` _[GuardianDeployment](#guardiandeployment)_ | GuardianDeployment configures the guardian Deployment. | - - -### ManagementClusterConnectionStatus - - - -ManagementClusterConnectionStatus defines the observed state of ManagementClusterConnection - -_Appears in:_ -- [ManagementClusterConnection](#managementclusterconnection) - -| Field | Description | -| --- | --- | -| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#condition-v1-meta) array_ | (Optional) Conditions represents the latest observed set of conditions for the component. A component may be one or more of Ready, Progressing, Degraded or other customer types. | - - -### ManagementClusterSpec - - - -ManagementClusterSpec defines the desired state of a ManagementCluster - -_Appears in:_ -- [ManagementCluster](#managementcluster) - -| Field | Description | -| --- | --- | -| `address` _string_ | (Optional) This field specifies the externally reachable address to which your managed cluster will connect. When a managed cluster is added, this field is used to populate an easy-to-apply manifest that will connect both clusters. Valid examples are: "0.0.0.0:31000", "example.com:32000", "[::1]:32500" | -| `tls` _[TLS](#tls)_ | (Optional) TLS provides options for configuring how Managed Clusters can establish an mTLS connection with the Management Cluster. | - - -### ManagementClusterTLS - - - - - -_Appears in:_ -- [ManagementClusterConnectionSpec](#managementclusterconnectionspec) - -| Field | Description | -| --- | --- | -| `ca` _[CAType](#catype)_ | CA indicates which verification method the tunnel client should use to verify the tunnel server's identity. When left blank or set to 'Tigera', the tunnel client will expect a self-signed cert to be included in the certificate bundle and will expect the cert to have a Common Name (CN) of 'voltron'. When set to 'Public', the tunnel client will use its installed system certs and will use the managementClusterAddr to verify the tunnel server's identity.
    Default: Tigera | - - -### Manager - - - -Manager installs the Calico Enterprise manager graphical user interface. At most one instance -of this resource is supported. It must be named "tigera-secure". - -| Field | Description | -| --- | --- | -| `apiVersion` _string_ | `operator.tigera.io/v1` | -| `kind` _string_ | `Manager` | -| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | -| `spec` _[ManagerSpec](#managerspec)_ | Specification of the desired state for the Calico Enterprise manager. | -| `status` _[ManagerStatus](#managerstatus)_ | Most recently observed state for the Calico Enterprise manager. | - - -### ManagerDeployment - - - -ManagerDeployment is the configuration for the Manager Deployment. - -_Appears in:_ -- [ManagerSpec](#managerspec) - -| Field | Description | -| --- | --- | -| `spec` _[ManagerDeploymentSpec](#managerdeploymentspec)_ | (Optional) Spec is the specification of the Manager Deployment. | - - -### ManagerDeploymentContainer - - - -ManagerDeploymentContainer is a Manager Deployment container. - -_Appears in:_ -- [ManagerDeploymentPodSpec](#managerdeploymentpodspec) - -| Field | Description | -| --- | --- | -| `name` _string_ | Name is an enum which identifies the Manager Deployment container by name.
    Supported values are: tigera-voltron, tigera-manager, tigera-es-proxy | -| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core)_ | (Optional) Resources allows customization of limits and requests for compute resources such as cpu and memory. If specified, this overrides the named Manager Deployment container's resources. If omitted, the Manager Deployment will use its default value for this container's resources. | - - -### ManagerDeploymentInitContainer - - - -ManagerDeploymentInitContainer is a Manager Deployment init container. - -_Appears in:_ -- [ManagerDeploymentPodSpec](#managerdeploymentpodspec) - -| Field | Description | -| --- | --- | -| `name` _string_ | Name is an enum which identifies the Manager Deployment init container by name.
    Supported values are: manager-tls-key-cert-provisioner, internal-manager-tls-key-cert-provisioner, tigera-voltron-linseed-tls-key-cert-provisioner | -| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core)_ | (Optional) Resources allows customization of limits and requests for compute resources such as cpu and memory. If specified, this overrides the named Manager Deployment init container's resources. If omitted, the Manager Deployment will use its default value for this init container's resources. If used in conjunction with the deprecated ComponentResources, then this value takes precedence. | - - -### ManagerDeploymentPodSpec - - - -ManagerDeploymentPodSpec is the Manager Deployment's PodSpec. - -_Appears in:_ -- [ManagerDeploymentPodTemplateSpec](#managerdeploymentpodtemplatespec) - -| Field | Description | -| --- | --- | -| `initContainers` _[ManagerDeploymentInitContainer](#managerdeploymentinitcontainer) array_ | (Optional) InitContainers is a list of Manager init containers. If specified, this overrides the specified Manager Deployment init containers. If omitted, the Manager Deployment will use its default values for its init containers. | -| `containers` _[ManagerDeploymentContainer](#managerdeploymentcontainer) array_ | (Optional) Containers is a list of Manager containers. If specified, this overrides the specified Manager Deployment containers. If omitted, the Manager Deployment will use its default values for its containers. | - - -### ManagerDeploymentPodTemplateSpec - - - -ManagerDeploymentPodTemplateSpec is the Manager Deployment's PodTemplateSpec - -_Appears in:_ -- [ManagerDeploymentSpec](#managerdeploymentspec) - -| Field | Description | -| --- | --- | -| `spec` _[ManagerDeploymentPodSpec](#managerdeploymentpodspec)_ | (Optional) Spec is the Manager Deployment's PodSpec. | - - -### ManagerDeploymentSpec - - - -ManagerDeploymentSpec defines configuration for the Manager Deployment. - -_Appears in:_ -- [ManagerDeployment](#managerdeployment) - -| Field | Description | -| --- | --- | -| `template` _[ManagerDeploymentPodTemplateSpec](#managerdeploymentpodtemplatespec)_ | (Optional) Template describes the Manager Deployment pod that will be created. | - - -### ManagerSpec - - - -ManagerSpec defines configuration for the Calico Enterprise manager GUI. - -_Appears in:_ -- [Manager](#manager) - -| Field | Description | -| --- | --- | -| `managerDeployment` _[ManagerDeployment](#managerdeployment)_ | (Optional) ManagerDeployment configures the Manager Deployment. | - - -### ManagerStatus - - - -ManagerStatus defines the observed state of the Calico Enterprise manager GUI. - -_Appears in:_ -- [Manager](#manager) - -| Field | Description | -| --- | --- | -| `state` _string_ | State provides user-readable status. | -| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#condition-v1-meta) array_ | (Optional) Conditions represents the latest observed set of conditions for the component. A component may be one or more of Ready, Progressing, Degraded or other customer types. | - - -### Metadata - - - -Metadata contains the standard Kubernetes labels and annotations fields. 
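For example, a hedged sketch of attaching extra labels and annotations to the calico-node DaemonSet through the Installation resource (the placement follows the "Appears in" list below; the label and annotation keys are placeholders):

```yaml
apiVersion: operator.tigera.io/v1
kind: Installation
metadata:
  name: default
spec:
  calicoNodeDaemonSet:
    metadata:
      labels:
        example.com/team: networking         # added only if the key is not already set
      annotations:
        example.com/change-ticket: "CR-1234" # added only if the key is not already set
```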
- -_Appears in:_ -- [APIServerDeployment](#apiserverdeployment) -- [APIServerDeploymentPodTemplateSpec](#apiserverdeploymentpodtemplatespec) -- [CSINodeDriverDaemonSet](#csinodedriverdaemonset) -- [CSINodeDriverDaemonSetPodTemplateSpec](#csinodedriverdaemonsetpodtemplatespec) -- [CalicoKubeControllersDeployment](#calicokubecontrollersdeployment) -- [CalicoKubeControllersDeploymentPodTemplateSpec](#calicokubecontrollersdeploymentpodtemplatespec) -- [CalicoNodeDaemonSet](#caliconodedaemonset) -- [CalicoNodeDaemonSetPodTemplateSpec](#caliconodedaemonsetpodtemplatespec) -- [CalicoNodeWindowsDaemonSet](#caliconodewindowsdaemonset) -- [CalicoNodeWindowsDaemonSetPodTemplateSpec](#caliconodewindowsdaemonsetpodtemplatespec) -- [CalicoWindowsUpgradeDaemonSet](#calicowindowsupgradedaemonset) -- [CalicoWindowsUpgradeDaemonSetPodTemplateSpec](#calicowindowsupgradedaemonsetpodtemplatespec) -- [TyphaDeployment](#typhadeployment) -- [TyphaDeploymentPodTemplateSpec](#typhadeploymentpodtemplatespec) - -| Field | Description | -| --- | --- | -| `labels` _object (keys:string, values:string)_ | (Optional) Labels is a map of string keys and values that may match replicaset and service selectors. Each of these key/value pairs are added to the object's labels provided the key does not already exist in the object's labels. | -| `annotations` _object (keys:string, values:string)_ | (Optional) Annotations is a map of arbitrary non-identifying metadata. Each of these key/value pairs are added to the object's annotations provided the key does not already exist in the object's annotations. | - - -### Monitor - - - -Monitor is the Schema for the monitor API. At most one instance -of this resource is supported. It must be named "tigera-secure". - -| Field | Description | -| --- | --- | -| `apiVersion` _string_ | `operator.tigera.io/v1` | -| `kind` _string_ | `Monitor` | -| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | -| `spec` _[MonitorSpec](#monitorspec)_ | | -| `status` _[MonitorStatus](#monitorstatus)_ | | - - -### MonitorSpec - - - -MonitorSpec defines the desired state of Tigera monitor. - -_Appears in:_ -- [Monitor](#monitor) - -| Field | Description | -| --- | --- | -| `externalPrometheus` _[ExternalPrometheus](#externalprometheus)_ | ExternalPrometheus optionally configures integration with an external Prometheus for scraping Calico metrics. When specified, the operator will render resources in the defined namespace. This option can be useful for configuring scraping from git-ops tools without the need of post-installation steps. | -| `prometheus` _[Prometheus](#prometheus)_ | (Optional) Prometheus is the configuration for the Prometheus. | -| `alertManager` _[AlertManager](#alertmanager)_ | (Optional) AlertManager is the configuration for the AlertManager. | - - -### MonitorStatus - - - -MonitorStatus defines the observed state of Tigera monitor. - -_Appears in:_ -- [Monitor](#monitor) - -| Field | Description | -| --- | --- | -| `state` _string_ | State provides user-readable status. | -| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#condition-v1-meta) array_ | (Optional) Conditions represents the latest observed set of conditions for the component. A component may be one or more of Ready, Progressing, Degraded or other customer types. 
| - - -### MultiInterfaceMode - -_Underlying type:_ _string_ - -MultiInterfaceMode describes the method of providing multiple pod interfaces. - -One of: None, Multus - -_Appears in:_ -- [CalicoNetworkSpec](#caliconetworkspec) - -| Value | Description | -| --- | --- | -| `None` | | -| `Multus` | | - - -### NATOutgoingType - -_Underlying type:_ _string_ - -NATOutgoingType describe the type of outgoing NAT to use. - -One of: Enabled, Disabled - -_Appears in:_ -- [IPPool](#ippool) - -| Value | Description | -| --- | --- | -| `Enabled` | | -| `Disabled` | | - - -### NativeIP - -_Underlying type:_ _string_ - -NativeIP defines if Egress Gateway pods should have AWS IPs. -When NativeIP is enabled, the IPPools should be backed by AWS subnet. - -_Appears in:_ -- [AWSEgressGateway](#awsegressgateway) - -| Value | Description | -| --- | --- | -| `Enabled` | | -| `Disabled` | | - - -### NodeAddressAutodetection - - - -NodeAddressAutodetection provides configuration options for auto-detecting node addresses. At most one option -can be used. If no detection option is specified, then IP auto detection will be disabled for this address family and IPs -must be specified directly on the Node resource. - -_Appears in:_ -- [CalicoNetworkSpec](#caliconetworkspec) - -| Field | Description | -| --- | --- | -| `firstFound` _boolean_ | (Optional) FirstFound uses default interface matching parameters to select an interface, performing best-effort filtering based on well-known interface names. | -| `kubernetes` _[KubernetesAutodetectionMethod](#kubernetesautodetectionmethod)_ | (Optional) Kubernetes configures Calico to detect node addresses based on the Kubernetes API. | -| `interface` _string_ | (Optional) Interface enables IP auto-detection based on interfaces that match the given regex. | -| `skipInterface` _string_ | (Optional) SkipInterface enables IP auto-detection based on interfaces that do not match the given regex. | -| `canReach` _string_ | (Optional) CanReach enables IP auto-detection based on which source address on the node is used to reach the specified IP or domain. | -| `cidrs` _string array_ | CIDRS enables IP auto-detection based on which addresses on the nodes are within one of the provided CIDRs. | - - -### NodeAffinity - - - -NodeAffinity is similar to *v1.NodeAffinity, but allows us to limit available schedulers. - -_Appears in:_ -- [TyphaAffinity](#typhaaffinity) - -| Field | Description | -| --- | --- | -| `preferredDuringSchedulingIgnoredDuringExecution` _[PreferredSchedulingTerm](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#preferredschedulingterm-v1-core) array_ | (Optional) The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. | -| `requiredDuringSchedulingIgnoredDuringExecution` _[NodeSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#nodeselector-v1-core)_ | (Optional)
    WARNING: Please note that if the affinity requirements specified by this field are not met at scheduling time, the pod will NOT be scheduled onto the node. There is no fallback to another affinity rules with this setting. This may cause networking disruption or even catastrophic failure! PreferredDuringSchedulingIgnoredDuringExecution should be used for affinity unless there is a specific well understood reason to use RequiredDuringSchedulingIgnoredDuringExecution and you can guarantee that the RequiredDuringSchedulingIgnoredDuringExecution will always have sufficient nodes to satisfy the requirement. NOTE: RequiredDuringSchedulingIgnoredDuringExecution is set by default for AKS nodes, to avoid scheduling Typhas on virtual-nodes. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. | - - -### NodeSet - - - -NodeSets defines configuration specific to each Elasticsearch Node Set - -_Appears in:_ -- [Nodes](#nodes) - -| Field | Description | -| --- | --- | -| `selectionAttributes` _[NodeSetSelectionAttribute](#nodesetselectionattribute) array_ | SelectionAttributes defines K8s node attributes a NodeSet should use when setting the Node Affinity selectors and Elasticsearch cluster awareness attributes for the Elasticsearch nodes. The list of SelectionAttributes are used to define Node Affinities and set the node awareness configuration in the running Elasticsearch instance. | - - -### NodeSetSelectionAttribute - - - -NodeSetSelectionAttribute defines a K8s node "attribute" the Elasticsearch nodes should be aware of. The "Name" and "Value" -are used together to set the "awareness" attributes in Elasticsearch, while the "NodeLabel" and "Value" are used together -to define Node Affinity for the Pods created for the Elasticsearch nodes. - -_Appears in:_ -- [NodeSet](#nodeset) - -| Field | Description | -| --- | --- | -| `name` _string_ | | -| `nodeLabel` _string_ | | -| `value` _string_ | | - - -### Nodes - - - -Nodes defines the configuration for a set of identical Elasticsearch cluster nodes, each of type master, data, and ingest. - -_Appears in:_ -- [LogStorageSpec](#logstoragespec) - -| Field | Description | -| --- | --- | -| `count` _integer_ | Count defines the number of nodes in the Elasticsearch cluster. | -| `nodeSets` _[NodeSet](#nodeset) array_ | (Optional) NodeSets defines configuration specific to each Elasticsearch Node Set | -| `resourceRequirements` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core)_ | (Optional) ResourceRequirements defines the resource limits and requirements for the Elasticsearch cluster. | - - -### NonPrivilegedType - -_Underlying type:_ _string_ - -NonPrivilegedType specifies whether Calico runs as permissioned or not - -One of: Enabled, Disabled - -_Appears in:_ -- [InstallationSpec](#installationspec) - -| Value | Description | -| --- | --- | -| `Enabled` | | -| `Disabled` | | - - -### OIDCType - -_Underlying type:_ _string_ - -OIDCType defines how OIDC is configured for Tigera Enterprise. Dex should be the best option for most use-cases. -The Tigera option can help in specific use-cases, for instance, when you are unable to configure a client secret. 
-One of: Dex, Tigera - -_Validation:_ -- Enum: [Dex Tigera] - - -_Appears in:_ -- [AuthenticationOIDC](#authenticationoidc) - -| Value | Description | -| --- | --- | -| `Dex` | OIDCTypeDex uses Dex IdP, a popular open-source tool for connecting OIDC. | -| `Tigera` | OIDCTypeTigera uses customer code to pass OIDC configuration directly into our server applications. | - - -### PacketCaptureAPI - - - -PacketCaptureAPI is used to configure the resource requirement for PacketCaptureAPI deployment. It must be named "tigera-secure". - -| Field | Description | -| --- | --- | -| `apiVersion` _string_ | `operator.tigera.io/v1` | -| `kind` _string_ | `PacketCaptureAPI` | -| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | -| `spec` _[PacketCaptureAPISpec](#packetcaptureapispec)_ | Specification of the desired state for the PacketCaptureAPI. | -| `status` _[PacketCaptureAPIStatus](#packetcaptureapistatus)_ | Most recently observed state for the PacketCaptureAPI. | - - -### PacketCaptureAPIDeployment - - - -PacketCaptureAPIDeployment is the configuration for the PacketCaptureAPI Deployment. - -_Appears in:_ -- [PacketCaptureAPISpec](#packetcaptureapispec) - -| Field | Description | -| --- | --- | -| `spec` _[PacketCaptureAPIDeploymentSpec](#packetcaptureapideploymentspec)_ | (Optional) Spec is the specification of the PacketCaptureAPI Deployment. | - - -### PacketCaptureAPIDeploymentContainer - - - -PacketCaptureAPIDeploymentContainer is a PacketCaptureAPI Deployment container. - -_Appears in:_ -- [PacketCaptureAPIDeploymentPodSpec](#packetcaptureapideploymentpodspec) - -| Field | Description | -| --- | --- | -| `name` _string_ | Name is an enum which identifies the PacketCaptureAPI Deployment container by name.
    Supported values are: tigera-packetcapture-server | -| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core)_ | (Optional) Resources allows customization of limits and requests for compute resources such as cpu and memory. If specified, this overrides the named PacketCaptureAPI Deployment container's resources. If omitted, the PacketCaptureAPI Deployment will use its default value for this container's resources. | - - -### PacketCaptureAPIDeploymentInitContainer - - - -PacketCaptureAPIDeploymentInitContainer is a PacketCaptureAPI Deployment init container. - -_Appears in:_ -- [PacketCaptureAPIDeploymentPodSpec](#packetcaptureapideploymentpodspec) - -| Field | Description | -| --- | --- | -| `name` _string_ | Name is an enum which identifies the PacketCaptureAPI Deployment init container by name.
    Supported values are: tigera-packetcapture-server-tls-key-cert-provisioner | -| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core)_ | (Optional) Resources allows customization of limits and requests for compute resources such as cpu and memory. If specified, this overrides the named PacketCaptureAPI Deployment init container's resources. If omitted, the PacketCaptureAPI Deployment will use its default value for this init container's resources. | - - -### PacketCaptureAPIDeploymentPodSpec - - - -PacketCaptureAPIDeploymentPodSpec is the PacketCaptureAPI Deployment's PodSpec. - -_Appears in:_ -- [PacketCaptureAPIDeploymentPodTemplateSpec](#packetcaptureapideploymentpodtemplatespec) - -| Field | Description | -| --- | --- | -| `initContainers` _[PacketCaptureAPIDeploymentInitContainer](#packetcaptureapideploymentinitcontainer) array_ | (Optional) InitContainers is a list of PacketCaptureAPI init containers. If specified, this overrides the specified PacketCaptureAPI Deployment init containers. If omitted, the PacketCaptureAPI Deployment will use its default values for its init containers. | -| `containers` _[PacketCaptureAPIDeploymentContainer](#packetcaptureapideploymentcontainer) array_ | (Optional) Containers is a list of PacketCaptureAPI containers. If specified, this overrides the specified PacketCaptureAPI Deployment containers. If omitted, the PacketCaptureAPI Deployment will use its default values for its containers. | - - -### PacketCaptureAPIDeploymentPodTemplateSpec - - - -PacketCaptureAPIDeploymentPodTemplateSpec is the PacketCaptureAPI Deployment's PodTemplateSpec - -_Appears in:_ -- [PacketCaptureAPIDeploymentSpec](#packetcaptureapideploymentspec) - -| Field | Description | -| --- | --- | -| `spec` _[PacketCaptureAPIDeploymentPodSpec](#packetcaptureapideploymentpodspec)_ | (Optional) Spec is the PacketCaptureAPI Deployment's PodSpec. | - - -### PacketCaptureAPIDeploymentSpec - - - -PacketCaptureAPIDeploymentSpec defines configuration for the PacketCaptureAPI Deployment. - -_Appears in:_ -- [PacketCaptureAPIDeployment](#packetcaptureapideployment) - -| Field | Description | -| --- | --- | -| `template` _[PacketCaptureAPIDeploymentPodTemplateSpec](#packetcaptureapideploymentpodtemplatespec)_ | (Optional) Template describes the PacketCaptureAPI Deployment pod that will be created. | - - -### PacketCaptureAPISpec - - - -PacketCaptureAPISpec defines configuration for the Packet Capture API. - -_Appears in:_ -- [PacketCaptureAPI](#packetcaptureapi) - -| Field | Description | -| --- | --- | -| `packetCaptureAPIDeployment` _[PacketCaptureAPIDeployment](#packetcaptureapideployment)_ | (Optional) PacketCaptureAPIDeployment configures the PacketCaptureAPI Deployment. | - - -### PacketCaptureAPIStatus - - - -PacketCaptureAPIStatus defines the observed state of the Packet Capture API. - -_Appears in:_ -- [PacketCaptureAPI](#packetcaptureapi) - -| Field | Description | -| --- | --- | -| `state` _string_ | State provides user-readable status. | -| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#condition-v1-meta) array_ | (Optional) Conditions represents the latest observed set of conditions for the component. A component may be one or more of Ready, Progressing, Degraded or other customer types. 
| - - -### PathMatch - - - - - -_Appears in:_ -- [TLSTerminatedRouteSpec](#tlsterminatedroutespec) - -| Field | Description | -| --- | --- | -| `path` _string_ | Path is the path portion of the URL based on which we proxy. | -| `pathRegexp` _string_ | (Optional) PathRegexp, if not nil, checks if Regexp matches the path. | -| `pathReplace` _string_ | (Optional) PathReplace if not nil will be used to replace PathRegexp matches. | - - -### PolicyRecommendation - - - -PolicyRecommendation is the Schema for the policy recommendation API. At most one instance -of this resource is supported. It must be named "tigera-secure". - -| Field | Description | -| --- | --- | -| `apiVersion` _string_ | `operator.tigera.io/v1` | -| `kind` _string_ | `PolicyRecommendation` | -| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | -| `spec` _[PolicyRecommendationSpec](#policyrecommendationspec)_ | | -| `status` _[PolicyRecommendationStatus](#policyrecommendationstatus)_ | | - - -### PolicyRecommendationDeployment - - - -PolicyRecommendationDeployment is the configuration for the PolicyRecommendation Deployment. - -_Appears in:_ -- [PolicyRecommendationSpec](#policyrecommendationspec) - -| Field | Description | -| --- | --- | -| `spec` _[PolicyRecommendationDeploymentSpec](#policyrecommendationdeploymentspec)_ | (Optional) Spec is the specification of the PolicyRecommendation Deployment. | - - -### PolicyRecommendationDeploymentContainer - - - -PolicyRecommendationDeploymentContainer is a PolicyRecommendation Deployment container. - -_Appears in:_ -- [PolicyRecommendationDeploymentPodSpec](#policyrecommendationdeploymentpodspec) - -| Field | Description | -| --- | --- | -| `name` _string_ | Name is an enum which identifies the PolicyRecommendation Deployment container by name.
    Supported values are: policy-recommendation-controller | -| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core)_ | (Optional) Resources allows customization of limits and requests for compute resources such as cpu and memory. If specified, this overrides the named PolicyRecommendation Deployment container's resources. If omitted, the PolicyRecommendation Deployment will use its default value for this container's resources. | - - -### PolicyRecommendationDeploymentInitContainer - - - -PolicyRecommendationDeploymentInitContainer is a PolicyRecommendation Deployment init container. - -_Appears in:_ -- [PolicyRecommendationDeploymentPodSpec](#policyrecommendationdeploymentpodspec) - -| Field | Description | -| --- | --- | -| `name` _string_ | Name is an enum which identifies the PolicyRecommendation Deployment init container by name. | -| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core)_ | (Optional) Resources allows customization of limits and requests for compute resources such as cpu and memory. If specified, this overrides the named PolicyRecommendation Deployment init container's resources. If omitted, the PolicyRecommendation Deployment will use its default value for this init container's resources. | - - -### PolicyRecommendationDeploymentPodSpec - - - -PolicyRecommendationDeploymentPodSpec is the PolicyRecommendation Deployment's PodSpec. - -_Appears in:_ -- [PolicyRecommendationDeploymentPodTemplateSpec](#policyrecommendationdeploymentpodtemplatespec) - -| Field | Description | -| --- | --- | -| `initContainers` _[PolicyRecommendationDeploymentInitContainer](#policyrecommendationdeploymentinitcontainer) array_ | (Optional) InitContainers is a list of PolicyRecommendation init containers. If specified, this overrides the specified PolicyRecommendation Deployment init containers. If omitted, the PolicyRecommendation Deployment will use its default values for its init containers. | -| `containers` _[PolicyRecommendationDeploymentContainer](#policyrecommendationdeploymentcontainer) array_ | (Optional) Containers is a list of PolicyRecommendation containers. If specified, this overrides the specified PolicyRecommendation Deployment containers. If omitted, the PolicyRecommendation Deployment will use its default values for its containers. | - - -### PolicyRecommendationDeploymentPodTemplateSpec - - - -PolicyRecommendationDeploymentPodTemplateSpec is the PolicyRecommendation Deployment's PodTemplateSpec - -_Appears in:_ -- [PolicyRecommendationDeploymentSpec](#policyrecommendationdeploymentspec) - -| Field | Description | -| --- | --- | -| `spec` _[PolicyRecommendationDeploymentPodSpec](#policyrecommendationdeploymentpodspec)_ | (Optional) Spec is the PolicyRecommendation Deployment's PodSpec. | - - -### PolicyRecommendationDeploymentSpec - - - -PolicyRecommendationDeploymentSpec defines configuration for the PolicyRecommendation Deployment. - -_Appears in:_ -- [PolicyRecommendationDeployment](#policyrecommendationdeployment) - -| Field | Description | -| --- | --- | -| `template` _[PolicyRecommendationDeploymentPodTemplateSpec](#policyrecommendationdeploymentpodtemplatespec)_ | (Optional) Template describes the PolicyRecommendation Deployment pod that will be created. | - - -### PolicyRecommendationSpec - - - -PolicyRecommendationSpec defines configuration for the Calico Enterprise Policy Recommendation -service. 
- -_Appears in:_ -- [PolicyRecommendation](#policyrecommendation) - -| Field | Description | -| --- | --- | -| `policyRecommendationDeployment` _[PolicyRecommendationDeployment](#policyrecommendationdeployment)_ | (Optional) PolicyRecommendation configures the PolicyRecommendation Deployment. | - - -### PolicyRecommendationStatus - - - -PolicyRecommendationStatus defines the observed state of Tigera policy recommendation. - -_Appears in:_ -- [PolicyRecommendation](#policyrecommendation) - -| Field | Description | -| --- | --- | -| `state` _string_ | State provides user-readable status. | - - -### ProductVariant - -_Underlying type:_ _string_ - -ProductVariant represents the variant of the product. - -One of: Calico, TigeraSecureEnterprise - -_Appears in:_ -- [InstallationSpec](#installationspec) -- [InstallationStatus](#installationstatus) - - - -### Prometheus - - - - - -_Appears in:_ -- [MonitorSpec](#monitorspec) - -| Field | Description | -| --- | --- | -| `spec` _[PrometheusSpec](#prometheusspec)_ | (Optional) Spec is the specification of the Prometheus. | - - -### PrometheusContainer - - - -PrometheusContainer is a Prometheus container. - -_Appears in:_ -- [CommonPrometheusFields](#commonprometheusfields) - -| Field | Description | -| --- | --- | -| `name` _string_ | Name is an enum which identifies the Prometheus Deployment container by name.
    Supported values are: authn-proxy | -| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core)_ | (Optional) Resources allows customization of limits and requests for compute resources such as cpu and memory. If specified, this overrides the named Prometheus container's resources. If omitted, the Prometheus will use its default value for this container's resources. | - - -### PrometheusSpec - - - - - -_Appears in:_ -- [Prometheus](#prometheus) - -| Field | Description | -| --- | --- | -| `commonPrometheusFields` _[CommonPrometheusFields](#commonprometheusfields)_ | CommonPrometheusFields are the options available to both the Prometheus server and agent. | - - -### PromptType - -_Underlying type:_ _string_ - -PromptType is a value that specifies whether the identity provider prompts the end user for re-authentication and -consent. -One of: None, Login, Consent, SelectAccount. - -_Validation:_ -- Enum: [None Login Consent SelectAccount] - - -_Appears in:_ -- [AuthenticationOIDC](#authenticationoidc) - -| Value | Description | -| --- | --- | -| `None` | The identity provider must not display any authentication or consent user interface pages. | -| `Login` | The identity provider should prompt the end user for reauthentication. | -| `Consent` | The identity provider should prompt the end user for consent before returning information to the client. | -| `SelectAccount` | The identity provider should prompt the end user to select a user account. | - - -### Provider - -_Underlying type:_ _string_ - -Provider represents a particular provider or flavor of Kubernetes. Valid options -are: EKS, GKE, AKS, RKE2, OpenShift, DockerEnterprise, TKG. - -_Appears in:_ -- [InstallationSpec](#installationspec) - - - -### Retention - - - -Retention defines how long data is retained in an Elasticsearch cluster before it is cleared. - -_Appears in:_ -- [LogStorageSpec](#logstoragespec) - -| Field | Description | -| --- | --- | -| `flows` _integer_ | (Optional) Flows configures the retention period for flow logs, in days. Logs written on a day that started at least this long ago are removed. To keep logs for at least x days, use a retention period of x+1.
    Default: 8 | -| `auditReports` _integer_ | (Optional) AuditReports configures the retention period for audit logs, in days. Logs written on a day that started at least this long ago are removed. To keep logs for at least x days, use a retention period of x+1.
    Default: 91 | -| `snapshots` _integer_ | (Optional) Snapshots configures the retention period for snapshots, in days. Snapshots are periodic captures of resources which along with audit events are used to generate reports. Consult the Compliance Reporting documentation for more details on snapshots. Logs written on a day that started at least this long ago are removed. To keep logs for at least x days, use a retention period of x+1.
    Default: 91 | -| `complianceReports` _integer_ | (Optional) ComplianceReports configures the retention period for compliance reports, in days. Reports are output from the analysis of the system state and audit events for compliance reporting. Consult the Compliance Reporting documentation for more details on reports. Logs written on a day that started at least this long ago are removed. To keep logs for at least x days, use a retention period of x+1.
    Default: 91 | -| `dnsLogs` _integer_ | (Optional) DNSLogs configures the retention period for DNS logs, in days. Logs written on a day that started at least this long ago are removed. To keep logs for at least x days, use a retention period of x+1.
    Default: 8 | -| `bgpLogs` _integer_ | (Optional) BGPLogs configures the retention period for BGP logs, in days. Logs written on a day that started at least this long ago are removed. To keep logs for at least x days, use a retention period of x+1.
    Default: 8 | - - -### S3StoreSpec - - - -S3StoreSpec defines configuration for exporting logs to Amazon S3. - -_Appears in:_ -- [AdditionalLogStoreSpec](#additionallogstorespec) - -| Field | Description | -| --- | --- | -| `region` _string_ | AWS Region of the S3 bucket | -| `bucketName` _string_ | Name of the S3 bucket to send logs | -| `bucketPath` _string_ | Path in the S3 bucket where to send logs | - - -### SNIMatch - - - - - -_Appears in:_ -- [TLSPassThroughRouteSpec](#tlspassthroughroutespec) - -| Field | Description | -| --- | --- | -| `serverName` _string_ | ServerName is used to match the server name for the request. | - - -### ServiceMonitor - - - - - -_Appears in:_ -- [ExternalPrometheus](#externalprometheus) - -| Field | Description | -| --- | --- | -| `labels` _object (keys:string, values:string)_ | Labels are the metadata.labels of the ServiceMonitor. When combined with spec.serviceMonitorSelector.matchLabels on your prometheus instance, the service monitor will automatically be picked up.
    Default: k8s-app=tigera-prometheus | -| `endpoints` _[Endpoint](#endpoint) array_ | The endpoints to scrape. This struct contains a subset of the Endpoint as defined in the prometheus docs. Fields related to connecting to our Prometheus server are automatically set by the operator. | - - -### SplunkStoreSpec - - - -SplunkStoreSpec defines configuration for exporting logs to splunk. - -_Appears in:_ -- [AdditionalLogStoreSpec](#additionallogstorespec) - -| Field | Description | -| --- | --- | -| `endpoint` _string_ | Location for splunk's http event collector end point. example `https://1.2.3.4:8088` | - - -### StatusConditionType - -_Underlying type:_ _string_ - -StatusConditionType is a type of condition that may apply to a particular component. - -_Appears in:_ -- [TigeraStatusCondition](#tigerastatuscondition) - -| Value | Description | -| --- | --- | -| `Available` | Available indicates that the component is healthy. | -| `Progressing` | Progressing means that the component is in the process of being installed or upgraded. | -| `Degraded` | Degraded means the component is not operating as desired and user action is required. | -| `Ready` | Ready indicates that the component is healthy and ready.it is identical to Available and used in Status conditions for CRs. | - - -### Sysctl - - - - - -_Appears in:_ -- [CalicoNetworkSpec](#caliconetworkspec) - -| Field | Description | -| --- | --- | -| `key` _string_ | | -| `value` _string_ | | - - -### SyslogLogType - -_Underlying type:_ _string_ - -SyslogLogType represents the allowable log types for syslog. -Allowable values are Audit, DNS, Flows and IDSEvents. -* Audit corresponds to audit logs for both Kubernetes resources and Enterprise custom resources. -* DNS corresponds to DNS logs generated by Calico node. -* Flows corresponds to flow logs generated by Calico node. -* IDSEvents corresponds to event logs for the intrusion detection system (anomaly detection, suspicious IPs, suspicious domains and global alerts). - -_Validation:_ -- Enum: [Audit DNS Flows IDSEvents] - - -_Appears in:_ -- [SyslogStoreSpec](#syslogstorespec) - -| Value | Description | -| --- | --- | -| `Audit` | | -| `DNS` | | -| `Flows` | | -| `L7` | | -| `IDSEvents` | | - - -### SyslogStoreSpec - - - -SyslogStoreSpec defines configuration for exporting logs to syslog. - -_Appears in:_ -- [AdditionalLogStoreSpec](#additionallogstorespec) - -| Field | Description | -| --- | --- | -| `endpoint` _string_ | Location of the syslog server. example: tcp://1.2.3.4:601 | -| `packetSize` _integer_ | (Optional) PacketSize defines the maximum size of packets to send to syslog. In general this is only needed if you notice long logs being truncated.
    Default: 1024 | -| `logTypes` _[SyslogLogType](#sysloglogtype) array_ | If no values are provided, the list will be updated to include log types Audit, DNS and Flows.
    Default: Audit, DNS, Flows | -| `encryption` _[EncryptionOption](#encryptionoption)_ | (Optional) Encryption configures traffic encryption to the Syslog server.
    Default: None | - - -### TLS - - - - - -_Appears in:_ -- [ManagementClusterSpec](#managementclusterspec) - -| Field | Description | -| --- | --- | -| `secretName` _string_ | (Optional) SecretName indicates the name of the secret in the tigera-operator namespace that contains the private key and certificate that the management cluster uses when it listens for incoming connections. When set to tigera-management-cluster-connection voltron will use the same cert bundle which Guardian client certs are signed with. When set to manager-tls, voltron will use the same cert bundle which Manager UI is served with. This cert bundle must be a publicly signed cert created by the user. Note that Tigera Operator will generate a self-signed manager-tls cert if one does not exist, and use of that cert will result in Guardian being unable to verify Voltron's identity. If changed on a running cluster with connected managed clusters, all managed clusters will disconnect as they will no longer be able to verify Voltron's identity. To reconnect existing managed clusters, change the tls.ca of the managed clusters' ManagementClusterConnection resource. One of: tigera-management-cluster-connection, manager-tls
    Default: tigera-management-cluster-connection | - - - - -### TLSPassThroughRouteSpec - - - - - -_Appears in:_ -- [TLSPassThroughRoute](#tlspassthroughroute) - -| Field | Description | -| --- | --- | -| `target` _[TargetType](#targettype)_ | | -| `sniMatch` _[SNIMatch](#snimatch)_ | SNIMatch is used to match requests based on the server name for the intended destination server. Matching requests will be proxied to the Destination. | -| `destination` _string_ | Destination is the destination url to proxy the request to. | - - - - -### TLSTerminatedRouteSpec - - - - - -_Appears in:_ -- [TLSTerminatedRoute](#tlsterminatedroute) - -| Field | Description | -| --- | --- | -| `target` _[TargetType](#targettype)_ | | -| `pathMatch` _[PathMatch](#pathmatch)_ | PathMatch is used to match requests based on what's in the path. Matching requests will be proxied to the Destination defined in this structure. | -| `destination` _string_ | Destination is the destination URL where matching traffic is routed to. | -| `caBundle` _[ConfigMapKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#configmapkeyselector-v1-core)_ | CABundle is where we read the CA bundle from to authenticate the destination (if non-empty) | -| `mtlsCert` _[SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#secretkeyselector-v1-core)_ | (Optional) ForwardingMTLSCert is the certificate used for mTLS between voltron and the destination. Either both ForwardingMTLSCert and ForwardingMTLSKey must be specified, or neither can be specified. | -| `mtlsKey` _[SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#secretkeyselector-v1-core)_ | (Optional) ForwardingMTLSKey is the key used for mTLS between voltron and the destination. Either both ForwardingMTLSCert and ForwardingMTLSKey must be specified, or neither can be specified. | -| `unauthenticated` _boolean_ | (Optional) Unauthenticated says whether the request should go through authentication. This is only applicable if the Target is UI. | - - -### TargetType - -_Underlying type:_ _string_ - - - -_Appears in:_ -- [TLSPassThroughRouteSpec](#tlspassthroughroutespec) -- [TLSTerminatedRouteSpec](#tlsterminatedroutespec) - -| Value | Description | -| --- | --- | -| `UpstreamTunnel` | | -| `UI` | | - - -### TigeraStatus - - - -TigeraStatus represents the most recently observed status for Calico or a Calico Enterprise functional area. - -| Field | Description | -| --- | --- | -| `apiVersion` _string_ | `operator.tigera.io/v1` | -| `kind` _string_ | `TigeraStatus` | -| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | -| `spec` _[TigeraStatusSpec](#tigerastatusspec)_ | | -| `status` _[TigeraStatusStatus](#tigerastatusstatus)_ | | - - -### TigeraStatusCondition - - - -TigeraStatusCondition represents a condition attached to a particular component. - -_Appears in:_ -- [TigeraStatusStatus](#tigerastatusstatus) - -| Field | Description | -| --- | --- | -| `type` _[StatusConditionType](#statusconditiontype)_ | The type of condition. May be Available, Progressing, or Degraded. | -| `status` _[ConditionStatus](#conditionstatus)_ | The status of the condition. May be True, False, or Unknown. 
| -| `lastTransitionTime` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#time-v1-meta)_ | The timestamp representing the start time for the current status. | -| `reason` _string_ | A brief reason explaining the condition. | -| `message` _string_ | Optionally, a detailed message providing additional context. | -| `observedGeneration` _integer_ | (Optional) observedGeneration represents the generation that the condition was set based upon. For instance, if generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. | - - - - -### TigeraStatusSpec - - - -TigeraStatusSpec defines the desired state of TigeraStatus - -_Appears in:_ -- [TigeraStatus](#tigerastatus) - - - -### TigeraStatusStatus - - - -TigeraStatusStatus defines the observed state of TigeraStatus - -_Appears in:_ -- [TigeraStatus](#tigerastatus) - -| Field | Description | -| --- | --- | -| `conditions` _[TigeraStatusCondition](#tigerastatuscondition) array_ | Conditions represents the latest observed set of conditions for this component. A component may be one or more of Available, Progressing, or Degraded. | - - -### TyphaAffinity - - - -Deprecated. Please use TyphaDeployment instead. -TyphaAffinity allows configuration of node affinity characteristics for Typha pods. - -_Appears in:_ -- [InstallationSpec](#installationspec) - -| Field | Description | -| --- | --- | -| `nodeAffinity` _[NodeAffinity](#nodeaffinity)_ | (Optional) NodeAffinity describes node affinity scheduling rules for typha. | - - -### TyphaDeployment - - - -TyphaDeployment is the configuration for the typha Deployment. - -_Appears in:_ -- [InstallationSpec](#installationspec) - -| Field | Description | -| --- | --- | -| `metadata` _[Metadata](#metadata)_ | (Optional) Refer to Kubernetes API documentation for fields of `metadata`. | -| `spec` _[TyphaDeploymentSpec](#typhadeploymentspec)_ | (Optional) Spec is the specification of the typha Deployment. | - - -### TyphaDeploymentContainer - - - -TyphaDeploymentContainer is a typha Deployment container. - -_Appears in:_ -- [TyphaDeploymentPodSpec](#typhadeploymentpodspec) - -| Field | Description | -| --- | --- | -| `name` _string_ | Name is an enum which identifies the typha Deployment container by name.
    Supported values are: calico-typha | -| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core)_ | (Optional) Resources allows customization of limits and requests for compute resources such as cpu and memory. If specified, this overrides the named typha Deployment container's resources. If omitted, the typha Deployment will use its default value for this container's resources. If used in conjunction with the deprecated ComponentResources, then this value takes precedence. | - - -### TyphaDeploymentInitContainer - - - -TyphaDeploymentInitContainer is a typha Deployment init container. - -_Appears in:_ -- [TyphaDeploymentPodSpec](#typhadeploymentpodspec) - -| Field | Description | -| --- | --- | -| `name` _string_ | Name is an enum which identifies the typha Deployment init container by name.
    Supported values are: typha-certs-key-cert-provisioner | -| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core)_ | (Optional) Resources allows customization of limits and requests for compute resources such as cpu and memory. If specified, this overrides the named typha Deployment init container's resources. If omitted, the typha Deployment will use its default value for this init container's resources. If used in conjunction with the deprecated ComponentResources, then this value takes precedence. | - - -### TyphaDeploymentPodSpec - - - -TyphaDeploymentPodSpec is the typha Deployment's PodSpec. - -_Appears in:_ -- [TyphaDeploymentPodTemplateSpec](#typhadeploymentpodtemplatespec) - -| Field | Description | -| --- | --- | -| `initContainers` _[TyphaDeploymentInitContainer](#typhadeploymentinitcontainer) array_ | (Optional) InitContainers is a list of typha init containers. If specified, this overrides the specified typha Deployment init containers. If omitted, the typha Deployment will use its default values for its init containers. | -| `containers` _[TyphaDeploymentContainer](#typhadeploymentcontainer) array_ | (Optional) Containers is a list of typha containers. If specified, this overrides the specified typha Deployment containers. If omitted, the typha Deployment will use its default values for its containers. | -| `affinity` _[Affinity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#affinity-v1-core)_ | (Optional) Affinity is a group of affinity scheduling rules for the typha pods. If specified, this overrides any affinity that may be set on the typha Deployment. If omitted, the typha Deployment will use its default value for affinity. If used in conjunction with the deprecated TyphaAffinity, then this value takes precedence.
    WARNING: Please note that this field will override the default calico-typha Deployment affinity. | -| `nodeSelector` _object (keys:string, values:string)_ | NodeSelector is the calico-typha pod's scheduling constraints. If specified, each of the key/value pairs are added to the calico-typha Deployment nodeSelector provided the key does not already exist in the object's nodeSelector. If omitted, the calico-typha Deployment will use its default value for nodeSelector.
    WARNING: Please note that this field will modify the default calico-typha Deployment nodeSelector. | -| `terminationGracePeriodSeconds` _integer_ | (Optional) Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds. | -| `topologySpreadConstraints` _[TopologySpreadConstraint](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#topologyspreadconstraint-v1-core) array_ | (Optional) TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. All topologySpreadConstraints are ANDed. | -| `tolerations` _[Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#toleration-v1-core) array_ | (Optional) Tolerations is the typha pod's tolerations. If specified, this overrides any tolerations that may be set on the typha Deployment. If omitted, the typha Deployment will use its default value for tolerations.
    WARNING: Please note that this field will override the default calico-typha Deployment tolerations. | - - -### TyphaDeploymentPodTemplateSpec - - - -TyphaDeploymentPodTemplateSpec is the typha Deployment's PodTemplateSpec - -_Appears in:_ -- [TyphaDeploymentSpec](#typhadeploymentspec) - -| Field | Description | -| --- | --- | -| `metadata` _[Metadata](#metadata)_ | (Optional) Refer to Kubernetes API documentation for fields of `metadata`. | -| `spec` _[TyphaDeploymentPodSpec](#typhadeploymentpodspec)_ | (Optional) Spec is the typha Deployment's PodSpec. | - - -### TyphaDeploymentSpec - - - -TyphaDeploymentSpec defines configuration for the typha Deployment. - -_Appears in:_ -- [TyphaDeployment](#typhadeployment) - -| Field | Description | -| --- | --- | -| `minReadySeconds` _integer_ | (Optional) MinReadySeconds is the minimum number of seconds for which a newly created Deployment pod should be ready without any of its container crashing, for it to be considered available. If specified, this overrides any minReadySeconds value that may be set on the typha Deployment. If omitted, the typha Deployment will use its default value for minReadySeconds. | -| `template` _[TyphaDeploymentPodTemplateSpec](#typhadeploymentpodtemplatespec)_ | (Optional) Template describes the typha Deployment pod that will be created. | -| `strategy` _[TyphaDeploymentStrategy](#typhadeploymentstrategy)_ | (Optional) The deployment strategy to use to replace existing pods with new ones. | - - -### TyphaDeploymentStrategy - - - -TyphaDeploymentStrategy describes how to replace existing pods with new ones. Only RollingUpdate is supported -at this time so the Type field is not exposed. - -_Appears in:_ -- [TyphaDeploymentSpec](#typhadeploymentspec) - -| Field | Description | -| --- | --- | -| `rollingUpdate` _[RollingUpdateDeployment](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#rollingupdatedeployment-v1-apps)_ | (Optional) Rolling update config params. Present only if DeploymentStrategyType = RollingUpdate. to be. | - - -### UserMatch - - - -UserMatch when the value of a UserAttribute and a GroupAttribute match, a user belongs to the group. - -_Appears in:_ -- [GroupSearch](#groupsearch) - -| Field | Description | -| --- | --- | -| `userAttribute` _string_ | The attribute of a user that links it to a group. | -| `groupAttribute` _string_ | The attribute of a group that links it to a user. | - - -### UserSearch - - - -User entry search configuration to match the credentials with a user. - -_Appears in:_ -- [AuthenticationLDAP](#authenticationldap) - -| Field | Description | -| --- | --- | -| `baseDN` _string_ | BaseDN to start the search from. For example "cn=users,dc=example,dc=com" | -| `filter` _string_ | (Optional) Optional filter to apply when searching the directory. For example "(objectClass=person)" | -| `nameAttribute` _string_ | (Optional) A mapping of the attribute that is used as the username. This attribute can be used to apply RBAC to a user.
    Default: uid | - - -### WAFStatusType - -_Underlying type:_ _string_ - - - -_Appears in:_ -- [ApplicationLayerSpec](#applicationlayerspec) - -| Value | Description | -| --- | --- | -| `Disabled` | | -| `Enabled` | | - - -### WindowsDataplaneOption - -_Underlying type:_ _string_ - - - -_Validation:_ -- Enum: [HNS Disabled] - - -_Appears in:_ -- [CalicoNetworkSpec](#caliconetworkspec) - -| Value | Description | -| --- | --- | -| `Disabled` | | -| `HNS` | | - - -### WindowsNodeSpec - - - - - -_Appears in:_ -- [InstallationSpec](#installationspec) - -| Field | Description | -| --- | --- | -| `cniBinDir` _string_ | (Optional) CNIBinDir is the path to the CNI binaries directory on Windows, it must match what is used as 'bin_dir' under [plugins] [plugins."io.containerd.grpc.v1.cri"] [plugins."io.containerd.grpc.v1.cri".cni] on the containerd 'config.toml' file on the Windows nodes. | -| `cniConfigDir` _string_ | (Optional) CNIConfigDir is the path to the CNI configuration directory on Windows, it must match what is used as 'conf_dir' under [plugins] [plugins."io.containerd.grpc.v1.cri"] [plugins."io.containerd.grpc.v1.cri".cni] on the containerd 'config.toml' file on the Windows nodes. | -| `cniLogDir` _string_ | (Optional) CNILogDir is the path to the Calico CNI logs directory on Windows. | -| `vxlanMACPrefix` _string_ | (Optional) VXLANMACPrefix is the prefix used when generating MAC addresses for virtual NICs | -| `vxlanAdapter` _string_ | (Optional) VXLANAdapter is the Network Adapter used for VXLAN, leave blank for primary NIC | - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/installation/_crd-ref-docs/config.yaml b/calico-enterprise_versioned_docs/version-3.19-2/reference/installation/_crd-ref-docs/config.yaml deleted file mode 100644 index fd123b9c48..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/installation/_crd-ref-docs/config.yaml +++ /dev/null @@ -1,20 +0,0 @@ -processor: - # RE2 regular expressions describing types that should be excluded from the generated documentation. - ignoreTypes: - - "List$" - - "Tenant*" - # RE2 regular expressions describing type fields that should be excluded from the generated documentation. - ignoreFields: - - "TypeMeta$" - customMarkers: - - name: "optional" - target: field - -render: - # Version of Kubernetes to use when generating links to Kubernetes API documentation. - kubernetesVersion: 1.32 - # Generate better link for known types - #knownTypes: - # - name: SecretObjectReference - # package: sigs.k8s.io/gateway-api/apis/v1beta1 - # link: https://gateway-api.sigs.k8s.io/references/spec/#gateway.networking.k8s.io/v1beta1.SecretObjectReference diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/installation/_crd-ref-docs/templates/gv_details.tpl b/calico-enterprise_versioned_docs/version-3.19-2/reference/installation/_crd-ref-docs/templates/gv_details.tpl deleted file mode 100644 index 3bd48c66bc..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/installation/_crd-ref-docs/templates/gv_details.tpl +++ /dev/null @@ -1,21 +0,0 @@ -{{- define "gvDetails" -}} -{{- $gv := . -}} - -{/* vale off */} - -## {{ $gv.GroupVersionString }} - -{{ $gv.Doc }} - -{{- if $gv.Kinds }} -Resource Types -{{- range $gv.SortedKinds }} -- {{ $gv.TypeForKind . | markdownRenderTypeLink }} -{{- end }} -{{ end }} - -{{ range $gv.SortedTypes }} -{{ template "type" . 
}} -{{ end }} - -{{- end -}} diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/installation/_crd-ref-docs/templates/gv_list.tpl b/calico-enterprise_versioned_docs/version-3.19-2/reference/installation/_crd-ref-docs/templates/gv_list.tpl deleted file mode 100644 index 30ad594057..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/installation/_crd-ref-docs/templates/gv_list.tpl +++ /dev/null @@ -1,13 +0,0 @@ -{{- define "gvList" -}} -{{- $groupVersions := . -}} - -Packages -{{- range $groupVersions }} -- {{ markdownRenderGVLink . }} -{{- end }} - -{{ range $groupVersions }} -{{ template "gvDetails" . -}} -{{ end }} - -{{- end -}} diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/installation/_crd-ref-docs/templates/type.tpl b/calico-enterprise_versioned_docs/version-3.19-2/reference/installation/_crd-ref-docs/templates/type.tpl deleted file mode 100644 index bcf45a0967..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/installation/_crd-ref-docs/templates/type.tpl +++ /dev/null @@ -1,51 +0,0 @@ -{{- define "type" -}} -{{- $type := . -}} -{{- if markdownShouldRenderType $type -}} - -### {{ $type.Name }} - -{{ if $type.IsAlias }}_Underlying type:_ _{{ markdownRenderTypeLink $type.UnderlyingType }}_{{ end }} - -{{ $type.Doc }} - -{{- if $type.Validation }} - -_Validation:_ -{{- range $type.Validation }} -- {{ . }} -{{ end }} -{{- end -}} - -{{- if $type.References }} - -_Appears in:_ -{{- range $type.SortedReferences }} -- {{ markdownRenderTypeLink . }} -{{- end }} -{{- end }} - -{{ if $type.Members -}} -| Field | Description | -| --- | --- | -{{ if $type.GVK -}} -| `apiVersion` _string_ | `{{ $type.GVK.Group }}/{{ $type.GVK.Version }}` | -| `kind` _string_ | `{{ $type.GVK.Kind }}` | -{{ end -}} - -{{ range $type.Members -}} -| `{{ .Name }}` _{{ markdownRenderType .Type }}_ | {{ with .Markers.optional -}}(Optional) {{ end -}} {{ template "type_members" . }} | -{{ end -}} - -{{ end -}} - -{{ if $type.EnumValues -}} -| Value | Description | -| --- | --- | -{{ range $type.EnumValues -}} -| `{{ .Name }}` | {{ markdownRenderFieldDoc .Doc }} | -{{ end -}} -{{ end -}} - - -{{- end -}} -{{- end -}} diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/installation/_crd-ref-docs/templates/type_members.tpl b/calico-enterprise_versioned_docs/version-3.19-2/reference/installation/_crd-ref-docs/templates/type_members.tpl deleted file mode 100644 index 041758a872..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/installation/_crd-ref-docs/templates/type_members.tpl +++ /dev/null @@ -1,8 +0,0 @@ -{{- define "type_members" -}} -{{- $field := . -}} -{{- if eq $field.Name "metadata" -}} -Refer to Kubernetes API documentation for fields of `metadata`. -{{- else -}} -{{ markdownRenderFieldDoc $field.Doc }} -{{- end -}} -{{- end -}} diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/installation/api.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/installation/api.mdx deleted file mode 100644 index 20777917c4..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/installation/api.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: Installation API reference ---- - -# Installation reference - -import API from '@site/calico-enterprise_versioned_docs/version-3.19-2/reference/installation/_api.mdx'; - -The Kubernetes resources below configure $[prodname] installation when using the operator. 
Each resource is responsible for installing and configuring a different subsystem of $[prodname] during installation. Most options can be modified on a running cluster using `kubectl`. - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/installation/config.json b/calico-enterprise_versioned_docs/version-3.19-2/reference/installation/config.json deleted file mode 100644 index 5c4f9225cb..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/installation/config.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "hideMemberFields": [ - "TypeMeta" - ], - "hideTypePatterns": [ - "ParseError$", - "List$" - ], - "externalPackages": [ - { - "typeMatchPrefix": "^k8s\\.io/apimachinery/pkg/apis/meta/v1\\.Duration$", - "docsURLTemplate": "https://godoc.org/k8s.io/apimachinery/pkg/apis/meta/v1#Duration" - }, - { - "typeMatchPrefix": "^k8s\\.io/(api|apimachinery/pkg/apis)/", - "docsURLTemplate": "https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#{{lower .TypeIdentifier}}-{{arrIndex .PackageSegments -1}}-{{arrIndex .PackageSegments -2}}" - }, - { - "typeMatchPrefix": "^github\\.com/knative/pkg/apis/duck/", - "docsURLTemplate": "https://godoc.org/github.com/knative/pkg/apis/duck/{{arrIndex .PackageSegments -1}}#{{.TypeIdentifier}}" - } - ], - "typeDisplayNamePrefixOverrides": { - "k8s.io/api/": "Kubernetes ", - "k8s.io/apimachinery/pkg/apis/": "Kubernetes " - }, - "markdownDisabled": false -} diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/installation/helm_customization.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/installation/helm_customization.mdx deleted file mode 100644 index 7f1f0bd045..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/installation/helm_customization.mdx +++ /dev/null @@ -1,130 +0,0 @@ ---- -description: Helm installation reference ---- - -# Helm installation reference - -You can customize the following resources and settings during $[prodname] Helm-based installation using the file, `values.yaml`. - -- [Installation](api.mdx#installationspec) -- [Api server](api.mdx#apiserverspec) -- [Compliance](api.mdx#compliancespec) -- [Intrusion detection](api.mdx#intrusiondetectionspec) -- [Log collector](api.mdx#logcollectorspec) -- [Log storage](api.mdx#logstoragespec) -- [Manager](api.mdx#managerspec) -- [Monitor](api.mdx#monitorspec) -- [Policy recommendation](api.mdx#policyrecommendationspec) -- [Authentication](api.mdx#authenticationspec) -- [Application layer](api.mdx#applicationlayerspec) -- [Amazon cloud integration](api.mdx#amazoncloudintegrationspec) -- [Default felix configuration](../resources/felixconfig.mdx#spec) - -:::note -If you customize felix configuration when you install $[prodname], the `v1 apiVersion` is used. However, when you apply -felix configuration customization after installation (when the tigera-apiserver is running), use the `v3 apiVersion`. -::: - -### Sample values.yaml - -Here is a sample `values.yaml` file with settings for custom resources. You must enable the custom resource using -`enabled: true` to provide custom configurations; custom resources set to false are ignored. 
- -```yaml -installation: - enabled: true - - -apiServer: - enabled: true - - -intrusionDetection: - enabled: true - - -logCollector: - enabled: true - - -logStorage: - enabled: true - nodes: - count: 1 - - -manager: - enabled: true - - -monitor: - enabled: true - - -compliance: - enabled: true - - -policyRecommendation: - enabled: true - - -authentication: - enabled: false - - -applicationLayer: - enabled: false - - -amazonCloudIntegration: - enabled: false - - -defaultFelixConfiguration: - enabled: false - -``` - -## Common customizations - -Common customizations that you might want to configure are number of replicas, pod affinity, and encryption using WireGuard. - -### Number of replicas -This setting defines the number of replicas for $[prodname] components that can run simultaneously in multiple instances. -To configure this setting, see [controlPlaneReplicas](api.mdx#installationspec). -The components for the replicas are: - -- tigera-manager -- tigera-apiserver -- tigera-dex -- tigera-kibana -- es-gateway - -To set a specific replica for these components, you must provide the setting in your custom values.yaml file: -```yaml -installation: - enable: true - controlPlaneReplicas: 3 # desired number of replicas for the components listed above. -``` - -### Pod affinity -You can define pod affinity for the following Tigera components. Update the appropriate custom resource in your custom `values.yaml`. - -- tigera-apiserver: through ApiServer resource -- calico-nodes: through CalicoNodeDaemonSet property in the Installation resource -- calico-kube-controllers: through CalicoKubeControllersDeployment property in the Installation resource -- compliance deployment pods (compliance-snapshotter, compliance-server, compliance-controller, compliance-benchmarker, -compliance-scaleloader, compliance-reporter): through Compliance resource -- elasticsearch pods: through LogStorage resource - for more info on this option please checkout [Advanced Node Scheduling](../../operations/logstorage/advanced-node-scheduling.mdx) - -### Encryption using WireGuard -[WireGuard encryption](../../compliance/encrypt-cluster-pod-traffic#enable-wireguard-for-a-cluster) -is configured in the FelixConfiguration. To set encryption for your cluster, update the `values.yaml` file. - -```yaml -defaultFelixConfiguration: - enabled: true - wireguardEnabled: true - wireguardEnabledV6: true -``` \ No newline at end of file diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/installation/tigerastatus.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/installation/tigerastatus.mdx deleted file mode 100644 index aa9d68b5ef..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/installation/tigerastatus.mdx +++ /dev/null @@ -1,64 +0,0 @@ ---- -description: Descriptions of Tigera Status fields ---- - -# TigeraStatus - -Installing $[prodname] on your Kubernetes cluster is managed by the Tigera Operator. The operator gets its configuration from the API resources and communicates through the Kubernetes API. The operator is deployed as a Deployment in the `tigera-operator` namespace, and records status in the `tigerastatus` resource. 
Depending on how your cluster is configured, you can get `tigerastatus` on these resources: - -- apiserver -- applicationlayer -- authentication -- calico -- calico-windows -- compliance -- egressgateway -- intrusion detection -- log-collector -- log-storage -- log-storage-access -- log-storage-elastic -- log-storage-kubecontrollers -- log-storage-secrets -- log-storage-users -- manager -- management-cluster-connection -- policy-recommendation -- secrets - -## Get TigeraStatus - -To get tigerastatus for a resource, run the following command: - - `kubectl get tigerastatus` - -For detailed output (including messages and further details on any non-functioning components), run either of the following commands: - - `kubectl describe tigerastatus X` - - `kubectl get tigerastatus X -o yaml` - -## Log storage - -Log storage provides persistent storage for $[prodname] Elasticsearch logs (flow, dns, l7, bpg, audit, etc.), and compliance reports. - -To check log storage status, run the following command: - - `kubectl get tigerastatus log-storage` - -The following table lists the type of information displayed in TigerStatus for log storage. - -| TigeraStatus name | Displays status of... | Standalone | Managed cluster | Management cluster | -| --------------------------- | ------------------------------------------------------------ | ---------------------- | ------------------ | --------------------------- | -| log-storage | LogStorage subsystem. |
 ✓ | | ✓ |
-| log-storage-elastic | Elasticsearch cluster and Kibana. | ✓ | | ✓ |
-| log-storage-kubecontrollers | Elasticsearch gateway and kube-controllers. | ✓ | | ✓ |
-| log-storage-access | $[prodname] log storage API service. | ✓ | | ✓ |
-| log-storage-secrets | Required security credentials and CA certificates for log storage components to operate. | ✓ | | ✓ |
-| log-storage-users | Users provisioned to use Elasticsearch. | ✓ | ✓
    | | - - - - - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/public-cloud/aws.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/public-cloud/aws.mdx deleted file mode 100644 index e9c53a7980..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/public-cloud/aws.mdx +++ /dev/null @@ -1,176 +0,0 @@ ---- -description: Advantages of using Calico Enterprise in AWS. ---- - -# Amazon Web Services - -$[prodname] provides the following advantages when running in Amazon Web Services (AWS): - -- **Network Policy for Containers**: $[prodname] provides fine-grained network security policy for individual containers. -- **No Overlays**: Within each VPC subnet $[prodname] doesn't need an overlay, which means high performance networking for your containers. -- **No 50 Node Limit**: $[prodname] allows you to surpass the 50 node limit, which exists as a consequence of the [AWS 50 route limit](http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Appendix_Limits.html#vpc-limits-route-tables) when using the VPC routing table. - -## Routing traffic within a single VPC subnet - -Since $[prodname] assigns IP addresses outside the range used by AWS for EC2 instances, you must disable AWS src/dst -checks on each EC2 instance in your cluster -[as described in the AWS documentation](http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_NAT_Instance.html#EIP_Disable_SrcDestCheck). This -allows $[prodname] to route traffic natively within a single VPC subnet without using an overlay or any of the limited VPC routing table entries. - -## Routing traffic across different VPC subnets / VPCs - -If you need to split your deployment across multiple AZs for high availability then each AZ will have its own VPC subnet. To -use $[prodname] across multiple different VPC subnets or [peered VPCs](http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/vpc-peering.html), -in addition to disabling src/dst checks as described above you must also enable IPIP encapsulation and outgoing NAT -on your $[prodname] IP pools. - -See the [IP pool configuration reference](../resources/ippool.mdx) -for information on how to configure $[prodname] IP pools. - -By default, $[prodname]'s IPIP encapsulation applies to all container-to-container traffic. However, -encapsulation is only required for container traffic that crosses a VPC subnet boundary. For better -performance, you can configure $[prodname] to perform IPIP encapsulation only across VPC subnet boundaries. - -To enable the "CrossSubnet" IPIP feature, configure your $[prodname] IP pool resources -to enable IPIP and set the mode to "CrossSubnet". - -:::note - -This feature was introduced in $[prodname] v2.1, if your deployment was created with -an older version of $[prodname], or if you if you are unsure whether your deployment -is configured correctly, follow the [Configuring IP-in-IP guide](../../networking/configuring/vxlan-ipip.mdx) -which discusses this in more detail. - -::: - -The following `kubectl` command will create or modify an IPv4 pool with -CIDR 192.168.0.0/16 using IPIP mode `CrossSubnet`. Adjust the pool CIDR for your deployment. 
- -```bash -kubectl apply -f - < diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/alertexception.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/alertexception.mdx deleted file mode 100644 index 29883a0ec1..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/alertexception.mdx +++ /dev/null @@ -1,78 +0,0 @@ ---- -description: API for this Calico Enterprise resource. ---- - -# Alert exception - -An alert exception resource is a filter that hides specific alerts from users in the $[prodname] web console. -You can filter alerts by time range or indefinitely. If an alert exception expires, alerts will reappear in the web console. - -For `kubectl` [commands](https://kubernetes.io/docs/reference/kubectl/overview/), -the following case-insensitive aliases can be used to specify the resource type on the CLI: -`alertexception.projectcalico.org`, `alertexceptions.projectcalico.org` and abbreviations such as -`alertexception.p` and `alertexceptions.p`. - -## Sample YAML - -```yaml -apiVersion: projectcalico.org/v3 -kind: AlertException -metadata: - name: sample -spec: - description: 'Sample alert exception' - selector: origin = "" and source_namespace = "" - startTime: '2022-01-02T00:00:00Z' - endTime: '2022-01-03T00:00:00Z' -``` - -## Alert exception definition - -| Field | Description | Accepted Values | Schema | -| ----- | --------------------------------- | --------------------------------------------------- | ------ | -| name | The name of this alert exception. | Alphanumeric string with optional `.`, `_`, or `-`. | string | - -### Spec - -| Field | Description | Type | Required | Acceptable Values | -| ----------- | ----------------------------------------------------------------------------------- | ----------------------- | -------- | ----------------------- | -| description | Human-readable description of the alert exception. | string | yes | -| selector | Selects alerts to filter from the $[prodname] web console queries. | string | yes | [selector](#selector) | -| startTime | Defines the start time from which this alert exception will start filtering alerts. | Date in RFC 3339 format | yes | [startTime](#starttime) | -| endTime | Defines the end time at which this alert exception will stop filtering alerts. | Date in RFC 3339 format | | [endTime](#endtime) | - -### Selector - -A selector is an expression that matches alerts based on their fields. For each alert, -`origin` and `type` fields are automatically set by the applicable component, but other fields can be empty. - -| Field | Description | -| ---------------- | ---------------------------------------------------------------------------------- | -| origin | User specified or generated names from $[prodname] threat defense components. | -| type | $[prodname] threat defense components an alert is generated from. | -| host | Name of the node that triggers this alert. | -| dest_ip | IP address of the destination pod. | -| dest_name | Name of the destination pod. | -| dest_name_aggr | Aggregated name of the destination pod. | -| dest_namespace | Namespace of the destination endpoint. A `-` means the endpoint is not namespaced. | -| source_ip | IP address of the source pod. | -| source_name | Name of the source pod. | -| source_name_aggr | Aggregated name of the source pod. | -| source_namespace | Namespace of the source endpoint. A `-` means the endpoint is not namespaced. 
| - -The selector also supports logical operators, which can be combined into larger expressions. - -| Expression | Meaning | -| ----------------------------------- | ----------------------------------------------------------------------------- | -| ` AND ` | Matches if and only if both ``, and, `` matches | -| ` OR ` | Matches if and only if either ``, or, `` matches. | - -### StartTime - -Defines the start time when this alert exception starts filtering alerts in RFC 3339 format. This value is required. - -### EndTime - -Defines the end time when this alert exception stops filtering alerts in RFC 3339 format. -If omitted, alerts are filtered indefinitely. -If the value is changed to the past, this alert exception is disabled immediately. diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/bgpconfig.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/bgpconfig.mdx deleted file mode 100644 index 019364d35c..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/bgpconfig.mdx +++ /dev/null @@ -1,89 +0,0 @@ ---- -description: API for this Calico Enterprise resource. ---- - -# BGP configuration - -A BGP configuration resource (`BGPConfiguration`) represents BGP specific configuration options for the cluster or a -specific node. - -For `kubectl` commands, the following case-insensitive aliases may be used to specify the resource type on the CLI: `bgpconfiguration.projectcalico.org`, `bgpconfigurations.projectcalico.org` as well as abbreviations such as `bgpconfiguration.p` and `bgpconfigurations.p`. - -## Sample YAML - -```yaml -apiVersion: projectcalico.org/v3 -kind: BGPConfiguration -metadata: - name: default -spec: - logSeverityScreen: Info - nodeToNodeMeshEnabled: true - nodeMeshMaxRestartTime: 120s - asNumber: 63400 - serviceClusterIPs: - - cidr: 10.96.0.0/12 - serviceExternalIPs: - - cidr: 104.244.42.129/32 - - cidr: 172.217.3.0/24 - listenPort: 178 - bindMode: NodeIP - communities: - - name: bgp-large-community - value: 63400:300:100 - prefixAdvertisements: - - cidr: 172.218.4.0/26 - communities: - - bgp-large-community - - 63400:120 -``` - -## BGP configuration definition - -### Metadata - -| Field | Description | Accepted Values | Schema | -| ----- | --------------------------------------------------------- | --------------------------------------------------- | ------ | -| name | Unique name to describe this resource instance. Required. | Alphanumeric string with optional `.`, `_`, or `-`. | string | - -- The resource with the name `default` has a specific meaning - this contains the BGP global default configuration. -- The resources with the name `node.` contain the node-specific overrides, and will be applied to the node ``. When deleting a node the BGPConfiguration resource associated with the node will also be deleted. Only prefixAdvertisements, listenPort, and logSeverityScreen can be overridden this way. 
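-
-For example, a per-node override is a `BGPConfiguration` resource named `node.<nodename>`. The following minimal sketch (using a hypothetical node named `node-1`) overrides the log level and BGP listen port for that node only; per the note above, only `prefixAdvertisements`, `listenPort`, and `logSeverityScreen` are honoured in a per-node resource:
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: BGPConfiguration
-metadata:
-  # Per-node override: the name must be node.<nodename>
-  name: node.node-1
-spec:
-  logSeverityScreen: Warning
-  listenPort: 178
-```
-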
- -### Spec - -| Field | Description | Accepted Values | Schema | Default | -| ---------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------- | ----------------------------------------------------- | --------------------------------------------------------------- | -| logSeverityScreen | Global log level | Debug, Info, Warning, Error, Fatal | string | `Info` | -| nodeToNodeMeshEnabled | Full BGP node-to-node mesh. Only valid on the global `default` BGPConfiguration. | true, false | string | true | -| asNumber | The default local AS Number that $[prodname] should use when speaking with BGP peers. Only valid on the global `default` BGPConfiguration; to set a per-node override, use the `bgp` field on the [Node resource](node.mdx). | A valid AS Number, may be specified in dotted notation. | integer/string | 64512 | -| extensions | Additional mapping of keys and values. Used for setting values in custom BGP configurations. | valid strings for both keys and values | map | | -| serviceClusterIPs | The CIDR blocks for Kubernetes Service Cluster IPs to be advertised over BGP. Only valid on the global `default` BGPConfiguration: will be ignored otherwise. | A list of valid IPv4 or IPv6 CIDR blocks. | List of `cidr: /` values. | Empty List | -| serviceExternalIPs | The CIDR blocks for Kubernetes Service External IPs to be advertised over BGP. Kubernetes Service External IPs will only be advertised if they are within one of these blocks. Only valid on the global `default` BGPConfiguration: will be ignored otherwise. | A list of valid IPv4 or IPv6 CIDR blocks. | List of `cidr: /` values. | Empty List | -| serviceLoadBalancerIPs | The CIDR blocks for Kubernetes Service status.LoadBalancer IPs to be advertised over BGP. Kubernetes LoadBalancer IPs will only be advertised if they are within one of these blocks. Only valid on the global `default` BGPConfiguration: will be ignored otherwise. | A list of valid IPv4 or IPv6 CIDR blocks. | List of `cidr: /` values. | Empty List | -| listenPort | The port where BGP protocol should listen. | A valid port number. | integer | 179 | -| bindMode | Indicates whether to listen for BGP connections on all addresses (None) or only on the node's canonical IP address Node.Spec.BGP.IPvXAddress (NodeIP). If this field is changed when calico-node is already running, the change will not take effect until calico-node is manually restarted. | None, NodeIP. | string | None | -| communities | List of BGP community names and their values, communities are not advertised unless they are used in [prefixAdvertisements](#prefixadvertisements). | | List of [communities](#communities) | -| prefixAdvertisements | List of per-prefix advertisement properties, like BGP communities. | | List of [prefixAdvertisements](#prefixadvertisements) | -| nodeMeshPassword | BGP password for the all the peerings in a full mesh configuration. 
| | [BGPPassword](bgppeer.mdx#bgppassword) | `nil` (no password) | -| nodeMeshMaxRestartTime | Restart time that is announced by BIRD in the BGP graceful restart capability and that specifies how long the neighbor would wait for the BGP session to re-establish after a restart before deleting stale routes in full mesh configurations. Note: extra care should be taken when changing this configuration, as it may break networking in your cluster. When not specified, BIRD uses the default value of 120 seconds. | `10s`, `120s`, `2m` etc. | [Duration string][parse-duration] | `nil` (empty config, BIRD will use the default value of `120s`) | -| ignoredInterfaces | List of network interfaces to be excluded when reading device routes. | A list of network interface names. The names can contain the wildcard character asterisk `*` to specify groups of interface names. | List of strings | `nil` (no extra interfaces to be ignored) | - -### communities - -| Field | Description | Accepted Values | Schema | -| ----- | -------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------ | -| name | Name or identifier for the community. This should be used in [prefixAdvertisements](#prefixadvertisements) to advertise the community value. | | string | -| value | Standard or large BGP community value. | For standard community, value should be in `aa:nn` format, where both `aa` and `nn` are 16 bit integers.
    For large community, value should be in `aa:nn:mm` format, where `aa`, `nn` and `mm` are all 32 bit integers.
    Where `aa` is an AS Number, `nn` and `mm` are per-AS identifiers. | string |
-
-### prefixAdvertisements
-
-| Field | Description | Accepted Values | Schema |
-| ----------- | ----------- | --------------- | ------ |
-| cidr | CIDR for which properties should be advertised. | `cidr: XXX.XXX.XXX.XXX/XX` | string |
-| communities | BGP communities to be advertised. | Communities can be a list of either community names already defined in [communities](#communities) or community values in the format `aa:nn` or `aa:nn:mm`.
    For standard community, value should be in `aa:nn` format, where both `aa` and `nn` are 16 bit integers.
    For large community, value should be in `aa:nn:mm` format, where `aa`, `nn` and `mm` are all 32 bit integers.
    Where `aa` is an AS Number, `nn` and `mm` are per-AS identifier. | List of string | - -## Supported operations - -| Datastore type | Create | Delete | Delete (Global `default`) | Update | Get/List | Notes | -| --------------------- | ------ | ------ | ------------------------- | ------ | -------- | ----- | -| Kubernetes API server | Yes | Yes | No | Yes | Yes | diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/bgpfilter.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/bgpfilter.mdx deleted file mode 100644 index fb21b3f34d..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/bgpfilter.mdx +++ /dev/null @@ -1,94 +0,0 @@ ---- -description: API for this Calico Enterprise resource. ---- - -# BGP Filter - -A BGP filter resource (`BGPFilter`) represents a way to control -routes imported by and exported to BGP peers specified using a -BGP peer resource (`BGPPeer`). - -The BGPFilter rules are applied sequentially: the `action` for -the **first** rule that matches is executed immediately. -If an address does not match any explicit BGP filter rule, -the default action is `Accept`. - -In order for a BGPFilter to be used in a BGP peering, its `name` -must be added to `filters` of the corresponding BGPPeer resource. - -For `kubectl` commands, the following case-sensitive aliases may -be used to specify the resource type on the CLI: `bgpfilters.crd.projectcalico.org` - -## Sample YAML - -```yaml -apiVersion: projectcalico.org/v3 -kind: BGPFilter -metadata: - name: my-filter -spec: - exportV4: - - action: Accept - matchOperator: In - cidr: 77.0.0.0/16 - source: RemotePeers - - action: Reject - interface: '*.calico' - importV4: - - action: Reject - matchOperator: NotIn - cidr: 44.0.0.0/16 - exportV6: - - action: Reject - source: RemotePeers - - action: Reject - interface: '*.calico' - importV6: - - action: Accept - matchOperator: Equal - cidr: 5000::0/64 - - action: Reject -``` - -## BGP filter definition - -### Metadata - -| Field | Description | Accepted Values | Schema | -| ----- | ------------------------------------------------------------------ | --------------------------------------------------- | ------ | -| name | Unique name to describe this resource instance. Must be specified. | Alphanumeric string with optional `.`, `_`, or `-`. 
| string |
-
-### Spec
-
-| Field | Description | Accepted Values | Schema | Default |
-| -------- | ---------------------------------- | --------------- | ----------------------------------------- | ------- |
-| exportV4 | List of v4 CIDRs and export action | | [BGP Filter Rule v4](#bgp-filter-rule-v4) | |
-| importV4 | List of v4 CIDRs and import action | | [BGP Filter Rule v4](#bgp-filter-rule-v4) | |
-| exportV6 | List of v6 CIDRs and export action | | [BGP Filter Rule v6](#bgp-filter-rule-v6) | |
-| importV6 | List of v6 CIDRs and import action | | [BGP Filter Rule v6](#bgp-filter-rule-v6) | |
-
-### BGP Filter Rule v4
-
-| Field | Description | Accepted Values | Schema | Default |
-| ------------- | ------------------------------------------ | -------------------------------------------------------------------- | ------ | ------- |
-| cidr | IPv4 range | A valid IPv4 CIDR | string | |
-| matchOperator | Method by which to match candidate routes | `In`, `NotIn`, `Equal`, `NotEqual` | string | |
-| source | Indicates the source of the route | `RemotePeers` means any route learned from other BGP peers | string | |
-| interface | String to match interface names | A valid pattern to match interfaces. "*" can be used as a wildcard. | string | |
-| action | Action to be taken for this rule | `Accept` or `Reject` | string | |
-
-### BGP Filter Rule v6
-
-| Field | Description | Accepted Values | Schema | Default |
-| ------------- | ------------------------------------------ | -------------------------------------------------------------------- | ------ | ------- |
-| cidr | IPv6 range | A valid IPv6 CIDR | string | |
-| matchOperator | Method by which to match candidate routes | `In`, `NotIn`, `Equal`, `NotEqual` | string | |
-| source | Indicates the source of the route | `RemotePeers` means any route learned from other BGP peers | string | |
-| interface | String to match interface names | A valid pattern to match interfaces. "*" can be used as a wildcard. | string | |
-| action | Action to be taken for this rule | `Accept` or `Reject` | string | |
-
-## Supported operations
-
-| Datastore type | Create/Delete | Update | Get/List | Notes |
-| --------------------- | ------------- | ------ | -------- | ----- |
-| Kubernetes API server | Yes | Yes | Yes | |
diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/bgppeer.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/bgppeer.mdx
deleted file mode 100644
index a299766c01..0000000000
--- a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/bgppeer.mdx
+++ /dev/null
@@ -1,192 +0,0 @@
----
-description: API for this Calico Enterprise resource.
----
-
-# BGP peer
-
-import Selectors from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_selectors.mdx';
-
-A BGP peer resource (`BGPPeer`) represents a remote BGP peer with
-which the node(s) in a $[prodname] cluster will peer.
-Configuring BGP peers allows you to peer a $[prodname] network
-with your datacenter fabric (e.g. ToR). For more
-information on cluster layouts, see $[prodname]'s documentation on
-[$[prodname] over IP fabrics](../architecture/design/l3-interconnect-fabric.mdx).
-
-For `kubectl` commands, the following case-insensitive aliases may be used to specify the resource type on the CLI: `bgppeer.projectcalico.org`, `bgppeers.projectcalico.org` as well as abbreviations such as `bgppeer.p` and `bgppeers.p`.
- -## Sample YAML - -```yaml -apiVersion: projectcalico.org/v3 -kind: BGPPeer -metadata: - name: some.name -spec: - node: rack1-host1 - peerIP: 192.168.1.1 - asNumber: 63400 -``` - -## BGP peer definition - -### Metadata - -| Field | Description | Accepted Values | Schema | -| ----- | ------------------------------------------------------------------ | --------------------------------------------------- | ------ | -| name | Unique name to describe this resource instance. Must be specified. | Alphanumeric string with optional `.`, `_`, or `-`. | string | - -### Spec - -| Field | Description | Accepted Values | Schema | Default | -| ------------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------- | --------------------------- | ---------------------------------------------------------------------------- | -| node | If specified, the scope is node level, otherwise the scope is global. | The hostname of the node to which this peer applies. | string | | -| peerIP | The IP address of this peer and an optional port number. If port number is not set, and peer is Calico node with `listenPort` set, then `listenPort` is used. | Valid IPv4 or IPv6 address. If port number is set use, `IPv4:port` or `[IPv6]:port` format. | string | | -| asNumber | The remote AS Number of the peer. | A valid AS Number, may be specified in dotted notation. | integer/string | -| nodeSelector | Selector for the nodes that should have this peering. When this is set, the `node` field must be empty. | | [selector](#selector) | -| peerSelector | Selector for the remote nodes to peer with. When this is set, the `peerIP` and `asNumber` fields must be empty. | | [selector](#selector) | -| keepOriginalNextHop | Maintain and forward the original next hop BGP route attribute to a specific Peer within a different AS. | | boolean | -| extensions | Additional mapping of keys and values. Used for setting values in custom BGP configurations. | valid strings for both keys and values | map | | -| password | [BGP password](../../operations/comms/secure-bgp.mdx) for the peerings generated by this BGPPeer resource. | | [BGPPassword](#bgppassword) | `nil` (no password) | -| sourceAddress | Specifies whether and how to configure a source address for the peerings generated by this BGPPeer resource. Default value "UseNodeIP" means to configure the node IP as the source address. "None" means not to configure a source address. | "UseNodeIP", "None" | string | "UseNodeIP" | -| failureDetectionMode | Specifies whether and how to detect loss of connectivity on the peerings generated by this BGPPeer resource. Default value "None" means nothing beyond BGP's own (slow) hold timer. "BFDIfDirectlyConnected" means to use BFD when the peer is directly connected. 
| "None", "BFDIfDirectlyConnected" | string | "None" | -| restartMode | Specifies restart behaviour to configure on the peerings generated by this BGPPeer resource. Default value "GracefulRestart" means traditional graceful restart. "LongLivedGracefulRestart" means LLGR according to draft-uttaro-idr-bgp-persistence-05. | "GracefulRestart", "LongLivedGracefulRestart" | string | "GracefulRestart" | -| maxRestartTime | Restart time that is announced by BIRD in the BGP graceful restart capability and that specifies how long the neighbor would wait for the BGP session to re-establish after a restart before deleting stale routes. When specified, this is configured as the graceful restart timeout when `RestartMode` is "GracefulRestart", and as the LLGR stale time when `RestartMode` is "LongLivedGracefulRestart". When not specified, the BIRD defaults are used, which are 120s for "GracefulRestart" and 3600s for "LongLivedGracefulRestart". Note: extra care should be taken when changing this configuration, as it may break networking in your cluster. | | duration | None | -| birdGatewayMode | Specifies the BIRD "gateway" mode, i.e. method for computing the immediate next hop for each received route, for peerings generated by this BGPPeer resource. Default value "Recursive" means "gateway recursive". "DirectIfDirectlyConnected" means to configure "gateway direct" when the peer is directly connected. | "Recursive", "DirectIfDirectlyConnected" | string | "Recursive" | -| numAllowedLocalASNumbers | The number of local AS numbers to allow in the AS path for received routes. This disables BGP loop prevention and should only be used if necessary. | | integer | `nil` (BIRD will default to 0 meaning no change to loop prevention behavior) | -| ttlSecurity | Enables the generalized TTL security mechanism (GTSM) which protects against spoofed packets by ignoring received packets with a smaller than expected TTL value. The provided value is the number of hops (edges) between the peers. | 0 - 255 | 8-bit integer | `nil` (results in BIRD configuration `ttl security off`) | -| filters | List of names of [BGPFilter](bgpfilter.mdx) resources to apply to this peering. | ["my-bgp-filter-1","my-bgp-filter-2"] | List of strings | | -| externalNetwork | Name of the external network to which this peer belongs. | - | string | | -| reachableBy | Adds a static route that may be needed to connect to a peer. In some cases, not having a static route for BGP peering results in route flapping. By adding the address of the gateway that the peer is connected to, a static route is added to prevent route flapping. | The address of the gateway that the peer is connected to | string | | - -:::tip - -The cluster-wide default local AS number used when speaking with a peer is controlled by the -[BGPConfiguration resource](bgpconfig.mdx). That value can be overridden per-node by using the `bgp` field of -the [node resource](node.mdx). - -::: - -### BGPPassword - -:::note - -BGP passwords must be 80 characters or fewer. If a password longer than that -is configured, the BGP sessions with that password will fail to be established. - -::: - -| Field | Description | Schema | -| ------------ | ------------------------------- | ----------------- | -| secretKeyRef | Get the password from a secret. | [KeyRef](#keyref) | - -### KeyRef - -KeyRef tells $[prodname] where to get a BGP password. The referenced Kubernetes -secret must be in the same namespace as the $[nodecontainer] pod. 
- -| Field | Description | Schema | -| ----- | ------------------------- | ------ | -| name | The name of the secret | string | -| key | The key within the secret | string | - -:::warning - -$[prodname] must be able to read the referenced secret. - -This means that the `calico-node` ServiceAccount must have permissions to `list, get, watch` the secret referenced in the KeyRef. - -In practice, this can be done by creating a Role (which allows to `list, get, watch` the secret) and a RoleBinding (which grants the Role's permission to the `calico-node` ServiceAccount). - -Example: - -```yaml ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: bgp-passwords-reader - namespace: calico-system -rules: - - apiGroups: [""] - resources: ["secrets"] - resourceNames: ["bgp-passwords"] - verbs: ["list", "watch", "get"] - ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: calico-read-bgp-passwords - namespace: calico-system -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: bgp-passwords-reader -subjects: - - kind: ServiceAccount - name: calico-node - ---- -apiVersion: v1 -kind: Secret -metadata: - name: "bgp-passwords" - namespace: calico-system -data: - peer_a_pw: "base64-encoded Password for Peer A" - peer_b_pw: "base64-encoded Password for Peer B" - ---- -apiVersion: crd.projectcalico.org/v1 -kind: BGPPeer -metadata: - name: "peer-a" -spec: - password: - secretKeyRef: - name: "bgp-passwords" - key: "peer_a_pw" - ---- -apiVersion: crd.projectcalico.org/v1 -kind: BGPPeer -metadata: - name: "peer-b" -spec: - password: - secretKeyRef: - name: "bgp-passwords" - key: "peer_b_pw" -``` - -::: - -## Peer scopes - -BGP Peers can exist at either global or node-specific scope. A peer's scope -determines which `$[nodecontainer]`s will attempt to establish a BGP session with that peer. -If `$[nodecontainer]` has a `listenPort` set in `BGPConfiguration`, it will be used in peering. - -### Global peer - -To assign a BGP peer a global scope, omit the `node` and `nodeSelector` fields. All nodes in -the cluster will attempt to establish BGP connections with it - -### Node-specific peer - -A BGP peer can also be node-specific. When the `node` field is included, only the specified node -will peer with it. When the `nodeSelector` field is included, the nodes with labels that match that selector -will peer with it. - -## Supported operations - -| Datastore type | Create/Delete | Update | Get/List | Notes | -| --------------------- | ------------- | ------ | -------- | ----- | -| Kubernetes API server | Yes | Yes | Yes | - -## Selector - - - -[parse-duration]: https://golang.org/pkg/time/#ParseDuration diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/blockaffinity.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/blockaffinity.mdx deleted file mode 100644 index 451a6bad29..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/blockaffinity.mdx +++ /dev/null @@ -1,31 +0,0 @@ ---- -description: IP address management block affinity ---- - -# Block affinity - -A block affinity resource (`BlockAffinity`) represents the affinity for an IPAM block. These are managed by Calico IPAM. 
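Block affinities are created and maintained by $[prodname] IPAM itself rather than by users (see the supported operations below), so in practice they are only read back, for example with `kubectl get blockaffinities -o yaml`. The following is a minimal illustrative sketch of what a retrieved resource might look like; the name, node, and CIDR values are placeholders rather than output from a real cluster.

```yaml
apiVersion: projectcalico.org/v3
kind: BlockAffinity
metadata:
  # Name format is illustrative only.
  name: example-node-1-10-244-0-0-26
spec:
  state: confirmed
  node: example-node-1
  cidr: 10.244.0.0/26
  deleted: false
```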
-
-## Block affinity definition
-
-### Metadata
-
-| Field | Description | Accepted Values | Schema |
-| ----- | ------------------------------------------------------------------ | --------------------------------------------------- | ------ |
-| name | Unique name to describe this resource instance. Must be specified. | Alphanumeric string with optional `.`, `_`, or `-`. | string |
-
-### Spec
-
-| Field | Description | Accepted Values | Schema | Default |
-| ------- | --------------------------------------------------------------------------- | ----------------------------------- | ------- | ------- |
-| state | State of the affinity with regard to any referenced IPAM blocks. | confirmed, pending, pendingDeletion | string | |
-| node | The node that this affinity is assigned to. | The hostname of the node | string | |
-| cidr | The CIDR range this block affinity references. | A valid IPv4 or IPv6 CIDR. | string | |
-| deleted | When set to true, clients should treat this block as if it does not exist. | true, false | boolean | `false` |
-
-## Supported operations
-
-| Datastore type | Create | Delete | Update | Get/List | Watch |
-| --------------------- | ------ | ------ | ------ | -------- | ----- |
-| etcdv3 | No | No | No | Yes | Yes |
-| Kubernetes API server | No | No | No | Yes | Yes |
diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/caliconodestatus.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/caliconodestatus.mdx
deleted file mode 100644
index 69fdb137eb..0000000000
--- a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/caliconodestatus.mdx
+++ /dev/null
@@ -1,216 +0,0 @@
----
-description: API for this Calico resource.
----
-
-# Calico node status
-
-A Calico node status resource (`CalicoNodeStatus`) represents a collection of status information for a node that $[prodname] reports back to the user for use during troubleshooting.
-
-Currently, the status of BGP agents, BGP sessions, and routes exposed to BGP agents is collected from Linux nodes only. **Windows nodes are not supported at this time.**
-The Calico node status resource is only valid when $[prodname] BGP networking is in use.
-
-### Notes
-
-Updating `CalicoNodeStatus` has a small performance impact on the node's CPU and memory usage, and it adds load to the Kubernetes API server.
-
-In our testing on a ten node, full mesh cluster, a `CalicoNodeStatus` resource was created for each node where the update interval was set to ten seconds. On each node, this resulted in an increase in CPU use of 5% of a vCPU and an increase of 4MB of memory. The control plane node recorded an increase in CPU usage of 5% of a vCPU for these 10 nodes.
-
-:::caution
-
-The implementation of `CalicoNodeStatus` is designed to handle a small number of nodes (fewer than 10 is recommended) reporting back status at the same time. If `CalicoNodeStatus` resources are created for a large number of nodes with a short update interval,
-the Kubernetes API server may become slower and less responsive.
-Create `CalicoNodeStatus` only for the nodes you are interested in, and only for debugging purposes; delete the `CalicoNodeStatus` resource once debugging is complete.
-
-:::
-
-## Sample YAML
-
-To use this feature, create a CalicoNodeStatus object for the node, specifying the information to collect and the interval at which to collect it. This example collects information for node "my-kadm-node-0" with an update interval of 10 seconds.
- -```bash -kubectl apply -f -< diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/compliance-reports/inventory.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/compliance-reports/inventory.mdx deleted file mode 100644 index f8c8e3219b..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/compliance-reports/inventory.mdx +++ /dev/null @@ -1,86 +0,0 @@ ---- -description: API for this resource. ---- - -# Inventory report - -To create an Inventory report, create a [`GlobalReport`](../globalreport.mdx) with the `reportType` -set to `inventory`. - -The following sample command creates a GlobalReport that results in a daily inventory report for -endpoints in the `public` namespace. - -```bash -kubectl apply -f - << EOF -apiVersion: projectcalico.org/v3 -kind: GlobalReport -metadata: - name: daily-public-inventory-report - labels: - deployment: production -spec: - reportType: inventory - endpoints: - namespaces: - names: - - public - schedule: 0 0 * * * -EOF -``` - -## Downloadable reports - -### summary.csv - -A summary CSV file that includes details about the report parameters and the top level counts. - -| Heading | Description | Format | -| ----------------------------- | ----------------------------------------------------------------------------------------------------------- | ------------------------------------------- | -| startTime | The report interval start time. | RFC3339 string | -| endTime | The report interval end time. | RFC3339 string | -| endpointSelector | The endpoint selector used to restrict in-scope endpoints by endpoint label selection. | selector string | -| namespaceNames | The set of namespace names used to restrict in-scope endpoints by namespace. | ";" separated list of namespace names | -| namespaceSelector | The namespace selector used to restrict in-scope endpoints by namespace label selection. | selector string | -| serviceAccountNames | The set of service account names used to restrict in-scope endpoints by service account. | ";" separated list of service account names | -| serviceAccountSelectors | The service account selector used to restrict in-scope endpoints by service account label selection. | selector string | -| endpointsNumInScope | The number of enumerated endpoints that are in-scope according to the requested endpoint selection options. | number | -| endpointsNumIngressProtected | The number of in-scope endpoints that were always ingress protected during the report interval. | number | -| endpointsNumEgressProtected | The number of in-scope endpoints that were always egress protected during the report interval. | number | -| namespacesNumInScope | The number of namespaces containing in-scope endpoints. | number | -| namespacesNumIngressProtected | The number of namespaces whose in-scope endpoints were always ingress protected during the report interval. | number | -| namespacesNumEgressProtected | The number of namespaces whose in-scope endpoints were always egress protected during the report interval. | number | -| serviceAccountsNumInScope | The number of service accounts associated with in-scope endpoints. | number | - -### endpoints.csv - -An endpoints CSV file that includes per-endpoint information. - -| Heading | Description | Format | -| ---------------- | --------------------------------------------------------------------------------------------- | ----------------------------------- | -| endpoint | The name of the endpoint. 
| string | -| ingressProtected | Whether the endpoint was always ingress protected during the report interval. | bool | -| egressProtected | Whether the endpoint was always egress protected during the report interval. | bool | -| envoyEnabled | Whether the endpoint was always Envoy enabled during the report interval. | bool | -| appliedPolicies | The full set of policies that applied to the endpoint at any time during the report interval. | ";" separated list of policy names | -| services | The full set of services that included this endpoint at any time during the report interval. | ";" separated list of service names | - -### namespaces.csv - -A namespaces CSV file that includes per-namespace information. - -| Heading | Description | Format | -| ---------------- | ------------------------------------------------------------------------------------------------------------- | ------ | -| namespace | The name of the namespace. | string | -| ingressProtected | Whether all in-scope endpoints within the namespace were always ingress protected during the report interval. | bool | -| egressProtected | Whether all in-scope endpoints within the namespace were always egress protected during the report interval. | bool | -| envoyEnabled | Whether all in-scope endpoints within the namespace were always Envoy enabled during the report interval. | bool | - -### services.csv - -A services CSV file that includes per-service information. - -| Heading | Description | Format | -| ---------------- | ---------------------------------------------------------------------------------------------------------------- | ------ | -| service | The name of the service. | string | -| ingressProtected | Whether all in-scope endpoints that are in the service were always ingress protected during the report interval. | bool | -| envoyEnabled | Whether all in-scope endpoints that are in the service were always Envoy enabled during the report interval. | bool | diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/compliance-reports/network-access.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/compliance-reports/network-access.mdx deleted file mode 100644 index 6c5b848bf5..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/compliance-reports/network-access.mdx +++ /dev/null @@ -1,92 +0,0 @@ ---- -description: API for this resource. ---- - -# Network Access report - -To create an Inventory report, create a [`GlobalReport`](../globalreport.mdx) with the `reportType` -set to `network-access`. - -The following sample command creates a GlobalReport that results in a daily network access report for -endpoints in the `public` namespace. - -```bash -kubectl apply -f - << EOF -apiVersion: projectcalico.org/v3 -kind: GlobalReport -metadata: - name: daily-public-network-access-report - labels: - deployment: production -spec: - reportType: network-access - endpoints: - namespaces: - names: - - public - schedule: 0 0 * * * -EOF -``` - -:::note - -There is a known issue that audit logs do not contain deletion events for resources that were -deleted implicitly as part of a namespace deletion event. Currently, this means policies and pods that have been -deleted in this way may still appear in the reports that cover any period within the next day. - -::: - -## Downloadable reports - -### summary.csv - -A summary CSV file that includes details about the report parameters and the top level counts. 
- -| Heading | Description | Format | -| ------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------- | -| startTime | The report interval start time. | RFC3339 string | -| endTime | The report interval end time. | RFC3339 string | -| endpointSelector | The endpoint selector used to restrict in-scope endpoints by endpoint label selection. | selector string | -| namespaceNames | The set of namespace names used to restrict in-scope endpoints by namespace. | ";" separated list of namespace names | -| namespaceSelector | The namespace selector used to restrict in-scope endpoints by namespace label selection. | selector string | -| serviceAccountNames | The set of service account names used to restrict in-scope endpoints by service account. | ";" separated list of service account names | -| serviceAccountSelectors | The service account selector used to restrict in-scope endpoints by service account label selection. | selector string | -| endpointsNumIngressProtected | The number of in-scope endpoints that were always ingress protected during the report interval. | number | -| endpointsNumEgressProtected | The number of in-scope endpoints that were always egress protected during the report interval. | number | -| endpointsNumIngressUnprotected | The number of in-scope endpoints that were ingress unprotected at any point during the report interval. | number | -| endpointsNumEgressUnprotected | The number of in-scope endpoints that were egress unprotected at any point during the report interval. | number | -| endpointsNumIngressFromInternet | The number of in-scope endpoints that allowed ingress traffic from the public internet at any point during the report interval. | number | -| endpointsNumEgressToInternet | The number of in-scope endpoints that allowed egress traffic to the public internet at any point during the report interval. | number | -| endpointsNumIngressFromOtherNamespace | The number of in-scope endpoints that allowed ingress traffic from another namespace at any point during the report interval. | number | -| endpointsNumEgressToOtherNamespace | The number of in-scope endpoints that allowed egress traffic to another namespace at any point during the report interval. | number | -| endpointsNumEnvoyEnabled | The number of in-scope endpoints that were always Envoy enabled during the report interval. | number | - -### endpoints.csv - -An endpoints CSV file that includes per-endpoint information. - -| Heading | Description | Format | -| ------------------------------------------- | -------------------------------------------------------------------------------------------------------------- | ----------------------------------- | -| endpoint | The name of the endpoint. | string | -| ingressProtected | Whether the endpoint was always ingress protected during the report interval. | bool | -| egressProtected | Whether the endpoint was always egress protected during the report interval. | bool | -| ingressFromInternet | Whether the endpoint allowed ingress traffic from the public internet at any point during the report interval. | number | -| egressToInternet | Whether the endpoint allowed egress traffic to the public internet at any point during the report interval. | number | -| ingressFromOtherNamespace | Whether the endpoint allowed ingress traffic from another namespace at any point during the report interval. 
| number | -| egressToOtherNamespace | Whether the endpoint allowed egress traffic to another namespace at any point during the report interval. | number | -| envoyEnabled | Whether the endpoint was always Envoy enabled during the report interval. | bool | -| appliedPolicies | The full set of policies that applied to the endpoint at any time during the report interval. | ";" separated list of policy names | -| services | The full set of services that included this endpoint at any time during the report interval. | ";" separated list of service names | -| trafficAggregationPrefix\* | The flow log aggregation prefix. | string | -| endpointsGeneratingTrafficToThisEndpoint\* | The set of endpoints that were generating traffic to this endpoint. | ";" separated list of service names | -| endpointsReceivingTrafficFromThisEndpoint\* | The set of endpoints that this endpoint is generating traffic to. | ";" separated list of service names | - -\* Traffic data is determined from flow logs. By default, $[prodname] aggregates flow logs so that flows to -and from pods in the same replica set are summarized if the flows are accepted. (Denied flows are not aggregated this -way by default). This means that the per-endpoint traffic details do not refer specifically to that endpoint, but -rather the set of endpoints specified by the trafficAggregationPrefix. - -If you want per-endpoint detail you should turn down the level of aggregation. To do so, -set the value of `flowLogsFileAggregationKindForAllowed` to 1 using a [FelixConfiguration][felixconfig] - -[felixconfig]: ../felixconfig.mdx diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/compliance-reports/overview.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/compliance-reports/overview.mdx deleted file mode 100644 index ef084b7f9f..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/compliance-reports/overview.mdx +++ /dev/null @@ -1,102 +0,0 @@ ---- -description: Schedule reports and configure report scope. ---- - -# Compliance reports (deprecated) - -The $[prodname] compliance reporting feature provides the following compliance reports: - -- [Inventory](inventory.mdx) -- [Network Access](network-access.mdx) -- [Policy Audit](policy-audit.mdx) -- [CIS Benchmark](cis-benchmark.mdx) - -Create a [`GlobalReport`](../globalreport.mdx) resource to automatically schedule report generation, and specify the report scope (resources to include in the report). - -## Concepts - -### In-scope asset - -An asset (Pod or HostEndpoint) is flagged as in-scope by endpoint labels, namespace and/or namespace labels, and service -account and/or service account labels. - -_How this applies to the report_: -The report includes all resources that were in-scope at any point during the report interval. The resource is included -when it is first flagged as in-scope according to the configured label selector and name selections. The resource is -included even if the resource is deleted or goes out-of-scope before the end of the report interval. - -### Ingress protected - -An endpoint is ingress protected if it has at least one Ingress policy that is applied to it. - -A service is ingress protected if all of the inscope endpoints within that service are ingress protected. - -A namespace is ingress protected if all of the inscope endpoints within that namespace are ingress protected. 
- -_How this applies to the report_: -An endpoint is ingress protected only if it was ingress protected throughout the entire report interval. - -### Egress protected - -As per ingress, but with egress policy rules. Note that egress statistics are not obtained for services. - -### Allows ingress traffic from another namespace - -An endpoint is flagged as allowing ingress traffic from another namespace if it has one or more policies that apply to -it with an ingress allow rule that: - -- has an explicit namespace selector configured, or -- has no source selector or source CIDR configured, or -- (for GlobalNetworkPolicy) has no source CIDR. - -A service is flagged as allowing ingress traffic from another namespace if any of the inscope endpoints within that -service are flagged. - -A namespace is flagged as allowing ingress traffic from another namespace if all of the inscope endpoints within that -namespace are flagged. - -_How this applies to the report_: -An endpoint is flagged as allowing ingress traffic from another namespace if it was flagged at any time during the -report interval. - -### Allows egress traffic to another namespace - -As per ingress, but with egress policy rules and destination selector/CIDR. Note that egress statistics are not obtained -for services. - -### Allows ingress traffic from the internet - -An endpoint is flagged as allowing ingress traffic from the internet if it has one or more policies that apply to it -with an ingress allow rule that: - -- has no source selector or source CIDR configured, or -- has a source CIDR in the non-private IP ranges and has no source selector, or -- has a source selector that matches one or more NetworkSets that contain at least one non-private IP. - -A service is flagged as allowing ingress traffic from the internet if any of the inscope endpoints within that service -are flagged. - -A namespace is flagged as allowing ingress traffic from the internet if all of the inscope endpoints within that -namespace are flagged. - -_How this applies to the report_: -An endpoint is flagged as allowing ingress traffic from the internet if it was flagged as such at any time during the -report interval. - -### Allows egress traffic to the internet - -As per ingress, but with egress policy rules and destination selector/CIDR. Note that egress statistics are not obtained -for services. - -### Envoy enabled - -An endpoint is flagged as Envoy Enabled if the associated Pod Spec and Annotations indicate that an Istio init and main -container are deployed in the Pod. Provided Istio is appropriately configured on the cluster, this can be extrapolated -to be indication of whether mTLS is enabled for the endpoint. - -A service is flagged as Envoy enabled if all of the inscope endpoints within that service are flagged. - -A namespace is flagged as Envoy enabled if all of the inscope endpoints within that namespace are flagged. - -_How this applies to the report_: -An endpoint is flagged as Envoy enabled if it was flagged as such throughout the entire report interval. diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/compliance-reports/policy-audit.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/compliance-reports/policy-audit.mdx deleted file mode 100644 index 06970a93ce..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/compliance-reports/policy-audit.mdx +++ /dev/null @@ -1,56 +0,0 @@ ---- -description: API for this resource. 
---- - -# Policy audit report - -To create a Policy Audit report, create a [`GlobalReport`](../globalreport.mdx) with the `reportType` -set to `policy-audit`. - -The following sample command creates a GlobalReport that results in a daily policy audit report for -policies that are applied to endpoints in the `public` namespace. - -```bash -kubectl apply -f - << EOF -apiVersion: projectcalico.org/v3 -kind: GlobalReport -metadata: - name: daily-public-policy-audit-report - labels: - deployment: production -spec: - reportType: policy-audit - endpoints: - namespaces: - names: - - public - schedule: 0 0 * * * -EOF -``` - -## Downloadable reports - -### summary.csv - -A summary CSV file that includes details about the report parameters and the top level counts. - -| Heading | Description | Format | -| ----------------------- | ------------------------------------------------------------------------------------------------------ | ------------------------------------------- | -| startTime | The report interval start time. | RFC3339 string | -| endTime | The report interval end time. | RFC3339 string | -| endpointSelector | The endpoint selector used to restrict in-scope endpoints by endpoint label selection. | selector string | -| namespaceNames | The set of namespace names used to restrict in-scope endpoints by namespace. | ";" separated list of namespace names | -| namespaceSelector | The namespace selector used to restrict in-scope endpoints by namespace label selection. | selector string | -| serviceAccountNames | The set of service account names used to restrict in-scope endpoints by service account. | ";" separated list of service account names | -| serviceAccountSelectors | The service account selector used to restrict in-scope endpoints by service account label selection. | selector string | -| numCreatedPolicies | The number of policies that apply to in-scope endpoints that were created during the report interval. | number | -| numModifiedPolicies | The number of policies that apply to in-scope endpoints that were modified during the report interval. | number | -| numDeletedPolicies | The number of policies that apply to in-scope endpoints that were deleted during the report interval. | number | - -### events.json - -Events formatted in JSON. - -### events.yaml - -Events formatted in YAML. diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/deeppacketinspection.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/deeppacketinspection.mdx deleted file mode 100644 index 131cab69cd..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/deeppacketinspection.mdx +++ /dev/null @@ -1,74 +0,0 @@ ---- -description: API for this Calico Enterprise resource. ---- - -# Deep packet inspection - -import Selectors from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_selectors.mdx'; - -A deep packet inspection resource (`DeepPacketInspection`) represents live network traffic monitor for malicious activities -by analyzing header and payload of the packet using specific rules. Malicious activities are added to the “Alerts” page in -the $[prodname] web console. 
- -For `kubectl` [commands](https://kubernetes.io/docs/reference/kubectl/overview/), the following case-insensitive aliases can be used to specify the resource type on the CLI: -`deeppacketinspection`,`deeppacketinspections`, `deeppacketinspection.projectcalico.org`, `deeppacketinspections.projectcalico.org` as well as -abbreviations such as `deeppacketinspection.p` and `deeppacketinspections.p`. - -## Sample YAML - -```yaml -apiVersion: projectcalico.org/v3 -kind: DeepPacketInspection -metadata: - name: sample-dpi - namespace: sample-namespace -spec: - selector: k8s-app == "sample-app" -``` - -## DeepPacketInspection definition - -### Metadata - -| Field | Description | Accepted Values | Schema | Default | -| --------- | ------------------------------------------------------------------ | --------------------------------------------------- | ------ | --------- | -| name | The name of the deep packet inspection. Required. | Alphanumeric string with optional `.`, `_`, or `-`. | string | | -| namespace | Namespace provides an additional qualification to a resource name. | | string | "default" | - -### Spec - -| Field | Description | Accepted Values | Schema | Default | -| -------- | ------------------------------------------------------------------- | --------------- | --------------------- | ------- | -| selector | Selects the endpoints to which this deep packet inspection applies. | | [selector](#selector) | | - -### Status - -| Field | Description | -| ----- | ------------------------ | -| nodes | List of [Nodes](#nodes). | - -### Nodes - -| Field | Description | -| --------------- | -------------------------------------------- | -| node | Name of the node that generated this status. | -| active | [Active](#active) status. | -| errorConditions | List of [errors](#error-conditions). | - -### Active - -| Field | Description | -| ----------- | ------------------------------------------------------------ | -| success | Whether the deep packet inspection is active on the backend. | -| lastUpdated | Time when the [active](#active) field was updated. | - -### Error Conditions - -| Field | Description | -| ----------- | ------------------------------------------------------------------- | -| message | Errors preventing deep packet inspection from running successfully. | -| lastUpdated | Time when the [error](#error-conditions) was updated. | - -### Selector - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/egressgatewaypolicy.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/egressgatewaypolicy.mdx deleted file mode 100644 index 3cd371bfa5..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/egressgatewaypolicy.mdx +++ /dev/null @@ -1,83 +0,0 @@ ---- -description: API for this Calico Enterprise resource. ---- - -# Egress gateway policy - -An EgressGatewayPolicy resource (`EgressGatewayPolicy`) represents a way to select -different egress gateways or skip one for different destinations. - -Rules in an Egress EgressGatewayPolicy are checked in Longest Prefix Match(LPM) fashion -like routers. As such it is not valid to use the exact destination in two rules. - -In order for an EgressGatewayPolicy to be used, its `name` must be added -to a pod or namespace by using `egress.projectcalico.org/egressGatewayPolicy` annotation. 
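For example, a minimal sketch of selecting the `my-egwpolicy` policy (defined in the sample below) for every pod in a namespace might look like the following; the namespace name `app-ns` is illustrative only:

```yaml
apiVersion: v1
kind: Namespace
metadata:
  name: app-ns # hypothetical namespace name
  annotations:
    # References the EgressGatewayPolicy by name.
    egress.projectcalico.org/egressGatewayPolicy: my-egwpolicy
```

The same annotation can instead be set on an individual pod if only that pod should use the policy.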
- -## Sample YAML - -```yaml -apiVersion: projectcalico.org/v3 -kind: EgressGatewayPolicy -metadata: - name: my-egwpolicy -spec: - rules: - - destination: - cidr: 10.0.0.0/8 - description: "Local: no gateway" - - destination: - cidr: 11.0.0.0/8 - description: "Gateway to on prem" - gateway: - namespaceSelector: "projectcalico.org/name == 'default'" - selector: "egress-code == 'blue'" - maxNextHops: 2 - - description: "Gateway to internet" - gateway: - namespaceSelector: "projectcalico.org/name == 'default'" - selector: "egress-code == 'red'" - gatewayPreference: PreferNodeLocal -``` - -## Egress gateway policy definition - -### Metadata - -| Field | Description | Accepted Values | Schema | -| ----- | ------------------------------------------------------------------ | --------------------------------------------------- | ------ | -| name | Unique name to describe this resource instance. Must be specified. | Alphanumeric string with optional `.`, `_`, or `-`. | string | - -### Spec - -| Field | Description | Accepted Values | Schema | Default | -| -------- | -------------------------------------- | --------------- | --------------------------------------------------------- | ------- | -| rules | List of egress gateway policies | | [Egress Gateway Policy Rule](#egress-gateway-policy-rule) | | - -### Egress gateway policy rule - -| Field | Description | Accepted Values | Schema | Default | -| ----------------- | ------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------- | ------- | -| description | A description of rule | | string | | -| destination | CIDR representing a destination | | [destination](#destination) | | -| gateway | egress gateway to be used for a destination | | [gateway](#egress-gateway) | | -| gatewayPreference | Hints about egress gateway selection | `None` for using all available egress gateway replicas from the selected deployment, or `PreferNodeLocal` to use only egress gateway replicas on the same local node as the client pod or namespace if available, otherwise fall back to the default behaviour. | | 'None' | - -### Destination - -| Field | Description | Accepted Values | Schema | Default | -| ---------- | ------------------------------------------- | --------------------------- | --------------------------- | ------- | -| cidr | CIDR of destination network | | string | | - -### Egress gateway - -| Field | Description | Accepted Values | Schema | Default | -| ----------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | --------------------------- | --------------------------- | ------- | -| selector | selector to choose an egress gateway deployment | | string | | -| namespaceSelector | name space of egress gateway deployment | | string | | -| maxNextHops | Specifies the maximum number of egress gateway replicas from the selected deployment that a pod should depend on. Replicas will be chosen in a manner that attempts to balance load across the whole egress gateway replicaset. 
If unset, or set to "0", egress traffic will behave in the default manner (load balanced over all available gateways). | | string | | - -## Supported operations - -| Datastore type | Create/Delete | Update | Get/List | Notes | -| --------------------- | ------------- | ------ | -------- | ----- | -| Kubernetes API server | Yes | Yes | Yes | | diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/externalnetwork.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/externalnetwork.mdx deleted file mode 100644 index 4229006a04..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/externalnetwork.mdx +++ /dev/null @@ -1,40 +0,0 @@ ---- -description: API for this Calico Enterprise resource. ---- - -# External network - -An external network resource (`ExternalNetwork`) represents an external network outside the cluster. -External networks can provide service endpoint with overlapping IPs. -A BGP peer can declare itself as part of an external network and propagate routes to the service endpoints in the external network to allow egress traffic from the cluster to reach the service endpoints via the egress gateways. -Those routes are programmed in the dedicated kernel routing table which has the index defined by the external network resource. - -For `kubectl` [commands](https://kubernetes.io/docs/reference/kubectl/overview/), the following case-insensitive aliases -may be used to specify the resource type on the CLI: -`externalnetwork.projectcalico.org`, `externalnetworks.projectcalico.org` and abbreviations such as -`externalnetwork.p` and `externalnetworks.p`. - -## Sample YAML - -```yaml -apiVersion: projectcalico.org/v3 -kind: ExternalNetwork -metadata: - name: network-a -spec: - routeTableIndex: 500 -``` - -## Definition - -### Metadata - -| Field | Description | Accepted Values | Schema | -|-------|--------------|-------------------|--------| -| name | The name of the external network. | | string | - -### Spec - -| Field | Description | Accepted Values | Schema | Default | -|------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------|-----------------------|-----------------------| -| routeTableIndex | The index of a linux kernel routing table that should be used for the routes associated with the external network. The value should be unique for each external network. The value should not be in the range of `RouteTableRanges` field in FelixConfiguration. The kernel routing table index should not be used by other processes on the node. | | int | | \ No newline at end of file diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/felixconfig.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/felixconfig.mdx deleted file mode 100644 index 7cce2601d5..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/felixconfig.mdx +++ /dev/null @@ -1,362 +0,0 @@ ---- -description: API for this Calico Enterprise resource. ---- - -# Felix configuration - -A [Felix](../architecture/overview.mdx#felix) configuration resource (`FelixConfiguration`) represents Felix configuration options for the cluster. 
-
-For `kubectl` commands, the following case-insensitive aliases may be used to specify the resource type on the CLI: `felixconfiguration.projectcalico.org`, `felixconfigurations.projectcalico.org` as well as abbreviations such as `felixconfiguration.p` and `felixconfigurations.p`.
-
-See [Configuring Felix](../component-resources/node/felix/configuration.mdx) for more details.
-
-## Sample YAML
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: FelixConfiguration
-metadata:
-  name: default
-spec:
-  ipv6Support: false
-  ipipMTU: 1400
-  awsSrcDstCheck: Enable
-```
-
-## Felix configuration definition
-
-### Metadata
-
-| Field | Description | Accepted Values | Schema |
-| ----- | --------------------------------------------------------- | --------------------------------------------------- | ------ |
-| name | Unique name to describe this resource instance. Required. | Alphanumeric string with optional `.`, `_`, or `-`. | string |
-
-- $[prodname] automatically creates a resource named `default` containing the global default configuration settings for Felix. You can use [calicoctl](../clis/calicoctl/overview.mdx) to view and edit these settings.
-- The resources with the name `node.<nodename>` contain the node-specific overrides, and will be applied to the node `<nodename>`. When deleting a node the FelixConfiguration resource associated with the node will also be deleted.
-
-### Spec
-
-| Field | Description | Accepted Values | Schema | Default |
-| ----- | ----------- | --------------- | ------ | ------- |
-| awsSrcDstCheck | Controls automatically setting [source-destination-check](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_NAT_Instance.html#EIP_Disable_SrcDestCheck) on an AWS EC2 instance running Felix. Setting the value to `Enable` will set the check value in the instance description to `true`. For `Disable`, the check value will be `false`. The setting must be `Disable` if you want the EC2 instance to process traffic not matching the host interface IP address, for example an EKS cluster using Calico CNI with `VXLANMode=CrossSubnet`. Check [IAM role and profile configuration](#aws-iam-rolepolicy-for-source-destination-check-configuration) for the permissions necessary for this setting to work.
| DoNothing, Enable, Disable | string | `DoNothing` | -| awsSecondaryIPSupport | Controls whether Felix will create secondary AWS ENIs for AWS-backed IP pools. This feature is documented in the [egress gateways on AWS guide](../../networking/egress/egress-gateway-aws.mdx). Should only be enabled on AWS. | `Enabled`, `EnabledENIPerWorkload`, `Disabled` | string | `Disabled` | -| awsSecondaryIPRoutingRulePriority | Controls the priority of the policy-based routing rules used to implement AWS-backed IP addresses. Should only be changed to avoid conflicts if your nodes have additional policy based routing rules. | 0-4294967295 | int | 101 | -| awsRequestTimeout | Timeout used for communicating with the AWS API. | `5s`, `10s`, `1m` etc. | duration | `30s` | -| dropActionOverride | Controls what happens to each packet that is denied by the current $[prodname] policy. Normally the `Drop` or `LogAndDrop` value should be used. However when experimenting or debugging a scenario that is not behaving as you expect, the `Accept` and `LogAndAccept` values can be useful: then the packet will be still be allowed through. When one of the `LogAnd...` values is set, each denied packet is logged in syslog.\* | `Drop`, `Accept`, `LogAndDrop`, `LogAndAccept` | string | `Drop` | -| chainInsertMode | Controls whether Felix hooks the kernel's top-level iptables chains by inserting a rule at the top of the chain or by appending a rule at the bottom. `Insert` is the safe default since it prevents $[prodname]'s rules from being bypassed. If you switch to `Append` mode, be sure that the other rules in the chains signal acceptance by falling through to the $[prodname] rules, otherwise the $[prodname] policy will be bypassed. In particular `Append` mode is incompatible with DNS Policy unless kube-proxy is modified to fall through to $[prodname] rules. | `Insert`, `Append` | string | `Insert` | -| healthTimeoutOverrides | A list of overrides for Felix's internal liveness/readiness timeouts. | see [below](#health-timeout-overrides) | List of `HealthTimeoutOverride` objects | `[]` | -| dataplaneWatchdogTimeout | Deprecated, use `healthTimeoutOverrides` instead. Timeout before the main data plane goroutine is determined to have hung and Felix will report non-live and non-ready. Can be increased if the liveness check incorrectly fails (for example if Felix is running slowly on a heavily loaded system). | `90s`, `120s`, `10m` etc. | duration | `90s` | -| defaultEndpointToHostAction | This parameter controls what happens to traffic that goes from a workload endpoint to the host itself (after the traffic hits the endpoint egress policy). By default $[prodname] blocks traffic from workload endpoints to the host itself with an iptables "DROP" action. If you want to allow some or all traffic from endpoint to host, set this parameter to `Return` or `Accept`. Use `Return` if you have your own rules in the iptables "INPUT" chain; $[prodname] will insert its rules at the top of that chain, then `Return` packets to the "INPUT" chain once it has completed processing workload endpoint egress policy. Use `Accept` to unconditionally accept packets from workloads after processing workload endpoint egress policy. | Drop, Return, Accept | string | `Drop` | -| deviceRouteSourceAddress | IPv4 address to set as the source hint for routes programmed by Felix. When not set the source address for local traffic from host to workload will be determined by the kernel. 
| IPv4 | string | `""` | -| deviceRouteSourceAddressIPv6 | IPv6 address to set as the source hint for routes programmed by Felix. When not set the source address for local traffic from host to workload will be determined by the kernel. | IPv6 | string | `""` | -| deviceRouteProtocol | This defines the route protocol added to programmed device routes. | Protocol | int | RTPROT_BOOT | -| endpointStatusPathPrefix | Path to the directory where Felix should create the `endpoint-status` directory. Choosing a mounted volume such as `/var/run/calico` is recommended as the directory can then be monitored by host processes such as the Calico CNI. Leaving this field empty disables endpoint-status files. | Any existing path in the calico-node container | string | `""` | -| externalNodesCIDRList | A comma-delimited list of CIDRs of external non-calico nodes that can source tunnel traffic for acceptance by calico-nodes. | IPv4 | string | `""` | -| failsafeInboundHostPorts | UDP/TCP/SCTP protocol/cidr/port groupings that Felix will allow incoming traffic to host endpoints on irrespective of the security policy. This is useful to avoid accidentally cutting off a host with incorrect configuration. The default value allows SSH access, etcd, BGP, DHCP and the Kubernetes API. | | List of [ProtoPort](#protoport) |

    - protocol: tcp
      port: 22
    - protocol: udp
      port: 68
    - protocol: tcp
      port: 179
    - protocol: tcp
      port: 2379
    - protocol: tcp
      port: 2380
    - protocol: tcp
      port: 5473
    - protocol: tcp
      port: 6443
    - protocol: tcp
      port: 6666
    - protocol: tcp
      port: 6667

    | -| failsafeOutboundHostPorts | UDP/TCP/SCTP protocol/port groupings that Felix will allow outgoing traffic from host endpoints to irrespective of the security policy. This is useful to avoid accidentally cutting off a host with incorrect configuration. The default value opens etcd's standard ports to ensure that Felix does not get cut off from etcd as well as allowing DHCP, DNS, BGP and the Kubernetes API. | | List of [ProtoPort](#protoport) |

    - protocol: udp
      port: 53
    - protocol: udp
      port: 67
    - protocol: tcp
      port: 179
    - protocol: tcp
      port: 2379
    - protocol: tcp
      port: 2380
    - protocol: tcp
      port: 5473
    - protocol: tcp
      port: 6443
    - protocol: tcp
      port: 6666
    - protocol: tcp
      port: 6667

    | -| featureDetectOverride | Is used to override the feature detection. Values are specified in a comma separated list with no spaces, example; "SNATFullyRandom=true,MASQFullyRandom=false,RestoreSupportsLock=". "true" or "false" will force the feature, empty or omitted values are auto-detected. | string | string | `""` | -| genericXDPEnabled | When enabled, Felix can fallback to the non-optimized `generic` XDP mode. This should only be used for testing since it doesn't improve performance over the non-XDP mode. | true,false | boolean | `false` | -| interfaceExclude | A comma-separated list of interface names that should be excluded when Felix is resolving host endpoints. The default value ensures that Felix ignores Kubernetes' internal `kube-ipvs0` device. If you want to exclude multiple interface names using a single value, the list supports regular expressions. For regular expressions you must wrap the value with `/`. For example having values `/^kube/,veth1` will exclude all interfaces that begin with `kube` and also the interface `veth1`. | string | string | `kube-ipvs0` | -| interfacePrefix | The interface name prefix that identifies workload endpoints and so distinguishes them from host endpoint interfaces. Note: in environments other than bare metal, the orchestrators configure this appropriately. For example our Kubernetes and Docker integrations set the 'cali' value, and our OpenStack integration sets the 'tap' value. | string | string | `cali` | -| ipipEnabled | Optional, you shouldn't need to change this setting as Felix calculates if IPIP should be enabled based on the existing IP Pools. When set, this overrides whether Felix should configure an IPinIP interface on the host. When explicitly disabled in FelixConfiguration, Felix will not clean up addresses from the `tunl0` interface (use this if you need to add addresses to that interface and don't want to have them removed). | `true`, `false`, unset | optional boolean | unset | -| ipipMTU | The MTU to set on the tunnel device. Zero value means auto-detect. See [Configuring MTU](../../networking/configuring/mtu.mdx) | int | int | `0` | -| ipForwarding | _Added in: v3.19.3._ Controls whether Felix sets the host sysctls to enable IP forwarding. IP forwarding is required when using Calico for workload networking. This should be disabled only on hosts where Calico is used for host protection. | `Enabled`, `Disabled` | string | `Enabled` | -| ipsetsRefreshInterval | Period at which Felix re-checks the IP sets in the data plane to ensure that no other process has accidentally broken $[prodname]'s rules. Set to 0 to disable IP sets refresh. | `5s`, `10s`, `1m` etc. | duration | `10s` | -| iptablesFilterAllowAction | This parameter controls what happens to traffic that is accepted by a Felix policy chain in the iptables filter table (i.e. a normal policy chain). The default will immediately `Accept` the traffic. Use `Return` to send the traffic back up to the system chains for further processing. | Accept, Return | string | `Accept` | -| iptablesBackend | This parameter controls which variant of iptables Felix uses. If using Felix on a system that uses the netfilter-backed iptables binaries, set this to `nft`. | Legacy, nft | string | automatic detection | -| iptablesLockFilePath | Location of the iptables lock file. You may need to change this if the lock file is not in its standard location (for example if you have mapped it into Felix's container at a different path). 
| string | string | `/run/xtables.lock` | -| iptablesLockProbeInterval | Time that Felix will wait between attempts to acquire the iptables lock if it is not available. Lower values make Felix more responsive when the lock is contended, but use more CPU. | `5s`, `10s`, `1m` etc. | duration | `50ms` | -| iptablesLockTimeout | Time that Felix will wait for the iptables lock, or 0, to disable. To use this feature, Felix must share the iptables lock file with all other processes that also take the lock. When running Felix inside a container, this requires the /run directory of the host to be mounted into the $[nodecontainer] or calico/felix container. | `5s`, `10s`, `1m` etc. | duration | `0` (Disabled) | -| iptablesMangleAllowAction | This parameter controls what happens to traffic that is accepted by a Felix policy chain in the iptables mangle table (i.e. a pre-DNAT policy chain). The default will immediately `Accept` the traffic. Use `Return` to send the traffic back up to the system chains for further processing. | `Accept`, `Return` | string | `Accept` | -| iptablesMarkMask | Mask that Felix selects its IPTables Mark bits from. Should be a 32 bit hexadecimal number with at least 8 bits set, none of which clash with any other mark bits in use on the system. | netmask | netmask | `0xffff0000` | -| iptablesNATOutgoingInterfaceFilter | This parameter can be used to limit the host interfaces on which Calico will apply SNAT to traffic leaving a Calico IPAM pool with "NAT outgoing" enabled. This can be useful if you have a main data interface, where traffic should be SNATted and a secondary device (such as the docker bridge) which is local to the host and doesn't require SNAT. This parameter uses the iptables interface matching syntax, which allows `+` as a wildcard. Most users will not need to set this. Example: if your data interfaces are eth0 and eth1 and you want to exclude the docker bridge, you could set this to `eth+` | string | string | `""` | -| iptablesPostWriteCheckInterval | Period after Felix has done a write to the data plane that it schedules an extra read back to check the write was not clobbered by another process. This should only occur if another application on the system doesn't respect the iptables lock. | `5s`, `10s`, `1m` etc. | duration | `1s` | -| iptablesRefreshInterval | Period at which Felix re-checks all iptables state to ensure that no other process has accidentally broken $[prodname]'s rules. Set to 0 to disable iptables refresh. | `5s`, `10s`, `1m` etc. | duration | `90s` | -| ipv6Support | IPv6 support for Felix | `true`, `false` | boolean | `true` | -| logFilePath | The full path to the Felix log. Set to `none` to disable file logging. | string | string | `/var/log/calico/felix.log` | -| logPrefix | The log prefix that Felix uses when rendering LOG rules. | string | string | `calico-packet` | -| logSeverityFile | The log severity above which logs are sent to the log file. | Same as logSeveritySys | string | `Info` | -| logSeverityScreen | The log severity above which logs are sent to the stdout. | Same as logSeveritySys | string | `Info` | -| logSeveritySys | The log severity above which logs are sent to the syslog. Set to `none` for no logging to syslog. | Debug, Info, Warning, Error, Fatal | string | `Info` | -| logDebugFilenameRegex | controls which source code files have their Debug log output included in the logs. Only logs from files with names that match the given regular expression are included. The filter only applies to Debug level logs. 
| regex | string | `""` | -| maxIpsetSize | Maximum size for the ipsets used by Felix. Should be set to a number that is greater than the maximum number of IP addresses that are ever expected in a selector. | int | int | `1048576` | -| metadataAddr | The IP address or domain name of the server that can answer VM queries for cloud-init metadata. In OpenStack, this corresponds to the machine running nova-api (or in Ubuntu, nova-api-metadata). A value of `none` (case insensitive) means that Felix should not set up any NAT rule for the metadata path. | IPv4, hostname, none | string | `127.0.0.1` | -| metadataPort | The port of the metadata server. This, combined with global.MetadataAddr (if not 'None'), is used to set up a NAT rule, from 169.254.169.254:80 to MetadataAddr:MetadataPort. In most cases this should not need to be changed. | int | int | `8775` | -| natOutgoingAddress | The source address to use for outgoing NAT. By default an iptables MASQUERADE rule determines the source address which will use the address on the host interface the traffic leaves on. | IPV4 | string | `""` | -| policySyncPathPrefix | File system path where Felix notifies services of policy changes over Unix domain sockets. This is required only if you're configuring [L7 logs](../../observability/elastic/l7/configure.mdx), or [egress gateways](../../networking/egress/index.mdx). Set to `""` to disable. | string | string | `""` | -| prometheusGoMetricsEnabled | Set to `false` to disable Go runtime metrics collection, which the Prometheus client does by default. This reduces the number of metrics reported, reducing Prometheus load. | boolean | boolean | `true` | -| prometheusMetricsEnabled | Set to `true` to enable the experimental Prometheus metrics server in Felix. | boolean | boolean | `false` | -| prometheusMetricsHost | TCP network address that the Prometheus metrics server should bind to. | IPv4, IPv6, Hostname | string | `""` | -| prometheusMetricsPort | TCP port that the Prometheus metrics server should bind to. | int | int | `9091` | -| prometheusProcessMetricsEnabled | Set to `false` to disable process metrics collection, which the Prometheus client does by default. This reduces the number of metrics reported, reducing Prometheus load. | boolean | boolean | `true` | -| prometheusReporterEnabled | Set to `true` to enable configure Felix to keep count of recently denied packets and publish these as Prometheus metrics. Note that denied packet metrics are independent of the `dropActionOverride` setting. Specifically, if packets that would normally be denied are being allowed through by a setting of `Accept` or `LogAndAccept`, those packets still get counted as denied packets. | `true`, `false` | boolean | `false` | -| prometheusReporterPort | The TCP port on which to report denied packet metrics, if `prometheusReporterEnabled` is set to `true`. | | | `9092` | -| removeExternalRoutes | Whether or not to remove device routes that have not been programmed by Felix. Disabling this will allow external applications to also add device routes. | bool | boolean | `true` | -| reportingInterval | Interval at which Felix reports its status into the datastore. 0 means disabled and is correct for Kubernetes-only clusters. Must be non-zero in OpenStack deployments. | `5s`, `10s`, `1m` etc. | duration | `30s` | -| reportingTTL | Time-to-live setting for process-wide status reports. | `5s`, `10s`, `1m` etc. 
| duration | `90s` | -| routeRefreshInterval | Period at which Felix re-checks the routes in the data plane to ensure that no other process has accidentally broken $[prodname]'s rules. Set to 0 to disable route refresh. | `5s`, `10s`, `1m` etc. | duration | `90s` | -| ipsecMode | Controls which mode IPsec is operating on. The only supported value is `PSK`. An empty value means IPsec is not enabled. | PSK | string | `""` | -| ipsecAllowUnsecuredTraffic | When set to `false`, only IPsec-protected traffic will be allowed on the packet paths where IPsec is supported. When set to `true`, IPsec will be used but non-IPsec traffic will be accepted. In general, setting this to `true` is less safe since it allows an attacker to inject packets. However, it is useful when transitioning from non-IPsec to IPsec since it allows traffic to flow while the cluster negotiates the IPsec mesh. | `true`, `false` | boolean | `false` | -| ipsecIKEAlgorithm | IPsec IKE algorithm. Default is NIST suite B recommendation. | string | string | `aes128gcm16-prfsha256-ecp256` | -| ipsecESPAlgorithm | IPsec ESP algorithm. Default is NIST suite B recommendation. | string | string | `aes128gcm16-ecp256` | -| ipsecLogLevel | Controls log level for IPsec components. Set to `None` for no logging. | `None`, `Notice`, `Info`, `Debug`, `Verbose` | string | `Info` | -| ipsecPSKFile | The path to the pre shared key file for IPsec. | string | string | `""` | -| flowLogsFileEnabled | Set to `true`, enables flow logs. If set to `false` no flow logging will occur. Flow logs are written to a file `flows.log` and sent to Elasticsearch. The location of this file can be configured using the `flowLogsFileDirectory` field. File rotation settings for this `flows.log` file can be configured using the fields `flowLogsFileMaxFiles` and `flowLogsFileMaxFileSizeMB`. Note that flow log exports to Elasticsearch are dependent on flow logs getting written to this file. Setting this parameter to `false` will disable flow logs. | `true`, `false` | boolean | `false` | -| flowLogsFileDirectory | Set the directory where flow logs files are stored on Linux nodes. This parameter only takes effect when `flowLogsFileEnabled` is set to `true`. | string | string | `/var/log/calico/flowlogs` | -| flowLogsPositionFilePath | Specify the position of the external pipeline that reads flow logs on Linux nodes. This parameter only takes effect when `FlowLogsDynamicAggregationEnabled` is set to `true`. | string | string | `/var/log/calico/flows.log.pos` | -| flowLogsFileMaxFiles | Set the number of log files to keep. This parameter only takes effect when `flowLogsFileEnabled` is set to `true`. | int | int | `5` | -| flowLogsFileMaxFileSizeMB | Set the max size in MB of flow logs files before rotation. This parameter only takes effect when `flowLogsFileEnabled` is set to `true`. | int | int | `100` | -| flowLogsFlushInterval | The period, in seconds, at which Felix exports the flow logs. | int | int | `300s` | -| flowLogsFileAggregationKindForAllowed | How much to aggregate the flow logs sent to Elasticsearch for allowed traffic. Bear in mind that changing this value may have a dramatic impact on the volume of flow logs sent to Elasticsearch. | 0-2 | [AggregationKind](#aggregationkind) | 2 | -| flowLogsFileAggregationKindForDenied | How much to aggregate the flow logs sent to Elasticsearch for denied traffic. Bear in mind that changing this value may have a dramatic impact on the volume of flow logs sent to Elasticsearch. 
| 0-2 | [AggregationKind](#aggregationkind) | 1 | -| flowLogsFileIncludeService | When set to `true`, include destination service information in the aggregated flow log. Note that service information will only be included when the flow can be explicitly determined to be bound to a service (e.g. pre-DNAT destination matches a service ClusterIP). | `true`, `false` | boolean | `false` | -| flowLogsFileIncludeLabels | When set to `true`, include source and destination endpoint labels in the aggregated flow log. Note that only Kubernetes endpoints or network sets are included; arbitrary networks do not contain labels. | `true`, `false` | boolean | `false` | -| flowLogsFileIncludePolicies | When set to `true`, include all policies in the aggregated flow logs that acted upon and matches the flow log traffic. | `true`, `false` | boolean | `false` | -| flowLogsDestDomainsByClient | When set to true, top-level domains are strictly associated with the source IP that originally queried the domains. (default: true) -| flowLogsEnableNetworkSets | When set to `true`, include an arbitrary network set in the aggregated flow log that matches the IP address of the flow log endpoint. | `true`, `false` | boolean | `false` | -| flowLogsCollectProcessInfo | When set to `true`, Felix will load the kprobe BPF programs to collect process info. | `true`, `false` | boolean | `false` | -| flowLogsCollectTcpStats | When set to `true`, Felix will collect the TCP socket stats. | `true`, `false` | boolean | `true` | -| flowLogsCollectProcessPath | When set to `true`, along with flowLogsCollectProcessInfo, each flow log will include the full path of the executable and the arguments with which the executable was invoked. | `true`, `false` | boolean | `false` | -| flowLogsFilePerFlowProcessLimit | Specify the maximum number of flow log entries with distinct process information beyond which process information will be aggregated | int | int | `2` | -| flowLogsFileNatOutgoingPortLimit | Specify the maximum number of distinct post SNAT ports that will appear in the flowLogs | int | int | `3` | -| flowLogsFilePerFlowProcessArgsLimit | Specify the maximum number of unique arguments in the flowlogs beyond which process arguments will be aggregated | int | int | `5` | -| flowLogsFileDomainsLimit | Specify the maximum number of top-level domains to include in a flow log. This only applies to source reported flows to destinations external to the cluster. | int | int | `5` | -| statsDumpFilePath | Specify the position of the file used for dumping flow log statistics on Linux nodes. Note this is an internal setting that users shouldn't need to modify. | string | string | `/var/log/calico/stats/dump` | -| routeTableRange | _deprecated in favor of `RouteTableRanges`_ Calico programs additional Linux route tables for various purposes. `RouteTableRange` specifies the indices of the route tables that Calico should use. | | [RouteTableRanges](#routetablerange) | `""` | -| routeTableRanges | Calico programs additional Linux route tables for various purposes. `RouteTableRanges` specifies a set of table index ranges that Calico should use. Deprecates `RouteTableRange`, overrides `RouteTableRange` | | [RouteTableRanges](#routetableranges) | `[{"min": 1, "max": 250}]` | -| routeSyncDisabled | Set to `true` to disable Calico programming routes to local workloads. 
| boolean | boolean | `false` | -| serviceLoopPrevention | When [service IP advertisement is enabled](../../networking/configuring/advertise-service-ips.mdx), prevent routing loops to service IPs that are not in use, by dropping or rejecting packets that do not get DNAT'd by kube-proxy. Unless set to "Disabled", in which case such routing loops continue to be allowed. | `Drop`, `Reject`, `Disabled` | string | `Drop` | -| workloadSourceSpoofing | Controls whether pods can enable source IP address spoofing with the `cni.projectcalico.org/allowedSourcePrefixes` annotation. When set to `Any`, pods can use this annotation to send packets from any IP address. | `Any`, `Disabled` | string | `Disabled` | -| vxlanEnabled | Optional, you shouldn't need to change this setting as Felix calculates if VXLAN should be enabled based on the existing IP Pools. When set, this overrides whether Felix should create the VXLAN tunnel device for VXLAN networking. | `true`, `false`, unset | optional boolean | unset | -| vxlanMTU | MTU to use for the IPv4 VXLAN tunnel device. Zero value means auto-detect. Also controls NodePort MTU when eBPF enabled. | int | int | `0` | -| vxlanMTUV6 | MTU to use for the IPv6 VXLAN tunnel device. Zero value means auto-detect. Also controls NodePort MTU when eBPF enabled. | int | int | `0` | -| vxlanPort | Port to use for VXLAN traffic. A value of `0` means "use the kernel default". | int | int | `4789` | -| vxlanVNI | Virtual network ID to use for VXLAN traffic. A value of `0` means "use the kernel default". | int | int | `4096` | -| allowVXLANPacketsFromWorkloads | Set to `true` to allow VXLAN encapsulated traffic from workloads. | boolean | boolean | `false` | -| allowIPIPPacketsFromWorkloads | Set to `true` to allow IPIP encapsulated traffic from workloads. | boolean | boolean | `false` | -| windowsFlowLogsFileDirectory | Set the directory where flow logs files are stored on Windows nodes. This parameter only takes effect when `flowLogsFileEnabled` is set to `true`. | string | string | `c:\\TigeraCalico\\flowlogs` | -| windowsFlowLogsPositionFilePath | Specify the position of the external pipeline that reads flow logs on Windows nodes. This parameter only takes effect when `FlowLogsDynamicAggregationEnabled` is set to `true`. | string | string | `c:\\TigeraCalico\\flowlogs\\flows.log.pos` | -| windowsStatsDumpFilePath | Specify the position of the file used for dumping flow log statistics on Windows nodes. Note this is an internal setting that users shouldn't need to modify. | string | string | `c:\\TigeraCalico\\stats\\dump` | -| windowsDNSCacheFile | Specify the name of the file that Calico uses to preserve learnt DNS information when restarting. | string | string | `c:\\TigeraCalico\\felix-dns-cache.txt` | -| windowsDNSExtraTTL | Specify extra time in seconds to keep IPs and alias names that are learnt from DNS, in addition to each name or IP's advertised TTL. | int | int | `120` | -| windowsManageFirewallRules | Configure whether or not Felix will program Windows Firewall rules. (to allow inbound access to its own metrics ports) [Default: Disabled] | `Enabled`, `Disabled` | string | `Disabled` | -| wireguardEnabled | Enable encryption on WireGuard supported nodes in cluster. When enabled, pod to pod traffic will be sent over encrypted tunnels between the nodes. | `true`, `false` | boolean | `false` | -| wireguardEnabledV6 | Enable encryption for IPv6 on WireGuard supported nodes in cluster. 
When enabled, pod to pod traffic will be sent over encrypted tunnels between the nodes. | `true`, `false` | boolean | `false` | -| wireguardInterfaceName | Name of the IPv4 WireGuard interface created by Felix. If you change the name and want to clean up the previously configured interface names on each node, this is a manual process. Felix expects the name to end with either .cali or .calico suffix. | string | string | wireguard.cali | -| wireguardInterfaceNameV6 | Name of the IPv6 WireGuard interface created by Felix. If you change the name and want to clean up the previously configured interface names on each node, this is a manual process. Felix expects the name to end with either .cali or .calico suffix. | string | string | wg-v6.cali | -| wireguardListeningPort | Port used by WireGuard tunnels. Felix sets up WireGuard tunnel on each node specified by this port. Available for configuration only in the global FelixConfiguration resource; setting it per host, config-file or environment variable will not work. | 1-65535 | int | 51820 | -| wireguardListeningPortV6 | Port used by IPv6 WireGuard tunnels. Felix sets up an IPv6 WireGuard tunnel on each node specified by this port. Available for configuration only in the global FelixConfiguration resource; setting it per host, config-file or environment variable will not work. | 1-65535 | int | 51821 | -| wireguardMTU | MTU set on the WireGuard interface created by Felix. Zero value means auto-detect. See [Configuring MTU](../../networking/configuring/mtu.mdx). | int | int | 0 | -| wireguardMTUV6 | MTU set on the IPv6 WireGuard interface created by Felix. Zero value means auto-detect. See [Configuring MTU](../../networking/configuring/mtu.mdx). | int | int | 0 | -| wireguardRoutingRulePriority | WireGuard routing rule priority value set up by Felix. If you change the default value, set it to a value most appropriate to routing rules for your nodes. | 1-32765 | int | 99 | -| wireguardHostEncryptionEnabled | **Experimental**: Adds host-namespace workload IP's to WireGuard's list of peers. Should **not** be enabled when WireGuard is enabled on a cluster's control plane node, as networking deadlock can occur. | true, false | boolean | false | -| wireguardKeepAlive | WireguardKeepAlive controls Wireguard PersistentKeepalive option. Set 0 to disable. [Default: 0] | `5s`, `10s`, `1m` etc. | duration | `0` | -| xdpRefreshInterval | Period at which Felix re-checks the XDP state in the data plane to ensure that no other process has accidentally broken $[prodname]'s rules. Set to 0 to disable XDP refresh. | `5s`, `10s`, `1m` etc. | duration | `90s` | -| xdpEnabled | When `bpfEnabled` is `false`: enable XDP acceleration for host endpoint policies. When `bpfEnabled` is `true`, XDP is automatically used for Calico policy where that makes sense, regardless of this setting. [Default: `true`] | true,false | boolean | `true` | -| dnsCacheFile | The name of the file that Felix uses to preserve learnt DNS information when restarting. | file name | string | `/var/run/calico/felix-dns-cache.txt` | -| dnsCacheSaveInterval | The period, in seconds, at which Felix saves learnt DNS information to the cache file. | `5s`, `10s`, `1m` etc. | duration | `60s` | -| dnsCacheEpoch | An arbitrary number that can be changed, at runtime, to tell Felix to discard all its learnt DNS information. | int | int | `0` | -| dnsExtraTTL | Extra time to keep IPs and alias names that are learnt from DNS, in addition to each name or IP's advertised TTL. | `5s`, `10s`, `1m` etc. 
| duration | `0s` |
-| dnsTrustedServers | The DNS servers that Felix should trust. Each entry here must be `<ip>[:<port>]` - indicating an explicit DNS server IP - or `k8s-service:[<namespace>/]<name>[:port]` - indicating a Kubernetes DNS service. `<port>` defaults to the first service port, or 53 for an IP, and `<namespace>` defaults to `kube-system`. An IPv6 address with a port must use the square brackets convention, for example `[fd00:83a6::12]:5353`. Note that Felix (calico-node) will need RBAC permission to read the details of each service specified by a `k8s-service:...` form. | IPs or service names | comma-separated strings | `k8s-service:kube-dns` |
-| dnsLogsFileEnabled | Set to `true` to enable DNS logs. If set to `false`, no DNS logging will occur. DNS logs are written to a file `dns.log` and sent to Elasticsearch. The location of this file can be configured using the `DNSLogsFileDirectory` field. File rotation settings for this `dns.log` file can be configured using the fields `DNSLogsFileMaxFiles` and `DNSLogsFileMaxFileSizeMB`. Note that DNS log exports to Elasticsearch are dependent on DNS logs getting written to this file. Setting this parameter to `false` will disable DNS logs. | `true`, `false` | boolean | `false` |
-| dnsLogsFileDirectory | The directory where DNS log files are stored. This parameter only takes effect when `DNSLogsFileEnabled` is `true`. | directory | string | `/var/log/calico/dnslogs` |
-| dnsLogsFileMaxFiles | The number of files to keep when rotating DNS log files. This parameter only takes effect when `DNSLogsFileEnabled` is `true`. | int | int | `5` |
-| dnsLogsFileMaxFileSizeMB | The max size in MB of DNS log files before rotation. This parameter only takes effect when `DNSLogsFileEnabled` is `true`. | int | int | `100` |
-| dnsLogsFlushInterval | The period, in seconds, at which Felix exports DNS logs. | int | int | `300s` |
-| dnsLogsFileAggregationKind | How much to aggregate DNS logs. Bear in mind that changing this value may have a dramatic impact on the volume of DNS logs sent to Elasticsearch. `0` means no aggregation, `1` means aggregate similar DNS logs from workloads in the same ReplicaSet. | `0`,`1` | int | `1` |
-| dnsLogsFileIncludeLabels | Whether to include client and server workload labels in DNS logs. | `true`, `false` | boolean | `true` |
-| dnsLogsFilePerNodeLimit | Limit on the number of DNS logs that can be emitted within each flush interval. When this limit has been reached, Felix counts the number of unloggable DNS responses within the flush interval, and emits a WARNING log with that count at the same time as it flushes the buffered DNS logs. | int | int | `0` (no limit) |
-| dnsLogsLatency | Whether to include measurements of DNS request/response latency in each DNS log. | `true`, `false` | boolean | `true` |
-| dnsPolicyMode | DNSPolicyMode specifies how DNS policy programming will be handled. | `NoDelay`, `DelayDNSResponse`, `DelayDeniedPacket` | [DNSPolicyMode](#dnspolicymode) | `DelayDeniedPacket` |
-| dnsPolicyNfqueueID | DNSPolicyNfqueueID is the NFQUEUE ID to use for DNS Policy re-evaluation when the domain's IP hasn't been programmed to ipsets yet. This value can be changed to avoid conflicts with other users of NFQUEUEs. Used when `DNSPolicyMode` is `DelayDeniedPacket`. | 0-65535 | int | `100` |
-| dnsPolicyNfqueueSize | DNSPolicyNfqueueSize is the size of the NFQUEUE for DNS policy re-evaluation. This is the maximum number of denied packets that may be queued up pending re-evaluation. Used when `DNSPolicyMode` is `DelayDeniedPacket`. 
| 0-65535 | int | `100` | -| dnsPacketsNfqueueID | DNSPacketsNfqueueID is the NFQUEUE ID to use for capturing DNS packets to ensure programming IPSets occurs before the response is released. Used when `DNSPolicyMode` is `DelayDNSResponse`. | 0-65535 | int | `101` | -| dnsPacketsNfqueueSize | DNSPacketsNfqueueSize is the size of the NFQUEUE for captured DNS packets. This is the maximum number of DNS packets that may be queued awaiting programming in the data plane. Used when `DNSPolicyMode` is `DelayDNSResponse`. | 0-65535 | int | `100` | -| dnsPacketsNfqueueMaxHoldDuration | DNSPacketsNfqueueMaxHoldDuration is the max length of time to hold on to a DNS response while waiting for the data plane to be programmed. Used when `DNSPolicyMode` is `DelayDNSResponse`. | `5s`, `10s`, `1m` etc. | duration | `3s` | -| bpfEnabled | Enable eBPF data plane mode. eBPF mode has some limitations, see the [HOWTO guide](../../operations/ebpf/enabling-ebpf.mdx) for more details. | true, false | boolean | false | -| bpfDisableUnprivileged | If true, Felix sets the kernel.unprivileged_bpf_disabled sysctl to disable unprivileged use of BPF. This ensures that unprivileged users cannot access Calico's BPF maps and cannot insert their own BPF programs to interfere with the ones that $[prodname] installs. | true, false | boolean | true | -| bpfLogLevel | In eBPF data plane mode, the log level used by the BPF programs. The logs are emitted to the BPF trace pipe, accessible with the command `tc exec bpf debug`. This is a tech preview feature and subject to change in future releases. | Off,Info,Debug | string | Off | -| bpfDataIfacePattern | In eBPF data plane mode, controls which interfaces Felix should attach BPF programs to catch traffic to/from the external network. This needs to match the interfaces that Calico workload traffic flows over as well as any interfaces that handle incoming traffic to NodePorts and services from outside the cluster. It should not match the workload interfaces (usually named cali...).. This is a tech preview feature and subject to change in future releases. | regular expression | string | ^((en|wl|ww|sl|ib)[Popsx].\*|(eth|wlan|wwan|bond).\*|tunl0$|vxlan.calico$|vxlan-v6.calico$|wireguard.cali$|wg-v6.cali$|egress.calico$) | -| bpfConnectTimeLoadBalancingEnabled | In eBPF data plane mode, controls whether Felix installs the connect-time load balancer. In the current release, the connect-time load balancer is required for the host to reach kubernetes services. This is a tech preview feature and subject to change in future releases. | true,false | boolean | true | -| bpfExternalServiceMode | In eBPF data plane mode, controls how traffic from outside the cluster to NodePorts and ClusterIPs is handled. In Tunnel mode, packet is tunneled from the ingress host to the host with the backing pod and back again. In DSR mode, traffic is tunneled to the host with the backing pod and then returned directly; this requires a network that allows direct return. | Tunnel,DSR | string | Tunnel | -| bpfKubeProxyIptablesCleanupEnabled | In eBPF data plane mode, controls whether Felix will clean up the iptables rules created by the Kubernetes `kube-proxy`; should only be enabled if `kube-proxy` is not running. This is a tech preview feature and subject to change in future releases. | true,false | boolean | true | -| bpfKubeProxyMinSyncPeriod | In eBPF data plane mode, controls the minimum time between data plane updates for Felix's embedded `kube-proxy` implementation. | `5s`, `10s`, `1m` etc. 
| duration | `1s` | -| BPFKubeProxyEndpointSlicesEnabled | In eBPF data plane mode, controls whether Felix's embedded kube-proxy derives its services from Kubernetes' EndpointSlices resources. Using EndpointSlices is more efficient but it requires EndpointSlices support to be enabled at the Kubernetes API server. | true,false | boolean | false | -| bpfMapSizeConntrack | In eBPF data plane mode, controls the size of the conntrack map. | int | int | 512000 | -| bpfMapSizeIPSets | In eBPF data plane mode, controls the size of the ipsets map. | int | int | 1048576 | -| bpfMapSizeNATAffinity | In eBPF data plane mode, controls the size of the NAT affinity map. | int | int | 65536 | -| bpfMapSizeNATFrontend | In eBPF data plane mode, controls the size of the NAT front end map. | int | int | 65536 | -| bpfMapSizeNATBackend | In eBPF data plane mode, controls the size of the NAT back end map. | int | int | 262144 | -| bpfMapSizeRoute | In eBPF data plane mode, controls the size of the route map. | int | int | 262144 | -| bpfPolicyDebugEnabled | In eBPF data plane mode, controls whether felix will collect policy dump for each interface. | true, false | boolean | true | -| routeSource | Where Felix gets is routing information from for VXLAN and the BPF data plane. The CalicoIPAM setting is more efficient because it supports route aggregation, but it only works when Calico's IPAM or host-local IPAM is in use. Use the WorkloadIPs setting if you are using Calico's VXLAN or BPF data plane and not using Calico IPAM or host-local IPAM. | CalicoIPAM,WorkloadIPs | string | `CalicoIPAM` | -| mtuIfacePattern | Pattern used to discover the host's interface for MTU auto-detection. | regex | string | ^((en|wl|ww|sl|ib)[opsvx].*|(eth|wlan|wwan).*) | -| bpfForceTrackPacketsFromIfaces | Forces traffic from these interfaces in BPF mode to skip Calico's iptables NOTRACK rule, allowing traffic from those interfaces to be tracked by Linux conntrack. Use only for interfaces that are not used for the Calico fabric, for example, a docker bridge device for non-Calico-networked containers. | A list of strings | A list of strings | docker+ | -| bpfDisableGROForIfaces | BPFDisableGROForIfaces is a regular expression that controls which interfaces Felix should disable the Generic Receive Offload [GRO] option. It should not match the workload interfaces (usually named cali...). | regex | string | "" | -| egressIPSupport | Defines three different support modes for egress gateway function. `Disabled` means egress gateways are not supported. `EnabledPerNamespace` means egress gateway function is enabled and can be configured on a per-namespace basis (but per-pod egress annotations are ignored). `EnabledPerNamespaceOrPerPod` means egress gateway function is enabled and can be configured per-namespace or per-pod (with per-pod egress annotations overriding namespace annotations). | Disabled,
    EnabledPerNamespace,
    EnabledPerNamespaceOrPerPod | string | `Disabled` |
-| egressIPVXLANPort | Port to use for egress gateway VXLAN traffic. A value of `0` means "use the kernel default". | int | int | `4790` |
-| egressIPVXLANVNI | Virtual network ID to use for egress gateway VXLAN traffic. A value of `0` means "use the kernel default". | int | int | `4097` |
-| egressIPRoutingRulePriority | Controls the priority value to use for the egress gateway routing rule. | int | int | `100` |
-| egressGatewayPollInterval | Controls the interval at which Felix will poll remote egress gateways to check their health. Only Egress Gateways with a named "health" port will be polled in this way. Egress Gateways that fail the health check will be taken out of use as if they had been deleted. | `5s`, `10s`, `1m` etc. | duration | `10s` |
-| egressGatewayPollFailureCount | Controls the minimum number of poll failures before a remote Egress Gateway is considered to have failed. | int | int | `3` |
-| externalNetworkSupport | Defines two different support modes for external network function. `Disabled` means external network is not supported. `Enabled` means external network support is enabled. | Disabled,
    Enabled | string | `Disabled` | -| externalNetworkRoutingRulePriority | controls the priority value to use for the external network routing rule. | int | int | `102` | -| captureDir | Controls the directory where packet capture files are stored. | string | string | `/var/log/calico/pcap` | -| captureMaxSizeBytes | Controls the maximum size in bytes for a packet capture file before rotation. | int | int | `10000000` | -| captureRotationSeconds | Controls the rotation period in seconds for a packet capture file. | int | int | `3600` | -| captureMaxFiles | Controls the maximum number rotated packet capture files. | int | int | `2` | - -\* When `dropActionOverride` is set to `LogAndDrop` or `LogAndAccept`, the `syslog` entries look something like the following. - -``` -May 18 18:42:44 ubuntu kernel: [ 1156.246182] calico-drop: IN=tunl0 OUT=cali76be879f658 MAC= SRC=192.168.128.30 DST=192.168.157.26 LEN=60 TOS=0x00 PREC=0x00 TTL=62 ID=56743 DF PROTO=TCP SPT=56248 DPT=80 WINDOW=29200 RES=0x00 SYN URGP=0 MARK=0xa000000 -``` - -\*\* Duration is denoted by the numerical amount followed by the unit of time. Valid units of time include nanoseconds (ns), microseconds (µs), milliseconds (ms), seconds (s), minutes (m), and hours (h). Units of time can also be used together e.g. `3m30s` to represent 3 minutes and 30 seconds. Any amounts of time that can be converted into larger units of time will be converted e.g. `90s` will become `1m30s`. - -
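-To make the settings above concrete, here is a minimal sketch of a per-node override. The node name `my-node-1` is illustrative; the fields shown are taken from the table and footnotes above:
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: FelixConfiguration
-metadata:
-  # A resource named node.<nodename> overrides the global "default" resource on that node only.
-  name: node.my-node-1
-spec:
-  # Log each denied packet to syslog before dropping it (see the dropActionOverride footnote above).
-  dropActionOverride: LogAndDrop
-  # Duration fields combine units, for example 3 minutes and 30 seconds.
-  reportingInterval: 3m30s
-```
-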
    - -`genericXDPEnabled` and `xdpRefreshInterval` are only relevant when `bpfEnabled` is `false` and -`xdpEnabled` is `true`; in other words when XDP is being used to accelerate denial-of-service -prevention policies in the iptables data plane. - -When `bpfEnabled` is `true` the "xdp" settings all have no effect; in BPF mode the implementation of -policy is always accelerated, using the best available BPF technology. - -### Health Timeout Overrides - -Felix has internal liveness and readiness watchdog timers that monitor its various loops. -If a loop fails to "check in" within the allotted timeout then Felix will report non-Ready -or non-Live on its health port (which is monitored by Kubelet in a Kubernetes system). -If Felix reports non-Live, this can result in the Pod being restarted. - -In Kubernetes, if you see the calico-node Pod readiness or liveness checks fail -intermittently, check the calico-node Pod log for a log from Felix that gives the -overall health status (the list of components will depend on which features are enabled): - -``` -+---------------------------+---------+----------------+-----------------+--------+ -| COMPONENT | TIMEOUT | LIVENESS | READINESS | DETAIL | -+---------------------------+---------+----------------+-----------------+--------+ -| CalculationGraph | 30s | reporting live | reporting ready | | -| FelixStartup | 0s | reporting live | reporting ready | | -| InternalDataplaneMainLoop | 1m30s | reporting live | reporting ready | | -+---------------------------+---------+----------------+-----------------+--------+ -``` - -If some health timeouts show as "timed out" it may help to apply an override -using the `healthTimeoutOverrides` field: - -```yaml noValidation -... -spec: - healthTimeoutOverrides: - - name: InternalDataplaneMainLoop - timeout: "5m" - - name: CalculationGraph - timeout: "1m30s" - ... -``` - -A timeout value of 0 disables the timeout. - -### ProtoPort - -| Field | Description | Accepted Values | Schema | -| -------- | -------------------- | ------------------------------------ | ------ | -| port | The exact port match | 0-65535 | int | -| protocol | The protocol match | tcp, udp, sctp | string | -| net | The CIDR match | any valid CIDR (e.g. 192.168.0.0/16) | string | - -Keep in mind that in the following example, `net: ""` and `net: "0.0.0.0/0"` are processed as the same in the policy enforcement. - -```yaml noValidation - ... -spec: - failsafeInboundHostPorts: - - net: "192.168.1.1/32" - port: 22 - protocol: tcp - - net: "" - port: 67 - protocol: udp -failsafeOutboundHostPorts: - - net: "0.0.0.0/0" - port: 67 - protocol: udp - ... -``` - -### AggregationKind - -| Value | Description | -| ----- | ---------------------------------------------------------------------------------------- | -| 0 | No aggregation | -| 1 | Aggregate all flows that share a source port on each node | -| 2 | Aggregate all flows that share source ports or are from the same ReplicaSet on each node | - -### DNSPolicyMode - -| Value | Description | -| ----------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| DelayDeniedPacket | Felix delays any denied packet that traversed a policy that included egress domain matches, but did not match. 
The packet is released after a fixed time, or once the destination IP address has been programmed. |
-| DelayDNSResponse | Felix delays any DNS response until related IPSets are programmed. This introduces some latency to all DNS packets (even when no IPSet programming is required), but it ensures policy hit statistics are accurate. This is the recommended setting when you are making use of staged policies or policy rule hit statistics. |
-| NoDelay | Felix does not introduce any delay to the packets. DNS rules may not have been programmed by the time the first packet traverses the policy rules. Client applications need to handle reconnection attempts if initial connection attempts fail. This may be problematic for some applications or for very low DNS TTLs. |
-
-On Windows, or when using the eBPF data plane, this setting is ignored and `NoDelay` is always used.
-
-A Linux kernel version of 3.13 or greater is required to use `DelayDNSResponse`. For earlier kernel versions, this value is modified to `DelayDeniedPacket`.
-
-### RouteTableRange
-
-The `RouteTableRange` option is now deprecated in favor of [RouteTableRanges](#routetableranges).
-
-| Field | Description | Accepted Values | Schema |
-| ----- | -------------------- | --------------- | ------ |
-| min | Minimum index to use | 1-250 | int |
-| max | Maximum index to use | 1-250 | int |
-
-### RouteTableRanges
-
-`RouteTableRanges` is a list of `RouteTableRange` objects:
-
-| Field | Description | Accepted Values | Schema |
-| ----- | -------------------- | --------------- | ------ |
-| min | Minimum index to use | 1 - 4294967295 | int |
-| max | Maximum index to use | 1 - 4294967295 | int |
-
-Each item in the `RouteTableRanges` list designates a range of routing tables available to Calico. By default, Calico will use a single range of `1-250`. If a range spans Linux's reserved table range (`253-255`), those tables are automatically excluded from the list. It's possible that other table ranges may also be reserved by third-party systems unknown to Calico. In that case, multiple ranges can be defined to target tables below and above the sensitive ranges:
-
-```sh
-# Target tables 65-99 and 256-1000, skipping 100-255.
-calicoctl patch felixconfig default --type=merge -p '{"spec":{"routeTableRanges": [{"min": 65, "max": 99}, {"min": 256, "max": 1000}] }}'
-```
-
-_Note_: for performance reasons, the maximum total number of routing tables that Felix will accept is 65535 (2^16 - 1).
-
-Specifying both the `RouteTableRange` and `RouteTableRanges` arguments is not supported and will result in an error from the API.
-
-### AWS IAM Role/Policy for source-destination-check configuration
-
-Setting `awsSrcDstCheck` to `Disable` will automatically disable source-destination-check on EC2 instances in a cluster, provided the necessary IAM roles and policies are in place. One of the policies assigned to the IAM role of the cluster nodes must contain a statement similar to the following:
-
-```
-{
-  "Effect": "Allow",
-  "Action": [
-    "ec2:DescribeInstances",
-    "ec2:ModifyNetworkInterfaceAttribute"
-  ],
-  "Resource": "*"
-}
-```
-
-If there are no policies attached to the node role that contain the above statement, attach a new policy. For example, if the node role is `test-cluster-nodeinstance-role`, open the IAM role in the AWS console and, in the `Permission policies` list, add a new inline policy that includes the above statement in its JSON definition. 
For detailed information, see [AWS documentation](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_create.html?icmpid=docs_iam_console). - -For an EKS cluster, the necessary IAM role and policy is available by default. No further actions are needed. - -## Supported operations - -| Datastore type | Create | Delete | Delete (Global `default`) | Update | Get/List | Notes | -| --------------------- | ------ | ------ | ------------------------- | ------ | -------- | ----- | -| Kubernetes API server | Yes | Yes | No | Yes | Yes | | diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/globalalert.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/globalalert.mdx deleted file mode 100644 index 78c8011442..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/globalalert.mdx +++ /dev/null @@ -1,318 +0,0 @@ ---- -description: API for this Calico Enterprise resource. ---- - -# Global Alert - -A global alert resource represents a query that is periodically run -against data sets collected by $[prodname] whose findings are -added to the Alerts page in the $[prodname] web console. Alerts may -search for the existence of rows in a query, or when aggregated metrics -satisfy a condition. - -$[prodname] supports alerts on the following data sets: - -- [Audit logs](../../observability/elastic/audit-overview.mdx) -- [DNS logs](../../observability/elastic/dns/index.mdx) -- [Flow logs](../../observability/elastic/flow/index.mdx) -- [L7 logs](../../observability/elastic/l7/index.mdx) - -For `kubectl` [commands](https://kubernetes.io/docs/reference/kubectl/overview/), the following case-insensitive aliases -can be used to specify the resource type on the CLI: -`globalalert.projectcalico.org`, `globalalerts.projectcalico.org` and abbreviations such as -`globalalert.p` and `globalalerts.p`. - -## Sample YAML - -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalAlert -metadata: - name: sample -spec: - summary: 'Sample' - description: 'Sample ${source_namespace}/${source_name_aggr}' - severity: 100 - dataSet: flows - query: action=allow - aggregateBy: [source_namespace, source_name_aggr] - field: num_flows - metric: sum - condition: gt - threshold: 0 -``` - -## GlobalAlert definition - -### Metadata - -| Field | Description | Accepted Values | Schema | -| ----- | ----------------------- | ----------------------------------------- | ------ | -| name | The name of this alert. | Lower-case alphanumeric with optional `-` | string | - -### Spec - -| Field | Description | Type | Required | Acceptable Values | Default | -| ------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------- | ----------------------------------------- | ---------------------------- | -------------------------------- | -| type | Type will dictate how the fields of the GlobalAlert will be utilized. Each `type` will have different usages and/or defaults for the other GlobalAlert fields as described in the table. | string | no | RuleBased | RuleBased | -| description | Human-readable description of the template. | string | yes | -| summary | Template for the description field in generated events. See the summary section below for more details. `description` is used if this is omitted. 
| string | no | -| severity | Severity of the alert for display in Manager. | int | yes | 1 - 100 | -| dataSet | Which data set to execute the alert against. | string | if `type` is `RuleBased` | audit, dns, flows, l7 | -| period | How often the query defined will run, if `type` is `RuleBased`. | duration | no | 1h 2m 3s | 5m, 15m if `type` is `RuleBased` | -| lookback | Specifies how far back in time data is to be collected. Must exceed audit log flush interval, `dnsLogsFlushInterval`, or `flowLogsFlushInterval` as appropriate. | duration | no | 1h 2m 3s | 10m | -| query | Which data to include from the source data set. Written in a domain-specific query language. See the query section below. | string | no | -| aggregateBy | An optional list of fields to aggregate results. | string array | no | -| field | Which field to aggregate results by if using a metric other than count. | string | if metric is one of avg, max, min, or sum | -| metric | A metric to apply to aggregated results. `count` is the number of log entries matching the aggregation pattern. Others are applied only to numeric fields in the logs. | string | no | avg, max, min, sum, count | -| condition | Compare the value of the metric to the threshold using this condition. | string | if metric defined | eq, not_eq, lt, lte, gt, gte | -| threshold | A numeric value to compare the value of the metric against. | float | if metric defined | -| substitutions | An optional list of values to replace variable names in query. | List of [GlobalAlertSubstitution](#globalalertsubstitution) | no | - -### GlobalAlertSubstitution - -| Field | Description | Type | Required | -| ------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------ | -------- | -| name | The name of the global alert substitution. It will be referenced by the variable names in query. Duplicate names are not allowed in the substitutions list. | string | yes | -| values | A list of values for this substitution. Wildcard operators asterisk (`*`) and question mark (`?`) are supported. | string array | yes | - -### Status - -| Field | Description | -| --------------- | ------------------------------------------------------------------------------------------- | -| lastUpdate | When the alert was last modified on the backend. | -| active | Whether the alert is active on the backend. | -| healthy | Whether the alert is in an error state or not. | -| lastExecuted | When the query for the alert last ran. | -| lastEvent | When the condition of the alert was last satisfied and an alert was successfully generated. | -| errorConditions | List of errors preventing operation of the updates or search. | - -## Query - -Alerts use a domain-specific query language to select which records -from the data set should be used in the alert. This could be used to -identify flows with specific features, or to select (or omit) certain -namespaces from consideration. - -The query language is composed of any number of selectors, combined -with boolean expressions (`AND`, `OR`, and `NOT`), set expressions -(`IN` and `NOTIN`) and bracketed subexpressions. These are translated -by $[prodname] to Elastic DSL queries that are executed on the backend. - -Set expressions support wildcard operators asterisk (`*`) and question mark (`?`). -The asterisk sign matches zero or more characters and the question mark matches a single character. 
-Set values can be embedded into the query string or reference the values -in the global alert substitution list. - -A selector consists of a key, comparator, and value. Keys and values -may be identifiers consisting of alphanumerics and underscores (`_`) -with the first character being alphabetic or an underscore, or may be -quoted strings. Values may also be integer or floating point numbers. -Comparators may be `=` (equal), `!=` (not equal), `<` (less than), -`<=` (less than or equal), `>` (greater than), or `>=` (greater than -or equal). - -Keys must be indexed fields in their corresponding data set. See the -appendix for a list of valid keys in each data set. - -Examples: - -- `query: "count > 0"` -- `query: "\"servers.ip\" = \"127.0.0.1\""` - -Selectors may be combined using `AND`, `OR`, and `NOT` boolean expressions, -`IN` and `NOTIN` set expressions, and bracketed subexpressions. - -Examples: - -- `query: "count > 100 AND client_name=mypod"` -- `query: "client_namespace = ns1 OR client_namespace = ns2"` -- `query: "count > 100 AND NOT (client_namespace = ns1 OR client_namespace = ns2)"` -- `query: "(qtype = A OR qtype = AAAA) AND rcode != NoError"` -- `query: "process_name IN {\"proc1?\", \"*proc2\"} AND source_namespace = ns1` -- `query: "qname NOTIN ${domains}"` - -## Aggregation - -Results from the query can be aggregated by any number of data fields. -Only these data fields will be included in the generated alerts, and -each unique combination of aggregations will generate a unique alert. -Careful consideration of fields for aggregation will yield the best -results. - -Some good choices for aggregations on the `flows` data set are -`[source_namespace, source_name_aggr, source_name]`, `[source_ip]`, -`[dest_namespace, dest_name_aggr, dest_name]`, and `[dest_ip]` -depending on your use case. For the `dns` data set, -`[client_namespace, client_name_aggr, client_name]` is a good choice -for an aggregation pattern. - -## Metrics and conditions - -Results from the query can be further aggregated using a metric that -is applied to a numeric field, or counts the number of rows in an -aggregation. Search hits satisfying the condition are output as -alerts. - -| Metric | Description | Applied to Field | -| ------ | ---------------------------------- | ---------------- | -| count | Counts the number of rows | No | -| min | The minimal value of the field | Yes | -| max | The maximal value of the field | Yes | -| sum | The sum of all values of the field | Yes | -| avg | The average value of the field | Yes | - -| Condition | Description | -| --------- | --------------------- | -| eq | Equals | -| not_eq | Not equals | -| lt | Less than | -| lte | Less than or equal | -| gt | Greater than | -| gte | Greater than or equal | - -Example: - -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalAlert -metadata: - name: frequent-dns-responses -spec: - description: 'Monitor for NXDomain' - summary: 'Observed ${sum} NXDomain responses for ${qname}' - severity: 100 - dataSet: dns - query: rcode = NXDomain AND (rtype = A or rtype = AAAA) - aggregateBy: qname - field: count - metric: sum - condition: gte - threshold: 100 -``` - -This alert identifies non-existing DNS responses for Internet addresses -that were observed more than 100 times in the past 10 minutes. - -### Unconditional alerts - -If the `field`, `metric`, `condition`, and `threshold` fields of an -alert are left blank then the alert will trigger whenever its query -returns any data. 
Each hit (or aggregation pattern, if `aggregateBy` -is non-empty) returned will cause an event to be created. This should -be used **only** when the query is highly specific to avoid filling -the Alerts page and index with a large number of events. The use of -`aggregateBy` is strongly recommended to reduce the number of entries -added to the Alerts page. - -The following example would alert on incoming connections to postgres -pods from the Internet that were not denied by policy. It runs hourly -to reduce the noise. Noise could be further reduced by removing -`source_ip` from the `aggregateBy` clause at the cost of removing -`source_ip` from the generated events. - -```yaml -period: 1h -lookback: 75m -query: 'dest_labels="application=postgres" AND source_type=net AND action=allow AND proto=tcp AND dest_port=5432' -aggregateBy: [dest_namespace, dest_name, source_ip] -``` - -## Summary template - -Alerts may include a summary template to provide context for the -alerts in the $[prodname] web console Alert user interface. Any field -in the `aggregateBy` section, or the value of the `metric` may be -substituted in the summary using a bracketed variable syntax. - -Example: - -```yaml -summary: 'Observed ${sum} NXDomain responses for ${qname}' -``` - -The `description` field is validated in the same manner. If not -provided, the `description` field is used in place of the `summary` -field. - -## Period and lookback - -The interval between alerts, and the amount of data considered by the -alert may be controlled using the `period` and `lookback` parameters -respectively. These fields are formatted as [duration](https://golang.org/pkg/time/#ParseDuration) strings. - -> A duration string is a possibly signed sequence of decimal numbers, -> each with optional fraction and a unit suffix, such as "300ms", -> "-1.5h" or "2h45m". Valid time units are "ns", "us" (or "µs"), -> "ms", "s", "m", "h". - -The minimum duration of a period is 1 minute with a default of 5 minutes and -the default for lookback is 10 minutes. The lookback should always be -greater than the sum of the period and the configured -`FlowLogsFlushInterval` or `DNSLogsFlushInterval` as appropriate to avoid -gaps in coverage. - -## Alert records - -With only aggregations and no metrics, the alert will generate one event -per aggregation pattern returned by the query. The record field will -contain only the aggregated fields. As before, this should be used -with specific queries. - -The addition of a metric will include the value of that metric in the -record, along with any aggregations. This, combined with queries as -necessary, will yield the best results in most cases. - -With no aggregations the alert will generate one event per record -returned by the query. The record will be included in its entirety -in the record field of the event. This should only be used with very -narrow and specific queries. - -## Templates - -$[prodname] supports the `GlobalAlertTemplate` resource type. -These are used in the $[prodname] web console to create alerts -with prepopulated fields that can be modified to suit your needs. -The `GlobalAlertTemplate` resource is configured identically to the -`GlobalAlert` resource. $[prodname] includes some sample Alert -templates; add your own templates as needed. 
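-The `substitutions` list referenced by `${domains}` in the query examples above is defined on the `GlobalAlert` itself. The following is an illustrative sketch only; the alert name, the substitution name `domains`, and its values are examples, not built-in content:
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: GlobalAlert
-metadata:
-  name: suspicious-domains
-spec:
-  description: 'DNS lookups of suspicious domains'
-  summary: 'DNS lookup of ${qname} from ${client_namespace}'
-  severity: 90
-  dataSet: dns
-  # ${domains} is expanded from the substitutions list below.
-  query: qname IN ${domains}
-  aggregateBy: [client_namespace, qname]
-  substitutions:
-    - name: domains
-      values: ['*.example.net', 'badsite?.example.org']
-```
-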
- -### Sample YAML - -**RuleBased GlobalAlert** - -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalAlertTemplate -metadata: - name: http.connections -spec: - description: 'HTTP connections to a target namespace' - summary: 'HTTP connections from ${source_namespace}/${source_name_aggr} to /${dest_name_aggr}' - severity: 50 - dataSet: flows - query: dest_namespace="" AND dest_port=80 - aggregateBy: [source_namespace, dest_name_aggr, source_name_aggr] - field: count - metric: sum - condition: gte - threshold: 1 -``` - -## Appendix: Valid fields for queries - -### Audit logs - -See [audit.k8s.io group v1](https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apiserver/pkg/apis/audit/v1/types.go) for descriptions of fields. - -### DNS logs - -See [DNS logs](../../observability/elastic/dns/dns-logs.mdx) for description of fields. - -### Flow logs - -See [Flow logs](../../observability/elastic/flow/datatypes.mdx) for description of fields. - -### L7 logs - -See [L7 logs](../../observability/elastic/l7/datatypes.mdx) for description of fields. diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/globalnetworkpolicy.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/globalnetworkpolicy.mdx deleted file mode 100644 index e87e58ab48..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/globalnetworkpolicy.mdx +++ /dev/null @@ -1,214 +0,0 @@ ---- -description: API for this Calico Enterprise resource. ---- - -# Global network policy - -import Servicematch from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_servicematch.mdx'; - -import Serviceaccountmatch from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_serviceaccountmatch.mdx'; - -import Ports from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_ports.mdx'; - -import SelectorScopes from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_selector-scopes.mdx'; - -import Selectors from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_selectors.mdx'; - -import Entityrule from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_entityrule.mdx'; - -import Icmp from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_icmp.mdx'; - -import Rule from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_rule.mdx'; - -import Httpmatch from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_httpmatch.mdx'; - -A global network policy resource (`GlobalNetworkPolicy`) represents an ordered set of rules which are applied -to a collection of endpoints that match a [label selector](#selector). - -`GlobalNetworkPolicy` is not a namespaced resource. `GlobalNetworkPolicy` applies to [workload endpoint resources](workloadendpoint.mdx) in all namespaces, and to [host endpoint resources](hostendpoint.mdx). -Select a namespace in a `GlobalNetworkPolicy` in the standard selector by using -`projectcalico.org/namespace` as the label name and a `namespace` name as the -value to compare against, e.g., `projectcalico.org/namespace == "default"`. -See [network policy resource](networkpolicy.mdx) for namespaced network policy. - -`GlobalNetworkPolicy` resources can be used to define network connectivity rules between groups of $[prodname] endpoints and host endpoints, and -take precedence over [Profile resources](profile.mdx) if any are defined. 
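
As an illustration of the namespace-selection technique described above, the following rule fragment (a sketch only, not a complete policy) allows traffic whose source is a workload endpoint in the `default` namespace:

```yaml
ingress:
  - action: Allow
    source:
      selector: projectcalico.org/namespace == "default"
```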
- -GlobalNetworkPolicies are organized into [tiers](tier.mdx), which provide an additional layer of ordering—in particular, note that the `Pass` action skips to the -next [tier](tier.mdx), to enable hierarchical security policy. - -For `kubectl` [commands](https://kubernetes.io/docs/reference/kubectl/overview/), the following case-insensitive aliases -may be used to specify the resource type on the CLI: -`globalnetworkpolicy.projectcalico.org`, `globalnetworkpolicies.projectcalico.org` and abbreviations such as -`globalnetworkpolicy.p` and `globalnetworkpolicies.p`. - -## Sample YAML - -This sample policy allows TCP traffic from `frontend` endpoints to port 6379 on -`database` endpoints. - -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalNetworkPolicy -metadata: - name: internal-access.allow-tcp-6379 -spec: - tier: internal-access - selector: role == 'database' - types: - - Ingress - - Egress - ingress: - - action: Allow - metadata: - annotations: - from: frontend - to: database - protocol: TCP - source: - selector: role == 'frontend' - destination: - ports: - - 6379 - egress: - - action: Allow -``` - -## Definition - -### Metadata - -| Field | Description | Accepted Values | Schema | Default | -| ----- | ----------------------------------------- | --------------------------------------------------- | ------ | ------- | -| name | The name of the network policy. Required. | Alphanumeric string with optional `.`, `_`, or `-`. | string | | - -### Spec - -| Field | Description | Accepted Values | Schema | Default | -| ---------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------- | --------------------- | --------------------------------------------- | -| order | Controls the order of precedence. $[prodname] applies the policy with the lowest value first. | | float | | -| tier | Name of the [tier](tier.mdx) this policy belongs to. | | string | `default` | -| selector | Selects the endpoints to which this policy applies. | | [selector](#selector) | all() | -| serviceAccountSelector | Selects the service account(s) to which this policy applies. Select all service accounts in the cluster with a specific name using the `projectcalico.org/name` label. | | [selector](#selector) | all() | -| namespaceSelector | Selects the namespace(s) to which this policy applies. Select a specific namespace by name using the `projectcalico.org/name` label. | | [selector](#selector) | all() | -| types | Applies the policy based on the direction of the traffic. To apply the policy to inbound traffic, set to `Ingress`. To apply the policy to outbound traffic, set to `Egress`. To apply the policy to both, set to `Ingress, Egress`. | `Ingress`, `Egress` | List of strings | Depends on presence of ingress/egress rules\* | -| ingress | Ordered list of ingress rules applied by policy. | | List of [Rule](#rule) | | -| egress | Ordered list of egress rules applied by this policy. | | List of [Rule](#rule) | | -| doNotTrack\*\* | Indicates to apply the rules in this policy before any data plane connection tracking, and that packets allowed by these rules should not be tracked. | true, false | boolean | false | -| preDNAT\*\* | Indicates to apply the rules in this policy before any DNAT. 
| true, false | boolean | false | -| applyOnForward\*\* | Indicates to apply the rules in this policy on forwarded traffic as well as to locally terminated traffic. | true, false | boolean | false | -| performanceHints | Contains a list of hints to Calico's policy engine to help process the policy more efficiently. Hints never change the enforcement behaviour of the policy. The available hints are described [below](#performance-hints). | `AssumeNeededOnEveryNode` | List of strings | | - -\* If `types` has no value, $[prodname] defaults as follows. - -> | Ingress Rules Present | Egress Rules Present | `Types` value | -> | --------------------- | -------------------- | ----------------- | -> | No | No | `Ingress` | -> | Yes | No | `Ingress` | -> | No | Yes | `Egress` | -> | Yes | Yes | `Ingress, Egress` | - -\*\* The `doNotTrack` and `preDNAT` and `applyOnForward` fields are meaningful -only when applying policy to a [host endpoint](hostendpoint.mdx). - -Only one of `doNotTrack` and `preDNAT` may be set to `true` (in a given policy). If they are both `false`, or when applying the policy to a -[workload endpoint](workloadendpoint.mdx), -the policy is enforced after connection tracking and any DNAT. - -`applyOnForward` must be set to `true` if either `doNotTrack` or `preDNAT` is -`true` because for a given policy, any untracked rules or rules before DNAT will -in practice apply to forwarded traffic. - -See [Policy for hosts](../../network-policy/hosts/index.mdx) -for how `doNotTrack` and `preDNAT` and `applyOnForward` can be useful for host endpoints. - -### Rule - - - -### ICMP - - - -### EntityRule - - - -### Selector - - - - -### Ports - - - -### ServiceAccountMatch - - - -### ServiceMatch - - - -### Performance Hints - -Performance hints provide a way to tell $[prodname] about the intended use of the policy so that it may -process it more efficiently. Currently only one hint is defined: - -* `AssumeNeededOnEveryNode`: normally, $[prodname] only calculates a policy's rules and selectors on nodes where - the policy is actually in use (i.e. its selector matches a local endpoint). This saves work in most cases. - The `AssumeNeededOnEveryNode` hint tells $[prodname] to treat the policy as "in use" on *every* node. This is - useful for large policy sets that are known to apply to all (or nearly all) endpoints. It effectively "preloads" - the policy on every node so that there is less work to do when the first endpoint matching the policy shows up. - It also prevents work from being done to tear down the policy when the last endpoint is drained. - -## Application layer policy - -Application layer policy is an optional feature of $[prodname] and -[must be enabled](../../network-policy/application-layer-policies/alp.mdx) -to use the following match criteria. - -:::note - -Application layer policy match criteria are supported with the following restrictions. - -- Only ingress policy is supported. Egress policy must not contain any application layer policy match clauses. -- Rules must have the action `Allow` if they contain application layer policy match clauses. - -::: - -### HTTPMatch - - - -## Supported operations - -| Datastore type | Create/Delete | Update | Get/List | Notes | -| ------------------------ | ------------- | ------ | -------- | ----- | -| Kubernetes API datastore | Yes | Yes | Yes | - -#### List filtering on tiers - -List and watch operations may specify label selectors or field selectors to filter `GlobalNetworkPolicy` resources on tiers returned by the API server. 
-When no selector is specified, the API server returns all `GlobalNetworkPolicy` resources from all tiers that the user has access to. - -##### Field selector - -When using the field selector, supported operators are `=` and `==` - -The following example shows how to retrieve all `GlobalNetworkPolicy` resources in the default tier: - -```bash -kubectl get globalnetworkpolicy --field-selector spec.tier=default -``` - -##### Label selector - -When using the label selector, supported operators are `=`, `==` and `IN`. - -The following example shows how to retrieve all `GlobalNetworkPolicy` resources in the `default` and `net-sec` tiers: - -```bash -kubectl get globalnetworkpolicy -l 'projectcalico.org/tier in (default, net-sec)' -``` diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/globalnetworkset.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/globalnetworkset.mdx deleted file mode 100644 index 9a1ea01f2f..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/globalnetworkset.mdx +++ /dev/null @@ -1,83 +0,0 @@ ---- -description: API for this Calico Enterprise resource. ---- - -# Global network set - -import DomainNames from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_domain-names.mdx'; - -A global network set resource (GlobalNetworkSet) represents an arbitrary set of IP subnetworks/CIDRs, -allowing it to be matched by $[prodname] policy. Network sets are useful for applying policy to traffic -coming from (or going to) external, non-$[prodname], networks. - -GlobalNetworkSets can also include domain names, whose effect is to allow egress traffic to those -domain names, when the GlobalNetworkSet is matched by the destination selector of an egress rule -with action Allow. Domain names have no effect in ingress rules, or in a rule whose action is not -Allow. - -:::note - -$[prodname] implements policy for domain names by learning the -corresponding IPs from DNS, then programming rules to allow those IPs. This means that -if multiple domain names A, B and C all map to the same IP, and there is domain-based -policy to allow A, traffic to B and C will be allowed as well. - -::: - -The metadata for each network set includes a set of labels. When $[prodname] is calculating the set of -IPs that should match a source/destination selector within a -[global network policy](globalnetworkpolicy.mdx) rule, or within a -[network policy](networkpolicy.mdx) rule whose `namespaceSelector` includes `global()`, it includes -the CIDRs from any network sets that match the selector. - -:::note - -Since $[prodname] matches packets based on their source/destination IP addresses, -$[prodname] rules may not behave as expected if there is NAT between the $[prodname]-enabled node and the -networks listed in a network set. For example, in Kubernetes, incoming traffic via a service IP is -typically SNATed by the kube-proxy before reaching the destination host so $[prodname]'s workload -policy will see the kube-proxy's host's IP as the source instead of the real source. -For `kubectl` [commands](https://kubernetes.io/docs/reference/kubectl/overview/), the following case-insensitive aliases -may be used to specify the resource type on the CLI: -`globalnetworkset.projectcalico.org`, `globalnetworksets.projectcalico.org` and abbreviations such as -`globalnetworkset.p` and `globalnetworksets.p`. 
- -::: - -## Sample YAML - -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalNetworkSet -metadata: - name: a-name-for-the-set - labels: - role: external-database -spec: - nets: - - 198.51.100.0/28 - - 203.0.113.0/24 - allowedEgressDomains: - - db.com - - '*.db.com' -``` - -## Global network set definition - -### Metadata - -| Field | Description | Accepted Values | Schema | -| ------ | ------------------------------------------ | ------------------------------------------------- | ------ | -| name | The name of this network set. | Lower-case alphanumeric with optional `-` or `-`. | string | -| labels | A set of labels to apply to this endpoint. | | map | - -### Spec - -| Field | Description | Accepted Values | Schema | Default | -| -------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -------------------------------------------------------------------------- | ------ | ------- | -| nets | The IP networks/CIDRs to include in the set. | Valid IPv4 or IPv6 CIDRs, for example "192.0.2.128/25" | list | | -| allowedEgressDomains | The list of domain names that belong to this set and are honored in egress allow rules only. Domain names specified here only work to allow egress traffic from the cluster to external destinations. They don't work to _deny_ traffic to destinations specified by domain name, or to allow ingress traffic from _sources_ specified by domain name. | List of [exact or wildcard domain names](#exact-and-wildcard-domain-names) | list | | - -### Exact and wildcard domain names - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/globalreport.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/globalreport.mdx deleted file mode 100644 index e0ac9c94d7..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/globalreport.mdx +++ /dev/null @@ -1,149 +0,0 @@ ---- -description: API for this Calico Enterprise resource. ---- - -# Global report - -A global report resource is a configuration for generating compliance reports. A global report configuration in $[prodname] lets you: - -- Specify report contents, frequency, and data filtering -- Specify the node(s) on which to run the report generation jobs -- Enable/disable creation of new jobs for generating the report - -For `kubectl` [commands](https://kubernetes.io/docs/reference/kubectl/overview/), the following case-insensitive aliases -may be used to specify the resource type on the CLI: -`globalreport.projectcalico.org`, `globalreports.projectcalico.org` and abbreviations such as -`globalreport.p` and `globalreports.p`. 
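
For example, the short alias can be used to list the configured reports and to inspect a single report, including its status. This is a sketch only; `weekly-full-inventory` refers to the report defined in the sample below:

```bash
# List all configured compliance reports
kubectl get globalreports.p

# Inspect one report, including its status, in YAML form
kubectl get globalreports.p weekly-full-inventory -o yaml
```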
- -## Sample YAML - -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalReport -metadata: - name: weekly-full-inventory -spec: - reportType: inventory - schedule: 0 0 * * 0 - jobNodeSelector: - nodetype: infrastructure - ---- -apiVersion: projectcalico.org/v3 -kind: GlobalReport -metadata: - name: hourly-accounts-networkaccess -spec: - reportType: network-access - endpoints: - namespaces: - names: ['payable', 'collections', 'payroll'] - schedule: 0 * * * * - ---- -apiVersion: projectcalico.org/v3 -kind: GlobalReport -metadata: - name: monthly-widgets-controller-tigera-policy-audit -spec: - reportType: policy-audit - schedule: 0 0 1 * * - endpoints: - serviceAccounts: - names: ['controller'] - namespaces: - names: ['widgets'] - ---- -apiVersion: projectcalico.org/v3 -kind: GlobalReport -metadata: - name: daily-cis-benchmark -spec: - reportType: cis-benchmark - schedule: 0 0 * * * - cis: - resultsFilters: - - benchmarkSelection: { kubernetesVersion: '1.13' } - exclude: ['1.1.4', '1.2.5'] -``` - -## GlobalReport Definition - -### Metadata - -| Field | Description | Accepted Values | Schema | -| ------ | ---------------------------------------- | ------------------------------------------------ | ------ | -| name | The name of this report. | Lower-case alphanumeric with optional `-` or `.` | string | -| labels | A set of labels to apply to this report. | | map | - -### Spec - -| Field | Description | Required | Accepted Values | Schema | -| --------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ----------------------------------------- | -| reportType | The type of report to produce. This field controls the content of the report - see the links for each type for more details. | Yes | [cis‑benchmark](compliance-reports/cis-benchmark.mdx), [inventory](compliance-reports/inventory.mdx), [network‑access](compliance-reports/network-access.mdx), [policy‑audit](compliance-reports/policy-audit.mdx) | string | -| endpoints | Specify which endpoints are in scope. If omitted, selects everything. | | | [EndpointsSelection](#endpointsselection) | -| schedule | Configure report frequency by specifying start and end time in [cron-format][cron-format]. Reports are started 30 minutes (configurable) after the scheduled value to allow enough time for data archival. A maximum limit of 12 schedules per hour is enforced (an average of one report every 5 minutes). | Yes | | string | -| jobNodeSelector | Specify the node(s) for scheduling the report jobs using selectors. | | | map | -| suspend | Disable future scheduled report jobs. In-flight reports are not affected. | | | bool | -| cis | Parameters related to generating a CIS benchmark report. | | | [CISBenchmarkParams](#cisbenchmarkparams) | - -### EndpointsSelection - -| Field | Description | Schema | -| --------------- | ------------------------------------------------------------------------------------------- | ------------------------------------------- | -| selector | Endpoint label selector to restrict endpoint selection. 
| string | -| namespaces | Namespace name and label selector to restrict endpoints by selected namespaces. | [NamesAndLabelsMatch](#namesandlabelsmatch) | -| serviceAccounts | Service account name and label selector to restrict endpoints by selected service accounts. | [NamesAndLabelsMatch](#namesandlabelsmatch) | - -### CISBenchmarkParams - -| Fields | Description | Required | Schema | -| -------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------- | ----------------------------------------- | -| highThreshold | Integer percentage value that determines the lower limit of passing tests to consider a node as healthy. Default: 100 | No | int | -| medThreshold | Integer percentage value that determines the lower limit of passing tests to consider a node as unhealthy. Default: 50 | No | int | -| includeUnscoredTests | Boolean value that when false, applies a filter to exclude tests that are marked as “Unscored” by the CIS benchmark standard. If true, the tests will be included in the report. Default: false | No | bool | -| numFailedTests | Integer value that sets the number of tests to display in the Top-failed Tests section of the CIS benchmark report. Default: 5 | No | int | -| resultsFilters | Specifies an include or exclude filter to apply on the test results that will appear on the report. | No | [CISBenchmarkFilter](#cisbenchmarkfilter) | - -### CISBenchmarkFilter - -| Fields | Description | Required | Schema | -| ------------------ | ---------------------------------------------------------------------------------------------- | -------- | ----------------------------------------------- | -| benchmarkSelection | Specify which set of benchmarks that this filter should apply to. Selects all benchmark types. | No | [CISBenchmarkSelection](#cisbenchmarkselection) | -| exclude | Specify which benchmark tests to exclude | No | array of strings | -| include | Specify which benchmark tests to include only (higher precedence than exclude) | No | array of strings | - -### CISBenchmarkSelection - -| Fields | Description | Required | Schema | -| ----------------- | -------------------------------------- | -------- | ------ | -| kubernetesVersion | Specifies a version of the benchmarks. | Yes | string | - -### NamesAndLabelsMatch - -| Field | Description | Schema | -| -------- | ------------------------------------ | ------ | -| names | Set of resource names. | list | -| selector | Selects a set of resources by label. | string | - -Use the `NamesAndLabelsMatch`to limit the scope of endpoints. If both `names` -and `selector` are specified, the resource is identified using label _AND_ name -match. - -:::note - -To use the $[prodname] compliance reporting feature, you must ensure all required resource types -are being audited and the logs archived in Elasticsearch. You must explicitly configure the [Kubernetes API Server](../../observability/kube-audit.mdx) - to send audit logs for Kubernetes-owned resources -to Elasticsearch. 
- -::: - -## Supported operations - -| Datastore type | Create/Delete | Update | Get/List | Notes | -| --------------------- | ------------- | ------ | -------- | ----- | -| Kubernetes API server | Yes | Yes | Yes | | - -[cron-format]: https://en.wikipedia.org/wiki/Cron diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/globalthreatfeed.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/globalthreatfeed.mdx deleted file mode 100644 index 8af83d22c6..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/globalthreatfeed.mdx +++ /dev/null @@ -1,269 +0,0 @@ ---- -description: API for this Calico Enterprise resource. ---- - -# Global threat feed - -A global threat feed resource (GlobalThreatFeed) represents a feed of threat intelligence used for -security purposes. - -$[prodname] supports threat feeds that give either - -- a set of IP addresses or IP prefixes, with content type IPSet, or -- a set of domain names, with content type DomainNameSet - -For each IPSet threat feed, $[prodname] automatically monitors flow logs for members of the set. -IPSet threat feeds can also be configured to be synchronized to a [global network set](globalnetworkset.mdx), -allowing you to use them as a dynamically-updating deny-list by incorporating the global network set into network policy. - -For each DomainNameSet threat feed, $[prodname] automatically monitors DNS logs for queries (QNAME) or answers (RR NAME or RDATA) that contain members of the set. - -For `kubectl` [commands](https://kubernetes.io/docs/reference/kubectl/overview/), the following case-insensitive aliases -may be used to specify the resource type on the CLI: -`globalthreatfeed.projectcalico.org`, `globalthreatfeeds.projectcalico.org` and abbreviations such as -`globalthreatfeed.p` and `globalthreatfeeds.p`. - -## Sample YAML - -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalThreatFeed -metadata: - name: sample-global-threat-feed -spec: - content: IPSet - mode: Enabled - description: "This is the sample global threat feed" - feedType: Custom - globalNetworkSet: - # labels to set on the GNS - labels: - level: high - pull: - # accepts time in golang duration format - period: 24h - http: - format: - newlineDelimited: {} - url: https://an.example.threat.feed/deny-list - headers: - - name: "Accept" - value: "text/plain" - - name: "APIKey" - valueFrom: - # secrets selected must be in the "tigera-intrusion-detection" namespace to be used - secretKeyRef: - name: "globalthreatfeed-sample-global-threat-feed-example" - key: "apikey" -``` - -## Push or Pull - -You can configure $[prodname] to pull updates from your threat feed using a [`pull`](#pull) stanza in -the global threat feed spec. - -Alternately, you can have your threat feed push updates directly. Leave out the `pull` stanza, and configure -your threat feed to create or update the Elasticsearch document that corresponds to the global threat -feed object. - -For IPSet threat feeds, this Elasticsearch document will be in the index `.tigera.ipset.` and must have the ID set -to the name of the global threat feed object. The doc should have a single field called `ips`, containing -a list of IP prefixes. 
- -For example: - -``` -PUT .tigera.ipset.cluster01/_doc/sample-global-threat-feed -{ - "ips" : ["99.99.99.99/32", "100.100.100.0/24"] -} -``` - -For DomainNameSet threat feeds, this Elasticsearch document will be in the index `.tigera.domainnameset.` and must -have the ID set to the name of the global threat feed object. The doc should have a single field called `domains`, containing -a list of domain names. - -For example: - -``` -PUT .tigera.domainnameset.cluster01/_doc/example-global-threat-feed -{ - "domains" : ["malware.badstuff", "hackers.r.us"] -} -``` - -Refer to the [Elasticsearch document APIs][elastic-document-apis] for more information on how to -create and update documents in Elasticsearch. - -## GlobalThreatFeed Definition - -### Metadata - -| Field | Description | Accepted Values | Schema | -| ------ | --------------------------------------------- | ----------------------------------------- | ------ | -| name | The name of this threat feed. | Lower-case alphanumeric with optional `-` | string | -| labels | A set of labels to apply to this threat feed. | | map | - -### Spec - -| Field | Description | Accepted Values | Schema | Default | -| ---------------- | ---------------------------------------------------- | ---------------------- | --------------------------------------------- | ------- | -| content | What kind of threat intelligence is provided | IPSet, DomainNameSet | string | IPSet | -| mode | Determines if the threat feed is Enabled or Disabled | Enabled, Disabled | string | Enabled | -| description | Human-readable description of the template | Maximum 256 characters | string | | -| feedType | Distinguishes Builtin threat feeds from Custom feeds | Builtin, Custom | string | Custom | -| globalNetworkSet | Include to sync with a global network set | | [GlobalNetworkSetSync](#globalnetworksetsync) | | -| pull | Configure periodic pull of threat feed updates | | [Pull](#pull) | | - -### Status - -The `status` is read-only for users and updated by the `intrusion-detection-controller` component as -it processes global threat feeds. - -| Field | Description | -| -------------------- | -------------------------------------------------------------------------------- | -| lastSuccessfulSync | Timestamp of the last successful update to the threat intelligence from the feed | -| lastSuccessfulSearch | Timestamp of the last successful search of logs for threats | -| errorConditions | List of errors preventing operation of the updates or search | - -### GlobalNetworkSetSync - -When you include a `globalNetworkSet` stanza in a global threat feed, it triggers synchronization -with a [global network set](globalnetworkset.mdx). This global network set will have the name `threatfeed.` -where `` is the name of the global threat feed it is synced with. This is only supported for -threat feeds of type IPSet. - -:::note - -A `globalNetworkSet` stanza only works for `IPSet` threat feeds, and you must also include a `pull` stanza. - -::: - -| Field | Description | Accepted Values | Schema | -| ------ | --------------------------------------------------------- | --------------- | ------ | -| labels | A set of labels to apply to the synced global network set | | map | - -### Pull - -When you include a `pull` stanza in a global threat feed, it triggers a periodic pull of new data. On successful -pull and update to the data store, we update the `status.lastSuccessfulSync` timestamp. - -If you do not include a `pull` stanza, you must configure your system to [push](#push-or-pull) updates. 
- -| Field | Description | Accepted Values | Schema | Default | -| ------ | ------------------------------------- | --------------- | --------------------------------- | ------- | -| period | How often to pull an update | ≥ 5m | [Duration string][parse-duration] | 24h | -| http | Pull the update from an HTTP endpoint | | [HTTPPull](#httppull) | | - -### HTTPPull - -Pull updates from the threat feed by doing an HTTP GET against the given URL. - -| Field | Description | Accepted Values | Schema | -| ------- | --------------------------------------------------------- | --------------- | ------------------------- | -| format | Format of the data the threat feed returns | | [Format](#format) | -| url | The URL to query | | string | -| headers | List of additional HTTP Headers to include on the request | | [HTTPHeader](#httpheader) | - -IPSet threat feeds must contain IP addresses or IP prefixes. For example: - -``` - This is an IP Prefix -100.100.100.0/24 - This is an address -99.99.99.99 -``` - -DomainNameSet threat feeds must contain domain names. For example: - -``` - Suspicious domains -malware.badstuff -hackers.r.us -``` - -Internationalized domain names (IDNA) may be encoded either as Unicode in UTF-8 format, or as -ASCII-Compatible Encoding (ACE) according to [RFC 5890][idna]. - -### Format - -Several different feed formats are supported. The default, -`newlineDelimited`, expects a text file containing entries separated by -newline characters. It may also include comments prefixed by `#`. -`json` uses a [jsonpath] to extract the desired information from a -JSON document. `csv` extracts one column from CSV-formatted data. - -| Field | Description | Schema | -| ---------------- | --------------------------- | ------------- | -| newlineDelimited | Newline-delimited text file | Empty object | -| json | JSON object | [JSON](#json) | -| csv | CSV file | [CSV](#csv) | - -#### JSON - -| Field | Description | Schema | -| ----- | ----------------------------- | ------ | -| path | [jsonpath] to extract values. | string | - -Values can be extracted from the document using any [jsonpath] -expression, subject to the limitations mentioned below, that evaluates -to a list of strings. For example: `$.` is valid for `["a", "b", "c"]`, -and `$.a` is valid for `{"a": ["b", "c"]}`. - -:::caution - -No support for subexpressions and filters. Strings in -brackets must use double quotes. It cannot operate on JSON decoded -struct fields. - -::: - -#### CSV - -| Field | Description | Schema | -| --------------------------- | ------------------------------------------------------------------------- | ------ | -| fieldNum | Number of column containing values. Mutually exclusive with `fieldName`. | int | -| fieldName | Name of column containing values, requires `header: true`. | string | -| header | Whether or not the document contains a header row. | bool | -| columnDelimiter | An alternative delimiter character, such as |. | string | -| commentDelimiter | Lines beginning with this character are skipped. `#` is common. | string | -| recordSize | The number of columns expected in the document. Auto detected if omitted. | int | -| disableRecordSizeValidation | Disable row size checking. Mutually exclusive with `recordSize`. 
| bool | - -### HTTPHeader - -| Field | Description | Schema | -| --------- | --------------------------------------------------------- | ------------------------------------- | -| name | Header name | string | -| value | Literal value | string | -| valueFrom | Include to retrieve the value from a config map or secret | [HTTPHeaderSource](#httpheadersource) | - -:::note - -You must include either `value` or `valueFrom`, but not both. - -::: - -### HTTPHeaderSource - -| Field | Description | Schema | -| --------------- | ------------------------------- | ----------------- | -| configMapKeyRef | Get the value from a config map | [KeyRef](#keyref) | -| secretKeyRef | Get the value from a secret | [KeyRef](#keyref) | - -### KeyRef - -KeyRef tells $[prodname] where to get the value for a header. The referenced Kubernetes object -(either a config map or a secret) must be in the `tigera-intrusion-detection` namespace. The referenced -Kubernetes object should have a name with following prefix format: `globalthreatfeed--`. - -| Field | Description | Accepted Values | Schema | Default | -| -------- | --------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------- | ------ | ------- | -| name | The name of the config map or secret | | string | | -| key | The key within the config map or secret | | string | | -| optional | Whether the pull can proceed without the referenced value | If the referenced value does not exist, `true` means omit the header. `false` means abort the entire pull until it exists | bool | `false` | - -[elastic-document-apis]: https://www.elastic.co/guide/en/elasticsearch/reference/6.4/docs-update.html -[parse-duration]: https://golang.org/pkg/time/#ParseDuration -[idna]: https://tools.ietf.org/html/rfc5890 -[jsonpath]: https://goessner.net/articles/JsonPath/ diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/hostendpoint.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/hostendpoint.mdx deleted file mode 100644 index 5fa09605e2..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/hostendpoint.mdx +++ /dev/null @@ -1,121 +0,0 @@ ---- -description: API for this Calico Enterprise resource. ---- - -# Host endpoint - -import Endpointport from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_endpointport.mdx'; - -A host endpoint resource (`HostEndpoint`) represents one or more real or virtual interfaces -attached to a host that is running $[prodname]. It enforces $[prodname] policy on -the traffic that is entering or leaving the host's default network namespace through those -interfaces. - -- A host endpoint with `interfaceName: *` represents _all_ of a host's real or virtual - interfaces. - -- A host endpoint for one specific real interface is configured by `interfaceName: `, - for example `interfaceName: eth0`, or by leaving `interfaceName` - empty and including one of the interface's IPs in `expectedIPs`. - -Each host endpoint may include a set of labels and list of profiles that $[prodname] -will use to apply -[policy](networkpolicy.mdx) -to the interface. If no profiles or labels are applied, $[prodname] will not apply -any policy. 
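
For example, once a host endpoint carries labels, ordinary policy selectors can match it. The sketch below is illustrative only (the policy name and tier are hypothetical); it assumes a host endpoint labelled `type: production`, as in the sample further down this page, and allows inbound SSH to the selected interfaces:

```yaml
apiVersion: projectcalico.org/v3
kind: GlobalNetworkPolicy
metadata:
  name: default.allow-ssh-to-production-hosts
spec:
  tier: default
  selector: type == 'production'
  ingress:
    - action: Allow
      protocol: TCP
      destination:
        ports:
          - 22
```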
- -For `kubectl` [commands](https://kubernetes.io/docs/reference/kubectl/overview/), the following case-insensitive aliases -may be used to specify the resource type on the CLI: -`hostendpoint.projectcalico.org`, `hostendpoints.projectcalico.org` and abbreviations such as -`hostendpoint.p` and `hostendpoints.p`. - -**Default behavior of external traffic to/from host** - -If a host endpoint is created and network policy is not in place, the $[prodname] default is to deny traffic to/from that endpoint (except for traffic allowed by failsafe rules). -For a named host endpoint (i.e. a host endpoint representing a specific interface), $[prodname] blocks traffic only to/from the interface specified in the host endpoint. Traffic to/from other interfaces is ignored. - -:::note - -Host endpoints with `interfaceName: *` do not support [untracked policy](../../network-policy/extreme-traffic/high-connection-workloads.mdx). - -::: - -For a wildcard host endpoint (i.e. a host endpoint representing all of a host's interfaces), $[prodname] blocks traffic to/from _all_ interfaces on the host (except for traffic allowed by failsafe rules). - -However, profiles can be used in conjunction with host endpoints to modify default behavior of external traffic to/from the host in the absence of network policy. -$[prodname] provides a default profile resource named `projectcalico-default-allow` that consists of allow-all ingress and egress rules. -Host endpoints with the `projectcalico-default-allow` profile attached will have "allow-all" semantics instead of "deny-all" in the absence of policy. - -Note: If you have custom iptables rules, using host endpoints with allow-all rules (with no policies) will accept all traffic and therefore bypass those custom rules. - -:::note - -Auto host endpoints specify the `projectcalico-default-allow` profile so they behave similarly to pod workload endpoints. - -::: - -:::note - -When rendering security rules on other hosts, $[prodname] uses the -`expectedIPs` field to resolve label selectors to IP addresses. If the `expectedIPs` field -is omitted then security rules that use labels will fail to match this endpoint. - -::: - -**Host to local workload traffic**: Traffic from a host to its workload endpoints (e.g. Kubernetes pods) is always allowed, despite any policy in place. This ensures that `kubelet` liveness and readiness probes always work. - -## Sample YAML - -```yaml -apiVersion: projectcalico.org/v3 -kind: HostEndpoint -metadata: - name: some.name - labels: - type: production -spec: - interfaceName: eth0 - node: myhost - expectedIPs: - - 192.168.0.1 - - 192.168.0.2 - profiles: - - profile1 - - profile2 - ports: - - name: some-port - port: 1234 - protocol: TCP - - name: another-port - port: 5432 - protocol: UDP -``` - -## Host endpoint definition - -### Metadata - -| Field | Description | Accepted Values | Schema | -| ------ | ------------------------------------------ | --------------------------------------------------- | ------ | -| name | The name of this hostEndpoint. Required. | Alphanumeric string with optional `.`, `_`, or `-`. | string | -| labels | A set of labels to apply to this endpoint. | | map | - -### Spec - -| Field | Description | Accepted Values | Schema | Default | -| ------------- | -------------------------------------------------------------------------- | -------------------------- | -------------------------------------- | ------- | -| node | The name of the node where this HostEndpoint resides. 
| | string | -| interfaceName | Either `*` or the name of the specific interface on which to apply policy. | | string | -| expectedIPs | The expected IP addresses associated with the interface. | Valid IPv4 or IPv6 address | list | -| profiles | The list of profiles to apply to the endpoint. | | list | -| ports | List of named ports that this workload exposes. | | List of [EndpointPorts](#endpointport) | - -### EndpointPort - - - -## Supported operations - -| Datastore type | Create/Delete | Update | Get/List | Notes | -| --------------------- | ------------- | ------ | -------- | ----- | -| Kubernetes API server | Yes | Yes | Yes | diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/index.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/index.mdx deleted file mode 100644 index 2285c6bf97..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: APIs for all Calico networking and network policy resources. -hide_table_of_contents: true ---- - -# Resource definitions - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/ipamconfig.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/ipamconfig.mdx deleted file mode 100644 index b7b7720d70..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/ipamconfig.mdx +++ /dev/null @@ -1,43 +0,0 @@ ---- -description: IP address management global configuration ---- - -# IPAM configuration - -An IPAM configuration resource (`IPAMConfiguration`) represents global IPAM configuration options. - -## Sample YAML - -```yaml -apiVersion: projectcalico.org/v3 -kind: IPAMConfiguration -metadata: - name: default -spec: - strictAffinity: false - maxBlocksPerHost: 4 -``` - -## IPAM configuration definition - -### Metadata - -| Field | Description | Accepted Values | Schema | -| ----- | --------------------------------------------------------- | --------------- | ------ | -| name | Unique name to describe this resource instance. Required. | default | string | - -The resource is a singleton which must have the name `default`. - -### Spec - -| Field | Description | Accepted Values | Schema | Default | -| ---------------- | ------------------------------------------------------------------- | --------------- | ------ | --------- | -| strictAffinity | When StrictAffinity is true, borrowing IP addresses is not allowed. | true, false | bool | false | -| maxBlocksPerHost | The max number of blocks that can be affine to each host. | 0 - max(int32) | int | 20 | - -## Supported operations - -| Datastore type | Create | Delete | Update | Get/List | -| --------------------- | ------ | ------ | ------ | -------- | -| etcdv3 | Yes | Yes | Yes | Yes | -| Kubernetes API server | Yes | Yes | Yes | Yes | diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/ippool.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/ippool.mdx deleted file mode 100644 index 0024e72b91..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/ippool.mdx +++ /dev/null @@ -1,155 +0,0 @@ ---- -description: API for this Calico Enterprise resource. 
---- - -# IP pool - -import Selectors from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_selectors.mdx'; - -An IP pool resource (`IPPool`) represents a collection of IP addresses from which $[prodname] expects -endpoint IPs to be assigned. - -For `kubectl` commands, the following case-insensitive aliases may be used to specify the resource type on the CLI: `ippool.projectcalico.org`, `ippools.projectcalico.org` as well as abbreviations such as `ippool.p` and `ippools.p`. - -## Sample YAML - -```yaml -apiVersion: projectcalico.org/v3 -kind: IPPool -metadata: - name: my.ippool-1 -spec: - cidr: 10.1.0.0/16 - ipipMode: CrossSubnet - natOutgoing: true - disabled: false - nodeSelector: all() - allowedUses: - - Workload - - Tunnel -``` - -## IP pool definition - -### Metadata - -| Field | Description | Accepted Values | Schema | -| ----- | ------------------------------------------- | --------------------------------------------------- | ------ | -| name | The name of this IPPool resource. Required. | Alphanumeric string with optional `.`, `_`, or `-`. | string | - -### Spec - -| Field | Description | Accepted Values | Schema | Default | -| ---------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------- | --------------------------------------------- | -| cidr | IP range to use for this pool. | A valid IPv4 or IPv6 CIDR. Subnet length must be at least big enough to fit a single block (by default `/26` for IPv4 or `/122` for IPv6). Must not overlap with the Link Local range `169.254.0.0/16` or `fe80::/10`. | string | | -| blockSize | The CIDR size of allocation blocks used by this pool. Blocks are allocated on demand to hosts and are used to aggregate routes. The value can only be set when the pool is created. | 20 to 32 (inclusive) for IPv4 and 116 to 128 (inclusive) for IPv6 | int | `26` for IPv4 pools and `122` for IPv6 pools. | -| ipipMode | The mode defining when IPIP will be used. Cannot be set at the same time as `vxlanMode`. | Always, CrossSubnet, Never | string | `Never` | -| vxlanMode | The mode defining when VXLAN will be used. Cannot be set at the same time as `ipipMode`. | Always, CrossSubnet, Never | string | `Never` | -| natOutgoing | When enabled, packets sent from $[prodname] networked containers in this pool to destinations outside of any Calico IP pools will be masqueraded. | true, false | boolean | `false` | -| disabled | When set to true, $[prodname] IPAM will not assign addresses from this pool. | true, false | boolean | `false` | -| disableBGPExport _(since v3.11.0)_ | Disable exporting routes from this IP Pool’s CIDR over BGP. | true, false | boolean | `false` | -| nodeSelector | Selects the nodes where $[prodname] IPAM should assign pod addresses from this pool. Can be overridden if a pod [explicitly identifies this IP pool by annotation](../component-resources/configuration.mdx#using-kubernetes-annotations). | | [selector](#node-selector) | all() | -| allowedUses _(since v3.11.0)_ | Controls whether the pool will be used for automatic assignments of certain types. See [below](#allowed-uses). 
| Workload, Tunnel, HostSecondaryInterface | list of strings | `["Workload", "Tunnel"]` | -| awsSubnetID _(since v3.11.0)_ | May be set to the ID of an AWS VPC Subnet that contains the CIDR of this IP pool to activate the AWS-backed pool feature. See [below](#aws-backed-pools). | Valid AWS Subnet ID. | string | | - -:::note - -Do not use a custom `blockSize` until **all** $[prodname] components have been updated to a version that -supports it (at least v2.3.0). Older versions of components do not understand the field so they may corrupt the -IP pool by creating blocks of incorrect size. - -::: - -### Allowed uses - -When automatically assigning IP addresses to workloads, only pools with "Workload" in their `allowedUses` field are -consulted. Similarly, when assigning IPs for tunnel devices, only "Tunnel" pools are eligible. Finally, when -assigning IP addresses for AWS secondary ENIs, only pools with allowed use "HostSecondaryInterface" are candidates. - -If the `allowedUses` field is not specified, it defaults to `["Workload", "Tunnel"]` for compatibility with older -versions of Calico. It is not possible to specify a pool with no allowed uses. - -The `allowedUses` field is only consulted for new allocations, changing the field has no effect on previously allocated -addresses. - -$[prodname] supports Kubernetes [annotations that force the use of specific IP addresses](../component-resources/configuration.mdx#requesting-a-specific-ip-address). These annotations take precedence over the `allowedUses` field. - -### AWS-backed pools - -$[prodname] supports IP pools that are backed by the AWS fabric. This feature was added in order -to support egress gateways on the AWS fabric; the restrictions and requirements are currently documented as part of the -[egress gateways on AWS guide](../../networking/egress/egress-gateway-aws.mdx). - -### IPIP - -Routing of packets using IP-in-IP will be used when the destination IP address -is in an IP Pool that has IPIP enabled. In addition, if the `ipipMode` is set to `CrossSubnet`, -$[prodname] will only route using IP-in-IP if the IP address of the destination node is in a different -subnet. The subnet of each node is configured on the node resource (which may be automatically -determined when running the `$[nodecontainer]` service). - -For details on configuring IP-in-IP on your deployment, please refer to -[Configuring IP-in-IP](../../networking/configuring/vxlan-ipip.mdx). - -:::note - -Setting `natOutgoing` is recommended on any IP Pool with `ipip` enabled. -When `ipip` is enabled without `natOutgoing` routing between Workloads and -Hosts running $[prodname] is asymmetric and may cause traffic to be filtered due to -[RPF](https://en.wikipedia.org/wiki/Reverse_path_forwarding) checks failing. - -::: - -### VXLAN - -Routing of packets using VXLAN will be used when the destination IP address -is in an IP Pool that has VXLAN enabled. In addition, if the `vxlanMode` is set to `CrossSubnet`, -$[prodname] will only route using VXLAN if the IP address of the destination node is in a different -subnet. The subnet of each node is configured on the node resource (which may be automatically -determined when running the `$[nodecontainer]` service). - -:::note - -Setting `natOutgoing` is recommended on any IP Pool with `vxlan` enabled. 
-When `vxlan` is enabled without `natOutgoing` routing between Workloads and -Hosts running $[prodname] is asymmetric and may cause traffic to be filtered due to -[RPF](https://en.wikipedia.org/wiki/Reverse_path_forwarding) checks failing. - -::: - -### Block sizes - -The default block sizes of `26` for IPv4 and `122` for IPv6 provide blocks of 64 addresses. This allows addresses to be allocated in groups to workloads running on the same host. By grouping addresses, fewer routes need to be exchanged between hosts and to other BGP peers. If a host allocates all of the addresses in a block then it will be allocated an additional block. If there are no more blocks available then the host can take addresses from blocks allocated to other hosts. Specific routes are added for the borrowed addresses which has an impact on route table size. - -Increasing the block size from the default (e.g., using `24` for IPv4 to give 256 addresses per block) means fewer blocks per host, and potentially fewer routes. But try to ensure that there are at least as many blocks in the pool as there are hosts. - -Reducing the block size from the default (e.g., using `28` for IPv4 to give 16 addresses per block) means more blocks per host and therefore potentially more routes. This can be beneficial if it allows the blocks to be more fairly distributed amongst the hosts. - -### Node Selector - -For details on configuring IP pool node selectors, please read the -[Assign IP addresses based on topology guide.](../../networking/ipam/assign-ip-addresses-topology.mdx). - -:::tip - -To prevent an IP pool from being used automatically by $[prodname] IPAM, while still allowing -it to be used manually for static assignments, set the `IPPool`'s `nodeSelector` to `!all()`. Since the selector -matches no nodes, the IPPool will not be used automatically and, unlike setting `disabled: true`, it can still be -used for manual assignments. - -::: - -#### Selector reference - - - -## Supported operations - -| Datastore type | Create/Delete | Update | Get/List | Notes | -| --------------------- | ------------- | ------ | -------- | ----- | -| Kubernetes API server | Yes | Yes | Yes | - -## See also - -The [`IPReservation` resource](ipreservation.mdx) allows for small parts of an IP pool to be reserved so that they will -not be used for automatic IPAM assignments. diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/ipreservation.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/ipreservation.mdx deleted file mode 100644 index ab41de81c8..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/ipreservation.mdx +++ /dev/null @@ -1,56 +0,0 @@ ---- -description: API for this Calico resource. ---- - -# IP reservation - -An IP reservation resource (`IPReservation`) represents a collection of IP addresses that $[prodname] should -not use when automatically assigning new IP addresses. It only applies when $[prodname] IPAM is in use. - -## Sample YAML - -```yaml -apiVersion: projectcalico.org/v3 -kind: IPReservation -metadata: - name: my-ipreservation-1 -spec: - reservedCIDRs: - - 192.168.2.3 - - 10.0.2.3/32 - - cafe:f00d::/123 -``` - -## IP reservation definition - -### Metadata - -| Field | Description | Accepted Values | Schema | -| ----- | -------------------------------------------------- | --------------------------------------------------- | ------ | -| name | The name of this IPReservation resource. Required. 
| Alphanumeric string with optional `.`, `_`, or `-`. | string |

### Spec

| Field | Description | Accepted Values | Schema | Default |
| ------------- | --------------------------------------------------------------- | -------------------------------------------------- | ------ | ------- |
| reservedCIDRs | List of IP addresses and/or networks specified in CIDR notation | List of valid IP addresses (v4 or v6) and/or CIDRs | list | |

### Notes

The implementation of `IPReservation`s is designed to handle reservation of a small number of IP addresses/CIDRs from
(generally much larger) IP pools. If a significant portion of an IP pool is reserved (say, more than 10%), then
$[prodname] may become significantly slower when searching for free IPAM blocks.

Since `IPReservation`s must be consulted for every IPAM assignment request, it's best to define one or two
`IPReservation` resources that each contain multiple addresses, rather than many `IPReservation` resources
that each contain a single address.

If an `IPReservation` is created after an IP from its range is already in use, the IP is not automatically
released back to the pool. The reservation check is only done at auto allocation time.

$[prodname] supports Kubernetes [annotations that force the use of specific IP addresses](../component-resources/configuration.mdx#requesting-a-specific-ip-address). These annotations override any `IPReservation`s that
are in place.

When Windows nodes claim blocks of IPs, they automatically assign the first three IPs
in each block and the final IP for internal purposes. These assignments cannot be blocked by an `IPReservation`.
However, if a whole IPAM block is reserved with an `IPReservation`, Windows nodes will not claim such a block.
diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/kubecontrollersconfig.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/kubecontrollersconfig.mdx
deleted file mode 100644
index e6ba483ee5..0000000000
--- a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/kubecontrollersconfig.mdx
+++ /dev/null
@@ -1,86 +0,0 @@
---
description: API for KubeControllersConfiguration resource.
---

# Kubernetes controllers configuration

A $[prodname] [Kubernetes controllers](../component-resources/kube-controllers/configuration.mdx) configuration resource (`KubeControllersConfiguration`) represents configuration options for the $[prodname] Kubernetes controllers.

## Sample YAML

```yaml
apiVersion: projectcalico.org/v3
kind: KubeControllersConfiguration
metadata:
  name: default
spec:
  logSeverityScreen: Info
  healthChecks: Enabled
  prometheusMetricsPort: 9094
  controllers:
    node:
      reconcilerPeriod: 5m
      leakGracePeriod: 15m
      syncLabels: Enabled
      hostEndpoint:
        autoCreate: Disabled
```

## Kubernetes controllers configuration definition

### Metadata

| Field | Description | Accepted Values | Schema |
| ----- | --------------------------------------------------------- | ----------------- | ------ |
| name | Unique name to describe this resource instance. Required. | Must be `default` | string |

- $[prodname] automatically creates a resource named `default` containing the configuration settings; only the name `default` is used, and only one object of this type is allowed.
You can use [calicoctl](../clis/calicoctl/overview.mdx) to view and edit these settings - -### Spec - -| Field | Description | Accepted Values | Schema | Default | -| --------------------- | --------------------------------------------------------- | ----------------------------------- | --------------------------- | ------- | -| logSeverityScreen | The log severity above which logs are sent to the stdout. | Debug, Info, Warning, Error, Fatal | string | Info | -| healthChecks | Enable support for health checks | Enabled, Disabled | string | Enabled | -| prometheusMetricsPort | Port on which to serve prometheus metrics. | Set to 0 to disable, > 0 to enable. | TCP port | 9094 | -| controllers | Enabled controllers and their settings | | [Controllers](#controllers) | | - -### Controllers - -| Field | Description | Schema | -| ----------------- | ------------------------------------------------------ | ------------------------------------------------------------------------------- | -| node | Enable and configure the node controller | omit to disable, or [NodeController](#nodecontroller) | -| federatedservices | Enable and configure the federated services controller | omit to disable, or [FederatedServicesController](#federatedservicescontroller) | - -### NodeController - -The node controller automatically cleans up configuration for nodes that no longer exist. Optionally, it can create host endpoints for all Kubernetes nodes. - -| Field | Description | Accepted Values | Schema | Default | -| ---------------- | --------------------------------------------------------------------------------- | ----------------- | --------------------------------- | ------- | -| reconcilerPeriod | Period to perform reconciliation with the $[prodname] datastore | | [Duration string][parse-duration] | 5m | -| syncLabels | When enabled, Kubernetes node labels will be copied to $[prodname] node objects. | Enabled, Disabled | string | Enabled | -| hostEndpoint | Controls allocation of host endpoints | | [HostEndpoint](#hostendpoint) | | -| leakGracePeriod | Grace period to use when garbage collecting suspected leaked IP addresses. | | [Duration string][parse-duration] | 15m | - -### HostEndpoint - -| Field | Description | Accepted Values | Schema | Default | -| ---------- | ---------------------------------------------------------------- | ----------------- | ------ | -------- | -| autoCreate | When enabled, automatically create a host endpoint for each node | Enabled, Disabled | string | Disabled | - -### FederatedServicesController - -The federated services controller syncs Kubernetes services from remote clusters defined through [RemoteClusterConfigurations](remoteclusterconfiguration.mdx). 
- -| Field | Description | Schema | Default | -| ---------------- | ---------------------------------------------------------------- | --------------------------------- | ------- | -| reconcilerPeriod | Period to perform reconciliation with the $[prodname] datastore | [Duration string][parse-duration] | 5m | - -## Supported operations - -| Datastore type | Create | Delete (Global `default`) | Update | Get/List | Notes | -| --------------------- | ------ | ------------------------- | ------ | -------- | ----- | -| Kubernetes API server | Yes | Yes | Yes | Yes | - -[parse-duration]: https://golang.org/pkg/time/#ParseDuration diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/licensekey.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/licensekey.mdx deleted file mode 100644 index 6f403c5bf2..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/licensekey.mdx +++ /dev/null @@ -1,84 +0,0 @@ ---- -description: API for this Calico Enterprise resource. ---- - -# License key - -A License Key resource (`LicenseKey`) represents a user's license to use $[prodname]. Keys are -provided by Tigera support, and must be applied to the cluster to enable -$[prodname] features. - -For `kubectl` commands, the following case-insensitive aliases may be used to specify -the resource type on the CLI: `licensekey.projectcalico.org`, `licensekeys.projectcalico.org` -as well as abbreviations such as `licensekey.p` and `licensekeys.p`. - -## Working with license keys - -### Applying or updating a license key - -When you add $[prodname] to an existing Kubernetes cluster or create a -new OpenShift cluster, you must apply your license key to complete the installation -and gain access to the full set of $[prodname] features. - -In deployments that use multicluster management, a license key is required only on the management cluster. - -When your license key expires, you must update it to continue using $[prodname]. - -To apply or update a license key use the following command, replacing `` -with the customer name in the file sent to you by Tigera. - -**Command** - -```bash -kubectl apply -f -license.yaml -``` - -**Example** - -```bash -kubectl apply -f awesome-corp-license.yaml -``` - -### Viewing information about your license key - -To view the number of licensed nodes and the license key expiry, use: - -```bash -kubectl get licensekeys.p -o custom-columns='Name:.metadata.name,MaxNodes:.status.maxnodes,Expiry:.status.expiry,PackageType:.status.package' -``` - -This is an example of the output of above command. - -``` -Name MaxNodes Expiry Package -default 100 2021-10-01T23:59:59Z Enterprise -``` - -## Sample YAML - -```yaml -apiVersion: projectcalico.org/v3 -kind: LicenseKey -metadata: - creationTimestamp: null - name: default -spec: - certificate: | - -----BEGIN CERTIFICATE----- - MII...n5 - -----END CERTIFICATE----- - token: eyJ...zaQ -status: - expiry: '2021-10-01T23:59:59Z' - maxnodes: 100 - package: Enterprise -``` - -The data fields in the license key resource may change without warning. The license key resource -is currently a singleton: the only valid name is `default`. 
## Supported operations

| Datastore type | Create | Delete | Update | Get/List | Notes |
| --------------------- | ------ | ------ | ------ | -------- | ----- |
| Kubernetes API server | Yes | No | Yes | Yes |

diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/managedcluster.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/managedcluster.mdx deleted file mode 100644 index b4aa4d13d4..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/managedcluster.mdx +++ /dev/null @@ -1,72 +0,0 @@

---
description: API for this Calico Enterprise resource.
---

# Managed Cluster

A Managed Cluster resource (`ManagedCluster`) represents a cluster managed by a centralized management plane with a shared Elasticsearch.
The management plane provides central control of the managed cluster and stores its logs.

$[prodname] supports connecting multiple $[prodname] clusters as described in the [Multi-cluster management] installation guide.

For `kubectl` commands, the following case-insensitive aliases may be used to specify the resource type on the CLI:
`managedcluster`, `managedclusters`, `managedcluster.projectcalico.org`, `managedclusters.projectcalico.org` as well as
abbreviations such as `managedcluster.p` and `managedclusters.p`.

## Sample YAML

```yaml
apiVersion: projectcalico.org/v3
kind: ManagedCluster
metadata:
  name: managed-cluster
spec:
  operatorNamespace: tigera-operator
```

## Managed cluster definition

### Metadata

| Field | Description | Accepted Values | Schema |
| ----- | --------------------------------------------------------- | --------------------------------------------------- | ------ |
| name | Unique name to describe this resource instance. Required. | Alphanumeric string with optional `.`, `_`, or `-`. | string |

- `cluster` is a reserved name for the management plane and is considered an invalid value.

### Spec

| Field | Description | Accepted Values | Schema | Default |
| -------------------- | ----------------------------------------------------------------------------------------------------------------- | --------------- | ------ | ------- |
| installationManifest | Installation Manifest to be applied on a managed cluster infrastructure | None | string | `Empty` |
| operatorNamespace | The namespace of the managed cluster's operator. This value is used in the generation of the InstallationManifest | None | string | `Empty` |

- The `installationManifest` field can be retrieved only once, at creation time. Updates are not supported for this field.

To extract the installation manifest at creation time, the `-o jsonpath="{.spec.installationManifest}"` parameter
can be used with a `kubectl` command.

### Status

Status represents the latest observed status of the Managed cluster. The `status` is read-only for users and updated by the
$[prodname] components.

| Field | Description | Schema |
| ---------- | --------------------------------------------------------------------------- | -------------------------------------- |
| conditions | List of conditions that describe the current status of the Managed cluster. | List of ManagedClusterStatusConditions |

**ManagedClusterStatusConditions**

Conditions represent the latest observed set of conditions for a Managed cluster.
The connection between the management
plane and the managed cluster is reported as follows:

- `Unknown` when no initial connection has been established
- `True` when both planes have an established connection
- `False` when neither plane has an established connection

| Field | Description | Accepted Values | Schema | Default |
| ------ | ------------------------------------------------------------------------- | -------------------------- | ------ | ------------------------- |
| type | Type of status that is being reported | - | string | `ManagedClusterConnected` |
| status | Status of the connection between a Managed cluster and management cluster | `Unknown`, `True`, `False` | string | `Unknown` |

[Multi-cluster management]: ../../multicluster/set-up-multi-cluster-management/standard-install/create-a-management-cluster.mdx

diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/networkpolicy.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/networkpolicy.mdx deleted file mode 100644 index c0105e336f..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/networkpolicy.mdx +++ /dev/null @@ -1,198 +0,0 @@

---
description: API for this Calico Enterprise resource.
---

# Network policy

import Servicematch from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_servicematch.mdx';

import Serviceaccountmatch from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_serviceaccountmatch.mdx';

import Ports from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_ports.mdx';

import SelectorScopes from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_selector-scopes.mdx';

import Selectors from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_selectors.mdx';

import Entityrule from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_entityrule.mdx';

import Icmp from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_icmp.mdx';

import Rule from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_rule.mdx';

import Httpmatch from '@site/calico/_includes/content/_httpmatch.mdx';

A network policy resource (`NetworkPolicy`) represents an ordered set of rules which are applied
to a collection of endpoints that match a [label selector](#selector).

`NetworkPolicy` is a namespaced resource. `NetworkPolicy` in a specific namespace
only applies to [workload endpoint resources](workloadendpoint.mdx)
in that namespace. Two resources are in the same namespace if the `namespace`
value is set the same on both.
See [global network policy resource](globalnetworkpolicy.mdx) for non-namespaced network policy.

`NetworkPolicy` resources can be used to define network connectivity rules between groups of $[prodname] endpoints and host endpoints, and
take precedence over [profile resources](profile.mdx) if any are defined.

NetworkPolicies are organized into [tiers](tier.mdx), which provide an additional layer of ordering—in particular, note that the `Pass` action skips to the
next [tier](tier.mdx), to enable hierarchical security policy.
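
For example, a rule in one tier can use the `Pass` action to hand matching traffic to the next tier instead of allowing or denying it outright. The following is a minimal sketch only; it assumes the `internal-access` tier shown in the sample below already exists, and the selector and port are illustrative:

```yaml
# Illustrative sketch: traffic matching this rule is neither allowed nor denied
# here; evaluation continues in the next tier.
apiVersion: projectcalico.org/v3
kind: NetworkPolicy
metadata:
  name: internal-access.pass-frontend
  namespace: production
spec:
  tier: internal-access
  selector: role == 'frontend'
  types:
    - Ingress
  ingress:
    - action: Pass
      protocol: TCP
      destination:
        ports:
          - 8080
```
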
- -For `kubectl` [commands](https://kubernetes.io/docs/reference/kubectl/overview/), the following case-insensitive aliases -may be used to specify the resource type on the CLI: -`networkpolicy.projectcalico.org`, `networkpolicies.projectcalico.org` and abbreviations such as -`networkpolicy.p` and `networkpolicies.p`. - -## Sample YAML - -This sample policy allows TCP traffic from `frontend` endpoints to port 6379 on -`database` endpoints. - -```yaml -apiVersion: projectcalico.org/v3 -kind: NetworkPolicy -metadata: - name: internal-access.allow-tcp-6379 - namespace: production -spec: - tier: internal-access - selector: role == 'database' - types: - - Ingress - - Egress - ingress: - - action: Allow - metadata: - annotations: - from: frontend - to: database - protocol: TCP - source: - selector: role == 'frontend' - destination: - ports: - - 6379 - egress: - - action: Allow -``` - -## Definition - -### Metadata - -| Field | Description | Accepted Values | Schema | Default | -| --------- | ------------------------------------------------------------------ | --------------------------------------------------- | ------ | --------- | -| name | The name of the network policy. Required. | Alphanumeric string with optional `.`, `_`, or `-`. | string | | -| namespace | Namespace provides an additional qualification to a resource name. | | string | "default" | - -### Spec - -| Field | Description | Accepted Values | Schema | Default | -| ---------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------- | --------------------- | --------------------------------------------- | -| order | Controls the order of precedence. $[prodname] applies the policy with the lowest value first. | | float | | -| tier | Name of the [tier](tier.mdx) this policy belongs to. | | string | `default` | -| selector | Selects the endpoints to which this policy applies. | | [selector](#selector) | all() | -| types | Applies the policy based on the direction of the traffic. To apply the policy to inbound traffic, set to `Ingress`. To apply the policy to outbound traffic, set to `Egress`. To apply the policy to both, set to `Ingress, Egress`. | `Ingress`, `Egress` | List of strings | Depends on presence of ingress/egress rules\* | -| ingress | Ordered list of ingress rules applied by policy. | | List of [Rule](#rule) | | -| egress | Ordered list of egress rules applied by this policy. | | List of [Rule](#rule) | | -| serviceAccountSelector | Selects the service account(s) to which this policy applies. Select a specific service account by name using the `projectcalico.org/name` label. | | [selector](#selector) | all() | -| performanceHints | Contains a list of hints to Calico's policy engine to help process the policy more efficiently. Hints never change the enforcement behaviour of the policy. The available hints are described [below](#performance-hints). | `AssumeNeededOnEveryNode` | List of strings | | - -\* If `types` has no value, $[prodname] defaults as follows. 
- -> | Ingress Rules Present | Egress Rules Present | `Types` value | -> | --------------------- | -------------------- | ----------------- | -> | No | No | `Ingress` | -> | Yes | No | `Ingress` | -> | No | Yes | `Egress` | -> | Yes | Yes | `Ingress, Egress` | - -### Rule - - - -### ICMP - - - -### EntityRule - - - -### Selector - - - - -### Ports - - - -### ServiceAccountMatch - - - -### ServiceMatch - - - -### Performance Hints - -Performance hints provide a way to tell $[prodname] about the intended use of the policy so that it may -process it more efficiently. Currently only one hint is defined: - -* `AssumeNeededOnEveryNode`: normally, $[prodname] only calculates a policy's rules and selectors on nodes where - the policy is actually in use (i.e. its selector matches a local endpoint). This saves work in most cases. - The `AssumeNeededOnEveryNode` hint tells $[prodname] to treat the policy as "in use" on *every* node. This is - useful for large policy sets that are known to apply to all (or nearly all) endpoints. It effectively "preloads" - the policy on every node so that there is less work to do when the first endpoint matching the policy shows up. - It also prevents work from being done to tear down the policy when the last endpoint is drained. - -## Application layer policy - -Application layer policy is an optional feature of $[prodname] and -[must be enabled](../../network-policy/application-layer-policies/alp.mdx) -to use the following match criteria. - -:::note - -Application layer policy match criteria are supported with the following restrictions. - -- Only ingress policy is supported. Egress policy must not contain any application layer policy match clauses. -- Rules must have the action `Allow` if they contain application layer policy match clauses. - -::: - -### HTTPMatch - - - -## Supported operations - -| Datastore type | Create/Delete | Update | Get/List | Notes | -| ------------------------ | ------------- | ------ | -------- | ----- | -| Kubernetes API datastore | Yes | Yes | Yes | - -#### List filtering on tiers - -List and watch operations may specify label selectors or field selectors to filter `NetworkPolicy` resources on tiers returned by the API server. -When no selector is specified, the API server returns all `NetworkPolicy` resources from all tiers that the user has access to. - -##### Field selector - -When using the field selector, supported operators are `=` and `==` - -The following example shows how to retrieve all `NetworkPolicy` resources in the default tier and in all namespaces: - -```bash -kubectl get networkpolicy.p --field-selector spec.tier=default --all-namespaces -``` - -##### Label selector - -When using the label selector, supported operators are `=`, `==` and `IN`. - -The following example shows how to retrieve all `NetworkPolicy` resources in the `default` and `net-sec` tiers and in all namespaces: - -```bash -kubectl get networkpolicy.p -l 'projectcalico.org/tier in (default, net-sec)' --all-namespaces -``` diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/networkset.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/networkset.mdx deleted file mode 100644 index 6ab9aa7687..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/networkset.mdx +++ /dev/null @@ -1,71 +0,0 @@ ---- -description: API for this Calico Enterprise resource. 
---- - -# Network set - -import DomainNames from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_domain-names.mdx'; - -A network set resource (NetworkSet) represents an arbitrary set of IP subnetworks/CIDRs, -allowing it to be matched by $[prodname] policy. Network sets are useful for applying policy to traffic -coming from (or going to) external, non-$[prodname], networks. - -`NetworkSet` is a namespaced resource. `NetworkSets` in a specific namespace -only applies to [network policies](networkpolicy.mdx) -in that namespace. Two resources are in the same namespace if the `namespace` -value is set the same on both. (See [GlobalNetworkSet](globalnetworkset.mdx) for non-namespaced network sets.) - -The metadata for each network set includes a set of labels. When $[prodname] is calculating the set of -IPs that should match a source/destination selector within a -[network policy](networkpolicy.mdx) rule, it includes -the CIDRs from any network sets that match the selector. - -:::note - -Since $[prodname] matches packets based on their source/destination IP addresses, -$[prodname] rules may not behave as expected if there is NAT between the $[prodname]-enabled node and the -networks listed in a network set. For example, in Kubernetes, incoming traffic via a service IP is -typically SNATed by the kube-proxy before reaching the destination host so $[prodname]'s workload -policy will see the kube-proxy's host's IP as the source instead of the real source. - -::: - -## Sample YAML - -```yaml -apiVersion: projectcalico.org/v3 -kind: NetworkSet -metadata: - name: external-database - namespace: staging - labels: - role: db -spec: - nets: - - 198.51.100.0/28 - - 203.0.113.0/24 - allowedEgressDomains: - - db.com - - '*.db.com' -``` - -## Network set definition - -### Metadata - -| Field | Description | Accepted Values | Schema | Default | -| --------- | ------------------------------------------------------------------ | ------------------------------------------------- | ------ | --------- | -| name | The name of this network set. Required. | Lower-case alphanumeric with optional `_` or `-`. | string | | -| namespace | Namespace provides an additional qualification to a resource name. | | string | "default" | -| labels | A set of labels to apply to this endpoint. | | map | | - -### Spec - -| Field | Description | Accepted Values | Schema | Default | -| -------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -------------------------------------------------------------------------- | ------ | ------- | -| nets | The IP networks/CIDRs to include in the set. | Valid IPv4 or IPv6 CIDRs, for example "192.0.2.128/25" | list | | -| allowedEgressDomains | The list of domain names that belong to this set and are honored in egress allow rules only. Domain names specified here only work to allow egress traffic from the cluster to external destinations. They don't work to _deny_ traffic to destinations specified by domain name, or to allow ingress traffic from _sources_ specified by domain name. 
| List of [exact or wildcard domain names](#exact-and-wildcard-domain-names) | list | | - -### Exact and wildcard domain names - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/node.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/node.mdx deleted file mode 100644 index f8c4fbbb27..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/node.mdx +++ /dev/null @@ -1,82 +0,0 @@ ---- -description: API for this Calico Enterprise resource. ---- - -# Node - -A node resource (`Node`) represents a node running $[prodname]. When adding a host -to a $[prodname] cluster, a node resource needs to be created which contains the -configuration for the `$[nodecontainer]` instance running on the host. - -When starting a `$[nodecontainer]` instance, the name supplied to the instance should -match the name configured in the Node resource. - -By default, starting a `$[nodecontainer]` instance will automatically create a node resource -using the `hostname` of the compute host. - -This resource is not supported in `kubectl`. - -## Sample YAML - -```yaml -apiVersion: projectcalico.org/v3 -kind: Node -metadata: - name: node-hostname -spec: - bgp: - asNumber: 64512 - ipv4Address: 10.244.0.1/24 - ipv6Address: 2001:db8:85a3::8a2e:370:7334/120 - ipv4IPIPTunnelAddr: 192.168.0.1 -``` - -## Definition - -### Metadata - -| Field | Description | Accepted Values | Schema | -| ----- | -------------------------------- | --------------------------------------------------- | ------ | -| name | The name of this node. Required. | Alphanumeric string with optional `.`, `_`, or `-`. | string | - -### Spec - -| Field | Description | Accepted Values | Schema | Default | -| -------------------- | --------------------------------------------------------------------------------------------------------------------------------- | --------------- | ---------------------------- | ------- | -| bgp | BGP configuration for this node. Omit if using $[prodname] for policy only. | | [BGP](#bgp) | -| ipv4VXLANTunnelAddr | IPv4 address of the VXLAN tunnel. This is system configured and should not be updated manually. | | string | -| ipv6VXLANTunnelAddr | IPv6 address of the VXLAN tunnel. This is system configured and should not be updated manually. | | string | -| vxlanTunnelMACAddr | MAC address of the VXLAN tunnel. This is system configured and should not be updated manually. | | string | -| vxlanTunnelMACAddrV6 | MAC address of the IPv6 VXLAN tunnel. This is system configured and should not be updated manually. | | string | -| orchRefs | Correlates this node to a node in another orchestrator. | | list of [OrchRefs](#orchref) | -| wireguard | WireGuard configuration for this node. This is applicable only if WireGuard is enabled in [Felix Configuration](felixconfig.mdx). | | [WireGuard](#wireguard) | - -### OrchRef - -| Field | Description | Accepted Values | Schema | Default | -| ------------ | ------------------------------------------------ | --------------- | ------ | ------- | -| nodeName | Name of this node according to the orchestrator. | | string | -| orchestrator | Name of the orchestrator. 
| k8s | string | - -### BGP - -| Field | Description | Accepted Values | Schema | Default | -| ----------------------- | -------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- | ------- | -| asNumber | The AS Number of your `$[nodecontainer]`. | Optional. If omitted the global value is used (see [example modifying Global BGP settings](../../networking/configuring/bgp.mdx) for details about modifying the `asNumber` setting). | integer | -| ipv4Address | The IPv4 address and subnet exported as the next-hop for the $[prodname] endpoints on the host | The IPv4 address must be specified if BGP is enabled. | string | -| ipv6Address | The IPv6 address and subnet exported as the next-hop for the $[prodname] endpoints on the host | Optional | string | -| ipv4IPIPTunnelAddr | IPv4 address of the IP-in-IP tunnel. This is system configured and should not be updated manually. | Optional IPv4 address | string | -| routeReflectorClusterID | Enables this node as a route reflector within the given cluster | Optional IPv4 address | string | - -### WireGuard - -| Field | Description | Accepted Values | Schema | Default | -| -------------------- | ----------------------------------------------------------------------------------------- | --------------- | ------ | ------- | -| interfaceIPv4Address | The IP address and subnet for the IPv4 WireGuard interface created by Felix on this node. | Optional | string | -| interfaceIPv6Address | The IP address and subnet for the IPv6 WireGuard interface created by Felix on this node. | Optional | string | - -## Supported operations - -| Datastore type | Create/Delete | Update | Get/List | Notes | -| --------------------- | ------------- | ------ | -------- | ------------------------------------------------------------------ | -| Kubernetes API server | No | Yes | Yes | `$[nodecontainer]` data is directly tied to the Kubernetes nodes. | diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/overview.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/overview.mdx deleted file mode 100644 index 791f2a9424..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/overview.mdx +++ /dev/null @@ -1,117 +0,0 @@ ---- -description: Calico Enterprise resources (APIs) that you can manage using calicoctl. ---- - -# Resource definitions - -This section describes the set of valid resource types that can be managed -through `calicoctl` or `kubectl`. - -While resources may be supplied in YAML or JSON format, this guide provides examples in YAML. - -## Overview of resource structure - -The calicoctl commands for resource management (create, apply, delete, replace, get) -all take resource manifests as input. - -Each manifest may contain a single resource -(e.g. a profile resource), or a list of multiple resources (e.g. a profile and two -hostEndpoint resources). - -The general structure of a single resource is as follows: - -```yaml noValidation -apiVersion: projectcalico.org/v3 -kind: -metadata: - # Identifying information - name: - ... -spec: - # Specification of the resource - ... 
-``` - -### Schema - -| Field | Description | Accepted Values | Schema | -| ---------- | --------------------------------------------------------------------------------------- | -------------------- | ------------------------ | -| apiVersion | Indicates the version of the API that the data corresponds to. | projectcalico.org/v3 | string | -| kind | Specifies the type of resource described by the YAML document. | | [kind](#supported-kinds) | -| metadata | Contains information used to uniquely identify the particular instance of the resource. | | map | -| spec | Contains the resource specification. | | map | - -### Supported kinds - -The following resources are supported: - -- [AlertException](alertexception.mdx) -- [BGPConfiguration](bgpconfig.mdx) -- [BGPPeer](bgppeer.mdx) -- [DeepPacketInspection](deeppacketinspection.mdx) -- [EgressGatewayPolicy](egressgatewaypolicy.mdx) -- [FelixConfiguration](felixconfig.mdx) -- [GlobalAlert](globalalert.mdx) -- [GlobalNetworkPolicy](globalnetworkpolicy.mdx) -- [GlobalNetworkSet](globalnetworkset.mdx) -- [GlobalReport](globalreport.mdx) -- [GlobalThreatFeed](globalthreatfeed.mdx) -- [HostEndpoint](hostendpoint.mdx) -- [IPPool](ippool.mdx) -- [IPReservation](ipreservation.mdx) -- [KubeControllersConfiguration](kubecontrollersconfig.mdx) -- [LicenseKey](licensekey.mdx) -- [ManagedCluster](managedcluster.mdx) -- [NetworkPolicy](networkpolicy.mdx) -- [NetworkSet](networkset.mdx) -- [Node](node.mdx) -- [PacketCapture](packetcapture.mdx) -- [Profile](profile.mdx) -- [RemoteClusterConfiguration](remoteclusterconfiguration.mdx) -- [StagedGlobalNetworkPolicy](stagedglobalnetworkpolicy.mdx) -- [StagedKubernetesNetworkPolicy](stagedkubernetesnetworkpolicy.mdx) -- [StagedNetworkPolicy](stagednetworkpolicy.mdx) -- [Tier](tier.mdx) -- [WorkloadEndpoint](workloadendpoint.mdx) - -### Resource name requirements - -Every resource must have the `name` field specified. Name must be unique within a namespace. -Name required when creating resources, and cannot be updated. -A valid resource name can have alphanumeric characters with optional `.`, `_`, or `-`. of up to 128 characters total. - -### Multiple resources in a single file - -A file may contain multiple resource documents specified in a YAML list format. For example, the following is the contents of a file containing two `HostEndpoint` resources: - -```yaml -- apiVersion: projectcalico.org/v3 - kind: HostEndpoint - metadata: - name: endpoint1 - labels: - type: database - spec: - interface: eth0 - node: host1 - profiles: - - prof1 - - prof2 - expectedIPs: - - 1.2.3.4 - - '00:bb::aa' -- apiVersion: projectcalico.org/v3 - kind: HostEndpoint - metadata: - name: endpoint2 - labels: - type: frontend - spec: - interface: eth1 - node: host1 - profiles: - - prof1 - - prof2 - expectedIPs: - - 1.2.3.5 -``` diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/packetcapture.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/packetcapture.mdx deleted file mode 100644 index 4f3200cbfa..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/packetcapture.mdx +++ /dev/null @@ -1,148 +0,0 @@ ---- -description: API for this Calico Enterprise resource. ---- - -# Packet capture - -import Selectors from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_selectors.mdx'; - -A Packet Capture resource (`PacketCapture`) represents captured live traffic for debugging microservices and application -interaction inside a Kubernetes cluster. 
- -$[prodname] supports selecting one or multiple [WorkloadEndpoints resources](workloadendpoint.mdx) -as described in the [Packet Capture] guide. - -For `kubectl` [commands](https://kubernetes.io/docs/reference/kubectl/overview/), the following case-insensitive aliases may be used to specify the resource type on the CLI: -`packetcapture`,`packetcaptures`, `packetcapture.projectcalico.org`, `packetcaptures.projectcalico.org` as well as -abbreviations such as `packetcapture.p` and `packetcaptures.p`. - -## Sample YAML - -```yaml -apiVersion: projectcalico.org/v3 -kind: PacketCapture -metadata: - name: sample-capture - namespace: sample-namespace -spec: - selector: k8s-app == "sample-app" - filters: - - protocol: TCP - ports: - - 80 -``` - -```yaml -apiVersion: projectcalico.org/v3 -kind: PacketCapture -metadata: - name: sample-capture - namespace: sample-namespace -spec: - selector: all() - startTime: '2021-08-26T12:00:00Z' - endTime: '2021-08-26T12:30:00Z' -``` - -## Packet capture definition - -### Metadata - -| Field | Description | Accepted Values | Schema | Default | -| --------- | ------------------------------------------------------------------ | --------------------------------------------------- | ------ | --------- | -| name | The name of the packet capture. Required. | Alphanumeric string with optional `.`, `_`, or `-`. | string | | -| namespace | Namespace provides an additional qualification to a resource name. | | string | "default" | - -### Spec - -| Field | Description | Accepted Values | Schema | Default | -| --------- | ---------------------------------------------------------------------------------- | ----------------------- | ----------------------- | ------- | -| selector | Selects the endpoints to which this packet capture applies. | | [selector](#selector) | | -| filters | The ordered set of filters applied to traffic captured from an interface. | | [filters](#filters) | | -| startTime | Defines the start time from which this PacketCapture will start capturing packets. | Date in RFC 3339 format | [startTime](#starttime) | | -| endTime | Defines the end time at which this PacketCapture will stop capturing packets. | Date in RFC 3339 format | [endTime](#endtime) | | - -### Selector - - - -### Filters - -| Field | Description | Accepted Values | Schema | Default | -| -------- | ------------------------------------- | ------------------------------------------------------------ | ----------------- | ------- | -| protocol | Positive protocol match. | `TCP`, `UDP`, `ICMP`, `ICMPv6`, `SCTP`, `UDPLite`, `1`-`255` | string \| integer | | -| ports | Positive match on the specified ports | | list of ports | | - -$[prodname] supports the following syntax for expressing ports. - -| Syntax | Example | Description | -| --------- | --------- | ------------------------------------------------------ | -| int | 80 | The exact (numeric) port specified | -| start:end | 6040:6050 | All (numeric) ports within the range start ≤ x ≤ end | - -An individual numeric port may be specified as a YAML/JSON integer. A port range must be represented as a string. Named ports are not supported by `PacketCapture`. -Multiple ports can be defined to filter traffic. All specified ports or port ranges concatenated using the logical operator "OR". - -For example, this would be a valid list of ports: - -```yaml -ports: [8080, '1234:5678'] -``` - -Multiple filter rules can be defined to filter traffic. All rules are concatenated using the logical operator "OR". 

For example, filtering either TCP or UDP traffic is defined as:

```yaml
filters:
  - protocol: TCP
  - protocol: UDP
```

Within a single filter rule, the protocol and the list of ports are combined using the logical operator "AND".

For example, filtering TCP traffic on port 80 is defined as:

```yaml
filters:
  - protocol: TCP
    ports: [80]
```

### StartTime

Defines the start time from which this PacketCapture will start capturing packets, in RFC 3339 format.
If omitted or the value is in the past, the capture will start immediately.
If the value is changed to a future time, capture will stop immediately and restart at that time.

```yaml
startTime: '2021-08-26T12:00:00Z'
```

### EndTime

Defines the end time at which this PacketCapture will stop capturing packets, in RFC 3339 format.
If omitted, the capture will continue indefinitely.
If the value is changed to the past, capture will stop immediately.

```yaml
endTime: '2021-08-26T12:30:00Z'
```

### Status

`PacketCaptureStatus` lists the current state of a `PacketCapture` and its generated capture files.

| Field | Description |
| ----- | ------------------------------------------------------------------------------------------------------ |
| files | Describes the location of the generated capture files, identified by node, directory, and file names. |

### Files

| Field | Description |
| --------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| directory | The path inside the calico-node container for the generated files. |
| fileNames | The name of the generated file for a `PacketCapture` ordered alphanumerically.
    The active packet capture file will be identified using the following schema: `{}.pcap`.
    Rotated capture files name will contain an index matching the rotation timestamp. | -| node | The hostname of the Kubernetes node the files are located on. | -| state | Determines whether a PacketCapture is capturing traffic from any interface attached to the current node. Possible values include: Capturing, Scheduled, Finished, Error, WaitingForTraffic | - -[packet capture]: ../../observability/packetcapture.mdx diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/policyrecommendations.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/policyrecommendations.mdx deleted file mode 100644 index 01c64c7540..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/policyrecommendations.mdx +++ /dev/null @@ -1,56 +0,0 @@ ---- -description: API for this Calico Enterprise resource. ---- - -# Policy recommendation scope - -import Servicematch from '../../_includes/content/_servicematch.mdx'; - -import Serviceaccountmatch from '../../_includes/content/_serviceaccountmatch.mdx'; - -import Ports from '../../_includes/content/_ports.mdx'; - -import Selectors from '../../_includes/content/_selectors.mdx'; - -import Entityrule from '../../_includes/content/_entityrule.mdx'; - -import Icmp from '../../_includes/content/_icmp.mdx'; - -import Rule from '../../_includes/content/_rule.mdx'; - -The policy recommendation scope is a collection of configuration options to control [policy recommendation](../../network-policy/recommendations/policy-recommendations.mdx) in the web console. - -To apply changes to this resource, use the following format: - -``` -$ kubectl patch policyrecommendationscope default -p '{"spec":{"":""}}' -``` -**Example** - -`$ kubectl patch policyrecommendationscope default -p '{"spec":{"interval":"5m"}}'` - -## Definition - -### - -### Metadata - -| Field | Description | Accepted Values | Schema | Default | -| ---------------- | ------------------------------------------------------------ | --------------------------------------------------- | ------ | -------------------------------------------------- | -| name | The name of the policy recommendation scope. | `default` | string | | - -### Spec - -| Field | Description | Accepted Values | Schema | Default | -| ---------------------- | ------------------------------------------------------------ | --------------- | ------ | -------------- | -| Interval | The frequency to create and refine policy recommendations. | | | 2.5m (minutes) | -| InitialLookback | Start time to look at flow logs when first creating a policy recommendation. | | | 24h (hours) | -| StabilizationPeriod | Time that a recommended policy should remain unchanged so it is stable and ready to be enforced. | | | 10m (minutes) | - -#### NamespaceSpec - -| Field | Description | Accepted Values | Schema | Default | -| ---------------------- | ------------------------------------------------------------ | --------------- | ------ | -------------- | -| recStatus | Defines the policy recommendation engine status. | Enabled/Disabled | | Disabled | -| selector | Selects the namespaces for generating recommendations. 
| | | `!(projectcalico.org/name starts with ''tigera-'') && !(projectcalico.org/name starts with ''calico-'') && !(projectcalico.org/name starts with ''kube-'')` | -| intraNamespacePassThroughTraffic | When true, sets all intra-namespace traffic to Pass | true/false | | false | diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/profile.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/profile.mdx deleted file mode 100644 index bf24a1e7ce..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/profile.mdx +++ /dev/null @@ -1,52 +0,0 @@ ---- -description: API for this Calico Enterprise resource. ---- - -# Profile - -A profile resource (`Profile`) represents a set of rules which are applied -to the individual endpoints to which this profile has been assigned. - -Each $[prodname] endpoint or host endpoint can be assigned to zero or more profiles. - -This resource is not supported in `kubectl`. - -## Sample YAML - -The following sample profile applies the label `stage: development` to any endpoint that includes `dev-apps` in its list of profiles. - -```yaml -apiVersion: projectcalico.org/v3 -kind: Profile -metadata: - name: dev-apps -spec: - labelsToApply: - stage: development -``` - -## Definition - -### Metadata - -| Field | Description | Accepted Values | Schema | Default | -| ------ | ---------------------------------- | --------------------------------------------------- | ---------------------------------- | ------- | -| name | The name of the profile. Required. | Alphanumeric string with optional `.`, `_`, or `-`. | string | -| labels | A set of labels for this profile. | | map of string key to string values | - -### Spec - -| Field | Description | Accepted Values | Schema | Default | -| -------------------- | -------------------------------------------------------------------------------------------------------------- | --------------- | -------------------------------------- | ------- | -| ingress (deprecated) | The ingress rules belonging to this profile. | | List of [Rule](networkpolicy.mdx#rule) | -| egress (deprecated) | The egress rules belonging to this profile. | | List of [Rule](networkpolicy.mdx#rule) | -| labelsToApply | An optional set of labels to apply to each endpoint in this profile (in addition to the endpoint's own labels) | | map | - -For `Rule` details please see the [NetworkPolicy](networkpolicy.mdx) or -[GlobalNetworkPolicy](globalnetworkpolicy.mdx) resource. - -## Supported operations - -| Datastore type | Create/Delete | Update | Get/List | Notes | -| --------------------- | ------------- | ------ | -------- | ------------------------------------------------------------------------------ | -| Kubernetes API server | No | No | Yes | $[prodname] profiles are pre-assigned for each Namespace and Service Account. | diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/remoteclusterconfiguration.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/remoteclusterconfiguration.mdx deleted file mode 100644 index 2f8ac0d1f7..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/remoteclusterconfiguration.mdx +++ /dev/null @@ -1,97 +0,0 @@ ---- -description: API for this Calico Enterprise resource. ---- - -# Remote cluster configuration - -A remote cluster configuration resource (RemoteClusterConfiguration) represents a cluster in a federation of clusters. 
Each remote cluster needs a configuration to be specified to allow the local cluster to access resources on the remote
cluster. The connection is one-way: the information flows only from the remote to the local cluster. To share
information from the local cluster to the remote one, a remote cluster configuration resource must be created on the
remote cluster.

A remote cluster configuration causes Typha and `calicoq` to retrieve the following resources from a remote cluster:

- [Workload endpoints](workloadendpoint.mdx)
- [Host endpoints](hostendpoint.mdx)
- [Profiles](profile.mdx) (rules are not retrieved from remote profiles, only the `LabelsToApply` field is used)

When using the Kubernetes API datastore with RBAC enabled on the remote cluster, the RBAC rules must be configured to
allow access to these resources.

For more details on the federation feature, refer to the [Overview](../../multicluster/federation/overview.mdx).

The meaning of the fields matches the configuration used for configuring `calicoctl`; see the [Kubernetes datastore](../../operations/clis/calicoctl/configure/datastore.mdx) instructions for more details.

This resource is not supported in `kubectl`.

## Sample YAML

For a remote Kubernetes datastore cluster:

```yaml
apiVersion: projectcalico.org/v3
kind: RemoteClusterConfiguration
metadata:
  name: cluster1
spec:
  datastoreType: kubernetes
  kubeconfig: /etc/tigera-federation-remotecluster/kubeconfig-rem-cluster-1
```

For a remote etcdv3 cluster:

```yaml
apiVersion: projectcalico.org/v3
kind: RemoteClusterConfiguration
metadata:
  name: cluster1
spec:
  datastoreType: etcdv3
  etcdEndpoints: 'https://10.0.0.1:2379,https://10.0.0.2:2379'
```

## RemoteClusterConfiguration Definition

### Metadata

| Field | Description | Accepted Values | Schema |
| ----- | ---------------------------------------------- | ----------------------------------------- | ------ |
| name | The name of this remote cluster configuration. | Lower-case alphanumeric with optional `-` | string |

### Spec

| Field | Secret key | Description | Accepted Values | Schema | Default |
| ------------------- | ------------- | ------------------------------------------------------------------ | --------------------- | -------------------------- | ------- |
| clusterAccessSecret | | Reference to a Secret that contains connection information | | Kubernetes ObjectReference | none |
| datastoreType | datastoreType | The datastore type of the remote cluster. | `etcdv3` `kubernetes` | string | none |
| etcdEndpoints | etcdEndpoints | A comma separated list of etcd endpoints. | | string | none |
| etcdUsername | etcdUsername | Username for RBAC. | | string | none |
| etcdPassword | etcdPassword | Password for the given username. | | string | none |
| etcdKeyFile | etcdKey | Path to the etcd key file. | | string | none |
| etcdCertFile | etcdCert | Path to the etcd certificate file. | | string | none |
| etcdCACertFile | etcdCACert | Path to the etcd CA certificate file. | | string | none |
| kubeconfig | kubeconfig | Location of the `kubeconfig` file. | | string | none |
| k8sAPIEndpoint | | Location of the kubernetes API server. | | string | none |
| k8sKeyFile | | Location of a client key for accessing the Kubernetes API. | | string | none |
| k8sCertFile | | Location of a client certificate for accessing the Kubernetes API. | | string | none |
| k8sCAFile | | Location of a CA certificate.
| | string | none | -| k8sAPIToken | | Token to be used for accessing the Kubernetes API. | | string | none | - -When using the `clusterAccessSecret` field, all other fields in the RemoteClusterconfiguration resource must be empty. -When the `clusterAccessSecret` reference is used, all datastore configuration will be read from the referenced Secret -using the "Secret key" fields named in the above table as the data key in the Secret. The fields read from a Secret -that were file path or locations in a RemoteClusterConfiguration will be expected to be the file contents when read -from a Secret. - -All of the fields that start with `etcd` are only valid when the DatastoreType is etcdv3 and the fields that start with `k8s` or `kube` are only valid when the datastore type is kubernetes. -The `kubeconfig` field and the fields that end with `File` must be accessible to Typha and `calicoq`, this does not apply when the data is coming from a Secret referenced by `clusterAccessSecret`. - -When the DatastoreType is `kubernetes`, the `kubeconfig` file is optional but since it can contain all of the authentication information needed to access the Kubernetes API server it is generally easier to use than setting all the individual `k8s` fields. The other kubernetes fields can be used by themselves though or to override specific kubeconfig values. - -## Supported operations - -| Datastore type | Create/Delete | Update | Get/List | Notes | -| --------------------- | ------------- | ------ | -------- | ----- | -| etcdv3 | Yes | Yes | Yes | -| Kubernetes API server | Yes | Yes | Yes | diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/securityeventwebhook.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/securityeventwebhook.mdx deleted file mode 100644 index 770e87f812..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/securityeventwebhook.mdx +++ /dev/null @@ -1,146 +0,0 @@ ---- -description: API for this Calico Enterprise resource. ---- - -# Security event webhook - -A security event webhook (`SecurityEventWebhook`) is a cluster-scoped resource that represents instances -of integrations with external systems through the webhook callback mechanism. - -For `kubectl` [commands](https://kubernetes.io/docs/reference/kubectl/overview/), the following case-insensitive aliases -can be used to specify the resource type on the CLI: -`securityeventwebhook.projectcalico.org`, `securityeventwebhooks.projectcalico.org` and abbreviations such as -`securityeventwebhook.p` and `securityeventwebhooks.p`. - -## Sample YAML - -```yaml -apiVersion: projectcalico.org/v3 -kind: SecurityEventWebhook -metadata: - name: jira-webhook - annotations: - webhooks.projectcalico.org/labels: 'Cluster name:Calico Enterprise' -spec: - consumer: Jira - state: Enabled - query: type=waf - config: - - name: url - value: 'https://your-jira-instance-name.atlassian.net/rest/api/2/issue/' - - name: project - value: PRJ - - name: issueType - value: Bug - - name: username - valueFrom: - secretKeyRef: - name: jira-secrets - key: username - - name: apiToken - valueFrom: - secretKeyRef: - name: jira-secrets - key: token -``` - -## Security event webhook definition - -### Metadata - -| Field | Description | Accepted Values | Schema | -| ----- | --------------------------------------------------------- | --------------------------------------------------- | ------ | -| name | Unique name to describe this resource instance. Required. 
| Alphanumeric string with optional `.`, `_`, or `-`. | string | - -#### Annotations - -Security event webhooks provide an easy way to add arbitrary data to the webhook generated HTTP payload through the metadata annotation. -The value of the `webhooks.projectcalico.org/labels`, if present, will be converted into the payload labels. -The value must conform to the following rules: - -- Key and value data for a single label are separated by the `:` character, -- Multiple labels are separated by the `,` character. - -### Spec - -| Field | Description | Accepted Values | Schema | Required | -| -------- | --------------------------------------------------------------------------------------------------------------- | ----------------------------------- | ------------------------------------------------------------------------| --------------------------- | -| consumer | Specifies intended consumer of the webhook. | Slack, Jira, Generic | string | yes | -| state | Defines current state of the webhook. | Enabled, Disabled, Debug | string | yes | -| query | Defines query used to retrieve security events from Calico. | [see Query](#query) | string | yes | -| config | Webhook configuration, required contents of this structure is determined by the value of the `consumer` field. | [see Config](#configuration) | list of [SecurityEventWebhookConfigVar](#securityeventwebhookconfigvar) | yes | - -### SecurityEventWebhookConfigVar - -| Field | Description | Schema | Required | -| ------------ | -------------------------------------------------------------------------- | --------------------------------------------------------------------------- | ----------------------------------- | -| name | Configuration variable name. | string | yes | -| value | Direct value for the variable. | string | yes if `valueFrom` is not specified | -| valueFrom | Value defined either in a Kubernetes ConfigMap or in a Kubernetes Secret. | [SecurityEventWebhookConfigVarSource](#securityeventwebhookconfigvarsource) | yes if `value` is not specified | - -### SecurityEventWebhookConfigVarSource - -| Field | Description | Schema | Required | -| ---------------- | --------------------------------- | ------------------------------------------------------------------------------------------------------------ | ----------------------------------------- | -| configMapKeyRef | Kubernetes ConfigMap reference. | `ConfigMapKeySelector` (referenced ConfigMap key should exist in the `tigera-intrusion-detection` namespace) | yes if `secretKeyRef` is not specified | -| secretKeyRef | Kubernetes Secret reference. | `SecretKeySelector` (referenced Secret key should exist in the `tigera-intrusion-detection` namespace) | yes if `configMapKeyRef` is not specified | - -### Status - -Field `status` reflects the health of a webhook. It is a list of [Kubernetes Conditions](https://pkg.go.dev/k8s.io/apimachinery@v0.23.0/pkg/apis/meta/v1#Condition). - -## Query - -Security event webhooks use a domain-specific query language to select which records -from the data set should trigger the HTTP request. - -The query language is composed of any number of selectors, combined -with boolean expressions (`AND`, `OR`, and `NOT`), set expressions -(`IN` and `NOTIN`) and bracketed subexpressions. These are translated -by $[prodname] to Elastic DSL queries that are executed on the backend. - -Set expressions support wildcard operators asterisk (`*`) and question mark (`?`). 
-The asterisk sign matches zero or more characters and the question mark matches a single character. - -A selector consists of a key, comparator, and value. Keys and values -may be identifiers consisting of alphanumerics and underscores (`_`) -with the first character being alphabetic or an underscore, or may be -quoted strings. Values may also be integer or floating point numbers. -Comparators may be `=` (equal), `!=` (not equal), `<` (less than), -`<=` (less than or equal), `>` (greater than), or `>=` (greater than -or equal). - -## Configuration - -Data required to be present in the `config` section of the security event webhook `spec` depends on the intended consumer for the HTTP -requests generated by the webhook. The value in the `consumer` field of the `spec` specifies the consumer and therefore data -that is required to be present. Currently Calico supports the following consumers: `Slack`, `Jira` and `Generic`. -Payloads generated by the webhook will be different for each of the listed use cases. - -### Slack - -Data fields required for the `Slack` value present in the `spec.consumer` field of a webhook: - -| Field | Description | Required | -| ---------------- | ------------------------------------------------------------------------------ | ---------- | -| url | A valid Slack [Incoming Webhook URL](https://api.slack.com/messaging/webhooks). | yes | - -### Generic - -Data fields required for the `Generic` value present in the `spec.consumer` field of a webhook: - -| Field | Description | Required | -| ---------------- | --------------------------------------------------- | ---------- | -| url | A generic and valid URL of another HTTP(s) endpoint. | yes | - -### Jira - -Data fields required for the `Jira` value present in the `spec.consumer` field of a webhook: - -| Field | Description | Required | -| ---------------- | ---------------------------------------------------------------------- | ---------- | -| url | URL of a Jira REST API v2 endpoint for the organisation. | yes | -| project | A valid Jira project abbreviation. | yes | -| issueType | A valid issue type for the selected project, examples: `Bug` or `Task` | yes | -| username | A valid Jira user name. | yes | -| apiToken | A valid Jira API token for the user. | yes | diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/stagedglobalnetworkpolicy.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/stagedglobalnetworkpolicy.mdx deleted file mode 100644 index edaa29705b..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/stagedglobalnetworkpolicy.mdx +++ /dev/null @@ -1,187 +0,0 @@ ---- -description: API for this resource. 
---- - -# Staged global network policy - -import Servicematch from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_servicematch.mdx'; - -import Serviceaccountmatch from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_serviceaccountmatch.mdx'; - -import Ports from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_ports.mdx'; - -import Selectors from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_selectors.mdx'; - -import Entityrule from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_entityrule.mdx'; - -import Icmp from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_icmp.mdx'; - -import Rule from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_rule.mdx'; - -A staged global network policy resource (`StagedGlobalNetworkPolicy`) represents an ordered set of rules which are applied -to a collection of endpoints that match a [label selector](#selector). These rules are used to preview network behavior and do -not enforce network traffic. For enforcing network traffic, see [global network policy resource](globalnetworkpolicy.mdx). - -`StagedGlobalNetworkPolicy` is not a namespaced resource. `StagedGlobalNetworkPolicy` applies to [workload endpoint resources](workloadendpoint.mdx) in all namespaces, and to [host endpoint resources](hostendpoint.mdx). -Select a namespace in a `StagedGlobalNetworkPolicy` in the standard selector by using -`projectcalico.org/namespace` as the label name and a `namespace` name as the -value to compare against, e.g., `projectcalico.org/namespace == "default"`. -See [staged network policy resource](stagednetworkpolicy.mdx) for staged namespaced network policy. - -`StagedGlobalNetworkPolicy` resources can be used to define network connectivity rules between groups of $[prodname] endpoints and host endpoints, and -take precedence over [Profile resources](profile.mdx) if any are defined. - -StagedGlobalNetworkPolicies are organized into [tiers](tier.mdx), which provide an additional layer of ordering—in particular, note that the `Pass` action skips to the -next [tier](tier.mdx), to enable hierarchical security policy. - -For `kubectl` [commands](https://kubernetes.io/docs/reference/kubectl/overview/), the following case-insensitive aliases -may be used to specify the resource type on the CLI: -`stagedglobalnetworkpolicy.projectcalico.org`, `stagedglobalnetworkpolicies.projectcalico.org` and abbreviations such as -`stagedglobalnetworkpolicy.p` and `stagedglobalnetworkpolicies.p`. - -## Sample YAML - -This sample policy allows TCP traffic from `frontend` endpoints to port 6379 on -`database` endpoints. - -```yaml -apiVersion: projectcalico.org/v3 -kind: StagedGlobalNetworkPolicy -metadata: - name: internal-access.allow-tcp-6379 -spec: - tier: internal-access - selector: role == 'database' - types: - - Ingress - - Egress - ingress: - - action: Allow - protocol: TCP - source: - selector: role == 'frontend' - destination: - ports: - - 6379 - egress: - - action: Allow -``` - -## Definition - -### Metadata - -| Field | Description | Accepted Values | Schema | Default | -| ----- | ----------------------------------------- | --------------------------------------------------- | ------ | ------- | -| name | The name of the network policy. Required. | Alphanumeric string with optional `.`, `_`, or `-`. 
| string | | - -### Spec - -| Field | Description | Accepted Values | Schema | Default | -| ---------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------- | --------------------- | --------------------------------------------- | -| order | Controls the order of precedence. $[prodname] applies the policy with the lowest value first. | | float | | -| tier | Name of the [tier](tier.mdx) this policy belongs to. | | string | `default` | -| selector | Selects the endpoints to which this policy applies. | | [selector](#selector) | all() | -| serviceAccountSelector | Selects the service account(s) to which this policy applies. Select all service accounts in the cluster with a specific name using the `projectcalico.org/name` label. | | [selector](#selector) | all() | -| namespaceSelector | Selects the namespace(s) to which this policy applies. Select a specific namespace by name using the `projectcalico.org/name` label. | | [selector](#selector) | all() | -| types | Applies the policy based on the direction of the traffic. To apply the policy to inbound traffic, set to `Ingress`. To apply the policy to outbound traffic, set to `Egress`. To apply the policy to both, set to `Ingress, Egress`. | `Ingress`, `Egress` | List of strings | Depends on presence of ingress/egress rules\* | -| ingress | Ordered list of ingress rules applied by policy. | | List of [Rule](#rule) | | -| egress | Ordered list of egress rules applied by this policy. | | List of [Rule](#rule) | | -| doNotTrack\*\* | Indicates to apply the rules in this policy before any data plane connection tracking, and that packets allowed by these rules should not be tracked. | true, false | boolean | false | -| preDNAT\*\* | Indicates to apply the rules in this policy before any DNAT. | true, false | boolean | false | -| applyOnForward\*\* | Indicates to apply the rules in this policy on forwarded traffic as well as to locally terminated traffic. | true, false | boolean | false | -| performanceHints | Contains a list of hints to Calico's policy engine to help process the policy more efficiently. Hints never change the enforcement behaviour of the policy. The available hints are described [below](#performance-hints). | `AssumeNeededOnEveryNode` | List of strings | | - -\* If `types` has no value, $[prodname] defaults as follows. - -> | Ingress Rules Present | Egress Rules Present | `Types` value | -> | --------------------- | -------------------- | ----------------- | -> | No | No | `Ingress` | -> | Yes | No | `Ingress` | -> | No | Yes | `Egress` | -> | Yes | Yes | `Ingress, Egress` | - -\*\* The `doNotTrack` and `preDNAT` and `applyOnForward` fields are meaningful -only when applying policy to a [host endpoint](hostendpoint.mdx). - -Only one of `doNotTrack` and `preDNAT` may be set to `true` (in a given policy). If they are both `false`, or when applying the policy to a -[workload endpoint](workloadendpoint.mdx), -the policy is enforced after connection tracking and any DNAT. - -`applyOnForward` must be set to `true` if either `doNotTrack` or `preDNAT` is -`true` because for a given policy, any untracked rules or rules before DNAT will -in practice apply to forwarded traffic. 
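
As an illustration only (this sketch is not part of the original reference), the example below shows how these host-endpoint fields might be combined on a staged policy; the policy name, the `environment == 'bastion'` selector, and the allowed port are all hypothetical:

```yaml
# Hypothetical sketch of an untracked staged policy for host endpoints.
apiVersion: projectcalico.org/v3
kind: StagedGlobalNetworkPolicy
metadata:
  name: default.untracked-host-ssh    # hypothetical name
spec:
  tier: default
  selector: environment == 'bastion'  # hypothetical host endpoint label
  doNotTrack: true                    # rules applied before connection tracking
  applyOnForward: true                # must be true whenever doNotTrack is true
  types:
    - Ingress
  ingress:
    - action: Allow
      protocol: TCP
      destination:
        ports:
          - 22
```

Because untracked rules bypass connection tracking, a real policy of this kind usually also needs matching rules for return traffic; the sketch above only illustrates how the fields fit together.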
- -See [Using $[prodname] to Secure Host Interfaces](../host-endpoints/index.mdx) -for how `doNotTrack` and `preDNAT` and `applyOnForward` can be useful for host endpoints. - -### Rule - - - -### ICMP - - - -### EntityRule - - - -### Selector - - - -### Ports - - - -### ServiceAccountMatch - - - -### ServiceMatch - - - -### Performance Hints - -Performance hints provide a way to tell $[prodname] about the intended use of the policy so that it may -process it more efficiently. Currently only one hint is defined: - -* `AssumeNeededOnEveryNode`: normally, $[prodname] only calculates a policy's rules and selectors on nodes where - the policy is actually in use (i.e. its selector matches a local endpoint). This saves work in most cases. - The `AssumeNeededOnEveryNode` hint tells $[prodname] to treat the policy as "in use" on *every* node. This is - useful for large policy sets that are known to apply to all (or nearly all) endpoints. It effectively "preloads" - the policy on every node so that there is less work to do when the first endpoint matching the policy shows up. - It also prevents work from being done to tear down the policy when the last endpoint is drained. - -## Supported operations - -| Datastore type | Create/Delete | Update | Get/List | Notes | -| ------------------------ | ------------- | ------ | -------- | ----- | -| Kubernetes API datastore | Yes | Yes | Yes | - -#### List filtering on tiers - -List and watch operations may specify label selectors or field selectors to filter `StagedGlobalNetworkPolicy` resources on tiers returned by the API server. -When no selector is specified, the API server returns all `StagedGlobalNetworkPolicy` resources from all tiers that the user has access to. - -##### Field selector - -When using the field selector, supported operators are `=` and `==` - -The following example shows how to retrieve all `StagedGlobalNetworkPolicy` resources in the default tier: - -```bash -kubectl get stagedglobalnetworkpolicy --field-selector spec.tier=default -``` - -##### Label selector - -When using the label selector, supported operators are `=`, `==` and `IN`. - -The following example shows how to retrieve all `StagedGlobalNetworkPolicy` resources in the `default` and `net-sec` tiers: - -```bash -kubectl get stagedglobalnetworkpolicy -l 'projectcalico.org/tier in (default, net-sec)' -``` diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/stagedkubernetesnetworkpolicy.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/stagedkubernetesnetworkpolicy.mdx deleted file mode 100644 index 793a587a8d..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/stagedkubernetesnetworkpolicy.mdx +++ /dev/null @@ -1,65 +0,0 @@ ---- -description: API for this Calico Enterprise resource. ---- - -# Staged Kubernetes network policy - -A staged kubernetes network policy resource (`StagedKubernetesNetworkPolicy`) represents a staged version -of [Kubernetes network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies). -This is used to preview network behavior before actually enforcing the network policy. Once persisted, this -will create a Kubernetes network policy backed by a $[prodname] -[network policy](networkpolicy.mdx). 
- -For `kubectl` [commands](https://kubernetes.io/docs/reference/kubectl/overview/), the following case-insensitive aliases -may be used to specify the resource type on the CLI: -`stagedkubernetesnetworkpolicy.projectcalico.org`, `stagedkubernetesnetworkpolicies.projectcalico.org` and abbreviations such as -`stagedkubernetesnetworkpolicy.p` and `stagedkubernetesnetworkpolicies.p`. - -## Sample YAML - -Below is a sample policy created from the example policy from the -[Kubernetes NetworkPolicy documentation](https://kubernetes.io/docs/concepts/services-networking/network-policies/#networkpolicy-resource). -The only difference between this policy and the example Kubernetes version is that the `apiVersion` and `kind` are changed -to properly specify a staged Kubernetes network policy. - -```yaml -apiVersion: projectcalico.org/v3 -kind: StagedKubernetesNetworkPolicy -metadata: - name: test-network-policy - namespace: default -spec: - podSelector: - matchLabels: - role: db - policyTypes: - - Ingress - - Egress - ingress: - - from: - - ipBlock: - cidr: 172.17.0.0/16 - except: - - 172.17.1.0/24 - - namespaceSelector: - matchLabels: - project: myproject - - podSelector: - matchLabels: - role: frontend - ports: - - protocol: TCP - port: 6379 - egress: - - to: - - ipBlock: - cidr: 10.0.0.0/24 - ports: - - protocol: TCP - port: 5978 -``` - -## Definition - -See the [Kubernetes NetworkPolicy documentation](https://v1-21.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#networkpolicyspec-v1-networking-k8s-io) -for more information. diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/stagednetworkpolicy.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/stagednetworkpolicy.mdx deleted file mode 100644 index adb0841b09..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/stagednetworkpolicy.mdx +++ /dev/null @@ -1,171 +0,0 @@ ---- -description: API for this Calico Enterprise resource. ---- - -# Staged network policy - -import Servicematch from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_servicematch.mdx'; - -import Serviceaccountmatch from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_serviceaccountmatch.mdx'; - -import Ports from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_ports.mdx'; - -import Selectors from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_selectors.mdx'; - -import Entityrule from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_entityrule.mdx'; - -import Icmp from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_icmp.mdx'; - -import Rule from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_rule.mdx'; - -A staged network policy resource (`StagedNetworkPolicy`) represents an ordered set of rules which are applied -to a collection of endpoints that match a [label selector](#selector). These rules are used to preview network behavior and do -not enforce network traffic. For enforcing network traffic, see [network policy resource](networkpolicy.mdx). - -`StagedNetworkPolicy` is a namespaced resource. `StagedNetworkPolicy` in a specific namespace -only applies to [workload endpoint resources](workloadendpoint.mdx) -in that namespace. Two resources are in the same namespace if the `namespace` -value is set the same on both. 
-See [staged global network policy resource](stagedglobalnetworkpolicy.mdx) for staged non-namespaced network policy. - -`StagedNetworkPolicy` resources can be used to define network connectivity rules between groups of $[prodname] endpoints and host endpoints, and -take precedence over [profile resources](profile.mdx) if any are defined. - -StagedNetworkPolicies are organized into [tiers](tier.mdx), which provide an additional layer of ordering—in particular, note that the `Pass` action skips to the -next [tier](tier.mdx), to enable hierarchical security policy. - -For `kubectl` [commands](https://kubernetes.io/docs/reference/kubectl/overview/), the following case-insensitive aliases -may be used to specify the resource type on the CLI: -`stagednetworkpolicy.projectcalico.org`, `stagednetworkpolicies.projectcalico.org` and abbreviations such as -`stagednetworkpolicy.p` and `stagednetworkpolicies.p`. - -## Sample YAML - -This sample policy allows TCP traffic from `frontend` endpoints to port 6379 on -`database` endpoints. - -```yaml -apiVersion: projectcalico.org/v3 -kind: StagedNetworkPolicy -metadata: - name: internal-access.allow-tcp-6379 - namespace: production -spec: - tier: internal-access - selector: role == 'database' - types: - - Ingress - - Egress - ingress: - - action: Allow - protocol: TCP - source: - selector: role == 'frontend' - destination: - ports: - - 6379 - egress: - - action: Allow -``` - -## Definition - -### Metadata - -| Field | Description | Accepted Values | Schema | Default | -| --------- | ------------------------------------------------------------------ | --------------------------------------------------- | ------ | --------- | -| name | The name of the network policy. Required. | Alphanumeric string with optional `.`, `_`, or `-`. | string | | -| namespace | Namespace provides an additional qualification to a resource name. | | string | "default" | - -### Spec - -| Field | Description | Accepted Values | Schema | Default | -| ---------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------- | --------------------- | --------------------------------------------- | -| order | Controls the order of precedence. $[prodname] applies the policy with the lowest value first. | | float | | -| tier | Name of the [tier](tier.mdx) this policy belongs to. | | string | `default` | -| selector | Selects the endpoints to which this policy applies. | | [selector](#selector) | all() | -| types | Applies the policy based on the direction of the traffic. To apply the policy to inbound traffic, set to `Ingress`. To apply the policy to outbound traffic, set to `Egress`. To apply the policy to both, set to `Ingress, Egress`. | `Ingress`, `Egress` | List of strings | Depends on presence of ingress/egress rules\* | -| ingress | Ordered list of ingress rules applied by policy. | | List of [Rule](#rule) | | -| egress | Ordered list of egress rules applied by this policy. | | List of [Rule](#rule) | | -| serviceAccountSelector | Selects the service account(s) to which this policy applies. Select a specific service account by name using the `projectcalico.org/name` label. | | [selector](#selector) | all() | -| performanceHints | Contains a list of hints to Calico's policy engine to help process the policy more efficiently. 
Hints never change the enforcement behaviour of the policy. The available hints are described [below](#performance-hints). | `AssumeNeededOnEveryNode` | List of strings | | - -\* If `types` has no value, $[prodname] defaults as follows. - -> | Ingress Rules Present | Egress Rules Present | `Types` value | -> | --------------------- | -------------------- | ----------------- | -> | No | No | `Ingress` | -> | Yes | No | `Ingress` | -> | No | Yes | `Egress` | -> | Yes | Yes | `Ingress, Egress` | - -### Rule - - - -### ICMP - - - -### EntityRule - - - -### Selector - - - -### Ports - - - -### ServiceAccountMatch - - - -### ServiceMatch - - - -### Performance Hints - -Performance hints provide a way to tell $[prodname] about the intended use of the policy so that it may -process it more efficiently. Currently only one hint is defined: - -* `AssumeNeededOnEveryNode`: normally, $[prodname] only calculates a policy's rules and selectors on nodes where - the policy is actually in use (i.e. its selector matches a local endpoint). This saves work in most cases. - The `AssumeNeededOnEveryNode` hint tells $[prodname] to treat the policy as "in use" on *every* node. This is - useful for large policy sets that are known to apply to all (or nearly all) endpoints. It effectively "preloads" - the policy on every node so that there is less work to do when the first endpoint matching the policy shows up. - It also prevents work from being done to tear down the policy when the last endpoint is drained. - -## Supported operations - -| Datastore type | Create/Delete | Update | Get/List | Notes | -| ------------------------ | ------------- | ------ | -------- | ----- | -| Kubernetes API datastore | Yes | Yes | Yes | - -#### List filtering on tiers - -List and watch operations may specify label selectors or field selectors to filter `StagedNetworkPolicy` resources on tiers returned by the API server. -When no selector is specified, the API server returns all `StagedNetworkPolicy` resources from all tiers that the user has access to. - -##### Field selector - -When using the field selector, supported operators are `=` and `==` - -The following example shows how to retrieve all `StagedNetworkPolicy` resources in the default tier and in all namespaces: - -```bash -kubectl get stagednetworkpolicy.p --field-selector spec.tier=default --all-namespaces -``` - -##### Label selector - -When using the label selector, supported operators are `=`, `==` and `IN`. - -The following example shows how to retrieve all `StagedNetworkPolicy` resources in the `default` and `net-sec` tiers and in all namespaces: - -```bash -kubectl get stagednetworkpolicy.p -l 'projectcalico.org/tier in (default, net-sec)' --all-namespaces -``` diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/tier.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/tier.mdx deleted file mode 100644 index 052b83ab43..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/tier.mdx +++ /dev/null @@ -1,60 +0,0 @@ ---- -description: API for this Calico Enterprise resource. ---- - -# Tier - -A tier resource (`Tier`) represents an ordered collection of [NetworkPolicies](networkpolicy.mdx) -and/or [GlobalNetworkPolicies](globalnetworkpolicy.mdx). -Tiers are used to divide these policies into groups of different priorities. These policies -are ordered within a Tier: the additional hierarchy of Tiers provides more flexibility -because the `Pass` `action` in a Rule jumps to the next Tier. 
Some example use cases for this are. - -- Allowing privileged users to define security policy that takes precedence over other users. -- Translating hierarchies of physical firewalls directly into $[prodname] policy. - -For `kubectl` [commands](https://kubernetes.io/docs/reference/kubectl/overview/), the following case-insensitive aliases -may be used to specify the resource type on the CLI: -`tier.projectcalico.org`, `tiers.projectcalico.org` and abbreviations such as -`tier.p` and `tiers.p`. - -## How Policy Is Evaluated - -When a new connection is processed by $[prodname], each tier that contains a policy that applies to the endpoint processes the packet. -Tiers are sorted by their `order` - smallest number first. - -Policies in each Tier are then processed in order. - -- If a [NetworkPolicy](networkpolicy.mdx) or [GlobalNetworkPolicy](globalnetworkpolicy.mdx) in the Tier `Allow`s or `Deny`s the packet, then evaluation is done: the packet is handled accordingly. -- If a [NetworkPolicy](networkpolicy.mdx) or [GlobalNetworkPolicy](globalnetworkpolicy.mdx) in the Tier `Pass`es the packet, the next Tier containing a Policy that applies to the endpoint processes the packet. - -If the Tier applies to the endpoint, but takes no action on the packet the packet is dropped. - -If the last Tier applying to the endpoint `Pass`es the packet, that endpoint's [Profiles](profile.mdx) are evaluated. - -## Sample YAML - -```yaml -apiVersion: projectcalico.org/v3 -kind: Tier -metadata: - name: internal-access -spec: - order: 100 -``` - -## Definition - -### Metadata - -| Field | Description | Accepted Values | Schema | -| ----- | --------------------- | --------------- | ------ | -| name | The name of the tier. | | string | - -### Spec - -| Field | Description | Accepted Values | Schema | Default | -| ----- | ------------------------------------------------------------------------------------------------------------------------------------ | --------------- | ------ | --------------------- | -| order | (Optional) Indicates priority of this Tier, with lower order taking precedence. No value indicates highest order (lowest precedence) | | float | `nil` (highest order) | - -All Policies created by $[prodname] orchestrator integrations are created in the default (last) Tier. diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/workloadendpoint.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/workloadendpoint.mdx deleted file mode 100644 index fb50acec01..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/resources/workloadendpoint.mdx +++ /dev/null @@ -1,141 +0,0 @@ ---- -description: API for this Calico Enterprise resource. ---- - -# Workload endpoint - -import Ipnat from '@site/calico-enterprise_versioned_docs/version-3.19-2/_includes/content/_ipnat.mdx'; - -A workload endpoint resource (`WorkloadEndpoint`) represents an interface -connecting a $[prodname] networked container or VM to its host. - -Each endpoint may specify a set of labels and list of profiles that $[prodname] will use -to apply policy to the interface. - -A workload endpoint is a namespaced resource, that means a -[NetworkPolicy](networkpolicy.mdx) -in a specific namespace only applies to the WorkloadEndpoint in that namespace. -Two resources are in the same namespace if the namespace value is set the same -on both. - -This resource is not supported in `kubectl`. 
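
Because `kubectl` cannot read this resource, viewing it typically goes through `calicoctl`. A minimal sketch, assuming `calicoctl` is already configured against the cluster (see also the note below):

```bash
# List workload endpoints in every namespace (read-only).
calicoctl get workloadendpoints --all-namespaces -o wide
```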
- -:::note - -While `calicoctl` allows the user to fully manage Workload Endpoint resources, -the lifecycle of these resources is generally handled by an orchestrator-specific -plugin such as the $[prodname] CNI plugin. In general, we recommend that you only -use `calicoctl` to view this resource type. - -::: - -**Multiple networks** - -If multiple networks are enabled, workload endpoints will have additional labels which can be used in network policy selectors: - -- `projectcalico.org/network`: The name of the network specified in the NetworkAttachmentDefinition. -- `projectcalico.org/network-namespace`: This namespace the network is in. -- `projectcalico.org/network-interface`: The network interface for the workload endpoint. - -For more information, see the [multiple-networks how-to guide](../../networking/configuring/multiple-networks.mdx). - -## Sample YAML - -```yaml -apiVersion: projectcalico.org/v3 -kind: WorkloadEndpoint -metadata: - name: node1-k8s-my--nginx--b1337a-eth0 - namespace: default - labels: - app: frontend - projectcalico.org/namespace: default - projectcalico.org/orchestrator: k8s -spec: - node: node1 - orchestrator: k8s - endpoint: eth0 - containerID: 1337495556942031415926535 - pod: my-nginx-b1337a - endpoint: eth0 - interfaceName: cali0ef24ba - mac: ca:fe:1d:52:bb:e9 - ipNetworks: - - 192.168.0.0/32 - profiles: - - profile1 - ports: - - name: some-port - port: 1234 - protocol: TCP - - name: another-port - port: 5432 - protocol: UDP -``` - -## Definitions - -### Metadata - -| Field | Description | Accepted Values | Schema | Default | -| --------- | ------------------------------------------------------------------ | -------------------------------------------------- | ------ | --------- | -| name | The name of this workload endpoint resource. Required. | Alphanumeric string with optional `.`, `_`, or `-` | string | | -| namespace | Namespace provides an additional qualification to a resource name. | | string | "default" | -| labels | A set of labels to apply to this endpoint. | | map | | - -### Spec - -| Field | Description | Accepted Values | Schema | Default | -| ------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------- | ---------------------------------------------- | ------- | -| workload | The name of the workload to which this endpoint belongs. | | string | | -| orchestrator | The orchestrator that created this endpoint. | | string | | -| node | The node where this endpoint resides. | | string | | -| containerID | The CNI CONTAINER_ID of the workload endpoint. | | string | | -| pod | Kubernetes pod name for this workload endpoint. | | string | | -| endpoint | Container network interface name. | | string | | -| ipNetworks | The CIDRs assigned to the interface. | | List of strings | | -| ipNATs | List of 1:1 NAT mappings to apply to the endpoint. | | List of [IPNATs](#ipnat) | | -| awsElasticIPs | List of AWS Elastic IP addresses that should be considered for this workload; only used for workloads in an AWS-backed IP pool. This should be set via the `cni.projectcalico.org/awsElasticIPs` Pod annotation. | | List of valid IP addresses | | -| ipv4Gateway | The gateway IPv4 address for traffic from the workload. | | string | | -| ipv6Gateway | The gateway IPv6 address for traffic from the workload. | | string | | -| profiles | List of profiles assigned to this endpoint. 
| | List of strings | | -| interfaceName | The name of the host-side interface attached to the workload. | | string | | -| mac | The source MAC address of traffic generated by the workload. | | IEEE 802 MAC-48, EUI-48, or EUI-64 | | -| ports | List on named ports that this workload exposes. | | List of [WorkloadEndpointPorts](#endpointport) | | - -### IPNAT - - - -### EndpointPort - -A WorkloadEndpointPort associates a name with a particular TCP/UDP/SCTP port of the endpoint, allowing it to -be referenced as a named port in [policy rules](networkpolicy.mdx#entityrule). - -| Field | Description | Accepted Values | Schema | Default | -| -------- | -------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------- | ------ | ------- | -| name | The name to attach to this port, allowing it to be referred to in [policy rules](networkpolicy.mdx#entityrule). Names must be unique within an endpoint. | | string | | -| protocol | The protocol of this named port. | `TCP`, `UDP`, `SCTP` | string | | -| port | The workload port number. | `1`-`65535` | int | | -| hostPort | Port on the host that is forwarded to this port. | `1`-`65535` | int | | -| hostIP | IP address on the host on which the hostPort is accessible. | `1`-`65535` | int | | - -:::note - -On their own, WorkloadEndpointPort entries don't result in any change to the connectivity of the port. -They only have an effect if they are referred to in policy. - -::: - -:::note - -The hostPort and hostIP fields are read-only and determined from Kubernetes hostPort configuration. -These fields are used only when host ports are enabled in Calico. - -::: - -## Supported operations - -| Datastore type | Create/Delete | Update | Get/List | Notes | -| --------------------- | ------------- | ------ | -------- | -------------------------------------------------------- | -| Kubernetes API server | No | Yes | Yes | WorkloadEndpoints are directly tied to a Kubernetes pod. | diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/rest-api-reference.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/rest-api-reference.mdx deleted file mode 100644 index 13b5395dee..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/rest-api-reference.mdx +++ /dev/null @@ -1,15 +0,0 @@ ---- -description: REST API reference ---- - -# REST API Reference - -import SwaggerUI from 'swagger-ui-react'; - - - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/reference/support-policy.mdx b/calico-enterprise_versioned_docs/version-3.19-2/reference/support-policy.mdx deleted file mode 100644 index 23c5579298..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/reference/support-policy.mdx +++ /dev/null @@ -1,51 +0,0 @@ ---- -description: Calico Enterprise platform support policy. ---- - -# Platform support policy - -This overview summarizes the $[prodname] policies for supporting and testing third-party platforms for $[prodname]. - -See the documentation for your version of $[prodname] for specific platform versions that are supported. Note that supported versions for platforms are finalized approximately two weeks before a $[prodname] release is GA. - -## Kubernetes - -Upstream Kubernetes releases new versions approximately four times per year as stated in their [Release Cycle documentation](https://kubernetes.io/releases/release/). 
- -$[prodname] supports the last three versions of Kubernetes at time of $[prodname] release. For example, if 1.22 is the latest version at time of release, then 1.22, 1.21, and 1.20 are the supported versions upon release. - -## Microsoft Azure Kubernetes Services (AKS) - -$[prodname] supports the latest version of AKS at time of $[prodname] release, as stated in the system requirements. - -## Amazon Elastic Kubernetes Service (EKS) - -$[prodname] supports the latest version of EKS at time of $[prodname] release, as stated in the system requirements. - -## Google Kubernetes Engine (GKE) - -$[prodname] supports the latest version of [GKE regular channel](https://cloud.google.com/kubernetes-engine/docs/concepts/release-channels) at time of $[prodname] release, as stated in the system requirements. - -## Mirantis Kubernetes Engine (MKE) - -$[prodname] supports the latest version of MKE at time of $[prodname] release, as stated in the system requirements. - -## Red Hat OpenShift Container Platform (OCP) - -Red Hat releases a new version of OpenShift approximately every four months according to their lifecycle support policy. Even numbered releases (4.6, 4.8, etc.) are Extended Update Support (EUS) releases that provide an upgrade path to the next EUS. - -If the last three versions of OpenShift contain two EUS releases, then $[prodname] will support all three versions upon release to provide customers with an easier upgrade path. If the last three versions contain only one EUS, then $[prodname] will only support the last two versions of OpenShift upon release. - -See the OpenShift system requirements for more information on a specific release of $[prodname]. - -## Rancher Kubernetes Engine (RKE) - -$[prodname] supports the latest version of RKE at time of $[prodname] release, as stated in the system requirements. - -## Rancher's Next Generation Kubernetes Distribution (RKE2) - -$[prodname] supports the versions of RKE2 that align with supported kubernetes versions in $[prodname] release, as stated in the system requirements. - -## Tanzu Kubernetes Grid (TKG) - -See the Tanzu Kubernetes Grid (TKG) system requirements for more information on a specific release of $[prodname]. diff --git a/calico-enterprise_versioned_docs/version-3.19-2/release-notes/index.mdx b/calico-enterprise_versioned_docs/version-3.19-2/release-notes/index.mdx deleted file mode 100644 index 6a2a8dfed7..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/release-notes/index.mdx +++ /dev/null @@ -1,306 +0,0 @@ ---- -description: What's new, and why features provide value for upgrading. -title: Release notes ---- - -import CodeBlock from '@theme/CodeBlock'; - -# Calico Enterprise 3.19 release notes - -Learn about the new features, bug fixes, and other updates in this release of $[prodname]. - -This version of Calico Enterprise is based on [Calico Open Source $[openSourceVersion]](https://docs.tigera.io/calico/$[openSourceVersion]/release-notes). - -## New features and enhancements - -### Improved flow log filtering for destination domains - -We’ve updated the Felix parameter (`dest_domains`) for DNS policy to make it easy to find only domain names that the deployment connected to (not all the domain names that got translated to the same IP address). -For more information, see [Flow log data types](../observability/elastic/flow/datatypes.mdx). 
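
To make the improvement concrete, here is a hypothetical egress rule of the kind that produces `dest_domains` entries in flow logs; the policy name, selector, and domain names are invented for illustration:

```yaml
# Hypothetical DNS policy rule; traffic allowed by it is reported with dest_domains.
apiVersion: projectcalico.org/v3
kind: GlobalNetworkPolicy
metadata:
  name: default.allow-egress-to-known-domains   # hypothetical name
spec:
  tier: default
  selector: app == 'frontend'                   # hypothetical workload label
  types:
    - Egress
  egress:
    - action: Allow
      protocol: TCP
      destination:
        domains:
          - 'api.example.com'
          - '*.example.org'
```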

### New flow logs panel on Endpoints page

We've updated the Endpoints page in the web console with a new flow logs panel so you can view and filter Endpoints associated with denied traffic. Flow log metadata includes the source, destination, ports, protocols, and other key fields. We've also updated the Policy Board to highlight policies with denied traffic.

### Improvements to security events

We've added the following improvements to the [Security events list](../threat/security-event-management):

- Jira and Slack webhook integration for security event alerts

  By [configuring security event alerts](../threat/configuring-webhooks), you can push security event alerts to Slack, Jira, or an external HTTP endpoint of your choice.
  This lets incident response and security teams use native tools to respond to security event alerts.

- Added threat feed alerts

  If you have implemented global threat feeds for suspicious activity (domains or suspicious IPs), alerts are now visible in the Security Overview dashboard.
  For more information on threat feeds, see [Trace and block suspicious IPs](../threat/suspicious-ips).

### Security events dashboard

A new dashboard summarizes security events and helps practitioners easily understand how events map across namespaces, MITRE techniques, event types, and attack phases. This allows first responders to quickly make sense of potential threats, engage the right stakeholders, and start the incident response and investigation process.

For more information, see [Security event management](../threat/security-event-management.mdx).

### Exceptions for security events

$[prodname] now allows users to create exceptions for Security Events with varying levels of scope, from excluding an entire namespace to a specific deployment or workload. This gives operators a way to tune the runtime threat detection they have deployed and focus their investigations and response on critical applications and infrastructure.

For more information, see [Security event management](../threat/security-event-management.mdx).

### New flow logs panel for Endpoints and View Policy pages

$[prodname] has added new entry points to view flow logs directly from the Endpoints listing and View Policy pages in the UI.
Users can easily see which endpoints are involved in denied traffic, filter on those workloads, and click a link to open a panel that shows associated flows.
A similar link has been added for View Policy pages, which allows users to quickly see the flows that have been recently evaluated by that policy to make sense of denied traffic or updates to rules.

### Security Events in Service Graph

$[prodname] now includes a new tab for Security Events, which takes the place of the Alerts tab. Most runtime threat detection features now generate Security Events, and their inclusion in Service Graph enables users to automatically filter events based on where they are occurring in a cluster.

### Security Events IP addresses enriched with ASN and geolocation

For security events that contain external IP addresses, $[prodname] now automatically performs a geolocation lookup. Understanding the country of origin for an IP address can often be the quickest and easiest way to distinguish legitimate traffic from malicious traffic.

### Extend Workload-based WAF to Ingress Gateways

This latest release enables operators to plug a modified, simplified version of WAF into their own instances of Envoy.
This allows users to deploy this version of WAF at the edge of their cluster integrated with an Ingress Gateway (if based on Envoy), with fully customizable rules based on OWASP CoreRuleSet 4.0 and powered by the Coraza engine.

For more information, see [Deploying WAF with an ingress gateway](../threat/deploying-waf-ingress-gateway.mdx).

### ARM64 support

This release expands our support to clusters with nodes running ARM64-based architectures.

### Specifying resource requests and limits in $[prodname] components

$[prodname] now provides the ability to set resource requests and limits for the components that run as part of $[prodname]. See the documentation for specific guidance on setting these limits.

## Deprecated and removed features

* The FIPS mode feature is removed in this release.
* The AWS security groups integration is removed in this release.
* The ingress log collection feature is removed in this release.
* The manual installation method for Windows is deprecated and will be removed in a future release.
  The recommended installation method is now [operator-based](../getting-started/install-on-clusters/windows-calico/operator).

## Technology Preview features

- [Web application firewall](../threat/web-application-firewall)

  Protect cloud-native applications from application layer attacks.

- [DNS policy for Windows](../getting-started/install-on-clusters/windows-calico/limitations#dns-policy-limitations)

  Use domain names in policies to identify services outside the cluster, which is often operationally simpler and more robust than using IP addresses.

## Bug fixes

* Updates have been made to the Calico API server to ensure that Calico network policies can be synced with GitOps tools such as ArgoCD.

## Known issues

* Flow logs for Windows workloads currently do not display entries with a Deny action.
* Before upgrading a $[prodname] cluster on MKE v3.6 to the latest $[prodname] version: 1) upgrade MKE from 3.6 to 3.7, then 2) upgrade $[prodname].
* L7 logs with the source name `pvt` are not visible in Service Graph.
* *Multi-cluster management users only*. If the `manager-tls` and `internal-manager-tls` secrets have overlapping DNS names, components such as `es-calico-kube-controllers` will log certificate errors. If you have previously installed a version older than v3.13.0 and never changed your manager-tls secret from the tigera-operator namespace, you must delete both of these secrets. This applies to you if the following command prints a certificate: `$ kubectl get secret manager-tls -n tigera-operator -o "jsonpath={.data['cert']}"`.
* Upgrading to $[prodname] 3.18.0 on Rancher/RKE from $[prodname] 3.13.0 currently requires manually terminating the calico-node container for an upgrade to proceed.
* Calico panics if kube-proxy or other components are using native `nftables` rules instead of the `iptables-nft` compatibility shim. Until Calico supports native nftables mode, we recommend that you continue to use the iptables-nft compatibility layer for all components. (The compatibility layer was the only option before Kubernetes v1.29 added alpha-level `nftables` support.) Do not run Calico in "legacy" iptables mode on a system that is also using `nftables`.
Although this combination does not panic or fail (at least on kernels that support both), the interaction between `iptables` "legacy" mode and `nftables` is confusing: both `iptables` and `nftables` rules can be executed on the same packet, leading to policy verdicts being "overturned".
* When a tier order is set to the maximum float value (1.7976931348623157e+308), policy re-ordering in the UI may not work properly. Since the `namespace-isolation` tier has this value by default, policy recommendation users are affected. To work around this issue, edit any tier that has this value for the order. For example, use `kubectl edit tier namespace-isolation` and set the order to `10000`.
* The Linseed deployment needs to be manually restarted after an upgrade. Without a restart, Linseed can't ingest data because it can't authenticate with Elastic.
* Some application layer features are not working as expected for $[prodname] installations with the following deployment types:

  * AKS clusters with Azure CNI for networking and $[prodname] for network policy
  * RKE2 clusters installed with Rancher UI

  During installation, for these deployment types, `kubeletVolumePluginPath` is set to `None` in the Installation CR, causing all application layer features to stop working.
  The affected features include web application firewalls, application layer policies, and L7 logging.
  As a workaround, you can restore the default value by running the following command on an affected cluster:

  ```bash
  kubectl patch installation.tigera.io default --type=merge -p '{"spec":{"kubeletVolumePluginPath":"/var/lib/kubelet"}}'
  ```
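
  If you want to confirm the restored value afterwards (an optional check that is not part of the original workaround), a read-back along these lines should print the default path:

  ```bash
  # Expect /var/lib/kubelet after the patch has been applied.
  kubectl get installation.tigera.io default -o jsonpath='{.spec.kubeletVolumePluginPath}'
  ```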

## Updating

:::important

$[prodname] 3.19 contains breaking changes for installations that use the Calico API server.

:::

* ***Breaking change:*** Upgrading from Calico Enterprise 3.18 or earlier $[prodname] will alter the UID of all `projectcalico.org/v3` resources.
  If you're using the Calico API server, you must restart any controllers, including `kube-controller-manager`, that manage these resources after the upgrade.
  This change addresses an issue where duplicate UIDs on different API resources could disrupt Kubernetes garbage collection.

## Release details

### Calico Enterprise 3.19.0-1.0 (early preview)

February 2, 2024

Calico Enterprise 3.19.0-1.0 is now available as an early preview release.
This release is for previewing and testing purposes only.
It is not supported for use in production.

### Calico Enterprise 3.19.0-2.0 (early preview)

May 9, 2024

Calico Enterprise 3.19.0-2.0 is now available as an early preview release.
This release is for previewing and testing purposes only.
It is not supported for use in production.

### Calico Enterprise 3.19.1 GA

June 20, 2024

Calico Enterprise 3.19.1 is now available as a GA release.

This release is supported for use in production.

#### Updates

* License usage data is now collected and stored locally in the cluster.
* Curator was removed from Calico Enterprise due to its limited reliability. To ensure that your Elasticsearch does not run out of space, please consult our documentation on [Data retention](../observability/elastic/retention.mdx) and [Prometheus alerts for Elasticsearch](../operations/monitor/metrics/elasticsearch-and-fluentd-metrics#create-prometheus-alerts-for-elasticsearch.mdx).

#### Bug fixes

* Fixes Security Event Exceptions not applying properly when using Multi-Cluster Management.
* Fixes an issue where Egress Gateways don't properly handle changes in a pod's IP address.
* Fixes Managed Cluster connection status not always being correctly reported.
* Verify CNI plugin installed correctly.
* Security updates.

To update an existing installation of Calico Enterprise 3.19, see [Install a patch release](../getting-started/manifest-archive.mdx).

### Calico Enterprise 3.19.1 operator-only bug fix release

July 24, 2024

Calico Enterprise 3.19.1 is now available with an update to the Tigera Operator.
The Tigera Operator version has been updated to version 1.34.2.
No other components have been changed.

#### Bug fixes

* Previously, for AKS clusters running Kubernetes 1.29 or higher, migrating from Calico Open Source to Calico Enterprise resulted in failure.
  AKS began applying an image set to clusters with Kubernetes 1.29, and this change conflicted with operations performed by the Tigera Operator during migration.
  We fixed the issue by modifying how the Tigera Operator checks for image sets during migrations to Calico Enterprise.
* Removed a mutual dependency between logstorage and other components that could result in a degraded TigeraStatus if certificates are missing required key usages.

### Calico Enterprise 3.19.2 bug fix release

September 4, 2024

Calico Enterprise 3.19.2 is now available.
This release includes bug fixes and improvements.

#### Bug fixes

* Fixed an issue where Felix would panic when trying to resync a temporary IP set. Temporary IP sets are created in certain scenarios after previous failures.
* Reduce lock contention between the process info cache and the flow logs collector.
This avoids slowing down the collector when the info cache is under update load.
* Fixed excessive CPU usage in the process name lookup cache if a PID was unknown.
* Updates the default behaviour of list requests without a tier field selector or label selector on globalnetworkpolicy, stagedglobalnetworkpolicy, networkpolicy, and stagednetworkpolicy resources to return all policies available to the user, instead of returning only the policies in the default tier. This change makes it possible to manage policies with GitOps tools like ArgoCD.
* Fixed a Felix panic when using non-default BPF map sizes. The size was not updated in all places, resulting in failures to attach programs.
* Fixed dual ToR startup when using the bgp-layout ConfigMap to assign AS numbers.
* Added support for eBPF on MKE.
* Security updates.

To update an existing installation of Calico Enterprise 3.19, see [Install a patch release](../getting-started/manifest-archive.mdx).

### Calico Enterprise 3.19.3 bug fix release

October 22, 2024

Calico Enterprise 3.19.3 is now available.
This release includes bug fixes and improvements.

#### Bug fixes

* Added a new Felix Configuration option, `IPForwarding`, which allows for preventing Felix from enabling IP forwarding on systems that are only using Calico for host protection (and hence don't need to forward traffic to workloads).
* Fixed a routing issue where CrossSubnet routes were not updated when the main IP moved interfaces.
* Fixed marking of return traffic from an external network to egress gateways.
* Fixed a permission issue preventing the operator from updating the TigeraStatus for Egress Gateways.
* Fixed switching from iptables to eBPF mode with TCP stats turned on.
* Fixed TCP stats in eBPF mode.
* Added the `X-Frame-Options: DENY` header to Kibana.
* Security updates.

To update an existing installation of Calico Enterprise 3.19, see [Install a patch release](../getting-started/manifest-archive.mdx).

### Calico Enterprise 3.19.4 bug fix release

November 7, 2024

Calico Enterprise 3.19.4 is now available.
This release includes bug fixes and improvements.

#### Bug fixes

* Fixed an issue where excessive API calls were made by the operator when users are running a large number of egress gateways.
* Security updates.

To update an existing installation of Calico Enterprise 3.19, see [Install a patch release](../getting-started/manifest-archive.mdx).

### Calico Enterprise 3.19.5 bug fix release

March 3, 2025

Calico Enterprise 3.19.5 is now available.
This release includes bug fixes and improvements.

#### Bug fixes

* Security updates.

To update an existing installation of Calico Enterprise 3.19, see [Install a patch release](../getting-started/manifest-archive.mdx).

### Calico Enterprise 3.19.6 bug fix release

April 24, 2025

Calico Enterprise 3.19.6 is now available.
This release includes bug fixes and improvements.

#### Bug fixes

* Fixes an issue where Voltron was not refreshing its own service account token, leading to authorization errors on clusters that enforce service account token expiry.
* The federated services controller now packs backing Endpoints into EndpointSubsets more efficiently. This improves Kubernetes control plane performance for services which select a large number of pods.
* Security updates.

To update an existing installation of Calico Enterprise 3.19, see [Install a patch release](../getting-started/manifest-archive.mdx).
- -### Calico Enterprise 3.19.7 bug fix release - -June 9, 2025 - -Calico Enterprise 3.19.7 is now available. -This release includes bug fixes and improvements. - -#### Bug fixes - -* Suppress connection reset errors when failing to forward data when using multi-cluster management. -* Fixed mounting cgroupv2 for connect time load balancing on the BPF dataplane. -* Security updates. - -To update an existing installation of Calico Enterprise 3.19, see [Install a patch release](../getting-started/manifest-archive.mdx). - -### Calico Enterprise 3.19.8 bug fix release - -August 22, 2025 - -Calico Enterprise 3.19.8 is now available. -This release includes bug fixes and improvements. -*IMPORTANT!* This is the last patch that will receive security related updates. We urge users to upgrade to a more recent minor version. - -#### Bug fixes - -* Adds the silent-callback URL to the redirectURIs list for Dex, so the UI can prolong a session for the user. -* Increase the `lifecycle.poll_interval` for Elasticsearch. In a case where a cluster has many indices, the default setting can cause performance issues. -* Security updates. - -To update an existing installation of Calico Enterprise 3.19, see [Install a patch release](../getting-started/manifest-archive.mdx). diff --git a/calico-enterprise_versioned_docs/version-3.19-2/releases.json b/calico-enterprise_versioned_docs/version-3.19-2/releases.json deleted file mode 100644 index 9268c3da7a..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/releases.json +++ /dev/null @@ -1,2513 +0,0 @@ -[ - { - "title": "v3.19.8", - "tigera-operator": { - "version": "v1.34.14", - "image": "tigera/operator", - "registry": "quay.io" - }, - "calico": { - "minor_version": "v3.28", - "archive_path": "archive" - }, - "components": { - "alertmanager": { - "version": "v3.19.8", - "image": "tigera/alertmanager" - }, - "calicoctl": { - "version": "v3.19.8", - "image": "tigera/calicoctl" - }, - "calicoq": { - "version": "v3.19.8", - "image": "tigera/calicoq" - }, - "cnx-apiserver": { - "version": "v3.19.8", - "image": "tigera/cnx-apiserver" - }, - "cnx-kube-controllers": { - "version": "v3.19.8", - "image": "tigera/kube-controllers" - }, - "cnx-manager": { - "version": "v3.19.8", - "image": "tigera/cnx-manager" - }, - "cnx-node": { - "version": "v3.19.8", - "image": "tigera/cnx-node" - }, - "cnx-node-windows": { - "version": "v3.19.8", - "image": "tigera/cnx-node-windows" - }, - "cnx-queryserver": { - "version": "v3.19.8", - "image": "tigera/cnx-queryserver" - }, - "compliance-benchmarker": { - "version": "v3.19.8", - "image": "tigera/compliance-benchmarker" - }, - "compliance-controller": { - "version": "v3.19.8", - "image": "tigera/compliance-controller" - }, - "compliance-reporter": { - "version": "v3.19.8", - "image": "tigera/compliance-reporter" - }, - "compliance-server": { - "version": "v3.19.8", - "image": "tigera/compliance-server" - }, - "compliance-snapshotter": { - "version": "v3.19.8", - "image": "tigera/compliance-snapshotter" - }, - "coreos-alertmanager": { - "version": "v0.28.1" - }, - "coreos-config-reloader": { - "version": "v0.76.2" - }, - "coreos-prometheus": { - "version": "v2.55.1" - }, - "coreos-prometheus-operator": { - "version": "v0.76.2" - }, - "csi": { - "version": "v3.19.8", - "image": "tigera/csi" - }, - "csi-node-driver-registrar": { - "version": "v3.19.8", - "image": "tigera/node-driver-registrar" - }, - "deep-packet-inspection": { - "version": "v3.19.8", - "image": "tigera/deep-packet-inspection" - }, - "dex": { - "version": 
"v3.19.8", - "image": "tigera/dex" - }, - "dikastes": { - "version": "v3.19.8", - "image": "tigera/dikastes" - }, - "eck-elasticsearch": { - "version": "7.17.29" - }, - "eck-elasticsearch-operator": { - "version": "2.16.1" - }, - "eck-kibana": { - "version": "7.17.29" - }, - "egress-gateway": { - "version": "v3.19.8", - "image": "tigera/egress-gateway" - }, - "elastic-tsee-installer": { - "version": "v3.19.8", - "image": "tigera/intrusion-detection-job-installer" - }, - "elasticsearch": { - "version": "v3.19.8", - "image": "tigera/elasticsearch" - }, - "elasticsearch-metrics": { - "version": "v3.19.8", - "image": "tigera/elasticsearch-metrics" - }, - "elasticsearch-operator": { - "version": "v3.19.8", - "image": "tigera/eck-operator" - }, - "envoy": { - "version": "v3.19.8", - "image": "tigera/envoy" - }, - "envoy-init": { - "version": "v3.19.8", - "image": "tigera/envoy-init" - }, - "es-gateway": { - "version": "v3.19.8", - "image": "tigera/es-gateway" - }, - "es-proxy": { - "version": "v3.19.8", - "image": "tigera/es-proxy" - }, - "firewall-integration": { - "version": "v3.19.8", - "image": "tigera/firewall-integration" - }, - "flexvol": { - "version": "v3.19.8", - "image": "tigera/pod2daemon-flexvol" - }, - "fluentd": { - "version": "v3.19.8", - "image": "tigera/fluentd" - }, - "fluentd-windows": { - "version": "v3.19.8", - "image": "tigera/fluentd-windows" - }, - "guardian": { - "version": "v3.19.8", - "image": "tigera/guardian" - }, - "honeypod": { - "version": "v3.19.8", - "image": "tigera/honeypod" - }, - "honeypod-controller": { - "version": "v3.19.8", - "image": "tigera/honeypod-controller" - }, - "honeypod-exp-service": { - "version": "v3.19.8", - "image": "tigera/honeypod-exp-service" - }, - "ingress-collector": { - "version": "v3.19.8", - "image": "tigera/ingress-collector" - }, - "intrusion-detection-controller": { - "version": "v3.19.8", - "image": "tigera/intrusion-detection-controller" - }, - "key-cert-provisioner": { - "version": "v3.19.8", - "image": "tigera/key-cert-provisioner" - }, - "kibana": { - "version": "v3.19.8", - "image": "tigera/kibana" - }, - "l7-collector": { - "version": "v3.19.8", - "image": "tigera/l7-collector" - }, - "license-agent": { - "version": "v3.19.8", - "image": "tigera/license-agent" - }, - "linseed": { - "version": "v3.19.8", - "image": "tigera/linseed" - }, - "packetcapture": { - "version": "v3.19.8", - "image": "tigera/packetcapture" - }, - "policy-recommendation": { - "version": "v3.19.8", - "image": "tigera/policy-recommendation" - }, - "prometheus": { - "version": "v3.19.8", - "image": "tigera/prometheus" - }, - "prometheus-config-reloader": { - "version": "v3.19.8", - "image": "tigera/prometheus-config-reloader" - }, - "prometheus-operator": { - "version": "v3.19.8", - "image": "tigera/prometheus-operator" - }, - "tigera-cni": { - "version": "v3.19.8", - "image": "tigera/cni" - }, - "tigera-cni-windows": { - "version": "v3.19.8", - "image": "tigera/cni-windows" - }, - "tigera-prometheus-service": { - "version": "v3.19.8", - "image": "tigera/prometheus-service" - }, - "typha": { - "version": "v3.19.8", - "image": "tigera/typha" - }, - "voltron": { - "version": "v3.19.8", - "image": "tigera/voltron" - }, - "webhooks-processor": { - "version": "v3.19.8", - "image": "tigera/webhooks-processor" - } - } - }, - { - "title": "v3.19.7", - "tigera-operator": { - "version": "v1.34.12", - "image": "tigera/operator", - "registry": "quay.io" - }, - "calico": { - "minor_version": "v3.28", - "archive_path": "archive" - }, - "components": { - 
"alertmanager": { - "version": "v3.19.7", - "image": "tigera/alertmanager" - }, - "calicoctl": { - "version": "v3.19.7", - "image": "tigera/calicoctl" - }, - "calicoq": { - "version": "v3.19.7", - "image": "tigera/calicoq" - }, - "cnx-apiserver": { - "version": "v3.19.7", - "image": "tigera/cnx-apiserver" - }, - "cnx-kube-controllers": { - "version": "v3.19.7", - "image": "tigera/kube-controllers" - }, - "cnx-manager": { - "version": "v3.19.7", - "image": "tigera/cnx-manager" - }, - "cnx-node": { - "version": "v3.19.7", - "image": "tigera/cnx-node" - }, - "cnx-node-windows": { - "version": "v3.19.7", - "image": "tigera/cnx-node-windows" - }, - "cnx-queryserver": { - "version": "v3.19.7", - "image": "tigera/cnx-queryserver" - }, - "compliance-benchmarker": { - "version": "v3.19.7", - "image": "tigera/compliance-benchmarker" - }, - "compliance-controller": { - "version": "v3.19.7", - "image": "tigera/compliance-controller" - }, - "compliance-reporter": { - "version": "v3.19.7", - "image": "tigera/compliance-reporter" - }, - "compliance-server": { - "version": "v3.19.7", - "image": "tigera/compliance-server" - }, - "compliance-snapshotter": { - "version": "v3.19.7", - "image": "tigera/compliance-snapshotter" - }, - "coreos-alertmanager": { - "version": "v0.28.1" - }, - "coreos-config-reloader": { - "version": "v0.76.2" - }, - "coreos-prometheus": { - "version": "v2.55.1" - }, - "coreos-prometheus-operator": { - "version": "v0.76.2" - }, - "csi": { - "version": "v3.19.7", - "image": "tigera/csi" - }, - "csi-node-driver-registrar": { - "version": "v3.19.7", - "image": "tigera/node-driver-registrar" - }, - "deep-packet-inspection": { - "version": "v3.19.7", - "image": "tigera/deep-packet-inspection" - }, - "dex": { - "version": "v3.19.7", - "image": "tigera/dex" - }, - "dikastes": { - "version": "v3.19.7", - "image": "tigera/dikastes" - }, - "eck-elasticsearch": { - "version": "7.17.28" - }, - "eck-elasticsearch-operator": { - "version": "2.16.1" - }, - "eck-kibana": { - "version": "7.17.28" - }, - "egress-gateway": { - "version": "v3.19.7", - "image": "tigera/egress-gateway" - }, - "elastic-tsee-installer": { - "version": "v3.19.7", - "image": "tigera/intrusion-detection-job-installer" - }, - "elasticsearch": { - "version": "v3.19.7", - "image": "tigera/elasticsearch" - }, - "elasticsearch-metrics": { - "version": "v3.19.7", - "image": "tigera/elasticsearch-metrics" - }, - "elasticsearch-operator": { - "version": "v3.19.7", - "image": "tigera/eck-operator" - }, - "envoy": { - "version": "v3.19.7", - "image": "tigera/envoy" - }, - "envoy-init": { - "version": "v3.19.7", - "image": "tigera/envoy-init" - }, - "es-gateway": { - "version": "v3.19.7", - "image": "tigera/es-gateway" - }, - "es-proxy": { - "version": "v3.19.7", - "image": "tigera/es-proxy" - }, - "firewall-integration": { - "version": "v3.19.7", - "image": "tigera/firewall-integration" - }, - "flexvol": { - "version": "v3.19.7", - "image": "tigera/pod2daemon-flexvol" - }, - "fluentd": { - "version": "v3.19.7", - "image": "tigera/fluentd" - }, - "fluentd-windows": { - "version": "v3.19.7", - "image": "tigera/fluentd-windows" - }, - "guardian": { - "version": "v3.19.7", - "image": "tigera/guardian" - }, - "honeypod": { - "version": "v3.19.7", - "image": "tigera/honeypod" - }, - "honeypod-controller": { - "version": "v3.19.7", - "image": "tigera/honeypod-controller" - }, - "honeypod-exp-service": { - "version": "v3.19.7", - "image": "tigera/honeypod-exp-service" - }, - "ingress-collector": { - "version": "v3.19.7", - "image": 
"tigera/ingress-collector" - }, - "intrusion-detection-controller": { - "version": "v3.19.7", - "image": "tigera/intrusion-detection-controller" - }, - "key-cert-provisioner": { - "version": "v3.19.7", - "image": "tigera/key-cert-provisioner" - }, - "kibana": { - "version": "v3.19.7", - "image": "tigera/kibana" - }, - "l7-collector": { - "version": "v3.19.7", - "image": "tigera/l7-collector" - }, - "license-agent": { - "version": "v3.19.7", - "image": "tigera/license-agent" - }, - "linseed": { - "version": "v3.19.7", - "image": "tigera/linseed" - }, - "packetcapture": { - "version": "v3.19.7", - "image": "tigera/packetcapture" - }, - "policy-recommendation": { - "version": "v3.19.7", - "image": "tigera/policy-recommendation" - }, - "prometheus": { - "version": "v3.19.7", - "image": "tigera/prometheus" - }, - "prometheus-config-reloader": { - "version": "v3.19.7", - "image": "tigera/prometheus-config-reloader" - }, - "prometheus-operator": { - "version": "v3.19.7", - "image": "tigera/prometheus-operator" - }, - "tigera-cni": { - "version": "v3.19.7", - "image": "tigera/cni" - }, - "tigera-cni-windows": { - "version": "v3.19.7", - "image": "tigera/cni-windows" - }, - "tigera-prometheus-service": { - "version": "v3.19.7", - "image": "tigera/prometheus-service" - }, - "typha": { - "version": "v3.19.7", - "image": "tigera/typha" - }, - "voltron": { - "version": "v3.19.7", - "image": "tigera/voltron" - }, - "webhooks-processor": { - "version": "v3.19.7", - "image": "tigera/webhooks-processor" - } - } - }, - { - "title": "v3.19.6", - "tigera-operator": { - "image": "tigera/operator", - "version": "v1.34.11", - "registry": "quay.io" - }, - "calico": { - "minor_version": "v3.28", - "archive_path": "archive" - }, - "components": { - "cnx-manager": { - "image": "tigera/cnx-manager", - "version": "v3.19.6" - }, - "voltron": { - "image": "tigera/voltron", - "version": "v3.19.6" - }, - "guardian": { - "image": "tigera/guardian", - "version": "v3.19.6" - }, - "cnx-apiserver": { - "image": "tigera/cnx-apiserver", - "version": "v3.19.6" - }, - "cnx-queryserver": { - "image": "tigera/cnx-queryserver", - "version": "v3.19.6" - }, - "cnx-kube-controllers": { - "image": "tigera/kube-controllers", - "version": "v3.19.6" - }, - "calicoq": { - "image": "tigera/calicoq", - "version": "v3.19.6" - }, - "typha": { - "image": "tigera/typha", - "version": "v3.19.6" - }, - "calicoctl": { - "image": "tigera/calicoctl", - "version": "v3.19.6" - }, - "cnx-node": { - "image": "tigera/cnx-node", - "version": "v3.19.6" - }, - "cnx-node-windows": { - "image": "tigera/cnx-node-windows", - "version": "v3.19.6" - }, - "dikastes": { - "image": "tigera/dikastes", - "version": "v3.19.6" - }, - "dex": { - "image": "tigera/dex", - "version": "v3.19.6" - }, - "fluentd": { - "image": "tigera/fluentd", - "version": "v3.19.6" - }, - "fluentd-windows": { - "image": "tigera/fluentd-windows", - "version": "v3.19.6" - }, - "es-proxy": { - "image": "tigera/es-proxy", - "version": "v3.19.6" - }, - "eck-kibana": { - "version": "7.17.28" - }, - "kibana": { - "image": "tigera/kibana", - "version": "v3.19.6" - }, - "eck-elasticsearch": { - "version": "7.17.28" - }, - "elasticsearch": { - "image": "tigera/elasticsearch", - "version": "v3.19.6" - }, - "elastic-tsee-installer": { - "image": "tigera/intrusion-detection-job-installer", - "version": "v3.19.6" - }, - "intrusion-detection-controller": { - "image": "tigera/intrusion-detection-controller", - "version": "v3.19.6" - }, - "compliance-controller": { - "image": "tigera/compliance-controller", - 
"version": "v3.19.6" - }, - "compliance-reporter": { - "image": "tigera/compliance-reporter", - "version": "v3.19.6" - }, - "compliance-snapshotter": { - "image": "tigera/compliance-snapshotter", - "version": "v3.19.6" - }, - "compliance-server": { - "image": "tigera/compliance-server", - "version": "v3.19.6" - }, - "compliance-benchmarker": { - "image": "tigera/compliance-benchmarker", - "version": "v3.19.6" - }, - "ingress-collector": { - "image": "tigera/ingress-collector", - "version": "v3.19.6" - }, - "l7-collector": { - "image": "tigera/l7-collector", - "version": "v3.19.6" - }, - "license-agent": { - "image": "tigera/license-agent", - "version": "v3.19.6" - }, - "tigera-cni": { - "image": "tigera/cni", - "version": "v3.19.6" - }, - "tigera-cni-windows": { - "image": "tigera/cni-windows", - "version": "v3.19.6" - }, - "firewall-integration": { - "image": "tigera/firewall-integration", - "version": "v3.19.6" - }, - "egress-gateway": { - "image": "tigera/egress-gateway", - "version": "v3.19.6" - }, - "honeypod": { - "image": "tigera/honeypod", - "version": "v3.19.6" - }, - "honeypod-exp-service": { - "image": "tigera/honeypod-exp-service", - "version": "v3.19.6" - }, - "honeypod-controller": { - "image": "tigera/honeypod-controller", - "version": "v3.19.6" - }, - "key-cert-provisioner": { - "image": "tigera/key-cert-provisioner", - "version": "v3.19.6" - }, - "elasticsearch-metrics": { - "image": "tigera/elasticsearch-metrics", - "version": "v3.19.6" - }, - "packetcapture": { - "image": "tigera/packetcapture", - "version": "v3.19.6" - }, - "policy-recommendation": { - "image": "tigera/policy-recommendation", - "version": "v3.19.6" - }, - "prometheus": { - "image": "tigera/prometheus", - "version": "v3.19.6" - }, - "coreos-prometheus": { - "version": "v2.55.1" - }, - "coreos-prometheus-operator": { - "version": "v0.76.2" - }, - "coreos-config-reloader": { - "version": "v0.76.2" - }, - "prometheus-operator": { - "image": "tigera/prometheus-operator", - "version": "v3.19.6" - }, - "prometheus-config-reloader": { - "image": "tigera/prometheus-config-reloader", - "version": "v3.19.6" - }, - "tigera-prometheus-service": { - "image": "tigera/prometheus-service", - "version": "v3.19.6" - }, - "es-gateway": { - "image": "tigera/es-gateway", - "version": "v3.19.6" - }, - "linseed": { - "image": "tigera/linseed", - "version": "v3.19.6" - }, - "deep-packet-inspection": { - "image": "tigera/deep-packet-inspection", - "version": "v3.19.6" - }, - "eck-elasticsearch-operator": { - "version": "2.16.1" - }, - "elasticsearch-operator": { - "image": "tigera/eck-operator", - "version": "v3.19.6" - }, - "coreos-alertmanager": { - "version": "v0.28.1" - }, - "alertmanager": { - "image": "tigera/alertmanager", - "version": "v3.19.6" - }, - "envoy": { - "image": "tigera/envoy", - "version": "v3.19.6" - }, - "envoy-init": { - "image": "tigera/envoy-init", - "version": "v3.19.6" - }, - "webhooks-processor": { - "image": "tigera/webhooks-processor", - "version": "v3.19.6" - }, - "flexvol": { - "image": "tigera/pod2daemon-flexvol", - "version": "v3.19.6" - }, - "csi": { - "image": "tigera/csi", - "version": "v3.19.6" - }, - "csi-node-driver-registrar": { - "image": "tigera/node-driver-registrar", - "version": "v3.19.6" - } - } - }, - { - "title": "v3.19.5", - "tigera-operator": { - "image": "tigera/operator", - "version": "v1.34.9", - "registry": "quay.io" - }, - "calico": { - "minor_version": "v3.28", - "archive_path": "archive" - }, - "components": { - "cnx-manager": { - "image": "tigera/cnx-manager", - 
"version": "v3.19.5" - }, - "voltron": { - "image": "tigera/voltron", - "version": "v3.19.5" - }, - "guardian": { - "image": "tigera/guardian", - "version": "v3.19.5" - }, - "cnx-apiserver": { - "image": "tigera/cnx-apiserver", - "version": "v3.19.5" - }, - "cnx-queryserver": { - "image": "tigera/cnx-queryserver", - "version": "v3.19.5" - }, - "cnx-kube-controllers": { - "image": "tigera/kube-controllers", - "version": "v3.19.5" - }, - "calicoq": { - "image": "tigera/calicoq", - "version": "v3.19.5" - }, - "typha": { - "image": "tigera/typha", - "version": "v3.19.5" - }, - "calicoctl": { - "image": "tigera/calicoctl", - "version": "v3.19.5" - }, - "cnx-node": { - "image": "tigera/cnx-node", - "version": "v3.19.5" - }, - "cnx-node-windows": { - "image": "tigera/cnx-node-windows", - "version": "v3.19.5" - }, - "dikastes": { - "image": "tigera/dikastes", - "version": "v3.19.5" - }, - "dex": { - "image": "tigera/dex", - "version": "v3.19.5" - }, - "fluentd": { - "image": "tigera/fluentd", - "version": "v3.19.5" - }, - "fluentd-windows": { - "image": "tigera/fluentd-windows", - "version": "v3.19.5" - }, - "es-proxy": { - "image": "tigera/es-proxy", - "version": "v3.19.5" - }, - "eck-kibana": { - "version": "7.17.27" - }, - "kibana": { - "image": "tigera/kibana", - "version": "v3.19.5" - }, - "eck-elasticsearch": { - "version": "7.17.27" - }, - "elasticsearch": { - "image": "tigera/elasticsearch", - "version": "v3.19.5" - }, - "elastic-tsee-installer": { - "image": "tigera/intrusion-detection-job-installer", - "version": "v3.19.5" - }, - "intrusion-detection-controller": { - "image": "tigera/intrusion-detection-controller", - "version": "v3.19.5" - }, - "compliance-controller": { - "image": "tigera/compliance-controller", - "version": "v3.19.5" - }, - "compliance-reporter": { - "image": "tigera/compliance-reporter", - "version": "v3.19.5" - }, - "compliance-snapshotter": { - "image": "tigera/compliance-snapshotter", - "version": "v3.19.5" - }, - "compliance-server": { - "image": "tigera/compliance-server", - "version": "v3.19.5" - }, - "compliance-benchmarker": { - "image": "tigera/compliance-benchmarker", - "version": "v3.19.5" - }, - "ingress-collector": { - "image": "tigera/ingress-collector", - "version": "v3.19.5" - }, - "l7-collector": { - "image": "tigera/l7-collector", - "version": "v3.19.5" - }, - "license-agent": { - "image": "tigera/license-agent", - "version": "v3.19.5" - }, - "tigera-cni": { - "image": "tigera/cni", - "version": "v3.19.5" - }, - "tigera-cni-windows": { - "image": "tigera/cni-windows", - "version": "v3.19.5" - }, - "firewall-integration": { - "image": "tigera/firewall-integration", - "version": "v3.19.5" - }, - "egress-gateway": { - "image": "tigera/egress-gateway", - "version": "v3.19.5" - }, - "honeypod": { - "image": "tigera/honeypod", - "version": "v3.19.5" - }, - "honeypod-exp-service": { - "image": "tigera/honeypod-exp-service", - "version": "v3.19.5" - }, - "honeypod-controller": { - "image": "tigera/honeypod-controller", - "version": "v3.19.5" - }, - "key-cert-provisioner": { - "image": "tigera/key-cert-provisioner", - "version": "v3.19.5" - }, - "elasticsearch-metrics": { - "image": "tigera/elasticsearch-metrics", - "version": "v3.19.5" - }, - "packetcapture": { - "image": "tigera/packetcapture", - "version": "v3.19.5" - }, - "policy-recommendation": { - "image": "tigera/policy-recommendation", - "version": "v3.19.5" - }, - "prometheus": { - "image": "tigera/prometheus", - "version": "v3.19.5" - }, - "coreos-prometheus": { - "version": "v2.48.1" - }, - 
"coreos-prometheus-operator": { - "version": "v0.73.2" - }, - "coreos-config-reloader": { - "version": "v0.73.2" - }, - "prometheus-operator": { - "image": "tigera/prometheus-operator", - "version": "v3.19.5" - }, - "prometheus-config-reloader": { - "image": "tigera/prometheus-config-reloader", - "version": "v3.19.5" - }, - "tigera-prometheus-service": { - "image": "tigera/prometheus-service", - "version": "v3.19.5" - }, - "es-gateway": { - "image": "tigera/es-gateway", - "version": "v3.19.5" - }, - "linseed": { - "image": "tigera/linseed", - "version": "v3.19.5" - }, - "deep-packet-inspection": { - "image": "tigera/deep-packet-inspection", - "version": "v3.19.5" - }, - "eck-elasticsearch-operator": { - "version": "2.16.1" - }, - "elasticsearch-operator": { - "image": "tigera/eck-operator", - "version": "v3.19.5" - }, - "coreos-alertmanager": { - "version": "v0.28.0" - }, - "alertmanager": { - "image": "tigera/alertmanager", - "version": "v3.19.5" - }, - "envoy": { - "image": "tigera/envoy", - "version": "v3.19.5" - }, - "envoy-init": { - "image": "tigera/envoy-init", - "version": "v3.19.5" - }, - "webhooks-processor": { - "image": "tigera/webhooks-processor", - "version": "v3.19.5" - }, - "flexvol": { - "image": "tigera/pod2daemon-flexvol", - "version": "v3.19.5" - }, - "csi": { - "image": "tigera/csi", - "version": "v3.19.5" - }, - "csi-node-driver-registrar": { - "image": "tigera/node-driver-registrar", - "version": "v3.19.5" - } - } - }, - { - "title": "v3.19.4", - "tigera-operator": { - "image": "tigera/operator", - "version": "v1.34.7", - "registry": "quay.io" - }, - "calico": { - "minor_version": "v3.28", - "archive_path": "archive" - }, - "components": { - "cnx-manager": { - "image": "tigera/cnx-manager", - "version": "v3.19.4" - }, - "voltron": { - "image": "tigera/voltron", - "version": "v3.19.4" - }, - "guardian": { - "image": "tigera/guardian", - "version": "v3.19.4" - }, - "cnx-apiserver": { - "image": "tigera/cnx-apiserver", - "version": "v3.19.4" - }, - "cnx-queryserver": { - "image": "tigera/cnx-queryserver", - "version": "v3.19.4" - }, - "cnx-kube-controllers": { - "image": "tigera/kube-controllers", - "version": "v3.19.4" - }, - "calicoq": { - "image": "tigera/calicoq", - "version": "v3.19.4" - }, - "typha": { - "image": "tigera/typha", - "version": "v3.19.4" - }, - "calicoctl": { - "image": "tigera/calicoctl", - "version": "v3.19.4" - }, - "cnx-node": { - "image": "tigera/cnx-node", - "version": "v3.19.4" - }, - "cnx-node-windows": { - "image": "tigera/cnx-node-windows", - "version": "v3.19.4" - }, - "dikastes": { - "image": "tigera/dikastes", - "version": "v3.19.4" - }, - "dex": { - "image": "tigera/dex", - "version": "v3.19.4" - }, - "fluentd": { - "image": "tigera/fluentd", - "version": "v3.19.4" - }, - "fluentd-windows": { - "image": "tigera/fluentd-windows", - "version": "v3.19.4" - }, - "es-proxy": { - "image": "tigera/es-proxy", - "version": "v3.19.4" - }, - "eck-kibana": { - "version": "7.17.25" - }, - "kibana": { - "image": "tigera/kibana", - "version": "v3.19.4" - }, - "eck-elasticsearch": { - "version": "7.17.25" - }, - "elasticsearch": { - "image": "tigera/elasticsearch", - "version": "v3.19.4" - }, - "elastic-tsee-installer": { - "image": "tigera/intrusion-detection-job-installer", - "version": "v3.19.4" - }, - "intrusion-detection-controller": { - "image": "tigera/intrusion-detection-controller", - "version": "v3.19.4" - }, - "compliance-controller": { - "image": "tigera/compliance-controller", - "version": "v3.19.4" - }, - "compliance-reporter": { - 
"image": "tigera/compliance-reporter", - "version": "v3.19.4" - }, - "compliance-snapshotter": { - "image": "tigera/compliance-snapshotter", - "version": "v3.19.4" - }, - "compliance-server": { - "image": "tigera/compliance-server", - "version": "v3.19.4" - }, - "compliance-benchmarker": { - "image": "tigera/compliance-benchmarker", - "version": "v3.19.4" - }, - "ingress-collector": { - "image": "tigera/ingress-collector", - "version": "v3.19.4" - }, - "l7-collector": { - "image": "tigera/l7-collector", - "version": "v3.19.4" - }, - "license-agent": { - "image": "tigera/license-agent", - "version": "v3.19.4" - }, - "tigera-cni": { - "image": "tigera/cni", - "version": "v3.19.4" - }, - "tigera-cni-windows": { - "image": "tigera/cni-windows", - "version": "v3.19.4" - }, - "firewall-integration": { - "image": "tigera/firewall-integration", - "version": "v3.19.4" - }, - "egress-gateway": { - "image": "tigera/egress-gateway", - "version": "v3.19.4" - }, - "honeypod": { - "image": "tigera/honeypod", - "version": "v3.19.4" - }, - "honeypod-exp-service": { - "image": "tigera/honeypod-exp-service", - "version": "v3.19.4" - }, - "honeypod-controller": { - "image": "tigera/honeypod-controller", - "version": "v3.19.4" - }, - "key-cert-provisioner": { - "image": "tigera/key-cert-provisioner", - "version": "v3.19.4" - }, - "elasticsearch-metrics": { - "image": "tigera/elasticsearch-metrics", - "version": "v3.19.4" - }, - "packetcapture": { - "image": "tigera/packetcapture", - "version": "v3.19.4" - }, - "policy-recommendation": { - "image": "tigera/policy-recommendation", - "version": "v3.19.4" - }, - "prometheus": { - "image": "tigera/prometheus", - "version": "v3.19.4" - }, - "coreos-prometheus": { - "version": "v2.48.1" - }, - "coreos-prometheus-operator": { - "version": "v0.73.2" - }, - "coreos-config-reloader": { - "version": "v0.73.2" - }, - "prometheus-operator": { - "image": "tigera/prometheus-operator", - "version": "v3.19.4" - }, - "prometheus-config-reloader": { - "image": "tigera/prometheus-config-reloader", - "version": "v3.19.4" - }, - "tigera-prometheus-service": { - "image": "tigera/prometheus-service", - "version": "v3.19.4" - }, - "es-gateway": { - "image": "tigera/es-gateway", - "version": "v3.19.4" - }, - "linseed": { - "image": "tigera/linseed", - "version": "v3.19.4" - }, - "deep-packet-inspection": { - "image": "tigera/deep-packet-inspection", - "version": "v3.19.4" - }, - "eck-elasticsearch-operator": { - "version": "v2.16.1" - }, - "elasticsearch-operator": { - "image": "tigera/eck-operator", - "version": "v3.19.4" - }, - "coreos-alertmanager": { - "version": "v0.28.0" - }, - "alertmanager": { - "image": "tigera/alertmanager", - "version": "v3.19.4" - }, - "envoy": { - "image": "tigera/envoy", - "version": "v3.19.4" - }, - "envoy-init": { - "image": "tigera/envoy-init", - "version": "v3.19.4" - }, - "webhooks-processor": { - "image": "tigera/webhooks-processor", - "version": "v3.19.4" - }, - "flexvol": { - "image": "tigera/pod2daemon-flexvol", - "version": "v3.19.4" - }, - "csi": { - "image": "tigera/csi", - "version": "v3.19.4" - }, - "csi-node-driver-registrar": { - "image": "tigera/node-driver-registrar", - "version": "v3.19.4" - } - } - }, - { - "title": "v3.19.3", - "tigera-operator": { - "image": "tigera/operator", - "version": "v1.34.6", - "registry": "quay.io" - }, - "calico": { - "minor_version": "v3.28", - "archive_path": "archive" - }, - "components": { - "cnx-manager": { - "image": "tigera/cnx-manager", - "version": "v3.19.3" - }, - "voltron": { - "image": 
"tigera/voltron", - "version": "v3.19.3" - }, - "guardian": { - "image": "tigera/guardian", - "version": "v3.19.3" - }, - "cnx-apiserver": { - "image": "tigera/cnx-apiserver", - "version": "v3.19.3" - }, - "cnx-queryserver": { - "image": "tigera/cnx-queryserver", - "version": "v3.19.3" - }, - "cnx-kube-controllers": { - "image": "tigera/kube-controllers", - "version": "v3.19.3" - }, - "calicoq": { - "image": "tigera/calicoq", - "version": "v3.19.3" - }, - "typha": { - "image": "tigera/typha", - "version": "v3.19.3" - }, - "calicoctl": { - "image": "tigera/calicoctl", - "version": "v3.19.3" - }, - "cnx-node": { - "image": "tigera/cnx-node", - "version": "v3.19.3" - }, - "cnx-node-windows": { - "image": "tigera/cnx-node-windows", - "version": "v3.19.3" - }, - "dikastes": { - "image": "tigera/dikastes", - "version": "v3.19.3" - }, - "dex": { - "image": "tigera/dex", - "version": "v3.19.3" - }, - "fluentd": { - "image": "tigera/fluentd", - "version": "v3.19.3" - }, - "fluentd-windows": { - "image": "tigera/fluentd-windows", - "version": "v3.19.3" - }, - "es-proxy": { - "image": "tigera/es-proxy", - "version": "v3.19.3" - }, - "eck-kibana": { - "version": "7.17.24" - }, - "kibana": { - "image": "tigera/kibana", - "version": "v3.19.3" - }, - "eck-elasticsearch": { - "version": "7.17.24" - }, - "elasticsearch": { - "image": "tigera/elasticsearch", - "version": "v3.19.3" - }, - "elastic-tsee-installer": { - "image": "tigera/intrusion-detection-job-installer", - "version": "v3.19.3" - }, - "intrusion-detection-controller": { - "image": "tigera/intrusion-detection-controller", - "version": "v3.19.3" - }, - "compliance-controller": { - "image": "tigera/compliance-controller", - "version": "v3.19.3" - }, - "compliance-reporter": { - "image": "tigera/compliance-reporter", - "version": "v3.19.3" - }, - "compliance-snapshotter": { - "image": "tigera/compliance-snapshotter", - "version": "v3.19.3" - }, - "compliance-server": { - "image": "tigera/compliance-server", - "version": "v3.19.3" - }, - "compliance-benchmarker": { - "image": "tigera/compliance-benchmarker", - "version": "v3.19.3" - }, - "ingress-collector": { - "image": "tigera/ingress-collector", - "version": "v3.19.3" - }, - "l7-collector": { - "image": "tigera/l7-collector", - "version": "v3.19.3" - }, - "license-agent": { - "image": "tigera/license-agent", - "version": "v3.19.3" - }, - "tigera-cni": { - "image": "tigera/cni", - "version": "v3.19.3" - }, - "tigera-cni-windows": { - "image": "tigera/cni-windows", - "version": "v3.19.3" - }, - "firewall-integration": { - "image": "tigera/firewall-integration", - "version": "v3.19.3" - }, - "egress-gateway": { - "image": "tigera/egress-gateway", - "version": "v3.19.3" - }, - "honeypod": { - "image": "tigera/honeypod", - "version": "v3.19.3" - }, - "honeypod-exp-service": { - "image": "tigera/honeypod-exp-service", - "version": "v3.19.3" - }, - "honeypod-controller": { - "image": "tigera/honeypod-controller", - "version": "v3.19.3" - }, - "key-cert-provisioner": { - "image": "tigera/key-cert-provisioner", - "version": "v3.19.3" - }, - "elasticsearch-metrics": { - "image": "tigera/elasticsearch-metrics", - "version": "v3.19.3" - }, - "packetcapture": { - "image": "tigera/packetcapture", - "version": "v3.19.3" - }, - "policy-recommendation": { - "image": "tigera/policy-recommendation", - "version": "v3.19.3" - }, - "prometheus": { - "image": "tigera/prometheus", - "version": "v3.19.3" - }, - "coreos-prometheus": { - "version": "v2.54.1" - }, - "coreos-prometheus-operator": { - "version": "v0.76.0" - 
}, - "coreos-config-reloader": { - "version": "v0.76.0" - }, - "prometheus-operator": { - "image": "tigera/prometheus-operator", - "version": "v3.19.3" - }, - "prometheus-config-reloader": { - "image": "tigera/prometheus-config-reloader", - "version": "v3.19.3" - }, - "tigera-prometheus-service": { - "image": "tigera/prometheus-service", - "version": "v3.19.3" - }, - "es-gateway": { - "image": "tigera/es-gateway", - "version": "v3.19.3" - }, - "linseed": { - "image": "tigera/linseed", - "version": "v3.19.3" - }, - "deep-packet-inspection": { - "image": "tigera/deep-packet-inspection", - "version": "v3.19.3" - }, - "eck-elasticsearch-operator": { - "version": "2.6.1" - }, - "elasticsearch-operator": { - "image": "tigera/eck-operator", - "version": "v3.19.3" - }, - "coreos-alertmanager": { - "version": "v0.25.1" - }, - "alertmanager": { - "image": "tigera/alertmanager", - "version": "v3.19.3" - }, - "envoy": { - "image": "tigera/envoy", - "version": "v3.19.3" - }, - "envoy-init": { - "image": "tigera/envoy-init", - "version": "v3.19.3" - }, - "webhooks-processor": { - "image": "tigera/webhooks-processor", - "version": "v3.19.3" - }, - "flexvol": { - "image": "tigera/pod2daemon-flexvol", - "version": "v3.19.3" - }, - "csi": { - "image": "tigera/csi", - "version": "v3.19.3" - }, - "csi-node-driver-registrar": { - "image": "tigera/node-driver-registrar", - "version": "v3.19.3" - } - } - }, - { - "title": "v3.19.2", - "tigera-operator": { - "image": "tigera/operator", - "version": "v1.34.4", - "registry": "quay.io" - }, - "calico": { - "minor_version": "v3.28", - "archive_path": "archive" - }, - "components": { - "cnx-manager": { - "image": "tigera/cnx-manager", - "version": "v3.19.2" - }, - "voltron": { - "image": "tigera/voltron", - "version": "v3.19.2" - }, - "guardian": { - "image": "tigera/guardian", - "version": "v3.19.2" - }, - "cnx-apiserver": { - "image": "tigera/cnx-apiserver", - "version": "v3.19.2" - }, - "cnx-queryserver": { - "image": "tigera/cnx-queryserver", - "version": "v3.19.2" - }, - "cnx-kube-controllers": { - "image": "tigera/kube-controllers", - "version": "v3.19.2" - }, - "calicoq": { - "image": "tigera/calicoq", - "version": "v3.19.2" - }, - "typha": { - "image": "tigera/typha", - "version": "v3.19.2" - }, - "calicoctl": { - "image": "tigera/calicoctl", - "version": "v3.19.2" - }, - "cnx-node": { - "image": "tigera/cnx-node", - "version": "v3.19.2" - }, - "cnx-node-windows": { - "image": "tigera/cnx-node-windows", - "version": "v3.19.2" - }, - "dikastes": { - "image": "tigera/dikastes", - "version": "v3.19.2" - }, - "dex": { - "image": "tigera/dex", - "version": "v3.19.2" - }, - "fluentd": { - "image": "tigera/fluentd", - "version": "v3.19.2" - }, - "fluentd-windows": { - "image": "tigera/fluentd-windows", - "version": "v3.19.2" - }, - "es-proxy": { - "image": "tigera/es-proxy", - "version": "v3.19.2" - }, - "eck-kibana": { - "version": "7.17.22" - }, - "kibana": { - "image": "tigera/kibana", - "version": "v3.19.2" - }, - "eck-elasticsearch": { - "version": "7.17.22" - }, - "elasticsearch": { - "image": "tigera/elasticsearch", - "version": "v3.19.2" - }, - "elastic-tsee-installer": { - "image": "tigera/intrusion-detection-job-installer", - "version": "v3.19.2" - }, - "intrusion-detection-controller": { - "image": "tigera/intrusion-detection-controller", - "version": "v3.19.2" - }, - "compliance-controller": { - "image": "tigera/compliance-controller", - "version": "v3.19.2" - }, - "compliance-reporter": { - "image": "tigera/compliance-reporter", - "version": "v3.19.2" - 
}, - "compliance-snapshotter": { - "image": "tigera/compliance-snapshotter", - "version": "v3.19.2" - }, - "compliance-server": { - "image": "tigera/compliance-server", - "version": "v3.19.2" - }, - "compliance-benchmarker": { - "image": "tigera/compliance-benchmarker", - "version": "v3.19.2" - }, - "ingress-collector": { - "image": "tigera/ingress-collector", - "version": "v3.19.2" - }, - "l7-collector": { - "image": "tigera/l7-collector", - "version": "v3.19.2" - }, - "license-agent": { - "image": "tigera/license-agent", - "version": "v3.19.2" - }, - "tigera-cni": { - "image": "tigera/cni", - "version": "v3.19.2" - }, - "tigera-cni-windows": { - "image": "tigera/cni-windows", - "version": "v3.19.2" - }, - "firewall-integration": { - "image": "tigera/firewall-integration", - "version": "v3.19.2" - }, - "egress-gateway": { - "image": "tigera/egress-gateway", - "version": "v3.19.2" - }, - "honeypod": { - "image": "tigera/honeypod", - "version": "v3.19.2" - }, - "honeypod-exp-service": { - "image": "tigera/honeypod-exp-service", - "version": "v3.19.2" - }, - "honeypod-controller": { - "image": "tigera/honeypod-controller", - "version": "v3.19.2" - }, - "key-cert-provisioner": { - "image": "tigera/key-cert-provisioner", - "version": "v3.19.2" - }, - "elasticsearch-metrics": { - "image": "tigera/elasticsearch-metrics", - "version": "v3.19.2" - }, - "packetcapture": { - "image": "tigera/packetcapture", - "version": "v3.19.2" - }, - "policy-recommendation": { - "image": "tigera/policy-recommendation", - "version": "v3.19.2" - }, - "prometheus": { - "image": "tigera/prometheus", - "version": "v3.19.2" - }, - "coreos-prometheus": { - "version": "v2.48.1" - }, - "coreos-prometheus-operator": { - "version": "v0.73.2" - }, - "coreos-config-reloader": { - "version": "v0.73.2" - }, - "prometheus-operator": { - "image": "tigera/prometheus-operator", - "version": "v3.19.2" - }, - "prometheus-config-reloader": { - "image": "tigera/prometheus-config-reloader", - "version": "v3.19.2" - }, - "tigera-prometheus-service": { - "image": "tigera/prometheus-service", - "version": "v3.19.2" - }, - "es-gateway": { - "image": "tigera/es-gateway", - "version": "v3.19.2" - }, - "linseed": { - "image": "tigera/linseed", - "version": "v3.19.2" - }, - "deep-packet-inspection": { - "image": "tigera/deep-packet-inspection", - "version": "v3.19.2" - }, - "eck-elasticsearch-operator": { - "version": "2.6.1" - }, - "elasticsearch-operator": { - "image": "tigera/eck-operator", - "version": "v3.19.2" - }, - "coreos-alertmanager": { - "version": "v0.25.1" - }, - "alertmanager": { - "image": "tigera/alertmanager", - "version": "v3.19.2" - }, - "envoy": { - "image": "tigera/envoy", - "version": "v3.19.2" - }, - "envoy-init": { - "image": "tigera/envoy-init", - "version": "v3.19.2" - }, - "webhooks-processor": { - "image": "tigera/webhooks-processor", - "version": "v3.19.2" - }, - "flexvol": { - "image": "tigera/pod2daemon-flexvol", - "version": "v3.19.2" - }, - "csi": { - "image": "tigera/csi", - "version": "v3.19.2" - }, - "csi-node-driver-registrar": { - "image": "tigera/node-driver-registrar", - "version": "v3.19.2" - } - } - }, - { - "title": "v3.19.1", - "tigera-operator": { - "image": "tigera/operator", - "version": "v1.34.2", - "registry": "quay.io" - }, - "calico": { - "minor_version": "v3.28", - "archive_path": "archive" - }, - "components": { - "cnx-manager": { - "image": "tigera/cnx-manager", - "version": "v3.19.1" - }, - "voltron": { - "image": "tigera/voltron", - "version": "v3.19.1" - }, - "guardian": { - "image": 
"tigera/guardian", - "version": "v3.19.1" - }, - "cnx-apiserver": { - "image": "tigera/cnx-apiserver", - "version": "v3.19.1" - }, - "cnx-queryserver": { - "image": "tigera/cnx-queryserver", - "version": "v3.19.1" - }, - "cnx-kube-controllers": { - "image": "tigera/kube-controllers", - "version": "v3.19.1" - }, - "calicoq": { - "image": "tigera/calicoq", - "version": "v3.19.1" - }, - "typha": { - "image": "tigera/typha", - "version": "v3.19.1" - }, - "calicoctl": { - "image": "tigera/calicoctl", - "version": "v3.19.1" - }, - "cnx-node": { - "image": "tigera/cnx-node", - "version": "v3.19.1" - }, - "cnx-node-windows": { - "image": "tigera/cnx-node-windows", - "version": "v3.19.1" - }, - "dikastes": { - "image": "tigera/dikastes", - "version": "v3.19.1" - }, - "dex": { - "image": "tigera/dex", - "version": "v3.19.1" - }, - "fluentd": { - "image": "tigera/fluentd", - "version": "v3.19.1" - }, - "fluentd-windows": { - "image": "tigera/fluentd-windows", - "version": "v3.19.1" - }, - "es-proxy": { - "image": "tigera/es-proxy", - "version": "v3.19.1" - }, - "eck-kibana": { - "version": "7.17.21" - }, - "kibana": { - "image": "tigera/kibana", - "version": "v3.19.1" - }, - "eck-elasticsearch": { - "version": "7.17.21" - }, - "elasticsearch": { - "image": "tigera/elasticsearch", - "version": "v3.19.1" - }, - "elastic-tsee-installer": { - "image": "tigera/intrusion-detection-job-installer", - "version": "v3.19.1" - }, - "intrusion-detection-controller": { - "image": "tigera/intrusion-detection-controller", - "version": "v3.19.1" - }, - "compliance-controller": { - "image": "tigera/compliance-controller", - "version": "v3.19.1" - }, - "compliance-reporter": { - "image": "tigera/compliance-reporter", - "version": "v3.19.1" - }, - "compliance-snapshotter": { - "image": "tigera/compliance-snapshotter", - "version": "v3.19.1" - }, - "compliance-server": { - "image": "tigera/compliance-server", - "version": "v3.19.1" - }, - "compliance-benchmarker": { - "image": "tigera/compliance-benchmarker", - "version": "v3.19.1" - }, - "ingress-collector": { - "image": "tigera/ingress-collector", - "version": "v3.19.1" - }, - "l7-collector": { - "image": "tigera/l7-collector", - "version": "v3.19.1" - }, - "license-agent": { - "image": "tigera/license-agent", - "version": "v3.19.1" - }, - "tigera-cni": { - "image": "tigera/cni", - "version": "v3.19.1" - }, - "tigera-cni-windows": { - "image": "tigera/cni-windows", - "version": "v3.19.1" - }, - "firewall-integration": { - "image": "tigera/firewall-integration", - "version": "v3.19.1" - }, - "egress-gateway": { - "image": "tigera/egress-gateway", - "version": "v3.19.1" - }, - "honeypod": { - "image": "tigera/honeypod", - "version": "v3.19.1" - }, - "honeypod-exp-service": { - "image": "tigera/honeypod-exp-service", - "version": "v3.19.1" - }, - "honeypod-controller": { - "image": "tigera/honeypod-controller", - "version": "v3.19.1" - }, - "key-cert-provisioner": { - "image": "tigera/key-cert-provisioner", - "version": "v3.19.1" - }, - "elasticsearch-metrics": { - "image": "tigera/elasticsearch-metrics", - "version": "v3.19.1" - }, - "packetcapture": { - "image": "tigera/packetcapture", - "version": "v3.19.1" - }, - "policy-recommendation": { - "image": "tigera/policy-recommendation", - "version": "v3.19.1" - }, - "prometheus": { - "image": "tigera/prometheus", - "version": "v3.19.1" - }, - "coreos-prometheus": { - "version": "v2.48.1" - }, - "coreos-prometheus-operator": { - "version": "v0.70.0" - }, - "coreos-config-reloader": { - "version": "v0.70.0" - }, - 
"prometheus-operator": { - "image": "tigera/prometheus-operator", - "version": "v3.19.1" - }, - "prometheus-config-reloader": { - "image": "tigera/prometheus-config-reloader", - "version": "v3.19.1" - }, - "tigera-prometheus-service": { - "image": "tigera/prometheus-service", - "version": "v3.19.1" - }, - "es-gateway": { - "image": "tigera/es-gateway", - "version": "v3.19.1" - }, - "linseed": { - "image": "tigera/linseed", - "version": "v3.19.1" - }, - "deep-packet-inspection": { - "image": "tigera/deep-packet-inspection", - "version": "v3.19.1" - }, - "eck-elasticsearch-operator": { - "version": "2.6.1" - }, - "elasticsearch-operator": { - "image": "tigera/eck-operator", - "version": "v3.19.1" - }, - "coreos-alertmanager": { - "version": "v0.25.1" - }, - "alertmanager": { - "image": "tigera/alertmanager", - "version": "v3.19.1" - }, - "envoy": { - "image": "tigera/envoy", - "version": "v3.19.1" - }, - "envoy-init": { - "image": "tigera/envoy-init", - "version": "v3.19.1" - }, - "webhooks-processor": { - "image": "tigera/webhooks-processor", - "version": "v3.19.1" - }, - "flexvol": { - "image": "tigera/pod2daemon-flexvol", - "version": "v3.19.1" - }, - "csi": { - "image": "tigera/csi", - "version": "v3.19.1" - }, - "csi-node-driver-registrar": { - "image": "tigera/node-driver-registrar", - "version": "v3.19.1" - } - } - }, - { - "title": "v3.19.0-2.0", - "tigera-operator": { - "image": "tigera/operator", - "version": "v1.34.0", - "registry": "quay.io" - }, - "calico": { - "minor_version": "v3.28", - "archive_path": "archive" - }, - "components": { - "cnx-manager": { - "image": "tigera/cnx-manager", - "version": "v3.19.0-2.0" - }, - "voltron": { - "image": "tigera/voltron", - "version": "v3.19.0-2.0" - }, - "guardian": { - "image": "tigera/guardian", - "version": "v3.19.0-2.0" - }, - "cnx-apiserver": { - "image": "tigera/cnx-apiserver", - "version": "v3.19.0-2.0" - }, - "cnx-queryserver": { - "image": "tigera/cnx-queryserver", - "version": "v3.19.0-2.0" - }, - "cnx-kube-controllers": { - "image": "tigera/kube-controllers", - "version": "v3.19.0-2.0" - }, - "calicoq": { - "image": "tigera/calicoq", - "version": "v3.19.0-2.0" - }, - "typha": { - "image": "tigera/typha", - "version": "v3.19.0-2.0" - }, - "calicoctl": { - "image": "tigera/calicoctl", - "version": "v3.19.0-2.0" - }, - "cnx-node": { - "image": "tigera/cnx-node", - "version": "v3.19.0-2.0" - }, - "cnx-node-windows": { - "image": "tigera/cnx-node-windows", - "version": "v3.19.0-2.0" - }, - "dikastes": { - "image": "tigera/dikastes", - "version": "v3.19.0-2.0" - }, - "dex": { - "image": "tigera/dex", - "version": "v3.19.0-2.0" - }, - "fluentd": { - "image": "tigera/fluentd", - "version": "v3.19.0-2.0" - }, - "fluentd-windows": { - "image": "tigera/fluentd-windows", - "version": "v3.19.0-2.0" - }, - "es-proxy": { - "image": "tigera/es-proxy", - "version": "v3.19.0-2.0" - }, - "eck-kibana": { - "version": "7.17.18" - }, - "kibana": { - "image": "tigera/kibana", - "version": "v3.19.0-2.0" - }, - "eck-elasticsearch": { - "version": "7.17.18" - }, - "elasticsearch": { - "image": "tigera/elasticsearch", - "version": "v3.19.0-2.0" - }, - "elastic-tsee-installer": { - "image": "tigera/intrusion-detection-job-installer", - "version": "v3.19.0-2.0" - }, - "intrusion-detection-controller": { - "image": "tigera/intrusion-detection-controller", - "version": "v3.19.0-2.0" - }, - "compliance-controller": { - "image": "tigera/compliance-controller", - "version": "v3.19.0-2.0" - }, - "compliance-reporter": { - "image": "tigera/compliance-reporter", 
- "version": "v3.19.0-2.0" - }, - "compliance-snapshotter": { - "image": "tigera/compliance-snapshotter", - "version": "v3.19.0-2.0" - }, - "compliance-server": { - "image": "tigera/compliance-server", - "version": "v3.19.0-2.0" - }, - "compliance-benchmarker": { - "image": "tigera/compliance-benchmarker", - "version": "v3.19.0-2.0" - }, - "ingress-collector": { - "image": "tigera/ingress-collector", - "version": "v3.19.0-2.0" - }, - "l7-collector": { - "image": "tigera/l7-collector", - "version": "v3.19.0-2.0" - }, - "license-agent": { - "image": "tigera/license-agent", - "version": "v3.19.0-2.0" - }, - "tigera-cni": { - "image": "tigera/cni", - "version": "v3.19.0-2.0" - }, - "tigera-cni-windows": { - "image": "tigera/cni-windows", - "version": "v3.19.0-2.0" - }, - "firewall-integration": { - "image": "tigera/firewall-integration", - "version": "v3.19.0-2.0" - }, - "egress-gateway": { - "image": "tigera/egress-gateway", - "version": "v3.19.0-2.0" - }, - "honeypod": { - "image": "tigera/honeypod", - "version": "v3.19.0-2.0" - }, - "honeypod-exp-service": { - "image": "tigera/honeypod-exp-service", - "version": "v3.19.0-2.0" - }, - "honeypod-controller": { - "image": "tigera/honeypod-controller", - "version": "v3.19.0-2.0" - }, - "key-cert-provisioner": { - "image": "tigera/key-cert-provisioner", - "version": "v3.19.0-2.0" - }, - "elasticsearch-metrics": { - "image": "tigera/elasticsearch-metrics", - "version": "v3.19.0-2.0" - }, - "packetcapture": { - "image": "tigera/packetcapture", - "version": "v3.19.0-2.0" - }, - "policy-recommendation": { - "image": "tigera/policy-recommendation", - "version": "v3.19.0-2.0" - }, - "prometheus": { - "image": "tigera/prometheus", - "version": "v3.19.0-2.0" - }, - "coreos-prometheus": { - "version": "v2.48.1" - }, - "coreos-prometheus-operator": { - "version": "v0.70.0" - }, - "coreos-config-reloader": { - "version": "v0.70.0" - }, - "prometheus-operator": { - "image": "tigera/prometheus-operator", - "version": "v3.19.0-2.0" - }, - "prometheus-config-reloader": { - "image": "tigera/prometheus-config-reloader", - "version": "v3.19.0-2.0" - }, - "tigera-prometheus-service": { - "image": "tigera/prometheus-service", - "version": "v3.19.0-2.0" - }, - "es-gateway": { - "image": "tigera/es-gateway", - "version": "v3.19.0-2.0" - }, - "linseed": { - "image": "tigera/linseed", - "version": "v3.19.0-2.0" - }, - "deep-packet-inspection": { - "image": "tigera/deep-packet-inspection", - "version": "v3.19.0-2.0" - }, - "eck-elasticsearch-operator": { - "version": "2.6.1" - }, - "elasticsearch-operator": { - "image": "tigera/eck-operator", - "version": "v3.19.0-2.0" - }, - "coreos-alertmanager": { - "version": "v0.25.1" - }, - "alertmanager": { - "image": "tigera/alertmanager", - "version": "v3.19.0-2.0" - }, - "envoy": { - "image": "tigera/envoy", - "version": "v3.19.0-2.0" - }, - "envoy-init": { - "image": "tigera/envoy-init", - "version": "v3.19.0-2.0" - }, - "webhooks-processor": { - "image": "tigera/webhooks-processor", - "version": "v3.19.0-2.0" - }, - "flexvol": { - "image": "tigera/pod2daemon-flexvol", - "version": "v3.19.0-2.0" - }, - "csi": { - "image": "tigera/csi", - "version": "v3.19.0-2.0" - }, - "csi-node-driver-registrar": { - "image": "tigera/node-driver-registrar", - "version": "v3.19.0-2.0" - } - } - }, - { - "title": "v3.19.0-1.0", - "tigera-operator": { - "image": "tigera/operator", - "version": "v1.33.0", - "registry": "quay.io" - }, - "calico": { - "minor_version": "v3.27", - "archive_path": "archive" - }, - "components": { - "cnx-manager": { - 
"image": "tigera/cnx-manager", - "version": "v3.19.0-1.0" - }, - "voltron": { - "image": "tigera/voltron", - "version": "v3.19.0-1.0" - }, - "guardian": { - "image": "tigera/guardian", - "version": "v3.19.0-1.0" - }, - "cnx-apiserver": { - "image": "tigera/cnx-apiserver", - "version": "v3.19.0-1.0" - }, - "cnx-queryserver": { - "image": "tigera/cnx-queryserver", - "version": "v3.19.0-1.0" - }, - "cnx-kube-controllers": { - "image": "tigera/kube-controllers", - "version": "v3.19.0-1.0" - }, - "calicoq": { - "image": "tigera/calicoq", - "version": "v3.19.0-1.0" - }, - "typha": { - "image": "tigera/typha", - "version": "v3.19.0-1.0" - }, - "calicoctl": { - "image": "tigera/calicoctl", - "version": "v3.19.0-1.0" - }, - "cnx-node": { - "image": "tigera/cnx-node", - "version": "v3.19.0-1.0" - }, - "cnx-node-windows": { - "image": "tigera/cnx-node-windows", - "version": "v3.19.0-1.0" - }, - "dikastes": { - "image": "tigera/dikastes", - "version": "v3.19.0-1.0" - }, - "dex": { - "image": "tigera/dex", - "version": "v3.19.0-1.0" - }, - "fluentd": { - "image": "tigera/fluentd", - "version": "v3.19.0-1.0" - }, - "fluentd-windows": { - "image": "tigera/fluentd-windows", - "version": "v3.19.0-1.0" - }, - "es-proxy": { - "image": "tigera/es-proxy", - "version": "v3.19.0-1.0" - }, - "eck-kibana": { - "version": "7.17.14" - }, - "kibana": { - "image": "tigera/kibana", - "version": "v3.19.0-1.0" - }, - "eck-elasticsearch": { - "version": "7.17.14" - }, - "elasticsearch": { - "image": "tigera/elasticsearch", - "version": "v3.19.0-1.0" - }, - "cloud-controllers": { - "image": "tigera/cloud-controllers", - "version": "v3.19.0-1.0" - }, - "elastic-tsee-installer": { - "image": "tigera/intrusion-detection-job-installer", - "version": "v3.19.0-1.0" - }, - "intrusion-detection-controller": { - "image": "tigera/intrusion-detection-controller", - "version": "v3.19.0-1.0" - }, - "compliance-controller": { - "image": "tigera/compliance-controller", - "version": "v3.19.0-1.0" - }, - "compliance-reporter": { - "image": "tigera/compliance-reporter", - "version": "v3.19.0-1.0" - }, - "compliance-snapshotter": { - "image": "tigera/compliance-snapshotter", - "version": "v3.19.0-1.0" - }, - "compliance-server": { - "image": "tigera/compliance-server", - "version": "v3.19.0-1.0" - }, - "compliance-benchmarker": { - "image": "tigera/compliance-benchmarker", - "version": "v3.19.0-1.0" - }, - "ingress-collector": { - "image": "tigera/ingress-collector", - "version": "v3.19.0-1.0" - }, - "l7-collector": { - "image": "tigera/l7-collector", - "version": "v3.19.0-1.0" - }, - "license-agent": { - "image": "tigera/license-agent", - "version": "v3.19.0-1.0" - }, - "tigera-cni": { - "image": "tigera/cni", - "version": "v3.19.0-1.0" - }, - "tigera-cni-windows": { - "image": "tigera/cni-windows", - "version": "v3.19.0-1.0" - }, - "firewall-integration": { - "image": "tigera/firewall-integration", - "version": "v3.19.0-1.0" - }, - "egress-gateway": { - "image": "tigera/egress-gateway", - "version": "v3.19.0-1.0" - }, - "honeypod": { - "image": "tigera/honeypod", - "version": "v3.19.0-1.0" - }, - "honeypod-exp-service": { - "image": "tigera/honeypod-exp-service", - "version": "v3.19.0-1.0" - }, - "honeypod-controller": { - "image": "tigera/honeypod-controller", - "version": "v3.19.0-1.0" - }, - "key-cert-provisioner": { - "image": "tigera/key-cert-provisioner", - "version": "v1.1.18", - "registry": "quay.io" - }, - "elasticsearch-metrics": { - "image": "tigera/elasticsearch-metrics", - "version": "v3.19.0-1.0" - }, - "packetcapture": { - 
"image": "tigera/packetcapture", - "version": "v3.19.0-1.0" - }, - "policy-recommendation": { - "image": "tigera/policy-recommendation", - "version": "v3.19.0-1.0" - }, - "prometheus": { - "image": "tigera/prometheus", - "version": "v3.19.0-1.0" - }, - "coreos-prometheus": { - "version": "v2.48.1" - }, - "coreos-prometheus-operator": { - "version": "v0.70.0" - }, - "coreos-config-reloader": { - "version": "v0.70.0" - }, - "prometheus-operator": { - "image": "tigera/prometheus-operator", - "version": "v3.19.0-1.0" - }, - "prometheus-config-reloader": { - "image": "tigera/prometheus-config-reloader", - "version": "v3.19.0-1.0" - }, - "tigera-prometheus-service": { - "image": "tigera/prometheus-service", - "version": "v3.19.0-1.0" - }, - "es-gateway": { - "image": "tigera/es-gateway", - "version": "v3.19.0-1.0" - }, - "linseed": { - "image": "tigera/linseed", - "version": "v3.19.0-1.0" - }, - "deep-packet-inspection": { - "image": "tigera/deep-packet-inspection", - "version": "v3.19.0-1.0" - }, - "eck-elasticsearch-operator": { - "version": "2.6.1" - }, - "elasticsearch-operator": { - "image": "tigera/eck-operator", - "version": "v3.19.0-1.0" - }, - "coreos-alertmanager": { - "version": "v0.25.1" - }, - "alertmanager": { - "image": "tigera/alertmanager", - "version": "v3.19.0-1.0" - }, - "envoy": { - "image": "tigera/envoy", - "version": "v3.19.0-1.0" - }, - "envoy-init": { - "image": "tigera/envoy-init", - "version": "v3.19.0-1.0" - }, - "flexvol": { - "image": "tigera/pod2daemon-flexvol", - "version": "v3.19.0-1.0" - }, - "csi": { - "image": "tigera/csi", - "version": "v3.19.0-1.0" - }, - "csi-node-driver-registrar": { - "image": "tigera/node-driver-registrar", - "version": "v3.19.0-1.0" - } - } - } -] diff --git a/calico-enterprise_versioned_docs/version-3.19-2/threat/configuring-webhooks.mdx b/calico-enterprise_versioned_docs/version-3.19-2/threat/configuring-webhooks.mdx deleted file mode 100644 index 858cdd8829..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/threat/configuring-webhooks.mdx +++ /dev/null @@ -1,35 +0,0 @@ ---- -description: Get security event alerts in Slack or Jira. -title: Configuring security event alerts in Slack and Jira ---- - -# Configuring security event alerts in Slack and Jira - -:::note - -This feature is tech preview. Tech preview features may be subject to significant changes before they become GA. - -::: - -You can configure $[prodname] webhooks to post security alerts directly to a Slack channel or to create an issue in your Jira project. -By configuring webhooks for security alerts, you can make sure that you receive critical alerts without having to sign in to the web console. - -## Before you begin - -Your target application must be configured to receive data from the $[prodname] webhook. - -* **Slack**. You must have a webhook URL for the Slack app that you want $[prodname] to send alerts to. -See [Sending messages using Incoming Webhooks](https://api.slack.com/messaging/webhooks) for more information. -* **Jira**. You must have an API token for an Atlassian user account that has write permissions to your Jira instance. - See [Manage API tokens for your Atlassian account](https://support.atlassian.com/atlassian-account/docs/manage-api-tokens-for-your-atlassian-account/) for details on how to obtain an API token. - You also need: - * Your Atlassian site URL. If you access Jira at the URL `https://.atlassian.net/jira`, then your site URL is `.atlassian.net`. - * A Jira project key. 
This is the Jira project where your $[prodname] webhook creates new issues. The user associated with your API token must have write permissions to this project.
* **Generic JSON**. You must have a webhook URL for any other application that you want the $[prodname] webhook to send alerts to.

## Create a webhook for security event alerts

1. In the web console, select **Activity** > **Webhooks**, and then click **Create your first webhook**.
2. Enter a **Name** for your webhook, select which **Event types** you want to get alerts for, and, under **Type**, select whether to configure the webhook for Slack, Jira, or generic JSON output.
3. Complete the fields for your webhook type and click **Create Webhook**.

diff --git a/calico-enterprise_versioned_docs/version-3.19-2/threat/deeppacketinspection.mdx b/calico-enterprise_versioned_docs/version-3.19-2/threat/deeppacketinspection.mdx
deleted file mode 100644
index 2f3946c222..0000000000
--- a/calico-enterprise_versioned_docs/version-3.19-2/threat/deeppacketinspection.mdx
+++ /dev/null
@@ -1,114 +0,0 @@
---
description: Monitor live traffic for malicious activities.
---

# Deep packet inspection

## Big picture

Configure Deep Packet Inspection (DPI) in clusters to get alerts on compromised resources.

## Value

Security teams need to run DPI quickly in response to unusual network traffic in clusters so they can identify potential threats. It is also critical to run DPI on selected workloads (not all of them) to make efficient use of cluster resources and minimize false positives. $[prodname] provides an easy way to perform DPI using [Snort community rules](https://www.snort.org/downloads/#rule-downloads). You can disable DPI at any time, selectively configure it for namespaces and endpoints, and review the alerts that are generated in the Alerts dashboard in the web console.

## Concepts

For each deep packet inspection resource (DeepPacketInspection), $[prodname] creates a live network monitor that inspects the header and payload information of packets that match the Snort community rules. Whenever malicious activity is suspected, an alert is automatically added to the Alerts page in the $[prodname] web console.

$[prodname] DPI uses AF_PACKET, a Linux socket that allows an application to receive and send raw packets. It is commonly used for troubleshooting (for example, by tcpdump and Wireshark), but also for network intrusion detection. For details, see [AF_PACKET](https://man7.org/linux/man-pages/man7/packet.7.html).

## Before you begin

**Not supported:**

- Multi-NIC setups
- $[prodname] nodes running Windows hosts

## How To

- [Configure deep packet inspection](#configure-deep-packet-inspection)
- [Configure resource requirements](#configure-resource-requirements)
- [Access alerts](#access-alerts)
- [Verify deep packet inspection is running](#verify-deep-packet-inspection-is-running)

### Configure deep packet inspection

Create a YAML file containing one or more [DeepPacketInspection](../reference/resources/deeppacketinspection.mdx) resources and apply it to your cluster.

```bash
kubectl apply -f <your-deep-packet-inspection-manifest.yaml>
```

To stop deep packet inspection, delete the DeepPacketInspection resource from your cluster.

```bash
kubectl delete -f <your-deep-packet-inspection-manifest.yaml>
```

**Examples of selecting workloads**

Following is a basic example that selects a single workload that has the label `k8s-app` with the value `nginx`.
- -```yaml -apiVersion: projectcalico.org/v3 -kind: DeepPacketInspection -metadata: - name: sample-dpi-nginx - namespace: sample -spec: - selector: k8s-app == "nginx" -``` - -In the following example, we select all workload endpoints in the `sample` namespace. - -```yaml -apiVersion: projectcalico.org/v3 -kind: DeepPacketInspection -metadata: - name: sample-dpi-all - namespace: sample -spec: - selector: all() -``` - -### Configure resource requirements - -Adjust the CPU and RAM used for performing deep packet inspection by updating the [component resource in IntrusionDetection](../reference/installation/api.mdx#intrusiondetectioncomponentresource). - -For a data transfer rate of 1GB/sec on workload endpoints being monitored, we recommend a minimum of 1 CPU and 1GB RAM. - -The following example configures deep packet inspection to use a maximum of 1 CPU and 1GB RAM. - -```yaml -apiVersion: operator.tigera.io/v1 -kind: IntrusionDetection -metadata: - name: tigera-secure -spec: - componentResources: - - componentName: DeepPacketInspection - resourceRequirements: - limits: - cpu: '1' - memory: 1Gi - requests: - cpu: 100m - memory: 100Mi -``` - -### Access alerts - -The alerts generated by deep packet inspection are available in the web console in the Alerts page. - -### Verify deep packet inspection is running - -Get the [status of DeepPacketInspection](../reference/resources/deeppacketinspection.mdx#status) resource to verify if live traffic is being monitored on selected workload endpoints. - -```bash -kubectl get -n -``` - -## Additional resources - -- [Configure packet capture](../observability/packetcapture.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/threat/deploying-waf-ingress-gateway.mdx b/calico-enterprise_versioned_docs/version-3.19-2/threat/deploying-waf-ingress-gateway.mdx deleted file mode 100644 index 9768c0aff7..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/threat/deploying-waf-ingress-gateway.mdx +++ /dev/null @@ -1,319 +0,0 @@ ---- -description: Deploy WAF with ingress gateways ---- - -# Deploying WAF with an ingress gateway - -## Introduction - -In addition to automatically deploying and managing our WAF inside the cluster to protect each workload, we also offer the option to manually deploy our WAF to integrate with Envoy-based Gateways. - -Deploying WAF in this way has the following characteristics and caveats: - -* Comes with OWASP CoreRuleSet 4.0 built-in, which can be overridden with custom rules -* Uses Coraza as the WAF engine -* Integrates with Envoy using ext_authz filter -* Logs to stdout, allowing the user to decide where to send the WAF logs -* Manually deployed and configured by the user - -## Deployment guide - -This documentation outlines the process of deploying our Web Application Firewall (WAF) with an Istio Ingress gateway. By deploying the WAF alongside Istio ingress gateway, incoming requests to the cluster will be inspected, secured and filtered before they reach the underlying services within the cluster. - -There are three steps to deploying WAF with Istio ingress gateway - -* Add WAF as a sidecar injected in Istio ingress gateway pods -* Update Istio ingress gateway to use WAF with Envoy’s ext_authz filter -* Validate the configuration works by testing WAF - -## Step 1: Enable Istio ingress gateway for custom sidecar injection - -### 1. Initialize Istio operator - -If your Istio installation was done using the Istio operator, there's no need to reinstall the Istio operator. 
However, if Istio was installed by means other than the Istio operator, then you should install the Istio operator using the following command to ensure that you can leverage custom sidecar injection capabilities. - -```bash -istioctl operator init -``` - -This command will deploy the Istio operator named istio-operator in the `istio-operator` namespace. - -### 2. Deploy IstioOperator custom resource for custom sidecar injection - -Create an IstioOperator custom resource to enable custom sidecar injection. -Use the provided IstioOperator definition as a starting point: - -```bash -kubectl apply -f - < -y` for Istio installation does not automatically enable custom sidecar injection. This is because it won't update the istio-sidecar-injector ConfigMap with the configured sidecar injection template. - -This limitation can be overcome by installing the IstioOperator and deploying the IstioOperator CR to enable the custom sidecar injection template. By doing so, you can ensure that the custom sidecar injection templates are properly applied and managed within your Istio service mesh. This approach provides a more flexible and customizable way to manage sidecar injections, allowing for configurations that meet specific requirements of your applications and services. - -#### Manual updates to istio-sidecar-injector ConfigMap: - -While pods are generally injected based on the sidecar injection template configured in the istio-sidecar-injector ConfigMap, manually updating, patching or adding the sidecar injection template into the ConfigMap does not guarantee the injection of custom sidecars into annotated and labeled pods. - -### Considerations - -* Istio's default installation commands may not automatically integrate custom sidecar injection configurations. -* Manual modifications to the istio-sidecar-injector ConfigMap may not trigger the injection of custom sidecars into pods as expected. - -## Custom injection support across cloud providers - -The ability to use custom injection mechanisms in Istio may vary across different Kubernetes clusters on various cloud providers. Below is a detailed section outlining the specific scenarios for AWS EKS, AWS Kubeadm, and Google GKE. - -### AWS EKS (Elastic Kubernetes Service) - -Custom injection mechanisms, especially those involving Istio sidecars or other custom configurations, may encounter challenges on AWS EKS. This limitation is due to the managed nature of EKS, where certain components and configurations are controlled by AWS. - -### AWS Kubeadm - -Custom injection is generally well-supported on Kubernetes clusters created using kubeadm on AWS. In this self-managed setup, you have more control over the cluster components, making it suitable for custom configurations, including injection of Istio sidecars or other custom components. - -### Google Kubernetes Engine (GKE) - -GKE, being a managed Kubernetes service by Google Cloud, supports custom injection mechanisms. Google provides a flexible environment where you can apply custom configurations to the cluster, making it compatible with Istio sidecar injection and similar customization approaches. - -### Azure AKS (Azure Kubernetes Service) - -Azure AKS generally supports custom injection mechanisms. While it's a managed Kubernetes service, AKS provides flexibility for certain customizations, making it compatible with Istio sidecar injection and similar customization approaches. 
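
## Verify the custom sidecar injection

Regardless of the platform, it's worth confirming that the custom injection template is actually in place before moving on to the ext_authz configuration. The commands below are a minimal sketch that assumes the Istio defaults (the `istio-system` namespace, an ingress gateway deployment named `istio-ingressgateway`, and the `istio=ingressgateway` pod label); adjust the names to match your installation.

```bash
# Check that the sidecar injector ConfigMap now carries the custom injection
# template defined in your IstioOperator resource.
kubectl get configmap istio-sidecar-injector -n istio-system -o yaml | grep -A 5 'templates:'

# Restart the ingress gateway so new pods are created with the WAF sidecar injected.
kubectl rollout restart deployment istio-ingressgateway -n istio-system
kubectl rollout status deployment istio-ingressgateway -n istio-system

# List the containers in the gateway pods; an extra container should appear
# alongside istio-proxy if injection succeeded.
kubectl get pods -n istio-system -l istio=ingressgateway -o jsonpath='{.items[*].spec.containers[*].name}'
```

If the additional container does not appear, verify that the gateway pods carry the label or annotation that your injection template keys on before continuing.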
diff --git a/calico-enterprise_versioned_docs/version-3.19-2/threat/honeypods.mdx b/calico-enterprise_versioned_docs/version-3.19-2/threat/honeypods.mdx deleted file mode 100644 index 0725499c4c..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/threat/honeypods.mdx +++ /dev/null @@ -1,138 +0,0 @@ ---- -description: Configure honeypods to detect compromised workloads. ---- - -# Configure honeypods - -## Big picture - -Configure honeypods in your clusters and get alerts that indicate resources may be compromised. - -## Value - -Based on the well-known cybersecurity method, “honeypots”, $[prodname] honeypods are used to detect suspicious activity within a Kubernetes cluster. The feature enables you to deploy decoys disguised as a sensitive asset (called honeypods) at different locations in your Kubernetes cluster. Any resources make attempts to communicate with the honeypods, it can be considered indicative of a suspicious connection and the cluster may be compromised. - -$[prodname] honeypods can be used to detect attacks such as: - -- Data exfiltration -- Resources enumeration -- Privilege escalation -- Denial of service -- Vulnerability exploitation attempts - -## Concepts - -### Honeypod implementation - -Honeypods can be configured on a per-cluster basis using "template" honeypod manifests that are easily customizable. Any alerts triggered are displayed in the Security Events tab in the $[prodname] web console. The Honeypod Dashboard in Kibana provides an easy way to monitor and analyze traffic reaching the honeypods. - -## How To - -- [Configure namespace and RBAC for honeypods](#configure-namespace-and-rbac-for-honeypods) -- [Deploy honeypods in clusters](#deploy-honeypods-in-clusters) -- [Verify honeypods deployment](#verify-honeypods-deployment) - -### Configure namespace and RBAC for honeypods - -Apply the following manifest to create a namespace and RBAC for the honeypods: - -```bash -kubectl create -f $[filesUrl]/manifests/threatdef/honeypod/common.yaml -``` - -Add `tigera-pull-secret` into the namespace `tigera-internal`: - -```bash -kubectl get secret tigera-pull-secret --namespace=calico-system -o yaml | sed 's/namespace: .*/namespace: tigera-internal/' | kubectl apply -f - -``` - -### Deploy honeypods in clusters - -Use one of the following sample honeypods manifests or customize them for your implementation. All images include a minimal container that runs or mimics a running application. The images provided have been hardened with built-in protections to reduce the risk of them being compromised. - -:::note - -When modifying the provided honeypod manifests, be sure to update the [globalalert](../reference/resources/globalalert.mdx) section in the manifest to match your changes. Ensure the alert name has the prefix `honeypod`, for example `honeypod.new.alert`. - -::: - -- **IP Enumeration** - - Expose an empty pod that can only be reached via PodIP; this allows you to see when the attacker is probing the pod network: - -```bash -kubectl apply -f $[filesUrl]/manifests/threatdef/honeypod/ip-enum.yaml -``` - -- **Expose an nginx service** - - Expose a nginx service that serves a generic page. The pod can be discovered via ClusterIP or DNS lookup. 
An unreachable service `tigera-dashboard-internal-service` is created to entice the attacker to find and reach, `tigera-dashboard-internal-debug`: - -```bash -kubectl apply -f $[filesUrl]/manifests/threatdef/honeypod/expose-svc.yaml -``` - -- **Vulnerable Service (MySQL)** - - Expose a SQL service that contains an empty database with easy access. The pod can be discovered via ClusterIP or DNS lookup: - -```bash -kubectl apply -f $[filesUrl]/manifests/threatdef/honeypod/vuln-svc.yaml -``` - -### Verify honeypods deployment - -To verify the installation, ensure that honeypods are running within the `tigera-internal` namespace: - -```bash -kubectl get pods -n tigera-internal -``` - -```shell -NAME READY STATUS RESTARTS AGE -tigera-internal-app-28c85 1/1 Running 0 2m19s -tigera-internal-app-8c5bt 1/1 Running 0 2m19s -tigera-internal-app-l64nz 1/1 Running 0 2m19s -tigera-internal-app-qc7gv 1/1 Running 0 2m19s -tigera-internal-dashboard-6df998578c-mtmqr 1/1 Running 0 2m15s -tigera-internal-db-5c57bd5987-k5ksj 1/1 Running 0 2m10s -``` - -And verify that global alerts are set for honeypods: - -```bash -kubectl get globalalerts -``` - -```shell -NAME CREATED AT -honeypod.fake.svc 2020-10-22T03:44:36Z -honeypod.ip.enum 2020-10-22T03:44:31Z -honeypod.network.ssh 2020-10-22T03:43:40Z -honeypod.port.scan 2020-10-22T03:44:31Z -honeypod.vuln.svc 2020-10-22T03:44:40Z -``` - -As an example, to trigger an alert for `honeypod.ip.enum`, first get the Pod IP for one of the honeypods: - -```bash -kubectl get pod tigera-internal-app-28c85 -n tigera-internal -ojsonpath='{.status.podIP}' -``` - -Then, run a `busybox` container with the command `ping` on the honeypod IP: - -```bash -kubectl run --restart=Never --image busybox ping-runner -- ping -c1 -``` - -If the ICMP request reaches the honeypod, an alert will be generated within 5 minutes. - -After you have verified that the honeypods are installed and working, a best practice is to remove the pull secret from the namespace: - -```bash -kubectl delete secret tigera-pull-secret -n tigera-internal -``` - -## Additional resources - -- [GlobalAlerts](../reference/resources/globalalert.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/threat/index.mdx b/calico-enterprise_versioned_docs/version-3.19-2/threat/index.mdx deleted file mode 100644 index 4663f4c54f..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/threat/index.mdx +++ /dev/null @@ -1,22 +0,0 @@ ---- -description: Trace, analyze, and block malicious threats using intelligent feeds and alerts. -hide_table_of_contents: true ---- - -import { DocCardLink, DocCardLinkLayout } from '/src/___new___/components'; - - -# Threat defense - -Use real-time monitoring to detect and block threats to your cluster. - - - - - - - - - - - diff --git a/calico-enterprise_versioned_docs/version-3.19-2/threat/security-event-management.mdx b/calico-enterprise_versioned_docs/version-3.19-2/threat/security-event-management.mdx deleted file mode 100644 index 275cee0cde..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/threat/security-event-management.mdx +++ /dev/null @@ -1,122 +0,0 @@ ---- -description: Manage security events from your cluster in a single place. ---- - -# Security event management - -Manage security events from your cluster in a single place. - -## Value - -Security events indicate that a threat actor may be present in your Kubernetes cluster. For example, a DNS request to a malicious hostname, a triggered WAF rule, or the opening of a sensitive file. 
$[prodname] provides security engineers and incident response teams with a single dashboard to manage threat alerts. Benefits include:

- A filtered list of critical events with recommended remediation
- Identify impacts on applications
- Understand the scope and frequency of the issue
- Manage alert noise by dismissing events (show/hide)
- Manage alert noise by creating exceptions

## Before you begin

**Required**

- [WAF is enabled](./web-application-firewall.mdx)

**Limitations**

- Only WAF basic security events. Over time, the dashboard will contain a full range of $[prodname] security events.
- You cannot control which users can view or edit the page using fine-grained role-based access controls

## Security Events Dashboard

The **Security Events Dashboard** page gives you a high-level view of recent security events.
You can use this visual reference to get an overall sense of your cluster's health.
If you find anything that merits further investigation, you can click on an event for more details.

* In the web console, go to **Threat defense > Security Events Dashboard**.

![Security Events Dashboard](/img/calico-enterprise/security-events-dashboard.png)

## Security Events

The **Security Events** page lists all the security events that have been detected for your cluster.
You can view and filter your security events to focus on the events that need your attention.

* In the web console, go to **Threat Defense > Security Events**.

### Dismiss a security event

You can clear your security events list by dismissing events that you've finished reviewing.
When you dismiss an event, that event is no longer visible in the list.

1. In the web console, go to **Threat Defense > Security Events**.
1. Find a security event in the list, and then click **Action > Dismiss Security Event**.

### Create a security event exception

You can prevent certain kinds of security events from appearing in the list by creating a security event exception.
This is helpful if you want to reduce alert noise for workloads that you know are safe.
When you create an exception, all matching security events are removed from the security events list.
Future matches will not appear in the list.

1. In the web console, go to **Threat Defense > Security Events**.
1. Find a security event in the list, and then click **Action > Add exception**.
1. On the **Create an Exception** dialog, select a scope for the exception and click **Create Exception**.

You can manage your exceptions by clicking **Threat Defense > Security Events > Exceptions**.
You can browse, edit, and delete exceptions on the list.

### UI help

**Event details page**

Provides actions to remediate the detection and stop the attack from progressing. For example:

![waf-security](/img/calico-enterprise/waf-security-events-latest.png)

**Severity**

$[prodname] calculates severity (Critical, High, Medium, Low) using a combination of NIST CVSS 3.0 and MITRE IDs.

**MITRE IDs**

Multiple MITRE IDs may be associated with a security event.

**Attack Vector**
- Network
- Process
- File

**MITRE Tactic** (based on the [MITRE tactics](https://attack.mitre.org/tactics/enterprise/)) includes a specific path, method, or scenario that can compromise cluster security.
Valid entries: - -| Tactic | Target | Attack techniques | -| ------------------------------------------------------------ | ------------------------------- | ------------------------------------------------------------ | -| [Initial access](https://attack.mitre.org/tactics/TA0001/) | Network | Gain an initial foothold within a network using various entry vectors. | -| [Execution](https://attack.mitre.org/tactics/TA0002/) | Code in local or remove systems | Control code running on local or remote systems using malicious code. | -| [Impact](https://attack.mitre.org/tactics/TA0040/) | Systems and data | Disrupt availability or compromise integrity by manipulating business and operational processes. | -| [Persistence](https://attack.mitre.org/tactics/TA0003/) | Maintain footholds | Maintain access to systems across restarts, credential changes, and other interruptions. | -| [Privilege Escalation](https://attack.mitre.org/tactics/TA0004/) | Access permissions | Access higher-level permissions on a system or network. | -| [Defense Evasion](https://attack.mitre.org/tactics/TA0005/) | Avoid detection | Masquerade and hide malware to avoid detection to compromise software, data, scripts, and processes. | -| [Discovery](https://attack.mitre.org/tactics/TA0007/) | Determine your environment | Gain knowledge about your system and internal network. | - -### Frequently asked questions - -**How is the recommended remediation determined?** - -The Tigera Security Research team maps MITRE IDs to events and provides the recommended remediation. - -**Will I see all $[prodname] alerts in this dashboard?** - -No. $[prodname] security events do not encompass all types of alerts nor all security alert types; they only contain alerts for threats. Alerts for vulnerabilities detected in a container image, or misconfigurations in your Kubernetes cluster, are displayed in their respective dashboards. However, when vulnerabilities or misconfigurations are exploited by an attacker, those indicators of an attack are considered security events. - -**What does dismissing a security event do?** - -Dismissing a security event hides it from view. - -**Why are some fields in columns blank?** - -Security events generated from older managed clusters will not have values for the new fields (for example, MITRE IDs). You can dismiss these events. - -**Where can I view security event logs?** - -Go to: **Logs**, Kibana index, `tigera_secure_ee_events`. \ No newline at end of file diff --git a/calico-enterprise_versioned_docs/version-3.19-2/threat/suspicious-domains.mdx b/calico-enterprise_versioned_docs/version-3.19-2/threat/suspicious-domains.mdx deleted file mode 100644 index 45aa40478a..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/threat/suspicious-domains.mdx +++ /dev/null @@ -1,144 +0,0 @@ ---- -description: Add threat intelligence feeds to trace DNS queries that involve suspicious domains. ---- - -# Trace and alert on suspicious domains - -## Big picture - -Add threat intelligence feeds to $[prodname] to trace DNS queries involving suspicious domains. - -## Value - -$[prodname] integrates with threat intelligence feeds so you can detect when endpoints in your Kubernetes clusters query DNS for suspicious domains, or receive answers with suspicious domains. When events are detected, an anomaly detection dashboard in the UI shows the full context, including which pod(s) were involved so you can analyze and remediate. - -## Concepts - -### Pull or push threat feeds? 
- -$[prodname] supports both push and pull methods for updating threat feeds. Use the **pull method** for fully automated threat feed updates without user intervention. Use the **push method** to schedule your own updates or if your threat feed is not available over HTTP(S). - -### Domain name threat feeds - -A best practice is to develop an allow-list of "known-good" domains that particular applications or services must access, and then [enforce this allow-list with network policy](../network-policy/domain-based-policy.mdx). - -In addition to allow-lists, you can use threat feeds to monitor your cluster for DNS queries to known malicious or suspicious domain names. $[prodname] monitors DNS queries and generates alerts for any that are listed in your threat feed. - -Threat feeds for domain names associated with malicious **egress** activity (e.g. command and control (C2) servers or data exfiltration), provide the most security value. Threat feeds that associate domain names with malicious **ingress** activity (e.g. port scans or IP sweeps) are less useful since these activities do not cause endpoints in your cluster to query DNS. It is better to consider [IP-based threat feeds](suspicious-ips.mdx) for ingress activity. - -## Before you begin... - -### Required - -Privileges to manage GlobalThreatFeed. - -### Recommended - -We recommend that you turn down the aggregation of DNS logs sent to Elasticsearch for configuring threat feeds. If you do not adjust DNS log aggregation settings, $[prodname] aggregates DNS queries from workloads in the same replica set. This means if a suspicious DNS query is detected, you will only know which replica set made the query and not which specific pod. Go to: [FelixConfiguration](../reference/resources/felixconfig.mdx) and set the field, **dnsLogsFileAggregationKind** to **0** to log individual pods separately. - -## How to - -This section describes how to pull or push threat feeds to $[prodname]. - -- [Pull threat feed updates](#pull-threat-feed-updates) -- [Push threat feed updates](#push-threat-feed-updates) - -### Pull threat feed updates - -To add threat feeds to $[prodname] for automatic updates (default is once a day), the threat feed(s) must be available using HTTP(S), and return a newline-separated list of domain names. - -#### Using the web console - -1. From the web console, select **Threat Feeds** --> **Add Feed**. -2. Add your threat feed on the Add a New Threat Feed window. For example: - - **Feed Name**: feodo-tracker - - **Description**: This is my threat feed based on domains. - - **URL**: https://my.threatfeed.com/deny-list - - **Content type**: DomainNameSet - - **Labels**: Choose a label from the list. -3. Click **Save Changes**. -
    - From the **Action** menu, you can view or edit the details that you entered and can download the manifest file. - -> Go to the Security Events page to view events that are generated when an endpoint in the cluster queries a name on the list. For more information, see [Manage alerts](../observability/alerts.mdx). - -#### Using CLIs - -1. Create the GlobalThreatFeed YAML and save it to file. - The simplest example of this looks like the following. Replace the **name** and the **URL** with your feed. - - ```yaml - apiVersion: projectcalico.org/v3 - kind: GlobalThreatFeed - metadata: - name: my-threat-feed - spec: - content: DomainNameSet - mode: Enabled - description: 'This is my threat feed' - feedType: Custom - pull: - http: - url: https://my.threatfeed.com/deny-list - ``` - -2. Add the global threat feed to the cluster. - - ```shell - kubectl apply -f - ``` - -> Go to the Security Events page to view events that are generated when an endpoint in the cluster queries a name on the list. For more information, see [Manage alerts](../observability/alerts.mdx). - -### Push threat feed updates - -Use the push method if your threat feeds that are not in newline-delimited format, not available over HTTP, or if you prefer to push updates as they become available. - -1. Create the GlobalThreatFeed YAML and save it to file. - Replace the **name** field with your own name. The name is important in the later steps so make note of it. - - ```yaml - apiVersion: projectcalico.org/v3 - kind: GlobalThreatFeed - metadata: - name: my-threat-feed - spec: - content: DomainNameSet - mode: Enabled - description: 'This is my threat feed' - feedType: Custom - ``` - -2. Add the global threat feed to the cluster. - - ```shell - kubectl apply -f - ``` - -3. Configure or program your threat feed to write updates to Elasticsearch. This Elasticsearch document is written using the **tigera_secure_ee_threatfeeds_domainnameset.\.** alias and must have the ID set to the name of the global threat feed object. The doc should have a single field called **domains**, containing a list of domain names. For example: - - ``` - PUT tigera_secure_ee_threatfeeds_domainnameset.cluster./_doc/my-threat-feed - { - "domains" : ["malicious.badstuff", "hacks.r.us"] - } - ``` - - Note that to push data to ES, you'll need to configure a policy that allows that information to reach the ES cluster. - See the Elasticsearch document APIs for how to create and update documents in Elasticsearch. - - If no alias exists in your Elasticsearch cluster, configure to write the Elastic document by specifying an index. - Since the index name `` contains a date pattern, make sure to send the request using the index name URL encoded. - - ``` - PUT /%3Ctigera_secure_ee_threatfeeds_domainnameset.cluster.linseed-%7Bnow%2Fs%7ByyyyMMdd%7D%7D-000001%3E/_doc/my-threat-feed - { - "domains" : ["malicious.badstuff", "hacks.r.us"] - } - ``` - -4. In the $[prodname] web console, go the “Security Events” page to view events that are generated when an endpoint in the cluster queries a name on the list. - -## Additional resources - -See [GlobalThreatFeed](../reference/resources/globalthreatfeed.mdx) resource definition for all configuration options. 
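As a convenience, the DNS log aggregation setting recommended in **Before you begin** can also be applied with a one-line merge patch to the default FelixConfiguration. This mirrors the flow-log patch shown in [Trace and block suspicious IPs](suspicious-ips.mdx); the resource name `default` is the usual FelixConfiguration name, so adjust it if your cluster uses a different one:

```bash
kubectl patch felixconfiguration default --type='merge' -p '{"spec":{"dnsLogsFileAggregationKind":0}}'
```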
diff --git a/calico-enterprise_versioned_docs/version-3.19-2/threat/suspicious-ips.mdx b/calico-enterprise_versioned_docs/version-3.19-2/threat/suspicious-ips.mdx deleted file mode 100644 index 222a9cfa15..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/threat/suspicious-ips.mdx +++ /dev/null @@ -1,359 +0,0 @@ ---- -description: Add threat intelligence feeds to trace network flows of suspicious IP addresses, and optionally block traffic to them. ---- - -# Trace and block suspicious IPs - -## Big picture - -Add threat intelligence feeds to $[prodname] to trace network flows of suspicious IP addresses, and optionally block traffic to suspicious IPs. - -## Value - -$[prodname] integrates with threat intelligence feeds so you can detect when your Kubernetes clusters communicate with suspicious IPs. When communications are detected, an anomaly detection dashboard in the UI shows the full context, including which pod(s) were involved so you can analyze and remediate. You can also use a threat intelligence feed to power a dynamic deny-list, either to or from a specific group of sensitive pods, or your entire cluster. - -## Concepts - -### Pull or push threat feeds? - -$[prodname] supports both push and pull methods for updating threat feeds. Use the **pull method** for fully automated threat feed updates without user intervention. Use the **push method** to schedule your own updates or if your threat feed is not available over HTTP(S). - -### Suspicious IPs: test before you block - -There are many different types of threat intelligence feeds (community-curated, company-paid, and internally-developed) that you can choose to monitor in $[prodname]. We recommend that you assess the threat feed contents for false positives before blocking based on the feed. If you decide to block, test a subset of your workloads before rolling to production to ensure legitimate application traffic is not blocked. - -## Before you begin... - -### Required - -Privileges to manage GlobalThreatFeed and GlobalNetworkPolicy. - -### Recommended - -We recommend that you turn down the aggregation of flow logs sent to Elasticsearch for configuring threat feeds. If you do not adjust flow logs, Calico Enterprise aggregates over the external IPs for allowed traffic, and threat feed searches will not provide useful results (unless the traffic is denied by policy). Go to: [FelixConfiguration](../reference/resources/felixconfig.mdx) and set the field, **flowLogsFileAggregationKindForAllowed** to **1**. - -You can adjust the flow logs by running the following command: - -```bash -kubectl patch felixconfiguration default --type='merge' -p '{"spec":{"flowLogsFileAggregationKindForAllowed":1}}' -``` - -## How to - -This section describes how to pull or push threat feeds to $[prodname], and block traffic to a cluster for a suspicious IP. - -- [Pull threat feed updates](#pull-threat-feed-updates) -- [Push threat feed updates](#push-threat-feed-updates) -- [Block traffic to a cluster](#block-traffic-to-a-cluster) - -### Pull threat feed updates - -To add threat feeds to $[prodname] for automatic updates (default is once a day), the threat feed(s) must be available using HTTP(S), and return a newline-separated list of IP addresses or prefixes in CIDR notation. - -#### Using the web console - -1. From the web console, select **Threat Feeds** --> **Add Feed**. -2. Add your threat feed on the Add a New Threat Feed window. For example: - - **Feed Name**: feodo-tracker - - **Description**: This is the feodo-tracker threat feed. 
- - **URL**: [https://feodotracker.abuse.ch/downloads/ipblocklist.txt](https://feodotracker.abuse.ch/downloads/ipblocklist.txt) - - **Content type**: IPSet - - **Labels**: Choose a label from the list. -3. Click **Save Changes**. -
    - From the **Action** menu, you can view or edit the details that you entered and can download the manifest file.

> Go to the Security Events page to view events that are generated when an IP is displayed on the threat feed list. For more information, see [Manage alerts](../observability/alerts.mdx). When you create a global threat feed in the web console, network traffic is not automatically blocked. If you find suspicious IPs on the Security Events page, you need to create a network policy to block the traffic. For help with policy, see [Block traffic to a cluster](#block-traffic-to-a-cluster).

#### Using CLIs

1. Create the GlobalThreatFeed YAML and save it to a file.
   The simplest example looks like the following. Replace the **name** and the **URL** with your feed.

   ```yaml
   apiVersion: projectcalico.org/v3
   kind: GlobalThreatFeed
   metadata:
     name: my-threat-feed
   spec:
     content: IPSet
     mode: Enabled
     description: 'This is my threat feed'
     feedType: Custom
     pull:
       http:
         url: https://my.threatfeed.com/deny-list
   ```

2. Add the global threat feed to the cluster.

   ```bash
   kubectl apply -f <your_threat_feed_filename>
   ```

> Go to the Security Events page to view events that are generated when an IP is displayed on the threat feed list. For more information, see [Manage alerts](../observability/alerts.mdx).

### Push threat feed updates

Use the push method if your threat feeds are not in newline-delimited format, are not available over HTTP, or if you prefer to push updates as they become available.

1. Create the GlobalThreatFeed YAML and save it to a file.
   Replace the **name** field with your own name. The name is important in later steps, so make a note of it.

   ```yaml
   apiVersion: projectcalico.org/v3
   kind: GlobalThreatFeed
   metadata:
     name: my-threat-feed
   spec:
     content: IPSet
     mode: Enabled
     description: 'This is my threat feed'
     feedType: Custom
   ```

2. Add the global threat feed to the cluster.

   ```bash
   kubectl apply -f <your_threat_feed_filename>
   ```

3. Configure or program your threat feed to write updates to Elasticsearch. This Elasticsearch document is written using the **tigera_secure_ee_threatfeeds_ipset.\<cluster_name\>.** alias and must have the ID set to the name of the global threat feed object. The doc should have a single field called **ips**, containing a list of IP prefixes. For example:

   ```
   PUT tigera_secure_ee_threatfeeds_ipset.cluster./_doc/my-threat-feed
   {
     "ips" : ["99.99.99.99/32", "100.100.100.0/24"]
   }
   ```

   See the Elasticsearch document APIs for how to create and update documents in Elasticsearch.

   If no alias exists in your Elasticsearch cluster, write the Elasticsearch document by specifying an index instead.
   Because the index name contains a date pattern, make sure to send the request with the index name URL encoded.

   ```
   PUT /%3Ctigera_secure_ee_threatfeeds_ipset.cluster.linseed-%7Bnow%2Fs%7ByyyyMMdd%7D%7D-000001%3E/_doc/my-threat-feed
   {
     "ips" : ["99.99.99.99/32", "100.100.100.0/24"]
   }
   ```

4. In the $[prodname] web console, go to the "Security Events" page to view events that are generated when an IP is displayed on the threat feed list.

### Block traffic to a cluster

Create a new threat feed (or edit an existing one) to include the globalNetworkSet stanza, setting the labels you want to use to represent the deny-listed IPs. This stanza instructs $[prodname] to search for flows to and from the listed IP addresses, and to maintain a GlobalNetworkSet containing those IP addresses.
- -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalThreatFeed -metadata: - name: sample-global-threat-feed -spec: - content: IPSet - mode: Enabled - description: 'This is the sample global threat feed' - feedType: Custom - pull: - http: - url: https://an.example.threat.feed/deny-list - globalNetworkSet: - labels: - security-action: block -``` - -1. Add the global threat feed to the cluster. - - ```bash - kubectl apply -f - ``` - -2. Create a GlobalNetworkPolicy that blocks traffic based on the threat feed, by selecting sources or destinations using the labels you assigned in step 1. For example, the following GlobalNetworkPolicy blocks all traffic coming into the cluster if it came from any of the suspicious IPs. - - ```yaml - apiVersion: projectcalico.org/v3 - kind: GlobalNetworkPolicy - metadata: - name: default.blockthreats - spec: - tier: default - selector: all() - types: - - Ingress - ingress: - - action: Deny - source: - selector: security-action == 'block' - ``` - -3. Add the global network policy to the cluster. - - ```bash - kubectl apply -f - ``` - -## Tutorial - -In this tutorial, we’ll walk through setting up a threat feed to search for connections to suspicious IPs. Then, we’ll use the same threat feed to block traffic to those IPs. - -We will use the free [FEODO botnet tracker](https://feodotracker.abuse.ch/) from abuse.ch that lists IP addresses associated with command and control servers. But the steps are the same for your commercial or internal threat feeds. - -If you haven’t already adjusted your [aggregation flows](#before-you-begin), we recommend it before you start. - -### Configure the threat feed - -1. Create a file called feodo-tracker.yaml with the following contents: - - ```yaml - apiVersion: projectcalico.org/v3 - kind: GlobalThreatFeed - metadata: - name: feodo-tracker - spec: - content: IPSet - mode: Enabled - description: 'This is the feodo-tracker threat feed' - feedType: Custom - pull: - http: - url: https://feodotracker.abuse.ch/downloads/ipblocklist.txt - ``` - - This pulls updates using the default period of once per day. See the [Global Resource Threat Feed API](../reference/resources/globalthreatfeed.mdx) for all configuration options. - -2. Add the feed to your cluster. - - ```bash - kubectl apply -f feodo-tracker.yaml - ``` - -### Check search results - -Open the $[prodname] web console, and navigate to the “Security Events” page. If any of your pods have been communicating with the IP addresses in the FEODO tracker feed, you will see the results listed on this page. It is normal to not see any events listed on this page. - -### Block pods from contacting IPs - -If you have high confidence in the IP addresses listed as malicious in a threat feed, you can take stronger action than just searching for connections after the fact. For example, the FEODO tracker lists IP addresses used by command and control servers for botnets. We can configure $[prodname] to block all egress traffic to addresses on this list. - -It is strongly recommended that you assess the contents of a threat feed for false positives before using it as a deny-list, and that you apply it to a test subset of your workloads before rolling out application or cluster-wide. Failure to do so could cause legitimate application traffic to be blocked and could lead to an outage in your application. - -In this demo, we will apply the policy only to a test workload (so we do not impact other traffic). - -1. 
Create a file called **tf-ubuntu.yaml** with the following contents: - - ```yaml - apiVersion: v1 - kind: Pod - metadata: - labels: - docs.tigera.io-tutorial: threat-feed - name: tf-ubuntu - spec: - nodeSelector: - kubernetes.io/os: linux - containers: - - command: - - sleep - - '3600' - image: ubuntu - name: test - ``` - -2. Apply the pod configuration. - - ```bash - kubectl apply -f tf-ubuntu.yaml - ``` - -3. Edit the feodo-tracker.yaml to include a globalNetworkSet stanza: - - ```yaml - apiVersion: projectcalico.org/v3 - kind: GlobalThreatFeed - metadata: - name: feodo-tracker - spec: - content: IPSet - mode: Enabled - description: 'This is the feodo-tracker threat feed' - feedType: Custom - pull: - http: - url: https://feodotracker.abuse.ch/downloads/ipblocklist.txt - globalNetworkSet: - labels: - docs.tigera.io-threat-feed: feodo - ``` - -4. Reapply the new YAML. - - ```bash - kubectl apply -f feodo-tracker.yaml - ``` - -5. Verify that the GlobalNetworkSet is created. - - ```bash - kubectl get globalnetworksets threatfeed.feodo-tracker -o yaml - ``` - -### Apply global network policy - -We will now apply a GlobalNetworkPolicy that blocks the test workload from connecting to any IPs in the threat feed. - -1. Create a file called block-feodo.yaml with the following contents: - - ```yaml - apiVersion: projectcalico.org/v3 - kind: GlobalNetworkPolicy - metadata: - name: default.block-feodo - spec: - tier: default - selector: docs.tigera.io-tutorial == 'threat-feed' - types: - - Egress - egress: - - action: Deny - destination: - selector: docs.tigera.io-threat-feed == 'feodo' - - action: Allow - ``` - -2. Apply this policy to the cluster - - ```bash - kubectl apply -f block-feodo.yaml - ``` - -### Verify policy on test workload - -We will verify the policy from the test workload that we created earlier. - -1. Get a shell in the pod by running the following command: - - ```bash - kubectl exec -it tf-ubuntu -- bash - ``` - - You should get a prompt inside the pod. - -2. Install the ping command. - - ```bash - apt update && apt install iputils-ping - ``` - -3. Ping a known safe IP (like 8.8.8.8, Google’s public DNS server). - - ```bash - ping 8.8.8.8 - ``` - -4. Open the [FEODO tracker list](https://feodotracker.abuse.ch/downloads/ipblocklist.txt) and choose an IP on the list to ping. - You should not get connectivity, and the pings will show up as denied traffic in the flow logs. - -## Additional resources - -See [GlobalThreatFeed](../reference/resources/globalthreatfeed.mdx) resource definition for all configuration options. diff --git a/calico-enterprise_versioned_docs/version-3.19-2/threat/tor-vpn-feed-and-dashboard.mdx b/calico-enterprise_versioned_docs/version-3.19-2/threat/tor-vpn-feed-and-dashboard.mdx deleted file mode 100644 index cd68bff338..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/threat/tor-vpn-feed-and-dashboard.mdx +++ /dev/null @@ -1,69 +0,0 @@ ---- -description: Detect and analyze malicious anonymization activity using Tor-VPN feeds. ---- - -# Anonymization attacks - -## Big picture - -Detect and analyze malicious anonymization activity using Tor-VPN feeds. - -## Value - -**Tor and VPN infrastructure** are used in enabling anonymous communication, where an attacker can leverage anonymity to scan, attack or compromise the target. It’s hard for network security teams to track malicious actors using such anonymization tools. 
**Tor and VPN feeds** address this by tracking the Tor bulk exit nodes and most of the anonymizing VPN infrastructure on the internet. **The Tor-VPN Dashboard** gives network security teams a cluster-wide view and granular control over logs, so they can monitor any detected activity and respond quickly, which is critical for stopping an attack in its early stages.

## Concepts

### About Tor and VPN threats

**Tor** is a popular anonymization network on the internet. It is also popular with malicious actors, hacktivist groups, and criminal enterprises, because the infrastructure hides the real identity of an attacker carrying out malicious activities. Tor has historically been investigated by state-level intelligence agencies in the US and UK in connection with criminal activities such as the Silk Road marketplace and the Mirai botnet command and control, although it is not possible to completely de-anonymize an attacker. The **Tor bulk exit feed** tracks all Tor exit IPs on the internet, so you can identify attackers who use the Tor infrastructure.
Over the years, many Tor flaws became public, and attackers evolved to layer additional VPNs on top of the Tor network. Many individual VPN providers offer anonymizing infrastructure, and attackers can combine these VPN providers with existing options like Tor to preserve their anonymity. To help security teams, the **X4B VPN feed** detects all the major VPN providers on the internet.

### Tor-VPN feed types

**Tor Bulk Exit feed**
The Tor Bulk Exit feed lists the Tor exit nodes available on the internet that are used by the Tor network. The list is continuously updated and maintained by the Tor project. An attacker using the Tor network is likely to use one of the bulk exit nodes to connect to your infrastructure. Network security teams can detect such activity with the Tor bulk exit feed and investigate as required.

**X4B VPN feed**
It has become common to chain multiple anonymization networks to hide a real attacker's identity. There are many lists of open proxies and Tor nodes on the web, but surprisingly few usable ones dedicated to VPN providers and datacenters. This list combines known VPN netblocks and ASNs owned by datacenters and VPN providers. It does not cover every VPN, but it should cover the vast majority of common ones.

### The $[prodname] Tor-VPN dashboard

The Tor-VPN dashboard helps network security teams monitor and respond to any activity detected by the Tor and VPN feeds. It provides cluster context for each detection and shows multiple artifacts (for example, flow logs, filtering controls, a tag cloud, and a line graph) to help you analyze the activity and respond faster.
To access the Tor-VPN dashboard:

- Log in to the $[prodname] web console, go to **Kibana**, select **Dashboard**, and then select **Tor-VPN Dashboard**.

## Before you begin...

### Required

Privileges to manage GlobalThreatFeed, that is, the `intrusion-detection-controller` cluster role.

### Recommended

We recommend that you turn down the aggregation of flow logs sent to Elasticsearch when configuring threat feeds. If you do not adjust flow logs, $[prodname] aggregates over the external IP addresses for allowed traffic, and threat feed searches will not provide useful results (unless the traffic is denied by policy). Go to [FelixConfiguration](../reference/resources/felixconfig.mdx) and set the field **flowLogsFileAggregationKindForAllowed** to **1**.
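For example, you can apply this setting with a single merge patch (the same command shown in [Trace and block suspicious IPs](suspicious-ips.mdx)):

```bash
kubectl patch felixconfiguration default --type='merge' -p '{"spec":{"flowLogsFileAggregationKindForAllowed":1}}'
```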
- -## How to - -In this section we will look at how to add Tor and VPN feeds to $[prodname]. Installation process is straightforward as below. - -1. Add threat feed to the cluster. - For VPN Feed, - ```shell - kubectl apply -f $[filesUrl]/manifests/threatdef/vpn-feed.yaml - ``` - For Tor Bulk Exit Feed, - ```shell - kubectl apply -f $[filesUrl]/manifests/threatdef/tor-exit-feed.yaml - ``` -2. Now, you can monitor the Dashboard for any malicious activity. The dashboard can be found at the $[prodname] web console, go to "kibana" and then go to "Dashboard". Select "Tor-VPN Dashboard". -3. Additionally, feeds can be checked using following command: - ```shell - kubectl get globalthreatfeeds - ``` - -## Additional resources - -- See [GlobalThreatFeed](../reference/resources/globalthreatfeed.mdx) resource definition for all configuration options. -- Check example to Trace and block Suspicious IPs [Here](suspicious-ips.mdx) diff --git a/calico-enterprise_versioned_docs/version-3.19-2/threat/web-application-firewall.mdx b/calico-enterprise_versioned_docs/version-3.19-2/threat/web-application-firewall.mdx deleted file mode 100644 index aed0e36ef1..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/threat/web-application-firewall.mdx +++ /dev/null @@ -1,384 +0,0 @@ ---- -description: Configure Calico to use with Layer 7 Web Application Firewall. ---- - -# Workload-based Web Application Firewall (WAF) - -:::note - -This feature is tech preview. Tech preview features may be subject to significant changes before they become GA. - -::: - -## Big picture - -Protect cloud-native applications from application layer attacks with $[prodname] Workload-based Web Application Firewall (WAF). - -## Value - -Our workload-centric Web Application Firewall (WAF) protects your workloads from a variety of application layer attacks originating from within your cluster such as [SQL injection](https://owasp.org/www-community/attacks/SQL_Injection). Given that attacks on apps are the [leading cause of breaches](https://www.f5.com/labs/articles/threat-intelligence/application-protection-report-2019--episode-2--2018-breach-trend), you need to secure the HTTP traffic inside your cluster. - -Historically, web application firewalls (WAFs) were deployed at the edge of your cluster to filter incoming traffic. Our workload-based WAF solution takes a unique, cloud-native approach to web security by allowing you to implement zero-trust rules for workloads inside your cluster. - -## Concepts - -### About $[prodname] WAF - -WAF is deployed in your cluster along with Envoy DaemonSet. $[prodname] proxies selected service traffic through Envoy, checking HTTP requests using the industry-standard -[ModSecurity](https://owasp.org/www-project-modsecurity-core-rule-set/) with OWASP Core Rule Set `v4.0.0-rc2` with some modifications for Kubernetes workloads. - -You simply enable WAF in the web console, and determine the services that you want to enable for WAF protection. By default WAF is set to `DetectionOnly` so no traffic will be denied until you are ready to turn on blocking mode. - -Every request that WAF finds an issue with, will result in a Security Event being created for [you to review in the UI](#view-waf-events), regardless of whether the traffic was allowed or denied. This can greatly help in tuning later. 
- -#### How WAF determines if a request should be allowed or denied - -If you configure WAF in blocking mode, WAF will use something called [anomaly scoring mode](https://coreruleset.org/docs/2-how-crs-works/2-1-anomaly_scoring/) to determine if a request is allowed with `200 OK` or denied `403 Forbidden`. - -This works by matching a single HTTP request against all the configured WAF rules. Each rule has a score and WAF adds all the matched rule scores together, and compares it to the overall anomaly threshold score (100 by default). If the score is under the threshold the request is allowed and if the score is over the threshold the request is denied. Our WAF starts in detection mode only and with a high default scoring threshold so is safe to turn on and then [fine-tune the WAF](#manage-your-waf) for your specific needs in your cluster. - -## Before you begin - -**Not supported** -- GKE - -**Limitations** - -WAF cannot be used with: - - Host-networked client pods - - TLS traffic - - [LoadBalancer services](https://Kubernetes.io/docs/concepts/services-networking/service/#loadbalancer) - - Egress gateways - - WireGuard on AKS or EKS (unless you apply a specific kernel variable). Contact Support for help. - -:::note -When selecting and deselecting traffic for WAF, active connections may be disrupted. -::: - -:::caution - -Enabling WAF for certain system services may result in an undesired cluster state. -- Do not enable WAF for system service with the following prefixes: - - - `tigera-*` - - `calico-*` - - `kube-system` - - `openshift-*` - -- Do not enable WAF for system services with the following combination of name and namespaces: - - name: `Kubernetes`, namespace: `default` - - name: `openshift`, namespace: `default` - - name: `gatekeeper-webhook-service`, namespace: `gatekeeper-system` - -The rules are not overridden during upgrade, you will have to manage deploying updates to the OWASP Core Rule Set to the cluster over time. - -If you modify the rules, it is recommended to keep your rules in git or similar source control systems. - -::: - -## How to - -- [Enable WAF on your cluster](#enable-waf) -- [Apply WAF to your services](#apply-waf) -- [View WAF events](#view-waf-events) -- [Manage WAF Configuration](#manage-waf-configuration) -- [Disable WAF feature from your cluster](#disable-waf-feature-from-your-cluster) - -### Enable WAF - -#### (Optional) Deploy a sample application -If you don’t have an application to test WAF with or don’t want to use it right away against your own application, -we recommend that you install the [GoogleCloudPlatform/microservices-demo app](https://github.com/GoogleCloudPlatform/microservices-demo): - -```bash -kubectl apply -f https://raw.githubusercontent.com/GoogleCloudPlatform/microservices-demo/main/release/kubernetes-manifests.yaml -``` - -#### Enable WAF using the CLI - -##### Using kubectl - -In the ApplicationLayer custom resource, named `tigera-secure`, set the `webApplicationFirewall` field to `Enabled`. - -```bash -kubectl apply -f - < - -### Apply WAF to services - -Now that you have deployed WAF in your cluster, you can select the services you want to protect from application layer attacks. - -If you have deployed the sample application, you can apply WAF on a service associated with your app, as follows: -```bash -kubectl annotate svc frontend -n default --overwrite projectcalico.org/l7-logging=true -``` -Alternatively, you can use the web console to apply WAF to the `frontend` service. 
- -In this example, we applied WAF to the `frontend` service. This means that every request that goes through the `frontend` service is inspected. -However, the traffic is not blocked because the WAF rule is set to `DetectionOnly` by default. You can adjust rules and start blocking traffic by [fine-tuning your WAF](#manage-your-waf). - -In the previous example, we applied WAF to the `frontend` service of the sample application. Here, we are -applying WAF to a service of your own application. - -1. On the web console, click **Threat Defense**, **Web Application Firewall**. -2. Select the services you want WAF to inspect, and then click **Confirm Selections**. - - WAF services - -3. On the **Web Application Firewall** page, you can verify that WAF is enabled for a service by locating the service and checking that the **Status** column says **Enabled**. - -4. To make further changes to a service, click **Actions**, and then **Enable** or **Disable**. - -You have now applied WAF rule sets to your own services, and note that the traffic that goes through the selected services will be alerted but not blocked by default. - -#### Trigger a WAF event -If you would like to trigger a WAF event for testing purposes, you can simulate an SQL injection attack inside your cluster by crafting a HTTP request with a query string that WAF will detect as an SQL injection attempt. -The query string in this example has some SQL syntax embedded in the text. This is harmless and for demo purposes, but WAF will detect this pattern and create an event for this HTTP request. - -Run a simple curl command from any pod inside your cluster targeting a service you have selected for WAF protection e.g. from the demo app above we could attempt to send a simple HTTP request to the cartservice. -``` - curl http://cartservice/cart?artist=0+div+1+union%23foo*%2F*bar%0D%0Aselect%23foo%0D%0A1%2C2%2Ccurrent_user -``` - -### Manage WAF configuration - -Reviewing the default rule set config: - -```bash -Include @coraza.conf-recommended -Include @crs-setup.conf.example -Include @owasp_crs/*.conf - -SecRuleEngine DetectionOnly -``` - -The configuration file starts with importing the appropriate rule set config. We use Coraza WAF's recommended [Core Rule Set setup](https://coraza.io/docs/tutorials/coreruleset/) files: - -1. Coraza recommended [configuration](https://github.com/corazawaf/coraza/blob/main/coraza.conf-recommended) -1. The rest of the [coreruleset](https://github.com/coreruleset/coreruleset) files, currently [v4.0.0-rc2](https://github.com/coreruleset/coreruleset/tree/v4.0.0-rc2) - -These files can be customized if desired. Add all your customizations directly under `tigera.conf`: - -```bash -kubectl edit cm -n tigera-operator modsecurity-ruleset -``` - -After editing this ConfigMap successfully, the `modsecurity-ruleset` ConfigMap will be replaced in the `tigera-operator` namespace, -which then triggers a rolling restart of your L7 pods. This means that the HTTP connections going through L7 pods at the time of pod termination will be (RST) reset. - -:::note - -It is important to adhere to the [Core Rule Set documentation](https://coreruleset.org/docs) on how to edit the behaviour of - your WAF. A good place to begin at is the [Installing Core Rule Set](https://coreruleset.org/docs/1-getting-started/1-1-crs-installation/). - -In many scenarios, the default example CRS configuration will be a good enough starting point. 
It is recommended to review the example configuration file before -you deploy it to make sure it’s right for your environment. -::: - -#### Customization options - -##### Set WAF to block traffic -By default WAF will not block a request even if it has matching rule violations. The rule engine is set to `DetectionOnly`. You can configure to block traffic instead with an `HTTP 403 Forbidden` response status code when the combined matched rules scores exceed a certain threshold. - -1. Edit the configmap: - ```bash - kubectl edit cm -n tigera-operator modsecurity-ruleset - ``` -2. Look for `SecRuleEngine DetectionOnly` and change it to `SecRuleEngine On`. -3. Save your changes. This triggers a rolling update of the L7 pods. - -| Action | Description | Disruptive? | -| ------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------- | -| DetectionOnly | Traffic is not denied nor dropped. $[prodname] will log events. | No -| On | Denies HTTP traffic. $[prodname] will log the event in Security Events. | Yes | -| Off | Be cautious about using this option. Traffic is not denied, and there are no events. |No | Yes | - -##### Other basic customizations - -For basic customizations, it's best to add it after all the includes in `tigera.conf`. In fact, this is the reason why the `SecRuleEngine` directive and the rest of [our customizations](https://github.com/tigera/operator/blob/master/pkg/render/applicationlayer/ruleset/coreruleset/tigera.conf#L8-L17) are situated there. - -An example is adding a sampling mode. For that, the `tigera.conf` will look like this: - -```bash -# Core Rule Set activation -Include @coraza.conf-recommended -Include @crs-setup.conf.example -Include @owasp_crs/*.conf - -SecRuleEngine DetectionOnly - -# --- all customizations appear below this line, unless they need a specific loading order like plugins --- - -# --- Add sampling mode -# Read about sampling mode here https://coreruleset.org/docs/2-how-crs-works/2-4-sampling_mode/ -SecAction "id:900400,\ - phase:1,\ - pass,\ - nolog,\ - setvar:tx.sampling_percentage=50" -``` - -Also you can disable certain rules here: - -```bash -# --- disable 'Request content type is not allowed by policy' -SecRuleRemoveById 920420 -``` - -Change anomaly scoring threshold: - -```bash - SecAction \ - "id:900110,\ - phase:1,\ - nolog,\ - pass,\ - t:none,\ - setvar:tx.inbound_anomaly_score_threshold=25,\ - setvar:tx.outbound_anomaly_score_threshold=20" -``` - -Or even change rule action parameters or behavior. For example: - - ```bash - # --- append to more allowed content types to request bodies -SecAction \ - "id:900220,\ - phase:1,\ - nolog,\ - pass,\ - t:none,\ - setvar:'tx.allowed_request_content_type=|application/x-www-form-urlencoded| |multipart/form-data| |multipart/related| |text/xml| |application/xml| |application/soap+xml| |application/json| |application/cloudevents+json| |application/cloudevents-batch+json| |application/grpc|'" -``` - -##### Using Core Rule Set plugins - -Let's go with an example plugin: [Wordpress Rule Exclusions](https://github.com/coreruleset/wordpress-rule-exclusions-plugin/). 
- -Plugin files are the following: - -``` -wordpress-rule-exclusions-before.conf -wordpress-rule-exclusions-config.conf -``` - -To include these files properly, structure your work directory like so: - -``` -tigera.conf -wordpress-rule-exclusions-before.conf -wordpress-rule-exclusions-config.conf -``` - -and then `tigera.conf` contents should be: - -```bash -Include @coraza.conf-recommended - -Include /etc/modsecurity-ruleset/wordpress-rule-exclusions-config.conf -Include /etc/modsecurity-ruleset/wordpress-rule-exclusions-before.conf - -Include @crs-setup.conf.example -Include @owasp_crs/*.conf - -# if your plugin has an -after.conf, include them here -# but wordpress rule exclusions doesn't so we're adding a comment placeholder -# Include /etc/modsecurity-ruleset/wordpress-rule-exclusions-after.conf - -SecRuleEngine DetectionOnly -``` - -Then create and apply the configmap: - -```bash -## create the configuration map itself -kubectl create cm --dry-run=client \ - --from-file=tigera.conf \ - --from-file=wordpress-rule-exclusions-config.conf \ - --from-file=wordpress-rule-exclusions-before.conf \ - -n tigera-operator modsecurity-ruleset -o yaml > rule set.configmap.yaml - -## replace active configmap -kubectl replace -f rule set.configmap.yaml -``` - -Read more about the order of execution for plugins here: https://coreruleset.org/docs/4-about-plugins/4-1-plugins/ - -### View WAF events - -#### Security Events - -To view WAF events in a centralized security events dashboard, go to: **Threat defense**, **Security Events**. For help, see [Security Event Management](../threat/security-event-management). - -#### Kibana - -To view WAF events In Kibana, select the `tigera_secure_ee_waf*` index pattern. - -#### Disable WAF for a service - -To disable WAF on a service, use the Actions menu on the WAF board, or use the following command: - -```bash -kubectl annotate svc -n projectcalico.org/l7-logging- -``` - -### Disable WAF feature from your cluster - -To safely disable WAF, determine how to handle ApplicationLayer features and follow the steps: - -#### Disable all ApplicationLayer features, including WAF - -```bash -kubectl delete applicationlayer tigera-secure -``` - -#### Keep some ApplicationLayer features enabled - -To disable WAF but keep some ApplicationLayer features enabled, you must update the [ApplicationLayer](../reference/installation/api#applicationlayer) custom resource. - -Note that the [ApplicationLayer Specification](../reference/installation/api#applicationlayerspec) can specify configuration for [application logging](../reference/installation/api#operator.tigera.io/v1.logcollectionspec) and [application layer policy](../reference/installation/api#operator.tigera.io/v1.applicationlayerpolicystatustype) also. 
- -For the ApplicationLayer custom resource to be valid, at least one of these features have to be enabled, for example: - - -##### Valid YAML - -WAF enabled, ALP disabled, and Log collection is unspecified (and the default is disabled) - -```yaml -apiVersion: operator.tigera.io/v1 -kind: ApplicationLayer -metadata: - name: tigera-secure -spec: - webApplicationFirewall: Disabled - applicationLayerPolicy: Enabled -``` - -###### Invalid YAML - -WAF and ALP both set as disabled, log collection is unspecified (and the default is disabled) -```yaml -apiVersion: operator.tigera.io/v1 -kind: ApplicationLayer -metadata: - name: tigera-secure -spec: - webApplicationFirewall: Disabled - applicationLayerPolicy: Disabled -``` diff --git a/calico-enterprise_versioned_docs/version-3.19-2/variables.js b/calico-enterprise_versioned_docs/version-3.19-2/variables.js deleted file mode 100644 index c6396b8b10..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/variables.js +++ /dev/null @@ -1,37 +0,0 @@ -const releases = require('./releases.json'); -const componentImage = require('../../src/components/utils/componentImage'); - -const variables = { - releaseTitle: 'v3.19.8', - prodname: 'Calico Enterprise', - prodnamedash: 'calico-enterprise', - version: 'v3.19', - openSourceVersion: releases[0].calico.minor_version.slice(1), - baseUrl: '/calico-enterprise/3.19', - filesUrl: 'https://downloads.tigera.io/ee/v3.19.8', - // No rpmsUrl for this release - tutorialFilesURL: 'https://docs.tigera.io/files', - tmpScriptsURL: 'https://docs.tigera.io/calico-enterprise/3.19', - windowsScriptsURL: 'https://raw.githubusercontent.com/kubernetes-sigs/sig-windows-tools/master/hostprocess', - prodnameWindows: 'Calico Enterprise for Windows', - downloadsurl: 'https://downloads.tigera.io', - nodecontainer: 'cnx-node', - noderunning: 'calico-node', - rootDirWindows: 'C:\\TigeraCalico', - registry: 'quay.io/', - chart_version_name: 'v3.19.8-0', - tigeraOperator: releases[0]['tigera-operator'], - dikastesVersion: releases[0].components.dikastes.version, - releases, - imageNames: { - node: 'tigera/cnx-node', - kubeControllers: 'tigera/kube-controllers', - }, - componentImage: { - cnxNode: componentImage('cnx-node', releases[0]), - calicoctl: componentImage('calicoctl', releases[0]), - calicoq: componentImage('calicoq', releases[0]), - }, -}; - -module.exports = variables; diff --git a/calico-enterprise_versioned_sidebars/version-3.19-2-sidebars.json b/calico-enterprise_versioned_sidebars/version-3.19-2-sidebars.json deleted file mode 100644 index 402db1954d..0000000000 --- a/calico-enterprise_versioned_sidebars/version-3.19-2-sidebars.json +++ /dev/null @@ -1,1090 +0,0 @@ -{ - "calicoEnterpriseSidebar": [ - { - "type": "category", - "label": "About Calico", - "link": { - "type": "doc", - "id": "about/index" - }, - "items": [ - "about/calico-product-editions" - ] - }, - { - "type": "category", - "label": "Install and upgrade", - "link": { - "type": "doc", - "id": "getting-started/index" - }, - "items": [ - "getting-started/compatibility", - { - "type": "category", - "label": "Install on clusters", - "link": { - "type": "doc", - "id": "getting-started/install-on-clusters/index" - }, - "items": [ - { - "type": "category", - "label": "Kubernetes", - "link": { - "type": "doc", - "id": "getting-started/install-on-clusters/kubernetes/index" - }, - "items": [ - "getting-started/install-on-clusters/kubernetes/quickstart", - "getting-started/install-on-clusters/kubernetes/options-install", - 
"getting-started/install-on-clusters/kubernetes/generic-install", - "getting-started/install-on-clusters/kubernetes/helm" - ] - }, - { - "type": "category", - "label": "OpenShift", - "link": { - "type": "doc", - "id": "getting-started/install-on-clusters/openshift/index" - }, - "items": [ - "getting-started/install-on-clusters/openshift/requirements", - "getting-started/install-on-clusters/openshift/installation" - ] - }, - "getting-started/install-on-clusters/aks", - "getting-started/install-on-clusters/eks", - "getting-started/install-on-clusters/gke", - "getting-started/install-on-clusters/aws", - "getting-started/install-on-clusters/docker-enterprise", - "getting-started/install-on-clusters/rancher", - "getting-started/install-on-clusters/rke2", - "getting-started/install-on-clusters/rancher-ui", - "getting-started/install-on-clusters/tkg", - { - "type": "category", - "label": "Calico Enterprise for Windows", - "link": { - "type": "doc", - "id": "getting-started/install-on-clusters/windows-calico/index" - }, - "items": [ - "getting-started/install-on-clusters/windows-calico/limitations", - "getting-started/install-on-clusters/windows-calico/requirements", - "getting-started/install-on-clusters/windows-calico/operator", - "getting-started/install-on-clusters/windows-calico/rancher", - "getting-started/install-on-clusters/windows-calico/demo", - "getting-started/install-on-clusters/windows-calico/flowlogs", - "getting-started/install-on-clusters/windows-calico/dnspolicy", - "getting-started/install-on-clusters/windows-calico/troubleshoot" - ] - }, - { - "type": "category", - "label": "Install from a private registry", - "link": { - "type": "doc", - "id": "getting-started/install-on-clusters/private-registry/index" - }, - "items": [ - "getting-started/install-on-clusters/private-registry/private-registry-regular", - "getting-started/install-on-clusters/private-registry/private-registry-image-path" - ] - }, - "getting-started/install-on-clusters/calico-enterprise", - "getting-started/install-on-clusters/requirements" - ] - }, - { - "type": "category", - "label": "Install on non-cluster hosts", - "link": { - "type": "doc", - "id": "getting-started/bare-metal/index" - }, - "items": [ - "getting-started/bare-metal/about", - "getting-started/bare-metal/requirements" - ] - }, - { - "type": "category", - "label": "Upgrade", - "link": { - "type": "doc", - "id": "getting-started/upgrading/index" - }, - "items": [ - { - "type": "category", - "label": "Upgrade Calico Enterprise", - "link": { - "type": "doc", - "id": "getting-started/upgrading/upgrading-enterprise/index" - }, - "items": [ - { - "type": "category", - "label": "Kubernetes", - "link": { - "type": "doc", - "id": "getting-started/upgrading/upgrading-enterprise/kubernetes-upgrade-tsee/index" - }, - "items": [ - "getting-started/upgrading/upgrading-enterprise/kubernetes-upgrade-tsee/helm", - "getting-started/upgrading/upgrading-enterprise/kubernetes-upgrade-tsee/operator" - ] - }, - "getting-started/upgrading/upgrading-enterprise/openshift-upgrade" - ] - }, - { - "type": "category", - "label": "Upgrade from Calico to Calico Enterprise", - "link": { - "type": "doc", - "id": "getting-started/upgrading/upgrading-calico-to-calico-enterprise/index" - }, - "items": [ - { - "type": "category", - "label": "Kubernetes", - "link": { - "type": "doc", - "id": "getting-started/upgrading/upgrading-calico-to-calico-enterprise/upgrade-to-tsee/index" - }, - "items": [ - 
"getting-started/upgrading/upgrading-calico-to-calico-enterprise/upgrade-to-tsee/standard", - "getting-started/upgrading/upgrading-calico-to-calico-enterprise/upgrade-to-tsee/helm" - ] - }, - "getting-started/upgrading/upgrading-calico-to-calico-enterprise/upgrade-to-tsee-openshift" - ] - } - ] - }, - "getting-started/manifest-archive" - ] - }, - { - "type": "category", - "label": "Networking", - "link": { - "type": "doc", - "id": "networking/index" - }, - "items": [ - "networking/determine-best-networking", - { - "type": "category", - "label": "Networking basics", - "link": { - "type": "doc", - "id": "networking/training/index" - }, - "items": [ - "networking/training/about-networking", - "networking/training/about-kubernetes-networking" - ] - }, - { - "type": "category", - "label": "Configure Calico Enterprise networking", - "link": { - "type": "doc", - "id": "networking/configuring/index" - }, - "items": [ - "networking/configuring/bgp", - "networking/configuring/dual-tor", - "networking/configuring/multiple-networks", - "networking/configuring/vxlan-ipip", - "networking/configuring/advertise-service-ips", - "networking/configuring/mtu", - "networking/configuring/custom-bgp-config", - "networking/configuring/workloads-outside-cluster", - "networking/configuring/pod-mac-address", - "networking/configuring/node-local-dns-cache" - ] - }, - { - "type": "category", - "label": "Egress gateways", - "link": { - "type": "doc", - "id": "networking/egress/index" - }, - "items": [ - "networking/egress/egress-gateway-on-prem", - "networking/egress/egress-gateway-azure", - "networking/egress/egress-gateway-aws", - "networking/egress/egress-gateway-maintenance", - "networking/egress/external-network", - "networking/egress/troubleshoot" - ] - }, - { - "type": "category", - "label": "Customize IP address management", - "link": { - "type": "doc", - "id": "networking/ipam/index" - }, - "items": [ - "networking/ipam/get-started-ip-addresses", - "networking/ipam/initial-ippool", - "networking/ipam/ippools", - "networking/ipam/ip-autodetection", - "networking/ipam/ipv6", - "networking/ipam/use-specific-ip", - "networking/ipam/assign-ip-addresses-topology", - "networking/ipam/migrate-pools", - "networking/ipam/change-block-size", - "networking/ipam/legacy-firewalls" - ] - } - ] - }, - { - "type": "category", - "label": "Network policy", - "link": { - "type": "doc", - "id": "network-policy/index" - }, - "items": [ - { - "type": "category", - "label": "Policy recommendations", - "link": { - "type": "doc", - "id": "network-policy/recommendations/index" - }, - "items": [ - "network-policy/recommendations/policy-recommendations", - "network-policy/recommendations/learn-about-policy-recommendations" - ] - }, - "network-policy/policy-best-practices", - { - "type": "category", - "label": "Tiered network policy", - "link": { - "type": "doc", - "id": "network-policy/policy-tiers/index" - }, - "items": [ - "network-policy/policy-tiers/tiered-policy", - "network-policy/policy-tiers/allow-tigera", - "network-policy/policy-tiers/policy-tutorial-ui", - "network-policy/policy-tiers/rbac-tiered-policies" - ] - }, - "network-policy/networksets", - "network-policy/default-deny", - "network-policy/staged-network-policies", - "network-policy/policy-troubleshooting", - { - "type": "category", - "label": "Calico Enterprise network policy for beginners", - "link": { - "type": "doc", - "id": "network-policy/beginners/index" - }, - "items": [ - "network-policy/beginners/kubernetes-default-deny", - 
"network-policy/beginners/calico-network-policy", - "network-policy/beginners/calico-labels", - "network-policy/beginners/simple-policy-cnx", - { - "type": "category", - "label": "Policy rules", - "link": { - "type": "doc", - "id": "network-policy/beginners/policy-rules/index" - }, - "items": [ - "network-policy/beginners/policy-rules/policy-rules-overview", - "network-policy/beginners/policy-rules/namespace-policy", - "network-policy/beginners/policy-rules/service-accounts", - "network-policy/beginners/policy-rules/service-policy", - "network-policy/beginners/policy-rules/external-ips-policy", - "network-policy/beginners/policy-rules/icmp-ping" - ] - }, - { - "type": "category", - "label": "Policy for services", - "link": { - "type": "doc", - "id": "network-policy/beginners/services/index" - }, - "items": [ - "network-policy/beginners/services/kubernetes-node-ports", - "network-policy/beginners/services/services-cluster-ips" - ] - } - ] - }, - "network-policy/domain-based-policy", - { - "type": "category", - "label": "Application layer policies", - "link": { - "type": "doc", - "id": "network-policy/application-layer-policies/index" - }, - "items": [ - "network-policy/application-layer-policies/alp", - "network-policy/application-layer-policies/alp-tutorial" - ] - }, - { - "type": "category", - "label": "Policy for firewalls", - "link": { - "type": "doc", - "id": "network-policy/policy-firewalls/index" - }, - "items": [ - { - "type": "category", - "label": "Fortinet firewall integrations", - "link": { - "type": "doc", - "id": "network-policy/policy-firewalls/fortinet-integration/index" - }, - "items": [ - "network-policy/policy-firewalls/fortinet-integration/overview", - "network-policy/policy-firewalls/fortinet-integration/firewall-integration", - "network-policy/policy-firewalls/fortinet-integration/fortimgr-integration" - ] - } - ] - }, - { - "type": "category", - "label": "Policy for hosts and VMs", - "link": { - "type": "doc", - "id": "network-policy/hosts/index" - }, - "items": [ - "network-policy/hosts/protect-hosts", - "network-policy/hosts/kubernetes-nodes", - "network-policy/hosts/protect-hosts-tutorial", - "network-policy/hosts/host-forwarded-traffic" - ] - }, - { - "type": "category", - "label": "Policy for extreme traffic", - "link": { - "type": "doc", - "id": "network-policy/extreme-traffic/index" - }, - "items": [ - "network-policy/extreme-traffic/high-connection-workloads", - "network-policy/extreme-traffic/defend-dos-attack" - ] - }, - { - "type": "category", - "label": "Kubernetes for beginners", - "link": { - "type": "doc", - "id": "network-policy/get-started/index" - }, - "items": [ - "network-policy/get-started/about-network-policy", - "network-policy/get-started/kubernetes-network-policy", - "network-policy/get-started/kubernetes-demo", - "network-policy/get-started/kubernetes-policy-basic", - "network-policy/get-started/kubernetes-policy-advanced", - "network-policy/get-started/about-kubernetes-services", - "network-policy/get-started/about-kubernetes-ingress", - "network-policy/get-started/about-kubernetes-egress" - ] - } - ] - }, - { - "type": "category", - "label": "Observability", - "link": { - "type": "doc", - "id": "observability/index" - }, - "items": [ - "observability/get-started-cem", - "observability/alerts", - "observability/kibana", - "observability/packetcapture", - "observability/visualize-traffic", - { - "type": "category", - "label": "Calico Enterprise logs", - "link": { - "type": "doc", - "id": "observability/elastic/index" - }, - "items": [ - 
"observability/elastic/overview", - "observability/elastic/retention", - "observability/elastic/archive-storage", - "observability/elastic/rbac-elasticsearch", - { - "type": "category", - "label": "Flow logs", - "link": { - "type": "doc", - "id": "observability/elastic/flow/index" - }, - "items": [ - "observability/elastic/flow/datatypes", - "observability/elastic/flow/filtering", - "observability/elastic/flow/aggregation", - "observability/elastic/flow/hep", - "observability/elastic/flow/tcpstats", - "observability/elastic/flow/processpath" - ] - }, - "observability/elastic/audit-overview", - { - "type": "category", - "label": "DNS logs", - "link": { - "type": "doc", - "id": "observability/elastic/dns/index" - }, - "items": [ - "observability/elastic/dns/dns-logs", - "observability/elastic/dns/filtering-dns" - ] - }, - "observability/elastic/bgp", - { - "type": "category", - "label": "L7 logs", - "link": { - "type": "doc", - "id": "observability/elastic/l7/index" - }, - "items": [ - "observability/elastic/l7/configure", - "observability/elastic/l7/datatypes" - ] - }, - "observability/elastic/troubleshoot" - ] - }, - "observability/kube-audit", - "observability/iptables" - ] - }, - { - "type": "category", - "label": "Multi-cluster management", - "link": { - "type": "doc", - "id": "multicluster/index" - }, - "items": [ - { - "type": "category", - "label": "Set up multi-cluster management", - "link": { - "type": "doc", - "id": "multicluster/set-up-multi-cluster-management/index" - }, - "items": [ - { - "type": "category", - "label": "Standard operator install", - "link": { - "type": "doc", - "id": "multicluster/set-up-multi-cluster-management/standard-install/index" - }, - "items": [ - "multicluster/set-up-multi-cluster-management/standard-install/create-a-management-cluster", - "multicluster/set-up-multi-cluster-management/standard-install/create-a-managed-cluster" - ] - }, - { - "type": "category", - "label": "Helm install", - "link": { - "type": "doc", - "id": "multicluster/set-up-multi-cluster-management/helm-install/index" - }, - "items": [ - "multicluster/set-up-multi-cluster-management/helm-install/create-a-management-cluster-helm", - "multicluster/set-up-multi-cluster-management/helm-install/create-a-managed-cluster-helm" - ] - } - ] - }, - "multicluster/fine-tune-deployment", - "multicluster/change-cluster-type", - { - "type": "category", - "label": "Cluster mesh", - "link": { - "type": "doc", - "id": "multicluster/federation/index" - }, - "items": [ - "multicluster/federation/overview", - "multicluster/federation/kubeconfig", - "multicluster/federation/services-controller", - "multicluster/federation/aws" - ] - } - ] - }, - { - "type": "category", - "label": "Threat defense", - "link": { - "type": "doc", - "id": "threat/index" - }, - "items": [ - "threat/security-event-management", - "threat/configuring-webhooks", - "threat/suspicious-ips", - "threat/suspicious-domains", - "threat/tor-vpn-feed-and-dashboard", - "threat/honeypods", - "threat/deeppacketinspection", - "threat/web-application-firewall", - "threat/deploying-waf-ingress-gateway" - ] - }, - { - "type": "category", - "label": "Compliance and security", - "link": { - "type": "doc", - "id": "compliance/index" - }, - "items": [ - "compliance/overview", - "compliance/compliance-reports-cis", - "compliance/encrypt-cluster-pod-traffic" - ] - }, - { - "type": "category", - "label": "Operations", - "link": { - "type": "doc", - "id": "operations/index" - }, - "items": [ - { - "type": "category", - "label": "Calico Enterprise 
Manager UI", - "link": { - "type": "doc", - "id": "operations/cnx/index" - }, - "items": [ - "operations/cnx/access-the-manager", - "operations/cnx/authentication-quickstart", - "operations/cnx/configure-identity-provider", - "operations/cnx/roles-and-permissions" - ] - }, - { - "type": "category", - "label": "Secure component communications", - "link": { - "type": "doc", - "id": "operations/comms/index" - }, - "items": [ - "operations/comms/crypto-auth", - "operations/comms/secure-metrics", - "operations/comms/secure-bgp", - "operations/comms/manager-tls", - "operations/comms/log-storage-tls", - "operations/comms/linseed-tls", - "operations/comms/apiserver-tls", - "operations/comms/typha-node-tls", - "operations/comms/compliance-tls", - "operations/comms/packetcapture-tls", - "operations/comms/certificate-management" - ] - }, - { - "type": "category", - "label": "CLIs", - "link": { - "type": "doc", - "id": "operations/clis/index" - }, - "items": [ - { - "type": "category", - "label": "calicoctl", - "link": { - "type": "doc", - "id": "operations/clis/calicoctl/index" - }, - "items": [ - "operations/clis/calicoctl/install", - { - "type": "category", - "label": "Configure calicoctl", - "link": { - "type": "doc", - "id": "operations/clis/calicoctl/configure/index" - }, - "items": [ - "operations/clis/calicoctl/configure/overview", - "operations/clis/calicoctl/configure/datastore" - ] - } - ] - }, - { - "type": "category", - "label": "calicoq", - "link": { - "type": "doc", - "id": "operations/clis/calicoq/index" - }, - "items": [ - "operations/clis/calicoq/installing", - { - "type": "category", - "label": "Configure calicoq", - "link": { - "type": "doc", - "id": "operations/clis/calicoq/configure/index" - }, - "items": [ - "operations/clis/calicoq/configure/overview", - "operations/clis/calicoq/configure/datastore" - ] - } - ] - } - ] - }, - { - "type": "category", - "label": "Storage", - "link": { - "type": "doc", - "id": "operations/logstorage/index" - }, - "items": [ - "operations/logstorage/log-storage-recommendations", - "operations/logstorage/create-storage", - "operations/logstorage/adjust-log-storage-size", - "operations/logstorage/advanced-node-scheduling" - ] - }, - "operations/license-options", - { - "type": "category", - "label": "Monitoring", - "link": { - "type": "doc", - "id": "operations/monitor/index" - }, - "items": [ - { - "type": "category", - "label": "Prometheus", - "link": { - "type": "doc", - "id": "operations/monitor/prometheus/index" - }, - "items": [ - "operations/monitor/prometheus/support", - "operations/monitor/prometheus/byo-prometheus", - "operations/monitor/prometheus/configure-prometheus", - "operations/monitor/prometheus/alertmanager" - ] - }, - { - "type": "category", - "label": "Metrics", - "link": { - "type": "doc", - "id": "operations/monitor/metrics/index" - }, - "items": [ - "operations/monitor/metrics/recommended-metrics", - "operations/monitor/metrics/bgp-metrics", - "operations/monitor/metrics/license-agent", - "operations/monitor/metrics/policy-metrics", - "operations/monitor/metrics/elasticsearch-and-fluentd-metrics" - ] - } - ] - }, - { - "type": "category", - "label": "eBPF", - "link": { - "type": "doc", - "id": "operations/ebpf/index" - }, - "items": [ - "operations/ebpf/use-cases-ebpf", - "operations/ebpf/enabling-ebpf", - "operations/ebpf/install", - "operations/ebpf/troubleshoot-ebpf" - ] - }, - "operations/decommissioning-a-node", - { - "type": "category", - "label": "Troubleshooting", - "link": { - "type": "doc", - "id": 
"operations/troubleshoot/index" - }, - "items": [ - "operations/troubleshoot/troubleshooting", - "operations/troubleshoot/commands", - "operations/troubleshoot/component-logs" - ] - } - ] - }, - { - "type": "category", - "label": "Reference", - "link": { - "type": "doc", - "id": "reference/index" - }, - "items": [ - "reference/api", - "reference/installation/api", - "reference/installation/helm_customization", - "reference/installation/tigerastatus", - { - "type": "category", - "label": "CLIs", - "link": { - "type": "doc", - "id": "reference/clis/index" - }, - "items": [ - { - "type": "category", - "label": "calicoctl", - "link": { - "type": "doc", - "id": "reference/clis/calicoctl/index" - }, - "items": [ - "reference/clis/calicoctl/overview", - "reference/clis/calicoctl/apply", - { - "type": "category", - "label": "bgp", - "link": { - "type": "doc", - "id": "reference/clis/calicoctl/bgp/index" - }, - "items": [ - "reference/clis/calicoctl/bgp/overview", - "reference/clis/calicoctl/bgp/peers" - ] - }, - "reference/clis/calicoctl/captured-packets", - { - "type": "category", - "label": "cluster", - "link": { - "type": "doc", - "id": "reference/clis/calicoctl/cluster/index" - }, - "items": [ - "reference/clis/calicoctl/cluster/overview", - "reference/clis/calicoctl/cluster/diags" - ] - }, - "reference/clis/calicoctl/convert", - "reference/clis/calicoctl/create", - "reference/clis/calicoctl/delete", - "reference/clis/calicoctl/get", - { - "type": "category", - "label": "ipam", - "link": { - "type": "doc", - "id": "reference/clis/calicoctl/ipam/index" - }, - "items": [ - "reference/clis/calicoctl/ipam/overview", - "reference/clis/calicoctl/ipam/check", - "reference/clis/calicoctl/ipam/release", - "reference/clis/calicoctl/ipam/show", - "reference/clis/calicoctl/ipam/configure", - "reference/clis/calicoctl/ipam/split" - ] - }, - { - "type": "category", - "label": "datastore", - "link": { - "type": "doc", - "id": "reference/clis/calicoctl/datastore/index" - }, - "items": [ - "reference/clis/calicoctl/datastore/overview", - { - "type": "category", - "label": "migrate", - "link": { - "type": "doc", - "id": "reference/clis/calicoctl/datastore/migrate/index" - }, - "items": [ - "reference/clis/calicoctl/datastore/migrate/overview", - "reference/clis/calicoctl/datastore/migrate/lock", - "reference/clis/calicoctl/datastore/migrate/unlock" - ] - } - ] - }, - "reference/clis/calicoctl/label", - { - "type": "category", - "label": "node", - "link": { - "type": "doc", - "id": "reference/clis/calicoctl/node/index" - }, - "items": [ - "reference/clis/calicoctl/node/overview", - "reference/clis/calicoctl/node/run", - "reference/clis/calicoctl/node/status", - "reference/clis/calicoctl/node/diags", - "reference/clis/calicoctl/node/checksystem" - ] - }, - "reference/clis/calicoctl/patch", - "reference/clis/calicoctl/replace", - "reference/clis/calicoctl/version" - ] - }, - { - "type": "category", - "label": "calicoq", - "link": { - "type": "doc", - "id": "reference/clis/calicoq/index" - }, - "items": [ - "reference/clis/calicoq/overview", - "reference/clis/calicoq/selectors", - "reference/clis/calicoq/endpoint", - "reference/clis/calicoq/eval", - "reference/clis/calicoq/host", - "reference/clis/calicoq/policy", - "reference/clis/calicoq/version" - ] - } - ] - }, - { - "type": "category", - "label": "Resource definitions", - "link": { - "type": "doc", - "id": "reference/resources/index" - }, - "items": [ - "reference/resources/overview", - "reference/resources/bgpconfig", - "reference/resources/bgppeer", - 
"reference/resources/bgpfilter", - "reference/resources/blockaffinity", - "reference/resources/caliconodestatus", - { - "type": "category", - "label": "Compliance reports", - "link": { - "type": "doc", - "id": "reference/resources/compliance-reports/index" - }, - "items": [ - "reference/resources/compliance-reports/overview", - "reference/resources/compliance-reports/inventory", - "reference/resources/compliance-reports/network-access", - "reference/resources/compliance-reports/policy-audit", - "reference/resources/compliance-reports/cis-benchmark" - ] - }, - "reference/resources/deeppacketinspection", - "reference/resources/egressgatewaypolicy", - "reference/resources/externalnetwork", - "reference/resources/felixconfig", - "reference/resources/globalalert", - "reference/resources/globalnetworkpolicy", - "reference/resources/globalnetworkset", - "reference/resources/globalreport", - "reference/resources/globalthreatfeed", - "reference/resources/hostendpoint", - "reference/resources/ippool", - "reference/resources/ipreservation", - "reference/resources/ipamconfig", - "reference/resources/licensekey", - "reference/resources/kubecontrollersconfig", - "reference/resources/managedcluster", - "reference/resources/networkpolicy", - "reference/resources/networkset", - "reference/resources/node", - "reference/resources/packetcapture", - "reference/resources/policyrecommendations", - "reference/resources/profile", - "reference/resources/remoteclusterconfiguration", - "reference/resources/securityeventwebhook", - "reference/resources/stagedglobalnetworkpolicy", - "reference/resources/stagedkubernetesnetworkpolicy", - "reference/resources/stagednetworkpolicy", - "reference/resources/tier", - "reference/resources/workloadendpoint" - ] - }, - { - "type": "category", - "label": "Architecture and network design", - "link": { - "type": "doc", - "id": "reference/architecture/index" - }, - "items": [ - "reference/architecture/overview", - "reference/architecture/data-path", - { - "type": "category", - "label": "Network design", - "link": { - "type": "doc", - "id": "reference/architecture/design/index" - }, - "items": [ - "reference/architecture/design/l2-interconnect-fabric", - "reference/architecture/design/l3-interconnect-fabric" - ] - } - ] - }, - { - "type": "category", - "label": "Component resources", - "link": { - "type": "doc", - "id": "reference/component-resources/index" - }, - "items": [ - "reference/component-resources/configuration", - "reference/component-resources/configure-resources", - { - "type": "category", - "label": "Calico Enterprise Kubernetes controllers", - "link": { - "type": "doc", - "id": "reference/component-resources/kube-controllers/index" - }, - "items": [ - "reference/component-resources/kube-controllers/configuration", - "reference/component-resources/kube-controllers/prometheus" - ] - }, - { - "type": "category", - "label": "Calico Enterprise node (cnx-node)", - "link": { - "type": "doc", - "id": "reference/component-resources/node/index" - }, - "items": [ - "reference/component-resources/node/configuration", - { - "type": "category", - "label": "Felix", - "link": { - "type": "doc", - "id": "reference/component-resources/node/felix/index" - }, - "items": [ - "reference/component-resources/node/felix/configuration", - "reference/component-resources/node/felix/prometheus" - ] - } - ] - }, - { - "type": "category", - "label": "Typha for scaling", - "link": { - "type": "doc", - "id": "reference/component-resources/typha/index" - }, - "items": [ - 
"reference/component-resources/typha/overview", - "reference/component-resources/typha/configuration", - "reference/component-resources/typha/prometheus" - ] - } - ] - }, - { - "type": "category", - "label": "Configuration on public clouds", - "link": { - "type": "doc", - "id": "reference/public-cloud/index" - }, - "items": [ - "reference/public-cloud/aws", - "reference/public-cloud/azure", - "reference/public-cloud/gce" - ] - }, - { - "type": "category", - "label": "Host endpoints", - "link": { - "type": "doc", - "id": "reference/host-endpoints/index" - }, - "items": [ - "reference/host-endpoints/overview", - "reference/host-endpoints/connectivity", - "reference/host-endpoints/objects", - "reference/host-endpoints/selector", - "reference/host-endpoints/failsafe", - "reference/host-endpoints/pre-dnat", - "reference/host-endpoints/forwarded", - "reference/host-endpoints/summary", - "reference/host-endpoints/conntrack" - ] - }, - "reference/attribution", - "reference/rest-api-reference", - "reference/component-versions", - "reference/faq", - "reference/support-policy" - ] - }, - { - "type": "doc", - "id": "release-notes/index", - "label": "Release notes" - } - ] -} diff --git a/calico-enterprise_versions.json b/calico-enterprise_versions.json index fb4eeaaf24..f01210fdce 100644 --- a/calico-enterprise_versions.json +++ b/calico-enterprise_versions.json @@ -1,6 +1,5 @@ [ "3.22-2", "3.21-2", - "3.20-2", - "3.19-2" + "3.20-2" ] diff --git a/docusaurus.config.js b/docusaurus.config.js index 338739be37..a425e70be9 100644 --- a/docusaurus.config.js +++ b/docusaurus.config.js @@ -432,7 +432,7 @@ export default async function createAsyncConfig() { path: 'calico-enterprise', routeBasePath: 'calico-enterprise', editCurrentVersion: true, - onlyIncludeVersions: [...nextVersion, '3.22-2','3.21-2', '3.20-2', '3.19-2'], + onlyIncludeVersions: [...nextVersion, '3.22-2','3.21-2', '3.20-2'], lastVersion: '3.21-2', versions: { current: { @@ -455,11 +455,6 @@ export default async function createAsyncConfig() { path: '3.20', banner: 'none', }, - '3.19-2': { - label: '3.19', - path: '3.19', - banner: 'none', - }, }, sidebarPath: './sidebars-calico-enterprise.js', beforeDefaultRemarkPlugins: [variablesPlugin],