diff --git a/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/CalicoWindowsInstall.js b/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/CalicoWindowsInstall.js deleted file mode 100644 index e74ebaa1e4..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/CalicoWindowsInstall.js +++ /dev/null @@ -1,185 +0,0 @@ -import React from 'react'; - -import Admonition from '@theme/Admonition'; -import CodeBlock from '@theme/CodeBlock'; -import Link from '@docusaurus/Link'; - -import { prodname, prodnameWindows } from '../../variables'; - -function CalicoWindowsInstallFirstStep(props) { - if (props.networkingType === 'vxlan') { - return ( -
- Enable BGP service on the Windows nodes. Install the RemoteAccess service using the following Powershell - commands: -
-Then restart the computer:
-before running:
-- Sometimes the remote access service fails to start automatically after install. To make sure it is running, run - the following command: -
-
- Get the cluster's Kubernetes API server host and port, which will be used to update the {prodnameWindows}{' '}
- config map. The API server host and port is required so that the {prodnameWindows} installation script can
- create a kubeconfig file for {prodname} services. If your Windows nodes already have {prodnameWindows}{' '}
- installed manually, skip this step. The installation script will use the API server host and port from your
- node's existing kubeconfig file if the KUBERNETES_SERVICE_HOST and{' '}
- KUBERNETES_SERVICE_PORT variables are not provided in the calico-windows-config{' '}
- ConfigMap.
-
First, make a note of the address of the API server:
-- If you have a single API server with a static IP address, you can use its IP address and port. The IP can - be found by running: -
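A minimal sketch of that lookup, assuming kubectl is configured against the cluster:

```bash
# List the endpoints backing the default "kubernetes" service;
# the API server address and port appear under ENDPOINTS.
kubectl get endpoints kubernetes -o wide
```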
-The output should look like the following, with a single IP address and port under "ENDPOINTS":
-- If there are multiple entries under "ENDPOINTS", then your cluster must have more than one API server. In - this case, use the appropriate load balancing option below for your cluster. -
-
- If using DNS load balancing (as used by kops), use the FQDN and port of the API server{' '}
-
- api.internal.{'<'}clustername{'>'}
-
- .
-
- If you have multiple API servers with a load balancer in front, you should use the IP and port of the load - balancer. -
-
- If your cluster uses a ConfigMap to configure kube-proxy you can find the "right" way to
- reach the API server by examining the config map. For example:
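One way to inspect it (a sketch; the ConfigMap name and namespace may differ by platform):

```bash
# Print the API server URL recorded in kube-proxy's kubeconfig.
kubectl get configmap -n kube-system kube-proxy -o yaml | grep server
```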
-
- In this case, the server is d881b853aea312e00302a84f1e346a77.gr7.us-west-2.eks.amazonaws.com{' '}
- and the port is 443 (the standard HTTPS port).
-
- Create the kubernetes-services-endpoint ConfigMap with the Kubernetes API server
- host and port (discovered in the previous step) used to create a kubeconfig file for {prodname} services.
-
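A sketch of the ConfigMap, assuming the usual tigera-operator namespace; substitute the host and port noted in the previous step:

```bash
kubectl apply -f - <<EOF
apiVersion: v1
kind: ConfigMap
metadata:
  name: kubernetes-services-endpoint
  namespace: tigera-operator
data:
  KUBERNETES_SERVICE_HOST: '<API server host>'
  KUBERNETES_SERVICE_PORT: '<API server port>'
EOF
```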
- Get the Kubernetes service clusterIP range configured in your cluster. This must - match the service-cluster-ip-range used by kube-apiserver. -
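One common way to find it (a sketch; this works when the flag appears in the dumped cluster info):

```bash
# Look for the service-cluster-ip-range passed to kube-apiserver.
kubectl cluster-info dump | grep -m 1 service-cluster-ip-range
```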
- Add the Kubernetes service CIDR (discovered in the previous step) and enable {prodnameWindows} on the Tigera Operator installation resource. -
-
- For example, with a Kubernetes service clusterIP range of 10.96.0.0/12:
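A sketch of the corresponding change, assuming an operator-managed cluster with an Installation resource named default (the field names follow the operator's Installation schema for Windows HNS support):

```bash
kubectl patch installation default --type merge \
  -p '{"spec": {"serviceCIDRs": ["10.96.0.0/12"], "calicoNetwork": {"windowsDataplane": "HNS"}}}'
```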
-
- Depending on your platform, you may already have kube-proxy running on your Windows nodes. If kube-proxy is
- already running on your Windows nodes, skip this step. If kube-proxy is not running, you must install and run
- kube-proxy on each of the Windows nodes in your cluster. Note: The
- manifest provided in the kubernetes-sigs sig-windows-tools repository depends on the kubeconfig
- provided by the kube-proxy ConfigMap in the kube-system namespace.
-
- You must replace KUBE_PROXY_VERSION with your cluster's Kubernetes version in kube-proxy.yml to ensure the daemonset uses a kube-proxy Windows image that is compatible with your Kubernetes cluster. Use a command like the following to retrieve the YAML file, replace the version, and apply it:
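A sketch of that sequence; the URL reflects the layout of the kubernetes-sigs sig-windows-tools repository and may differ by branch, and v1.27.3 is a placeholder version:

```bash
# Fetch the manifest, substitute your cluster's Kubernetes version, and apply it.
curl -L https://raw.githubusercontent.com/kubernetes-sigs/sig-windows-tools/master/hostprocess/calico/kube-proxy/kube-proxy.yml \
  | sed 's/KUBE_PROXY_VERSION/v1.27.3/g' \
  | kubectl apply -f -
```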
-
Monitor the installation.
- The {prodnameWindows} HPC installation has 2 initContainers: uninstall-calico, which removes any previously manually installed {prodnameWindows} services,
- and install-cni, which installs the needed CNI binaries and configuration when using Calico CNI.
- - After these initContainers finish their execution, installation is complete. Next, the - {prodnameWindows} services are started in separate containers: -
-
- The calico-node-windows pods will be ready after their containers finish initializing.
-
- Many {props.cli} commands require access to the {prodname} datastore. In most circumstances,{' '}
- {props.cli} cannot achieve this connection by default. You can provide
- {props.cli} with the information it needs using either of the following.
-
- Configuration file: by default, {props.cli} will look for a configuration file
- at /etc/calico/{props.cli}.cfg. You can override this using the --config option
- with commands that require datastore access. The file can be in either YAML or JSON format. It must be valid
- and readable by {props.cli}. A YAML example follows.
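A minimal sketch of such a file for the Kubernetes datastore, assuming the CLI is calicoctl (the kubeconfig path is a placeholder):

```bash
cat > /etc/calico/calicoctl.cfg <<EOF
apiVersion: projectcalico.org/v3
kind: CalicoAPIConfig
metadata:
spec:
  datastoreType: kubernetes
  kubeconfig: /path/to/kubeconfig
EOF
```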
-
- Environment variables: If {props.cli} cannot locate, read, or access a
- configuration file, it will check a specific set of environment variables.
-
- See the section that corresponds to your datastore type for a - full set of options and examples. -
-{props.cli} inside a container, any environment variables and configuration files must
- be passed to the container so they are available to the process inside. It can be useful to keep a running
- container (that sleeps) configured for your datastore, then it is possible to exec into the
- container and have an already configured environment.
- - {release.title !== 'master' && ( -
- - Release archive - {' '} - with Kubernetes manifests. Based on Calico {releases[0].calico.minor_version}. -
- )} - This release comprises the following components, and can be installed using{' '} -
- {release['tigera-operator'].registry}/{release['tigera-operator'].image}:
- {release['tigera-operator'].version}
-
-
- | Component | -Version | -
|---|---|
| {componentName} | -{release.components[componentName].version} | -
- To connect the managed cluster to your management cluster, you need to create and apply a connection manifest. - You can create a connection manifest from the Manager UI in the management cluster or manually using{' '} - {kubectlCmd}. -
-- In the Manager UI left navbar, click Managed Clusters. -
-- On the Managed Clusters page, click the button, Add Cluster. -
-Name your cluster that is easily recognized in a list of managed clusters, and click Create Cluster.
-Download the manifest.
-- Choose a name for your managed cluster and then add it to your management cluster. The - following commands will create a manifest with the name of your managed cluster in your current directory. -
-- First, decide on the name for your managed cluster. Because you will eventually have several managed - clusters, choose a name that can be easily recognized in a list of managed clusters. The name is also used - in steps that follow. -
-
- Get the namespace in which the Tigera Operator is running in your managed cluster (in most cases this will
- be tigera-operator):
-
- Add a managed cluster and save the manifest containing a{' '} - - ManagementClusterConnection - {' '} - and a Secret. -
-managementClusterAddr in the manifest is correct.
- - Apply the manifest that you modified in the step, - Add a managed cluster to the management cluster. -
-Monitor progress with the following command:
-management-cluster-connection and tigera-compliance show a status of{' '}
- Available.
- You have now successfully installed a managed cluster!
- > - ); -} diff --git a/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/EnvironmentFile.js b/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/EnvironmentFile.js deleted file mode 100644 index cd6eb04b03..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/EnvironmentFile.js +++ /dev/null @@ -1,58 +0,0 @@ -import React from 'react'; - -import Admonition from '@theme/Admonition'; -import CodeBlock from '@theme/CodeBlock'; -import Link from '@docusaurus/Link'; - -import { baseUrl } from '../../variables'; - -export default function EnvironmentFile(props) { - return ( - <> -- - Use the following guidelines and sample file to define the environment variables for starting Calico on the - host. For more help, see the{' '} - - {props.install === 'container' ? ( - {props.nodecontainer} configuration reference - ) : ( - Felix configuration reference - )} -
-For the Kubernetes datastore set the following:
-| Variable | -Configuration guidance | -
|---|---|
| KUBECONFIG | -Path to kubeconfig file to access the Kubernetes API Server | -
- Sample EnvironmentFile - save to /etc/calico/calico.env
-
- If you are installing on a cluster installed by EKS, GKE, AKS or Mirantis Kubernetes Engine (MKE), or you need
- to customize TLS certificates, you must customize this Helm chart by creating a{' '}
- values.yaml file. Otherwise, you can skip this step.
-
- If you are installing on a cluster installed by EKS, GKE, AKS or Mirantis Kubernetes Engine (MKE), set the{' '}
- kubernetesProvider as described in the{' '}
- Installation reference. For
- example:
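For instance, a one-line values.yaml for EKS might look like this (a sketch):

```bash
echo '{ installation: { kubernetesProvider: EKS } }' > values.yaml
```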
-
- For Azure AKS cluster with no Kubernetes CNI pre-installed, create values.yaml with the following
- command:
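A sketch of that command; the pod CIDR below is an example and must match your cluster:

```bash
cat > values.yaml <<EOF
installation:
  kubernetesProvider: AKS
  cni:
    type: Calico
  calicoNetwork:
    bgp: Disabled
    ipPools:
    - cidr: 10.244.0.0/16
      encapsulation: VXLAN
EOF
```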
-
- Add any other customizations you require to values.yaml by running the following command.
- For help, see Helm installation reference,
- or helm docs.
-
- Create the tigera-operator namespace:
-
- kubectl create namespace tigera-operator
-
- - Install the Tigera {prodname} operator and custom resource definitions using the Helm chart, and passing in - your image pull secrets -
- {renderCond3()} -
- Monitor progress, wait until apiserver shows a status of Available, then proceed
- to the next step.
-
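A typical way to watch this, assuming the tigerastatus resource is being served once the operator is running:

```bash
watch kubectl get tigerastatus
```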
Install your {prodname} license:
-You can now monitor progress with the following command:
-Congratulations! You have now installed {prodname} using the Helm 3 chart.
-Install the Google cloud storage helm repo plugin:
-Add the Calico helm repo:
-Get the Helm chart:
-
- or if you created a values.yaml above:
-
- or if you created a values.yaml above:
-
Install the Tigera Operator and custom resource definitions.
-- Install the Prometheus operator and related custom resource definitions. The Prometheus operator will be - used to deploy Prometheus server and Alertmanager to monitor {prodname} metrics. -
-Install your pull secret.
-
- If pulling images directly from quay.io/tigera, you will likely want to use the credentials
- provided to you by your Tigera support representative. If using a private registry, use your private
- registry credentials instead.
-
- Install any extra {prodname} resources needed at - cluster start using calicoctl. -
-- Download the Tigera custom resources. For more information on configuration options available in this - manifest, see the installation reference. -
-
- Remove the Manager custom resource from the manifest file.
-
- Remove the LogStorage custom resource from the manifest file.
-
Now apply the modified manifest.
-You can now monitor progress with the following command:
-- Install the Tigera custom resources. For more information on configuration options available in this - manifest, see the installation reference. -
-You can now monitor progress with the following command:
-
- Wait until the apiserver shows a status of Available, then proceed to{' '}
- install the {prodname} license.
-
- Wait until the apiserver shows a status of Available, then proceed to the next
- section.
-
- - Configure a storage class for {prodname} - - . -
-Install the Tigera Operator and custom resource definitions.
-- Install the Prometheus operator and related custom resource definitions. The Prometheus operator will be - used to deploy Prometheus server and Alertmanager to monitor {prodname} metrics. -
-Install your pull secret.
-
- If pulling images directly from quay.io/tigera, you will likely want to use the credentials
- provided to you by your Tigera support representative. If using a private registry, use your private
- registry credentials instead.
-
- Install any extra {prodname} resources needed at - cluster start using calicoctl. -
-- Download the Tigera custom resources. For more information on configuration options available in this - manifest, see the installation reference. -
-
- Remove the Manager custom resource from the manifest file.
-
- Remove the LogStorage custom resource from the manifest file.
-
Now apply the modified manifest.
-You can now monitor progress with the following command:
-- Install the Tigera custom resources. For more information on configuration options available in this - manifest, see the installation reference. -
-You can now monitor progress with the following command:
-
- Wait until the apiserver shows a status of Available, then proceed to the next
- section.
-
- Wait until the apiserver shows a status of Available, then proceed to{' '}
- install the {prodname} license.
-
In order to use {prodname}, you must install the license provided to you by Tigera.
-You can now monitor progress with the following command:
-
- To control managed clusters from your central management plane, you must ensure it is reachable for
- connections. The simplest way to get started (but not for production scenarios), is to configure a{' '}
- NodePort service to expose the management cluster. Note that the service must live within the{' '}
- tigera-manager namespace.
-
Create a service to expose the management cluster.
-- The following example of a NodePort service may not be suitable for production and high availability. - For options, see{' '} - - Fine-tune multi-cluster management for production - - . -
-Apply the following service manifest.
-- Export the service port number, and the public IP or host of the management cluster. (Ex. - "example.com:1234" or "10.0.0.10:1234".) -
-- Apply the{' '} - - ManagementCluster - {' '} - CR. -
-- To access resources in a managed cluster from the {prodname} Manager within the management cluster, the - logged-in user must have appropriate permissions defined in that managed cluster (clusterrole bindings). -
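A sketch of the CR; the address is a placeholder for the host and port exported above:

```bash
kubectl apply -f - <<EOF
apiVersion: operator.tigera.io/v1
kind: ManagementCluster
metadata:
  name: tigera-secure
spec:
  address: <management-cluster-host>:<port>
EOF
```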
-
- Create an admin user called, mcm-user in the default namespace with full permissions, by
- applying the following commands.
-
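A sketch of those commands, assuming the tigera-network-admin cluster role shipped with the product:

```bash
# Create the service account and grant it full network-admin permissions.
kubectl create serviceaccount mcm-user
kubectl create clusterrolebinding mcm-user-admin \
  --serviceaccount=default:mcm-user \
  --clusterrole=tigera-network-admin
```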
Get the login token for your new admin user, and log in to {prodname} Manager.
-
- In the top right banner, your management cluster is displayed as the first entry in the cluster
- selection drop-down menu with the fixed name, management cluster.
-
- You have successfully installed a management cluster.
- > - )} - {props.clusterType === 'managed' && ( - <> -- To access resources belonging to a managed cluster from the {prodname} Manager UI, the service or user - account used to log in must have appropriate permissions defined in the managed cluster. -
-
- Let's define admin-level permissions for the service account (mcm-user) we created to log in to
- the Manager UI. Run the following command against your managed cluster.
-
Install the Tigera Operator and custom resource definitions.
-- Install the Prometheus operator and related custom resource definitions. The Prometheus operator will be - used to deploy Prometheus server and Alertmanager to monitor {prodname} metrics. -
-Install your pull secret.
-
- If pulling images directly from quay.io/tigera, you will likely want to use the credentials
- provided to you by your Tigera support representative. If using a private registry, use your private
- registry credentials instead.
-
- Install any extra {prodname} resources needed at - cluster start using calicoctl. -
-- Download the Tigera custom resources. For more information on configuration options available in this - manifest, see the installation reference. -
-
- Remove the Manager custom resource from the manifest file.
-
- Remove the LogStorage custom resource from the manifest file.
-
Now apply the modified manifest.
-Monitor progress with the following command:
-
- Wait until the apiserver shows a status of Available, then proceed to the next
- section.
-
- Install the Tigera custom resources. For more information on configuration options available in this - manifest, see the installation reference. -
-You can now monitor progress with the following command:
-
- Wait until the apiserver shows a status of Available, then proceed to{' '}
- install the {prodname} license.
-
- {prodname} networking cannot currently be installed on the EKS control plane nodes. As a result the control
- plane nodes will not be able to initiate network connections to {prodname} pods. (This is a general limitation
- of EKS's custom networking support, not specific to {prodname}.) As a workaround, trusted pods that require
- control plane nodes to connect to them, such as those implementing admission controller webhooks, can include{' '}
- hostNetwork:true in their pod spec. See the Kubernetes API{' '}
-
- pod spec
- {' '}
- definition for more information on this setting.
-
- For these instructions, we will use eksctl to provision the cluster. However, you can use any of
- the methods in{' '}
-
- Getting Started with Amazon EKS
-
-
- Before you get started, make sure you have downloaded and configured the{' '} - - necessary prerequisites - -
-First, create an Amazon EKS cluster without any nodes.
-
- Since this cluster will use {prodname} for networking, you must delete the aws-node daemon set
- to disable AWS VPC networking for pods.
-
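The deletion itself is a single command:

```bash
kubectl delete daemonset -n kube-system aws-node
```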
- - Configure a storage class for {prodname}. - -
-Install the Tigera Operator and custom resource definitions.
-- Install the Prometheus operator and related custom resource definitions. The Prometheus operator will be - used to deploy Prometheus server and Alertmanager to monitor {prodname} metrics. -
-Install your pull secret.
-
- If pulling images directly from quay.io/tigera, you will likely want to use the credentials
- provided to you by your Tigera support representative. If using a private registry, use your private
- registry credentials instead.
-
- Install any extra {prodname} resources needed at - cluster start using calicoctl. -
-
- To configure {prodname} for use with the Calico CNI plugin, we must create an Installation{' '}
- resource that has spec.cni.type: Calico. Install the{' '}
- custom-resources-calico-cni.yaml manifest, which includes this configuration. For more
- information on configuration options available in this manifest, see{' '}
- the installation reference.
-
- Download the Tigera custom resources. For more information on configuration options available in this - manifest, see the installation reference. -
-
- Remove the Manager custom resource from the manifest file.
-
- Remove the LogStorage custom resource from the manifest file.
-
Now apply the modified manifest.
-Monitor progress with the following command:
-Finally, add nodes to the cluster.
---- Tip: Without the
---max-pods-per-nodeoption above, EKS will limit the{' '} - - number of pods based on node-type - - . Seeeksctl create nodegroup --helpfor the full set of node group options. -
Monitor progress with the following command:
-
- Wait until the apiserver shows a status of Available, then proceed to the next
- section.
-
In order to use {prodname}, you must install the license provided to you by Tigera.
-You can now monitor progress with the following command:
-
- To control managed clusters from your central management plane, you must ensure it is reachable for
- connections. The simplest way to get started (but not for production scenarios), is to configure a{' '}
- NodePort service to expose the management cluster. Note that the service must live within the{' '}
- tigera-manager namespace.
-
Create a service to expose the management cluster.
-- The following example of a NodePort service may not be suitable for production and high availability. - For options, see{' '} - - Fine-tune multi-cluster management for production - - . -
-Apply the following service manifest.
-- Export the service port number, and the public IP or host of the management cluster. (Ex. - "example.com:1234" or "10.0.0.10:1234".) -
-- Apply the{' '} - - ManagementCluster - {' '} - CR. -
-- To access resources in a managed cluster from the {prodname} Manager within the management cluster, the - logged-in user must have appropriate permissions defined in that managed cluster (clusterrole bindings). -
-
- Create an admin user called, mcm-user in the default namespace with full permissions, by
- applying the following commands.
-
Get the login token for your new admin user, and log in to {prodname} Manager.
-
- In the top right banner, your management cluster is displayed as the first entry in the cluster
- selection drop-down menu with the fixed name, management cluster.
-
- You have successfully installed a management cluster.
- > - )} - {props.clusterType === 'managed' && ( - <> -- To access resources belonging to a managed cluster from the {prodname} Manager UI, the service or user - account used to log in must have appropriate permissions defined in the managed cluster. -
-
- Let's define admin-level permissions for the service account (mcm-user) we created to log in to
- the Manager UI. Run the following command against your managed cluster.
-
Install the Tigera Operator and custom resource definitions.
-- Install the Prometheus operator and related custom resource definitions. The Prometheus operator will be - used to deploy Prometheus server and Alertmanager to monitor {prodname} metrics. -
-Install your pull secret.
-
- If pulling images directly from quay.io/tigera, you will likely want to use the credentials
- provided to you by your Tigera support representative. If using a private registry, use your private
- registry credentials instead.
-
- Install any extra Calico resources needed at cluster - start using calicoctl. -
-- Download the Tigera custom resources. For more information on configuration options available in this - manifest, see the installation reference. -
-
- Remove the Manager custom resource from the manifest file.
-
- Remove the LogStorage custom resource from the manifest file.
-
Now apply the modified manifest.
-You can now monitor progress with the following command:
-
- Wait until the apiserver shows a status of Available, then proceed to the next
- section.
-
- Install the Tigera custom resources. For more information on configuration options available in this - manifest, see the installation reference. -
-You can now monitor progress with the following command:
-
- Wait until the apiserver shows a status of Available, then proceed to the next
- section.
-
In order to use {prodname}, you must install the license provided to you by Tigera.
-You can now monitor progress with the following command:
-
- To control managed clusters from your central management plane, you must ensure it is reachable for
- connections. The simplest way to get started (but not for production scenarios), is to configure a{' '}
- NodePort service to expose the management cluster. Note that the service must live within the{' '}
- tigera-manager namespace.
-
Create a service to expose the management cluster.
-- The following example of a NodePort service may not be suitable for production and high availability. For - options, see{' '} - - Fine-tune multi-cluster management for production - - . -
-Apply the following service manifest.
-- Export the service port number, and the public IP or host of the management cluster. (Ex. - "example.com:1234" or "10.0.0.10:1234".) -
-- Apply the{' '} - - ManagementCluster - {' '} - CR. -
-- To access resources in a managed cluster from the {prodname} Manager within the management cluster, the - logged-in user must have appropriate permissions defined in that managed cluster (clusterrole bindings). -
-
- Create an admin user called, mcm-user in the default namespace with full permissions, by
- applying the following commands.
-
Get the login token for your new admin user, and log in to {prodname} Manager.
-
- In the top right banner, your management cluster is displayed as the first entry in the cluster selection
- drop-down menu with the fixed name, management cluster.
-
- You have successfully installed a management cluster.
-
-
- To access resources belonging to a managed cluster from the {prodname} Manager UI, the service or user account - used to log in must have appropriate permissions defined in the managed cluster. -
-
- Let's define admin-level permissions for the service account (mcm-user) we created to log in to
- the Manager UI. Run the following command against your managed cluster.
-
Install the Tigera Operator and custom resource definitions.
-- Install the Prometheus operator and related custom resource definitions. The Prometheus operator will be - used to deploy Prometheus server and Alertmanager to monitor {prodname} metrics. -
-
- If pulling images directly from quay.io/tigera, you will likely want to use the credentials
- provided to you by your Tigera support representative. If using a private registry, use your private
- registry credentials.
-
- Download the Tigera custom resources. For more information on configuration options available in this - manifest, see the installation reference. -
-
- Remove the Manager custom resource from the manifest file.
-
- Remove the LogStorage custom resource from the manifest file.
-
Now apply the modified manifest.
-- Install the Tigera custom resources. For more information on configuration options available, see{' '} - the installation reference. -
-You can now monitor progress with the following command:
-
- Wait until the apiserver shows a status of Available, then proceed to the next
- section.
-
Install the {prodname} license provided to you by Tigera.
-You can now monitor progress with the following command:
-
- To control managed clusters from your central management plane, you must ensure it is reachable for
- connections. The simplest way to get started (but not for production scenarios), is to configure a{' '}
- NodePort service to expose the management cluster. Note that the service must live within the{' '}
- tigera-manager namespace.
-
- Create a service to expose the management cluster. The following example of a NodePort service may not - be suitable for production and high availability. For options, see{' '} - - Fine-tune multi-cluster management for production - - . Apply the following service manifest. -
-- Export the service port number, and the public IP or host of the management cluster. (Ex. - "example.com:1234" or "10.0.0.10:1234".) -
-- - ManagementCluster - {' '} - CR. -
-- To access resources in a managed cluster from the {prodname} Manager within the management cluster, the - logged-in user must have appropriate permissions defined in that managed cluster (clusterrole bindings). -
-
- Create an admin user called, mcm-user in the default namespace with full permissions, by
- applying the following commands.
-
Get the login token for your new admin user, and log in to {prodname} Manager.
-
- In the top right banner, your management cluster is displayed as the first entry in the cluster
- selection drop-down menu with the fixed name, management cluster.
-
-
-
You have successfully installed a management cluster.
- > -- To access resources belonging to a managed cluster from the {prodname} Manager UI, the service or user - account used to log in must have appropriate permissions defined in the managed cluster. -
-
- Let's define admin-level permissions for the service account (mcm-user) we created to log
- in to the Manager UI. Run the following command against your managed cluster.
-
- First, create a staging directory for the installation. This directory will contain the configuration file, - along with cluster state files, that OpenShift installer will create: -
-Now run OpenShift installer to create a default configuration file:
-
- After the installer finishes, your staging directory will contain the configuration file{' '}
- install-config.yaml.
-
- Override the OpenShift networking to use {prodname} and update the AWS instance types to meet the{' '} - system requirements: -
-By default openshift-installer creates 3 replicas, you can change these settings by modifying the cloud-provider part in the install-config.yaml
-The following example changes the default deployment instance type and replica quantity.
-Now generate the Kubernetes manifests using your configuration file:
-For OpenShift v4.16 or newer on AWS, configure AWS security groups to allow BGP, typha and IP-in-IP encapsulation traffic by editing the OpenShift cluster-api manifests.
-Edit spec.network.cni.cniIngressRules in the cluster-api/02_infra-cluster.yaml file to add
- Edit the Installation custom resource manifest manifests/01-cr-installation.yaml so that it
- enables VXLAN and disables BGP. This is required for {prodnameWindows}:
-
- To provide additional configuration during installation (for example, BGP configuration or peers), use a - Kubernetes ConfigMap with your desired {prodname} resources. If you do not need to provide additional - configuration, skip this section. -
-
- To include {prodname} resources during installation, edit{' '}
- manifests/02-configmap-calico-resources.yaml in order to add your own configuration.
-
If you have a directory with the {prodname} resources, you can create the file with the command:
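A sketch of that command (the resource directory is a placeholder):

```bash
kubectl create configmap -n tigera-operator calico-resources \
  --from-file=<resource-directory> --dry-run=client -o yaml \
  > manifests/02-configmap-calico-resources.yaml
```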
-
- With recent versions of kubectl it is necessary to have a kubeconfig configured or add{' '}
- --server='127.0.0.1:443' even though it is not used.
-
- If you have provided a calico-resources configmap and the tigera-operator pod fails to come up
- with Init:CrashLoopBackOff, check the output of the init-container with{' '}
- kubectl logs -n tigera-operator -l k8s-app=tigera-operator -c create-initial-resources.
-
Start the cluster creation with the following command and wait for it to complete.
-- {prodname} requires storage for logs and reports. Before finishing the installation, you must{' '} - create a StorageClass for {prodname}. -
- -- In order to use {prodname}, you must install the license provided to you by Tigera support representative. - Before applying the license, wait until the Tigera API server is ready with the following command: -
- In order to use {prodname}, you must install the license provided to you by your Tigera support representative. - Before applying the license, wait until the Tigera API server is ready with the following command: -
- Wait until the apiserver shows a status of Available.
-
After the Tigera API server is ready, apply the license:
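Assuming the license file is saved locally:

```bash
kubectl create -f </path/to/license.yaml>
```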
-- Download the Tigera custom resources. For more information on configuration options available in this - manifest, see the installation reference. -
-
- Remove the Manager custom resource from the manifest file.
-
- Remove the LogStorage custom resource from the manifest file.
-
Now apply the modified manifest.
-Apply the custom resources for enterprise features.
-You can now monitor progress with the following command:
-
- When it shows all components with status Available, proceed to the next step.
-
(Optional) Apply the full CRDs including descriptions.
-
- To control managed clusters from your central management plane, you must ensure it is reachable for
- connections. The simplest way to get started (but not for production scenarios), is to configure a{' '}
- NodePort service to expose the management cluster. Note that the service must live within the{' '}
- tigera-manager namespace.
-
- Create a service to expose the management cluster. The following example of a NodePort service may not - be suitable for production and high availability. For options, see{' '} - - Fine-tune multi-cluster management for production - - . Apply the following service manifest. -
-- Export the service port number, and the public IP or host of the management cluster. (Ex. - "example.com:1234" or "10.0.0.10:1234".) -
-- Apply the{' '} - - ManagementCluster - {' '} - CR. -
-- To access resources in a managed cluster from the {prodname} Manager within the management cluster, the - logged-in user must have appropriate permissions defined in that managed cluster (clusterrole bindings). -
-
- Create an admin user called, mcm-user in the default namespace with full permissions, by
- applying the following commands.
-
Get the login token for your new admin user, and log in to {prodname} Manager.
-
- In the top right banner, your management cluster is displayed as the first entry in the cluster
- selection drop-down menu with the fixed name, management cluster.
-
-
-
You have successfully installed a management cluster.
- > -- To access resources belonging to a managed cluster from the {prodname} Manager UI, the service or user - account used to log in must have appropriate permissions defined in the managed cluster. -
-
- Let's define admin-level permissions for the service account (mcm-user) we created to log
- in to the Manager UI. Run the following command against your managed cluster.
-
- Next, install calicoctl and ensure strict - affinity is true: -
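The strict-affinity setting can be flipped with:

```bash
calicoctl ipam configure --strictaffinity=true
```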
-
- Download the latest{' '}
-
- Windows Node Installer (WNI)
- {' '}
- binary wni that matches your OpenShift minor version.
-
- Next, determine the AMI id corresponding to Windows Server 1903 (build 18317) or greater. wni{' '}
- defaults to using Windows Server 2019 (build 10.0.17763) which does not include WinDSR support. One way to do
- this is by searching for AMI's matching the string{' '}
- Windows_Server-1903-English-Core-ContainersLatest in the Amazon EC2 console.
-
- Next, run wni to add a Windows node to your cluster. Replace AMI_ID, AWS_CREDENTIALS_PATH,
- AWS_KEY_NAME and AWS_PRIVATE_KEY_PATH with your values:
-
An example of running the above steps:
-
- The wni binary writes the instance details to the file windows-node-installer.json. An
- example of the file:
-
- Use the instance ID from the file and the path of the private key used to create the instance to get the - Administrator user's password: -
-Remote into the Windows node, open a Powershell window, and prepare the directory for Kubernetes files.
-- Copy the Kubernetes kubeconfig file (default location: openshift-tigera-install/auth/kubeconfig), to the - file c:\k\config. -
-- Download the powershell script, install-calico-windows.ps1. -
-- Run the installation script, replacing the Kubernetes version with the version corresponding to your version - of OpenShift. -
-
- Get the Kubernetes version with oc version and use only the major, minor, and patch version
- numbers. For example from a cluster that returns:
-
- You will use 1.18.3:
-
Install and start kube-proxy service. Execute following powershell script/commands.
-Verify kube-proxy service is running.
-
- From the Windows node, download the Windows Machine Config Bootstrapper wmcb.exe that matches your
- OpenShift minor version from{' '}
-
- Windows Machine Config Bootstrapper releases
-
- . For example, for OpenShift 4.5.x:
-
- Next, we will download the worker.ign file from the API server:
-
Next, we run wmcb to configure the kubelet:
---register-with-taints="os=Windows:NoSchedule" which will require Windows pods to
- tolerate that taint.
-
- Next, we make a copy of the kubeconfig because wmcb.exe expects the kubeconfig to be the file{' '}
- c:\k\kubeconfig. Then we configure kubelet to use Calico CNI:
-
Finally, clean up the additional files created on the Windows node:
-Exit the remote session to the Windows node and return to a shell to a Linux node.
-- We need to approve the CSR's generated by the kubelet's bootstrapping process. First, view the pending - CSR's: -
-For example:
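Pending CSRs can be listed with:

```bash
oc get csr
```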
-To approve the pending CSR's:
-For example:
-Finally, wait a minute or so and get all nodes:
-- If the Windows node registered itself successfully, it should appear in the list with a Ready status, ready to - run Windows pods! -
- > - ); -} diff --git a/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/InstallOpenShiftManifests.js b/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/InstallOpenShiftManifests.js deleted file mode 100644 index a2bb6a43c5..0000000000 --- a/calico-enterprise_versioned_docs/version-3.19-2/_includes/components/InstallOpenShiftManifests.js +++ /dev/null @@ -1,24 +0,0 @@ -import React from 'react'; - -import CodeBlock from '@theme/CodeBlock'; - -import { prodname, filesUrl } from '../../variables'; - -export default function InstallOpenShiftManifests(props) { - const uf = props.upgradeFrom; - const exclude1 = ' --exclude=01-cr-*'; - const exclude2 = ' --exclude=02-pull-secret.yaml'; - const flag1 = uf ? exclude1 : ''; - const flag2 = uf === 'Enterprise' ? exclude2 : ''; - - return ( - <> -Download the {prodname} manifests for OpenShift and add them to the generated manifests directory:
-- CNI support -
-Calico CNI for networking with {prodname} network policy
-The geeky details of what you get:
-- Required -
-- A compatible OpenShift cluster -
-- Your environment meets the {prodname}{' '} - - system requirements - -
-- A{' '} - - RedHat account - {' '} - for the pull secret to provision an OpenShift cluster. -
-- OpenShift command line interface from{' '} - - cloud.redhat.com - -
-- Cluster meets the {prodname}{' '} - system requirements -
-- If installing on AWS, a{' '} - - configured AWS account - {' '} - appropriate for OpenShift 4, and have{' '} - - set up your AWS credentials - - . Note that the OpenShift installer supports a subset of{' '} - - AWS regions - - . -
-- OpenShift installer and OpenShift command line interface from{' '} - cloud.redhat.com -
-- A{' '} - - generated a local SSH private key - {' '} - that is added to your ssh-agent -
-- A Tigera license key and credentials -
-- Limitations -
-
- Due to an upstream issue, Windows
- pods can only be run in specific namespaces if you disable SCC. To do this, label the namespace with{' '}
- openshift.io/run-level: "1".
-
Apply the {prodname} manifests for the Prometheus operator.
-- Update the contents of the secret with the image pull secret provided to you by Tigera support representative. -
- Update the contents of the secret with the image pull secret provided to you by your Tigera support representative. -
- For example, if the secret is located at ~/.docker/config.json, run the following commands.
-
- In order to install images from your private registry, you must first pull the images from Tigera's - registry, re-tag them with your own registry, and then push the newly tagged images to your own registry. -
-Use the following commands to pull the required {prodname} images.
-
- Retag the images with the name of your private registry $PRIVATE_REGISTRY and{' '}
- $IMAGE_PATH.
-
Push the images to your private registry.
-Use crane cp to copy the Windows images to your private registry.
For hybrid Linux + Windows clusters, use crane cp on the following Windows images to copy them to your private registry.
crane cp the private {prodnameWindows} images to a public registry.
- Before applying tigera-operator.yaml, modify registry references to use your custom registry:
-
- Next, ensure that an image pull secret has been configured for your custom registry. Set the enviroment variable{' '}
- Next, ensure that an image pull secret has been configured for your custom registry. Set the environment variable{' '}
- deployment spec:
-
- If you are installing Prometheus operator as part of {prodname}, then before applying{' '}
- tigera-prometheus-operator.yaml, modify registry references to use your custom registry:
-
- Before applying custom-resources.yaml, modify registry references to use your custom registry:
-
- For Openshift, after downloading all manifests modify the following to use your custom registry: -
-registry to the secret tigera-pull-secret
-
- Set the spec.registry and spec.imagePath field of your Installation resource to the
- name of your custom registry. For example:
-
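A sketch of the resulting Installation resource (the registry and image path are placeholders):

```bash
kubectl apply -f - <<EOF
apiVersion: operator.tigera.io/v1
kind: Installation
metadata:
  name: default
spec:
  registry: myregistry.example.com
  imagePath: my-image-path
EOF
```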
- In order to install images from your private registry, you must first pull the images from Tigera's - registry, re-tag them with your own registry, and then push the newly tagged images to your own registry. -
-Use the following commands to pull the required {prodname} images.
-
- Retag the images with the name of your private registry $PRIVATE_REGISTRY.
-
Push the images to your private registry.
-Use crane cp to copy the Windows images to your private registry.
For hybrid Linux + Windows clusters, use crane cp on the following Windows images to copy them to your private registry.
crane cp the private {prodnameWindows} images to a public registry.
- Before applying tigera-operator.yaml, modify registry references to use your custom registry:
-
- Next, ensure that an image pull secret has been configured for your custom registry. Set the enviroment variable{' '}
- Next, ensure that an image pull secret has been configured for your custom registry. Set the environment variable{' '}
- deployment spec:
-
- If you are installing Prometheus operator as part of {prodname}, then before applying{' '}
- tigera-prometheus-operator.yaml, modify registry references to use your custom registry:
-
- Before applying custom-resources.yaml, modify registry references to use your custom registry:
-
- Set the spec.registry field of your Installation resource to the name of your custom registry. For
- example:
-
| Patch version | -Release archive link | -
|---|---|
| {release.title} | -- {release.releaseArchiveURL} - | -
If you are using one of the recommended distributions, you will already satisfy these.
-- Due to the large number of distributions and kernel version out there, it’s hard to be precise about the names - of the particular kernel modules that are required to run {prodname}. However, in general, you’ll need: -
- Due to the large number of distributions and kernel versions out there, it’s hard to be precise about the names - of the particular kernel modules that are required to run {prodname}. However, in general, you’ll need: -
- The iptables modules (both the “legacy” and “nft” variants are supported). These are typically
- broken up into many small modules, one for each type of match criteria and one for each type of action.{' '}
- {prodname} requires:
-
set, rpfilter, addrtype,{' '}
- comment, conntrack, icmp, tcp, udp,{' '}
- ipvs, icmpv6 (if IPv6 is enabled in your kernel), mark,{' '}
- multiport, rpfilter, sctp, ipvs (if using
- kube-proxy in IPVS mode).
- REJECT, ACCEPT, DROP,{' '}
- LOG.
- IP sets support.
-Netfilter Conntrack support compiled in (with SCTP support if using SCTP).
-
- IPVS support if using kube-proxy in IPVS mode.
-
- IPIP, VXLAN, Wireguard support, if using {prodname} networking in one of those modes. -
-
- eBPF (including the tc hook support) and XDP (if you want to use the eBPF data plane).
-
x86-64 or arm64 processor with at least 2 cores, 8.0GB RAM and 20 GB free disk space
-- Linux kernel 5.10 or later with required dependencies. The - following distributions have the required kernel, its dependencies, and are known to work well with{' '} - {prodname} and {props.orch}. -
-- If your node is running RHEL 8 or RHEL 9, you must install a specialized policy package before you install {prodname}. - With this package, {prodname} can use SELinux contexts in a series of rules that allow it to interact with persistent and ephemeral data in nonstandard host system locations. -
-If your node has RHEL 8 installed, then run the following command:
-If your node has RHEL 9 installed, then run the following command:
-
- {prodname} must be able to manage cali*
- interfaces on the host. When IPIP is enabled (the default),
- {prodname} also needs to be able to manage tunl*
- interfaces. When VXLAN is enabled, {prodname} also needs to be able to manage the vxlan.calico{' '}
- interface.
-
- Many Linux distributions, such as most of the above, include NetworkManager. By default, NetworkManager - does not allow - {prodname} to manage interfaces. If your nodes have NetworkManager, complete the steps in{' '} - - Preventing NetworkManager from controlling {prodname} interfaces - {' '} - before installing {prodname}. -
-- If your Linux distribution comes with installed Firewalld or another iptables manager it should be disabled. - These may interfere with rules added by {prodname} and result in unexpected behavior. -
-- If a host firewall is needed, it can be configured by {prodname} HostEndpoint and GlobalNetworkPolicy. - More information about configuration at Security for host. -
-- In order to properly run Elasticsearch, nodes must be configured according to the{' '} - - Elasticsearch system configuration documentation. - -
-- The Typha autoscaler requires a minimum number of Linux worker nodes based on total number of schedulable - nodes. -
-| Total schedulable nodes | -Required Linux nodes for Typha replicas | -
|---|---|
| 1 | -1 | -
| 2 | -2 | -
| 3 | -3 | -
| up to 250 | -4 | -
| up to 500 | -5 | -
| up to 1000 | -6 | -
| up to 1500 | -7 | -
| up to 2000 | -8 | -
| 2000 or more | -10 | -
- Ensure that your hosts and firewalls allow the necessary traffic based on your configuration. See{' '} - Component architecture to view the following - components. -
-| Configuration | -Host(s) | -Port/protocol | -
|---|---|---|
| - {prodname} networking options - | -IP-in-IP (default) | -Protocol number 4 | -
| - | BGP | -TCP 179 | -
| - | VXLAN | -UDP 4789 | -
| - | Wireguard | -UDP 51820 (default) | -
| - | IPv6 Wireguard | -UDP 51821 (default) | -
| - Cluster scaling - | -Any {prodname} networking option above with Typha agents enabled | -TCP 5473 (default) | -
| - APIs - | -Kubernetes API (kube-apiserver) to access Kubernetes API datastore | -Often TCP 443 or 6443* | -
| - | {prodname} API server | -TCP 8080 and 5443 (default) | -
| - APIs - | -Kubernetes API (kube-apiserver) to access Kubernetes API datastore | -Often TCP 443 or 8443* | -
| - | {prodname} API server | -TCP 8080 and 5443 (default) | -
| - Nodes - | -calico-node (Felix, BIRD, confd) | -TCP 9090 (default) | -
| - Component metrics - | -Prometheus metrics | -TCP 9081 (default) | -
| - | Prometheus BGP metrics | -TCP 9900 (default) | -
| - | Prometheus API service | -TCP 9090 (default) | -
| - | Prometheus Alertmanager | -TCP 9093 (default) | -
| - Logs and storage - | -Elasticsearch with fluentd datastore | -TCP 9200 (default) | -
| - | -Elasticsearch for cloud (ECK) | -TCP 9443 (default) | -
| - | Elasticsearch gateway | -TCP 5444 (default) | -
| - Visibility and troubleshooting - | -Kibana | -TCP 5601 (default) | -
| - | Packet capture API | -TCP 8444 (default) | -
| - | {prodname} Manager UI | -TCP 9443 (default) | -
| - Intrusion Detection System (IDS) - | -{prodname} intrusion detection | -TCP 5443 (default) | -
| - Compliance - | -{prodname} compliance | -TCP 5443 (default) | -
| - Multi-cluster management - | -Additional port required for Manager UI | -TCP 9449 | -
| - Egress gateway - | -{prodname} egress gateway | -UDP 4790 | -
- *{' '}
-
- The value passed to kube-apiserver using the --secure-port
- flag. If you cannot locate this, check the targetPort value returned by
- kubectl get svc kubernetes -o yaml.
-
-
- *{' '} - - If your compute hosts connect directly and don’t use IP-in-IP, you don’t need to allow IP-in-IP traffic. - -
- )} - > - ); -} - -function Privileges(props) { - return ( - <> -
- Ensure that {prodname} has the CAP_SYS_ADMIN privilege.
-
- The simplest way to provide the necessary privilege is to run {prodname} as root or in a privileged container. -
- {props.orch === orchestrators.Kubernetes && ( - <> -- When installed as a Kubernetes daemon set, {prodname} meets this requirement by running as a privileged - container. This requires that the kubelet be allowed to run privileged containers. There are two ways this - can be achieved. -
---allow-privileged on the kubelet (deprecated).
- - Switch the active operator to the one that will be installed to the new namespace. First, download the - helper script: -
-Then switch the active operator. This will deactivate the currently running operator.
-Download the new manifests for Tigera Operator.
-Download the new manifests for Prometheus operator.
-- If you previously{' '} - - installed using a private registry - - , you will need to{' '} - - push the new images{' '} - - and then{' '} - - update the manifest - {' '} - downloaded in the previous step. -
-Apply the manifest for Tigera Operator.
-operator.tigera.io or projectcalico.org resources to
- utilize new fields available in the update you must make sure you make those changes after applying the{' '}
- tigera-operator.yaml.
- If you downloaded the manifests for Prometheus operator from the earlier step, then apply them now.
-Install your pull secret.
-
- {' '}
- If pulling images directly from quay.io/tigera, you will likely want to use the credentials
- provided to you by your Tigera support representative. If using a private registry, use your private
- registry credentials instead.
-
Download the custom resources manifest.
-
- If you are{' '}
-
- installing using a private registry
-
- , you will need to update the manifest downloaded in the previous step. Update the spec.registry, spec.imagePath, and spec.imagePrefix fields of the installation resource with the registry name, image path, and image prefix of your private registry.
-
- Apply the Tigera custom resources manifest. For more information on configuration options available in this - manifest, see the installation reference. -
-- Install the Tigera custom resources. For more information on configuration options available in this - manifest, see the installation reference. -
-If your cluster has OIDC login configured, follow these steps:
-a. Save a copy of your Manager for reference.
-b. Remove the deprecated fields from your Manager resource.
-- c. If you are currently using v3.2 and are using OIDC with Kibana verify that you have the following - resources in your cluster: -
-- If both of these resources are present, you can continue with the next step. Otherwise, use the - instructions to{' '} - - configure an identity provider - {' '} - to configure OIDC. -
-- d. Follow{' '} - - configure an identity provider - - . -
-- If your cluster is a management cluster using v3.1 or older, apply a{' '} - - ManagementCluster{' '} - - CR to your cluster. -
-- If your cluster is v3.7 or older, apply a new{' '} - Monitor - CR to your cluster. -
-- If your cluster is v3.16 or older, apply a new{' '} - PolicyRecommendation - CR to your cluster. -
-You can monitor progress with the following command:
-kubectl get tigerastatus -o yaml to get more details.
- - If your cluster includes egress gateways, follow the{' '} - - egress gateway upgrade instructions - - . -
-\ || \ | "Or": matches if and only if either `IPv4: Log from BIRD process IPv6: Log from BIRD6 process |
-| `message` | text | The message contained in the log. |
-
-Once a set of BGP logs has accumulated in Elasticsearch, you can perform many interesting queries. Depending on the field that you want to query, different techniques are required. For example:
-
-- To view BGP logs only for IPv4 or IPv6, query on the `ip_version` field and sort by `logtime`
-- To see all logs from a specific node, query on the `host` field
-- To view events in the cluster, query on the `message` field
diff --git a/calico-enterprise_versioned_docs/version-3.19-2/observability/elastic/dns/dns-logs.mdx b/calico-enterprise_versioned_docs/version-3.19-2/observability/elastic/dns/dns-logs.mdx
deleted file mode 100644
index 2d8956ab4f..0000000000
--- a/calico-enterprise_versioned_docs/version-3.19-2/observability/elastic/dns/dns-logs.mdx
+++ /dev/null
@@ -1,64 +0,0 @@
----
-description: Key/value pairs of DNS activity logs and how to construct queries.
----
-
-# Configure DNS logs
-
-$[prodname] pushes DNS activity logs to Elasticsearch, for DNS information that is obtained from [trusted DNS servers](../../../network-policy/domain-based-policy.mdx#trusted-dns-servers). The following table
-details the key/value pairs in the JSON blob, including their
-[Elasticsearch datatype](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-types.html).
-This information should assist you in constructing queries.
-
-| Name | Datatype | Description |
-| ------------------ | ----------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| `start_time` | date | When the collection of the log began in UNIX timestamp format. |
-| `end_time` | date | When the collection of the log concluded in UNIX timestamp format. |
-| `type` | keyword | This field contains one of the following values:LOG: Indicates that this is a normal DNS activity log.UNLOGGED: Indicates that this log is reporting DNS activity that could not be logged in detail because of [DNSLogsFilePerNodeLimit](../../../reference/resources/felixconfig.mdx#spec). |
-| `count` | long | When `type` is:LOG: How many DNS lookups there were, during the log collection interval, with details matching this log.UNLOGGED: The number of DNS responses that could not be logged in detail because of [DNSLogsFilePerNodeLimit](../../../reference/resources/felixconfig.mdx#spec). In this case none of the following fields are provided. |
-| `client_ip` | ip | The IP address of the client pod. A null value indicates aggregation. |
-| `client_name` | keyword | This field contains one of the following values:
● The name of the client pod.
● -: the name of the pod was aggregated. Check client_name_aggr for the pod name prefix.
This field contains one of the following values:
● The name of the DNS server pod.
● -: the DNS server is not a pod.
This field contains one of the following values:
● The aggregated name of the DNS server pod.
● pvt: the DNS server is not a pod. Its IP address belongs to a private subnet.
● pub: the DNS server is not a pod. Its IP address does not belong to a private subnet. It is probably on the public internet.
dest_name_aggr for more information, such as the name of the pod if it was aggregated. |
-| `dest_name_aggr` | keyword | Contains one of the following values:source_name_aggr for more information, such as the name of the pod if it was aggregated. |
-| `source_name_aggr` | keyword | Contains one of the following values:
-1. Wait for the update to complete.
- The blue progress bar at the top of the page displays the message, “Cluster config update in progress.”
-1. Under **Logging**, **Cloudwatch**, make a note of the URL value for a later step, then click the link.
-
-1. In the **Log Streams** list, make note of the common prefix (for example, kube-apiserver-audit) for a later step.
-
-1. Make note of the region where the cluster is hosted (for example, `us-west-2`) for a later step.
-
-### Create a restricted AWS user for compliance reporting
-
-1. Go to the **AWS IAM console** and add a user.
-1. On the **Add user** page, make these changes:
-
- a. Select **Access type**, **Programmatic access**.
-
-
-
-   b. In the **Set permissions** section, select the `CloudWatchLogsReadOnlyAccess` policy to set read-only permissions.
-
-
-
-1. Optional: In the **Add tags** section, add a tag for the user based on your cluster information.
-1. Click **Submit** to create a restricted user.
-
-### Update $[prodname] log collector with EKS values
-
-1. Update the `tigera-secure` LogCollector resource with values from the EKS configuration.
-
- where:
-
- - `additionalSources`: Section where EKS Cloudwatch logs are specified.
- - `eksCloudwatchLog`: Configuration section containing EKS Cloudwatch logs.
-   - `fetchInterval`: Interval in seconds for $[prodname] to get logs from Cloudwatch. Default: 60 seconds; this fetches 1MB every 60 seconds. Adjust it based on the number of CRUD operations performed on cluster resources.
- - `groupName`: Name of the `Log Group` (value from "Enable audit logs in EKS")
- - `region`: AWS region where EKS cluster is hosted (value from "Enable audit logs in EKS")
- - `streamPrefix`: Prefix of `Log Stream` (value from "Enable audit logs in EKS")
-
- **Example**
-
- ```yaml
- apiVersion: operator.tigera.io/v1
- kind: LogCollector
- metadata:
- name: tigera-secure
- spec:
- additionalSources:
- eksCloudwatchLog:
- fetchInterval: 60
- groupName: /aws/eks/mitch-eks-kube-audit-log-forwarder/cluster
- region: us-west-2
- streamPrefix: kube-apiserver-audit-
- status:
- state: Ready
- ```
-
-### Configure authentication between $[prodname] and Cloudwatch logs
-
-In this step, you add AWS authentication information to enable $[prodname] to get logs from the EKS Cloudwatch instance.
-
-Add a Secret named `tigera-eks-log-forwarder-secret` in the `tigera-operator` namespace, with the AWS [Security Credentials](https://docs.aws.amazon.com/general/latest/gr/aws-sec-cred-types.html) in the data section.
-
-```yaml
-apiVersion: v1
-kind: Secret
-metadata:
- name: tigera-eks-log-forwarder-secret
- namespace: tigera-operator
-type: Opaque
-data:
-  aws-id: $(echo -n <YOUR-AWS-ACCESS-KEY-ID> | base64)
-  aws-key: $(echo -n <YOUR-AWS-SECRET-ACCESS-KEY> | base64)
-```
-
-### Typha cache size
-
-| Typha cache size | |
-| -------------------------------- | ------------------------------------------------------------ |
-| Metric | typha_cache_size\{syncer="bgp"\}, typha_cache_size\{syncer="dpi"\}, typha_cache_size\{syncer="felix"\}, typha_cache_size\{syncer="node-status"\}, typha_cache_size\{syncer="tunnel-ip-allocation"\}, sum by (instance) (typha_cache_size), max by (instance) (typha_cache_size) |
-| Example value | Example of max by (instance) (typha_cache_size\{syncer="felix"\}): \{instance="10.0.1.20:9093"\} 661, \{instance="10.0.1.31:9093"\} 661 |
-| Explanation | The total number of key/value pairs in Typha's in-memory cache. This metric represents the scale of the $[prodname] datastore, as it tracks how many WEPs (pods and services), HEPs (host endpoints), networksets, globalnetworksets, $[prodname] network policies, and so on that Typha is aware of across the entire Calico Federation. You can use this metric to monitor individual syncers to Typha (like Felix, BGP, etc.), or to get a sum of all syncers. We recommend that you monitor the largest syncer, but it is completely up to you. This is a good metric to understand how much data is in Typha. Note: If all Typhas are in sync, they should have the same value for this metric. |
-| Threshold value recommendation | The value of this metric will depend on the scale of the Calico Federation and will always increase as WEPs, $[prodname] network policies and clusters are added. Achieve a baseline first, then monitor for any unexpected increases from the baseline. |
-| Threshold breach symptoms | Unexpected increases may indicate memory leaks and performance issues with Typha. |
-| Threshold breach recommendations | Check CPU usage on Typha pods and Kubernetes nodes. Increase resources if needed, rollout and restart Typha(s) if needed. |
-| Priority level | Optional. |
-
-### CPU usage
-
-| CPU usage | |
-| -------------------------------- | ------------------------------------------------------------ |
-| Metric | rate(process_cpu_seconds_total[30s]) \* 100 |
-| Example value | \{endpoint="metrics-port", instance="10.0.1.20:9093", job="typha-metrics-svc", namespace="calico-system", pod="calico-typha-6c6cc9fcf7-csbdl", service="typha-metrics-svc"\} 0.27999999999999403 |
-| Explanation | CPU in use by Typha represented as a percentage of a core. |
-| Threshold value recommendation | A spike at startup is normal. It is recommended to achieve a baseline first, then monitor for any unexpected increases from this baseline. A rule of thumb is to investigate maintained CPU usage above 90%. |
-| Threshold breach symptoms | Unexpected maintained CPU usage could cause Typha to fall behind in updating its clients (for example, Felix) and could cause delays to policy updates. |
-| Threshold breach recommendations | Check CPU usage on Kubernetes nodes. If needed, increase resources, and rollout restart Typha(s). |
-| Priority level | Recommended. |
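-
-To turn the rule of thumb above into an alert, a minimal sketch follows, assuming Prometheus is managed by the Prometheus operator and Typha is scraped under the `typha-metrics-svc` job label (as in the example value above). The rule name, namespace, rate window, and `for` duration are illustrative, not prescribed:
-
-```yaml
-apiVersion: monitoring.coreos.com/v1
-kind: PrometheusRule
-metadata:
-  name: calico-metric-alerts # hypothetical name
-  namespace: tigera-prometheus # adjust to the namespace your Prometheus watches
-spec:
-  groups:
-    - name: calico.rules
-      rules:
-        - alert: TyphaHighCpu
-          # Sustained CPU above the 90% rule of thumb.
-          expr: rate(process_cpu_seconds_total{job="typha-metrics-svc"}[5m]) * 100 > 90
-          for: 15m
-          labels:
-            severity: warning
-```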
-
-### Memory usage
-
-| Memory usage | |
-| -------------------------------- | ------------------------------------------------------------ |
-| Metric | process_resident_memory_bytes |
-| Example value | process_resident_memory_bytes\{endpoint="metrics-port", instance="10.0.1.20:9093", job="typha-metrics-svc", namespace="calico-system", pod="calico-typha-6c6cc9fcf7-csbdl", service="typha-metrics-svc"\} 80515072 |
-| Explanation | Amount of memory used by Typha. |
-| Threshold value recommendation | It is recommended to achieve a baseline first, then monitor for any unexpected increases from this baseline. A rule of thumb is to investigate if maintained memory usage is above 90% of what is available from the underlying node. The metric can also be used for memory leaks. In this case, the metric would show Typhas' memory consumption rising over time, even though the cluster is in a stable state. |
-| Threshold breach symptoms | Unexpected maintained memory usage could cause Typha to fall behind in updating its clients (for example, Felix) and could cause delays to policy updates. |
-| Threshold breach recommendations | Check memory usage on Kubernetes nodes. Increase resources if needed, and rollout restart Typha(s) if needed. |
-| Priority level | Recommended. |
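-
-Because the recommendation is baseline-relative, any fixed threshold must come from your own measurements. A sketch entry for the `rules:` list of the PrometheusRule shown under *CPU usage*, with a stand-in threshold:
-
-```yaml
-  - alert: TyphaMemoryAboveBaseline
-    # 2e8 (~200 MB) is a placeholder; use roughly 1.5x your measured baseline.
-    expr: process_resident_memory_bytes{job="typha-metrics-svc"} > 2e8
-    for: 30m
-```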
-
-## Typha cluster mesh metrics
-
-The following metrics are applicable only if you have implemented [Cluster mesh](multicluster/federation/overview.mdx).
-
-Note that this metric requires a count syntax because you will have a copy of the metric per RemoteClusterConfiguration. As shown in the table, the value `2 = In Sync` reflects good connections.
-
-```
-remote_cluster_connection_status\{cluster="foo"\} = 2
-remote_cluster_connection_status\{cluster="bar"\} = 2
-remote_cluster_connection_status\{cluster="baz"\} = 1
-```
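-
-Following the count syntax above, a sketch alert entry (for the same hypothetical rule group as earlier) that fires when any remote cluster reports a status other than `2 = In Sync`:
-
-```yaml
-  - alert: RemoteClusterNotInSync
-    # Empty result (no alert) when every remote cluster reports 2.
-    expr: count(remote_cluster_connection_status != 2) > 0
-    for: 5m
-```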
-
-### Remote cluster connections (in-sync)
-
-| Remote cluster connections (in-sync) | |
-| ------------------------------------ | ------------------------------------------------------------ |
-| Metric | count by (instance) (remote_cluster_connection_status == 2) |
-| Explanation | This represents the number of remote cluster connections that are connected and in sync. Each remote cluster reports a *connection_status* value, where 2 = In Sync (as shown in the example above). |
-
-### Remote cluster connections (not in-sync)
-
-| Remote cluster connections (not in-sync) | |
-| ---------------------------------------- | ------------------------------------------------------------ |
-| Metric | count by (instance) (remote_cluster_connection_status != 2) |
-| Explanation | Number of remote cluster connections that are not in sync (that is, resyncing or failing to connect). Each remote cluster reports a *connection_status* value, as above. |
-
-### Total connections accepted
-
-| Total connections accepted | |
-| -------------------------------- | ------------------------------------------------------------ |
-| Metric | typha_connections_accepted |
-| Example value | typha_connections_accepted\{endpoint="metrics-port", instance="10.0.1.20:9093", job="typha-metrics-svc", namespace="calico-system", pod="calico-typha-6c6cc9fcf7-csbdl", service="typha-metrics-svc"\} 10 |
-| Explanation | Total number of connections accepted over time. This value always increases. |
-| Threshold value recommendation | A steady increase over time is normal. Counters rising after a Felix or Typha restart is also normal (as clients get rebalanced). Investigate connection counters that rise rapidly with no Felix or Typha restarts. |
-| Threshold breach symptoms | Counters rising when there are no Felix or Typha restarts, or no action that could cause restarts (an upgrade for example), could indicate unexpected Felix or Typha restarts or issues. |
-| Threshold breach recommendations | Check resource usage on Typha(s) and Kubernetes nodes. Increase resources if needed. |
-| Priority level | Optional. |
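-
-A counter that rises rapidly with no restarts can be caught by alerting on its rate. In the sketch below (for the same hypothetical rule group), the 1-connection-per-second threshold is a placeholder to tune against your baseline:
-
-```yaml
-  - alert: TyphaConnectionChurn
-    # Accepted-connection counter climbing steadily for 15 minutes.
-    expr: rate(typha_connections_accepted[10m]) > 1
-    for: 15m
-```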
-
-### Client connections actively streaming
-
-| Client connections actively streaming | |
-| ------------------------------------- | ------------------------------------------------------------ |
-| Metric | sum by (instance) (typha_connections_streaming) |
-| Example value | \{instance="10.0.1.20:9093"\} 10 \{instance="10.0.1.31:9093"\} 5 |
-| Explanation | Current number of active connections that are "streaming" (have completed the handshake) to this Typha. After a connection has been accepted (reported in the previous metric), there is a handshake before the connection is deemed to be actively streaming. This indicates how many clients are connected to a Typha. Note that the underlying metric is reported per cache, so the sum aggregates across caches as well. |
-| Threshold value recommendation | Compare the value for Total Connections Accepted and Client Connections Actively Streaming. The fluctuation of these values should be in sync with each other if accepted connections are turning into actively streamed connections. If there is a discrepancy, you should investigate. Note: As always, it is recommended to baseline the relationship between these two metrics to have a sense of what is normal. It is also worth noting that in smaller clusters, it is normal for Typha to be unbalanced. Typha can handle hundreds of connections, so it is of no concern if all nodes in a 10-node cluster (for example) connect to the same Typha. |
-| Threshold breach symptoms | Felix is not getting updates from Typha. $[prodname] network policies are out-of-sync. |
-| Threshold breach recommendations | Check Typha and Felix logs, and rollout restart Typha(s) if needed. |
-| Priority level | Recommended. |
-
-### Rebalanced client connections
-
-| Rebalanced client connections | |
-| -------------------------------- | ------------------------------------------------------------ |
-| Metric | rate(typha_connections_dropped[$__rate_interval]) |
-| Example value | \{endpoint="metrics-port", instance="10.0.1.20:9093", job="typha-metrics-svc", namespace="calico-system", pod="calico-typha-6c6cc9fcf7-csbdl", service="typha-metrics-svc"\} |
-| Explanation | Number of client connections dropped to rebalance and share the load across different Typhas. |
-| Threshold value recommendation | It is normal to see this value increasing sometimes. Investigate if connection dropped counters is rising constantly. If all Typhas are dropping connections because all Typhas believe they have too much load, this also warrants investigation. |
-| Threshold breach symptoms | Dropping connections is rate limited so it should not affect the cluster as a whole. Typha clients, like Felix, will get dropped sometimes (but not constantly), and could result in periodic delays to policy updates. |
-| Threshold breach recommendations | Ensure that the Kubernetes nodes have enough resources. |
-| Priority level | Optional. |
-
-### 99 percentile client fall-behind
-
-| 99 percentile client fall-behind | |
-| -------------------------------- | ------------------------------------------------------------ |
-| Metric | max by (instance) (typha_client_latency_secs\{quantile='0.99'\}) |
-| Example value | \{instance="10.0.1.20:9093"\} 0.1234\{instance="10.0.1.31:9093"\} 0.1234 |
-| Explanation | This metric measures how far behind Typha's client-handling threads are at reading updates. |
-
-### 99 percentile client write latency
-
-| 99 percentile client write latency | |
-| ---------------------------------- | ------------------------------------------------------------ |
-| Metric | max by (instance) (typha_client_write_latency_secs) |
-| Example value | \{instance="10.0.1.20:9093"\} 0.007450815 |
-| Explanation | Time for Typha to write to a client's socket (for example, Felix). |
-| Threshold value recommendation | If the write latency is increasing, this indicates that a client (for example, Felix) is having an issue, or the network is having an issue. It is normal for intermittent spikes. Investigate any persistent latency. |
-| Threshold breach symptoms | Typha clients will lag behind in receiving updates that Typha is sending. Potential symptoms include $[prodname] network policies being out-of-sync. |
-| Threshold breach recommendations | Check Felix logs and resource usage. |
-| Priority level | Recommended. |
-
-### 99 percentile client ping latency
-
-| 99 percentile client ping latency | |
-| --------------------------------- | ------------------------------------------------------------ |
-| Metric | max by (instance) (typha_ping_latency\{quantile="0.99"\}) |
-| Example value | \{instance="10.0.1.20:9093"\} 0.034285331 |
-| Explanation | This metric tracks the round-trip-time from Typha to a client. How long it takes for Typha's clients to respond to pings over the Typha protocol. |
-| Threshold value recommendation | An increase in this metric above 1 second indicates that the clients, network or Typha are more heavily loaded. It is normal for intermittent spikes. Persistent latency above 1 second warrants investigation. |
-| Threshold breach symptoms | Typha clients could be behind in time on updates Typha is sending. Potential symptoms include $[prodname] network policies being out-of-sync. |
-| Threshold breach recommendations | Check Typha and Felix logs and resource usage. It is recommended to focus on Felix logs and resource usage first, as there is generally more overhead with Felix and thus more of a chance of overload. Check if the node is overloaded and review/increase calico-node/Typha CPU requests if needed. If needed, rollout restart Typha(s) and calico-node(s). |
-| Priority level | Recommended. |
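-
-The 1-second guideline above translates directly into a sketch alert entry for the same hypothetical rule group:
-
-```yaml
-  - alert: TyphaPingLatencyHigh
-    # Persistent p99 round-trip above the 1-second guideline.
-    expr: max by (instance) (typha_ping_latency{quantile="0.99"}) > 1
-    for: 10m
-```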
-
-## Typha cache internals
-
-### 99 percentile breadcrumb size
-
-| 99 percentile breadcrumb size | |
-| -------------------------------- | ------------------------------------------------------------ |
-| Metric | max by (instance) (typha_breadcrumb_size\{quantile="0.99"\}) |
-| Explanation | Typha stores datastore changes as a series of blocks called breadcrumbs. Typha stores updates inside these breadcrumbs (for example, a single pod churn event would be a single update). Typha can store multiple updates in a single breadcrumb, with a default maximum size of 100. |
-| Threshold value recommendation | Typha generating blocks of size 100 during start up is normal. Investigate if Typha is consistently generating blocks of size 90+, which can indicate Typha is overloaded. |
-| Threshold breach symptoms | Sustained block sizes of 100 can indicate that Typha is falling behind on information and updates contained in the datastore. This will lead to Typha clients also falling behind (for example, $[prodname] network policy objects may not be current). |
-| Threshold breach recommendations | Check Typha logs and resource usage. Check if there is a lot of activity within the cluster that would cause Typha to send large breadcrumbs (for example, a huge amount of pod churn). If possible, reduce churn rate of resources on the cluster. |
-| Priority level | Recommended. |
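-
-The size-90+ guideline above can be encoded as a sketch alert entry for the same hypothetical rule group:
-
-```yaml
-  - alert: TyphaLargeBreadcrumbs
-    # Consistently near the maximum block size of 100 suggests overload.
-    expr: max by (instance) (typha_breadcrumb_size{quantile="0.99"}) >= 90
-    for: 15m
-```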
-
-### Non-blocking breadcrumbs fraction
-
-| Non-blocking breadcrumb fraction | |
-| -------------------------------- | ------------------------------------------------------------ |
-| Metric | (sum by (instance) (rate(typha_breadcrumb_non_block[30s])))/((sum by (instance) (rate(typha_breadcrumb_non_block[30s])))+(sum by (instance) (rate(typha_breadcrumb_block[30s])))) |
-| Example value | \{instance="10.0.1.20:9093"\} NaN |
-| Explanation | Typha stores datastore changes as a series of blocks called "breadcrumbs". Each client "follows the breadcrumbs" either by blocking and waiting, or skipping to the next one (non-blocking) if it is already available. Non-blocking breadcrumb actions indicates that Typha is constantly sending breadcrumbs to keep up with the datastore. Blocking breadcrumb actions indicate that Typha and the client have caught up, are up-to-date, and are waiting on the next breadcrumb. This metric will give a ratio between blocking and non-blocking actions that can indicate the health of Typha, its clients, and the cluster. |
-| Threshold value recommendation | As the load on Typha increases, the ratio of skip-ahead, non-blocking reads, increases. If it approaches 100% then Typha may be overloaded (since clients only do non-blocking reads when they're behind). |
-| Threshold breach symptoms | Consistent non-blocking breadcrumbs could indicate that Typha is falling behind on information and updates contained in the datastore. This will lead to Typha clients also being behind (for example, $[prodname] network policy object may not be current). |
-| Threshold breach recommendations | Check Typha and Felix logs and resource usage. Check if there is a lot of activity within the cluster that would cause Typha to continuously send non-blocking breadcrumbs. |
-| Priority level | Recommended. |
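-
-A sketch alert entry on the same ratio, for the same hypothetical rule group; 0.9 is a placeholder for "approaching 100%":
-
-```yaml
-  - alert: TyphaNonBlockingReadsHigh
-    # Skip-ahead (non-blocking) read ratio approaching 100%.
-    expr: |
-      (sum by (instance) (rate(typha_breadcrumb_non_block[30s])))
-        /
-      ((sum by (instance) (rate(typha_breadcrumb_non_block[30s])))
-        + (sum by (instance) (rate(typha_breadcrumb_block[30s]))))
-      > 0.9
-    for: 15m
-```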
-
-### Datastore updates total
-
-| Datastore updates total | |
-| -------------------------------- | ------------------------------------------------------------ |
-| Metric | sum by (instance) (rate(typha_updates_total[30s])) |
-| Example value | \{instance="10.0.1.20:9093"\} 0 |
-| Explanation | The rate of updates from the datastore(s). For example, updates to Pods/Nodes/Policies/etc. |
-| Threshold value recommendation | Intermittent spikes are expected. Constant updates indicates a very busy cluster (for example, lots of pod churn). |
-| Threshold breach symptoms | Constant updates could lead to overloaded Typhas, where Typha's clients could fall behind. |
-| Threshold breach recommendations | Ensure Typha has enough resources to handle a very dynamic cluster. |
-| Priority level | Optional. |
-
-### Datastore update skipped (no-ops)
-
-| Datastore update skipped (no-ops) | |
-| --------------------------------- | ------------------------------------------------------------ |
-| Metric | sum by (instance) (rate(typha_updates_skipped[30s])) |
-| Example value | \{instance="10.0.1.20:9093"\} 0 |
-| Explanation | The number of updates from the datastore that Typha detected were no-ops. For example, an update to a Kubernetes node resource that did not touch any values of interest to $[prodname]. Such updates are not propagated to clients, which saves resources. |
-| Threshold value recommendation | N/A |
-| Threshold breach symptoms | N/A |
-| Threshold breach recommendations | N/A |
-| Priority level | Optional. |
-
-## Typha snapshot details
-
-### Median snapshot send time
-
-| Median snapshot send time | |
-| -------------------------------- | ------------------------------------------------------------ |
-| Metric | max by (instance) (typha_client_snapshot_send_secs\{quantile="0.5"\}) |
-| Example value | \{instance="10.0.1.20:9093"\} NaN |
-| Explanation | The median time to stream the initial datastore snapshot to each client. It is useful to know the time it takes for a client to receive the data when it connects; it does not include time to process the data. |
-| Threshold value recommendation | Investigate if this value is moving towards 10s of seconds. |
-| Threshold breach symptoms | High values of this metric could indicate that newly-started clients are taking a long time to get the latest snapshot of the datastore, increasing the window of time where networking/policy updates are not being applied to the data plane during a restart/upgrade. Typha has a write timeout for writing the snapshot; if a client cannot receive the snapshot within that timeout, it is disconnected. Clients falling behind on information and updates contained in the datastore (for example, $[prodname] network policy object may not be current). |
-| Threshold breach recommendations | Check Typha and calico-node logs and resource usage. Check for network congestion. Investigate why a particular calico-node is slow; it is likely on an overloaded node with insufficient CPU. |
-| Priority level | Optional. |
-
-### Clients requiring grace period
-
-| Clients requiring grace period | |
-| -------------------------------- | ------------------------------------------------------------ |
-| Metric | sum by (instance) (typha_connections_grace_used) |
-| Example value | \{instance="10.0.1.20:9093"\} 0 |
-| Explanation | The number of client connections, per Typha, that required a grace period. After sending the snapshot to the client, Typha allows a grace period for the client to catch up to the most recent data. Sending the initial snapshot should take < 1 second, but processing the snapshot can take longer, so this grace period gives the newly connected client time to process it. |
-| Threshold value recommendation | If this metric is constantly increasing, it can indicate potential performance issues with Typha and clients. It can indicate that performance is being impacted and may warrant investigation. |
-| Threshold breach symptoms | High values of this metric could indicate clients falling behind on information and updates contained in the datastore (for example, $[prodname] network policy object may not be current). |
-| Threshold breach recommendations | Check Typha and calico-node logs and resource usage. Check for network congestion, and determine the root cause. |
-| Priority level | Optional. |
-
-### Max snapshot size (raw)
-
-| Max snapshot size (raw) | |
-| -------------------------------- | ------------------------------------------------------------ |
-| Metric | max(typha_snapshot_raw_bytes) |
-| Example value | \{\} 557359 |
-| Explanation | The raw size in bytes of snapshots sent from Typha to clients. |
-| Threshold value recommendation | N/A |
-| Threshold breach symptoms | N/A |
-| Threshold breach recommendations | N/A |
-| Priority level | Optional. |
-
-### Max snapshot size (compressed)
-
-| Max snapshot size (compressed) | |
-| -------------------------------- | ------------------------------------------------------------ |
-| Metric | max(typha_snapshot_compressed_bytes) |
-| Example value | \{\} 134845 |
-| Explanation | The compressed size in bytes of snapshots sent from Typha to clients. |
-| Threshold value recommendation | This metric can help estimate the bandwidth required for Felix startup. For example, if the compressed snapshot size averages 20 MB and 1,000 Felix/calico-node instances start up, the bandwidth requirement between the Typha pool and the set of Felixes across the network can be estimated at 20 GB. |
-| Threshold breach symptoms | N/A |
-| Threshold breach recommendations | N/A |
-| Priority level | Optional. |
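-
-The worked example above (20 MB average snapshot x 1,000 nodes = 20 GB) can be computed continuously in PromQL, using the node count from `felix_cluster_num_hosts` (covered under *Active calico nodes* below). A sketch recording rule for the same hypothetical group:
-
-```yaml
-  - record: typha:snapshot_restart_bandwidth_estimate_bytes
-    # Rough bytes needed to stream one compressed snapshot to every calico-node.
-    expr: max(typha_snapshot_compressed_bytes) * max(felix_cluster_num_hosts)
-```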
-
-## Policy metrics
-
-:::note
-The following policy metrics are exposed by Felix on a separate endpoint and are used in the web console. They require special Prometheus configuration to scrape the metrics. For details, see [Policy metrics](./policy-metrics).
-
-:::
-
-### Denied traffic
-
-| Denied traffic | |
-| -------------------------------- | ------------------------------------------------------------ |
-| Metric | calico_denied_packets, calico_denied_bytes |
-| Example value | calico_denied_packets\{endpoint="calico-metrics-port", instance="ip-10-0-1-30.ca-central-1.compute.internal", job="calico-node-metrics", namespace="calico-system", pod="calico-node-6pcqm", policy="default… |
-| Explanation | Number of packets or bytes that have been dropped by explicit or implicit deny rules. Note that you'll get one instance of `calico_denied_packets/bytes` for each policy rule that is denying traffic. For example: calico_denied_packets\{policy="tier1\|fv/policy1\|0\|deny\|-1",srcIP="10.245.13.133"\} |
-| Threshold value recommendation | The general rule of thumb is this metric should report zero at a stable state. Any deviation means that policy and traffic have diverged. Achieving a zero state depends on the stability and maturity of your cluster and policy. |
-| Threshold breach symptoms | Either unexpected traffic is being denied because of an attack (one example), or expected traffic is being denied because of a misconfiguration in a policy. |
-| Threshold breach recommendations | If this metric indicates that policy and traffic have diverged, the recommended steps are: Determine if an attack is causing the metric to spike, or if these flows should be allowed. If the flow should indeed be allowed, update the policy or a preceding policy to allow this traffic. |
-| Priority level | Recommended. |
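-
-Assuming the special scrape configuration noted above is in place, the zero-at-stable-state expectation becomes a simple sketch alert entry for the same hypothetical rule group:
-
-```yaml
-  - alert: CalicoDeniedTraffic
-    # Any packets hitting deny rules; the stable-state expectation is zero.
-    expr: sum(rate(calico_denied_packets[5m])) > 0
-    for: 5m
-```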
-
-### Traffic per rule
-
-| Traffic per rule | |
-| -------------------------------- | ------------------------------------------------------------ |
-| Metric | cnx_policy_rule_bytes, cnx_policy_rule_packets |
-| Example value | cnx_policy_rule_bytes\{action="allow", endpoint="calico-metrics-port", instance="ip-10-0-1-20.ca-central-1.compute.internal", job="calico-node-metrics", namespace="calico-system", pod="calico-node-qzpkt", policy="es-kube-controller-access", rule_direction="egress", rule_index="1", service="calico-node-metrics", tier="allow-tigera", traffic_direction="inbound"\} |
-| Explanation | Number of bytes or packets handled by $[prodname] network policy rules. |
-| Threshold value recommendation | This metric should usually be non-zero (unless expected). A zero value indicates the rule is not matching any packets, and could be surplus to requirements. |
-| Threshold breach symptoms | N/A |
-| Threshold breach recommendations | If this metric consistently reports a zero value over an acceptable period of time, you can consider removing the policy rule. |
-| Priority level | Optional. |
-
-### Connections per policy rule
-
-| Connections per policy rule | |
-| -------------------------------- | ------------------------------------------------------------ |
-| Metric | cnx_policy_rule_connections |
-| Example value | cnx_policy_rule_connections\{endpoint="calico-metrics-port", instance="ip-10-0-1-20.ca-central-1.compute.internal", job="calico-node-metrics", namespace="calico-system", pod="calico-node-qzpkt", policy="es-kube-controller-access", rule_direction="egress", rule_index="0", service="calico-node-metrics", tier="allow-tigera", traffic_direction="outbound"\} |
-| Explanation | Number of connections handled by $[prodname] policy rules. |
-| Threshold value recommendation | This metric is similar to *Traffic per Rule* but this deals more with flow monitoring. This metric should usually be non-zero. A zero value indicates that the rule is not matching any packets and could be surplus to requirements. |
-| Threshold breach symptoms | N/A |
-| Threshold breach recommendations | If this metric consistently reports a zero value over an acceptable period of time, this policy rule can be considered for removal. |
-| Priority level | Optional. |
-
-## Felix cluster-state metrics
-
-### CPU usage
-
-| CPU usage | |
-| -------------------------------- | ------------------------------------------------------------ |
-| Metric | rate(process_cpu_seconds_total[30s]) \* 100 |
-| Example value | \{endpoint="metrics-port", instance="10.0.1.20:9091", job="felix-metrics-svc", namespace="calico-system", pod="calico-node-qzpkt", service="felix-metrics-svc"\}3.1197504199664072 |
-| Explanation | CPU in use by calico-node represented as a percentage of a core. |
-| Threshold value recommendation | A spike at startup is normal. It is recommended to first achieve a baseline and then monitor for any unexpected increases from this baseline. Investigate if maintained CPU usage goes above 90%. |
-| Threshold breach symptoms | Unexpected maintained CPU usage could cause Felix to fall behind and could cause delays to policy updates. |
-| Threshold breach recommendations | Check CPU usage on Kubernetes nodes. Increase resources if needed, rollout restart calico-node(s) if needed. |
-| Priority level | Recommended. |
-
-### Memory usage
-
-| Memory usage | |
-| -------------------------------- | ------------------------------------------------------------ |
-| Metric | process_resident_memory_bytes |
-| Example value | process_resident_memory_bytes\{endpoint="metrics-port", instance="10.0.1.20:9091", job="felix-metrics-svc", namespace="calico-system", pod="calico-node-qzpkt", service="felix-metrics-svc"\} 98996224 |
-| Explanation | Amount of memory in use by calico-node. |
-| Threshold value recommendation | Recommended to achieve a baseline first, then monitor for any unexpected increases from this baseline. Investigate if sustained memory usage goes above 90% of what is available from the underlying node. |
-| Threshold breach symptoms | Unexpected, maintained, memory usage could cause Felix to fall behind and could cause delays to policy updates. |
-| Threshold breach recommendations | Check memory usage on Kubernetes nodes. Increase resources if needed, rollout restart calico-node(s) if needed. |
-| Priority level | Recommended. |
-
-### Active endpoints on each host
-
-| Active endpoints on each host | |
-| -------------------------------- | ------------------------------------------------------------ |
-| Metric | felix_active_local_endpoints |
-| Example value | felix_active_local_endpoints\{endpoint="metrics-port", instance="10.0.1.30:9091", job="felix-metrics-svc", namespace="calico-system", pod="calico-node-6pcqm", service="felix-metrics-svc"\} 36 |
-| Explanation | Number of active pod network endpoints (pods) and host endpoints (HEPs) on this node. |
-| Threshold value recommendation | The threshold relates to resource limits on the node, for example, kubelet's max-pods setting. |
-| Threshold breach symptoms | Suggests Felix is getting out of sync. |
-| Threshold breach recommendations | Rollout restart calico-node and report the issue to support. |
-| Priority level | Optional. |
-
-### Active calico nodes
-
-| Active calico nodes | |
-| -------------------------------- | ------------------------------------------------------------ |
-| Metric | max(felix_cluster_num_hosts) |
-| Example value | \{\} 3 |
-| Explanation | Total number of nodes in the cluster that have calico-node deployed and running. |
-| Threshold value recommendation | This value should be equal to the number of nodes in the cluster. If there are discrepancies, then calico-nodes on some nodes are having issues. |
-| Threshold breach symptoms | $[prodname] network policies on affected nodes could be out-of-sync. |
-| Threshold breach recommendations | Check calico-node logs, rollout restart calico-node if needed. |
-| Priority level | Recommended. |
-
-### Felix cluster policies
-
-| Felix cluster policies | |
-| -------------------------------- | ------------------------------------------------------------ |
-| Metric | felix_cluster_num_policies |
-| Example value | felix_cluster_num_policies\{endpoint="metrics-port", instance="10.0.1.20:9091", job="felix-metrics-svc", namespace="calico-system", pod="calico-node-qzpkt", service="felix-metrics-svc"\} 58 |
-| Explanation | Total number of $[prodname] network policies in the cluster. |
-| Threshold value recommendation | Because $[prodname] is a distributed system, the number of policies should be generally consistent across all nodes. It is expected to have some skew between nodes for a short period of time while they sync, however they should never be out of sync for very long. |
-| Threshold breach symptoms | If nodes are out of sync for a long time, calico-nodes may be having issues or experiencing resource contention. Check the Errors Plot to see if there are any iptables errors reported. |
-| Threshold breach recommendations | Redeploy calico-node if issues are seen, and increase resources if needed. |
-| Priority level | Optional. |
-
-### Felix active local policies
-
-| Felix active local policies | |
-| -------------------------------- | ------------------------------------------------------------ |
-| Metric | felix_active_local_policies |
-| Example value | felix_active_local_policies\{endpoint="metrics-port", instance="10.0.1.30:9091", job="felix-metrics-svc", namespace="calico-system", pod="calico-node-6pcqm", service="felix-metrics-svc"\} 44 |
-| Explanation | Total number of network policies deployed on a per-node basis. |
-| Threshold value recommendation | There is no hard limit on active policies. $[prodname] can handle 1000+ active policies, but it impacts performance, especially if there is pod churn. The best solution is to optimize policies by combining multiple rules into one policy, and to make sure that top-level policy selectors are being used. |
-| Threshold breach symptoms | N/A |
-| Threshold breach recommendations | Redeploy calico-node if issues are seen, and increase resources if needed. |
-| Priority level | Recommended. |
-
-### Felix open FDS
-
-| Felix open FDS | |
-| -------------------------------- | ------------------------------------------------------------ |
-| Metric | sum by (pod) (process_open_fds\{pod=~"calico-node.*"\}) |
-| Example value | \{pod="calico-node-6pcqm"\} 90 |
-| Explanation | Number of opened file descriptors per calico-node pod. |
-| Threshold value recommendation | Alert on this metric when it approaches the ulimit (as reported by the `process_max_fds` metric). You should not be anywhere near the maximum. |
-| Threshold breach symptoms | Felix may become unstable/crash or fail to apply updates as it should. These failures and issues are logged. |
-| Threshold breach recommendations | Check Felix logs, redeploy calico-node if you see log issues, and increase the `max_fds` value if possible. |
-| Priority level | Optional. |
-
-### Felix max FDS
-
-| Felix max FDS | |
-| -------------------------------- | ------------------------------------------------------------ |
-| Metric | sum by (pod) (process_max_fds\{pod=~"calico-node.*"\}) |
-| Example value | \{pod="calico-node-qzpkt"\} 1048576 |
-| Explanation | Maximum number of opened file descriptors allowed per calico-node pod. |
-| Threshold value recommendation | N/A |
-| Threshold breach symptoms | N/A |
-| Threshold breach recommendations | N/A |
-| Priority level | Optional. |
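-
-Combining this metric with `process_open_fds` from the previous table gives a ratio to alert on well before the ulimit is reached. A sketch entry for the same hypothetical rule group; 80% is a placeholder:
-
-```yaml
-  - alert: CalicoNodeFdsNearLimit
-    # Open descriptors above 80% of the per-process maximum.
-    expr: |
-      sum by (pod) (process_open_fds{pod=~"calico-node.*"})
-        / sum by (pod) (process_max_fds{pod=~"calico-node.*"})
-      > 0.8
-    for: 10m
-```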
-
-### Felix resync started
-
-| Felix resync started | |
-| -------------------------------- | ------------------------------------------------------------ |
-| Metric | sum(rate(felix_resyncs_started[5m])) |
-| Explanation | This is the number of times that Typha has reported to Felix that it is re-connecting with the datastore. |
-| Threshold value recommendation | Occasional resyncs are normal. Investigate resync counters that rapidly rise. |
-| Threshold breach symptoms | Typha pods may be having issues or experiencing resource contention. Some calico-nodes that are paired with Typha pods experiencing issues will not be able to sync with the datastore. |
-| Threshold breach recommendations | Investigate the root cause to avoid redeploying Typha (which can be very disruptive). Check resource contention and network connectivity from Typha to the datastore to see if Typha is working fine or if the API server is overloaded. |
-| Priority level | Recommended. |
-
-### Felix dropped logs
-
-| Felix dropped logs | |
-| -------------------------------- | ------------------------------------------------------------ |
-| Metric | felix_logs_dropped |
-| Example value | felix_logs_dropped\{endpoint="metrics-port", instance="10.0.1.20:9091", job="felix-metrics-svc", namespace="calico-system", pod="calico-node-qzpkt", service="felix-metrics-svc"\} 0 |
-| Explanation | The number of logs Felix has dropped. Note that this metric does not count flow-logs; it counts logs to stdout. |
-| Threshold value recommendation | Occasional drops are normal. Investigate if drop counters rapidly rise. |
-| Threshold breach symptoms | Felix will drop logs if it cannot keep up with writing them out. These are ordinary code logs, not flow logs. Calico-node may be under resource constraints. |
-| Threshold breach recommendations | Check CPU usage on calico-nodes and Kubernetes nodes. Increase resources if needed, and rollout restart calico-node(s) if needed. |
-| Priority level | Optional. |
-
-## Felix error metrics
-
-### IPset errors
-
-| IPset errors | |
-| -------------------------------- | ------------------------------------------------------------ |
-| Metric | sum(rate(felix_ipset_errors[5m])) |
-| Example value | \{\} 0 |
-| Explanation | Number of ipset creation, modification, and deletion command failures. This metric reports how many times the ipset command has failed when Felix tried to run it. An error can occur when Felix sends bad ipset command data, or the kernel throws an error (potentially because it was too busy to handle this request at that time). |
-| Threshold value recommendation | Occasional errors are normal. Investigate error counters that rapidly rise. |
-| Threshold breach symptoms | $[prodname] network policies may not scope all endpoints in network policy rules. Cluster nodes may be under resource contention, which may result in other _error and _seconds metrics rising. Repeated errors could mean some persistent problem (for example, some other process has created an IP set with that name, which is incompatible). |
-| Threshold breach recommendations | See the Errors Plot graph to determine if the scope is cluster-wide or node-local. Check calico-node logs. Check resource usage and contention on Kubernetes nodes and calico-nodes. Add nodes/resources if needed. If resource contention is not seen, restart calico-node(s) and monitor. Ensure that other processes using iptables are not blocking $[prodname] network policy management. |
-| Priority level | Optional. |
-
-### Iptables restore errors
-
-| Iptables restore errors | |
-| -------------------------------- | ------------------------------------------------------------ |
-| Metric | sum(rate(felix_iptables_restore_errors[5m])) |
-| Explanation | The number of iptables-restore errors over five minutes. The iptables-restore command is used when $[prodname] makes a change to iptables: for example, when a new WEP or HEP is created, when a WEP or HEP changes, or when a policy that affects a WEP or HEP changes. |
-| Threshold value recommendation | Occasional errors are normal. Investigate error counters that rapidly rise. |
-| Threshold breach symptoms | $[prodname] network policies are not up to date. Cluster nodes may be under resource contention, which may result in other _error and _seconds metrics rising. |
-| Threshold breach recommendations | See the Errors Plot graph to determine if the scope is cluster-wide or node-local. Check calico-node logs. Check resource usage and contention on Kubernetes nodes and calico-nodes. Add nodes/resources if needed. If no resource contention is seen, restart calico-node and monitor. |
-| Priority level | Optional. |
-
-### Iptables save errors
-
-| Iptables save errors | |
-| -------------------------------- | ------------------------------------------------------------ |
-| Metric | sum(rate(felix_iptables_save_errors[5m])) |
-| Example value | \{\} 0 |
-| Explanation | Number of iptables-save errors. The iptables-save command is run before every iptables-restore command so that $[prodname] has the current state of iptables. |
-| Threshold value recommendation | Occasional errors are normal. Investigate error counters that rapidly rise. |
-| Threshold breach symptoms | $[prodname] network policies are not up to date. Cluster nodes may be under resource contention, which may result in other _error and _seconds metrics rising. Repeated errors could mean some persistent problem (for example, some other process has created iptables rules that $[prodname] cannot decode with the version of iptables-save in use). |
-| Threshold breach recommendations | See the Errors Plot graph to determine if the scope is cluster-wide or node-local. Check calico-node logs. Check resource usage and contention on Kubernetes nodes and calico-nodes. Add nodes/resources if needed. If no resource contention is seen, restart calico-node and monitor. |
-| Priority level | Optional. |
-
-### Felix log errors
-
-| Felix log errors | |
-| -------------------------------- | ------------------------------------------------------------ |
-| Metric | sum(rate(felix_log_errors[5m])) |
-| Example value | \{\} 0 |
-| Explanation | The number of times Felix fails to write out a log because the log buffer is full. |
-| Threshold value recommendation | Occasional errors are normal. Investigate error counters that rapidly rise. |
-| Threshold breach symptoms | Calico-node may be under resource contention, which may result in other _error and _seconds metrics rising. |
-| Threshold breach recommendations | See the Errors Plot graph to determine if the scope is cluster-wide or node-local. Check resource usage and contention on Kubernetes nodes and calico-nodes. Add nodes/resources if needed. If no resource contention is seen, restart calico-node and monitor. |
-| Priority level | Optional. |
-
-### Monitor Felix metrics using a graph
-
-| Errors plot graph | |
-| -------------------------------- | ------------------------------------------------------------ |
-| Metric | rate(felix_ipset_errors[5m]) or rate(felix_iptables_restore_errors[5m]) or rate(felix_iptables_save_errors[5m]) or rate(felix_log_errors[5m]) |
-| Example value | \{endpoint="metrics-port", instance="10.0.1.20:9091", job="felix-metrics-svc", namespace="calico-system", pod="calico-node-qzpkt", service="felix-metrics-svc"\} 0 |
-| Explanation | Checks if there have been any iptables-save, iptables-restore, or ipset command errors in the past five minutes. Keeps track of what node is reporting which error. |
-| Threshold value recommendation | Occasional errors are normal. Investigate error counters that rapidly rise. For this specific metric it is worth focusing on the metric that is spiking, and referencing that metric information. |
-| Threshold breach symptoms | Dependent on the specific metric that is logging errors. |
-| Threshold breach recommendations | If more than one metric is rising, check if all rising metrics are related to a specific calico-node. If this is the case, then the issue is local to that calico-node. Check calico-node logs. Check resource usage for the node and calico-node pod. If more than one metric is rising rapidly across all calico-nodes, then it is a cluster-wide issue and cluster health must be checked. Check cluster resource usage, cluster networking/infrastructure health, and restart calico-nodes and calico-typha pods. |
-| Priority level | Recommended. |
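-
-Keeping the `by (pod)` grouping in an alert preserves the node-local versus cluster-wide distinction described above. A sketch entry for the same hypothetical rule group, combining the four error counters:
-
-```yaml
-  - alert: FelixDataplaneErrors
-    # Any of the four Felix error counters rising over five minutes.
-    expr: |
-      sum by (pod) (rate(felix_ipset_errors[5m])) > 0
-        or sum by (pod) (rate(felix_iptables_restore_errors[5m])) > 0
-        or sum by (pod) (rate(felix_iptables_save_errors[5m])) > 0
-        or sum by (pod) (rate(felix_log_errors[5m])) > 0
-    for: 10m
-```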
-
-## Felix time-based metrics
-
-### Data plane apply time quantile 0.5/0.9/0.99
-
-| Data plane apply time quantile 0.5/0.9/0.99 | |
-| ------------------------------------------ | ------------------------------------------------------------ |
-| Metric | felix_int_dataplane_apply_time_seconds\{quantile="0.5"\}, felix_int_dataplane_apply_time_seconds\{quantile="0.9"\}, felix_int_dataplane_apply_time_seconds\{quantile="0.99"\} |
-| Example value | felix_int_dataplane_apply_time_seconds\{quantile="0.5"\}:felix_int_dataplane_apply_time_seconds\{endpoint="metrics-port", instance="10.0.1.30:9091", job="felix-metrics-svc", namespace="calico-system", pod="calico-node-6pcqm", quantile="0.5", service="felix-metrics-svc"\} 0.020859218 |
-| Explanation | Time in seconds that it took to apply a data plane update, viewed at the median, 90th percentile, and 99th percentile. |
-| Threshold value recommendation | Thresholds will vary depending on cluster size and rate of churn. It is recommended that a baseline be set to determine a normal threshold value. In the field we have seen >10s in extremely high-scale clusters with 100k+ endpoints and lots of policy/Kubernetes services. |
-| Threshold breach symptoms | Large time-to-apply values will cause a delay between $[prodname] network policy commits and enforcement in the data plane. This depends partly on how long $[prodname] waits for kube-proxy to release the iptables lock, which is influenced by the number of services in use. |
-| Threshold breach recommendations | Increase cluster resources, and reduce the number of Kubernetes services if possible. |
-| Priority level | Recommended. |
-
-### Felix route table list seconds quantile 0.5/0.9/0.99
-
-| Felix route table list seconds quantile 0.5/0.9/0.99 | |
-| ---------------------------------------------------- | ------------------------------------------------------------ |
-| Metric | felix_route_table_list_seconds\{quantile="0.5"\}, felix_route_table_list_seconds\{quantile="0.9"\}, felix_route_table_list_seconds\{quantile="0.99"\} |
-| Example value | felix_route_table_list_seconds\{quantile="0.5"\}:felix_route_table_list_seconds\{endpoint="metrics-port",instance="10.0.1.30:9091",job="felix-metrics-svc",namespace="calico-system", pod="calico-node-6pcqm",quantile="0.5", service="felix-metrics-svc"\} 0.000860426 |
-| Explanation | Time to list all the interfaces during a resync, viewed at the median, 90th percentile and 99th percentile. |
-| Threshold value recommendation | Thresholds will vary depending on the number of cali interfaces per node. It is recommended that a baseline be set to determine a normal threshold value. |
-| Threshold breach symptoms | High values indicate high CPU usage in Felix and slow data plane updates. |
-| Threshold breach recommendations | Increase cluster resources. Reduce the number of cali interfaces per node where possible. |
-| Priority level | Optional. |
-
-### Felix graph update time quantile 0.5/0.9/0.99
-
-| Felix graph update time seconds quantile 0.5/0.9/0.99 | |
-| ----------------------------------------------------- | ------------------------------------------------------------ |
-| Metric | felix_calc_graph_update_time_seconds\{quantile="0.5"\}, felix_calc_graph_update_time_seconds\{quantile="0.9"\}, felix_calc_graph_update_time_seconds\{quantile="0.99"\} |
-| Example value | felix_calc_graph_update_time_seconds\{quantile="0.5"\}:felix_calc_graph_update_time_seconds\{endpoint="metrics-port",instance="10.0.1.30:9091", job="felix-metrics-svc",namespace="calico-system", pod="calico-node-6pcqm",quantile="0.5", service="felix-metrics-svc"\} 0.00007129 |
-| Explanation | This metric reports the time taken to update the calculation graph for each datastore on an update call, viewed at the median, 90th percentile and 99th percentile. The calculation graph is the Felix component that takes all the policies/workload endpoints/host endpoints information that it has received from Typha, and distills it down to data plane updates that are relevant for this node. |
-| Threshold value recommendation | After *start of day* (when there is typically a large update), values should be sub-second, measured in milliseconds with occasional blips to a second or two. Investigate if the result is constantly in values of seconds. |
-| Threshold breach symptoms | High values indicate high CPU usage in Felix and slow data plane updates. |
-| Threshold breach recommendations | Increase cluster resources. Check calico-node logs. Rollout restart calico-node(s) if needed. |
-| Priority level | Recommended. |
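-
-"Constantly in values of seconds" can be expressed as a sustained p99 above one second; a sketch entry for the same hypothetical rule group:
-
-```yaml
-  - alert: FelixCalcGraphSlow
-    # p99 update time persistently in seconds rather than milliseconds.
-    expr: max by (instance) (felix_calc_graph_update_time_seconds{quantile="0.99"}) > 1
-    for: 15m
-```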
diff --git a/calico-enterprise_versioned_docs/version-3.19-2/operations/monitor/prometheus/alertmanager.mdx b/calico-enterprise_versioned_docs/version-3.19-2/operations/monitor/prometheus/alertmanager.mdx
deleted file mode 100644
index 6dfbb692b0..0000000000
--- a/calico-enterprise_versioned_docs/version-3.19-2/operations/monitor/prometheus/alertmanager.mdx
+++ /dev/null
@@ -1,103 +0,0 @@
----
-description: Configure Alertmanager, a Prometheus feature that routes alerts.
----
-
-# Configure Alertmanager
-
-Alertmanager is used by $[prodname] to route alerts from Prometheus to the administrators.
-It handles routing, deduplication, grouping, silencing, and inhibition of alerts.
-
-More detailed information about Alertmanager is available in the [upstream documentation](https://prometheus.io/docs/alerting/latest/configuration).
-
-### Updating the Alertmanager config
-
-- Save the current Alertmanager secret, usually named with the `alertmanager-` prefix.

| Project | Artifact Name | LICENSE |
|---|---|---|
| abbrev | -abbrev-1.1.1.tgz | -ISC | -
| algoliasearch | -algoliasearch.umd-4.2.0.js | -MIT | -
| ansi-regex | -ansi-regex-2.1.1.tgz | -MIT | -
| ansi-styles | -ansi-styles-3.2.1.tgz | -MIT | -
| @types/anymatch | -anymatch-1.3.1.tgz | -MIT | -
| app-policy | -app-policy-v3.16.6 | -Tigera Proprietary | -
| argparse | -argparse-1.0.9.tgz | -MIT | -
| arr-diff | -arr-diff-4.0.0.tgz | -MIT | -
| arr-flatten | -arr-flatten-1.1.0.tgz | -MIT | -
| arr-union | -arr-union-3.1.0.tgz | -MIT | -
| array-unique | -array-unique-0.3.2.tgz | -MIT | -
| asap | -asap-2.0.6.tgz | -MIT | -
| assign-symbols | -assign-symbols-1.0.0.tgz | -MIT | -
| atob | -atob-2.1.2.tgz | -Apache 2.0 | -
| atomicwrites | -atomicwrites-1.4.0-py2.py3-none-any.whl | -MIT | -
| attrs | -attrs-20.3.0-py2.py3-none-any.whl | -MIT | -
| avsdf-base | -avsdf-base-1.0.0.tgz | -MIT | -
| babel-standalone | -babel-6.26.0.min.js | -MIT | -
| babel-runtime | -babel-runtime-6.26.0.tgz | -MIT | -
| backports.functools-lru-cache | -backports.functools_lru_cache-1.6.1-py2.py3-none-any.whl | -MIT | -
| balanced-match | -balanced-match-0.4.2.tgz | -MIT | -
| balanced-match | -balanced-match-1.0.0.tgz | -MIT | -
| base | -base-0.11.2.tgz | -MIT | -
| beautifulsoup4 | -beautifulsoup4-4.9.3-py2-none-any.whl | -MIT | -
| big.js | -big.js-5.2.2.tgz | -MIT | -
| boolbase | -boolbase-1.0.0.tgz | -ISC | -
| twitter-bootstrap | -bootstrap-3.3.7.min.js | -MIT | -
| bootstrap | -bootstrap-3.4.1.tgz | -MIT | -
| brace-expansion | -brace-expansion-1.1.11.tgz | -MIT | -
| braces | -braces-2.3.2.tgz | -MIT | -
| bs4 | -bs4-0.0.1.tar.gz | -MIT | -
| buffer-from | -buffer-from-1.1.1.tgz | -MIT | -
| cache-base | -cache-base-1.0.1.tgz | -MIT | -
| cachetools | -cachetools-3.1.1-py2.py3-none-any.whl | -MIT | -
| projectcalico | -calico-v3.17.1 | -Apache 2.0 | -
| projectcalico | -calicoctl-v3.17.1 | -Apache 2.0 | -
| camel-case | -camel-case-4.1.1.tgz | -MIT | -
| certifi | -certifi-2020.12.5-py2.py3-none-any.whl | -Mozilla 2.0 | -
| chain-function | -chain-function-1.0.0.tgz | -MIT | -
| chalk | -chalk-2.4.2.tgz | -MIT | -
| chardet | -chardet-3.0.4-py2.py3-none-any.whl | -LGPL 3.0 | -
| chardet | -chardet-4.0.0-py2.py3-none-any.whl | -LGPL 3.0 | -
| chevrotain | -chevrotain-6.5.0.tgz | -Apache 2.0 | -
| cidr-regex | -cidr-regex-2.0.10.tgz | -BSD 2 | -
| class-transformer | -class-transformer-0.3.1.tgz | -MIT | -
| class-utils | -class-utils-0.3.6.tgz | -MIT | -
| class-validator | -class-validator-0.9.1.tgz | -MIT | -
| classnames | -classnames-2.2.5.tgz | -MIT | -
| classnames | -classnames-2.2.6.tgz | -MIT | -
| clean-css | -clean-css-4.2.3.tgz | -MIT | -
| clipboard.js | -clipboard-2.0.0.min.js | -MIT | -
| cni-plugin | -cni-plugin-v3.16.6 | -Tigera Proprietary | -
| @babel/code-frame | -code-frame-7.10.1.tgz | -MIT | -
| codemirror | -codemirror-5.57.0.js | -MIT | -
| codemirror | -codemirror-5.57.0.tgz | -MIT | -
| collection-visit | -collection-visit-1.0.0.tgz | -MIT | -
| color-convert | -color-convert-1.9.3.tgz | -MIT | -
| color-name | -color-name-1.1.3.tgz | -MIT | -
| commander | -commander-2.13.0.tgz | -MIT | -
| commander | -commander-2.20.3.tgz | -MIT | -
| commander | -commander-4.1.1.tgz | -MIT | -
| component-emitter | -component-emitter-1.3.0.tgz | -MIT | -
| concat-map | -concat-map-0.0.1.tgz | -MIT | -
| configparser | -configparser-4.0.2-py2.py3-none-any.whl | -MIT | -
| connected-react-router | -connected-react-router-6.5.2.tgz | -MIT | -
| contextlib2 | -contextlib2-0.6.0.post1-py2.py3-none-any.whl | -Python 2.0 | -
| copy-descriptor | -copy-descriptor-0.1.1.tgz | -MIT | -
| @popperjs/core | -core-2.4.4.tgz | -MIT | -
| core-js | -core-js-1.2.7.tgz | -MIT | -
| core-js | -core-js-2.5.1.tgz | -MIT | -
| core-js | -core-js-2.5.7.tgz | -MIT | -
| core-js | -core-js-3.6.5.tgz | -MIT | -
| cose-base | -cose-base-1.0.3.tgz | -MIT | -
| create-react-class | -create-react-class-15.6.2.tgz | -MIT | -
| css-box-model | -css-box-model-1.1.1.tgz | -MIT | -
| css-select | -css-select-1.2.0.tgz | -BSD 2 | -
| css-what | -css-what-2.1.3.tgz | -BSD 2 | -
| @ungap/custom-elements | -custom-elements-0.1.12.tgz | -ISC | -
| cytoscape | -cytoscape-3.15.2.tgz | -MIT | -
| cytoscape | -cytoscape-3.18.0.min.js | -MIT | -
| cytoscape-avsdf | -cytoscape-avsdf-1.0.0.tgz | -MIT | -
| cytoscape-cise | -cytoscape-cise-1.0.0.tgz | -MIT | -
| cytoscape-context-menus | -cytoscape-context-menus-4.0.0.tgz | -MIT | -
| cytoscape-cose-bilkent | -cytoscape-cose-bilkent-4.1.0.tgz | -MIT | -
| cytoscape-dagre-cluster-fix | -cytoscape-dagre-cluster-fix-2.2.5.tgz | -MIT | -
| cytoscape-expand-collapse | -cytoscape-expand-collapse-4.0.0.tgz | -MIT | -
| cytoscape-fcose | -cytoscape-fcose-1.2.3.tgz | -MIT | -
| cytoscape-layers | -cytoscape-layers-2.1.0.tgz | -MIT | -
| cytoscape-navigator | -cytoscape-navigator-2.0.1.tgz | -MIT | -
| cytoscape-popper | -cytoscape-popper-1.0.7.js | -MIT | -
| cytoscape-popper | -cytoscape-popper-1.0.7.tgz | -MIT | -
| d3 | -d3-5.5.0.tgz | -BSD 3 | -
| d3-array | -d3-array-1.2.1.tgz | -BSD 3 | -
| d3-array | -d3-array-1.2.4.tgz | -BSD 3 | -
| d3-axis | -d3-axis-1.0.8.tgz | -BSD 3 | -
| d3-brush | -d3-brush-1.0.4.tgz | -BSD 3 | -
| d3-chord | -d3-chord-1.0.4.tgz | -BSD 3 | -
| d3-collection | -d3-collection-1.0.4.tgz | -BSD 3 | -
| d3-collection | -d3-collection-1.0.7.tgz | -BSD 3 | -
| d3-color | -d3-color-1.0.3.tgz | -BSD 3 | -
| d3-contour | -d3-contour-1.2.0.tgz | -BSD 3 | -
| d3-dispatch | -d3-dispatch-1.0.3.tgz | -BSD 3 | -
| d3-drag | -d3-drag-1.2.1.tgz | -BSD 3 | -
| d3-dsv | -d3-dsv-1.0.8.tgz | -BSD 3 | -
| d3-ease | -d3-ease-1.0.3.tgz | -BSD 3 | -
| d3-fetch | -d3-fetch-1.1.0.tgz | -BSD 3 | -
| d3-force | -d3-force-1.1.0.tgz | -BSD 3 | -
| d3-format | -d3-format-1.2.2.tgz | -BSD 3 | -
| d3-geo | -d3-geo-1.10.0.tgz | -BSD 3 | -
| d3-hierarchy | -d3-hierarchy-1.1.6.tgz | -BSD 3 | -
| d3-interpolate | -d3-interpolate-1.1.6.tgz | -BSD 3 | -
| d3-interpolate | -d3-interpolate-1.3.2.tgz | -BSD 3 | -
| d3-path | -d3-path-1.0.5.tgz | -BSD 3 | -
| d3-polygon | -d3-polygon-1.0.3.tgz | -BSD 3 | -
| d3-quadtree | -d3-quadtree-1.0.3.tgz | -BSD 3 | -
| d3-random | -d3-random-1.1.0.tgz | -BSD 3 | -
| d3-sankey-circular | -d3-sankey-circular-0.34.0.tgz | -MIT | -
| d3-scale | -d3-scale-2.0.0.tgz | -BSD 3 | -
| d3-scale | -d3-scale-2.1.2.tgz | -BSD 3 | -
| d3-scale-chromatic | -d3-scale-chromatic-1.3.0.tgz | -BSD 3 | -
| d3-selection | -d3-selection-1.3.0.tgz | -BSD 3 | -
| d3-shape | -d3-shape-1.2.0.tgz | -BSD 3 | -
| d3-shape | -d3-shape-1.2.3.tgz | -BSD 3 | -
| d3-shape | -d3-shape-1.3.7.tgz | -BSD 3 | -
| d3-time | -d3-time-1.0.8.tgz | -BSD 3 | -
| d3-time-format | -d3-time-format-2.1.1.tgz | -BSD 3 | -
| d3-timer | -d3-timer-1.0.7.tgz | -BSD 3 | -
| d3-transition | -d3-transition-1.1.1.tgz | -BSD 3 | -
| d3-voronoi | -d3-voronoi-1.1.2.tgz | -BSD 3 | -
| d3-zoom | -d3-zoom-1.7.1.tgz | -BSD 3 | -
| dagre | -dagre-0.7.4.js | -MIT | -
| dagre-cluster-fix | -dagre-cluster-fix-0.9.3.tgz | -MIT | -
| debug | -debug-2.6.9.tgz | -MIT | -
| decimal.js-light | -decimal.js-light-2.5.0.tgz | -MIT | -
| decode-uri-component | -decode-uri-component-0.2.0.tgz | -MIT | -
| deepdiff | -deepdiff-3.3.0-py2-none-any.whl | -MIT | -
| deepmerge | -deepmerge-2.1.1.tgz | -MIT | -
| define-properties | -define-properties-1.1.2.tgz | -MIT | -
| define-properties | -define-properties-1.1.3.tgz | -MIT | -
| define-property | define-property-0.2.5.tgz | MIT |
| define-property | define-property-1.0.0.tgz | MIT |
| define-property | define-property-2.0.2.tgz | MIT |
| diff | diff-3.5.0.tgz | BSD 3 |
| diff2html | diff2html-2.4.0.tgz | MIT |
| dom-converter | dom-converter-0.2.0.tgz | MIT |
| dom-helpers | dom-helpers-3.3.1.tgz | MIT |
| dom-helpers | dom-helpers-3.4.0.tgz | MIT |
| dom-serializer | dom-serializer-0.2.2.tgz | MIT |
| dom-walk | dom-walk-0.1.2.tgz | MIT |
| domelementtype | domelementtype-1.3.1.tgz | BSD 2 |
| domelementtype | domelementtype-2.0.1.tgz | BSD 2 |
| domhandler | domhandler-2.4.2.tgz | BSD 2 |
| domutils | domutils-1.5.1.tgz | BSD 2 |
| domutils | domutils-1.7.0.tgz | BSD 2 |
| dot-case | dot-case-3.0.3.tgz | MIT |
| elasticsearch | elasticsearch-6.8.1-py2.py3-none-any.whl | Apache 2.0 |
| elementary-circuits-directed-graph | elementary-circuits-directed-graph-1.2.0.tgz | MIT |
| emojis-list | emojis-list-3.0.0.tgz | MIT |
| encoding | encoding-0.1.12.tgz | MIT |
| entities | entities-1.1.2.tgz | BSD 2 |
| entities | entities-2.0.3.tgz | BSD 2 |
| es-abstract | es-abstract-1.17.5.tgz | MIT |
| es-to-primitive | es-to-primitive-1.2.1.tgz | MIT |
| es5-shim | es5-shim-4.3.1.js | MIT |
| escape-string-regexp | escape-string-regexp-1.0.5.tgz | MIT |
| esprima | esprima-4.0.0.tgz | BSD 2 |
| expand-brackets | expand-brackets-2.1.4.tgz | MIT |
| extend-shallow | extend-shallow-2.0.1.tgz | MIT |
| extend-shallow | extend-shallow-3.0.2.tgz | MIT |
| extglob | extglob-2.0.4.tgz | MIT |
| fast-levenshtein | fast-levenshtein-2.0.6.tgz | MIT |
| fbjs | fbjs-0.8.16.tgz | MIT |
| felix | felix-v3.17.2 | Tigera Proprietary |
| file-saver | file-saver-2.0.1.tgz | MIT |
| fill-range | fill-range-4.0.0.tgz | MIT |
| @fortawesome/fontawesome-common-types | fontawesome-common-types-0.2.32.tgz | MIT |
| @fortawesome/fontawesome-svg-core | fontawesome-svg-core-1.2.10.tgz | MIT |
| for-in | for-in-1.0.2.tgz | MIT |
| foreach | foreach-2.0.5.tgz | MIT |
| fork-ts-checker-webpack-plugin | fork-ts-checker-webpack-plugin-4.1.6.tgz | MIT |
| formik | formik-2.1.3.tgz | MIT |
| fragment-cache | fragment-cache-0.2.1.tgz | MIT |
| @fortawesome/free-brands-svg-icons | free-brands-svg-icons-5.6.1.tgz | CC BY 4.0 |
| @fortawesome/free-regular-svg-icons | free-regular-svg-icons-5.15.1.tgz | CC BY 4.0 |
| @fortawesome/free-solid-svg-icons | free-solid-svg-icons-5.6.1.tgz | CC BY 4.0 |
| funcsigs | funcsigs-1.0.2-py2.py3-none-any.whl | Apache 2.0 |
| function-bind | function-bind-1.1.1.tgz | MIT |
| get-value | get-value-2.0.6.tgz | MIT |
| github.com/alecthomas/participle | github.com/alecthomas/participle-v0.3.0 | MIT |
| github.com/apparentlymart/go-cidr/cidr | github.com/apparentlymart/go-cidr/cidr-v1.0.1 | MIT |
| github.com/aquasecurity/kube-bench/check | github.com/aquasecurity/kube-bench/check-v0.0.34 | Apache 2.0 |
| github.com/araddon/dateparse | github.com/araddon/dateparse-262228af701ebf3932b8b8488da6781b9d585c88 | MIT |
| github.com/avast/retry-go | github.com/avast/retry-go-v2.2.0 | MIT |
| github.com/aws/aws-lambda-go/events | github.com/aws/aws-lambda-go/events-v1.13.3 | Apache 2.0 |
| github.com/aws/aws-sdk-go/aws | github.com/aws/aws-sdk-go/aws-v1.25.8 | Apache 2.0 |
| github.com/bmizerany/pat | github.com/bmizerany/pat-6226ea591a40176dd3ff9cd8eff81ed6ca721a00 | MIT |
| github.com/bronze1man/gostrongswanvici | github.com/bronze1man/gostrongswanvici-27d02f80ba4008de552efb746b3f6eaa7718b518 | MIT |
| github.com/buger/jsonparser | github.com/buger/jsonparser-v1.0.0 | MIT |
| github.com/burntsushi/toml | github.com/burntsushi/toml-v0.3.1 | MIT |
| github.com/caimeo/iniflags | github.com/caimeo/iniflags-ef4ae6c5cd79d20db0b18bc5ebd8657fac7260e5 | BSD 2 |
| github.com/cloudflare/cfssl/log | github.com/cloudflare/cfssl/log-v1.4.1 | BSD 2 |
| github.com/containernetworking/cni/libcni | github.com/containernetworking/cni/libcni-v0.8.0 | Apache 2.0 |
| github.com/containernetworking/plugins/pkg/hns | github.com/containernetworking/plugins/pkg/hns-v0.8.5 | Apache 2.0 |
| github.com/coreos/go-oidc | github.com/coreos/go-oidc-v2.1.0 | Apache 2.0 |
| github.com/coreos/go-semver/semver | github.com/coreos/go-semver/semver-v0.3.0 | Apache 2.0 |
| github.com/davecgh/go-spew/spew | github.com/davecgh/go-spew/spew-v1.1.1 | ISC |
| github.com/docker/docker/api/types | github.com/docker/docker/api/types-v1.13.1 | Apache 2.0 |
| github.com/docker/docker/client | github.com/docker/docker/client-v1.13.1 | Apache 2.0 |
| github.com/docker/go-connections/nat | github.com/docker/go-connections/nat-v0.4.0 | Apache 2.0 |
| github.com/docopt/docopt-go | github.com/docopt/docopt-go-ee0de3bc6815ee19d4a46c7eb90f829db0e014b1 | MIT |
| github.com/elastic/go-elasticsearch/v7 | github.com/elastic/go-elasticsearch/v7-v7.3.0 | Apache 2.0 |
| github.com/envoyproxy/data-plane-api/envoy/api/v2/core | github.com/envoyproxy/data-plane-api/envoy/api/v2/core-ffd420ef8a9ad148642236aa6d89e2855b41c821 | Apache 2.0 |
| github.com/fsnotify/fsnotify | github.com/fsnotify/fsnotify-v1.4.9 | BSD 3 |
| github.com/gavv/monotime | github.com/gavv/monotime-30dba43534243e3484a34676a0f068d12b989f84 | Apache 2.0 |
| github.com/getlantern/deepcopy | github.com/getlantern/deepcopy-v1 | Apache 2.0 |
| github.com/ghodss/yaml | github.com/ghodss/yaml-v1.0.0 | BSD 3 |
| github.com/go-ini/ini | github.com/go-ini/ini-v1.43.0 | Apache 2.0 |
| github.com/go-logr/logr | github.com/go-logr/logr-v0.3.0 | Apache 2.0 |
| github.com/go-openapi/spec | github.com/go-openapi/spec-v0.19.3 | Apache 2.0 |
| github.com/go-sql-driver/mysql | github.com/go-sql-driver/mysql-v1.4.1 | Mozilla 2.0 |
| github.com/gofrs/flock | github.com/gofrs/flock-v0.8.0 | BSD 3 |
| github.com/gogo/googleapis/google/rpc | github.com/gogo/googleapis/google/rpc-v1.2.0 | Apache 2.0 |
| github.com/gogo/protobuf/proto | github.com/gogo/protobuf/proto-v1.3.1 | BSD 3 |
| github.com/golang-collections/collections/stack | github.com/golang-collections/collections/stack-604e922904d35e97f98a774db7881f049cd8d970 | MIT |
| github.com/google/go-cmp/cmp | github.com/google/go-cmp/cmp-v0.4.0 | BSD 3 |
| github.com/google/gofuzz | github.com/google/gofuzz-v1.1.0 | Apache 2.0 |
| github.com/google/gopacket | github.com/google/gopacket-v1.1.18 | BSD 3 |
| github.com/google/netstack/tcpip/header | github.com/google/netstack/tcpip/header-55fcc16cd0eb096d8418f7bc5162483c31a4e82b | Apache 2.0 |
| github.com/hashicorp/go-version | github.com/hashicorp/go-version-v1.2.1 | Mozilla 2.0 |
| github.com/hashicorp/golang-lru | github.com/hashicorp/golang-lru-v0.5.1 | Mozilla 2.0 |
| github.com/hashicorp/yamux | github.com/hashicorp/yamux-2f1d1f20f75d5404f53b9edf6b53ed5505508675 | Mozilla 2.0 |
| github.com/howeyc/fsnotify | github.com/howeyc/fsnotify-v0.9.0 | BSD 3 |
| github.com/hpcloud/tail | github.com/hpcloud/tail-v1.0.0 | MIT |
| github.com/ishidawataru/sctp | github.com/ishidawataru/sctp-00ab2ac2db07a138417639ef3f39672c65dbb9a0 | BSD 3 |
| github.com/jarcoal/httpmock | github.com/jarcoal/httpmock-v1.0.5 | MIT |
| github.com/jinzhu/copier | github.com/jinzhu/copier-v0.1.0 | MIT |
| github.com/jmespath/go-jmespath | github.com/jmespath/go-jmespath-0.3.0 | Apache 2.0 |
| github.com/joho/godotenv | github.com/joho/godotenv-v1.3.0 | MIT |
| github.com/jpillora/backoff | github.com/jpillora/backoff-v1.0.0 | MIT |
| github.com/json-iterator/go | github.com/json-iterator/go-v1.1.10 | MIT |
| github.com/juju/clock | github.com/juju/clock-9c5c9712527c7986f012361e7d13756b4d99543d | LGPL 3.0 |
| github.com/juju/errors | github.com/juju/errors-3fe23663418fc1d724868c84f21b7519bbac7441 | LGPL 3.0 |
| github.com/juju/mutex | github.com/juju/mutex-d21b13acf4bfd8a8b0482a3a78e44d98880b40d3 | LGPL 3.0 |
| github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/utils | github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/utils-v1.1.0 | Apache 2.0 |
| github.com/kardianos/osext | github.com/kardianos/osext-2bc1f35cddc0cc527b4bc3dce8578fc2a6c11384 | BSD 3 |
| github.com/kelseyhightower/envconfig | github.com/kelseyhightower/envconfig-v1.4.0 | MIT |
| github.com/kelseyhightower/memkv | github.com/kelseyhightower/memkv-v0.1.1 | MIT |
| github.com/konsorten/go-windows-terminal-sequences | github.com/konsorten/go-windows-terminal-sequences-v1.0.1 | MIT |
| github.com/lestrrat-go/file-rotatelogs | github.com/lestrrat-go/file-rotatelogs-v2.4.0 | MIT |
| github.com/libp2p/go-reuseport | github.com/libp2p/go-reuseport-v0.0.1 | ISC |
| github.com/lithammer/dedent | github.com/lithammer/dedent-v1.1.0 | MIT |
| github.com/mailru/easyjson | github.com/mailru/easyjson-v0.7.0 | MIT |
| github.com/masterminds/sprig | github.com/masterminds/sprig-v2.19.0 | MIT |
| github.com/mcuadros/go-version | github.com/mcuadros/go-version-92cdf37c5b7579ebaf7a036da94b40995972088d | MIT |
| github.com/microsoft/hcsshim | github.com/microsoft/hcsshim-v0.8.6 | MIT |
| github.com/mipearson/rfw | github.com/mipearson/rfw-6f0a6f3266ba1058df9ef0c94cda1cecd2e62852 | MIT |
| github.com/mitchellh/go-homedir | github.com/mitchellh/go-homedir-v1.1.0 | MIT |
| github.com/modern-go/concurrent | github.com/modern-go/concurrent-1.0.3 | Apache 2.0 |
| github.com/modern-go/reflect2 | github.com/modern-go/reflect2-v1.0.1 | Apache 2.0 |
| github.com/natefinch/atomic | github.com/natefinch/atomic-a62ce929ffcc871a51e98c6eba7b20321e3ed62d | MIT |
| github.com/nmrshll/go-cp | github.com/nmrshll/go-cp-61436d3b7cfa1bc1e8e455c35d8f60b8e51ccc2e | MIT |
| github.com/nxadm/tail | github.com/nxadm/tail-v1.4.4 | MIT |
| github.com/olekukonko/tablewriter | github.com/olekukonko/tablewriter-v0.0.2 | MIT |
| github.com/olivere/elastic/v7 | github.com/olivere/elastic/v7-v7.0.6 | MIT |
| github.com/onsi/ginkgo | github.com/onsi/ginkgo-v1.15.0 | MIT |
| github.com/onsi/gomega | github.com/onsi/gomega-v1.7.0 | MIT |
| github.com/openshift/api/config/v1 | github.com/openshift/api/config/v1-d0822898eabb929c40c5146116252477abab8d18 | Apache 2.0 |
| github.com/openshift/library-go/pkg/crypto | github.com/openshift/library-go/pkg/crypto-9350cd67a9110bcaf9a85d391fa264afbbff1342 | Apache 2.0 |
| github.com/osrg/gobgp/client | github.com/osrg/gobgp/client-v1.22 | Apache 2.0 |
| github.com/paloaltonetworks/pango | github.com/paloaltonetworks/pango-v0.1.1 | ISC |
| github.com/patrickmn/go-cache | github.com/patrickmn/go-cache-v2.1.0 | MIT |
| github.com/pkg/errors | github.com/pkg/errors-v0.8.1 | BSD 2 |
| github.com/projectcalico/cni-plugin | github.com/projectcalico/cni-plugin | Apache 2.0 |
| github.com/projectcalico/felix | github.com/projectcalico/felix | Apache 2.0 |
| github.com/projectcalico/go-json | github.com/projectcalico/go-json/json-6219dc7339ba20ee4c57df0a8baac62317d19cb1 | BSD 2 |
| github.com/projectcalico/go-yaml-wrapper | github.com/projectcalico/go-yaml-wrapper-090425220c545f6d179db17af395f5aac30b6926 | BSD 3 |
| kube-controllers | kube-controllers | Tigera Proprietary |
| github.com/projectcalico/libcalico-go | github.com/projectcalico/libcalico-go/lib | Apache 2.0 |
| github.com/projectcalico/pod2daemon | github.com/projectcalico/pod2daemon | Apache 2.0 |
| typha | typha | Tigera Proprietary |
| github.com/prometheus/client_golang/ | github.com/prometheus/client_golang/prometheus-v1.7.1 | Apache 2.0 |
| github.com/rakelkar/gonetsh/netsh | github.com/rakelkar/gonetsh/netsh-e5c5ffe4bdf04bc060fc45ff4aca2349f51c94a7 | Apache 2.0 |
| github.com/robfig/cron | github.com/robfig/cron-v1.2.0 | MIT |
| github.com/satori/go.uuid | github.com/satori/go.uuid-v1.2.0 | MIT |
| github.com/shirou/gopsutil/process | github.com/shirou/gopsutil/process-v2.19.03 | BSD 3 |
| github.com/sirupsen/logrus | github.com/sirupsen/logrus-v1.4.2 | MIT |
| github.com/sirupsen/logrus | github.com/sirupsen/logrus-v1.6.0 | MIT |
| github.com/spf13/cobra | github.com/spf13/cobra-v0.0.3 | Apache 2.0 |
| github.com/spf13/cobra | github.com/spf13/cobra-v1.0.0 | Apache 2.0 |
| github.com/spf13/pflag | github.com/spf13/pflag-v1.0.5 | BSD 3 |
| github.com/spf13/viper | github.com/spf13/viper-v1.6.1 | MIT |
| github.com/stretchr/testify | github.com/stretchr/testify/mock-v1.4.0 | MIT |
| github.com/termie/go-shutil | github.com/termie/go-shutil-bcacb06fecaeec8dc42af03c87c6949f4a05c74c | MIT |
| github.com/tigera/api | github.com/tigera/api/ | Apache 2.0 |
| github.com/vishvananda/netlink | github.com/vishvananda/netlink-v1.1.0 | Apache 2.0 |
| github.com/willf/bitset | github.com/willf/bitset-v1.1.11 | BSD 3 |
| github.com/workiva/go-datastructures/trie/ctrie | github.com/workiva/go-datastructures/trie/ctrie-v1.0.50 | Apache 2.0 |
| github.com/x-cray/logrus-prefixed-formatter | github.com/x-cray/logrus-prefixed-formatter-v0.5.2 | MIT |
| github.com/yalp/jsonpath | github.com/yalp/jsonpath-5cc68e5049a040829faef3a44c00ec4332f6dec7 | BSD 3 |
| global | global-4.4.0.tgz | MIT |
| go.etcd.io/etcd/ | go.etcd.io/etcd/client-v0.5.0-alpha.5.0.20201125193152-8a03d2e9614b | Apache 2.0 |
| go.uber.org/zap | go.uber.org/zap-v1.15.0 | MIT |
| golang.org/x/crypto | golang.org/x/crypto/ | BSD 3 |
| golang.org/x/net | golang.org/x/net/ | BSD 3 |
| golang.org/x/sync/ | golang.org/x/sync/ | BSD 3 |
| golang.org/x/sys | golang.org/x/sys | BSD 3 |
| github.com/golang/text | golang.org/x/text | Golang BSD + Patents |
| golang.zx2c4.com/wireguard/ | golang.zx2c4.com/wireguard/ | MIT |
| google-libphonenumber | google-libphonenumber-3.2.2.tgz | Apache 2.0 |
| google.golang.org/grpc | google.golang.org/grpc-v1.27.0 | Apache 2.0 |
| google-auth | google_auth-1.26.1-py2.py3-none-any.whl | Apache 2.0 |
| gopkg.in/fsnotify/fsnotify.v1 | gopkg.in/fsnotify/fsnotify.v1-v1.4.7 | BSD 3 |
| gopkg.in/go-playground/validator.v9 | gopkg.in/go-playground/validator.v9-v9.30.2 | MIT |
| gopkg.in/inf.v0 | gopkg.in/inf.v0-v0.9.0 | BSD 3 |
| gopkg.in/natefinch/lumberjack.v2 | gopkg.in/natefinch/lumberjack.v2-v2.0.0 | MIT |
| gopkg.in/square/go-jose.v2 | gopkg.in/square/go-jose.v2-v2.2.3 | Apache 2.0 |
| gopkg.in/tchap/go-patricia.v2/patricia | gopkg.in/tchap/go-patricia.v2/patricia-v2.3.0 | MIT |
| gopkg.in/tomb.v1 | gopkg.in/tomb.v1-dd632973f1e7218eb1089048e0798ec9ae7dceb8 | BSD 3 |
| gopkg.in/yaml.v2 | gopkg.in/yaml.v2-v2.4.0 | Apache 2.0 |
| graphlib | graphlib-2.1.8.tgz | MIT |
| gud | gud-1.0.0.tgz | MIT |
| has | has-1.0.3.tgz | MIT |
| has-flag | has-flag-3.0.0.tgz | MIT |
| has-symbols | has-symbols-1.0.1.tgz | MIT |
| has-value | has-value-0.3.1.tgz | MIT |
| has-value | has-value-1.0.0.tgz | MIT |
| has-values | has-values-0.1.4.tgz | MIT |
| has-values | has-values-1.0.0.tgz | MIT |
| he | he-1.2.0.tgz | MIT |
| heap | heap-0.2.6.tgz | Python 2.0 |
| @babel/helper-validator-identifier | helper-validator-identifier-7.10.1.tgz | MIT |
| @babel/highlight | highlight-7.10.1.tgz | MIT |
| history | history-4.9.0.tgz | MIT |
| hogan.js | | Apache 2.0 |
| hogan.js | hogan.js-3.0.2.tgz | Apache 2.0 |
| hoist-non-react-statics | hoist-non-react-statics-3.1.0.tgz | BSD 3 |
| hoist-non-react-statics | hoist-non-react-statics-3.3.0.tgz | BSD 3 |
| @types/html-minifier-terser | html-minifier-terser-5.1.0.tgz | MIT |
| html-minifier-terser | html-minifier-terser-5.1.1.tgz | MIT |
| html-webpack-plugin | html-webpack-plugin-4.3.0.tgz | MIT |
| htmlparser2 | htmlparser2-3.10.1.tgz | MIT |
| humps | humps-2.0.1.tgz | MIT |
| icepick | icepick-1.3.0.tgz | MIT |
| iconv-lite | iconv-lite-0.4.23.tgz | MIT |
| idna | idna-2.10-py2.py3-none-any.whl | BSD 3 |
| idna | idna-2.7-py2.py3-none-any.whl | BSD 2 |
| immutable | immutable-3.8.2.tgz | MIT |
| importlib-metadata | importlib_metadata-2.1.1-py2.py3-none-any.whl | Apache 2.0 |
| inherits | inherits-2.0.4.tgz | ISC |
| instantsearch.js | instantsearch.production-4.4.1.min.js | MIT |
| invariant | invariant-2.2.2.tgz | BSD 3 |
| invariant | invariant-2.2.4.tgz | MIT |
| ip-regex | ip-regex-2.1.0.tgz | MIT |
| ip-regex | ip-regex-4.1.0.tgz | MIT |
| ipaddr.js | ipaddr.js-1.9.1.tgz | MIT |
| ipaddress | ipaddress-1.0.23-py2.py3-none-any.whl | Python 2.0 |
| is-accessor-descriptor | is-accessor-descriptor-0.1.6.tgz | MIT |
| is-accessor-descriptor | is-accessor-descriptor-1.0.0.tgz | MIT |
| is-buffer | is-buffer-1.1.6.tgz | MIT |
| is-callable | is-callable-1.1.5.tgz | MIT |
| is-cidr | is-cidr-3.1.0.tgz | BSD 2 |
| is-data-descriptor | is-data-descriptor-0.1.4.tgz | MIT |
| is-data-descriptor | is-data-descriptor-1.0.0.tgz | MIT |
| is-date-object | is-date-object-1.0.1.tgz | MIT |
| is-descriptor | is-descriptor-0.1.6.tgz | MIT |
| is-descriptor | is-descriptor-1.0.2.tgz | MIT |
| is-extendable | is-extendable-0.1.1.tgz | MIT |
| is-extendable | is-extendable-1.0.1.tgz | MIT |
| is-ip | is-ip-3.1.0.tgz | MIT |
| is-number | is-number-3.0.0.tgz | MIT |
| is-plain-object | is-plain-object-2.0.4.tgz | MIT |
| is-regex | is-regex-1.0.5.tgz | MIT |
| is-stream | is-stream-1.1.0.tgz | MIT |
| is-symbol | is-symbol-1.0.3.tgz | MIT |
| is-windows | is-windows-1.0.2.tgz | MIT |
| isarray | isarray-0.0.1.tgz | MIT |
| isarray | isarray-1.0.0.tgz | MIT |
| isobject | isobject-2.1.0.tgz | MIT |
| isobject | isobject-3.0.1.tgz | MIT |
| isomorphic-fetch | isomorphic-fetch-2.2.1.tgz | MIT |
| jquery | jquery-2.2.0.min.js | MIT |
| jquery | jquery-3.4.0.min.js | MIT |
| js-cookie | js-cookie-2.2.1.tgz | MIT |
| js-tokens | js-tokens-3.0.2.tgz | MIT |
| js-tokens | js-tokens-4.0.0.tgz | MIT |
| js-yaml | js-yaml-3.14.0.tgz | MIT |
| jsan | jsan-3.1.13.tgz | MIT |
| json5 | json5-1.0.1.tgz | MIT |
| jsonpickle | jsonpickle-2.0.0-py2.py3-none-any.whl | BSD 2 |
| jsrsasign | jsrsasign-5.1.0.tgz | MIT |
| k8s.io/api | k8s.io/api | Apache 2.0 |
| k8s.io/apiextensions-apiserver/ | k8s.io/apiextensions-apiserver/ | Apache 2.0 |
| k8s.io/apimachinery | k8s.io/apimachinery/ | Apache 2.0 |
| k8s.io/apiserver | k8s.io/apiserver | Apache 2.0 |
| k8s.io/client-go | k8s.io/client-go/ | Apache 2.0 |
| k8s.io/component-base/ | k8s.io/component-base/ | Apache 2.0 |
| k8s.io/klog | k8s.io/klog-v1.0.0 | Apache 2.0 |
| k8s.io/kube-aggregator/ | k8s.io/kube-aggregator/ | Apache 2.0 |
| k8s.io/kube-openapi/ | k8s.io/kube-openapi | Apache 2.0 |
| k8s.io/kubernetes/ | k8s.io/kubernetes/ | Apache 2.0 |
| k8s.io/utils/strings | k8s.io/utils/strings | Apache 2.0 |
| keycode | keycode-2.1.9.tgz | MIT |
| kind-of | kind-of-3.2.2.tgz | MIT |
| kind-of | kind-of-4.0.0.tgz | MIT |
| kind-of | kind-of-5.1.0.tgz | MIT |
| kind-of | kind-of-6.0.3.tgz | MIT |
| kube-controllers | kube-controllers-v3.0.11 | Tigera Proprietary |
| kube-controllers | kube-controllers-v3.16.6 | Tigera Proprietary |
| kubernetes | kubernetes-12.0.1-py2.py3-none-any.whl | Apache 2.0 |
| layout-base | layout-base-1.0.2.tgz | MIT |
| projectcalico | libcalico-go-v3.18.0-0.dev | Apache 2.0 |
| loader-utils | loader-utils-1.4.0.tgz | MIT |
| lodash | lodash-4.17.19.tgz | MIT |
| lodash | lodash-4.17.20.tgz | MIT |
| lodash-es | lodash-es-4.17.15.tgz | MIT |
| lodash.debounce | lodash.debounce-4.0.8.tgz | MIT |
| lodash.get | lodash.get-4.4.2.tgz | MIT |
| lodash.isequal | lodash.isequal-4.5.0.tgz | MIT |
| lodash.throttle | lodash.throttle-4.1.1.tgz | MIT |
| lodash.topath | lodash.topath-4.5.2.tgz | MIT |
| pimterry | loglevel-v1.6.8 | MIT |
| loose-envify | loose-envify-1.3.1.tgz | MIT |
| loose-envify | loose-envify-1.4.0.tgz | MIT |
| lower-case | lower-case-2.0.1.tgz | MIT |
| map-cache | map-cache-0.2.2.tgz | MIT |
| map-visit | map-visit-1.0.0.tgz | MIT |
| math-expression-evaluator | math-expression-evaluator-1.2.17.tgz | MIT |
| megacubo | megacubo-br-Megacubo_15.4.7_linux_ia32 | LGPL 2.1 |
| memoize-one | memoize-one-4.0.3.tgz | MIT |
| microevent.ts | microevent.ts-0.1.1.tgz | MIT |
| micromatch | micromatch-3.1.10.tgz | MIT |
| min-document | min-document-2.19.0.tgz | MIT |
| mini-create-react-context | mini-create-react-context-0.3.2.tgz | MIT |
| minimatch | minimatch-3.0.4.tgz | ISC |
| minimist | minimist-1.2.5.tgz | MIT |
| mixin-deep | mixin-deep-1.3.2.tgz | MIT |
| mkdirp | mkdirp-0.3.0.tgz | MIT X11 |
| mocha | mocha-1.6.0.js | MIT |
| moment | moment-2.22.2.tgz | MIT |
| more-itertools | more_itertools-5.0.0-py2-none-any.whl | MIT |
| ms | ms-2.0.0.tgz | MIT |
| nanoid | nanoid-2.1.7.tgz | MIT |
| nanomatch | nanomatch-1.2.13.tgz | MIT |
| netaddr | netaddr-0.7.19-py2.py3-none-any.whl | BSD 3 |
| no-case | no-case-3.0.3.tgz | MIT |
| @types/node | node-9.3.0.tgz | MIT |
| node-fetch | node-fetch-1.7.3.tgz | MIT |
| nodejs | node-v10.23.1 | Node.js |
| node | node-v3.17.2 | Tigera Proprietary |
| nopt | nopt-1.0.10.tgz | MIT |
| nose-timer | nose-timer-0.7.1.tar.gz | MIT |
| nose-parameterized | nose_parameterized-0.6.0-py2.py3-none-any.whl | BSD 3 |
| nth-check | nth-check-1.0.2.tgz | BSD 2 |
| oauthlib | oauthlib-3.1.0-py2.py3-none-any.whl | BSD 3 |
| object-assign | object-assign-4.1.1.tgz | MIT |
| object-copy | object-copy-0.1.0.tgz | MIT |
| object-inspect | object-inspect-1.7.0.tgz | MIT |
| object-keys | object-keys-1.0.11.tgz | MIT |
| object-keys | object-keys-1.1.1.tgz | MIT |
| object-visit | object-visit-1.0.1.tgz | MIT |
| object.assign | object.assign-4.1.0.tgz | MIT |
| object.getownpropertydescriptors | object.getownpropertydescriptors-2.1.0.tgz | MIT |
| object.pick | object.pick-1.3.0.tgz | MIT |
| oidc-client | oidc-client-1.4.1.tgz | Apache 2.0 |
| openshift | origin-v3.6.1 | Apache 2.0 |
| packaging | packaging-20.9-py2.py3-none-any.whl | BSD 2 |
| param-case | param-case-3.0.3.tgz | MIT |
| parse-duration | parse-duration-0.4.4.tgz | MIT |
| pascal-case | pascal-case-3.1.1.tgz | MIT |
| pascalcase | pascalcase-0.1.1.tgz | MIT |
| path-to-regexp | path-to-regexp-1.7.0.tgz | MIT |
| pathlib2 | pathlib2-2.3.5-py2.py3-none-any.whl | MIT |
| pegjs | pegjs-0.10.0.tgz | MIT |
| performance-now | performance-now-2.1.0.tgz | MIT |
| pluggy | pluggy-0.13.1-py2.py3-none-any.whl | MIT |
| popper.js | popper-1.16.0.js | MIT |
| popper.js | popper.js-1.16.1.tgz | MIT |
| posix-character-classes | posix-character-classes-0.1.1.tgz | MIT |
| pretty-error | pretty-error-2.1.1.tgz | MIT |
| process | process-0.11.10.tgz | MIT |
| promise | promise-7.3.1.tgz | MIT |
| prop-types | prop-types-15.5.10.js | BSD 3 |
| prop-types | prop-types-15.6.0.tgz | MIT |
| prop-types | prop-types-15.6.2.js | BSD 3 |
| prop-types | prop-types-15.6.2.tgz | MIT |
| prop-types | prop-types-15.7.2.tgz | MIT |
| prop-types-extra | prop-types-extra-1.0.1.tgz | MIT |
| py | py-1.10.0-py2.py3-none-any.whl | MIT |
| pyasn1 | pyasn1-0.4.8-py2.py3-none-any.whl | BSD 2 |
| pyasn1-modules | pyasn1_modules-0.2.8-py2.py3-none-any.whl | BSD 2 |
| pyparsing | pyparsing-2.4.7-py2.py3-none-any.whl | MIT |
| pytest | pytest-4.6.11-py2.py3-none-any.whl | MIT |
| python-dateutil | python_dateutil-2.8.1-py2.py3-none-any.whl | BSD 3 |
| PyYAML | PyYAML-5.4.1-cp27-cp27mu-manylinux1_x86_64.whl | MIT |
| raf | raf-3.4.0.tgz | MIT |
| raf-schd | raf-schd-4.0.0.tgz | MIT |
| raven-js | raven-js-3.22.1.tgz | BSD 2 |
| react | react-15.6.1.js | MIT |
| react | react-16.13.1.tgz | MIT |
| react | react-16.8.6.tgz | MIT |
| react-beautiful-dnd | react-beautiful-dnd-10.0.2.tgz | Apache 2.0 |
| react-bootstrap | react-bootstrap-0.32.1.tgz | MIT |
| react-codemirror2 | react-codemirror2-5.1.0.tgz | MIT |
| react-confirm | react-confirm-0.1.16.tgz | MIT |
| react-cytoscapejs | react-cytoscapejs-1.2.1.tgz | MIT |
| plotly | react-cytoscapejs-v1.2.1 | MIT |
| react-day-picker | react-day-picker-7.4.8.tgz | MIT |
| react-dom | react-dom-15.6.1.js | MIT |
| react-dom | react-dom-16.13.1.tgz | MIT |
| react-dom | react-dom-16.8.6.tgz | MIT |
| react-draggable | react-draggable-3.0.5.tgz | MIT |
| react-fast-compare | react-fast-compare-2.0.4.tgz | MIT |
| react-filter-box | react-filter-box-3.4.1.tgz | MIT |
| @fortawesome/react-fontawesome | react-fontawesome-0.1.3.tgz | MIT |
| react-grid-layout | react-grid-layout-0.16.3.tgz | MIT |
| react-hot-loader | react-hot-loader-4.12.21.tgz | MIT |
| react-input-autosize | react-input-autosize-2.1.2.tgz | MIT |
| JedWatson | react-input-autosize-v2.1.2 | MIT |
| react-is | react-is-16.6.3.tgz | MIT |
| react-is | react-is-16.8.6.tgz | MIT |
| react-is | react-is-16.9.0.tgz | MIT |
| react-json-pretty | react-json-pretty-2.2.0.tgz | MIT |
| react-lifecycles-compat | react-lifecycles-compat-3.0.4.tgz | MIT |
| react-native-segmented-control-tab | react-native-segmented-control-tab-3.2.1.tgz | MIT |
| react-new-window | react-new-window-0.1.2.tgz | MIT |
| react-notification-system | react-notification-system-0.2.17.tgz | MIT |
| react-overlays | react-overlays-0.8.3.tgz | MIT |
| react-prop-types | react-prop-types-0.4.0.tgz | MIT |
| react-querybuilder | react-querybuilder-3.0.0.tgz | MIT |
| react-redux | react-redux-5.1.1.tgz | MIT |
| react-redux | react-redux-7.1.1.tgz | MIT |
| react-redux-form | react-redux-form-1.16.5.tgz | MIT |
| react-resizable | react-resizable-1.7.5.tgz | MIT |
| react-resize-detector | react-resize-detector-2.3.0.tgz | MIT |
| react-router | react-router-5.0.1.tgz | MIT |
| react-router-dom | react-router-dom-5.0.1.tgz | MIT |
| react-show | react-show-2.0.4.tgz | MIT |
| react-smooth | react-smooth-1.0.2.tgz | MIT |
| react-split-pane | react-split-pane-0.1.92.tgz | MIT |
| react-style-proptype | react-style-proptype-3.2.2.tgz | MIT |
| react-switch | react-switch-5.0.0.tgz | MIT |
| react-table-6 | react-table-6-6.11.0.tgz | MIT |
| react-table | react-table-7.5.1.tgz | MIT |
| react-tooltip | react-tooltip-4.2.11.tgz | MIT |
| react-transition-group | react-transition-group-2.2.1.tgz | BSD 3 |
| react-transition-group | react-transition-group-2.7.1.tgz | BSD 3 |
| readable-stream | readable-stream-3.6.0.tgz | MIT |
| recharts | recharts-1.5.0.tgz | MIT |
| recharts-scale | recharts-scale-0.4.2.tgz | MIT |
| reduce-css-calc | reduce-css-calc-1.3.0.tgz | MIT |
| reduce-function-call | reduce-function-call-1.0.2.tgz | MIT |
| redux | redux-4.0.1.tgz | MIT |
| redux | redux-4.0.4.tgz | MIT |
| redux-immutable | redux-immutable-4.0.0.tgz | BSD 3 |
| redux-merge-immutable-reducers | redux-merge-immutable-reducers-0.1.4.tgz | MIT |
| redux-thunk | redux-thunk-2.3.0.tgz | MIT |
| reflect-metadata | reflect-metadata-0.1.13.tgz | Apache 2.0 |
| regenerator-runtime | regenerator-runtime-0.11.1.tgz | MIT |
| regenerator-runtime | regenerator-runtime-0.12.1.tgz | MIT |
| regenerator-runtime | regenerator-runtime-0.13.3.tgz | MIT |
| regenerator-runtime | regenerator-runtime-0.13.5.tgz | MIT |
| regex-not | regex-not-1.0.2.tgz | MIT |
| regexp-to-ast | regexp-to-ast-0.4.0.tgz | MIT |
| relateurl | relateurl-0.2.7.tgz | MIT |
| remotedev-serialize | remotedev-serialize-0.1.8.tgz | MIT |
| renderkid | renderkid-2.0.3.tgz | MIT |
| repeat-element | repeat-element-1.1.3.tgz | MIT |
| repeat-string | repeat-string-1.6.1.tgz | MIT |
| requests | requests-2.20.1-py2.py3-none-any.whl | Apache 2.0 |
| requests | requests-2.25.1-py2.py3-none-any.whl | Apache 2.0 |
| requests-oauthlib | requests_oauthlib-1.3.0-py2.py3-none-any.whl | ISC |
| reselect | reselect-2.5.4.tgz | MIT |
| @juggle/resize-observer | resize-observer-3.2.0.tgz | Apache 2.0 |
| resize-observer-polyfill | resize-observer-polyfill-1.5.1.tgz | MIT |
| resolve-pathname | resolve-pathname-2.2.0.tgz | MIT |
| resolve-url | resolve-url-0.2.1.tgz | MIT |
| ret | ret-0.1.15.tgz | MIT |
| rsa | rsa-4.5-py2.py3-none-any.whl | Apache 2.0 |
| @babel/runtime | runtime-7.1.5.tgz | MIT |
| @babel/runtime | runtime-7.5.5.tgz | MIT |
| @babel/runtime-corejs2 | runtime-corejs2-7.1.5.tgz | MIT |
| rw | rw-1.3.3.tgz | BSD 3 |
| safe-buffer | safe-buffer-5.2.1.tgz | MIT |
| safe-regex | safe-regex-1.1.0.tgz | MIT |
| safer-buffer | safer-buffer-2.1.2.tgz | MIT |
| scandir | scandir-1.10.0.tar.gz | BSD 3 |
| scheduler | scheduler-0.13.6.tgz | MIT |
| scheduler | scheduler-0.18.0.tgz | MIT |
| scheduler | scheduler-0.19.1.tgz | MIT |
| seamless-immutable | seamless-immutable-7.1.4.tgz | BSD 3 |
| semver | semver-5.7.1.tgz | ISC |
| set-value | set-value-2.0.1.tgz | MIT |
| setimmediate | setimmediate-1.0.5.tgz | MIT |
| setuptools | setuptools-44.1.1-py2.py3-none-any.whl | MIT |
| shallow-compare | shallow-compare-1.2.2.tgz | MIT |
| shallowequal | shallowequal-1.1.0.tgz | MIT |
| sigs.k8s.io/controller-runtime | sigs.k8s.io/controller-runtime-v0.7.0 | Apache 2.0 |
| sigs.k8s.io/kind/pkg/errors | sigs.k8s.io/kind/pkg/errors-v0.9.0 | Apache 2.0 |
| sigs.k8s.io/yaml | sigs.k8s.io/yaml-v1.2.0 | BSD 3 |
| simplejson | simplejson-3.13.2.tar.gz | Academic 2.1 |
| six | six-1.15.0-py2.py3-none-any.whl | MIT |
| snapdragon | snapdragon-0.8.2.tgz | MIT |
| snapdragon-node | snapdragon-node-2.1.1.tgz | MIT |
| snapdragon-util | snapdragon-util-3.0.1.tgz | MIT |
| soupsieve | soupsieve-1.9.6-py2.py3-none-any.whl | MIT |
| @types/source-list-map | source-list-map-0.1.2.tgz | MIT |
| source-map | source-map-0.5.7.tgz | BSD 3 |
| source-map | source-map-0.6.1.tgz | BSD 3 |
| source-map | source-map-0.7.3.tgz | BSD 3 |
| source-map-resolve | source-map-resolve-0.5.3.tgz | MIT |
| source-map-support | source-map-support-0.5.19.tgz | MIT |
| source-map-url | source-map-url-0.4.0.tgz | MIT |
| split-string | split-string-3.1.0.tgz | MIT |
| sprintf | sprintf-1.0.3.js | BSD |
| sprintf-js | sprintf-js-1.0.3.tgz | BSD 3 |
| static-extend | static-extend-0.1.2.tgz | MIT |
| string.prototype.trimend | string.prototype.trimend-1.0.1.tgz | MIT |
| string.prototype.trimleft | string.prototype.trimleft-2.1.2.tgz | MIT |
| string.prototype.trimright | string.prototype.trimright-2.1.2.tgz | MIT |
| string.prototype.trimstart | string.prototype.trimstart-1.0.1.tgz | MIT |
| string_decoder | string_decoder-1.3.0.tgz | MIT |
| strip-ansi | strip-ansi-3.0.1.tgz | MIT |
| strongly-connected-components | strongly-connected-components-1.0.1.tgz | MIT |
| supports-color | supports-color-5.5.0.tgz | MIT |
| swagger-ui | swagger-ui-bundle-3.37.0.js | Apache 2.0 |
| swagger-ui | swagger-ui-standalone-preset-3.37.0.js | Apache 2.0 |
| symbol-observable | symbol-observable-1.2.0.tgz | MIT |
| @types/tapable | tapable-1.0.5.tgz | MIT |
| tapable | tapable-1.1.3.tgz | MIT |
| Jstarfish | Technical-Learning-609d9d75ca68e30aee8757b26f52bf132c644be7 | ISC |
| termcolor | termcolor-1.1.0.tar.gz | MIT |
| terser | terser-4.7.0.tgz | BSD 2 |
| tiny-invariant | tiny-invariant-1.0.3.tgz | MIT |
| tiny-invariant | tiny-invariant-1.0.6.tgz | MIT |
| tiny-warning | tiny-warning-1.0.2.tgz | MIT |
| tiny-warning | tiny-warning-1.0.3.tgz | MIT |
| tippy.js | tippy-bundle.iife-5.2.1.min.js | MIT |
| tippy.js | tippy.js-6.2.5.tgz | MIT |
| to-object-path | to-object-path-0.3.0.tgz | MIT |
| to-regex | to-regex-3.0.2.tgz | MIT |
| to-regex-range | to-regex-range-2.1.1.tgz | MIT |
| tslib | tslib-1.10.0.js | Apache 2.0 |
| tslib | tslib-1.10.0.tgz | Apache 2.0 |
| tslib | tslib-1.13.0.js | Apache 2.0 |
| tslib | tslib.es6-1.10.0.js | Apache 2.0 |
| tslib | tslib.es6-1.13.0.js | Apache 2.0 |
| typescript-fsa | typescript-fsa-2.5.0.tgz | MIT |
| typescript-fsa-reducers | typescript-fsa-reducers-0.4.5.tgz | MIT |
| ua-parser-js | ua-parser-js-0.7.18.tgz | MIT |
| @types/uglify-js | uglify-js-3.9.2.tgz | MIT |
| uncontrollable | uncontrollable-4.1.0.tgz | MIT |
| union-value | union-value-1.0.1.tgz | MIT |
| unset-value | unset-value-1.0.0.tgz | MIT |
| urix | urix-0.1.0.tgz | MIT |
| urllib3 | urllib3-1.24.3-py2.py3-none-any.whl | MIT |
| urllib3 | urllib3-1.26.3-py2.py3-none-any.whl | MIT |
| use | use-3.1.1.tgz | MIT |
| util-deprecate | util-deprecate-1.0.2.tgz | MIT |
| util.promisify | util.promisify-1.0.0.tgz | MIT |
| utila | utila-0.4.0.tgz | MIT |
| uuid | uuid-7.0.3.tgz | MIT |
| validator | validator-10.4.0.tgz | MIT |
| value-equal | value-equal-0.4.0.tgz | MIT |
| warning | warning-3.0.0.tgz | BSD 3 |
| wcwidth | wcwidth-0.2.5-py2.py3-none-any.whl | MIT |
| @types/webpack | webpack-4.41.17.tgz | MIT |
| @types/webpack-sources | webpack-sources-1.4.0.tgz | MIT |
| websocket_client | websocket_client-0.57.0-py2.py3-none-any.whl | BSD 3 |
| whatwg-fetch | whatwg-fetch-2.0.4.tgz | MIT |
| worker-rpc | worker-rpc-0.1.1.tgz | MIT |
| zipp | zipp-1.2.0-py2.py3-none-any.whl | MIT |
| github.com/projectcalico/bird | github.com/projectcalico/bird/blob/v0.3.3 | GPL |
| egress-gateway | egress-gateway | Tigera Proprietary |